Merge branch 'master' of github.com:ethcore/parity into move_hash

This commit is contained in:
debris 2016-07-30 14:14:25 +02:00
commit e9b3740ea8
154 changed files with 6895 additions and 3541 deletions

166
.gitlab-ci.yml Normal file
View File

@ -0,0 +1,166 @@
# GitLab CI pipeline for Parity: one build job per target platform/toolchain.
stages:
  - build
  - deploy

variables:
  GIT_DEPTH: "3"        # shallow clone (3 commits) to speed up checkout
  SIMPLECOV: "true"
  RUST_BACKTRACE: "1"   # full backtraces if anything panics during the build

cache:
  # Separate cache per job and per branch so toolchains don't clobber each other.
  key: "$CI_BUILD_NAME/$CI_BUILD_REF_NAME"
  untracked: true
# Linux build against the Rust beta toolchain.
linux-beta:
  stage: build
  image: ethcore/rust:beta
  script:
    - cargo build --release --verbose
    - strip target/release/parity   # drop debug symbols from the shipped binary
  tags:
    - rust
    - rust-beta
  artifacts:
    paths:
      - target/release/parity
    name: "${CI_BUILD_NAME}_parity"
# Linux build against the Rust stable toolchain.
linux-stable:
  stage: build
  image: ethcore/rust:stable
  script:
    - cargo build --release --verbose
    - strip target/release/parity   # drop debug symbols from the shipped binary
  tags:
    - rust
    - rust-stable
  artifacts:
    paths:
      - target/release/parity
    name: "${CI_BUILD_NAME}_parity"
# Linux build against the Rust nightly toolchain.
linux-nightly:
  stage: build
  image: ethcore/rust:nightly
  script:
    - cargo build --release --verbose
    - strip target/release/parity   # drop debug symbols from the shipped binary
  tags:
    - rust
    - rust-nightly
  artifacts:
    paths:
      - target/release/parity
    name: "${CI_BUILD_NAME}_parity"
# CentOS build (older glibc); pins the system gcc/g++ explicitly for any
# C/C++ code compiled by build scripts.
linux-centos:
  stage: build
  image: ethcore/rust-centos:latest
  script:
    - export CXX="g++"
    - export CC="gcc"
    - cargo build --release --verbose
    - strip target/release/parity   # drop debug symbols from the shipped binary
  tags:
    - rust
    - rust-centos
  artifacts:
    paths:
      - target/release/parity
    name: "${CI_BUILD_NAME}_parity"
# Cross-compile for ARMv7 (hard-float). Writes the target's linker into
# .cargo/config so cargo links with the cross toolchain.
linux-armv7:
  stage: build
  image: ethcore/rust-arm:latest
  script:
    - export CXX=arm-linux-gnueabihf-g++
    - export CC=arm-linux-gnueabihf-gcc
    - mkdir -p .cargo
    - echo "[target.armv7-unknown-linux-gnueabihf]" >> .cargo/config
    - echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config
    - cat .cargo/config               # log the config for debugging
    - cargo build --target armv7-unknown-linux-gnueabihf --release --verbose
    - arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity
  tags:
    - rust
    - rust-arm
  artifacts:
    paths:
      - target/armv7-unknown-linux-gnueabihf/release/parity
    name: "${CI_BUILD_NAME}_parity"
# Cross-compile for generic ARM (hard-float). Writes the target's linker into
# .cargo/config so cargo links with the cross toolchain.
linux-arm:
  stage: build
  image: ethcore/rust-arm:latest
  script:
    - export CXX=arm-linux-gnueabihf-g++
    - export CC=arm-linux-gnueabihf-gcc
    - mkdir -p .cargo
    - echo "[target.arm-unknown-linux-gnueabihf]" >> .cargo/config
    - echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config
    - cat .cargo/config               # log the config for debugging
    - cargo build --target arm-unknown-linux-gnueabihf --release --verbose
    - arm-linux-gnueabihf-strip target/arm-unknown-linux-gnueabihf/release/parity
  tags:
    - rust
    - rust-arm
  artifacts:
    paths:
      - target/arm-unknown-linux-gnueabihf/release/parity
    name: "${CI_BUILD_NAME}_parity"
# Cross-compile for ARMv6 (soft-float gnueabi — note: no "hf" suffix here).
linux-armv6:
  stage: build
  image: ethcore/rust-arm:latest
  script:
    - export CXX=arm-linux-gnueabi-g++
    - export CC=arm-linux-gnueabi-gcc
    - mkdir -p .cargo
    - echo "[target.arm-unknown-linux-gnueabi]" >> .cargo/config
    - echo "linker= \"arm-linux-gnueabi-gcc\"" >> .cargo/config
    - cat .cargo/config               # log the config for debugging
    - cargo build --target arm-unknown-linux-gnueabi --release --verbose
    - arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity
  tags:
    - rust
    - rust-arm
  artifacts:
    paths:
      - target/arm-unknown-linux-gnueabi/release/parity
    name: "${CI_BUILD_NAME}_parity"
# Cross-compile for 64-bit ARM (aarch64).
linux-aarch64:
  stage: build
  image: ethcore/rust-arm:latest
  script:
    - export CXX=aarch64-linux-gnu-g++
    - export CC=aarch64-linux-gnu-gcc
    - mkdir -p .cargo
    - echo "[target.aarch64-unknown-linux-gnu]" >> .cargo/config
    - echo "linker= \"aarch64-linux-gnu-gcc\"" >> .cargo/config
    - cat .cargo/config               # log the config for debugging
    - cargo build --target aarch64-unknown-linux-gnu --release --verbose
    - aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity
  tags:
    - rust
    - rust-arm
  artifacts:
    paths:
      - target/aarch64-unknown-linux-gnu/release/parity
    name: "${CI_BUILD_NAME}_parity"
# macOS build. No `image:` — runs directly on a runner tagged "osx".
# NOTE(review): unlike the Linux jobs, the binary is not stripped here — confirm
# whether that is intentional.
darwin:
  stage: build
  script:
    - cargo build --release --verbose
  tags:
    - osx
  artifacts:
    paths:
      - target/release/parity
    name: "${CI_BUILD_NAME}_parity"
# Windows MSVC build. Sets up the VS2015/SDK include and lib paths by hand,
# then builds with the stable MSVC toolchain. Ships both the .exe and its
# .pdb debug symbols as artifacts.
windows:
  stage: build
  script:
    - set INCLUDE=C:\Program Files (x86)\Microsoft SDKs\Windows\v7.1A\Include;C:\vs2015\VC\include;C:\Program Files (x86)\Windows Kits\10\Include\10.0.10240.0\ucrt
    - set LIB=C:\vs2015\VC\lib;C:\Program Files (x86)\Windows Kits\10\Lib\10.0.10240.0\ucrt\x64
    - set RUST_BACKTRACE=1
    - SET                             # dump the environment for debugging
    - rustup default stable-x86_64-pc-windows-msvc
    - cargo build --release --verbose
  tags:
    - rust-windows
  artifacts:
    paths:
      - target/release/parity.exe
      - target/release/parity.pdb
    name: "${CI_BUILD_NAME}_parity"

View File

@ -16,9 +16,11 @@ git:
matrix: matrix:
include: include:
- rust: stable - rust: stable
env: FEATURES="--features travis-beta" RUN_TESTS="true" env: RUN_TESTS="true"
- rust: beta - rust: beta
env: FEATURES="--features travis-beta" RUN_COVERAGE="true" env: RUN_COVERAGE="true"
- rust: stable
env: RUN_DOCS="true"
env: env:
global: global:
@ -27,6 +29,7 @@ env:
- RUST_BACKTRACE="1" - RUST_BACKTRACE="1"
- RUN_TESTS="false" - RUN_TESTS="false"
- RUN_COVERAGE="false" - RUN_COVERAGE="false"
- RUN_DOCS="false"
# GH_TOKEN for documentation # GH_TOKEN for documentation
- secure: bumJASbZSU8bxJ0EyPUJmu16AiV9EXOpyOj86Jlq/Ty9CfwGqsSXt96uDyE+OUJf34RUFQMsw0nk37/zC4lcn6kqk2wpuH3N/o85Zo/cVZY/NusBWLQqtT5VbYWsV+u2Ua4Tmmsw8yVYQhYwU2ZOejNpflL+Cs9XGgORp1L+/gMRMC2y5Se6ZhwnKPQlRJ8LGsG1dzjQULxzADIt3/zuspNBS8a2urJwlHfGMkvHDoUWCviP/GXoSqw3TZR7FmKyxE19I8n9+iSvm9+oZZquvcgfUxMHn8Gq/b44UbPvjtFOg2yam4xdWXF/RyWCHdc/R9EHorSABeCbefIsm+zcUF3/YQxwpSxM4IZEeH2rTiC7dcrsKw3XsO16xFQz5YI5Bay+CT/wTdMmJd7DdYz7Dyf+pOvcM9WOf/zorxYWSBOMYy0uzbusU2iyIghQ82s7E/Ahg+WARtPgkuTLSB5aL1oCTBKHqQscMr7lo5Ti6RpWLxEdTQMBznc+bMr+6dEtkEcG9zqc6cE9XX+ox3wTU6+HVMfQ1ltCntJ4UKcw3A6INEbw9wgocQa812CIASQ2fE+SCAbz6JxBjIAlFUnD1lUB7S8PdMPwn9plfQgKQ2A5YZqg6FnBdf0rQXIJYxQWKHXj/rBHSUCT0tHACDlzTA+EwWggvkP5AGIxRxm8jhw= - secure: bumJASbZSU8bxJ0EyPUJmu16AiV9EXOpyOj86Jlq/Ty9CfwGqsSXt96uDyE+OUJf34RUFQMsw0nk37/zC4lcn6kqk2wpuH3N/o85Zo/cVZY/NusBWLQqtT5VbYWsV+u2Ua4Tmmsw8yVYQhYwU2ZOejNpflL+Cs9XGgORp1L+/gMRMC2y5Se6ZhwnKPQlRJ8LGsG1dzjQULxzADIt3/zuspNBS8a2urJwlHfGMkvHDoUWCviP/GXoSqw3TZR7FmKyxE19I8n9+iSvm9+oZZquvcgfUxMHn8Gq/b44UbPvjtFOg2yam4xdWXF/RyWCHdc/R9EHorSABeCbefIsm+zcUF3/YQxwpSxM4IZEeH2rTiC7dcrsKw3XsO16xFQz5YI5Bay+CT/wTdMmJd7DdYz7Dyf+pOvcM9WOf/zorxYWSBOMYy0uzbusU2iyIghQ82s7E/Ahg+WARtPgkuTLSB5aL1oCTBKHqQscMr7lo5Ti6RpWLxEdTQMBznc+bMr+6dEtkEcG9zqc6cE9XX+ox3wTU6+HVMfQ1ltCntJ4UKcw3A6INEbw9wgocQa812CIASQ2fE+SCAbz6JxBjIAlFUnD1lUB7S8PdMPwn9plfQgKQ2A5YZqg6FnBdf0rQXIJYxQWKHXj/rBHSUCT0tHACDlzTA+EwWggvkP5AGIxRxm8jhw=
- KCOV_CMD="./kcov-master/tmp/usr/local/bin/kcov" - KCOV_CMD="./kcov-master/tmp/usr/local/bin/kcov"
@ -61,13 +64,13 @@ install:
) )
script: script:
- if [ "$RUN_TESTS" = "true" ]; then ./test.sh; fi - if [ "$RUN_TESTS" = "true" ]; then ./test.sh --verbose; fi
- if [ "$RUN_COVERAGE" = "true" ]; then ./scripts/cov.sh "$KCOV_CMD"; fi - if [ "$RUN_COVERAGE" = "true" ]; then ./scripts/cov.sh "$KCOV_CMD"; fi
after_success: | after_success: |
[ $TRAVIS_BRANCH = master ] && [ $TRAVIS_BRANCH = master ] &&
[ $TRAVIS_PULL_REQUEST = false ] && [ $TRAVIS_PULL_REQUEST = false ] &&
[ $TRAVIS_RUST_VERSION = stable ] && [ "$RUN_DOCS" = "true" ] &&
./scripts/doc.sh && ./scripts/doc.sh &&
pip install --user ghp-import && pip install --user ghp-import &&
/home/travis/.local/bin/ghp-import -n target/doc && /home/travis/.local/bin/ghp-import -n target/doc &&

16
Cargo.lock generated
View File

@ -620,7 +620,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "json-ipc-server" name = "json-ipc-server"
version = "0.2.4" version = "0.2.4"
source = "git+https://github.com/ethcore/json-ipc-server.git#902b031b8f50a59ecb4f389cbec1d264a98556bc" source = "git+https://github.com/ethcore/json-ipc-server.git#93c2756f669c6a1872dec1ef755a0870f40c03c3"
dependencies = [ dependencies = [
"bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -899,7 +899,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "parity-dapps" name = "parity-dapps"
version = "0.6.0" version = "0.6.0"
source = "git+https://github.com/ethcore/parity-ui.git#fb88ca259fa8eda6e54d9a04b325abd9eec2818b" source = "git+https://github.com/ethcore/parity-ui.git#7120546d08d4d9eb648e255c04935002223d362f"
dependencies = [ dependencies = [
"aster 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", "aster 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
"glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
@ -913,7 +913,7 @@ dependencies = [
[[package]] [[package]]
name = "parity-dapps-home" name = "parity-dapps-home"
version = "0.6.0" version = "0.6.0"
source = "git+https://github.com/ethcore/parity-ui.git#fb88ca259fa8eda6e54d9a04b325abd9eec2818b" source = "git+https://github.com/ethcore/parity-ui.git#7120546d08d4d9eb648e255c04935002223d362f"
dependencies = [ dependencies = [
"parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)", "parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)",
] ]
@ -921,7 +921,7 @@ dependencies = [
[[package]] [[package]]
name = "parity-dapps-signer" name = "parity-dapps-signer"
version = "0.6.0" version = "0.6.0"
source = "git+https://github.com/ethcore/parity-ui.git#fb88ca259fa8eda6e54d9a04b325abd9eec2818b" source = "git+https://github.com/ethcore/parity-ui.git#7120546d08d4d9eb648e255c04935002223d362f"
dependencies = [ dependencies = [
"parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)", "parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)",
] ]
@ -929,7 +929,7 @@ dependencies = [
[[package]] [[package]]
name = "parity-dapps-status" name = "parity-dapps-status"
version = "0.6.0" version = "0.6.0"
source = "git+https://github.com/ethcore/parity-ui.git#fb88ca259fa8eda6e54d9a04b325abd9eec2818b" source = "git+https://github.com/ethcore/parity-ui.git#7120546d08d4d9eb648e255c04935002223d362f"
dependencies = [ dependencies = [
"parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)", "parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)",
] ]
@ -937,7 +937,7 @@ dependencies = [
[[package]] [[package]]
name = "parity-dapps-wallet" name = "parity-dapps-wallet"
version = "0.6.0" version = "0.6.0"
source = "git+https://github.com/ethcore/parity-ui.git#fb88ca259fa8eda6e54d9a04b325abd9eec2818b" source = "git+https://github.com/ethcore/parity-ui.git#7120546d08d4d9eb648e255c04935002223d362f"
dependencies = [ dependencies = [
"parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)", "parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)",
] ]
@ -1094,7 +1094,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "rocksdb" name = "rocksdb"
version = "0.4.5" version = "0.4.5"
source = "git+https://github.com/ethcore/rust-rocksdb#dd597245bfcb621c6ffc45478e1fda0b05d2f409" source = "git+https://github.com/ethcore/rust-rocksdb#eadce7f74cfe92b99ce63a77af425b47857239b8"
dependencies = [ dependencies = [
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
"rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)", "rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)",
@ -1103,7 +1103,7 @@ dependencies = [
[[package]] [[package]]
name = "rocksdb-sys" name = "rocksdb-sys"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/ethcore/rust-rocksdb#dd597245bfcb621c6ffc45478e1fda0b05d2f409" source = "git+https://github.com/ethcore/rust-rocksdb#eadce7f74cfe92b99ce63a77af425b47857239b8"
dependencies = [ dependencies = [
"gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)", "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",

View File

@ -57,18 +57,12 @@ ui = ["dapps", "ethcore-signer/ui"]
use-precompiled-js = ["ethcore-dapps/use-precompiled-js", "ethcore-signer/use-precompiled-js"] use-precompiled-js = ["ethcore-dapps/use-precompiled-js", "ethcore-signer/use-precompiled-js"]
dapps = ["ethcore-dapps"] dapps = ["ethcore-dapps"]
dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"] dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"]
travis-beta = ["ethcore/json-tests"]
travis-nightly = ["ethcore/json-tests", "dev"]
ipc = ["ethcore/ipc"] ipc = ["ethcore/ipc"]
[[bin]] [[bin]]
path = "parity/main.rs" path = "parity/main.rs"
name = "parity" name = "parity"
[[bin]]
path = "parity/sync/main.rs"
name = "sync"
[profile.release] [profile.release]
debug = true debug = true
lto = false lto = false

View File

@ -31,10 +31,10 @@ install:
build: off build: off
test_script: test_script:
- cargo test --verbose --release --no-default-features - cargo test --verbose --release
after_test: after_test:
- cargo build --verbose --release --no-default-features - cargo build --verbose --release
- ps: if($env:cert) { Start-FileDownload $env:cert -FileName $env:keyfile } - ps: if($env:cert) { Start-FileDownload $env:cert -FileName $env:keyfile }
- ps: if($env:cert) { signtool sign /f $env:keyfile /p $env:certpass target\release\parity.exe } - ps: if($env:cert) { signtool sign /f $env:keyfile /p $env:certpass target\release\parity.exe }
- makensis.exe nsis\installer.nsi - makensis.exe nsis\installer.nsi

View File

@ -18,7 +18,9 @@
"accountStartNonce": "0x00", "accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID" : "0x1" "networkID" : "0x1",
"forkBlock": "0x1d4c00",
"forkCanonHash": "0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f"
}, },
"genesis": { "genesis": {
"seal": { "seal": {

View File

@ -137,7 +137,9 @@
"accountStartNonce": "0x00", "accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID" : "0x1" "networkID" : "0x1",
"forkBlock": "0x1d4c00",
"forkCanonHash": "0x4985f5ca3d2afbec36529aa96f74de3cc10a2a4a6c44f2157a57d2c6059a11bb"
}, },
"genesis": { "genesis": {
"seal": { "seal": {

View File

@ -61,8 +61,8 @@ impl Account {
nonce: pod.nonce, nonce: pod.nonce,
storage_root: SHA3_NULL_RLP, storage_root: SHA3_NULL_RLP,
storage_overlay: RefCell::new(pod.storage.into_iter().map(|(k, v)| (k, (Filth::Dirty, v))).collect()), storage_overlay: RefCell::new(pod.storage.into_iter().map(|(k, v)| (k, (Filth::Dirty, v))).collect()),
code_hash: Some(pod.code.sha3()), code_hash: pod.code.as_ref().map(|c| c.sha3()),
code_cache: pod.code, code_cache: pod.code.as_ref().map_or_else(|| { warn!("POD account with unknown code is being created! Assuming no code."); vec![] }, |c| c.clone()),
filth: Filth::Dirty, filth: Filth::Dirty,
} }
} }
@ -288,6 +288,16 @@ mod tests {
use super::*; use super::*;
use account_db::*; use account_db::*;
#[test]
fn account_compress() {
let raw = Account::new_basic(2.into(), 4.into()).rlp();
let rlp = UntrustedRlp::new(&raw);
let compact_vec = rlp.compress(RlpType::Snapshot).to_vec();
assert!(raw.len() > compact_vec.len());
let again_raw = UntrustedRlp::new(&compact_vec).decompress(RlpType::Snapshot);
assert_eq!(raw, again_raw.to_vec());
}
#[test] #[test]
fn storage_at() { fn storage_at() {
let mut db = MemoryDB::new(); let mut db = MemoryDB::new();

View File

@ -192,7 +192,7 @@ impl AccountProvider {
pub fn accounts_info(&self) -> Result<HashMap<H160, AccountMeta>, Error> { pub fn accounts_info(&self) -> Result<HashMap<H160, AccountMeta>, Error> {
let r: HashMap<H160, AccountMeta> = self.sstore.accounts() let r: HashMap<H160, AccountMeta> = self.sstore.accounts()
.into_iter() .into_iter()
.map(|a| (H160(a.clone().into()), self.account_meta(a).unwrap_or(Default::default()))) .map(|a| (H160(a.clone().into()), self.account_meta(a).unwrap_or_else(|_| Default::default())))
.collect(); .collect();
Ok(r) Ok(r)
} }

View File

@ -17,6 +17,7 @@
//! Evm input params. //! Evm input params.
use common::*; use common::*;
use ethjson; use ethjson;
use types::executed::CallType;
/// Transaction value /// Transaction value
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
@ -58,7 +59,10 @@ pub struct ActionParams {
/// Code being executed. /// Code being executed.
pub code: Option<Bytes>, pub code: Option<Bytes>,
/// Input data. /// Input data.
pub data: Option<Bytes> pub data: Option<Bytes>,
/// Type of call
pub call_type: CallType,
} }
impl Default for ActionParams { impl Default for ActionParams {
@ -73,16 +77,18 @@ impl Default for ActionParams {
gas_price: U256::zero(), gas_price: U256::zero(),
value: ActionValue::Transfer(U256::zero()), value: ActionValue::Transfer(U256::zero()),
code: None, code: None,
data: None data: None,
call_type: CallType::None,
} }
} }
} }
impl From<ethjson::vm::Transaction> for ActionParams { impl From<ethjson::vm::Transaction> for ActionParams {
fn from(t: ethjson::vm::Transaction) -> Self { fn from(t: ethjson::vm::Transaction) -> Self {
let address: Address = t.address.into();
ActionParams { ActionParams {
code_address: Address::new(), code_address: Address::new(),
address: t.address.into(), address: address,
sender: t.sender.into(), sender: t.sender.into(),
origin: t.origin.into(), origin: t.origin.into(),
code: Some(t.code.into()), code: Some(t.code.into()),
@ -90,6 +96,7 @@ impl From<ethjson::vm::Transaction> for ActionParams {
gas: t.gas.into(), gas: t.gas.into(),
gas_price: t.gas_price.into(), gas_price: t.gas_price.into(),
value: ActionValue::Transfer(t.value.into()), value: ActionValue::Transfer(t.value.into()),
call_type: match address.is_zero() { true => CallType::None, false => CallType::Call }, // TODO @debris is this correct?
} }
} }
} }

View File

@ -17,10 +17,10 @@
//! Blockchain block. //! Blockchain block.
use common::*; use common::*;
use engine::*; use engines::Engine;
use state::*; use state::*;
use verification::PreverifiedBlock; use verification::PreverifiedBlock;
use trace::Trace; use trace::FlatTrace;
use evm::Factory as EvmFactory; use evm::Factory as EvmFactory;
/// A block, encoded as it is on the block chain. /// A block, encoded as it is on the block chain.
@ -76,7 +76,7 @@ pub struct ExecutedBlock {
receipts: Vec<Receipt>, receipts: Vec<Receipt>,
transactions_set: HashSet<H256>, transactions_set: HashSet<H256>,
state: State, state: State,
traces: Option<Vec<Trace>>, traces: Option<Vec<Vec<FlatTrace>>>,
} }
/// A set of references to `ExecutedBlock` fields that are publicly accessible. /// A set of references to `ExecutedBlock` fields that are publicly accessible.
@ -92,7 +92,7 @@ pub struct BlockRefMut<'a> {
/// State. /// State.
pub state: &'a mut State, pub state: &'a mut State,
/// Traces. /// Traces.
pub traces: &'a Option<Vec<Trace>>, pub traces: &'a Option<Vec<Vec<FlatTrace>>>,
} }
/// A set of immutable references to `ExecutedBlock` fields that are publicly accessible. /// A set of immutable references to `ExecutedBlock` fields that are publicly accessible.
@ -108,7 +108,7 @@ pub struct BlockRef<'a> {
/// State. /// State.
pub state: &'a State, pub state: &'a State,
/// Traces. /// Traces.
pub traces: &'a Option<Vec<Trace>>, pub traces: &'a Option<Vec<Vec<FlatTrace>>>,
} }
impl ExecutedBlock { impl ExecutedBlock {
@ -169,7 +169,7 @@ pub trait IsBlock {
fn receipts(&self) -> &[Receipt] { &self.block().receipts } fn receipts(&self) -> &[Receipt] { &self.block().receipts }
/// Get all information concerning transaction tracing in this block. /// Get all information concerning transaction tracing in this block.
fn traces(&self) -> &Option<Vec<Trace>> { &self.block().traces } fn traces(&self) -> &Option<Vec<Vec<FlatTrace>>> { &self.block().traces }
/// Get all uncles in this block. /// Get all uncles in this block.
fn uncles(&self) -> &[Header] { &self.block().base.uncles } fn uncles(&self) -> &[Header] { &self.block().base.uncles }
@ -337,9 +337,9 @@ impl<'x> OpenBlock<'x> {
self.block.transactions_set.insert(h.unwrap_or_else(||t.hash())); self.block.transactions_set.insert(h.unwrap_or_else(||t.hash()));
self.block.base.transactions.push(t); self.block.base.transactions.push(t);
let t = outcome.trace; let t = outcome.trace;
self.block.traces.as_mut().map(|traces| traces.push(t.expect("self.block.traces.is_some(): so we must be tracing: qed"))); self.block.traces.as_mut().map(|traces| traces.push(t));
self.block.receipts.push(outcome.receipt); self.block.receipts.push(outcome.receipt);
Ok(&self.block.receipts.last().unwrap()) Ok(self.block.receipts.last().unwrap())
} }
Err(x) => Err(From::from(x)) Err(x) => Err(From::from(x))
} }

View File

@ -18,10 +18,11 @@
//! Sorts them ready for blockchain insertion. //! Sorts them ready for blockchain insertion.
use std::thread::{JoinHandle, self}; use std::thread::{JoinHandle, self};
use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};
use std::sync::{Condvar as SCondvar, Mutex as SMutex};
use util::*; use util::*;
use verification::*; use verification::*;
use error::*; use error::*;
use engine::Engine; use engines::Engine;
use views::*; use views::*;
use header::*; use header::*;
use service::*; use service::*;
@ -36,7 +37,7 @@ const MIN_MEM_LIMIT: usize = 16384;
const MIN_QUEUE_LIMIT: usize = 512; const MIN_QUEUE_LIMIT: usize = 512;
/// Block queue configuration /// Block queue configuration
#[derive(Debug)] #[derive(Debug, PartialEq)]
pub struct BlockQueueConfig { pub struct BlockQueueConfig {
/// Maximum number of blocks to keep in unverified queue. /// Maximum number of blocks to keep in unverified queue.
/// When the limit is reached, is_full returns true. /// When the limit is reached, is_full returns true.
@ -80,12 +81,12 @@ impl BlockQueueInfo {
pub struct BlockQueue { pub struct BlockQueue {
panic_handler: Arc<PanicHandler>, panic_handler: Arc<PanicHandler>,
engine: Arc<Box<Engine>>, engine: Arc<Box<Engine>>,
more_to_verify: Arc<Condvar>, more_to_verify: Arc<SCondvar>,
verification: Arc<Verification>, verification: Arc<Verification>,
verifiers: Vec<JoinHandle<()>>, verifiers: Vec<JoinHandle<()>>,
deleting: Arc<AtomicBool>, deleting: Arc<AtomicBool>,
ready_signal: Arc<QueueSignal>, ready_signal: Arc<QueueSignal>,
empty: Arc<Condvar>, empty: Arc<SCondvar>,
processing: RwLock<HashSet<H256>>, processing: RwLock<HashSet<H256>>,
max_queue_size: usize, max_queue_size: usize,
max_mem_use: usize, max_mem_use: usize,
@ -133,6 +134,8 @@ struct Verification {
verified: Mutex<VecDeque<PreverifiedBlock>>, verified: Mutex<VecDeque<PreverifiedBlock>>,
verifying: Mutex<VecDeque<VerifyingBlock>>, verifying: Mutex<VecDeque<VerifyingBlock>>,
bad: Mutex<HashSet<H256>>, bad: Mutex<HashSet<H256>>,
more_to_verify: SMutex<()>,
empty: SMutex<()>,
} }
impl BlockQueue { impl BlockQueue {
@ -143,15 +146,18 @@ impl BlockQueue {
verified: Mutex::new(VecDeque::new()), verified: Mutex::new(VecDeque::new()),
verifying: Mutex::new(VecDeque::new()), verifying: Mutex::new(VecDeque::new()),
bad: Mutex::new(HashSet::new()), bad: Mutex::new(HashSet::new()),
more_to_verify: SMutex::new(()),
empty: SMutex::new(()),
}); });
let more_to_verify = Arc::new(Condvar::new()); let more_to_verify = Arc::new(SCondvar::new());
let deleting = Arc::new(AtomicBool::new(false)); let deleting = Arc::new(AtomicBool::new(false));
let ready_signal = Arc::new(QueueSignal { let ready_signal = Arc::new(QueueSignal {
deleting: deleting.clone(), deleting: deleting.clone(),
signalled: AtomicBool::new(false), signalled: AtomicBool::new(false),
message_channel: message_channel message_channel: message_channel
}); });
let empty = Arc::new(Condvar::new()); let empty = Arc::new(SCondvar::new());
let panic_handler = PanicHandler::new_in_arc(); let panic_handler = PanicHandler::new_in_arc();
let mut verifiers: Vec<JoinHandle<()>> = Vec::new(); let mut verifiers: Vec<JoinHandle<()>> = Vec::new();
@ -190,17 +196,17 @@ impl BlockQueue {
} }
} }
fn verify(verification: Arc<Verification>, engine: Arc<Box<Engine>>, wait: Arc<Condvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<Condvar>) { fn verify(verification: Arc<Verification>, engine: Arc<Box<Engine>>, wait: Arc<SCondvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<SCondvar>) {
while !deleting.load(AtomicOrdering::Acquire) { while !deleting.load(AtomicOrdering::Acquire) {
{ {
let mut unverified = verification.unverified.lock(); let mut more_to_verify = verification.more_to_verify.lock().unwrap();
if unverified.is_empty() && verification.verifying.lock().is_empty() { if verification.unverified.lock().is_empty() && verification.verifying.lock().is_empty() {
empty.notify_all(); empty.notify_all();
} }
while unverified.is_empty() && !deleting.load(AtomicOrdering::Acquire) { while verification.unverified.lock().is_empty() && !deleting.load(AtomicOrdering::Acquire) {
wait.wait(&mut unverified); more_to_verify = wait.wait(more_to_verify).unwrap();
} }
if deleting.load(AtomicOrdering::Acquire) { if deleting.load(AtomicOrdering::Acquire) {
@ -276,18 +282,18 @@ impl BlockQueue {
/// Wait for unverified queue to be empty /// Wait for unverified queue to be empty
pub fn flush(&self) { pub fn flush(&self) {
let mut unverified = self.verification.unverified.lock(); let mut lock = self.verification.empty.lock().unwrap();
while !unverified.is_empty() || !self.verification.verifying.lock().is_empty() { while !self.verification.unverified.lock().is_empty() || !self.verification.verifying.lock().is_empty() {
self.empty.wait(&mut unverified); lock = self.empty.wait(lock).unwrap();
} }
} }
/// Check if the block is currently in the queue /// Check if the block is currently in the queue
pub fn block_status(&self, hash: &H256) -> BlockStatus { pub fn block_status(&self, hash: &H256) -> BlockStatus {
if self.processing.read().contains(&hash) { if self.processing.read().contains(hash) {
return BlockStatus::Queued; return BlockStatus::Queued;
} }
if self.verification.bad.lock().contains(&hash) { if self.verification.bad.lock().contains(hash) {
return BlockStatus::Bad; return BlockStatus::Bad;
} }
BlockStatus::Unknown BlockStatus::Unknown
@ -340,7 +346,7 @@ impl BlockQueue {
bad.reserve(block_hashes.len()); bad.reserve(block_hashes.len());
for hash in block_hashes { for hash in block_hashes {
bad.insert(hash.clone()); bad.insert(hash.clone());
processing.remove(&hash); processing.remove(hash);
} }
let mut new_verified = VecDeque::new(); let mut new_verified = VecDeque::new();
@ -362,7 +368,7 @@ impl BlockQueue {
} }
let mut processing = self.processing.write(); let mut processing = self.processing.write();
for hash in block_hashes { for hash in block_hashes {
processing.remove(&hash); processing.remove(hash);
} }
} }

View File

@ -14,6 +14,7 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::bytes::Bytes;
use util::numbers::{U256,H256}; use util::numbers::{U256,H256};
use header::BlockNumber; use header::BlockNumber;
@ -25,5 +26,7 @@ pub struct BestBlock {
/// Best block number. /// Best block number.
pub number: BlockNumber, pub number: BlockNumber,
/// Best block total difficulty. /// Best block total difficulty.
pub total_difficulty: U256 pub total_difficulty: U256,
/// Best block uncompressed bytes
pub block: Bytes,
} }

View File

@ -31,6 +31,7 @@ use types::tree_route::TreeRoute;
use blockchain::update::ExtrasUpdate; use blockchain::update::ExtrasUpdate;
use blockchain::{CacheSize, ImportRoute, Config}; use blockchain::{CacheSize, ImportRoute, Config};
use db::{Writable, Readable, CacheUpdatePolicy}; use db::{Writable, Readable, CacheUpdatePolicy};
use client::{DB_COL_EXTRA, DB_COL_HEADERS, DB_COL_BODIES};
const LOG_BLOOMS_LEVELS: usize = 3; const LOG_BLOOMS_LEVELS: usize = 3;
const LOG_BLOOMS_ELEMENTS_PER_INDEX: usize = 16; const LOG_BLOOMS_ELEMENTS_PER_INDEX: usize = 16;
@ -58,29 +59,37 @@ pub trait BlockProvider {
/// Get the partial-header of a block. /// Get the partial-header of a block.
fn block_header(&self, hash: &H256) -> Option<Header> { fn block_header(&self, hash: &H256) -> Option<Header> {
self.block(hash).map(|bytes| BlockView::new(&bytes).header()) self.block_header_data(hash).map(|header| decode(&header))
} }
/// Get the header RLP of a block.
fn block_header_data(&self, hash: &H256) -> Option<Bytes>;
/// Get the block body (uncles and transactions).
fn block_body(&self, hash: &H256) -> Option<Bytes>;
/// Get a list of uncles for a given block. /// Get a list of uncles for a given block.
/// Returns None if block does not exist. /// Returns None if block does not exist.
fn uncles(&self, hash: &H256) -> Option<Vec<Header>> { fn uncles(&self, hash: &H256) -> Option<Vec<Header>> {
self.block(hash).map(|bytes| BlockView::new(&bytes).uncles()) self.block_body(hash).map(|bytes| BodyView::new(&bytes).uncles())
} }
/// Get a list of uncle hashes for a given block. /// Get a list of uncle hashes for a given block.
/// Returns None if block does not exist. /// Returns None if block does not exist.
fn uncle_hashes(&self, hash: &H256) -> Option<Vec<H256>> { fn uncle_hashes(&self, hash: &H256) -> Option<Vec<H256>> {
self.block(hash).map(|bytes| BlockView::new(&bytes).uncle_hashes()) self.block_body(hash).map(|bytes| BodyView::new(&bytes).uncle_hashes())
} }
/// Get the number of given block's hash. /// Get the number of given block's hash.
fn block_number(&self, hash: &H256) -> Option<BlockNumber> { fn block_number(&self, hash: &H256) -> Option<BlockNumber> {
self.block(hash).map(|bytes| BlockView::new(&bytes).header_view().number()) self.block_details(hash).map(|details| details.number)
} }
/// Get transaction with given transaction hash. /// Get transaction with given transaction hash.
fn transaction(&self, address: &TransactionAddress) -> Option<LocalizedTransaction> { fn transaction(&self, address: &TransactionAddress) -> Option<LocalizedTransaction> {
self.block(&address.block_hash).and_then(|bytes| BlockView::new(&bytes).localized_transaction_at(address.index)) self.block_body(&address.block_hash)
.and_then(|bytes| self.block_number(&address.block_hash)
.and_then(|n| BodyView::new(&bytes).localized_transaction_at(&address.block_hash, n, address.index)))
} }
/// Get transaction receipt. /// Get transaction receipt.
@ -91,7 +100,9 @@ pub trait BlockProvider {
/// Get a list of transactions for a given block. /// Get a list of transactions for a given block.
/// Returns None if block does not exist. /// Returns None if block does not exist.
fn transactions(&self, hash: &H256) -> Option<Vec<LocalizedTransaction>> { fn transactions(&self, hash: &H256) -> Option<Vec<LocalizedTransaction>> {
self.block(hash).map(|bytes| BlockView::new(&bytes).localized_transactions()) self.block_body(hash)
.and_then(|bytes| self.block_number(hash)
.map(|n| BodyView::new(&bytes).localized_transactions(hash, n)))
} }
/// Returns reference to genesis hash. /// Returns reference to genesis hash.
@ -110,7 +121,8 @@ pub trait BlockProvider {
#[derive(Debug, Hash, Eq, PartialEq, Clone)] #[derive(Debug, Hash, Eq, PartialEq, Clone)]
enum CacheID { enum CacheID {
Block(H256), BlockHeader(H256),
BlockBody(H256),
BlockDetails(H256), BlockDetails(H256),
BlockHashes(BlockNumber), BlockHashes(BlockNumber),
TransactionAddresses(H256), TransactionAddresses(H256),
@ -127,7 +139,7 @@ impl bc::group::BloomGroupDatabase for BlockChain {
fn blooms_at(&self, position: &bc::group::GroupPosition) -> Option<bc::group::BloomGroup> { fn blooms_at(&self, position: &bc::group::GroupPosition) -> Option<bc::group::BloomGroup> {
let position = LogGroupPosition::from(position.clone()); let position = LogGroupPosition::from(position.clone());
self.note_used(CacheID::BlocksBlooms(position.clone())); self.note_used(CacheID::BlocksBlooms(position.clone()));
self.extras_db.read_with_cache(&self.blocks_blooms, &position).map(Into::into) self.db.read_with_cache(DB_COL_EXTRA, &self.blocks_blooms, &position).map(Into::into)
} }
} }
@ -143,7 +155,8 @@ pub struct BlockChain {
best_block: RwLock<BestBlock>, best_block: RwLock<BestBlock>,
// block cache // block cache
blocks: RwLock<HashMap<H256, Bytes>>, block_headers: RwLock<HashMap<H256, Bytes>>,
block_bodies: RwLock<HashMap<H256, Bytes>>,
// extra caches // extra caches
block_details: RwLock<HashMap<H256, BlockDetails>>, block_details: RwLock<HashMap<H256, BlockDetails>>,
@ -152,39 +165,96 @@ pub struct BlockChain {
blocks_blooms: RwLock<HashMap<LogGroupPosition, BloomGroup>>, blocks_blooms: RwLock<HashMap<LogGroupPosition, BloomGroup>>,
block_receipts: RwLock<HashMap<H256, BlockReceipts>>, block_receipts: RwLock<HashMap<H256, BlockReceipts>>,
extras_db: Database, db: Arc<Database>,
blocks_db: Database,
cache_man: RwLock<CacheManager>, cache_man: RwLock<CacheManager>,
insert_lock: Mutex<()>
} }
impl BlockProvider for BlockChain { impl BlockProvider for BlockChain {
/// Returns true if the given block is known /// Returns true if the given block is known
/// (though not necessarily a part of the canon chain). /// (though not necessarily a part of the canon chain).
fn is_known(&self, hash: &H256) -> bool { fn is_known(&self, hash: &H256) -> bool {
self.extras_db.exists_with_cache(&self.block_details, hash) self.db.exists_with_cache(DB_COL_EXTRA, &self.block_details, hash)
} }
/// Get raw block data /// Get raw block data
fn block(&self, hash: &H256) -> Option<Bytes> { fn block(&self, hash: &H256) -> Option<Bytes> {
match (self.block_header_data(hash), self.block_body(hash)) {
(Some(header), Some(body)) => {
let mut block = RlpStream::new_list(3);
let body_rlp = Rlp::new(&body);
block.append_raw(&header, 1);
block.append_raw(body_rlp.at(0).as_raw(), 1);
block.append_raw(body_rlp.at(1).as_raw(), 1);
Some(block.out())
},
_ => None,
}
}
/// Get block header data
fn block_header_data(&self, hash: &H256) -> Option<Bytes> {
// Check cache first
{ {
let read = self.blocks.read(); let read = self.block_headers.read();
if let Some(v) = read.get(hash) { if let Some(v) = read.get(hash) {
return Some(v.clone()); return Some(v.clone());
} }
} }
let opt = self.blocks_db.get(hash) // Check if it's the best block
{
let best_block = self.best_block.read();
if &best_block.hash == hash {
return Some(Rlp::new(&best_block.block).at(0).as_raw().to_vec());
}
}
// Read from DB and populate cache
let opt = self.db.get(DB_COL_HEADERS, hash)
.expect("Low level database error. Some issue with disk?"); .expect("Low level database error. Some issue with disk?");
self.note_used(CacheID::Block(hash.clone())); self.note_used(CacheID::BlockHeader(hash.clone()));
match opt { match opt {
Some(b) => { Some(b) => {
let bytes: Bytes = b.to_vec(); let bytes: Bytes = UntrustedRlp::new(&b).decompress(RlpType::Blocks).to_vec();
let mut write = self.blocks.write(); let mut write = self.block_headers.write();
write.insert(hash.clone(), bytes.clone());
Some(bytes)
},
None => None
}
}
/// Get block body data
fn block_body(&self, hash: &H256) -> Option<Bytes> {
// Check cache first
{
let read = self.block_bodies.read();
if let Some(v) = read.get(hash) {
return Some(v.clone());
}
}
// Check if it's the best block
{
let best_block = self.best_block.read();
if &best_block.hash == hash {
return Some(Self::block_to_body(&best_block.block));
}
}
// Read from DB and populate cache
let opt = self.db.get(DB_COL_BODIES, hash)
.expect("Low level database error. Some issue with disk?");
self.note_used(CacheID::BlockBody(hash.clone()));
match opt {
Some(b) => {
let bytes: Bytes = UntrustedRlp::new(&b).decompress(RlpType::Blocks).to_vec();
let mut write = self.block_bodies.write();
write.insert(hash.clone(), bytes.clone()); write.insert(hash.clone(), bytes.clone());
Some(bytes) Some(bytes)
}, },
@ -195,25 +265,25 @@ impl BlockProvider for BlockChain {
/// Get the familial details concerning a block. /// Get the familial details concerning a block.
fn block_details(&self, hash: &H256) -> Option<BlockDetails> { fn block_details(&self, hash: &H256) -> Option<BlockDetails> {
self.note_used(CacheID::BlockDetails(hash.clone())); self.note_used(CacheID::BlockDetails(hash.clone()));
self.extras_db.read_with_cache(&self.block_details, hash) self.db.read_with_cache(DB_COL_EXTRA, &self.block_details, hash)
} }
/// Get the hash of given block's number. /// Get the hash of given block's number.
fn block_hash(&self, index: BlockNumber) -> Option<H256> { fn block_hash(&self, index: BlockNumber) -> Option<H256> {
self.note_used(CacheID::BlockHashes(index)); self.note_used(CacheID::BlockHashes(index));
self.extras_db.read_with_cache(&self.block_hashes, &index) self.db.read_with_cache(DB_COL_EXTRA, &self.block_hashes, &index)
} }
/// Get the address of transaction with given hash. /// Get the address of transaction with given hash.
fn transaction_address(&self, hash: &H256) -> Option<TransactionAddress> { fn transaction_address(&self, hash: &H256) -> Option<TransactionAddress> {
self.note_used(CacheID::TransactionAddresses(hash.clone())); self.note_used(CacheID::TransactionAddresses(hash.clone()));
self.extras_db.read_with_cache(&self.transaction_addresses, hash) self.db.read_with_cache(DB_COL_EXTRA, &self.transaction_addresses, hash)
} }
/// Get receipts of block with given hash. /// Get receipts of block with given hash.
fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts> { fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts> {
self.note_used(CacheID::BlockReceipts(hash.clone())); self.note_used(CacheID::BlockReceipts(hash.clone()));
self.extras_db.read_with_cache(&self.block_receipts, hash) self.db.read_with_cache(DB_COL_EXTRA, &self.block_receipts, hash)
} }
/// Returns numbers of blocks containing given bloom. /// Returns numbers of blocks containing given bloom.
@ -249,27 +319,7 @@ impl<'a> Iterator for AncestryIter<'a> {
impl BlockChain { impl BlockChain {
/// Create new instance of blockchain from given Genesis /// Create new instance of blockchain from given Genesis
pub fn new(config: Config, genesis: &[u8], path: &Path) -> BlockChain { pub fn new(config: Config, genesis: &[u8], db: Arc<Database>) -> BlockChain {
// open extras db
let mut extras_path = path.to_path_buf();
extras_path.push("extras");
let extras_db = match config.db_cache_size {
None => Database::open_default(extras_path.to_str().unwrap()).unwrap(),
Some(cache_size) => Database::open(
&DatabaseConfig::with_cache(cache_size/2),
extras_path.to_str().unwrap()).unwrap(),
};
// open blocks db
let mut blocks_path = path.to_path_buf();
blocks_path.push("blocks");
let blocks_db = match config.db_cache_size {
None => Database::open_default(blocks_path.to_str().unwrap()).unwrap(),
Some(cache_size) => Database::open(
&DatabaseConfig::with_cache(cache_size/2),
blocks_path.to_str().unwrap()).unwrap(),
};
let mut cache_man = CacheManager{cache_usage: VecDeque::new(), in_use: HashSet::new()}; let mut cache_man = CacheManager{cache_usage: VecDeque::new(), in_use: HashSet::new()};
(0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new())); (0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new()));
@ -281,39 +331,21 @@ impl BlockChain {
elements_per_index: LOG_BLOOMS_ELEMENTS_PER_INDEX, elements_per_index: LOG_BLOOMS_ELEMENTS_PER_INDEX,
}, },
best_block: RwLock::new(BestBlock::default()), best_block: RwLock::new(BestBlock::default()),
blocks: RwLock::new(HashMap::new()), block_headers: RwLock::new(HashMap::new()),
block_bodies: RwLock::new(HashMap::new()),
block_details: RwLock::new(HashMap::new()), block_details: RwLock::new(HashMap::new()),
block_hashes: RwLock::new(HashMap::new()), block_hashes: RwLock::new(HashMap::new()),
transaction_addresses: RwLock::new(HashMap::new()), transaction_addresses: RwLock::new(HashMap::new()),
blocks_blooms: RwLock::new(HashMap::new()), blocks_blooms: RwLock::new(HashMap::new()),
block_receipts: RwLock::new(HashMap::new()), block_receipts: RwLock::new(HashMap::new()),
extras_db: extras_db, db: db.clone(),
blocks_db: blocks_db,
cache_man: RwLock::new(cache_man), cache_man: RwLock::new(cache_man),
insert_lock: Mutex::new(()),
}; };
// load best block // load best block
let best_block_hash = match bc.extras_db.get(b"best").unwrap() { let best_block_hash = match bc.db.get(DB_COL_EXTRA, b"best").unwrap() {
Some(best) => { Some(best) => {
let new_best = H256::from_slice(&best); H256::from_slice(&best)
if !bc.blocks_db.get(&new_best).unwrap().is_some() {
warn!("Best block {} not found", new_best.hex());
}
/* TODO: enable this once the best block issue is resolved
while !bc.blocks_db.get(&new_best).unwrap().is_some() {
match bc.rewind() {
Some(h) => {
new_best = h;
}
None => {
warn!("Can't rewind blockchain");
break;
}
}
info!("Restored mismatched best block. Was: {}, new: {}", H256::from_slice(&best).hex(), new_best.hex());
}*/
new_best
} }
None => { None => {
// best block does not exist // best block does not exist
@ -329,23 +361,32 @@ impl BlockChain {
children: vec![] children: vec![]
}; };
bc.blocks_db.put(&hash, genesis).unwrap(); let batch = DBTransaction::new(&db);
batch.put(DB_COL_HEADERS, &hash, block.header_rlp().as_raw()).unwrap();
let batch = DBTransaction::new(); batch.put(DB_COL_BODIES, &hash, &Self::block_to_body(&genesis)).unwrap();
batch.write(&hash, &details);
batch.write(&header.number(), &hash);
batch.put(b"best", &hash).unwrap();
bc.extras_db.write(batch).unwrap();
batch.write(DB_COL_EXTRA, &hash, &details);
batch.write(DB_COL_EXTRA, &header.number(), &hash);
batch.put(DB_COL_EXTRA, b"best", &hash).unwrap();
bc.db.write(batch).expect("Low level database error. Some issue with disk?");
hash hash
} }
}; };
{ {
// Fetch best block details
let best_block_number = bc.block_number(&best_block_hash).unwrap();
let best_block_total_difficulty = bc.block_details(&best_block_hash).unwrap().total_difficulty;
let best_block_rlp = bc.block(&best_block_hash).unwrap();
// and write them
let mut best_block = bc.best_block.write(); let mut best_block = bc.best_block.write();
best_block.number = bc.block_number(&best_block_hash).unwrap(); *best_block = BestBlock {
best_block.total_difficulty = bc.block_details(&best_block_hash).unwrap().total_difficulty; number: best_block_number,
best_block.hash = best_block_hash; total_difficulty: best_block_total_difficulty,
hash: best_block_hash,
block: best_block_rlp,
};
} }
bc bc
@ -354,44 +395,52 @@ impl BlockChain {
/// Returns true if the given parent block has given child /// Returns true if the given parent block has given child
/// (though not necessarily a part of the canon chain). /// (though not necessarily a part of the canon chain).
fn is_known_child(&self, parent: &H256, hash: &H256) -> bool { fn is_known_child(&self, parent: &H256, hash: &H256) -> bool {
self.extras_db.read_with_cache(&self.block_details, parent).map_or(false, |d| d.children.contains(hash)) self.db.read_with_cache(DB_COL_EXTRA, &self.block_details, parent).map_or(false, |d| d.children.contains(hash))
} }
/// Rewind to a previous block /// Rewind to a previous block
#[cfg(test)] #[cfg(test)]
fn rewind(&self) -> Option<H256> { fn rewind(&self) -> Option<H256> {
use db::Key; use db::Key;
let batch = DBTransaction::new(); let batch = self.db.transaction();
// track back to the best block we have in the blocks database // track back to the best block we have in the blocks database
if let Some(best_block_hash) = self.extras_db.get(b"best").unwrap() { if let Some(best_block_hash) = self.db.get(DB_COL_EXTRA, b"best").unwrap() {
let best_block_hash = H256::from_slice(&best_block_hash); let best_block_hash = H256::from_slice(&best_block_hash);
if best_block_hash == self.genesis_hash() { if best_block_hash == self.genesis_hash() {
return None; return None;
} }
if let Some(extras) = self.extras_db.read(&best_block_hash) as Option<BlockDetails> { if let Some(extras) = self.db.read(DB_COL_EXTRA, &best_block_hash) as Option<BlockDetails> {
type DetailsKey = Key<BlockDetails, Target=H264>; type DetailsKey = Key<BlockDetails, Target=H264>;
batch.delete(&(DetailsKey::key(&best_block_hash))).unwrap(); batch.delete(DB_COL_EXTRA, &(DetailsKey::key(&best_block_hash))).unwrap();
let hash = extras.parent; let hash = extras.parent;
let range = extras.number as bc::Number .. extras.number as bc::Number; let range = extras.number as bc::Number .. extras.number as bc::Number;
let chain = bc::group::BloomGroupChain::new(self.blooms_config, self); let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
let changes = chain.replace(&range, vec![]); let changes = chain.replace(&range, vec![]);
for (k, v) in changes.into_iter() { for (k, v) in changes.into_iter() {
batch.write(&LogGroupPosition::from(k), &BloomGroup::from(v)); batch.write(DB_COL_EXTRA, &LogGroupPosition::from(k), &BloomGroup::from(v));
} }
batch.put(b"best", &hash).unwrap(); batch.put(DB_COL_EXTRA, b"best", &hash).unwrap();
let best_block_total_difficulty = self.block_details(&hash).unwrap().total_difficulty;
let best_block_rlp = self.block(&hash).unwrap();
let mut best_block = self.best_block.write(); let mut best_block = self.best_block.write();
best_block.number = extras.number - 1; *best_block = BestBlock {
best_block.total_difficulty = self.block_details(&hash).unwrap().total_difficulty; number: extras.number - 1,
best_block.hash = hash; total_difficulty: best_block_total_difficulty,
hash: hash,
block: best_block_rlp,
};
// update parent extras // update parent extras
if let Some(mut details) = self.extras_db.read(&hash) as Option<BlockDetails> { if let Some(mut details) = self.db.read(DB_COL_EXTRA, &hash) as Option<BlockDetails> {
details.children.clear(); details.children.clear();
batch.write(&hash, &details); batch.write(DB_COL_EXTRA, &hash, &details);
} }
self.extras_db.write(batch).unwrap(); self.db.write(batch).expect("Writing to db failed");
self.block_details.write().clear(); self.block_details.write().clear();
self.block_hashes.write().clear(); self.block_hashes.write().clear();
self.blocks.write().clear(); self.block_headers.write().clear();
self.block_bodies.write().clear();
self.block_receipts.write().clear(); self.block_receipts.write().clear();
return Some(hash); return Some(hash);
} }
@ -498,7 +547,7 @@ impl BlockChain {
/// Inserts the block into backing cache database. /// Inserts the block into backing cache database.
/// Expects the block to be valid and already verified. /// Expects the block to be valid and already verified.
/// If the block is already known, does nothing. /// If the block is already known, does nothing.
pub fn insert_block(&self, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute { pub fn insert_block(&self, batch: &DBTransaction, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
// create views onto rlp // create views onto rlp
let block = BlockView::new(bytes); let block = BlockView::new(bytes);
let header = block.header_view(); let header = block.header_view();
@ -508,45 +557,99 @@ impl BlockChain {
return ImportRoute::none(); return ImportRoute::none();
} }
let _lock = self.insert_lock.lock(); let block_rlp = UntrustedRlp::new(bytes);
let compressed_header = block_rlp.at(0).unwrap().compress(RlpType::Blocks);
let compressed_body = UntrustedRlp::new(&Self::block_to_body(bytes)).compress(RlpType::Blocks);
// store block in db // store block in db
self.blocks_db.put(&hash, &bytes).unwrap(); batch.put(DB_COL_HEADERS, &hash, &compressed_header).unwrap();
batch.put(DB_COL_BODIES, &hash, &compressed_body).unwrap();
let info = self.block_info(bytes); let info = self.block_info(bytes);
self.apply_update(ExtrasUpdate { if let BlockLocation::BranchBecomingCanonChain(ref d) = info.location {
info!(target: "reorg", "Reorg to {} ({} {} {})",
Colour::Yellow.bold().paint(format!("#{} {}", info.number, info.hash)),
Colour::Red.paint(d.retracted.iter().join(" ")),
Colour::White.paint(format!("#{} {}", self.block_details(&d.ancestor).expect("`ancestor` is in the route; qed").number, d.ancestor)),
Colour::Green.paint(d.enacted.iter().join(" "))
);
}
self.apply_update(batch, ExtrasUpdate {
block_hashes: self.prepare_block_hashes_update(bytes, &info), block_hashes: self.prepare_block_hashes_update(bytes, &info),
block_details: self.prepare_block_details_update(bytes, &info), block_details: self.prepare_block_details_update(bytes, &info),
block_receipts: self.prepare_block_receipts_update(receipts, &info), block_receipts: self.prepare_block_receipts_update(receipts, &info),
transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info), transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
blocks_blooms: self.prepare_block_blooms_update(bytes, &info), blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
info: info.clone(), info: info.clone(),
block: bytes,
}); });
ImportRoute::from(info) ImportRoute::from(info)
} }
/// Applies extras update. /// Get inserted block info which is critical to prepare extras updates.
fn apply_update(&self, update: ExtrasUpdate) { fn block_info(&self, block_bytes: &[u8]) -> BlockInfo {
let batch = DBTransaction::new(); let block = BlockView::new(block_bytes);
let header = block.header_view();
let hash = block.sha3();
let number = header.number();
let parent_hash = header.parent_hash();
let parent_details = self.block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash));
let total_difficulty = parent_details.total_difficulty + header.difficulty();
let is_new_best = total_difficulty > self.best_block_total_difficulty();
BlockInfo {
hash: hash,
number: number,
total_difficulty: total_difficulty,
location: if is_new_best {
// on new best block we need to make sure that all ancestors
// are moved to "canon chain"
// find the route between old best block and the new one
let best_hash = self.best_block_hash();
let route = self.tree_route(best_hash, parent_hash);
assert_eq!(number, parent_details.number + 1);
match route.blocks.len() {
0 => BlockLocation::CanonChain,
_ => {
let retracted = route.blocks.iter().take(route.index).cloned().collect::<Vec<_>>().into_iter().collect::<Vec<_>>();
let enacted = route.blocks.into_iter().skip(route.index).collect::<Vec<_>>();
BlockLocation::BranchBecomingCanonChain(BranchBecomingCanonChainData {
ancestor: route.ancestor,
enacted: enacted,
retracted: retracted,
})
}
}
} else {
BlockLocation::Branch
}
}
}
/// Applies extras update.
fn apply_update(&self, batch: &DBTransaction, update: ExtrasUpdate) {
{ {
for hash in update.block_details.keys().cloned() { for hash in update.block_details.keys().cloned() {
self.note_used(CacheID::BlockDetails(hash)); self.note_used(CacheID::BlockDetails(hash));
} }
let mut write_details = self.block_details.write(); let mut write_details = self.block_details.write();
batch.extend_with_cache(&mut *write_details, update.block_details, CacheUpdatePolicy::Overwrite); batch.extend_with_cache(DB_COL_EXTRA, &mut *write_details, update.block_details, CacheUpdatePolicy::Overwrite);
} }
{ {
let mut write_receipts = self.block_receipts.write(); let mut write_receipts = self.block_receipts.write();
batch.extend_with_cache(&mut *write_receipts, update.block_receipts, CacheUpdatePolicy::Remove); batch.extend_with_cache(DB_COL_EXTRA, &mut *write_receipts, update.block_receipts, CacheUpdatePolicy::Remove);
} }
{ {
let mut write_blocks_blooms = self.blocks_blooms.write(); let mut write_blocks_blooms = self.blocks_blooms.write();
batch.extend_with_cache(&mut *write_blocks_blooms, update.blocks_blooms, CacheUpdatePolicy::Remove); batch.extend_with_cache(DB_COL_EXTRA, &mut *write_blocks_blooms, update.blocks_blooms, CacheUpdatePolicy::Remove);
} }
// These cached values must be updated last with all three locks taken to avoid // These cached values must be updated last with all three locks taken to avoid
@ -557,11 +660,12 @@ impl BlockChain {
match update.info.location { match update.info.location {
BlockLocation::Branch => (), BlockLocation::Branch => (),
_ => { _ => {
batch.put(b"best", &update.info.hash).unwrap(); batch.put(DB_COL_EXTRA, b"best", &update.info.hash).unwrap();
*best_block = BestBlock { *best_block = BestBlock {
hash: update.info.hash, hash: update.info.hash,
number: update.info.number, number: update.info.number,
total_difficulty: update.info.total_difficulty total_difficulty: update.info.total_difficulty,
block: update.block.to_vec(),
}; };
} }
} }
@ -569,11 +673,8 @@ impl BlockChain {
let mut write_hashes = self.block_hashes.write(); let mut write_hashes = self.block_hashes.write();
let mut write_txs = self.transaction_addresses.write(); let mut write_txs = self.transaction_addresses.write();
batch.extend_with_cache(&mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Remove); batch.extend_with_cache(DB_COL_EXTRA, &mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Remove);
batch.extend_with_cache(&mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Remove); batch.extend_with_cache(DB_COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Remove);
// update extras database
self.extras_db.write(batch).unwrap();
} }
} }
@ -582,7 +683,7 @@ impl BlockChain {
if self.is_known(&first) { if self.is_known(&first) {
Some(AncestryIter { Some(AncestryIter {
current: first, current: first,
chain: &self, chain: self,
}) })
} else { } else {
None None
@ -613,48 +714,6 @@ impl BlockChain {
Some(ret) Some(ret)
} }
/// Get inserted block info which is critical to prepare extras updates.
fn block_info(&self, block_bytes: &[u8]) -> BlockInfo {
let block = BlockView::new(block_bytes);
let header = block.header_view();
let hash = block.sha3();
let number = header.number();
let parent_hash = header.parent_hash();
let parent_details = self.block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash));
let total_difficulty = parent_details.total_difficulty + header.difficulty();
let is_new_best = total_difficulty > self.best_block_total_difficulty();
BlockInfo {
hash: hash,
number: number,
total_difficulty: total_difficulty,
location: if is_new_best {
// on new best block we need to make sure that all ancestors
// are moved to "canon chain"
// find the route between old best block and the new one
let best_hash = self.best_block_hash();
let route = self.tree_route(best_hash, parent_hash);
assert_eq!(number, parent_details.number + 1);
match route.blocks.len() {
0 => BlockLocation::CanonChain,
_ => {
let retracted = route.blocks.iter().take(route.index).cloned().collect::<Vec<H256>>();
BlockLocation::BranchBecomingCanonChain(BranchBecomingCanonChainData {
ancestor: route.ancestor,
enacted: route.blocks.into_iter().skip(route.index).collect(),
retracted: retracted.into_iter().rev().collect(),
})
}
}
} else {
BlockLocation::Branch
}
}
}
/// This function returns modified block hashes. /// This function returns modified block hashes.
fn prepare_block_hashes_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<BlockNumber, H256> { fn prepare_block_hashes_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<BlockNumber, H256> {
let mut block_hashes = HashMap::new(); let mut block_hashes = HashMap::new();
@ -668,7 +727,7 @@ impl BlockChain {
block_hashes.insert(number, info.hash.clone()); block_hashes.insert(number, info.hash.clone());
}, },
BlockLocation::BranchBecomingCanonChain(ref data) => { BlockLocation::BranchBecomingCanonChain(ref data) => {
let ancestor_number = self.block_number(&data.ancestor).unwrap(); let ancestor_number = self.block_number(&data.ancestor).expect("Block number of ancestor is always in DB");
let start_number = ancestor_number + 1; let start_number = ancestor_number + 1;
for (index, hash) in data.enacted.iter().cloned().enumerate() { for (index, hash) in data.enacted.iter().cloned().enumerate() {
@ -762,8 +821,8 @@ impl BlockChain {
let range = start_number as bc::Number..self.best_block_number() as bc::Number; let range = start_number as bc::Number..self.best_block_number() as bc::Number;
let mut blooms: Vec<bc::Bloom> = data.enacted.iter() let mut blooms: Vec<bc::Bloom> = data.enacted.iter()
.map(|hash| self.block(hash).unwrap()) .map(|hash| self.block_header_data(hash).unwrap())
.map(|bytes| BlockView::new(&bytes).header_view().log_bloom()) .map(|bytes| HeaderView::new(&bytes).log_bloom())
.map(Bloom::from) .map(Bloom::from)
.map(Into::into) .map(Into::into)
.collect(); .collect();
@ -795,10 +854,16 @@ impl BlockChain {
self.best_block.read().total_difficulty self.best_block.read().total_difficulty
} }
/// Get best block header
pub fn best_block_header(&self) -> Bytes {
let block = self.best_block.read();
BlockView::new(&block.block).header_view().rlp().as_raw().to_vec()
}
/// Get current cache size. /// Get current cache size.
pub fn cache_size(&self) -> CacheSize { pub fn cache_size(&self) -> CacheSize {
CacheSize { CacheSize {
blocks: self.blocks.read().heap_size_of_children(), blocks: self.block_headers.read().heap_size_of_children() + self.block_bodies.read().heap_size_of_children(),
block_details: self.block_details.read().heap_size_of_children(), block_details: self.block_details.read().heap_size_of_children(),
transaction_addresses: self.transaction_addresses.read().heap_size_of_children(), transaction_addresses: self.transaction_addresses.read().heap_size_of_children(),
blocks_blooms: self.blocks_blooms.read().heap_size_of_children(), blocks_blooms: self.blocks_blooms.read().heap_size_of_children(),
@ -823,11 +888,23 @@ impl BlockChain {
/// Ticks our cache system and throws out any old data. /// Ticks our cache system and throws out any old data.
pub fn collect_garbage(&self) { pub fn collect_garbage(&self) {
if self.cache_size().total() < self.pref_cache_size.load(AtomicOrder::Relaxed) { return; } if self.cache_size().total() < self.pref_cache_size.load(AtomicOrder::Relaxed) {
// rotate cache
let mut cache_man = self.cache_man.write();
const AVERAGE_BYTES_PER_CACHE_ENTRY: usize = 400; //estimated
if cache_man.cache_usage[0].len() > self.pref_cache_size.load(AtomicOrder::Relaxed) / COLLECTION_QUEUE_SIZE / AVERAGE_BYTES_PER_CACHE_ENTRY {
trace!("Cache rotation, cache_size = {}", self.cache_size().total());
let cache = cache_man.cache_usage.pop_back().unwrap();
cache_man.cache_usage.push_front(cache);
}
return;
}
for _ in 0..COLLECTION_QUEUE_SIZE { for i in 0..COLLECTION_QUEUE_SIZE {
{ {
let mut blocks = self.blocks.write(); trace!("Cache cleanup round started {}, cache_size = {}", i, self.cache_size().total());
let mut block_headers = self.block_headers.write();
let mut block_bodies = self.block_bodies.write();
let mut block_details = self.block_details.write(); let mut block_details = self.block_details.write();
let mut block_hashes = self.block_hashes.write(); let mut block_hashes = self.block_hashes.write();
let mut transaction_addresses = self.transaction_addresses.write(); let mut transaction_addresses = self.transaction_addresses.write();
@ -838,7 +915,8 @@ impl BlockChain {
for id in cache_man.cache_usage.pop_back().unwrap().into_iter() { for id in cache_man.cache_usage.pop_back().unwrap().into_iter() {
cache_man.in_use.remove(&id); cache_man.in_use.remove(&id);
match id { match id {
CacheID::Block(h) => { blocks.remove(&h); }, CacheID::BlockHeader(h) => { block_headers.remove(&h); },
CacheID::BlockBody(h) => { block_bodies.remove(&h); },
CacheID::BlockDetails(h) => { block_details.remove(&h); } CacheID::BlockDetails(h) => { block_details.remove(&h); }
CacheID::BlockHashes(h) => { block_hashes.remove(&h); } CacheID::BlockHashes(h) => { block_hashes.remove(&h); }
CacheID::TransactionAddresses(h) => { transaction_addresses.remove(&h); } CacheID::TransactionAddresses(h) => { transaction_addresses.remove(&h); }
@ -851,32 +929,74 @@ impl BlockChain {
// TODO: handle block_hashes properly. // TODO: handle block_hashes properly.
block_hashes.clear(); block_hashes.clear();
blocks.shrink_to_fit(); block_headers.shrink_to_fit();
block_bodies.shrink_to_fit();
block_details.shrink_to_fit(); block_details.shrink_to_fit();
block_hashes.shrink_to_fit(); block_hashes.shrink_to_fit();
transaction_addresses.shrink_to_fit(); transaction_addresses.shrink_to_fit();
blocks_blooms.shrink_to_fit(); blocks_blooms.shrink_to_fit();
block_receipts.shrink_to_fit(); block_receipts.shrink_to_fit();
} }
trace!("Cache cleanup round complete {}, cache_size = {}", i, self.cache_size().total());
if self.cache_size().total() < self.max_cache_size.load(AtomicOrder::Relaxed) { break; } if self.cache_size().total() < self.max_cache_size.load(AtomicOrder::Relaxed) { break; }
} }
// TODO: m_lastCollection = chrono::system_clock::now(); // TODO: m_lastCollection = chrono::system_clock::now();
} }
/// Create a block body from a block.
pub fn block_to_body(block: &[u8]) -> Bytes {
let mut body = RlpStream::new_list(2);
let block_rlp = Rlp::new(block);
body.append_raw(block_rlp.at(1).as_raw(), 1);
body.append_raw(block_rlp.at(2).as_raw(), 1);
body.out()
}
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
#![cfg_attr(feature="dev", allow(similar_names))] #![cfg_attr(feature="dev", allow(similar_names))]
use std::str::FromStr; use std::str::FromStr;
use std::sync::Arc;
use rustc_serialize::hex::FromHex; use rustc_serialize::hex::FromHex;
use util::{Database, DatabaseConfig};
use util::hash::*; use util::hash::*;
use util::sha3::Hashable; use util::sha3::Hashable;
use receipt::Receipt;
use blockchain::{BlockProvider, BlockChain, Config, ImportRoute}; use blockchain::{BlockProvider, BlockChain, Config, ImportRoute};
use tests::helpers::*; use tests::helpers::*;
use devtools::*; use devtools::*;
use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer}; use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
use views::BlockView; use views::BlockView;
use client;
fn new_db(path: &str) -> Arc<Database> {
Arc::new(Database::open(&DatabaseConfig::with_columns(client::DB_NO_OF_COLUMNS), path).unwrap())
}
#[test]
fn should_cache_best_block() {
// given
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let genesis = canon_chain.generate(&mut finalizer).unwrap();
let first = canon_chain.generate(&mut finalizer).unwrap();
let temp = RandomTempPath::new();
let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
assert_eq!(bc.best_block_number(), 0);
// when
let batch = db.transaction();
bc.insert_block(&batch, &first, vec![]);
// NOTE no db.write here (we want to check if best block is cached)
// then
assert_eq!(bc.best_block_number(), 1);
assert!(bc.block(&bc.best_block_hash()).is_some(), "Best block should be queryable even without DB write.");
}
#[test] #[test]
fn basic_blockchain_insert() { fn basic_blockchain_insert() {
@ -888,16 +1008,18 @@ mod tests {
let first_hash = BlockView::new(&first).header_view().sha3(); let first_hash = BlockView::new(&first).header_view().sha3();
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path()); let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
assert_eq!(bc.genesis_hash(), genesis_hash.clone()); assert_eq!(bc.genesis_hash(), genesis_hash.clone());
assert_eq!(bc.best_block_number(), 0);
assert_eq!(bc.best_block_hash(), genesis_hash.clone()); assert_eq!(bc.best_block_hash(), genesis_hash.clone());
assert_eq!(bc.block_hash(0), Some(genesis_hash.clone())); assert_eq!(bc.block_hash(0), Some(genesis_hash.clone()));
assert_eq!(bc.block_hash(1), None); assert_eq!(bc.block_hash(1), None);
assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![]); assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![]);
bc.insert_block(&first, vec![]); let batch = db.transaction();
bc.insert_block(&batch, &first, vec![]);
db.write(batch).unwrap();
assert_eq!(bc.block_hash(0), Some(genesis_hash.clone())); assert_eq!(bc.block_hash(0), Some(genesis_hash.clone()));
assert_eq!(bc.best_block_number(), 1); assert_eq!(bc.best_block_number(), 1);
@ -916,14 +1038,17 @@ mod tests {
let genesis_hash = BlockView::new(&genesis).header_view().sha3(); let genesis_hash = BlockView::new(&genesis).header_view().sha3();
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path()); let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
let mut block_hashes = vec![genesis_hash.clone()]; let mut block_hashes = vec![genesis_hash.clone()];
let batch = db.transaction();
for _ in 0..10 { for _ in 0..10 {
let block = canon_chain.generate(&mut finalizer).unwrap(); let block = canon_chain.generate(&mut finalizer).unwrap();
block_hashes.push(BlockView::new(&block).header_view().sha3()); block_hashes.push(BlockView::new(&block).header_view().sha3());
bc.insert_block(&block, vec![]); bc.insert_block(&batch, &block, vec![]);
} }
db.write(batch).unwrap();
block_hashes.reverse(); block_hashes.reverse();
@ -948,17 +1073,21 @@ mod tests {
let b5a = canon_chain.generate(&mut finalizer).unwrap(); let b5a = canon_chain.generate(&mut finalizer).unwrap();
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path()); let db = new_db(temp.as_str());
bc.insert_block(&b1a, vec![]); let bc = BlockChain::new(Config::default(), &genesis, db.clone());
bc.insert_block(&b1b, vec![]);
bc.insert_block(&b2a, vec![]); let batch = db.transaction();
bc.insert_block(&b2b, vec![]); bc.insert_block(&batch, &b1a, vec![]);
bc.insert_block(&b3a, vec![]); bc.insert_block(&batch, &b1b, vec![]);
bc.insert_block(&b3b, vec![]); bc.insert_block(&batch, &b2a, vec![]);
bc.insert_block(&b4a, vec![]); bc.insert_block(&batch, &b2b, vec![]);
bc.insert_block(&b4b, vec![]); bc.insert_block(&batch, &b3a, vec![]);
bc.insert_block(&b5a, vec![]); bc.insert_block(&batch, &b3b, vec![]);
bc.insert_block(&b5b, vec![]); bc.insert_block(&batch, &b4a, vec![]);
bc.insert_block(&batch, &b4b, vec![]);
bc.insert_block(&batch, &b5a, vec![]);
bc.insert_block(&batch, &b5b, vec![]);
db.write(batch).unwrap();
assert_eq!( assert_eq!(
[&b4b, &b3b, &b2b].iter().map(|b| BlockView::new(b).header()).collect::<Vec<_>>(), [&b4b, &b3b, &b2b].iter().map(|b| BlockView::new(b).header()).collect::<Vec<_>>(),
@ -989,11 +1118,17 @@ mod tests {
let best_block_hash = b3a_hash.clone(); let best_block_hash = b3a_hash.clone();
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path()); let db = new_db(temp.as_str());
let ir1 = bc.insert_block(&b1, vec![]); let bc = BlockChain::new(Config::default(), &genesis, db.clone());
let ir2 = bc.insert_block(&b2, vec![]);
let ir3b = bc.insert_block(&b3b, vec![]); let batch = db.transaction();
let ir3a = bc.insert_block(&b3a, vec![]); let ir1 = bc.insert_block(&batch, &b1, vec![]);
let ir2 = bc.insert_block(&batch, &b2, vec![]);
let ir3b = bc.insert_block(&batch, &b3b, vec![]);
db.write(batch).unwrap();
let batch = db.transaction();
let ir3a = bc.insert_block(&batch, &b3a, vec![]);
db.write(batch).unwrap();
assert_eq!(ir1, ImportRoute { assert_eq!(ir1, ImportRoute {
enacted: vec![b1_hash], enacted: vec![b1_hash],
@ -1094,14 +1229,19 @@ mod tests {
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
{ {
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path()); let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
assert_eq!(bc.best_block_hash(), genesis_hash); assert_eq!(bc.best_block_hash(), genesis_hash);
bc.insert_block(&first, vec![]); let batch = db.transaction();
bc.insert_block(&batch, &first, vec![]);
db.write(batch).unwrap();
assert_eq!(bc.best_block_hash(), first_hash); assert_eq!(bc.best_block_hash(), first_hash);
} }
{ {
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path()); let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
assert_eq!(bc.best_block_hash(), first_hash); assert_eq!(bc.best_block_hash(), first_hash);
} }
} }
@ -1154,8 +1294,11 @@ mod tests {
let b1_hash = H256::from_str("f53f268d23a71e85c7d6d83a9504298712b84c1a2ba220441c86eeda0bf0b6e3").unwrap(); let b1_hash = H256::from_str("f53f268d23a71e85c7d6d83a9504298712b84c1a2ba220441c86eeda0bf0b6e3").unwrap();
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path()); let db = new_db(temp.as_str());
bc.insert_block(&b1, vec![]); let bc = BlockChain::new(Config::default(), &genesis, db.clone());
let batch = db.transaction();
bc.insert_block(&batch, &b1, vec![]);
db.write(batch).unwrap();
let transactions = bc.transactions(&b1_hash).unwrap(); let transactions = bc.transactions(&b1_hash).unwrap();
assert_eq!(transactions.len(), 7); assert_eq!(transactions.len(), 7);
@ -1164,6 +1307,13 @@ mod tests {
} }
} }
fn insert_block(db: &Arc<Database>, bc: &BlockChain, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
let batch = db.transaction();
let res = bc.insert_block(&batch, bytes, receipts);
db.write(batch).unwrap();
res
}
#[test] #[test]
fn test_bloom_filter_simple() { fn test_bloom_filter_simple() {
// TODO: From here // TODO: From here
@ -1185,27 +1335,28 @@ mod tests {
let b2a = canon_chain.with_bloom(bloom_ba.clone()).generate(&mut finalizer).unwrap(); let b2a = canon_chain.with_bloom(bloom_ba.clone()).generate(&mut finalizer).unwrap();
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path()); let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5); let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5); let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
assert_eq!(blocks_b1, vec![]); assert_eq!(blocks_b1, vec![]);
assert_eq!(blocks_b2, vec![]); assert_eq!(blocks_b2, vec![]);
bc.insert_block(&b1, vec![]); insert_block(&db, &bc, &b1, vec![]);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5); let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5); let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
assert_eq!(blocks_b1, vec![1]); assert_eq!(blocks_b1, vec![1]);
assert_eq!(blocks_b2, vec![]); assert_eq!(blocks_b2, vec![]);
bc.insert_block(&b2, vec![]); insert_block(&db, &bc, &b2, vec![]);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5); let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5); let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
assert_eq!(blocks_b1, vec![1]); assert_eq!(blocks_b1, vec![1]);
assert_eq!(blocks_b2, vec![2]); assert_eq!(blocks_b2, vec![2]);
// hasn't been forked yet // hasn't been forked yet
bc.insert_block(&b1a, vec![]); insert_block(&db, &bc, &b1a, vec![]);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5); let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5); let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5); let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
@ -1214,7 +1365,7 @@ mod tests {
assert_eq!(blocks_ba, vec![]); assert_eq!(blocks_ba, vec![]);
// fork has happend // fork has happend
bc.insert_block(&b2a, vec![]); insert_block(&db, &bc, &b2a, vec![]);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5); let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5); let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5); let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
@ -1223,7 +1374,7 @@ mod tests {
assert_eq!(blocks_ba, vec![1, 2]); assert_eq!(blocks_ba, vec![1, 2]);
// fork back // fork back
bc.insert_block(&b3, vec![]); insert_block(&db, &bc, &b3, vec![]);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5); let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5); let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5); let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
@ -1241,21 +1392,25 @@ mod tests {
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
{ {
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path()); let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
let uncle = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap(); let uncle = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
let batch = db.transaction();
// create a longer fork // create a longer fork
for _ in 0..5 { for _ in 0..5 {
let canon_block = canon_chain.generate(&mut finalizer).unwrap(); let canon_block = canon_chain.generate(&mut finalizer).unwrap();
bc.insert_block(&canon_block, vec![]); bc.insert_block(&batch, &canon_block, vec![]);
} }
assert_eq!(bc.best_block_number(), 5); assert_eq!(bc.best_block_number(), 5);
bc.insert_block(&uncle, vec![]); bc.insert_block(&batch, &uncle, vec![]);
db.write(batch).unwrap();
} }
// re-loading the blockchain should load the correct best block. // re-loading the blockchain should load the correct best block.
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path()); let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
assert_eq!(bc.best_block_number(), 5); assert_eq!(bc.best_block_number(), 5);
} }
@ -1271,10 +1426,13 @@ mod tests {
let second_hash = BlockView::new(&second).header_view().sha3(); let second_hash = BlockView::new(&second).header_view().sha3();
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path()); let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
bc.insert_block(&first, vec![]); let batch = db.transaction();
bc.insert_block(&second, vec![]); bc.insert_block(&batch, &first, vec![]);
bc.insert_block(&batch, &second, vec![]);
db.write(batch).unwrap();
assert_eq!(bc.rewind(), Some(first_hash.clone())); assert_eq!(bc.rewind(), Some(first_hash.clone()));
assert!(!bc.is_known(&second_hash)); assert!(!bc.is_known(&second_hash));

View File

@ -17,7 +17,7 @@
//! Blockchain configuration. //! Blockchain configuration.
/// Blockchain configuration. /// Blockchain configuration.
#[derive(Debug)] #[derive(Debug, PartialEq)]
pub struct Config { pub struct Config {
/// Preferred cache size in bytes. /// Preferred cache size in bytes.
pub pref_cache_size: usize, pub pref_cache_size: usize,

View File

@ -6,9 +6,11 @@ use blooms::BloomGroup;
use super::extras::{BlockDetails, BlockReceipts, TransactionAddress, LogGroupPosition}; use super::extras::{BlockDetails, BlockReceipts, TransactionAddress, LogGroupPosition};
/// Block extras update info. /// Block extras update info.
pub struct ExtrasUpdate { pub struct ExtrasUpdate<'a> {
/// Block info. /// Block info.
pub info: BlockInfo, pub info: BlockInfo,
/// Current block uncompressed rlp bytes
pub block: &'a [u8],
/// Modified block hashes. /// Modified block hashes.
pub block_hashes: HashMap<BlockNumber, H256>, pub block_hashes: HashMap<BlockNumber, H256>,
/// Modified block details. /// Modified block details.

View File

@ -17,16 +17,16 @@
use std::collections::{HashSet, HashMap, VecDeque}; use std::collections::{HashSet, HashMap, VecDeque};
use std::ops::Deref; use std::ops::Deref;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
use std::path::{Path, PathBuf}; use std::path::{Path};
use std::fmt; use std::fmt;
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering}; use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
use std::time::{Instant}; use std::time::{Instant};
use time::precise_time_ns; use time::precise_time_ns;
// util // util
use util::{journaldb, rlp, Bytes, Stream, View, PerfTimer, Itertools, Mutex, RwLock}; use util::{journaldb, rlp, Bytes, View, PerfTimer, Itertools, Mutex, RwLock};
use util::journaldb::JournalDB; use util::journaldb::JournalDB;
use util::rlp::{RlpStream, Rlp, UntrustedRlp}; use util::rlp::{UntrustedRlp};
use util::numbers::*; use util::numbers::*;
use util::panics::*; use util::panics::*;
use util::io::*; use util::io::*;
@ -34,14 +34,13 @@ use util::sha3::*;
use util::kvdb::*; use util::kvdb::*;
// other // other
use views::BlockView; use views::{BlockView, HeaderView, BodyView};
use error::{ImportError, ExecutionError, BlockError, ImportResult}; use error::{ImportError, ExecutionError, ReplayError, BlockError, ImportResult};
use header::BlockNumber; use header::BlockNumber;
use state::State; use state::State;
use spec::Spec; use spec::Spec;
use basic_types::Seal; use basic_types::Seal;
use engine::Engine; use engines::Engine;
use views::HeaderView;
use service::ClientIoMessage; use service::ClientIoMessage;
use env_info::LastHashes; use env_info::LastHashes;
use verification; use verification;
@ -53,8 +52,7 @@ use types::filter::Filter;
use log_entry::LocalizedLogEntry; use log_entry::LocalizedLogEntry;
use block_queue::{BlockQueue, BlockQueueInfo}; use block_queue::{BlockQueue, BlockQueueInfo};
use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute};
use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig, use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig, BlockChainClient, MiningBlockChainClient,
DatabaseCompactionProfile, BlockChainClient, MiningBlockChainClient,
TraceFilter, CallAnalytics, BlockImportError, Mode, ChainNotify}; TraceFilter, CallAnalytics, BlockImportError, Mode, ChainNotify};
use client::Error as ClientError; use client::Error as ClientError;
use env_info::EnvInfo; use env_info::EnvInfo;
@ -62,6 +60,7 @@ use executive::{Executive, Executed, TransactOptions, contract_address};
use receipt::LocalizedReceipt; use receipt::LocalizedReceipt;
use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase}; use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase};
use trace; use trace;
use trace::FlatTransactionTraces;
use evm::Factory as EvmFactory; use evm::Factory as EvmFactory;
use miner::{Miner, MinerService}; use miner::{Miner, MinerService};
use util::TrieFactory; use util::TrieFactory;
@ -123,6 +122,7 @@ pub struct Client {
chain: Arc<BlockChain>, chain: Arc<BlockChain>,
tracedb: Arc<TraceDB<BlockChain>>, tracedb: Arc<TraceDB<BlockChain>>,
engine: Arc<Box<Engine>>, engine: Arc<Box<Engine>>,
db: Arc<Database>,
state_db: Mutex<Box<JournalDB>>, state_db: Mutex<Box<JournalDB>>,
block_queue: BlockQueue, block_queue: BlockQueue,
report: RwLock<ClientReport>, report: RwLock<ClientReport>,
@ -141,26 +141,23 @@ pub struct Client {
} }
const HISTORY: u64 = 1200; const HISTORY: u64 = 1200;
// DO NOT TOUCH THIS ANY MORE UNLESS YOU REALLY KNOW WHAT YOU'RE DOING. // database columns
// Altering it will force a blanket DB update for *all* JournalDB-derived /// Column for State
// databases. pub const DB_COL_STATE: Option<u32> = Some(0);
// Instead, add/upgrade the version string of the individual JournalDB-derived database /// Column for Block headers
// of which you actually want force an upgrade. pub const DB_COL_HEADERS: Option<u32> = Some(1);
const CLIENT_DB_VER_STR: &'static str = "5.3"; /// Column for Block bodies
pub const DB_COL_BODIES: Option<u32> = Some(2);
/// Get the path for the databases given the root path and information on the databases. /// Column for Extras
pub fn get_db_path(path: &Path, pruning: journaldb::Algorithm, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf { pub const DB_COL_EXTRA: Option<u32> = Some(3);
let mut dir = path.to_path_buf(); /// Column for Traces
dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default())); pub const DB_COL_TRACE: Option<u32> = Some(4);
//TODO: sec/fat: pruned/full versioning /// Number of columns in DB
// version here is a bit useless now, since it's controlled only be the pruning algo. pub const DB_NO_OF_COLUMNS: Option<u32> = Some(5);
dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, pruning));
dir
}
/// Append a path element to the given path and return the string. /// Append a path element to the given path and return the string.
pub fn append_path(path: &Path, item: &str) -> String { pub fn append_path<P>(path: P, item: &str) -> String where P: AsRef<Path> {
let mut p = path.to_path_buf(); let mut p = path.as_ref().to_path_buf();
p.push(item); p.push(item);
p.to_str().unwrap().to_owned() p.to_str().unwrap().to_owned()
} }
@ -174,40 +171,28 @@ impl Client {
miner: Arc<Miner>, miner: Arc<Miner>,
message_channel: IoChannel<ClientIoMessage>, message_channel: IoChannel<ClientIoMessage>,
) -> Result<Arc<Client>, ClientError> { ) -> Result<Arc<Client>, ClientError> {
let path = get_db_path(path, config.pruning, spec.genesis_header().hash(), spec.fork_name.as_ref()); let path = path.to_path_buf();
let gb = spec.genesis_block(); let gb = spec.genesis_block();
let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path)); let mut db_config = DatabaseConfig::with_columns(DB_NO_OF_COLUMNS);
let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone()))); db_config.cache_size = config.db_cache_size;
db_config.compaction = config.db_compaction.compaction_profile();
db_config.wal = config.db_wal;
let mut state_db_config = match config.db_cache_size { let db = Arc::new(Database::open(&db_config, &path.to_str().unwrap()).expect("Error opening database"));
None => DatabaseConfig::default(), let chain = Arc::new(BlockChain::new(config.blockchain, &gb, db.clone()));
Some(cache_size) => DatabaseConfig::with_cache(cache_size), let tracedb = Arc::new(try!(TraceDB::new(config.tracing, db.clone(), chain.clone())));
};
if config.db_compaction == DatabaseCompactionProfile::HDD {
state_db_config = state_db_config.compaction(CompactionProfile::hdd());
}
let mut state_db = journaldb::new(
&append_path(&path, "state"),
config.pruning,
state_db_config
);
let mut state_db = journaldb::new(db.clone(), config.pruning, DB_COL_STATE);
if state_db.is_empty() && spec.ensure_db_good(state_db.as_hashdb_mut()) { if state_db.is_empty() && spec.ensure_db_good(state_db.as_hashdb_mut()) {
state_db.commit(0, &spec.genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); let batch = DBTransaction::new(&db);
state_db.commit(&batch, 0, &spec.genesis_header().hash(), None).expect("Error commiting genesis state to state DB");
db.write(batch).expect("Error writing genesis state to state DB");
} }
if !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.contains(h.state_root())) { if !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.contains(h.state_root())) {
warn!("State root not found for block #{} ({})", chain.best_block_number(), chain.best_block_hash().hex()); warn!("State root not found for block #{} ({})", chain.best_block_number(), chain.best_block_hash().hex());
} }
/* TODO: enable this once the best block issue is resolved
while !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.contains(h.state_root())) {
warn!("State root not found for block #{} ({}), recovering...", chain.best_block_number(), chain.best_block_hash().hex());
chain.rewind();
}*/
let engine = Arc::new(spec.engine); let engine = Arc::new(spec.engine);
let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel.clone()); let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel.clone());
@ -222,6 +207,7 @@ impl Client {
chain: chain, chain: chain,
tracedb: tracedb, tracedb: tracedb,
engine: engine, engine: engine,
db: db,
state_db: Mutex::new(state_db), state_db: Mutex::new(state_db),
block_queue: block_queue, block_queue: block_queue,
report: RwLock::new(Default::default()), report: RwLock::new(Default::default()),
@ -297,7 +283,7 @@ impl Client {
} }
// Verify Block Family // Verify Block Family
let verify_family_result = self.verifier.verify_block_family(&header, &block.bytes, engine, self.chain.deref()); let verify_family_result = self.verifier.verify_block_family(header, &block.bytes, engine, self.chain.deref());
if let Err(e) = verify_family_result { if let Err(e) = verify_family_result {
warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
return Err(()); return Err(());
@ -315,7 +301,7 @@ impl Client {
let last_hashes = self.build_last_hashes(header.parent_hash.clone()); let last_hashes = self.build_last_hashes(header.parent_hash.clone());
let db = self.state_db.lock().boxed_clone(); let db = self.state_db.lock().boxed_clone();
let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, &self.vm_factory, self.trie_factory.clone()); let enact_result = enact_verified(block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, &self.vm_factory, self.trie_factory.clone());
if let Err(e) = enact_result { if let Err(e) = enact_result {
warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
return Err(()); return Err(());
@ -323,7 +309,7 @@ impl Client {
// Final Verification // Final Verification
let locked_block = enact_result.unwrap(); let locked_block = enact_result.unwrap();
if let Err(e) = self.verifier.verify_block_final(&header, locked_block.block().header()) { if let Err(e) = self.verifier.verify_block_final(header, locked_block.block().header()) {
warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
return Err(()); return Err(());
} }
@ -360,7 +346,7 @@ impl Client {
/// This is triggered by a message coming from a block queue when the block is ready for insertion /// This is triggered by a message coming from a block queue when the block is ready for insertion
pub fn import_verified_blocks(&self) -> usize { pub fn import_verified_blocks(&self) -> usize {
let max_blocks_to_import = 64; let max_blocks_to_import = 64;
let (imported_blocks, import_results, invalid_blocks, original_best, imported, duration) = { let (imported_blocks, import_results, invalid_blocks, imported, duration) = {
let mut imported_blocks = Vec::with_capacity(max_blocks_to_import); let mut imported_blocks = Vec::with_capacity(max_blocks_to_import);
let mut invalid_blocks = HashSet::new(); let mut invalid_blocks = HashSet::new();
let mut import_results = Vec::with_capacity(max_blocks_to_import); let mut import_results = Vec::with_capacity(max_blocks_to_import);
@ -370,8 +356,6 @@ impl Client {
let start = precise_time_ns(); let start = precise_time_ns();
let blocks = self.block_queue.drain(max_blocks_to_import); let blocks = self.block_queue.drain(max_blocks_to_import);
let original_best = self.chain_info().best_block_hash;
for block in blocks { for block in blocks {
let header = &block.header; let header = &block.header;
if invalid_blocks.contains(&header.parent_hash) { if invalid_blocks.contains(&header.parent_hash) {
@ -405,7 +389,7 @@ impl Client {
} }
} }
let duration_ns = precise_time_ns() - start; let duration_ns = precise_time_ns() - start;
(imported_blocks, import_results, invalid_blocks, original_best, imported, duration_ns) (imported_blocks, import_results, invalid_blocks, imported, duration_ns)
}; };
{ {
@ -429,10 +413,6 @@ impl Client {
} }
} }
if self.chain_info().best_block_hash != original_best {
self.miner.update_sealing(self);
}
imported imported
} }
@ -449,23 +429,30 @@ impl Client {
// Commit results // Commit results
let receipts = block.receipts().to_owned(); let receipts = block.receipts().to_owned();
let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new)); let traces = block.traces().clone().unwrap_or_else(Vec::new);
let traces: Vec<FlatTransactionTraces> = traces.into_iter()
.map(Into::into)
.collect();
//let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new));
let batch = DBTransaction::new(&self.db);
// CHECK! I *think* this is fine, even if the state_root is equal to another // CHECK! I *think* this is fine, even if the state_root is equal to another
// already-imported block of the same number. // already-imported block of the same number.
// TODO: Prove it with a test. // TODO: Prove it with a test.
block.drain().commit(number, hash, ancient).expect("State DB commit failed."); block.drain().commit(&batch, number, hash, ancient).expect("State DB commit failed.");
// And update the chain after commit to prevent race conditions let route = self.chain.insert_block(&batch, block_data, receipts);
// (when something is in chain but you are not able to fetch details) self.tracedb.import(&batch, TraceImportRequest {
let route = self.chain.insert_block(block_data, receipts); traces: traces.into(),
self.tracedb.import(TraceImportRequest {
traces: traces,
block_hash: hash.clone(), block_hash: hash.clone(),
block_number: number, block_number: number,
enacted: route.enacted.clone(), enacted: route.enacted.clone(),
retracted: route.retracted.len() retracted: route.retracted.len()
}); });
// Final commit to the DB
self.db.write(batch).expect("State DB write failed.");
self.update_last_hashes(&parent, hash); self.update_last_hashes(&parent, hash);
route route
} }
@ -484,12 +471,12 @@ impl Client {
pub fn import_queued_transactions(&self, transactions: &[Bytes]) -> usize { pub fn import_queued_transactions(&self, transactions: &[Bytes]) -> usize {
let _timer = PerfTimer::new("import_queued_transactions"); let _timer = PerfTimer::new("import_queued_transactions");
self.queue_transactions.fetch_sub(transactions.len(), AtomicOrdering::SeqCst); self.queue_transactions.fetch_sub(transactions.len(), AtomicOrdering::SeqCst);
let txs = transactions.iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect(); let txs = transactions.iter().filter_map(|bytes| UntrustedRlp::new(bytes).as_val().ok()).collect();
let results = self.miner.import_external_transactions(self, txs); let results = self.miner.import_external_transactions(self, txs);
results.len() results.len()
} }
/// Attempt to get a copy of a specific block's state. /// Attempt to get a copy of a specific block's final state.
/// ///
/// This will not fail if given BlockID::Latest. /// This will not fail if given BlockID::Latest.
/// Otherwise, this can fail (but may not) if the DB prunes state. /// Otherwise, this can fail (but may not) if the DB prunes state.
@ -520,6 +507,21 @@ impl Client {
}) })
} }
/// Attempt to get a copy of a specific block's beginning state.
///
/// This will not fail if given BlockID::Latest.
/// Otherwise, this can fail (but may not) if the DB prunes state.
pub fn state_at_beginning(&self, id: BlockID) -> Option<State> {
// fast path for latest state.
match id {
BlockID::Pending => self.state_at(BlockID::Latest),
id => match self.block_number(id) {
None | Some(0) => None,
Some(n) => self.state_at(BlockID::Number(n - 1)),
}
}
}
/// Get a copy of the best block's state. /// Get a copy of the best block's state.
pub fn state(&self) -> State { pub fn state(&self) -> State {
State::from_existing( State::from_existing(
@ -676,6 +678,46 @@ impl BlockChainClient for Client {
ret ret
} }
fn replay(&self, id: TransactionID, analytics: CallAnalytics) -> Result<Executed, ReplayError> {
let address = try!(self.transaction_address(id).ok_or(ReplayError::TransactionNotFound));
let header_data = try!(self.block_header(BlockID::Hash(address.block_hash)).ok_or(ReplayError::StatePruned));
let body_data = try!(self.block_body(BlockID::Hash(address.block_hash)).ok_or(ReplayError::StatePruned));
let mut state = try!(self.state_at_beginning(BlockID::Hash(address.block_hash)).ok_or(ReplayError::StatePruned));
let txs = BodyView::new(&body_data).transactions();
if address.index >= txs.len() {
return Err(ReplayError::TransactionNotFound);
}
let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
let view = HeaderView::new(&header_data);
let last_hashes = self.build_last_hashes(view.hash());
let mut env_info = EnvInfo {
number: view.number(),
author: view.author(),
timestamp: view.timestamp(),
difficulty: view.difficulty(),
last_hashes: last_hashes,
gas_used: U256::zero(),
gas_limit: view.gas_limit(),
};
for t in txs.iter().take(address.index) {
match Executive::new(&mut state, &env_info, self.engine.deref().deref(), &self.vm_factory).transact(t, Default::default()) {
Ok(x) => { env_info.gas_used = env_info.gas_used + x.gas_used; }
Err(ee) => { return Err(ReplayError::Execution(ee)) }
}
}
let t = &txs[address.index];
let orig = state.clone();
let mut ret = Executive::new(&mut state, &env_info, self.engine.deref().deref(), &self.vm_factory).transact(t, options);
if analytics.state_diffing {
if let Ok(ref mut x) = ret {
x.state_diff = Some(state.diff_from(orig));
}
}
ret.map_err(|ee| ReplayError::Execution(ee))
}
fn keep_alive(&self) { fn keep_alive(&self) {
if self.mode != Mode::Active { if self.mode != Mode::Active {
self.wake_up(); self.wake_up();
@ -683,24 +725,20 @@ impl BlockChainClient for Client {
} }
} }
fn best_block_header(&self) -> Bytes {
self.chain.best_block_header()
}
fn block_header(&self, id: BlockID) -> Option<Bytes> { fn block_header(&self, id: BlockID) -> Option<Bytes> {
Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec())) Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_header_data(&hash))
} }
fn block_body(&self, id: BlockID) -> Option<Bytes> { fn block_body(&self, id: BlockID) -> Option<Bytes> {
Self::block_hash(&self.chain, id).and_then(|hash| { Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_body(&hash))
self.chain.block(&hash).map(|bytes| {
let rlp = Rlp::new(&bytes);
let mut body = RlpStream::new_list(2);
body.append_raw(rlp.at(1).as_raw(), 1);
body.append_raw(rlp.at(2).as_raw(), 1);
body.out()
})
})
} }
fn block(&self, id: BlockID) -> Option<Bytes> { fn block(&self, id: BlockID) -> Option<Bytes> {
if let &BlockID::Pending = &id { if let BlockID::Pending = id {
if let Some(block) = self.miner.pending_block() { if let Some(block) = self.miner.pending_block() {
return Some(block.rlp_bytes(Seal::Without)); return Some(block.rlp_bytes(Seal::Without));
} }
@ -719,7 +757,7 @@ impl BlockChainClient for Client {
} }
fn block_total_difficulty(&self, id: BlockID) -> Option<U256> { fn block_total_difficulty(&self, id: BlockID) -> Option<U256> {
if let &BlockID::Pending = &id { if let BlockID::Pending = id {
if let Some(block) = self.miner.pending_block() { if let Some(block) = self.miner.pending_block() {
return Some(*block.header.difficulty() + self.block_total_difficulty(BlockID::Latest).expect("blocks in chain have details; qed")); return Some(*block.header.difficulty() + self.block_total_difficulty(BlockID::Latest).expect("blocks in chain have details; qed"));
} }
@ -753,13 +791,13 @@ impl BlockChainClient for Client {
fn uncle(&self, id: UncleID) -> Option<Bytes> { fn uncle(&self, id: UncleID) -> Option<Bytes> {
let index = id.position; let index = id.position;
self.block(id.block).and_then(|block| BlockView::new(&block).uncle_rlp_at(index)) self.block_body(id.block).and_then(|body| BodyView::new(&body).uncle_rlp_at(index))
} }
fn transaction_receipt(&self, id: TransactionID) -> Option<LocalizedReceipt> { fn transaction_receipt(&self, id: TransactionID) -> Option<LocalizedReceipt> {
self.transaction_address(id).and_then(|address| { self.transaction_address(id).and_then(|address| self.chain.block_number(&address.block_hash).and_then(|block_number| {
let t = self.chain.block(&address.block_hash) let t = self.chain.block_body(&address.block_hash)
.and_then(|block| BlockView::new(&block).localized_transaction_at(address.index)); .and_then(|block| BodyView::new(&block).localized_transaction_at(&address.block_hash, block_number, address.index));
match (t, self.chain.transaction_receipt(&address)) { match (t, self.chain.transaction_receipt(&address)) {
(Some(tx), Some(receipt)) => { (Some(tx), Some(receipt)) => {
@ -798,7 +836,7 @@ impl BlockChainClient for Client {
}, },
_ => None _ => None
} }
}) }))
} }
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> { fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
@ -874,7 +912,7 @@ impl BlockChainClient for Client {
blocks.into_iter() blocks.into_iter()
.filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash))) .filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash)))
.filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts))) .filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
.filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes()))) .filter_map(|(number, hash, receipts)| self.chain.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes())))
.flat_map(|(number, hash, receipts, hashes)| { .flat_map(|(number, hash, receipts, hashes)| {
let mut log_index = 0; let mut log_index = 0;
receipts.into_iter() receipts.into_iter()
@ -1004,8 +1042,6 @@ impl MiningBlockChainClient for Client {
let _timer = PerfTimer::new("import_sealed_block"); let _timer = PerfTimer::new("import_sealed_block");
let start = precise_time_ns(); let start = precise_time_ns();
let original_best = self.chain_info().best_block_hash;
let h = block.header().hash(); let h = block.header().hash();
let number = block.header().number(); let number = block.header().number();
@ -1013,26 +1049,19 @@ impl MiningBlockChainClient for Client {
let route = self.commit_block(block, &h, &block_data); let route = self.commit_block(block, &h, &block_data);
trace!(target: "client", "Imported sealed block #{} ({})", number, h); trace!(target: "client", "Imported sealed block #{} ({})", number, h);
{ let (enacted, retracted) = self.calculate_enacted_retracted(&[route]);
let (enacted, retracted) = self.calculate_enacted_retracted(&[route]); self.miner.chain_new_blocks(self, &[h.clone()], &[], &enacted, &retracted);
self.miner.chain_new_blocks(self, &[h.clone()], &[], &enacted, &retracted);
self.notify(|notify| {
notify.new_blocks(
vec![h.clone()],
vec![],
enacted.clone(),
retracted.clone(),
vec![h.clone()],
precise_time_ns() - start,
);
});
}
if self.chain_info().best_block_hash != original_best {
self.miner.update_sealing(self);
}
self.notify(|notify| {
notify.new_blocks(
vec![h.clone()],
vec![],
enacted.clone(),
retracted.clone(),
vec![h.clone()],
precise_time_ns() - start,
);
});
Ok(h) Ok(h)
} }
} }

View File

@ -14,13 +14,14 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::str::FromStr;
pub use std::time::Duration; pub use std::time::Duration;
pub use block_queue::BlockQueueConfig; pub use block_queue::BlockQueueConfig;
pub use blockchain::Config as BlockChainConfig; pub use blockchain::Config as BlockChainConfig;
pub use trace::{Config as TraceConfig, Switch}; pub use trace::{Config as TraceConfig, Switch};
pub use evm::VMType; pub use evm::VMType;
pub use verification::VerifierType; pub use verification::VerifierType;
use util::journaldb; use util::{journaldb, CompactionProfile};
use util::trie::TrieSpec; use util::trie::TrieSpec;
/// Client state db compaction profile /// Client state db compaction profile
@ -33,7 +34,31 @@ pub enum DatabaseCompactionProfile {
} }
impl Default for DatabaseCompactionProfile { impl Default for DatabaseCompactionProfile {
fn default() -> Self { DatabaseCompactionProfile::Default } fn default() -> Self {
DatabaseCompactionProfile::Default
}
}
impl DatabaseCompactionProfile {
/// Returns corresponding compaction profile.
pub fn compaction_profile(&self) -> CompactionProfile {
match *self {
DatabaseCompactionProfile::Default => Default::default(),
DatabaseCompactionProfile::HDD => CompactionProfile::hdd(),
}
}
}
impl FromStr for DatabaseCompactionProfile {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"ssd" | "default" => Ok(DatabaseCompactionProfile::Default),
"hdd" => Ok(DatabaseCompactionProfile::HDD),
_ => Err("Invalid compaction profile given. Expected hdd/ssd (default).".into()),
}
}
} }
/// Operating mode for the client. /// Operating mode for the client.
@ -50,11 +75,13 @@ pub enum Mode {
} }
impl Default for Mode { impl Default for Mode {
fn default() -> Self { Mode::Active } fn default() -> Self {
Mode::Active
}
} }
/// Client configuration. Includes configs for all sub-systems. /// Client configuration. Includes configs for all sub-systems.
#[derive(Debug, Default)] #[derive(Debug, PartialEq, Default)]
pub struct ClientConfig { pub struct ClientConfig {
/// Block queue configuration. /// Block queue configuration.
pub queue: BlockQueueConfig, pub queue: BlockQueueConfig,
@ -74,8 +101,32 @@ pub struct ClientConfig {
pub db_cache_size: Option<usize>, pub db_cache_size: Option<usize>,
/// State db compaction profile /// State db compaction profile
pub db_compaction: DatabaseCompactionProfile, pub db_compaction: DatabaseCompactionProfile,
/// Should db have WAL enabled?
pub db_wal: bool,
/// Operating mode /// Operating mode
pub mode: Mode, pub mode: Mode,
/// Type of block verifier used by client. /// Type of block verifier used by client.
pub verifier_type: VerifierType, pub verifier_type: VerifierType,
} }
#[cfg(test)]
mod test {
use super::{DatabaseCompactionProfile, Mode};
#[test]
fn test_default_compaction_profile() {
assert_eq!(DatabaseCompactionProfile::default(), DatabaseCompactionProfile::Default);
}
#[test]
fn test_parsing_compaction_profile() {
assert_eq!(DatabaseCompactionProfile::Default, "ssd".parse().unwrap());
assert_eq!(DatabaseCompactionProfile::Default, "default".parse().unwrap());
assert_eq!(DatabaseCompactionProfile::HDD, "hdd".parse().unwrap());
}
#[test]
fn test_mode_default() {
assert_eq!(Mode::default(), Mode::Active);
}
}

View File

@ -37,7 +37,7 @@ use spec::Spec;
use block_queue::BlockQueueInfo; use block_queue::BlockQueueInfo;
use block::{OpenBlock, SealedBlock}; use block::{OpenBlock, SealedBlock};
use executive::Executed; use executive::Executed;
use error::ExecutionError; use error::{ExecutionError, ReplayError};
use trace::LocalizedTrace; use trace::LocalizedTrace;
/// Test client. /// Test client.
@ -190,7 +190,7 @@ impl TestBlockChainClient {
gas_price: U256::one(), gas_price: U256::one(),
nonce: U256::zero() nonce: U256::zero()
}; };
let signed_tx = tx.sign(&keypair.secret()); let signed_tx = tx.sign(keypair.secret());
txs.append(&signed_tx); txs.append(&signed_tx);
txs.out() txs.out()
}, },
@ -248,7 +248,8 @@ impl TestBlockChainClient {
pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> { pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge, DatabaseConfig::default()); let db = Database::open_default(temp.as_str()).unwrap();
let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, None);
GuardedTempResult { GuardedTempResult {
_temp: temp, _temp: temp,
result: Some(journal_db) result: Some(journal_db)
@ -292,6 +293,10 @@ impl BlockChainClient for TestBlockChainClient {
Ok(self.execution_result.read().clone().unwrap()) Ok(self.execution_result.read().clone().unwrap())
} }
fn replay(&self, _id: TransactionID, _analytics: CallAnalytics) -> Result<Executed, ReplayError> {
Ok(self.execution_result.read().clone().unwrap())
}
fn block_total_difficulty(&self, _id: BlockID) -> Option<U256> { fn block_total_difficulty(&self, _id: BlockID) -> Option<U256> {
Some(U256::zero()) Some(U256::zero())
} }
@ -359,6 +364,10 @@ impl BlockChainClient for TestBlockChainClient {
unimplemented!(); unimplemented!();
} }
fn best_block_header(&self) -> Bytes {
self.block_header(BlockID::Hash(self.chain_info().best_block_hash)).expect("Best block always have header.")
}
fn block_header(&self, id: BlockID) -> Option<Bytes> { fn block_header(&self, id: BlockID) -> Option<Bytes> {
self.block_hash(id).and_then(|hash| self.blocks.read().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec())) self.block_hash(id).and_then(|hash| self.blocks.read().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec()))
} }
@ -366,8 +375,8 @@ impl BlockChainClient for TestBlockChainClient {
fn block_body(&self, id: BlockID) -> Option<Bytes> { fn block_body(&self, id: BlockID) -> Option<Bytes> {
self.block_hash(id).and_then(|hash| self.blocks.read().get(&hash).map(|r| { self.block_hash(id).and_then(|hash| self.blocks.read().get(&hash).map(|r| {
let mut stream = RlpStream::new_list(2); let mut stream = RlpStream::new_list(2);
stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1); stream.append_raw(Rlp::new(r).at(1).as_raw(), 1);
stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1); stream.append_raw(Rlp::new(r).at(2).as_raw(), 1);
stream.out() stream.out()
})) }))
} }

View File

@ -26,7 +26,7 @@ use transaction::{LocalizedTransaction, SignedTransaction};
use log_entry::LocalizedLogEntry; use log_entry::LocalizedLogEntry;
use filter::Filter; use filter::Filter;
use views::{BlockView}; use views::{BlockView};
use error::{ImportResult, ExecutionError}; use error::{ImportResult, ExecutionError, ReplayError};
use receipt::LocalizedReceipt; use receipt::LocalizedReceipt;
use trace::LocalizedTrace; use trace::LocalizedTrace;
use evm::Factory as EvmFactory; use evm::Factory as EvmFactory;
@ -145,10 +145,7 @@ pub trait BlockChainClient : Sync + Send {
fn chain_info(&self) -> BlockChainInfo; fn chain_info(&self) -> BlockChainInfo;
/// Get the best block header. /// Get the best block header.
fn best_block_header(&self) -> Bytes { fn best_block_header(&self) -> Bytes;
// TODO: lock blockchain only once
self.block_header(BlockID::Hash(self.chain_info().best_block_hash)).unwrap()
}
/// Returns numbers of blocks containing given bloom. /// Returns numbers of blocks containing given bloom.
fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option<Vec<BlockNumber>>; fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option<Vec<BlockNumber>>;
@ -160,6 +157,9 @@ pub trait BlockChainClient : Sync + Send {
// TODO: should be able to accept blockchain location for call. // TODO: should be able to accept blockchain location for call.
fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result<Executed, ExecutionError>; fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result<Executed, ExecutionError>;
/// Replays a given transaction for inspection.
fn replay(&self, t: TransactionID, analytics: CallAnalytics) -> Result<Executed, ReplayError>;
/// Returns traces matching given filter. /// Returns traces matching given filter.
fn filter_traces(&self, filter: TraceFilter) -> Option<Vec<LocalizedTrace>>; fn filter_traces(&self, filter: TraceFilter) -> Option<Vec<LocalizedTrace>>;

View File

@ -62,14 +62,14 @@ pub trait Key<T> {
/// Should be used to write value into database. /// Should be used to write value into database.
pub trait Writable { pub trait Writable {
/// Writes the value into the database. /// Writes the value into the database.
fn write<T, R>(&self, key: &Key<T, Target = R>, value: &T) where T: Encodable, R: Deref<Target = [u8]>; fn write<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>, value: &T) where T: Encodable, R: Deref<Target = [u8]>;
/// Writes the value into the database and updates the cache. /// Writes the value into the database and updates the cache.
fn write_with_cache<K, T, R>(&self, cache: &mut Cache<K, T>, key: K, value: T, policy: CacheUpdatePolicy) where fn write_with_cache<K, T, R>(&self, col: Option<u32>, cache: &mut Cache<K, T>, key: K, value: T, policy: CacheUpdatePolicy) where
K: Key<T, Target = R> + Hash + Eq, K: Key<T, Target = R> + Hash + Eq,
T: Encodable, T: Encodable,
R: Deref<Target = [u8]> { R: Deref<Target = [u8]> {
self.write(&key, &value); self.write(col, &key, &value);
match policy { match policy {
CacheUpdatePolicy::Overwrite => { CacheUpdatePolicy::Overwrite => {
cache.insert(key, value); cache.insert(key, value);
@ -81,20 +81,20 @@ pub trait Writable {
} }
/// Writes the values into the database and updates the cache. /// Writes the values into the database and updates the cache.
fn extend_with_cache<K, T, R>(&self, cache: &mut Cache<K, T>, values: HashMap<K, T>, policy: CacheUpdatePolicy) where fn extend_with_cache<K, T, R>(&self, col: Option<u32>, cache: &mut Cache<K, T>, values: HashMap<K, T>, policy: CacheUpdatePolicy) where
K: Key<T, Target = R> + Hash + Eq, K: Key<T, Target = R> + Hash + Eq,
T: Encodable, T: Encodable,
R: Deref<Target = [u8]> { R: Deref<Target = [u8]> {
match policy { match policy {
CacheUpdatePolicy::Overwrite => { CacheUpdatePolicy::Overwrite => {
for (key, value) in values.into_iter() { for (key, value) in values.into_iter() {
self.write(&key, &value); self.write(col, &key, &value);
cache.insert(key, value); cache.insert(key, value);
} }
}, },
CacheUpdatePolicy::Remove => { CacheUpdatePolicy::Remove => {
for (key, value) in &values { for (key, value) in &values {
self.write(key, value); self.write(col, key, value);
cache.remove(key); cache.remove(key);
} }
}, },
@ -105,12 +105,12 @@ pub trait Writable {
/// Should be used to read values from database. /// Should be used to read values from database.
pub trait Readable { pub trait Readable {
/// Returns value for given key. /// Returns value for given key.
fn read<T, R>(&self, key: &Key<T, Target = R>) -> Option<T> where fn read<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> Option<T> where
T: Decodable, T: Decodable,
R: Deref<Target = [u8]>; R: Deref<Target = [u8]>;
/// Returns value for given key either in cache or in database. /// Returns value for given key either in cache or in database.
fn read_with_cache<K, T, C>(&self, cache: &RwLock<C>, key: &K) -> Option<T> where fn read_with_cache<K, T, C>(&self, col: Option<u32>, cache: &RwLock<C>, key: &K) -> Option<T> where
K: Key<T> + Eq + Hash + Clone, K: Key<T> + Eq + Hash + Clone,
T: Clone + Decodable, T: Clone + Decodable,
C: Cache<K, T> { C: Cache<K, T> {
@ -121,7 +121,7 @@ pub trait Readable {
} }
} }
self.read(key).map(|value: T|{ self.read(col, key).map(|value: T|{
let mut write = cache.write(); let mut write = cache.write();
write.insert(key.clone(), value.clone()); write.insert(key.clone(), value.clone());
value value
@ -129,10 +129,10 @@ pub trait Readable {
} }
/// Returns true if given value exists. /// Returns true if given value exists.
fn exists<T, R>(&self, key: &Key<T, Target = R>) -> bool where R: Deref<Target= [u8]>; fn exists<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> bool where R: Deref<Target= [u8]>;
/// Returns true if given value exists either in cache or in database. /// Returns true if given value exists either in cache or in database.
fn exists_with_cache<K, T, R, C>(&self, cache: &RwLock<C>, key: &K) -> bool where fn exists_with_cache<K, T, R, C>(&self, col: Option<u32>, cache: &RwLock<C>, key: &K) -> bool where
K: Eq + Hash + Key<T, Target = R>, K: Eq + Hash + Key<T, Target = R>,
R: Deref<Target = [u8]>, R: Deref<Target = [u8]>,
C: Cache<K, T> { C: Cache<K, T> {
@ -143,13 +143,13 @@ pub trait Readable {
} }
} }
self.exists::<T, R>(key) self.exists::<T, R>(col, key)
} }
} }
impl Writable for DBTransaction { impl Writable for DBTransaction {
fn write<T, R>(&self, key: &Key<T, Target = R>, value: &T) where T: Encodable, R: Deref<Target = [u8]> { fn write<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>, value: &T) where T: Encodable, R: Deref<Target = [u8]> {
let result = self.put(&key.key(), &encode(value)); let result = self.put(col, &key.key(), &encode(value));
if let Err(err) = result { if let Err(err) = result {
panic!("db put failed, key: {:?}, err: {:?}", &key.key() as &[u8], err); panic!("db put failed, key: {:?}, err: {:?}", &key.key() as &[u8], err);
} }
@ -157,8 +157,8 @@ impl Writable for DBTransaction {
} }
impl Readable for Database { impl Readable for Database {
fn read<T, R>(&self, key: &Key<T, Target = R>) -> Option<T> where T: Decodable, R: Deref<Target = [u8]> { fn read<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> Option<T> where T: Decodable, R: Deref<Target = [u8]> {
let result = self.get(&key.key()); let result = self.get(col, &key.key());
match result { match result {
Ok(option) => option.map(|v| decode(&v)), Ok(option) => option.map(|v| decode(&v)),
@ -168,8 +168,8 @@ impl Readable for Database {
} }
} }
fn exists<T, R>(&self, key: &Key<T, Target = R>) -> bool where R: Deref<Target = [u8]> { fn exists<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> bool where R: Deref<Target = [u8]> {
let result = self.get(&key.key()); let result = self.get(col, &key.key());
match result { match result {
Ok(v) => v.is_some(), Ok(v) => v.is_some(),

View File

@ -19,8 +19,8 @@
use common::*; use common::*;
use account_provider::AccountProvider; use account_provider::AccountProvider;
use block::*; use block::*;
use spec::{CommonParams, Spec}; use spec::CommonParams;
use engine::*; use engines::Engine;
use evm::Schedule; use evm::Schedule;
use ethjson; use ethjson;
@ -176,16 +176,16 @@ impl Header {
} }
} }
/// Create a new test chain spec with `BasicAuthority` consensus engine.
pub fn new_test_authority() -> Spec { Spec::load(include_bytes!("../res/test_authority.json")) }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*;
use common::*; use common::*;
use block::*; use block::*;
use tests::helpers::*; use tests::helpers::*;
use account_provider::AccountProvider; use account_provider::AccountProvider;
use spec::Spec;
/// Create a new test chain spec with `BasicAuthority` consensus engine.
fn new_test_authority() -> Spec { Spec::load(include_bytes!("../../res/test_authority.json")) }
#[test] #[test]
fn has_valid_metadata() { fn has_valid_metadata() {

View File

@ -14,7 +14,13 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Consensus engine specification //! Consensus engine specification and basic implementations.
mod null_engine;
mod basic_authority;
pub use self::null_engine::NullEngine;
pub use self::basic_authority::BasicAuthority;
use common::*; use common::*;
use account_provider::AccountProvider; use account_provider::AccountProvider;

View File

@ -17,12 +17,12 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use util::hash::Address; use util::hash::Address;
use builtin::Builtin; use builtin::Builtin;
use engine::Engine; use engines::Engine;
use spec::CommonParams; use spec::CommonParams;
use evm::Schedule; use evm::Schedule;
use env_info::EnvInfo; use env_info::EnvInfo;
/// An engine which does not provide any consensus mechanism. /// An engine which does not provide any consensus mechanism and does not seal blocks.
pub struct NullEngine { pub struct NullEngine {
params: CommonParams, params: CommonParams,
builtins: BTreeMap<Address, Builtin>, builtins: BTreeMap<Address, Builtin>,

View File

@ -22,7 +22,7 @@ use basic_types::LogBloom;
use client::Error as ClientError; use client::Error as ClientError;
use ipc::binary::{BinaryConvertError, BinaryConvertable}; use ipc::binary::{BinaryConvertError, BinaryConvertable};
use types::block_import_error::BlockImportError; use types::block_import_error::BlockImportError;
pub use types::executed::ExecutionError; pub use types::executed::{ExecutionError, ReplayError};
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
/// Errors concerning transaction processing. /// Errors concerning transaction processing.

View File

@ -18,7 +18,7 @@ use ethash::{quick_get_difficulty, EthashManager, H256 as EH256};
use common::*; use common::*;
use block::*; use block::*;
use spec::CommonParams; use spec::CommonParams;
use engine::*; use engines::Engine;
use evm::Schedule; use evm::Schedule;
use ethjson; use ethjson;

View File

@ -18,6 +18,7 @@
use util::common::*; use util::common::*;
use evm::{self, Schedule}; use evm::{self, Schedule};
use types::executed::CallType;
use env_info::*; use env_info::*;
/// Result of externalities create function. /// Result of externalities create function.
@ -69,13 +70,15 @@ pub trait Ext {
/// and true if subcall was successfull. /// and true if subcall was successfull.
#[cfg_attr(feature="dev", allow(too_many_arguments))] #[cfg_attr(feature="dev", allow(too_many_arguments))]
fn call(&mut self, fn call(&mut self,
gas: &U256, gas: &U256,
sender_address: &Address, sender_address: &Address,
receive_address: &Address, receive_address: &Address,
value: Option<U256>, value: Option<U256>,
data: &[u8], data: &[u8],
code_address: &Address, code_address: &Address,
output: &mut [u8]) -> MessageCallResult; output: &mut [u8],
call_type: CallType
) -> MessageCallResult;
/// Returns code at given address /// Returns code at given address
fn extcode(&self, address: &Address) -> Bytes; fn extcode(&self, address: &Address) -> Bytes;

View File

@ -21,7 +21,7 @@ use std::fmt;
use evm::Evm; use evm::Evm;
use util::{U256, Uint}; use util::{U256, Uint};
#[derive(Debug, Clone)] #[derive(Debug, PartialEq, Clone)]
/// Type of EVM to use. /// Type of EVM to use.
pub enum VMType { pub enum VMType {
/// JIT EVM /// JIT EVM

View File

@ -38,6 +38,7 @@ use self::memory::Memory;
use std::marker::PhantomData; use std::marker::PhantomData;
use common::*; use common::*;
use types::executed::CallType;
use super::instructions::{self, Instruction, InstructionInfo}; use super::instructions::{self, Instruction, InstructionInfo};
use evm::{self, MessageCallResult, ContractCreateResult, GasLeft, CostType}; use evm::{self, MessageCallResult, ContractCreateResult, GasLeft, CostType};
@ -96,13 +97,13 @@ impl<Cost: CostType> evm::Evm for Interpreter<Cost> {
self.mem.clear(); self.mem.clear();
let code = &params.code.as_ref().unwrap(); let code = &params.code.as_ref().unwrap();
let valid_jump_destinations = self.find_jump_destinations(&code); let valid_jump_destinations = self.find_jump_destinations(code);
let mut gasometer = Gasometer::<Cost>::new(try!(Cost::from_u256(params.gas))); let mut gasometer = Gasometer::<Cost>::new(try!(Cost::from_u256(params.gas)));
let mut stack = VecStack::with_capacity(ext.schedule().stack_limit, U256::zero()); let mut stack = VecStack::with_capacity(ext.schedule().stack_limit, U256::zero());
let mut reader = CodeReader { let mut reader = CodeReader {
position: 0, position: 0,
code: &code code: code
}; };
let infos = &*instructions::INSTRUCTIONS; let infos = &*instructions::INSTRUCTIONS;
@ -274,7 +275,7 @@ impl<Cost: CostType> Interpreter<Cost> {
return Ok(InstructionResult::Ok); return Ok(InstructionResult::Ok);
} }
let create_result = ext.create(&gas.as_u256(), &endowment, &contract_code); let create_result = ext.create(&gas.as_u256(), &endowment, contract_code);
return match create_result { return match create_result {
ContractCreateResult::Created(address, gas_left) => { ContractCreateResult::Created(address, gas_left) => {
stack.push(address_to_u256(address)); stack.push(address_to_u256(address));
@ -311,16 +312,16 @@ impl<Cost: CostType> Interpreter<Cost> {
}); });
// Get sender & receive addresses, check if we have balance // Get sender & receive addresses, check if we have balance
let (sender_address, receive_address, has_balance) = match instruction { let (sender_address, receive_address, has_balance, call_type) = match instruction {
instructions::CALL => { instructions::CALL => {
let has_balance = ext.balance(&params.address) >= value.unwrap(); let has_balance = ext.balance(&params.address) >= value.unwrap();
(&params.address, &code_address, has_balance) (&params.address, &code_address, has_balance, CallType::Call)
}, },
instructions::CALLCODE => { instructions::CALLCODE => {
let has_balance = ext.balance(&params.address) >= value.unwrap(); let has_balance = ext.balance(&params.address) >= value.unwrap();
(&params.address, &params.address, has_balance) (&params.address, &params.address, has_balance, CallType::CallCode)
}, },
instructions::DELEGATECALL => (&params.sender, &params.address, true), instructions::DELEGATECALL => (&params.sender, &params.address, true, CallType::DelegateCall),
_ => panic!(format!("Unexpected instruction {} in CALL branch.", instruction)) _ => panic!(format!("Unexpected instruction {} in CALL branch.", instruction))
}; };
@ -335,7 +336,7 @@ impl<Cost: CostType> Interpreter<Cost> {
// and we don't want to copy // and we don't want to copy
let input = unsafe { ::std::mem::transmute(self.mem.read_slice(in_off, in_size)) }; let input = unsafe { ::std::mem::transmute(self.mem.read_slice(in_off, in_size)) };
let output = self.mem.writeable_slice(out_off, out_size); let output = self.mem.writeable_slice(out_off, out_size);
ext.call(&call_gas.as_u256(), sender_address, receive_address, value, input, &code_address, output) ext.call(&call_gas.as_u256(), sender_address, receive_address, value, input, &code_address, output, call_type)
}; };
return match call_result { return match call_result {

View File

@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use common::*; use common::*;
use types::executed::CallType;
use evm::{self, Ext, Schedule, Factory, GasLeft, VMType, ContractCreateResult, MessageCallResult}; use evm::{self, Ext, Schedule, Factory, GasLeft, VMType, ContractCreateResult, MessageCallResult};
use std::fmt::Debug; use std::fmt::Debug;
@ -36,7 +37,7 @@ pub struct FakeCall {
receive_address: Option<Address>, receive_address: Option<Address>,
value: Option<U256>, value: Option<U256>,
data: Bytes, data: Bytes,
code_address: Option<Address> code_address: Option<Address>,
} }
/// Fake externalities test structure. /// Fake externalities test structure.
@ -119,7 +120,9 @@ impl Ext for FakeExt {
value: Option<U256>, value: Option<U256>,
data: &[u8], data: &[u8],
code_address: &Address, code_address: &Address,
_output: &mut [u8]) -> MessageCallResult { _output: &mut [u8],
_call_type: CallType
) -> MessageCallResult {
self.calls.insert(FakeCall { self.calls.insert(FakeCall {
call_type: FakeCallType::Call, call_type: FakeCallType::Call,

View File

@ -17,11 +17,12 @@
//! Transaction Execution environment. //! Transaction Execution environment.
use common::*; use common::*;
use state::*; use state::*;
use engine::*; use engines::Engine;
use types::executed::CallType;
use evm::{self, Ext, Factory, Finalize}; use evm::{self, Ext, Factory, Finalize};
use externalities::*; use externalities::*;
use substate::*; use substate::*;
use trace::{Trace, Tracer, NoopTracer, ExecutiveTracer, VMTrace, VMTracer, ExecutiveVMTracer, NoopVMTracer}; use trace::{FlatTrace, Tracer, NoopTracer, ExecutiveTracer, VMTrace, VMTracer, ExecutiveVMTracer, NoopVMTracer};
use crossbeam; use crossbeam;
pub use types::executed::{Executed, ExecutionResult}; pub use types::executed::{Executed, ExecutionResult};
@ -39,6 +40,7 @@ pub fn contract_address(address: &Address, nonce: &U256) -> Address {
} }
/// Transaction execution options. /// Transaction execution options.
#[derive(Default)]
pub struct TransactOptions { pub struct TransactOptions {
/// Enable call tracing. /// Enable call tracing.
pub tracing: bool, pub tracing: bool,
@ -173,6 +175,7 @@ impl<'a> Executive<'a> {
value: ActionValue::Transfer(t.value), value: ActionValue::Transfer(t.value),
code: Some(t.data.clone()), code: Some(t.data.clone()),
data: None, data: None,
call_type: CallType::None,
}; };
(self.create(params, &mut substate, &mut tracer, &mut vm_tracer), vec![]) (self.create(params, &mut substate, &mut tracer, &mut vm_tracer), vec![])
}, },
@ -187,6 +190,7 @@ impl<'a> Executive<'a> {
value: ActionValue::Transfer(t.value), value: ActionValue::Transfer(t.value),
code: self.state.code(address), code: self.state.code(address),
data: Some(t.data.clone()), data: Some(t.data.clone()),
call_type: CallType::Call,
}; };
// TODO: move output upstream // TODO: move output upstream
let mut out = vec![]; let mut out = vec![];
@ -195,7 +199,7 @@ impl<'a> Executive<'a> {
}; };
// finalize here! // finalize here!
Ok(try!(self.finalize(t, substate, gas_left, output, tracer.traces().pop(), vm_tracer.drain()))) Ok(try!(self.finalize(t, substate, gas_left, output, tracer.traces(), vm_tracer.drain())))
} }
fn exec_vm<T, V>( fn exec_vm<T, V>(
@ -248,8 +252,6 @@ impl<'a> Executive<'a> {
} }
trace!("Executive::call(params={:?}) self.env_info={:?}", params, self.info); trace!("Executive::call(params={:?}) self.env_info={:?}", params, self.info);
let delegate_call = params.code_address != params.address;
if self.engine.is_builtin(&params.code_address) { if self.engine.is_builtin(&params.code_address) {
// if destination is builtin, try to execute it // if destination is builtin, try to execute it
@ -274,9 +276,7 @@ impl<'a> Executive<'a> {
trace_info, trace_info,
cost, cost,
trace_output, trace_output,
self.depth, vec![]
vec![],
delegate_call
); );
} }
@ -285,7 +285,7 @@ impl<'a> Executive<'a> {
// just drain the whole gas // just drain the whole gas
self.state.revert_snapshot(); self.state.revert_snapshot();
tracer.trace_failed_call(trace_info, self.depth, vec![], delegate_call); tracer.trace_failed_call(trace_info, vec![]);
Err(evm::Error::OutOfGas) Err(evm::Error::OutOfGas)
} }
@ -317,11 +317,9 @@ impl<'a> Executive<'a> {
trace_info, trace_info,
gas - gas_left, gas - gas_left,
trace_output, trace_output,
self.depth, traces
traces,
delegate_call
), ),
_ => tracer.trace_failed_call(trace_info, self.depth, traces, delegate_call), _ => tracer.trace_failed_call(trace_info, traces),
}; };
trace!(target: "executive", "substate={:?}; unconfirmed_substate={:?}\n", substate, unconfirmed_substate); trace!(target: "executive", "substate={:?}; unconfirmed_substate={:?}\n", substate, unconfirmed_substate);
@ -333,7 +331,7 @@ impl<'a> Executive<'a> {
// otherwise it's just a basic transaction, only do tracing, if necessary. // otherwise it's just a basic transaction, only do tracing, if necessary.
self.state.clear_snapshot(); self.state.clear_snapshot();
tracer.trace_call(trace_info, U256::zero(), trace_output, self.depth, vec![], delegate_call); tracer.trace_call(trace_info, U256::zero(), trace_output, vec![]);
Ok(params.gas) Ok(params.gas)
} }
} }
@ -370,7 +368,7 @@ impl<'a> Executive<'a> {
let gas = params.gas; let gas = params.gas;
let created = params.address.clone(); let created = params.address.clone();
let mut subvmtracer = vm_tracer.prepare_subtrace(&params.code.as_ref().expect("two ways into create (Externalities::create and Executive::transact_with_tracer); both place `Some(...)` `code` in `params`; qed")); let mut subvmtracer = vm_tracer.prepare_subtrace(params.code.as_ref().expect("two ways into create (Externalities::create and Executive::transact_with_tracer); both place `Some(...)` `code` in `params`; qed"));
let res = { let res = {
self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::InitContract(trace_output.as_mut()), &mut subtracer, &mut subvmtracer) self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::InitContract(trace_output.as_mut()), &mut subtracer, &mut subvmtracer)
@ -384,10 +382,9 @@ impl<'a> Executive<'a> {
gas - gas_left, gas - gas_left,
trace_output, trace_output,
created, created,
self.depth,
subtracer.traces() subtracer.traces()
), ),
_ => tracer.trace_failed_create(trace_info, self.depth, subtracer.traces()) _ => tracer.trace_failed_create(trace_info, subtracer.traces())
}; };
self.enact_result(&res, substate, unconfirmed_substate); self.enact_result(&res, substate, unconfirmed_substate);
@ -401,7 +398,7 @@ impl<'a> Executive<'a> {
substate: Substate, substate: Substate,
result: evm::Result<U256>, result: evm::Result<U256>,
output: Bytes, output: Bytes,
trace: Option<Trace>, trace: Vec<FlatTrace>,
vm_trace: Option<VMTrace> vm_trace: Option<VMTrace>
) -> ExecutionResult { ) -> ExecutionResult {
let schedule = self.engine.schedule(self.info); let schedule = self.engine.schedule(self.info);
@ -493,8 +490,9 @@ mod tests {
use substate::*; use substate::*;
use tests::helpers::*; use tests::helpers::*;
use trace::trace; use trace::trace;
use trace::{Trace, Tracer, NoopTracer, ExecutiveTracer}; use trace::{FlatTrace, Tracer, NoopTracer, ExecutiveTracer};
use trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff, VMTracer, NoopVMTracer, ExecutiveVMTracer}; use trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff, VMTracer, NoopVMTracer, ExecutiveVMTracer};
use types::executed::CallType;
#[test] #[test]
fn test_contract_address() { fn test_contract_address() {
@ -628,6 +626,7 @@ mod tests {
params.gas = U256::from(100_000); params.gas = U256::from(100_000);
params.code = Some(code.clone()); params.code = Some(code.clone());
params.value = ActionValue::Transfer(U256::from(100)); params.value = ActionValue::Transfer(U256::from(100));
params.call_type = CallType::Call;
let mut state_result = get_temp_state(); let mut state_result = get_temp_state();
let mut state = state_result.reference_mut(); let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(100)); state.add_balance(&sender, &U256::from(100));
@ -645,35 +644,37 @@ mod tests {
assert_eq!(gas_left, U256::from(44_752)); assert_eq!(gas_left, U256::from(44_752));
let expected_trace = vec![ Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
subtraces: 1,
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: "cd1722f3947def4cf144679da39c4c32bdc35681".into(), from: "cd1722f3947def4cf144679da39c4c32bdc35681".into(),
to: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(), to: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(),
value: 100.into(), value: 100.into(),
gas: 100000.into(), gas: 100000.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: U256::from(55_248), gas_used: U256::from(55_248),
output: vec![], output: vec![],
}), }),
subs: vec![Trace { }, FlatTrace {
depth: 1, trace_address: vec![0].into_iter().collect(),
action: trace::Action::Create(trace::Create { subtraces: 0,
from: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(), action: trace::Action::Create(trace::Create {
value: 23.into(), from: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(),
gas: 67979.into(), value: 23.into(),
init: vec![96, 16, 128, 96, 12, 96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53, 85] gas: 67979.into(),
}), init: vec![96, 16, 128, 96, 12, 96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53, 85]
result: trace::Res::Create(trace::CreateResult { }),
gas_used: U256::from(3224), result: trace::Res::Create(trace::CreateResult {
address: Address::from_str("c6d80f262ae5e0f164e5fde365044d7ada2bfa34").unwrap(), gas_used: U256::from(3224),
code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53] address: Address::from_str("c6d80f262ae5e0f164e5fde365044d7ada2bfa34").unwrap(),
}), code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53]
subs: vec![] }),
}]
}]; }];
assert_eq!(tracer.traces(), expected_trace); assert_eq!(tracer.traces(), expected_trace);
let expected_vm_trace = VMTrace { let expected_vm_trace = VMTrace {
@ -751,8 +752,9 @@ mod tests {
assert_eq!(gas_left, U256::from(96_776)); assert_eq!(gas_left, U256::from(96_776));
let expected_trace = vec![Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
subtraces: 0,
action: trace::Action::Create(trace::Create { action: trace::Action::Create(trace::Create {
from: params.sender, from: params.sender,
value: 100.into(), value: 100.into(),
@ -764,8 +766,8 @@ mod tests {
address: params.address, address: params.address,
code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53] code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53]
}), }),
subs: vec![]
}]; }];
assert_eq!(tracer.traces(), expected_trace); assert_eq!(tracer.traces(), expected_trace);
let expected_vm_trace = VMTrace { let expected_vm_trace = VMTrace {
@ -1009,7 +1011,7 @@ mod tests {
gas: U256::from(100_000), gas: U256::from(100_000),
gas_price: U256::zero(), gas_price: U256::zero(),
nonce: U256::zero() nonce: U256::zero()
}.sign(&keypair.secret()); }.sign(keypair.secret());
let sender = t.sender().unwrap(); let sender = t.sender().unwrap();
let contract = contract_address(&sender, &U256::zero()); let contract = contract_address(&sender, &U256::zero());
@ -1076,7 +1078,7 @@ mod tests {
gas: U256::from(100_000), gas: U256::from(100_000),
gas_price: U256::zero(), gas_price: U256::zero(),
nonce: U256::one() nonce: U256::one()
}.sign(&keypair.secret()); }.sign(keypair.secret());
let sender = t.sender().unwrap(); let sender = t.sender().unwrap();
let mut state_result = get_temp_state(); let mut state_result = get_temp_state();
@ -1109,7 +1111,7 @@ mod tests {
gas: U256::from(80_001), gas: U256::from(80_001),
gas_price: U256::zero(), gas_price: U256::zero(),
nonce: U256::zero() nonce: U256::zero()
}.sign(&keypair.secret()); }.sign(keypair.secret());
let sender = t.sender().unwrap(); let sender = t.sender().unwrap();
let mut state_result = get_temp_state(); let mut state_result = get_temp_state();
@ -1144,7 +1146,7 @@ mod tests {
gas: U256::from(100_000), gas: U256::from(100_000),
gas_price: U256::one(), gas_price: U256::one(),
nonce: U256::zero() nonce: U256::zero()
}.sign(&keypair.secret()); }.sign(keypair.secret());
let sender = t.sender().unwrap(); let sender = t.sender().unwrap();
let mut state_result = get_temp_state(); let mut state_result = get_temp_state();

View File

@ -17,10 +17,11 @@
//! Transaction Execution environment. //! Transaction Execution environment.
use common::*; use common::*;
use state::*; use state::*;
use engine::*; use engines::Engine;
use executive::*; use executive::*;
use evm::{self, Schedule, Ext, ContractCreateResult, MessageCallResult, Factory}; use evm::{self, Schedule, Ext, ContractCreateResult, MessageCallResult, Factory};
use substate::*; use substate::*;
use types::executed::CallType;
use trace::{Tracer, VMTracer}; use trace::{Tracer, VMTracer};
/// Policy for handling output data on `RETURN` opcode. /// Policy for handling output data on `RETURN` opcode.
@ -148,6 +149,7 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT
value: ActionValue::Transfer(*value), value: ActionValue::Transfer(*value),
code: Some(code.to_vec()), code: Some(code.to_vec()),
data: None, data: None,
call_type: CallType::None,
}; };
self.state.inc_nonce(&self.origin_info.address); self.state.inc_nonce(&self.origin_info.address);
@ -170,7 +172,8 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT
value: Option<U256>, value: Option<U256>,
data: &[u8], data: &[u8],
code_address: &Address, code_address: &Address,
output: &mut [u8] output: &mut [u8],
call_type: CallType
) -> MessageCallResult { ) -> MessageCallResult {
trace!(target: "externalities", "call"); trace!(target: "externalities", "call");
@ -184,6 +187,7 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT
gas_price: self.origin_info.gas_price, gas_price: self.origin_info.gas_price,
code: self.state.code(code_address), code: self.state.code(code_address),
data: Some(data.to_vec()), data: Some(data.to_vec()),
call_type: call_type,
}; };
if let Some(value) = value { if let Some(value) = value {
@ -263,7 +267,7 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT
self.state.transfer_balance(&address, refund_address, &balance); self.state.transfer_balance(&address, refund_address, &balance);
} }
self.tracer.trace_suicide(address, balance, refund_address.clone(), self.depth + 1); self.tracer.trace_suicide(address, balance, refund_address.clone());
self.substate.suicides.insert(address); self.substate.suicides.insert(address);
} }
@ -272,7 +276,7 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT
} }
fn env_info(&self) -> &EnvInfo { fn env_info(&self) -> &EnvInfo {
&self.env_info self.env_info
} }
fn depth(&self) -> usize { fn depth(&self) -> usize {
@ -296,13 +300,14 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT
mod tests { mod tests {
use common::*; use common::*;
use state::*; use state::*;
use engine::*; use engines::Engine;
use evm::{Ext}; use evm::{Ext};
use substate::*; use substate::*;
use tests::helpers::*; use tests::helpers::*;
use devtools::GuardedTempResult; use devtools::GuardedTempResult;
use super::*; use super::*;
use trace::{NoopTracer, NoopVMTracer}; use trace::{NoopTracer, NoopVMTracer};
use types::executed::CallType;
fn get_test_origin() -> OriginInfo { fn get_test_origin() -> OriginInfo {
OriginInfo { OriginInfo {
@ -421,7 +426,9 @@ mod tests {
Some(U256::from_str("0000000000000000000000000000000000000000000000000000000000150000").unwrap()), Some(U256::from_str("0000000000000000000000000000000000000000000000000000000000150000").unwrap()),
&[], &[],
&Address::new(), &Address::new(),
&mut output); &mut output,
CallType::Call
);
} }
#[test] #[test]
@ -455,7 +462,7 @@ mod tests {
{ {
let vm_factory = Default::default(); let vm_factory = Default::default();
let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer); let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer);
ext.suicide(&refund_account); ext.suicide(refund_account);
} }
assert_eq!(setup.sub_state.suicides.len(), 1); assert_eq!(setup.sub_state.suicides.len(), 1);

View File

@ -234,7 +234,7 @@ impl Header {
s.append(&self.extra_data); s.append(&self.extra_data);
if let Seal::With = with_seal { if let Seal::With = with_seal {
for b in &self.seal { for b in &self.seal {
s.append_raw(&b, 1); s.append_raw(b, 1);
} }
} }
} }

View File

@ -17,11 +17,12 @@
use super::test_common::*; use super::test_common::*;
use state::*; use state::*;
use executive::*; use executive::*;
use engine::*; use engines::Engine;
use evm; use evm;
use evm::{Schedule, Ext, Factory, Finalize, VMType, ContractCreateResult, MessageCallResult}; use evm::{Schedule, Ext, Factory, Finalize, VMType, ContractCreateResult, MessageCallResult};
use externalities::*; use externalities::*;
use substate::*; use substate::*;
use types::executed::CallType;
use tests::helpers::*; use tests::helpers::*;
use ethjson; use ethjson;
use trace::{Tracer, NoopTracer}; use trace::{Tracer, NoopTracer};
@ -37,7 +38,7 @@ struct CallCreate {
impl From<ethjson::vm::Call> for CallCreate { impl From<ethjson::vm::Call> for CallCreate {
fn from(c: ethjson::vm::Call) -> Self { fn from(c: ethjson::vm::Call) -> Self {
let dst: Option<_> = c.destination.into(); let dst: Option<ethjson::hash::Address> = c.destination.into();
CallCreate { CallCreate {
data: c.data.into(), data: c.data.into(),
destination: dst.map(Into::into), destination: dst.map(Into::into),
@ -109,13 +110,15 @@ impl<'a, T, V> Ext for TestExt<'a, T, V> where T: Tracer, V: VMTracer {
} }
fn call(&mut self, fn call(&mut self,
gas: &U256, gas: &U256,
_sender_address: &Address, _sender_address: &Address,
receive_address: &Address, receive_address: &Address,
value: Option<U256>, value: Option<U256>,
data: &[u8], data: &[u8],
_code_address: &Address, _code_address: &Address,
_output: &mut [u8]) -> MessageCallResult { _output: &mut [u8],
_call_type: CallType
) -> MessageCallResult {
self.callcreates.push(CallCreate { self.callcreates.push(CallCreate {
data: data.to_vec(), data: data.to_vec(),
destination: Some(receive_address.clone()), destination: Some(receive_address.clone()),

View File

@ -49,7 +49,7 @@ fn do_json_test(json_data: &[u8]) -> Vec<String> {
fail_unless(t.gas_price == tx.gas_price.into()); fail_unless(t.gas_price == tx.gas_price.into());
fail_unless(t.nonce == tx.nonce.into()); fail_unless(t.nonce == tx.nonce.into());
fail_unless(t.value == tx.value.into()); fail_unless(t.value == tx.value.into());
let to: Option<_> = tx.to.into(); let to: Option<ethjson::hash::Address> = tx.to.into();
let to: Option<Address> = to.map(Into::into); let to: Option<Address> = to.map(Into::into);
match t.action { match t.action {
Action::Call(dest) => fail_unless(Some(dest) == to), Action::Call(dest) => fail_unless(Some(dest) == to),

View File

@ -15,7 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethjson; use ethjson;
use util::{H256, MemoryDB, TrieMut, TrieSpec, TrieFactory}; use util::{H256, MemoryDB, TrieSpec, TrieFactory};
fn test_trie(json: &[u8], trie: TrieSpec) -> Vec<String> { fn test_trie(json: &[u8], trie: TrieSpec) -> Vec<String> {
let tests = ethjson::trie::Test::load(json).unwrap(); let tests = ethjson::trie::Test::load(json).unwrap();

View File

@ -15,23 +15,20 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![warn(missing_docs)] #![warn(missing_docs)]
#![cfg_attr(feature="benches", feature(test))]
#![cfg_attr(feature="dev", feature(plugin))] #![cfg_attr(feature="dev", feature(plugin))]
#![cfg_attr(feature="dev", plugin(clippy))] #![cfg_attr(feature="dev", plugin(clippy))]
// Clippy config // Clippy settings
// TODO [todr] not really sure // Most of the time much more readable
#![cfg_attr(feature="dev", allow(needless_range_loop))] #![cfg_attr(feature="dev", allow(needless_range_loop))]
// Shorter than if-else // Shorter than if-else
#![cfg_attr(feature="dev", allow(match_bool))] #![cfg_attr(feature="dev", allow(match_bool))]
// Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. // Keeps consistency (all lines with `.clone()`).
#![cfg_attr(feature="dev", allow(clone_on_copy))] #![cfg_attr(feature="dev", allow(clone_on_copy))]
// In most cases it expresses function flow better
#![cfg_attr(feature="dev", allow(if_not_else))]
// TODO [todr] a lot of warnings to be fixed // TODO [todr] a lot of warnings to be fixed
#![cfg_attr(feature="dev", allow(needless_borrow))]
#![cfg_attr(feature="dev", allow(assign_op_pattern))] #![cfg_attr(feature="dev", allow(assign_op_pattern))]
#![cfg_attr(feature="benches", feature(test))]
//! Ethcore library //! Ethcore library
//! //!
@ -102,7 +99,7 @@ extern crate ethcore_devtools as devtools;
#[cfg(feature = "jit" )] extern crate evmjit; #[cfg(feature = "jit" )] extern crate evmjit;
pub mod account_provider; pub mod account_provider;
pub mod basic_authority; pub mod engines;
pub mod block; pub mod block;
pub mod block_queue; pub mod block_queue;
pub mod client; pub mod client;
@ -114,7 +111,6 @@ pub mod trace;
pub mod spec; pub mod spec;
pub mod views; pub mod views;
pub mod pod_state; pub mod pod_state;
pub mod engine;
pub mod migrations; pub mod migrations;
pub mod miner; pub mod miner;
pub mod snapshot; pub mod snapshot;
@ -130,7 +126,6 @@ mod pod_account;
mod state; mod state;
mod account; mod account;
mod account_db; mod account_db;
mod null_engine;
mod builtin; mod builtin;
mod substate; mod substate;
mod executive; mod executive;

View File

@ -0,0 +1,21 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Blocks database migrations.
mod v8;
pub use self::v8::V8;

View File

@ -0,0 +1,37 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! This migration compresses the state db.
use util::migration::{SimpleMigration, Progress};
use util::rlp::{Compressible, UntrustedRlp, View, RlpType};
/// Compressing migration.
#[derive(Default)]
pub struct V8(Progress);
impl SimpleMigration for V8 {
fn version(&self) -> u32 {
8
}
fn columns(&self) -> Option<u32> { None }
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
self.0.tick();
Some((key,UntrustedRlp::new(&value).compress(RlpType::Blocks).to_vec()))
}
}

View File

@ -34,9 +34,10 @@ impl ToV6 {
} }
impl SimpleMigration for ToV6 { impl SimpleMigration for ToV6 {
fn version(&self) -> u32 {
6 fn columns(&self) -> Option<u32> { None }
}
fn version(&self) -> u32 { 6 }
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> { fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {

View File

@ -1,4 +1,25 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Database migrations. //! Database migrations.
pub mod extras;
pub mod state; pub mod state;
pub mod blocks;
pub mod extras;
mod v9;
pub use self::v9::ToV9;
pub use self::v9::Extract;

View File

@ -22,7 +22,7 @@ use std::collections::HashMap;
use util::Bytes; use util::Bytes;
use util::hash::{Address, FixedHash, H256}; use util::hash::{Address, FixedHash, H256};
use util::kvdb::Database; use util::kvdb::Database;
use util::migration::{Batch, Config, Error, Migration, SimpleMigration}; use util::migration::{Batch, Config, Error, Migration, SimpleMigration, Progress};
use util::rlp::{decode, Rlp, RlpStream, Stream, View}; use util::rlp::{decode, Rlp, RlpStream, Stream, View};
use util::sha3::Hashable; use util::sha3::Hashable;
@ -63,19 +63,16 @@ fn attempt_migrate(mut key_h: H256, val: &[u8]) -> Option<H256> {
/// Version for `ArchiveDB`. /// Version for `ArchiveDB`.
#[derive(Default)] #[derive(Default)]
pub struct ArchiveV7(usize); pub struct ArchiveV7(Progress);
impl SimpleMigration for ArchiveV7 { impl SimpleMigration for ArchiveV7 {
fn version(&self) -> u32 {
7 fn columns(&self) -> Option<u32> { None }
}
fn version(&self) -> u32 { 7 }
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> { fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
self.0 += 1; self.0.tick();
if self.0 == 100_000 {
self.0 = 0;
flush!(".");
}
if key.len() != 32 { if key.len() != 32 {
// metadata key, ignore. // metadata key, ignore.
@ -109,7 +106,7 @@ impl OverlayRecentV7 {
// walk all journal entries in the database backwards. // walk all journal entries in the database backwards.
// find migrations for any possible inserted keys. // find migrations for any possible inserted keys.
fn walk_journal(&mut self, source: &Database) -> Result<(), Error> { fn walk_journal(&mut self, source: &Database) -> Result<(), Error> {
if let Some(val) = try!(source.get(V7_LATEST_ERA_KEY).map_err(Error::Custom)) { if let Some(val) = try!(source.get(None, V7_LATEST_ERA_KEY).map_err(Error::Custom)) {
let mut era = decode::<u64>(&val); let mut era = decode::<u64>(&val);
loop { loop {
let mut index: usize = 0; let mut index: usize = 0;
@ -120,7 +117,7 @@ impl OverlayRecentV7 {
r.out() r.out()
}; };
if let Some(journal_raw) = try!(source.get(&entry_key).map_err(Error::Custom)) { if let Some(journal_raw) = try!(source.get(None, &entry_key).map_err(Error::Custom)) {
let rlp = Rlp::new(&journal_raw); let rlp = Rlp::new(&journal_raw);
// migrate all inserted keys. // migrate all inserted keys.
@ -153,7 +150,7 @@ impl OverlayRecentV7 {
// replace all possible inserted/deleted keys with their migrated counterparts // replace all possible inserted/deleted keys with their migrated counterparts
// and commit the altered entries. // and commit the altered entries.
fn migrate_journal(&self, source: &Database, mut batch: Batch, dest: &mut Database) -> Result<(), Error> { fn migrate_journal(&self, source: &Database, mut batch: Batch, dest: &mut Database) -> Result<(), Error> {
if let Some(val) = try!(source.get(V7_LATEST_ERA_KEY).map_err(Error::Custom)) { if let Some(val) = try!(source.get(None, V7_LATEST_ERA_KEY).map_err(Error::Custom)) {
try!(batch.insert(V7_LATEST_ERA_KEY.into(), val.to_owned(), dest)); try!(batch.insert(V7_LATEST_ERA_KEY.into(), val.to_owned(), dest));
let mut era = decode::<u64>(&val); let mut era = decode::<u64>(&val);
@ -166,7 +163,7 @@ impl OverlayRecentV7 {
r.out() r.out()
}; };
if let Some(journal_raw) = try!(source.get(&entry_key).map_err(Error::Custom)) { if let Some(journal_raw) = try!(source.get(None, &entry_key).map_err(Error::Custom)) {
let rlp = Rlp::new(&journal_raw); let rlp = Rlp::new(&journal_raw);
let id: H256 = rlp.val_at(0); let id: H256 = rlp.val_at(0);
let mut inserted_keys: Vec<(H256, Bytes)> = Vec::new(); let mut inserted_keys: Vec<(H256, Bytes)> = Vec::new();
@ -221,22 +218,25 @@ impl OverlayRecentV7 {
} }
impl Migration for OverlayRecentV7 { impl Migration for OverlayRecentV7 {
fn columns(&self) -> Option<u32> { None }
fn version(&self) -> u32 { 7 } fn version(&self) -> u32 { 7 }
// walk all records in the database, attempting to migrate any possible and // walk all records in the database, attempting to migrate any possible and
// keeping records of those that we do. then migrate the journal using // keeping records of those that we do. then migrate the journal using
// this information. // this information.
fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database) -> Result<(), Error> { fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
let mut batch = Batch::new(config); let mut batch = Batch::new(config, col);
// check version metadata. // check version metadata.
match try!(source.get(V7_VERSION_KEY).map_err(Error::Custom)) { match try!(source.get(None, V7_VERSION_KEY).map_err(Error::Custom)) {
Some(ref version) if decode::<u32>(&*version) == DB_VERSION => {} Some(ref version) if decode::<u32>(&*version) == DB_VERSION => {}
_ => return Err(Error::MigrationImpossible), // missing or wrong version _ => return Err(Error::MigrationImpossible), // missing or wrong version
} }
let mut count = 0; let mut count = 0;
for (key, value) in source.iter() { for (key, value) in source.iter(None) {
count += 1; count += 1;
if count == 100_000 { if count == 100_000 {
count = 0; count = 0;

View File

@ -0,0 +1,82 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! This migration consolidates all databases into single one using Column Families.
use util::{Rlp, RlpStream, View, Stream};
use util::kvdb::Database;
use util::migration::{Batch, Config, Error, Migration, Progress};
/// Which part of block to preserve
pub enum Extract {
/// Extract block header RLP.
Header,
/// Extract block body RLP.
Body,
/// Don't change the value.
All,
}
/// Consolidation of extras/block/state databases into single one.
pub struct ToV9 {
progress: Progress,
column: Option<u32>,
extract: Extract,
}
impl ToV9 {
/// Creates new V9 migration and assigns all `(key,value)` pairs from `source` DB to given Column Family
pub fn new(column: Option<u32>, extract: Extract) -> Self {
ToV9 {
progress: Progress::default(),
column: column,
extract: extract,
}
}
}
impl Migration for ToV9 {
fn columns(&self) -> Option<u32> { Some(5) }
fn version(&self) -> u32 { 9 }
fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
let mut batch = Batch::new(config, self.column);
for (key, value) in source.iter(col) {
self.progress.tick();
match self.extract {
Extract::Header => {
try!(batch.insert(key.to_vec(), Rlp::new(&value).at(0).as_raw().to_vec(), dest))
},
Extract::Body => {
let mut body = RlpStream::new_list(2);
let block_rlp = Rlp::new(&value);
body.append_raw(block_rlp.at(1).as_raw(), 1);
body.append_raw(block_rlp.at(2).as_raw(), 1);
try!(batch.insert(key.to_vec(), body.out(), dest))
},
Extract::All => {
try!(batch.insert(key.to_vec(), value.to_vec(), dest))
}
}
}
batch.commit(dest)
}
}

View File

@ -15,7 +15,6 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use rayon::prelude::*; use rayon::prelude::*;
use std::sync::atomic::{self, AtomicBool};
use std::time::{Instant, Duration}; use std::time::{Instant, Duration};
use util::*; use util::*;
@ -29,14 +28,14 @@ use error::*;
use transaction::SignedTransaction; use transaction::SignedTransaction;
use receipt::Receipt; use receipt::Receipt;
use spec::Spec; use spec::Spec;
use engine::Engine; use engines::Engine;
use miner::{MinerService, MinerStatus, TransactionQueue, AccountDetails, TransactionOrigin}; use miner::{MinerService, MinerStatus, TransactionQueue, AccountDetails, TransactionOrigin};
use miner::work_notify::WorkPoster; use miner::work_notify::WorkPoster;
use client::TransactionImportResult; use client::TransactionImportResult;
use miner::price_info::PriceInfo; use miner::price_info::PriceInfo;
/// Different possible definitions for pending transaction set. /// Different possible definitions for pending transaction set.
#[derive(Debug)] #[derive(Debug, PartialEq)]
pub enum PendingSet { pub enum PendingSet {
/// Always just the transactions in the queue. These have had only cheap checks. /// Always just the transactions in the queue. These have had only cheap checks.
AlwaysQueue, AlwaysQueue,
@ -48,7 +47,7 @@ pub enum PendingSet {
} }
/// Configures the behaviour of the miner. /// Configures the behaviour of the miner.
#[derive(Debug)] #[derive(Debug, PartialEq)]
pub struct MinerOptions { pub struct MinerOptions {
/// URLs to notify when there is new work. /// URLs to notify when there is new work.
pub new_work_notify: Vec<String>, pub new_work_notify: Vec<String>,
@ -77,12 +76,12 @@ impl Default for MinerOptions {
MinerOptions { MinerOptions {
new_work_notify: vec![], new_work_notify: vec![],
force_sealing: false, force_sealing: false,
reseal_on_external_tx: true, reseal_on_external_tx: false,
reseal_on_own_tx: true, reseal_on_own_tx: true,
tx_gas_limit: !U256::zero(), tx_gas_limit: !U256::zero(),
tx_queue_size: 1024, tx_queue_size: 1024,
pending_set: PendingSet::AlwaysQueue, pending_set: PendingSet::AlwaysQueue,
reseal_min_period: Duration::from_secs(0), reseal_min_period: Duration::from_secs(2),
work_queue_size: 20, work_queue_size: 20,
enable_resubmission: true, enable_resubmission: true,
} }
@ -90,6 +89,7 @@ impl Default for MinerOptions {
} }
/// Options for the dynamic gas price recalibrator. /// Options for the dynamic gas price recalibrator.
#[derive(Debug, PartialEq)]
pub struct GasPriceCalibratorOptions { pub struct GasPriceCalibratorOptions {
/// Base transaction price to match against. /// Base transaction price to match against.
pub usd_per_tx: f32, pub usd_per_tx: f32,
@ -98,9 +98,9 @@ pub struct GasPriceCalibratorOptions {
} }
/// The gas price validator variant for a `GasPricer`. /// The gas price validator variant for a `GasPricer`.
#[derive(Debug, PartialEq)]
pub struct GasPriceCalibrator { pub struct GasPriceCalibrator {
options: GasPriceCalibratorOptions, options: GasPriceCalibratorOptions,
next_calibration: Instant, next_calibration: Instant,
} }
@ -128,6 +128,7 @@ impl GasPriceCalibrator {
} }
/// Struct to look after updating the acceptable gas price of a miner. /// Struct to look after updating the acceptable gas price of a miner.
#[derive(Debug, PartialEq)]
pub enum GasPricer { pub enum GasPricer {
/// A fixed gas price in terms of Wei - always the argument given. /// A fixed gas price in terms of Wei - always the argument given.
Fixed(U256), Fixed(U256),
@ -157,15 +158,20 @@ impl GasPricer {
} }
} }
struct SealingWork {
queue: UsingQueue<ClosedBlock>,
enabled: bool,
}
/// Keeps track of transactions using priority queue and holds currently mined block. /// Keeps track of transactions using priority queue and holds currently mined block.
pub struct Miner { pub struct Miner {
// NOTE [ToDr] When locking always lock in this order! // NOTE [ToDr] When locking always lock in this order!
transaction_queue: Arc<Mutex<TransactionQueue>>, transaction_queue: Arc<Mutex<TransactionQueue>>,
sealing_work: Mutex<UsingQueue<ClosedBlock>>, sealing_work: Mutex<SealingWork>,
// for sealing... // for sealing...
options: MinerOptions, options: MinerOptions,
sealing_enabled: AtomicBool,
next_allowed_reseal: Mutex<Instant>, next_allowed_reseal: Mutex<Instant>,
sealing_block_last_request: Mutex<u64>, sealing_block_last_request: Mutex<u64>,
gas_range_target: RwLock<(U256, U256)>, gas_range_target: RwLock<(U256, U256)>,
@ -184,10 +190,9 @@ impl Miner {
Miner { Miner {
transaction_queue: Arc::new(Mutex::new(TransactionQueue::new())), transaction_queue: Arc::new(Mutex::new(TransactionQueue::new())),
options: Default::default(), options: Default::default(),
sealing_enabled: AtomicBool::new(false),
next_allowed_reseal: Mutex::new(Instant::now()), next_allowed_reseal: Mutex::new(Instant::now()),
sealing_block_last_request: Mutex::new(0), sealing_block_last_request: Mutex::new(0),
sealing_work: Mutex::new(UsingQueue::new(20)), sealing_work: Mutex::new(SealingWork{queue: UsingQueue::new(20), enabled: false}),
gas_range_target: RwLock::new((U256::zero(), U256::zero())), gas_range_target: RwLock::new((U256::zero(), U256::zero())),
author: RwLock::new(Address::default()), author: RwLock::new(Address::default()),
extra_data: RwLock::new(Vec::new()), extra_data: RwLock::new(Vec::new()),
@ -204,10 +209,9 @@ impl Miner {
let txq = Arc::new(Mutex::new(TransactionQueue::with_limits(options.tx_queue_size, options.tx_gas_limit))); let txq = Arc::new(Mutex::new(TransactionQueue::with_limits(options.tx_queue_size, options.tx_gas_limit)));
Arc::new(Miner { Arc::new(Miner {
transaction_queue: txq, transaction_queue: txq,
sealing_enabled: AtomicBool::new(options.force_sealing || !options.new_work_notify.is_empty()),
next_allowed_reseal: Mutex::new(Instant::now()), next_allowed_reseal: Mutex::new(Instant::now()),
sealing_block_last_request: Mutex::new(0), sealing_block_last_request: Mutex::new(0),
sealing_work: Mutex::new(UsingQueue::new(options.work_queue_size)), sealing_work: Mutex::new(SealingWork{queue: UsingQueue::new(options.work_queue_size), enabled: options.force_sealing || !options.new_work_notify.is_empty()}),
gas_range_target: RwLock::new((U256::zero(), U256::zero())), gas_range_target: RwLock::new((U256::zero(), U256::zero())),
author: RwLock::new(Address::default()), author: RwLock::new(Address::default()),
extra_data: RwLock::new(Vec::new()), extra_data: RwLock::new(Vec::new()),
@ -229,12 +233,12 @@ impl Miner {
/// Get `Some` `clone()` of the current pending block's state or `None` if we're not sealing. /// Get `Some` `clone()` of the current pending block's state or `None` if we're not sealing.
pub fn pending_state(&self) -> Option<State> { pub fn pending_state(&self) -> Option<State> {
self.sealing_work.lock().peek_last_ref().map(|b| b.block().fields().state.clone()) self.sealing_work.lock().queue.peek_last_ref().map(|b| b.block().fields().state.clone())
} }
/// Get `Some` `clone()` of the current pending block's state or `None` if we're not sealing. /// Get `Some` `clone()` of the current pending block's state or `None` if we're not sealing.
pub fn pending_block(&self) -> Option<Block> { pub fn pending_block(&self) -> Option<Block> {
self.sealing_work.lock().peek_last_ref().map(|b| b.base().clone()) self.sealing_work.lock().queue.peek_last_ref().map(|b| b.base().clone())
} }
/// Prepares new block for sealing including top transactions from queue. /// Prepares new block for sealing including top transactions from queue.
@ -256,7 +260,7 @@ impl Miner {
let (transactions, mut open_block, original_work_hash) = { let (transactions, mut open_block, original_work_hash) = {
let transactions = {self.transaction_queue.lock().top_transactions()}; let transactions = {self.transaction_queue.lock().top_transactions()};
let mut sealing_work = self.sealing_work.lock(); let mut sealing_work = self.sealing_work.lock();
let last_work_hash = sealing_work.peek_last_ref().map(|pb| pb.block().fields().header.hash()); let last_work_hash = sealing_work.queue.peek_last_ref().map(|pb| pb.block().fields().header.hash());
let best_hash = chain.best_block_header().sha3(); let best_hash = chain.best_block_header().sha3();
/* /*
// check to see if last ClosedBlock in would_seals is actually same parent block. // check to see if last ClosedBlock in would_seals is actually same parent block.
@ -266,7 +270,7 @@ impl Miner {
// otherwise, leave everything alone. // otherwise, leave everything alone.
// otherwise, author a fresh block. // otherwise, author a fresh block.
*/ */
let open_block = match sealing_work.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) { let open_block = match sealing_work.queue.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) {
Some(old_block) => { Some(old_block) => {
trace!(target: "miner", "Already have previous work; updating and returning"); trace!(target: "miner", "Already have previous work; updating and returning");
// add transactions to old_block // add transactions to old_block
@ -357,7 +361,7 @@ impl Miner {
let (work, is_new) = { let (work, is_new) = {
let mut sealing_work = self.sealing_work.lock(); let mut sealing_work = self.sealing_work.lock();
let last_work_hash = sealing_work.peek_last_ref().map(|pb| pb.block().fields().header.hash()); let last_work_hash = sealing_work.queue.peek_last_ref().map(|pb| pb.block().fields().header.hash());
trace!(target: "miner", "Checking whether we need to reseal: orig={:?} last={:?}, this={:?}", original_work_hash, last_work_hash, block.block().fields().header.hash()); trace!(target: "miner", "Checking whether we need to reseal: orig={:?} last={:?}, this={:?}", original_work_hash, last_work_hash, block.block().fields().header.hash());
let (work, is_new) = if last_work_hash.map_or(true, |h| h != block.block().fields().header.hash()) { let (work, is_new) = if last_work_hash.map_or(true, |h| h != block.block().fields().header.hash()) {
trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash()); trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash());
@ -365,16 +369,16 @@ impl Miner {
let number = block.block().fields().header.number(); let number = block.block().fields().header.number();
let difficulty = *block.block().fields().header.difficulty(); let difficulty = *block.block().fields().header.difficulty();
let is_new = original_work_hash.map_or(true, |h| block.block().fields().header.hash() != h); let is_new = original_work_hash.map_or(true, |h| block.block().fields().header.hash() != h);
sealing_work.push(block); sealing_work.queue.push(block);
// If push notifications are enabled we assume all work items are used. // If push notifications are enabled we assume all work items are used.
if self.work_poster.is_some() && is_new { if self.work_poster.is_some() && is_new {
sealing_work.use_last_ref(); sealing_work.queue.use_last_ref();
} }
(Some((pow_hash, difficulty, number)), is_new) (Some((pow_hash, difficulty, number)), is_new)
} else { } else {
(None, false) (None, false)
}; };
trace!(target: "miner", "prepare_sealing: leaving (last={:?})", sealing_work.peek_last_ref().map(|b| b.block().fields().header.hash())); trace!(target: "miner", "prepare_sealing: leaving (last={:?})", sealing_work.queue.peek_last_ref().map(|b| b.block().fields().header.hash()));
(work, is_new) (work, is_new)
}; };
if is_new { if is_new {
@ -391,14 +395,22 @@ impl Miner {
/// Returns true if we had to prepare new pending block /// Returns true if we had to prepare new pending block
fn enable_and_prepare_sealing(&self, chain: &MiningBlockChainClient) -> bool { fn enable_and_prepare_sealing(&self, chain: &MiningBlockChainClient) -> bool {
trace!(target: "miner", "enable_and_prepare_sealing: entering"); trace!(target: "miner", "enable_and_prepare_sealing: entering");
let have_work = self.sealing_work.lock().peek_last_ref().is_some(); let prepare_new = {
trace!(target: "miner", "enable_and_prepare_sealing: have_work={}", have_work); let mut sealing_work = self.sealing_work.lock();
if !have_work { let have_work = sealing_work.queue.peek_last_ref().is_some();
trace!(target: "miner", "enable_and_prepare_sealing: have_work={}", have_work);
if !have_work {
sealing_work.enabled = true;
true
} else {
false
}
};
if prepare_new {
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// | NOTE Code below requires transaction_queue and sealing_work locks. | // | NOTE Code below requires transaction_queue and sealing_work locks. |
// | Make sure to release the locks before calling that method. | // | Make sure to release the locks before calling that method. |
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
self.sealing_enabled.store(true, atomic::Ordering::Relaxed);
self.prepare_sealing(chain); self.prepare_sealing(chain);
} }
let mut sealing_block_last_request = self.sealing_block_last_request.lock(); let mut sealing_block_last_request = self.sealing_block_last_request.lock();
@ -408,8 +420,8 @@ impl Miner {
*sealing_block_last_request = best_number; *sealing_block_last_request = best_number;
} }
// Return if // Return if we restarted
!have_work prepare_new
} }
fn add_transactions_to_queue(&self, chain: &MiningBlockChainClient, transactions: Vec<SignedTransaction>, origin: TransactionOrigin, transaction_queue: &mut TransactionQueue) -> fn add_transactions_to_queue(&self, chain: &MiningBlockChainClient, transactions: Vec<SignedTransaction>, origin: TransactionOrigin, transaction_queue: &mut TransactionQueue) ->
@ -448,13 +460,13 @@ impl MinerService for Miner {
MinerStatus { MinerStatus {
transactions_in_pending_queue: status.pending, transactions_in_pending_queue: status.pending,
transactions_in_future_queue: status.future, transactions_in_future_queue: status.future,
transactions_in_pending_block: sealing_work.peek_last_ref().map_or(0, |b| b.transactions().len()), transactions_in_pending_block: sealing_work.queue.peek_last_ref().map_or(0, |b| b.transactions().len()),
} }
} }
fn call(&self, chain: &MiningBlockChainClient, t: &SignedTransaction, analytics: CallAnalytics) -> Result<Executed, ExecutionError> { fn call(&self, chain: &MiningBlockChainClient, t: &SignedTransaction, analytics: CallAnalytics) -> Result<Executed, ExecutionError> {
let sealing_work = self.sealing_work.lock(); let sealing_work = self.sealing_work.lock();
match sealing_work.peek_last_ref() { match sealing_work.queue.peek_last_ref() {
Some(work) => { Some(work) => {
let block = work.block(); let block = work.block();
@ -501,7 +513,7 @@ impl MinerService for Miner {
fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 { fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 {
let sealing_work = self.sealing_work.lock(); let sealing_work = self.sealing_work.lock();
sealing_work.peek_last_ref().map_or_else( sealing_work.queue.peek_last_ref().map_or_else(
|| chain.latest_balance(address), || chain.latest_balance(address),
|b| b.block().fields().state.balance(address) |b| b.block().fields().state.balance(address)
) )
@ -509,7 +521,7 @@ impl MinerService for Miner {
fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> H256 { fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> H256 {
let sealing_work = self.sealing_work.lock(); let sealing_work = self.sealing_work.lock();
sealing_work.peek_last_ref().map_or_else( sealing_work.queue.peek_last_ref().map_or_else(
|| chain.latest_storage_at(address, position), || chain.latest_storage_at(address, position),
|b| b.block().fields().state.storage_at(address, position) |b| b.block().fields().state.storage_at(address, position)
) )
@ -517,12 +529,12 @@ impl MinerService for Miner {
fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 { fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 {
let sealing_work = self.sealing_work.lock(); let sealing_work = self.sealing_work.lock();
sealing_work.peek_last_ref().map_or_else(|| chain.latest_nonce(address), |b| b.block().fields().state.nonce(address)) sealing_work.queue.peek_last_ref().map_or_else(|| chain.latest_nonce(address), |b| b.block().fields().state.nonce(address))
} }
fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option<Bytes> { fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option<Bytes> {
let sealing_work = self.sealing_work.lock(); let sealing_work = self.sealing_work.lock();
sealing_work.peek_last_ref().map_or_else(|| chain.code(address), |b| b.block().fields().state.code(address)) sealing_work.queue.peek_last_ref().map_or_else(|| chain.code(address), |b| b.block().fields().state.code(address))
} }
fn set_author(&self, author: Address) { fn set_author(&self, author: Address) {
@ -671,8 +683,8 @@ impl MinerService for Miner {
let queue = self.transaction_queue.lock(); let queue = self.transaction_queue.lock();
let sw = self.sealing_work.lock(); let sw = self.sealing_work.lock();
// TODO: should only use the sealing_work when it's current (it could be an old block) // TODO: should only use the sealing_work when it's current (it could be an old block)
let sealing_set = match self.sealing_enabled.load(atomic::Ordering::Relaxed) { let sealing_set = match sw.enabled {
true => sw.peek_last_ref(), true => sw.queue.peek_last_ref(),
false => None, false => None,
}; };
match (&self.options.pending_set, sealing_set) { match (&self.options.pending_set, sealing_set) {
@ -684,8 +696,8 @@ impl MinerService for Miner {
fn pending_transactions_hashes(&self) -> Vec<H256> { fn pending_transactions_hashes(&self) -> Vec<H256> {
let queue = self.transaction_queue.lock(); let queue = self.transaction_queue.lock();
let sw = self.sealing_work.lock(); let sw = self.sealing_work.lock();
let sealing_set = match self.sealing_enabled.load(atomic::Ordering::Relaxed) { let sealing_set = match sw.enabled {
true => sw.peek_last_ref(), true => sw.queue.peek_last_ref(),
false => None, false => None,
}; };
match (&self.options.pending_set, sealing_set) { match (&self.options.pending_set, sealing_set) {
@ -697,8 +709,8 @@ impl MinerService for Miner {
fn transaction(&self, hash: &H256) -> Option<SignedTransaction> { fn transaction(&self, hash: &H256) -> Option<SignedTransaction> {
let queue = self.transaction_queue.lock(); let queue = self.transaction_queue.lock();
let sw = self.sealing_work.lock(); let sw = self.sealing_work.lock();
let sealing_set = match self.sealing_enabled.load(atomic::Ordering::Relaxed) { let sealing_set = match sw.enabled {
true => sw.peek_last_ref(), true => sw.queue.peek_last_ref(),
false => None, false => None,
}; };
match (&self.options.pending_set, sealing_set) { match (&self.options.pending_set, sealing_set) {
@ -708,7 +720,8 @@ impl MinerService for Miner {
} }
fn pending_receipts(&self) -> BTreeMap<H256, Receipt> { fn pending_receipts(&self) -> BTreeMap<H256, Receipt> {
match (self.sealing_enabled.load(atomic::Ordering::Relaxed), self.sealing_work.lock().peek_last_ref()) { let sealing_work = self.sealing_work.lock();
match (sealing_work.enabled, sealing_work.queue.peek_last_ref()) {
(true, Some(pending)) => { (true, Some(pending)) => {
let hashes = pending.transactions() let hashes = pending.transactions()
.iter() .iter()
@ -727,27 +740,43 @@ impl MinerService for Miner {
} }
fn update_sealing(&self, chain: &MiningBlockChainClient) { fn update_sealing(&self, chain: &MiningBlockChainClient) {
if self.sealing_enabled.load(atomic::Ordering::Relaxed) { trace!(target: "miner", "update_sealing");
let current_no = chain.chain_info().best_block_number; let requires_reseal = {
let has_local_transactions = self.transaction_queue.lock().has_local_pending_transactions(); let mut sealing_work = self.sealing_work.lock();
let last_request = *self.sealing_block_last_request.lock(); if sealing_work.enabled {
let should_disable_sealing = !self.forced_sealing() trace!(target: "miner", "update_sealing: sealing enabled");
&& !has_local_transactions let current_no = chain.chain_info().best_block_number;
&& current_no > last_request let has_local_transactions = self.transaction_queue.lock().has_local_pending_transactions();
&& current_no - last_request > SEALING_TIMEOUT_IN_BLOCKS; let last_request = *self.sealing_block_last_request.lock();
let should_disable_sealing = !self.forced_sealing()
&& !has_local_transactions
&& current_no > last_request
&& current_no - last_request > SEALING_TIMEOUT_IN_BLOCKS;
if should_disable_sealing { trace!(target: "miner", "update_sealing: should_disable_sealing={}; current_no={}, last_request={}", should_disable_sealing, current_no, last_request);
trace!(target: "miner", "Miner sleeping (current {}, last {})", current_no, last_request);
self.sealing_enabled.store(false, atomic::Ordering::Relaxed); if should_disable_sealing {
self.sealing_work.lock().reset(); trace!(target: "miner", "Miner sleeping (current {}, last {})", current_no, last_request);
sealing_work.enabled = false;
sealing_work.queue.reset();
false
} else {
// sealing enabled and we don't want to sleep.
*self.next_allowed_reseal.lock() = Instant::now() + self.options.reseal_min_period;
true
}
} else { } else {
*self.next_allowed_reseal.lock() = Instant::now() + self.options.reseal_min_period; // sealing is disabled.
// -------------------------------------------------------------------------- false
// | NOTE Code below requires transaction_queue and sealing_work locks. |
// | Make sure to release the locks before calling that method. |
// --------------------------------------------------------------------------
self.prepare_sealing(chain);
} }
};
if requires_reseal {
// --------------------------------------------------------------------------
// | NOTE Code below requires transaction_queue and sealing_work locks. |
// | Make sure to release the locks before calling that method. |
// --------------------------------------------------------------------------
self.prepare_sealing(chain);
} }
} }
@ -756,13 +785,13 @@ impl MinerService for Miner {
self.enable_and_prepare_sealing(chain); self.enable_and_prepare_sealing(chain);
trace!(target: "miner", "map_sealing_work: sealing prepared"); trace!(target: "miner", "map_sealing_work: sealing prepared");
let mut sealing_work = self.sealing_work.lock(); let mut sealing_work = self.sealing_work.lock();
let ret = sealing_work.use_last_ref(); let ret = sealing_work.queue.use_last_ref();
trace!(target: "miner", "map_sealing_work: leaving use_last_ref={:?}", ret.as_ref().map(|b| b.block().fields().header.hash())); trace!(target: "miner", "map_sealing_work: leaving use_last_ref={:?}", ret.as_ref().map(|b| b.block().fields().header.hash()));
ret.map(f) ret.map(f)
} }
fn submit_seal(&self, chain: &MiningBlockChainClient, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error> { fn submit_seal(&self, chain: &MiningBlockChainClient, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error> {
let result = if let Some(b) = self.sealing_work.lock().get_used_if(if self.options.enable_resubmission { GetAction::Clone } else { GetAction::Take }, |b| &b.hash() == &pow_hash) { let result = if let Some(b) = self.sealing_work.lock().queue.get_used_if(if self.options.enable_resubmission { GetAction::Clone } else { GetAction::Take }, |b| &b.hash() == &pow_hash) {
b.lock().try_seal(self.engine(), seal).or_else(|_| { b.lock().try_seal(self.engine(), seal).or_else(|_| {
warn!(target: "miner", "Mined solution rejected: Invalid."); warn!(target: "miner", "Mined solution rejected: Invalid.");
Err(Error::PowInvalid) Err(Error::PowInvalid)
@ -781,6 +810,8 @@ impl MinerService for Miner {
} }
fn chain_new_blocks(&self, chain: &MiningBlockChainClient, _imported: &[H256], _invalid: &[H256], enacted: &[H256], retracted: &[H256]) { fn chain_new_blocks(&self, chain: &MiningBlockChainClient, _imported: &[H256], _invalid: &[H256], enacted: &[H256], retracted: &[H256]) {
trace!(target: "miner", "chain_new_blocks");
fn fetch_transactions(chain: &MiningBlockChainClient, hash: &H256) -> Vec<SignedTransaction> { fn fetch_transactions(chain: &MiningBlockChainClient, hash: &H256) -> Vec<SignedTransaction> {
let block = chain let block = chain
.block(BlockID::Hash(*hash)) .block(BlockID::Hash(*hash))
@ -836,11 +867,13 @@ impl MinerService for Miner {
}); });
} }
// -------------------------------------------------------------------------- if enacted.len() > 0 {
// | NOTE Code below requires transaction_queue and sealing_work locks. | // --------------------------------------------------------------------------
// | Make sure to release the locks before calling that method. | // | NOTE Code below requires transaction_queue and sealing_work locks. |
// -------------------------------------------------------------------------- // | Make sure to release the locks before calling that method. |
self.update_sealing(chain); // --------------------------------------------------------------------------
self.update_sealing(chain);
}
} }
} }

View File

@ -35,7 +35,7 @@ pub struct WorkPoster {
impl WorkPoster { impl WorkPoster {
pub fn new(urls: &[String]) -> Self { pub fn new(urls: &[String]) -> Self {
let urls = urls.into_iter().filter_map(|u| { let urls = urls.into_iter().filter_map(|u| {
match Url::parse(&u) { match Url::parse(u) {
Ok(url) => Some(url), Ok(url) => Some(url),
Err(e) => { Err(e) => {
warn!("Error parsing URL {} : {}", u, e); warn!("Error parsing URL {} : {}", u, e);

View File

@ -28,8 +28,8 @@ pub struct PodAccount {
pub balance: U256, pub balance: U256,
/// The nonce of the account. /// The nonce of the account.
pub nonce: U256, pub nonce: U256,
/// The code of the account. /// The code of the account or `None` in the special case that it is unknown.
pub code: Bytes, pub code: Option<Bytes>,
/// The storage of the account. /// The storage of the account.
pub storage: BTreeMap<H256, H256>, pub storage: BTreeMap<H256, H256>,
} }
@ -38,7 +38,7 @@ impl PodAccount {
/// Construct new object. /// Construct new object.
#[cfg(test)] #[cfg(test)]
pub fn new(balance: U256, nonce: U256, code: Bytes, storage: BTreeMap<H256, H256>) -> PodAccount { pub fn new(balance: U256, nonce: U256, code: Bytes, storage: BTreeMap<H256, H256>) -> PodAccount {
PodAccount { balance: balance, nonce: nonce, code: code, storage: storage } PodAccount { balance: balance, nonce: nonce, code: Some(code), storage: storage }
} }
/// Convert Account to a PodAccount. /// Convert Account to a PodAccount.
@ -48,7 +48,7 @@ impl PodAccount {
balance: *acc.balance(), balance: *acc.balance(),
nonce: *acc.nonce(), nonce: *acc.nonce(),
storage: acc.storage_overlay().iter().fold(BTreeMap::new(), |mut m, (k, &(_, ref v))| {m.insert(k.clone(), v.clone()); m}), storage: acc.storage_overlay().iter().fold(BTreeMap::new(), |mut m, (k, &(_, ref v))| {m.insert(k.clone(), v.clone()); m}),
code: acc.code().unwrap().to_vec(), code: acc.code().map(|x| x.to_vec()),
} }
} }
@ -58,14 +58,15 @@ impl PodAccount {
stream.append(&self.nonce); stream.append(&self.nonce);
stream.append(&self.balance); stream.append(&self.balance);
stream.append(&sec_trie_root(self.storage.iter().map(|(k, v)| (k.to_vec(), encode(&U256::from(v.as_slice())).to_vec())).collect())); stream.append(&sec_trie_root(self.storage.iter().map(|(k, v)| (k.to_vec(), encode(&U256::from(v.as_slice())).to_vec())).collect()));
stream.append(&self.code.sha3()); stream.append(&self.code.as_ref().unwrap_or(&vec![]).sha3());
stream.out() stream.out()
} }
/// Place additional data into given hash DB. /// Place additional data into given hash DB.
pub fn insert_additional(&self, db: &mut AccountDBMut) { pub fn insert_additional(&self, db: &mut AccountDBMut) {
if !self.code.is_empty() { match self.code {
db.insert(&self.code); Some(ref c) if !c.is_empty() => { db.insert(c); }
_ => {}
} }
let mut r = H256::new(); let mut r = H256::new();
let mut t = SecTrieDBMut::new(db, &mut r); let mut t = SecTrieDBMut::new(db, &mut r);
@ -80,7 +81,7 @@ impl From<ethjson::blockchain::Account> for PodAccount {
PodAccount { PodAccount {
balance: a.balance.into(), balance: a.balance.into(),
nonce: a.nonce.into(), nonce: a.nonce.into(),
code: a.code.into(), code: Some(a.code.into()),
storage: a.storage.into_iter().map(|(key, value)| { storage: a.storage.into_iter().map(|(key, value)| {
let key: U256 = key.into(); let key: U256 = key.into();
let value: U256 = value.into(); let value: U256 = value.into();
@ -95,7 +96,7 @@ impl From<ethjson::spec::Account> for PodAccount {
PodAccount { PodAccount {
balance: a.balance.map_or_else(U256::zero, Into::into), balance: a.balance.map_or_else(U256::zero, Into::into),
nonce: a.nonce.map_or_else(U256::zero, Into::into), nonce: a.nonce.map_or_else(U256::zero, Into::into),
code: vec![], code: Some(vec![]),
storage: BTreeMap::new() storage: BTreeMap::new()
} }
} }
@ -103,7 +104,13 @@ impl From<ethjson::spec::Account> for PodAccount {
impl fmt::Display for PodAccount { impl fmt::Display for PodAccount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "(bal={}; nonce={}; code={} bytes, #{}; storage={} items)", self.balance, self.nonce, self.code.len(), self.code.sha3(), self.storage.len()) write!(f, "(bal={}; nonce={}; code={} bytes, #{}; storage={} items)",
self.balance,
self.nonce,
self.code.as_ref().map_or(0, |c| c.len()),
self.code.as_ref().map_or_else(H256::new, |c| c.sha3()),
self.storage.len()
)
} }
} }
@ -114,13 +121,13 @@ pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option<A
(None, Some(x)) => Some(AccountDiff { (None, Some(x)) => Some(AccountDiff {
balance: Diff::Born(x.balance), balance: Diff::Born(x.balance),
nonce: Diff::Born(x.nonce), nonce: Diff::Born(x.nonce),
code: Diff::Born(x.code.clone()), code: Diff::Born(x.code.as_ref().expect("account is newly created; newly created accounts must be given code; all caches should remain in place; qed").clone()),
storage: x.storage.iter().map(|(k, v)| (k.clone(), Diff::Born(v.clone()))).collect(), storage: x.storage.iter().map(|(k, v)| (k.clone(), Diff::Born(v.clone()))).collect(),
}), }),
(Some(x), None) => Some(AccountDiff { (Some(x), None) => Some(AccountDiff {
balance: Diff::Died(x.balance), balance: Diff::Died(x.balance),
nonce: Diff::Died(x.nonce), nonce: Diff::Died(x.nonce),
code: Diff::Died(x.code.clone()), code: Diff::Died(x.code.as_ref().expect("account is deleted; only way to delete account is running SUICIDE; account must have had own code cached to make operation; all caches should remain in place; qed").clone()),
storage: x.storage.iter().map(|(k, v)| (k.clone(), Diff::Died(v.clone()))).collect(), storage: x.storage.iter().map(|(k, v)| (k.clone(), Diff::Died(v.clone()))).collect(),
}), }),
(Some(pre), Some(post)) => { (Some(pre), Some(post)) => {
@ -130,11 +137,14 @@ pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option<A
let r = AccountDiff { let r = AccountDiff {
balance: Diff::new(pre.balance, post.balance), balance: Diff::new(pre.balance, post.balance),
nonce: Diff::new(pre.nonce, post.nonce), nonce: Diff::new(pre.nonce, post.nonce),
code: Diff::new(pre.code.clone(), post.code.clone()), code: match (pre.code.clone(), post.code.clone()) {
(Some(pre_code), Some(post_code)) => Diff::new(pre_code, post_code),
_ => Diff::Same,
},
storage: storage.into_iter().map(|k| storage: storage.into_iter().map(|k|
(k.clone(), Diff::new( (k.clone(), Diff::new(
pre.storage.get(&k).cloned().unwrap_or_else(H256::new), pre.storage.get(k).cloned().unwrap_or_else(H256::new),
post.storage.get(&k).cloned().unwrap_or_else(H256::new) post.storage.get(k).cloned().unwrap_or_else(H256::new)
))).collect(), ))).collect(),
}; };
if r.balance.is_same() && r.nonce.is_same() && r.code.is_same() && r.storage.is_empty() { if r.balance.is_same() && r.nonce.is_same() && r.code.is_same() && r.storage.is_empty() {
@ -156,7 +166,7 @@ mod test {
#[test] #[test]
fn existence() { fn existence() {
let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: vec![], storage: map![]}; let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: Some(vec![]), storage: map![]};
assert_eq!(diff_pod(Some(&a), Some(&a)), None); assert_eq!(diff_pod(Some(&a), Some(&a)), None);
assert_eq!(diff_pod(None, Some(&a)), Some(AccountDiff{ assert_eq!(diff_pod(None, Some(&a)), Some(AccountDiff{
balance: Diff::Born(69.into()), balance: Diff::Born(69.into()),
@ -168,8 +178,8 @@ mod test {
#[test] #[test]
fn basic() { fn basic() {
let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: vec![], storage: map![]}; let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: Some(vec![]), storage: map![]};
let b = PodAccount{balance: 42.into(), nonce: 1.into(), code: vec![], storage: map![]}; let b = PodAccount{balance: 42.into(), nonce: 1.into(), code: Some(vec![]), storage: map![]};
assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff { assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff {
balance: Diff::Changed(69.into(), 42.into()), balance: Diff::Changed(69.into(), 42.into()),
nonce: Diff::Changed(0.into(), 1.into()), nonce: Diff::Changed(0.into(), 1.into()),
@ -180,8 +190,8 @@ mod test {
#[test] #[test]
fn code() { fn code() {
let a = PodAccount{balance: 0.into(), nonce: 0.into(), code: vec![], storage: map![]}; let a = PodAccount{balance: 0.into(), nonce: 0.into(), code: Some(vec![]), storage: map![]};
let b = PodAccount{balance: 0.into(), nonce: 1.into(), code: vec![0], storage: map![]}; let b = PodAccount{balance: 0.into(), nonce: 1.into(), code: Some(vec![0]), storage: map![]};
assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff { assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff {
balance: Diff::Same, balance: Diff::Same,
nonce: Diff::Changed(0.into(), 1.into()), nonce: Diff::Changed(0.into(), 1.into()),
@ -195,13 +205,13 @@ mod test {
let a = PodAccount { let a = PodAccount {
balance: 0.into(), balance: 0.into(),
nonce: 0.into(), nonce: 0.into(),
code: vec![], code: Some(vec![]),
storage: map_into![1 => 1, 2 => 2, 3 => 3, 4 => 4, 5 => 0, 6 => 0, 7 => 0] storage: map_into![1 => 1, 2 => 2, 3 => 3, 4 => 4, 5 => 0, 6 => 0, 7 => 0]
}; };
let b = PodAccount { let b = PodAccount {
balance: 0.into(), balance: 0.into(),
nonce: 0.into(), nonce: 0.into(),
code: vec![], code: Some(vec![]),
storage: map_into![1 => 1, 2 => 3, 3 => 0, 5 => 0, 7 => 7, 8 => 0, 9 => 9] storage: map_into![1 => 1, 2 => 3, 3 => 0, 5 => 0, 7 => 7, 8 => 0, 9 => 9]
}; };
assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff { assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff {

View File

@ -138,7 +138,7 @@ impl IoHandler<ClientIoMessage> for ClientIoHandler {
fn message(&self, _io: &IoContext<ClientIoMessage>, net_message: &ClientIoMessage) { fn message(&self, _io: &IoContext<ClientIoMessage>, net_message: &ClientIoMessage) {
match *net_message { match *net_message {
ClientIoMessage::BlockVerified => { self.client.import_verified_blocks(); } ClientIoMessage::BlockVerified => { self.client.import_verified_blocks(); }
ClientIoMessage::NewTransactions(ref transactions) => { self.client.import_queued_transactions(&transactions); } ClientIoMessage::NewTransactions(ref transactions) => { self.client.import_queued_transactions(transactions); }
_ => {} // ignore other messages _ => {} // ignore other messages
} }
} }
@ -175,7 +175,7 @@ mod tests {
let service = ClientService::start( let service = ClientService::start(
ClientConfig::default(), ClientConfig::default(),
get_test_spec(), get_test_spec(),
&temp_path.as_path(), temp_path.as_path(),
Arc::new(Miner::with_spec(get_test_spec())), Arc::new(Miner::with_spec(get_test_spec())),
); );
assert!(service.is_ok()); assert!(service.is_ok());

View File

@ -27,9 +27,10 @@ use error::Error;
use ids::BlockID; use ids::BlockID;
use views::{BlockView, HeaderView}; use views::{BlockView, HeaderView};
use util::{Bytes, Hashable, HashDB, JournalDB, snappy, TrieDB, TrieDBMut, TrieMut}; use util::{Bytes, Hashable, HashDB, JournalDB, snappy, TrieDB, TrieDBMut, TrieMut, DBTransaction};
use util::error::UtilError;
use util::hash::{FixedHash, H256}; use util::hash::{FixedHash, H256};
use util::rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View}; use util::rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View, Compressible, RlpType};
use self::account::Account; use self::account::Account;
use self::block::AbridgedBlock; use self::block::AbridgedBlock;
@ -261,7 +262,8 @@ pub fn chunk_state(db: &HashDB, root: &H256, path: &Path) -> Result<Vec<H256>, E
let account_db = AccountDB::from_hash(db, account_key_hash); let account_db = AccountDB::from_hash(db, account_key_hash);
let fat_rlp = try!(account.to_fat_rlp(&account_db)); let fat_rlp = try!(account.to_fat_rlp(&account_db));
try!(chunker.push(account_key, fat_rlp)); let compressed_rlp = UntrustedRlp::new(&fat_rlp).compress(RlpType::Snapshot).to_vec();
try!(chunker.push(account_key, compressed_rlp));
} }
if chunker.cur_size != 0 { if chunker.cur_size != 0 {
@ -358,7 +360,9 @@ impl StateRebuilder {
try!(rebuild_account_trie(db.as_hashdb_mut(), account_chunk, out_pairs_chunk)); try!(rebuild_account_trie(db.as_hashdb_mut(), account_chunk, out_pairs_chunk));
// commit the db changes we made in this thread. // commit the db changes we made in this thread.
try!(db.commit(0, &H256::zero(), None)); let batch = DBTransaction::new(&db.backing());
try!(db.commit(&batch, 0, &H256::zero(), None));
try!(db.backing().write(batch).map_err(UtilError::SimpleString));
Ok(()) Ok(())
}); });
@ -387,7 +391,9 @@ impl StateRebuilder {
} }
} }
try!(self.db.commit(0, &H256::zero(), None)); let batch = DBTransaction::new(&self.db.backing());
try!(self.db.commit(&batch, 0, &H256::zero(), None));
try!(self.db.backing().write(batch).map_err(|e| Error::Util(e.into())));
Ok(()) Ok(())
} }
@ -400,7 +406,8 @@ fn rebuild_account_trie(db: &mut HashDB, account_chunk: &[&[u8]], out_chunk: &mu
let account_rlp = UntrustedRlp::new(account_pair); let account_rlp = UntrustedRlp::new(account_pair);
let hash: H256 = try!(account_rlp.val_at(0)); let hash: H256 = try!(account_rlp.val_at(0));
let fat_rlp = try!(account_rlp.at(1)); let decompressed = try!(account_rlp.at(1)).decompress(RlpType::Snapshot);
let fat_rlp = UntrustedRlp::new(&decompressed[..]);
let thin_rlp = { let thin_rlp = {
let mut acct_db = AccountDBMut::from_hash(db.as_hashdb_mut(), hash); let mut acct_db = AccountDBMut::from_hash(db.as_hashdb_mut(), hash);

View File

@ -17,14 +17,12 @@
//! Parameters for a block chain. //! Parameters for a block chain.
use common::*; use common::*;
use engine::*; use engines::{Engine, NullEngine, BasicAuthority};
use pod_state::*; use pod_state::*;
use null_engine::*;
use account_db::*; use account_db::*;
use super::genesis::Genesis; use super::genesis::Genesis;
use super::seal::Generic as GenericSeal; use super::seal::Generic as GenericSeal;
use ethereum; use ethereum;
use basic_authority::BasicAuthority;
use ethjson; use ethjson;
/// Parameters common to all engines. /// Parameters common to all engines.
@ -38,6 +36,8 @@ pub struct CommonParams {
pub network_id: U256, pub network_id: U256,
/// Minimum gas limit. /// Minimum gas limit.
pub min_gas_limit: U256, pub min_gas_limit: U256,
/// Fork block to check.
pub fork_block: Option<(BlockNumber, H256)>,
} }
impl From<ethjson::spec::Params> for CommonParams { impl From<ethjson::spec::Params> for CommonParams {
@ -47,6 +47,7 @@ impl From<ethjson::spec::Params> for CommonParams {
maximum_extra_data_size: p.maximum_extra_data_size.into(), maximum_extra_data_size: p.maximum_extra_data_size.into(),
network_id: p.network_id.into(), network_id: p.network_id.into(),
min_gas_limit: p.min_gas_limit.into(), min_gas_limit: p.min_gas_limit.into(),
fork_block: if let (Some(n), Some(h)) = (p.fork_block, p.fork_hash) { Some((n.into(), h.into())) } else { None },
} }
} }
} }
@ -151,6 +152,9 @@ impl Spec {
/// Get the configured Network ID. /// Get the configured Network ID.
pub fn network_id(&self) -> U256 { self.params.network_id } pub fn network_id(&self) -> U256 { self.params.network_id }
/// Get the configured network fork block.
pub fn fork_block(&self) -> Option<(BlockNumber, H256)> { self.params.fork_block }
/// Get the header of the genesis block. /// Get the header of the genesis block.
pub fn genesis_header(&self) -> Header { pub fn genesis_header(&self) -> Header {
Header { Header {

View File

@ -15,11 +15,11 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use common::*; use common::*;
use engine::Engine; use engines::Engine;
use executive::{Executive, TransactOptions}; use executive::{Executive, TransactOptions};
use evm::Factory as EvmFactory; use evm::Factory as EvmFactory;
use account_db::*; use account_db::*;
use trace::Trace; use trace::FlatTrace;
use pod_account::*; use pod_account::*;
use pod_state::{self, PodState}; use pod_state::{self, PodState};
use types::state_diff::StateDiff; use types::state_diff::StateDiff;
@ -29,7 +29,7 @@ pub struct ApplyOutcome {
/// The receipt for the applied transaction. /// The receipt for the applied transaction.
pub receipt: Receipt, pub receipt: Receipt,
/// The trace for the applied transaction, if None if tracing is disabled. /// The trace for the applied transaction, if None if tracing is disabled.
pub trace: Option<Trace>, pub trace: Vec<FlatTrace>,
} }
/// Result type for the execution ("application") of a transaction. /// Result type for the execution ("application") of a transaction.
@ -122,7 +122,7 @@ impl State {
fn insert_cache(&self, address: &Address, account: Option<Account>) { fn insert_cache(&self, address: &Address, account: Option<Account>) {
if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() { if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() {
if !snapshot.contains_key(&address) { if !snapshot.contains_key(address) {
snapshot.insert(address.clone(), self.cache.borrow_mut().insert(address.clone(), account)); snapshot.insert(address.clone(), self.cache.borrow_mut().insert(address.clone(), account));
return; return;
} }
@ -132,7 +132,7 @@ impl State {
fn note_cache(&self, address: &Address) { fn note_cache(&self, address: &Address) {
if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() { if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() {
if !snapshot.contains_key(&address) { if !snapshot.contains_key(address) {
snapshot.insert(address.clone(), self.cache.borrow().get(address).cloned()); snapshot.insert(address.clone(), self.cache.borrow().get(address).cloned());
} }
} }
@ -151,7 +151,7 @@ impl State {
/// Create a new contract at address `contract`. If there is already an account at the address /// Create a new contract at address `contract`. If there is already an account at the address
/// it will have its code reset, ready for `init_code()`. /// it will have its code reset, ready for `init_code()`.
pub fn new_contract(&mut self, contract: &Address, balance: U256) { pub fn new_contract(&mut self, contract: &Address, balance: U256) {
self.insert_cache(&contract, Some(Account::new_contract(balance, self.account_start_nonce))); self.insert_cache(contract, Some(Account::new_contract(balance, self.account_start_nonce)));
} }
/// Remove an existing account. /// Remove an existing account.
@ -162,7 +162,7 @@ impl State {
/// Determine whether an account exists. /// Determine whether an account exists.
pub fn exists(&self, a: &Address) -> bool { pub fn exists(&self, a: &Address) -> bool {
let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
self.cache.borrow().get(&a).unwrap_or(&None).is_some() || db.contains(&a) self.cache.borrow().get(a).unwrap_or(&None).is_some() || db.contains(a)
} }
/// Get the balance of account `a`. /// Get the balance of account `a`.
@ -329,7 +329,7 @@ impl State {
let have_key = self.cache.borrow().contains_key(a); let have_key = self.cache.borrow().contains_key(a);
if !have_key { if !have_key {
let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
self.insert_cache(a, db.get(&a).map(Account::from_rlp)) self.insert_cache(a, db.get(a).map(Account::from_rlp))
} }
if require_code { if require_code {
if let Some(ref mut account) = self.cache.borrow_mut().get_mut(a).unwrap().as_mut() { if let Some(ref mut account) = self.cache.borrow_mut().get_mut(a).unwrap().as_mut() {
@ -350,7 +350,7 @@ impl State {
let have_key = self.cache.borrow().contains_key(a); let have_key = self.cache.borrow().contains_key(a);
if !have_key { if !have_key {
let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
self.insert_cache(a, db.get(&a).map(Account::from_rlp)) self.insert_cache(a, db.get(a).map(Account::from_rlp))
} else { } else {
self.note_cache(a); self.note_cache(a);
} }
@ -402,7 +402,8 @@ use spec::*;
use transaction::*; use transaction::*;
use util::log::init_log; use util::log::init_log;
use trace::trace; use trace::trace;
use trace::trace::{Trace}; use trace::FlatTrace;
use types::executed::CallType;
#[test] #[test]
fn should_apply_create_transaction() { fn should_apply_create_transaction() {
@ -427,8 +428,9 @@ fn should_apply_create_transaction() {
state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
subtraces: 0,
action: trace::Action::Create(trace::Create { action: trace::Action::Create(trace::Create {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
value: 100.into(), value: 100.into(),
@ -440,8 +442,7 @@ fn should_apply_create_transaction() {
address: Address::from_str("8988167e088c87cd314df6d3c2b83da5acb93ace").unwrap(), address: Address::from_str("8988167e088c87cd314df6d3c2b83da5acb93ace").unwrap(),
code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53] code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53]
}), }),
subs: vec![] }];
});
assert_eq!(result.trace, expected_trace); assert_eq!(result.trace, expected_trace);
} }
@ -488,8 +489,8 @@ fn should_trace_failed_create_transaction() {
state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
action: trace::Action::Create(trace::Create { action: trace::Action::Create(trace::Create {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
value: 100.into(), value: 100.into(),
@ -497,8 +498,8 @@ fn should_trace_failed_create_transaction() {
init: vec![91, 96, 0, 86], init: vec![91, 96, 0, 86],
}), }),
result: trace::Res::FailedCreate, result: trace::Res::FailedCreate,
subs: vec![] subtraces: 0
}); }];
assert_eq!(result.trace, expected_trace); assert_eq!(result.trace, expected_trace);
} }
@ -527,21 +528,22 @@ fn should_trace_call_transaction() {
state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(), to: 0xa.into(),
value: 100.into(), value: 100.into(),
gas: 79000.into(), gas: 79000.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: U256::from(3), gas_used: U256::from(3),
output: vec![] output: vec![]
}), }),
subs: vec![] subtraces: 0,
}); }];
assert_eq!(result.trace, expected_trace); assert_eq!(result.trace, expected_trace);
} }
@ -569,21 +571,22 @@ fn should_trace_basic_call_transaction() {
state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(), to: 0xa.into(),
value: 100.into(), value: 100.into(),
gas: 79000.into(), gas: 79000.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: U256::from(0), gas_used: U256::from(0),
output: vec![] output: vec![]
}), }),
subs: vec![] subtraces: 0,
}); }];
assert_eq!(result.trace, expected_trace); assert_eq!(result.trace, expected_trace);
} }
@ -611,21 +614,24 @@ fn should_trace_call_transaction_to_builtin() {
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap(); let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap();
assert_eq!(result.trace, Some(Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: "0000000000000000000000000000000000000001".into(), to: "0000000000000000000000000000000000000001".into(),
value: 0.into(), value: 0.into(),
gas: 79_000.into(), gas: 79_000.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: U256::from(3000), gas_used: U256::from(3000),
output: vec![] output: vec![]
}), }),
subs: vec![] subtraces: 0,
})); }];
assert_eq!(result.trace, expected_trace);
} }
#[test] #[test]
@ -652,21 +658,23 @@ fn should_not_trace_subcall_transaction_to_builtin() {
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap(); let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(), to: 0xa.into(),
value: 0.into(), value: 0.into(),
gas: 79000.into(), gas: 79000.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: U256::from(28_061), gas_used: U256::from(28_061),
output: vec![] output: vec![]
}), }),
subs: vec![] subtraces: 0,
}); }];
assert_eq!(result.trace, expected_trace); assert_eq!(result.trace, expected_trace);
} }
@ -695,21 +703,38 @@ fn should_not_trace_callcode() {
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap(); let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
subtraces: 1,
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(), to: 0xa.into(),
value: 0.into(), value: 0.into(),
gas: 79000.into(), gas: 79000.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: U256::from(64), gas_used: 64.into(),
output: vec![] output: vec![]
}), }),
subs: vec![] }, FlatTrace {
}); trace_address: vec![0].into_iter().collect(),
subtraces: 0,
action: trace::Action::Call(trace::Call {
from: 0xa.into(),
to: 0xa.into(),
value: 0.into(),
gas: 4096.into(),
input: vec![],
call_type: CallType::CallCode,
}),
result: trace::Res::Call(trace::CallResult {
gas_used: 3.into(),
output: vec![],
}),
}];
assert_eq!(result.trace, expected_trace); assert_eq!(result.trace, expected_trace);
} }
@ -741,21 +766,38 @@ fn should_not_trace_delegatecall() {
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap(); let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
subtraces: 1,
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(), to: 0xa.into(),
value: 0.into(), value: 0.into(),
gas: 79000.into(), gas: 79000.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: U256::from(61), gas_used: U256::from(61),
output: vec![] output: vec![]
}), }),
subs: vec![] }, FlatTrace {
}); trace_address: vec![0].into_iter().collect(),
subtraces: 0,
action: trace::Action::Call(trace::Call {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(),
value: 0.into(),
gas: 32768.into(),
input: vec![],
call_type: CallType::DelegateCall,
}),
result: trace::Res::Call(trace::CallResult {
gas_used: 3.into(),
output: vec![],
}),
}];
assert_eq!(result.trace, expected_trace); assert_eq!(result.trace, expected_trace);
} }
@ -783,20 +825,19 @@ fn should_trace_failed_call_transaction() {
state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(), to: 0xa.into(),
value: 100.into(), value: 100.into(),
gas: 79000.into(), gas: 79000.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: trace::Res::FailedCall, result: trace::Res::FailedCall,
subs: vec![] subtraces: 0,
}); }];
println!("trace: {:?}", result.trace);
assert_eq!(result.trace, expected_trace); assert_eq!(result.trace, expected_trace);
} }
@ -826,35 +867,38 @@ fn should_trace_call_with_subcall_transaction() {
state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0, let expected_trace = vec![FlatTrace {
trace_address: Default::default(),
subtraces: 1,
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(), to: 0xa.into(),
value: 100.into(), value: 100.into(),
gas: 79000.into(), gas: 79000.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: U256::from(69), gas_used: U256::from(69),
output: vec![] output: vec![]
}), }),
subs: vec![Trace { }, FlatTrace {
depth: 1, trace_address: vec![0].into_iter().collect(),
action: trace::Action::Call(trace::Call { subtraces: 0,
from: 0xa.into(), action: trace::Action::Call(trace::Call {
to: 0xb.into(), from: 0xa.into(),
value: 0.into(), to: 0xb.into(),
gas: 78934.into(), value: 0.into(),
input: vec![], gas: 78934.into(),
}), input: vec![],
result: trace::Res::Call(trace::CallResult { call_type: CallType::Call,
gas_used: U256::from(3), }),
output: vec![] result: trace::Res::Call(trace::CallResult {
}), gas_used: U256::from(3),
subs: vec![] output: vec![]
}] }),
}); }];
assert_eq!(result.trace, expected_trace); assert_eq!(result.trace, expected_trace);
} }
@ -883,32 +927,34 @@ fn should_trace_call_with_basic_subcall_transaction() {
state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
subtraces: 1,
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(), to: 0xa.into(),
value: 100.into(), value: 100.into(),
gas: 79000.into(), gas: 79000.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: U256::from(31761), gas_used: U256::from(31761),
output: vec![] output: vec![]
}), }),
subs: vec![Trace { }, FlatTrace {
depth: 1, trace_address: vec![0].into_iter().collect(),
action: trace::Action::Call(trace::Call { subtraces: 0,
from: 0xa.into(), action: trace::Action::Call(trace::Call {
to: 0xb.into(), from: 0xa.into(),
value: 69.into(), to: 0xb.into(),
gas: 2300.into(), value: 69.into(),
input: vec![], gas: 2300.into(),
}), input: vec![],
result: trace::Res::Call(trace::CallResult::default()), call_type: CallType::Call,
subs: vec![] }),
}] result: trace::Res::Call(trace::CallResult::default()),
}); }];
assert_eq!(result.trace, expected_trace); assert_eq!(result.trace, expected_trace);
} }
@ -937,21 +983,22 @@ fn should_not_trace_call_with_invalid_basic_subcall_transaction() {
state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
subtraces: 0,
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(), to: 0xa.into(),
value: 100.into(), value: 100.into(),
gas: 79000.into(), gas: 79000.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: U256::from(31761), gas_used: U256::from(31761),
output: vec![] output: vec![]
}), }),
subs: vec![] }];
});
assert_eq!(result.trace, expected_trace); assert_eq!(result.trace, expected_trace);
} }
@ -981,32 +1028,34 @@ fn should_trace_failed_subcall_transaction() {
state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
subtraces: 1,
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(), to: 0xa.into(),
value: 100.into(), value: 100.into(),
gas: 79000.into(), gas: 79000.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: U256::from(79_000), gas_used: U256::from(79_000),
output: vec![] output: vec![]
}), }),
subs: vec![Trace { }, FlatTrace {
depth: 1, trace_address: vec![0].into_iter().collect(),
action: trace::Action::Call(trace::Call { subtraces: 0,
from: 0xa.into(), action: trace::Action::Call(trace::Call {
to: 0xb.into(), from: 0xa.into(),
value: 0.into(), to: 0xb.into(),
gas: 78934.into(), value: 0.into(),
input: vec![], gas: 78934.into(),
}), input: vec![],
result: trace::Res::FailedCall, call_type: CallType::Call,
subs: vec![] }),
}] result: trace::Res::FailedCall,
}); }];
assert_eq!(result.trace, expected_trace); assert_eq!(result.trace, expected_trace);
} }
@ -1037,49 +1086,52 @@ fn should_trace_call_with_subcall_with_subcall_transaction() {
state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
subtraces: 1,
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(), to: 0xa.into(),
value: 100.into(), value: 100.into(),
gas: 79000.into(), gas: 79000.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: U256::from(135), gas_used: U256::from(135),
output: vec![] output: vec![]
}), }),
subs: vec![Trace { }, FlatTrace {
depth: 1, trace_address: vec![0].into_iter().collect(),
action: trace::Action::Call(trace::Call { subtraces: 1,
from: 0xa.into(), action: trace::Action::Call(trace::Call {
to: 0xb.into(), from: 0xa.into(),
value: 0.into(), to: 0xb.into(),
gas: 78934.into(), value: 0.into(),
input: vec![], gas: 78934.into(),
}), input: vec![],
result: trace::Res::Call(trace::CallResult { call_type: CallType::Call,
gas_used: U256::from(69), }),
output: vec![] result: trace::Res::Call(trace::CallResult {
}), gas_used: U256::from(69),
subs: vec![Trace { output: vec![]
depth: 2, }),
action: trace::Action::Call(trace::Call { }, FlatTrace {
from: 0xb.into(), trace_address: vec![0, 0].into_iter().collect(),
to: 0xc.into(), subtraces: 0,
value: 0.into(), action: trace::Action::Call(trace::Call {
gas: 78868.into(), from: 0xb.into(),
input: vec![], to: 0xc.into(),
}), value: 0.into(),
result: trace::Res::Call(trace::CallResult { gas: 78868.into(),
gas_used: U256::from(3), input: vec![],
output: vec![] call_type: CallType::Call,
}), }),
subs: vec![] result: trace::Res::Call(trace::CallResult {
}] gas_used: U256::from(3),
}] output: vec![]
}); }),
}];
assert_eq!(result.trace, expected_trace); assert_eq!(result.trace, expected_trace);
} }
@ -1110,46 +1162,50 @@ fn should_trace_failed_subcall_with_subcall_transaction() {
state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0, let expected_trace = vec![FlatTrace {
trace_address: Default::default(),
subtraces: 1,
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(), to: 0xa.into(),
value: 100.into(), value: 100.into(),
gas: 79000.into(), gas: 79000.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: U256::from(79_000), gas_used: U256::from(79_000),
output: vec![] output: vec![]
}), })
subs: vec![Trace { }, FlatTrace {
depth: 1, trace_address: vec![0].into_iter().collect(),
subtraces: 1,
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: 0xa.into(), from: 0xa.into(),
to: 0xb.into(), to: 0xb.into(),
value: 0.into(), value: 0.into(),
gas: 78934.into(), gas: 78934.into(),
input: vec![], input: vec![],
}), call_type: CallType::Call,
result: trace::Res::FailedCall, }),
subs: vec![Trace { result: trace::Res::FailedCall,
depth: 2, }, FlatTrace {
action: trace::Action::Call(trace::Call { trace_address: vec![0, 0].into_iter().collect(),
from: 0xb.into(), subtraces: 0,
to: 0xc.into(), action: trace::Action::Call(trace::Call {
value: 0.into(), from: 0xb.into(),
gas: 78868.into(), to: 0xc.into(),
input: vec![], value: 0.into(),
}), gas: 78868.into(),
result: trace::Res::Call(trace::CallResult { call_type: CallType::Call,
gas_used: U256::from(3), input: vec![],
output: vec![] }),
}), result: trace::Res::Call(trace::CallResult {
subs: vec![] gas_used: U256::from(3),
}] output: vec![]
}] }),
}); }];
assert_eq!(result.trace, expected_trace); assert_eq!(result.trace, expected_trace);
} }
@ -1179,30 +1235,32 @@ fn should_trace_suicide() {
state.add_balance(t.sender().as_ref().unwrap(), &100.into()); state.add_balance(t.sender().as_ref().unwrap(), &100.into());
let vm_factory = Default::default(); let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace { let expected_trace = vec![FlatTrace {
depth: 0, trace_address: Default::default(),
subtraces: 1,
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(), to: 0xa.into(),
value: 100.into(), value: 100.into(),
gas: 79000.into(), gas: 79000.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: 3.into(), gas_used: 3.into(),
output: vec![] output: vec![]
}), }),
subs: vec![Trace { }, FlatTrace {
depth: 1, trace_address: vec![0].into_iter().collect(),
action: trace::Action::Suicide(trace::Suicide { subtraces: 0,
address: 0xa.into(), action: trace::Action::Suicide(trace::Suicide {
refund_address: 0xb.into(), address: 0xa.into(),
balance: 150.into(), refund_address: 0xb.into(),
}), balance: 150.into(),
result: trace::Res::None, }),
subs: vec![] result: trace::Res::None,
}] }];
});
assert_eq!(result.trace, expected_trace); assert_eq!(result.trace, expected_trace);
} }

View File

@ -14,14 +14,14 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use client::{BlockChainClient, Client, ClientConfig}; use client::{self, BlockChainClient, Client, ClientConfig};
use common::*; use common::*;
use spec::*; use spec::*;
use block::{OpenBlock, Drain}; use block::{OpenBlock, Drain};
use blockchain::{BlockChain, Config as BlockChainConfig}; use blockchain::{BlockChain, Config as BlockChainConfig};
use state::*; use state::*;
use evm::Schedule; use evm::Schedule;
use engine::*; use engines::Engine;
use ethereum; use ethereum;
use devtools::*; use devtools::*;
use miner::Miner; use miner::Miner;
@ -246,12 +246,23 @@ pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> GuardedTempResult<Arc<
} }
} }
fn new_db(path: &str) -> Arc<Database> {
Arc::new(
Database::open(&DatabaseConfig::with_columns(client::DB_NO_OF_COLUMNS), path)
.expect("Opening database for tests should always work.")
)
}
pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult<BlockChain> { pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult<BlockChain> {
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), temp.as_path()); let db = new_db(temp.as_str());
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
let batch = db.transaction();
for block_order in 1..block_number { for block_order in 1..block_number {
bc.insert_block(&create_unverifiable_block(block_order, bc.best_block_hash()), vec![]); bc.insert_block(&batch, &create_unverifiable_block(block_order, bc.best_block_hash()), vec![]);
} }
db.write(batch).unwrap();
GuardedTempResult::<BlockChain> { GuardedTempResult::<BlockChain> {
_temp: temp, _temp: temp,
@ -261,10 +272,15 @@ pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult<BlockCh
pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> GuardedTempResult<BlockChain> { pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> GuardedTempResult<BlockChain> {
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), temp.as_path()); let db = new_db(temp.as_str());
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
let batch = db.transaction();
for block_order in 1..block_number { for block_order in 1..block_number {
bc.insert_block(&create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]); bc.insert_block(&batch, &create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]);
} }
db.write(batch).unwrap();
GuardedTempResult::<BlockChain> { GuardedTempResult::<BlockChain> {
_temp: temp, _temp: temp,
@ -274,7 +290,8 @@ pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> GuardedTempRes
pub fn generate_dummy_empty_blockchain() -> GuardedTempResult<BlockChain> { pub fn generate_dummy_empty_blockchain() -> GuardedTempResult<BlockChain> {
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), temp.as_path()); let db = new_db(temp.as_str());
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
GuardedTempResult::<BlockChain> { GuardedTempResult::<BlockChain> {
_temp: temp, _temp: temp,
@ -284,7 +301,8 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult<BlockChain> {
pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> { pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge, DatabaseConfig::default()); let journal_db = get_temp_journal_db_in(temp.as_path());
GuardedTempResult { GuardedTempResult {
_temp: temp, _temp: temp,
result: Some(journal_db) result: Some(journal_db)
@ -294,6 +312,7 @@ pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
pub fn get_temp_state() -> GuardedTempResult<State> { pub fn get_temp_state() -> GuardedTempResult<State> {
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let journal_db = get_temp_journal_db_in(temp.as_path()); let journal_db = get_temp_journal_db_in(temp.as_path());
GuardedTempResult { GuardedTempResult {
_temp: temp, _temp: temp,
result: Some(State::new(journal_db, U256::from(0), Default::default())), result: Some(State::new(journal_db, U256::from(0), Default::default())),
@ -301,7 +320,8 @@ pub fn get_temp_state() -> GuardedTempResult<State> {
} }
pub fn get_temp_journal_db_in(path: &Path) -> Box<JournalDB> { pub fn get_temp_journal_db_in(path: &Path) -> Box<JournalDB> {
journaldb::new(path.to_str().unwrap(), journaldb::Algorithm::EarlyMerge, DatabaseConfig::default()) let db = new_db(path.to_str().expect("Only valid utf8 paths for tests."));
journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, None)
} }
pub fn get_temp_state_in(path: &Path) -> State { pub fn get_temp_state_in(path: &Path) -> State {

View File

@ -1,58 +0,0 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::rlp::*;
use basic_types::LogBloom;
use super::Trace;
/// Traces created by transactions from the same block.
#[derive(Clone)]
pub struct BlockTraces(Vec<Trace>);
impl From<Vec<Trace>> for BlockTraces {
fn from(traces: Vec<Trace>) -> Self {
BlockTraces(traces)
}
}
impl Into<Vec<Trace>> for BlockTraces {
fn into(self) -> Vec<Trace> {
self.0
}
}
impl Decodable for BlockTraces {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
let traces = try!(Decodable::decode(decoder));
let block_traces = BlockTraces(traces);
Ok(block_traces)
}
}
impl Encodable for BlockTraces {
fn rlp_append(&self, s: &mut RlpStream) {
Encodable::rlp_append(&self.0, s)
}
}
impl BlockTraces {
/// Returns bloom of all traces in given block.
pub fn bloom(&self) -> LogBloom {
self.0.iter()
.fold(LogBloom::default(), |acc, trace| acc | trace.bloom())
}
}

View File

@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Traces config. //! Traces config.
use std::str::FromStr;
use bloomchain::Config as BloomConfig; use bloomchain::Config as BloomConfig;
use trace::Error; use trace::Error;
@ -29,6 +30,25 @@ pub enum Switch {
Auto, Auto,
} }
impl Default for Switch {
fn default() -> Self {
Switch::Auto
}
}
impl FromStr for Switch {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"on" => Ok(Switch::On),
"off" => Ok(Switch::Off),
"auto" => Ok(Switch::Auto),
other => Err(format!("Invalid switch value: {}", other))
}
}
}
impl Switch { impl Switch {
/// Tries to turn old switch to new value. /// Tries to turn old switch to new value.
pub fn turn_to(&self, to: Switch) -> Result<bool, Error> { pub fn turn_to(&self, to: Switch) -> Result<bool, Error> {
@ -41,7 +61,7 @@ impl Switch {
} }
/// Traces config. /// Traces config.
#[derive(Debug, Clone)] #[derive(Debug, PartialEq, Clone)]
pub struct Config { pub struct Config {
/// Indicates if tracing should be enabled or not. /// Indicates if tracing should be enabled or not.
/// If it's None, it will be automatically configured. /// If it's None, it will be automatically configured.
@ -55,7 +75,7 @@ pub struct Config {
impl Default for Config { impl Default for Config {
fn default() -> Self { fn default() -> Self {
Config { Config {
enabled: Switch::Auto, enabled: Switch::default(),
blooms: BloomConfig { blooms: BloomConfig {
levels: 3, levels: 3,
elements_per_index: 16, elements_per_index: 16,
@ -64,3 +84,20 @@ impl Default for Config {
} }
} }
} }
#[cfg(test)]
mod tests {
use super::Switch;
#[test]
fn test_switch_parsing() {
assert_eq!(Switch::On, "on".parse().unwrap());
assert_eq!(Switch::Off, "off".parse().unwrap());
assert_eq!(Switch::Auto, "auto".parse().unwrap());
}
#[test]
fn test_switch_default() {
assert_eq!(Switch::default(), Switch::Auto);
}
}

View File

@ -18,15 +18,15 @@
use std::ops::{Deref, DerefMut}; use std::ops::{Deref, DerefMut};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use std::path::Path;
use bloomchain::{Number, Config as BloomConfig}; use bloomchain::{Number, Config as BloomConfig};
use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup}; use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup};
use util::{H256, H264, Database, DatabaseConfig, DBTransaction, RwLock}; use util::{H256, H264, Database, DBTransaction, RwLock};
use header::BlockNumber; use header::BlockNumber;
use trace::{LocalizedTrace, Config, Switch, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras, Error}; use trace::{LocalizedTrace, Config, Switch, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras, Error};
use db::{Key, Writable, Readable, CacheUpdatePolicy}; use db::{Key, Writable, Readable, CacheUpdatePolicy};
use blooms; use blooms;
use super::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces}; use super::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
use client::DB_COL_TRACE;
const TRACE_DB_VER: &'static [u8] = b"1.0"; const TRACE_DB_VER: &'static [u8] = b"1.0";
@ -94,7 +94,7 @@ pub struct TraceDB<T> where T: DatabaseExtras {
traces: RwLock<HashMap<H256, FlatBlockTraces>>, traces: RwLock<HashMap<H256, FlatBlockTraces>>,
blooms: RwLock<HashMap<TraceGroupPosition, blooms::BloomGroup>>, blooms: RwLock<HashMap<TraceGroupPosition, blooms::BloomGroup>>,
// db // db
tracesdb: Database, tracesdb: Arc<Database>,
// config, // config,
bloom_config: BloomConfig, bloom_config: BloomConfig,
// tracing enabled // tracing enabled
@ -106,24 +106,15 @@ pub struct TraceDB<T> where T: DatabaseExtras {
impl<T> BloomGroupDatabase for TraceDB<T> where T: DatabaseExtras { impl<T> BloomGroupDatabase for TraceDB<T> where T: DatabaseExtras {
fn blooms_at(&self, position: &GroupPosition) -> Option<BloomGroup> { fn blooms_at(&self, position: &GroupPosition) -> Option<BloomGroup> {
let position = TraceGroupPosition::from(position.clone()); let position = TraceGroupPosition::from(position.clone());
self.tracesdb.read_with_cache(&self.blooms, &position).map(Into::into) self.tracesdb.read_with_cache(DB_COL_TRACE, &self.blooms, &position).map(Into::into)
} }
} }
impl<T> TraceDB<T> where T: DatabaseExtras { impl<T> TraceDB<T> where T: DatabaseExtras {
/// Creates new instance of `TraceDB`. /// Creates new instance of `TraceDB`.
pub fn new(config: Config, path: &Path, extras: Arc<T>) -> Result<Self, Error> { pub fn new(config: Config, tracesdb: Arc<Database>, extras: Arc<T>) -> Result<Self, Error> {
let mut tracedb_path = path.to_path_buf();
tracedb_path.push("tracedb");
let tracesdb = match config.db_cache_size {
None => Database::open_default(tracedb_path.to_str().unwrap()).unwrap(),
Some(db_cache) => Database::open(
&DatabaseConfig::with_cache(db_cache),
tracedb_path.to_str().unwrap()).unwrap(),
};
// check if in previously tracing was enabled // check if in previously tracing was enabled
let old_tracing = match tracesdb.get(b"enabled").unwrap() { let old_tracing = match tracesdb.get(DB_COL_TRACE, b"enabled").unwrap() {
Some(ref value) if value as &[u8] == &[0x1] => Switch::On, Some(ref value) if value as &[u8] == &[0x1] => Switch::On,
Some(ref value) if value as &[u8] == &[0x0] => Switch::Off, Some(ref value) if value as &[u8] == &[0x0] => Switch::Off,
Some(_) => { panic!("tracesdb is corrupted") }, Some(_) => { panic!("tracesdb is corrupted") },
@ -137,8 +128,10 @@ impl<T> TraceDB<T> where T: DatabaseExtras {
false => [0x0] false => [0x0]
}; };
tracesdb.put(b"enabled", &encoded_tracing).unwrap(); let batch = DBTransaction::new(&tracesdb);
tracesdb.put(b"version", TRACE_DB_VER).unwrap(); batch.put(DB_COL_TRACE, b"enabled", &encoded_tracing).unwrap();
batch.put(DB_COL_TRACE, b"version", TRACE_DB_VER).unwrap();
tracesdb.write(batch).unwrap();
let db = TraceDB { let db = TraceDB {
traces: RwLock::new(HashMap::new()), traces: RwLock::new(HashMap::new()),
@ -154,7 +147,7 @@ impl<T> TraceDB<T> where T: DatabaseExtras {
/// Returns traces for block with hash. /// Returns traces for block with hash.
fn traces(&self, block_hash: &H256) -> Option<FlatBlockTraces> { fn traces(&self, block_hash: &H256) -> Option<FlatBlockTraces> {
self.tracesdb.read_with_cache(&self.traces, block_hash) self.tracesdb.read_with_cache(DB_COL_TRACE, &self.traces, block_hash)
} }
/// Returns vector of transaction traces for given block. /// Returns vector of transaction traces for given block.
@ -197,7 +190,7 @@ impl<T> TraceDB<T> where T: DatabaseExtras {
action: trace.action, action: trace.action,
result: trace.result, result: trace.result,
subtraces: trace.subtraces, subtraces: trace.subtraces,
trace_address: trace.trace_address, trace_address: trace.trace_address.into_iter().collect(),
transaction_number: tx_number, transaction_number: tx_number,
transaction_hash: tx_hash.clone(), transaction_hash: tx_hash.clone(),
block_number: block_number, block_number: block_number,
@ -217,20 +210,18 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
/// Traces of import request's enacted blocks are expected to be already in database /// Traces of import request's enacted blocks are expected to be already in database
/// or to be the currently inserted trace. /// or to be the currently inserted trace.
fn import(&self, request: ImportRequest) { fn import(&self, batch: &DBTransaction, request: ImportRequest) {
// fast return if tracing is disabled // fast return if tracing is disabled
if !self.tracing_enabled() { if !self.tracing_enabled() {
return; return;
} }
let batch = DBTransaction::new();
// at first, let's insert new block traces // at first, let's insert new block traces
{ {
let mut traces = self.traces.write(); let mut traces = self.traces.write();
// it's important to use overwrite here, // it's important to use overwrite here,
// cause this value might be queried by hash later // cause this value might be queried by hash later
batch.write_with_cache(traces.deref_mut(), request.block_hash, request.traces.into(), CacheUpdatePolicy::Overwrite); batch.write_with_cache(DB_COL_TRACE, traces.deref_mut(), request.block_hash, request.traces, CacheUpdatePolicy::Overwrite);
} }
// now let's rebuild the blooms // now let's rebuild the blooms
@ -256,19 +247,18 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
.collect::<HashMap<TraceGroupPosition, blooms::BloomGroup>>(); .collect::<HashMap<TraceGroupPosition, blooms::BloomGroup>>();
let mut blooms = self.blooms.write(); let mut blooms = self.blooms.write();
batch.extend_with_cache(blooms.deref_mut(), blooms_to_insert, CacheUpdatePolicy::Remove); batch.extend_with_cache(DB_COL_TRACE, blooms.deref_mut(), blooms_to_insert, CacheUpdatePolicy::Remove);
} }
self.tracesdb.write(batch).unwrap();
} }
fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec<usize>) -> Option<LocalizedTrace> { fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec<usize>) -> Option<LocalizedTrace> {
let trace_position_deq = trace_position.into_iter().collect();
self.extras.block_hash(block_number) self.extras.block_hash(block_number)
.and_then(|block_hash| self.transactions_traces(&block_hash) .and_then(|block_hash| self.transactions_traces(&block_hash)
.and_then(|traces| traces.into_iter().nth(tx_position)) .and_then(|traces| traces.into_iter().nth(tx_position))
.map(Into::<Vec<FlatTrace>>::into) .map(Into::<Vec<FlatTrace>>::into)
// this may and should be optimized // this may and should be optimized
.and_then(|traces| traces.into_iter().find(|trace| trace.trace_address == trace_position)) .and_then(|traces| traces.into_iter().find(|trace| trace.trace_address == trace_position_deq))
.map(|trace| { .map(|trace| {
let tx_hash = self.extras.transaction_hash(block_number, tx_position) let tx_hash = self.extras.transaction_hash(block_number, tx_position)
.expect("Expected to find transaction hash. Database is probably corrupted"); .expect("Expected to find transaction hash. Database is probably corrupted");
@ -277,7 +267,7 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
action: trace.action, action: trace.action,
result: trace.result, result: trace.result,
subtraces: trace.subtraces, subtraces: trace.subtraces,
trace_address: trace.trace_address, trace_address: trace.trace_address.into_iter().collect(),
transaction_number: tx_position, transaction_number: tx_position,
transaction_hash: tx_hash, transaction_hash: tx_hash,
block_number: block_number, block_number: block_number,
@ -301,7 +291,7 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
action: trace.action, action: trace.action,
result: trace.result, result: trace.result,
subtraces: trace.subtraces, subtraces: trace.subtraces,
trace_address: trace.trace_address, trace_address: trace.trace_address.into_iter().collect(),
transaction_number: tx_position, transaction_number: tx_position,
transaction_hash: tx_hash.clone(), transaction_hash: tx_hash.clone(),
block_number: block_number, block_number: block_number,
@ -328,7 +318,7 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
action: trace.action, action: trace.action,
result: trace.result, result: trace.result,
subtraces: trace.subtraces, subtraces: trace.subtraces,
trace_address: trace.trace_address, trace_address: trace.trace_address.into_iter().collect(),
transaction_number: tx_position, transaction_number: tx_position,
transaction_hash: tx_hash.clone(), transaction_hash: tx_hash.clone(),
block_number: block_number, block_number: block_number,
@ -361,12 +351,15 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
mod tests { mod tests {
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use util::{Address, U256, H256}; use util::{Address, U256, H256, Database, DatabaseConfig, DBTransaction};
use devtools::RandomTempPath; use devtools::RandomTempPath;
use header::BlockNumber; use header::BlockNumber;
use trace::{Config, Switch, TraceDB, Database, DatabaseExtras, ImportRequest}; use trace::{Config, Switch, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest};
use trace::{BlockTraces, Trace, Filter, LocalizedTrace, AddressesFilter}; use trace::{Filter, LocalizedTrace, AddressesFilter};
use trace::trace::{Call, Action, Res}; use trace::trace::{Call, Action, Res};
use trace::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
use client::DB_NO_OF_COLUMNS;
use types::executed::CallType;
struct NoopExtras; struct NoopExtras;
@ -405,28 +398,33 @@ mod tests {
} }
} }
fn new_db(path: &str) -> Arc<Database> {
Arc::new(Database::open(&DatabaseConfig::with_columns(DB_NO_OF_COLUMNS), path).unwrap())
}
#[test] #[test]
fn test_reopening_db_with_tracing_off() { fn test_reopening_db_with_tracing_off() {
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let db = new_db(temp.as_str());
let mut config = Config::default(); let mut config = Config::default();
// set autotracing // set autotracing
config.enabled = Switch::Auto; config.enabled = Switch::Auto;
{ {
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), false); assert_eq!(tracedb.tracing_enabled(), false);
} }
{ {
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), false); assert_eq!(tracedb.tracing_enabled(), false);
} }
config.enabled = Switch::Off; config.enabled = Switch::Off;
{ {
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), false); assert_eq!(tracedb.tracing_enabled(), false);
} }
} }
@ -434,32 +432,33 @@ mod tests {
#[test] #[test]
fn test_reopening_db_with_tracing_on() { fn test_reopening_db_with_tracing_on() {
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let db = new_db(temp.as_str());
let mut config = Config::default(); let mut config = Config::default();
// set tracing on // set tracing on
config.enabled = Switch::On; config.enabled = Switch::On;
{ {
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), true); assert_eq!(tracedb.tracing_enabled(), true);
} }
{ {
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), true); assert_eq!(tracedb.tracing_enabled(), true);
} }
config.enabled = Switch::Auto; config.enabled = Switch::Auto;
{ {
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), true); assert_eq!(tracedb.tracing_enabled(), true);
} }
config.enabled = Switch::Off; config.enabled = Switch::Off;
{ {
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), false); assert_eq!(tracedb.tracing_enabled(), false);
} }
} }
@ -468,34 +467,36 @@ mod tests {
#[should_panic] #[should_panic]
fn test_invalid_reopening_db() { fn test_invalid_reopening_db() {
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let db = new_db(temp.as_str());
let mut config = Config::default(); let mut config = Config::default();
// set tracing on // set tracing on
config.enabled = Switch::Off; config.enabled = Switch::Off;
{ {
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), true); assert_eq!(tracedb.tracing_enabled(), true);
} }
config.enabled = Switch::On; config.enabled = Switch::On;
TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); // should panic! TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); // should panic!
} }
fn create_simple_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest { fn create_simple_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest {
ImportRequest { ImportRequest {
traces: BlockTraces::from(vec![Trace { traces: FlatBlockTraces::from(vec![FlatTransactionTraces::from(vec![FlatTrace {
depth: 0, trace_address: Default::default(),
subtraces: 0,
action: Action::Call(Call { action: Action::Call(Call {
from: Address::from(1), from: 1.into(),
to: Address::from(2), to: 2.into(),
value: U256::from(3), value: 3.into(),
gas: U256::from(4), gas: 4.into(),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: Res::FailedCall, result: Res::FailedCall,
subs: vec![], }])]),
}]),
block_hash: block_hash.clone(), block_hash: block_hash.clone(),
block_number: block_number, block_number: block_number,
enacted: vec![block_hash], enacted: vec![block_hash],
@ -511,6 +512,7 @@ mod tests {
value: U256::from(3), value: U256::from(3),
gas: U256::from(4), gas: U256::from(4),
input: vec![], input: vec![],
call_type: CallType::Call,
}), }),
result: Res::FailedCall, result: Res::FailedCall,
trace_address: vec![], trace_address: vec![],
@ -526,6 +528,7 @@ mod tests {
#[test] #[test]
fn test_import() { fn test_import() {
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let db = Arc::new(Database::open(&DatabaseConfig::with_columns(DB_NO_OF_COLUMNS), temp.as_str()).unwrap());
let mut config = Config::default(); let mut config = Config::default();
config.enabled = Switch::On; config.enabled = Switch::On;
let block_0 = H256::from(0xa1); let block_0 = H256::from(0xa1);
@ -539,11 +542,13 @@ mod tests {
extras.transaction_hashes.insert(0, vec![tx_0.clone()]); extras.transaction_hashes.insert(0, vec![tx_0.clone()]);
extras.transaction_hashes.insert(1, vec![tx_1.clone()]); extras.transaction_hashes.insert(1, vec![tx_1.clone()]);
let tracedb = TraceDB::new(config, temp.as_path(), Arc::new(extras)).unwrap(); let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)).unwrap();
// import block 0 // import block 0
let request = create_simple_import_request(0, block_0.clone()); let request = create_simple_import_request(0, block_0.clone());
tracedb.import(request); let batch = DBTransaction::new(&db);
tracedb.import(&batch, request);
db.write(batch).unwrap();
let filter = Filter { let filter = Filter {
range: (0..0), range: (0..0),
@ -557,7 +562,9 @@ mod tests {
// import block 1 // import block 1
let request = create_simple_import_request(1, block_1.clone()); let request = create_simple_import_request(1, block_1.clone());
tracedb.import(request); let batch = DBTransaction::new(&db);
tracedb.import(&batch, request);
db.write(batch).unwrap();
let filter = Filter { let filter = Filter {
range: (0..1), range: (0..1),

View File

@ -18,13 +18,53 @@
use util::{Bytes, Address, U256}; use util::{Bytes, Address, U256};
use action_params::ActionParams; use action_params::ActionParams;
use trace::trace::{Trace, Call, Create, Action, Res, CreateResult, CallResult, VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff, Suicide}; use trace::trace::{Call, Create, Action, Res, CreateResult, CallResult, VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff, Suicide};
use trace::{Tracer, VMTracer}; use trace::{Tracer, VMTracer, FlatTrace};
/// Simple executive tracer. Traces all calls and creates. Ignores delegatecalls. /// Simple executive tracer. Traces all calls and creates. Ignores delegatecalls.
#[derive(Default)] #[derive(Default)]
pub struct ExecutiveTracer { pub struct ExecutiveTracer {
traces: Vec<Trace>, traces: Vec<FlatTrace>,
}
fn top_level_subtraces(traces: &[FlatTrace]) -> usize {
traces.iter().filter(|t| t.trace_address.is_empty()).count()
}
fn update_trace_address(traces: Vec<FlatTrace>) -> Vec<FlatTrace> {
// input traces are expected to be ordered like
// []
// [0]
// [0, 0]
// [0, 1]
// []
// [0]
//
// so they can be transformed to
//
// [0]
// [0, 0]
// [0, 0, 0]
// [0, 0, 1]
// [1]
// [1, 0]
let mut top_subtrace_index = 0;
let mut subtrace_subtraces_left = 0;
traces.into_iter().map(|mut trace| {
let is_top_subtrace = trace.trace_address.is_empty();
trace.trace_address.push_front(top_subtrace_index);
if is_top_subtrace {
subtrace_subtraces_left = trace.subtraces;
} else {
subtrace_subtraces_left -= 1;
}
if subtrace_subtraces_left == 0 {
top_subtrace_index += 1;
}
trace
}).collect()
} }
impl Tracer for ExecutiveTracer { impl Tracer for ExecutiveTracer {
@ -40,74 +80,73 @@ impl Tracer for ExecutiveTracer {
Some(vec![]) Some(vec![])
} }
fn trace_call(&mut self, call: Option<Call>, gas_used: U256, output: Option<Bytes>, depth: usize, subs: Vec<Trace>, delegate_call: bool) { fn trace_call(&mut self, call: Option<Call>, gas_used: U256, output: Option<Bytes>, subs: Vec<FlatTrace>) {
// don't trace if it's DELEGATECALL or CALLCODE. let trace = FlatTrace {
if delegate_call { trace_address: Default::default(),
return; subtraces: top_level_subtraces(&subs),
}
let trace = Trace {
depth: depth,
subs: subs,
action: Action::Call(call.expect("self.prepare_trace_call().is_some(): so we must be tracing: qed")), action: Action::Call(call.expect("self.prepare_trace_call().is_some(): so we must be tracing: qed")),
result: Res::Call(CallResult { result: Res::Call(CallResult {
gas_used: gas_used, gas_used: gas_used,
output: output.expect("self.prepare_trace_output().is_some(): so we must be tracing: qed") output: output.expect("self.prepare_trace_output().is_some(): so we must be tracing: qed")
}) }),
}; };
debug!(target: "trace", "Traced call {:?}", trace);
self.traces.push(trace); self.traces.push(trace);
self.traces.extend(update_trace_address(subs));
} }
fn trace_create(&mut self, create: Option<Create>, gas_used: U256, code: Option<Bytes>, address: Address, depth: usize, subs: Vec<Trace>) { fn trace_create(&mut self, create: Option<Create>, gas_used: U256, code: Option<Bytes>, address: Address, subs: Vec<FlatTrace>) {
let trace = Trace { let trace = FlatTrace {
depth: depth, subtraces: top_level_subtraces(&subs),
subs: subs,
action: Action::Create(create.expect("self.prepare_trace_create().is_some(): so we must be tracing: qed")), action: Action::Create(create.expect("self.prepare_trace_create().is_some(): so we must be tracing: qed")),
result: Res::Create(CreateResult { result: Res::Create(CreateResult {
gas_used: gas_used, gas_used: gas_used,
code: code.expect("self.prepare_trace_output.is_some(): so we must be tracing: qed"), code: code.expect("self.prepare_trace_output.is_some(): so we must be tracing: qed"),
address: address address: address
}) }),
trace_address: Default::default(),
}; };
debug!(target: "trace", "Traced create {:?}", trace);
self.traces.push(trace); self.traces.push(trace);
self.traces.extend(update_trace_address(subs));
} }
fn trace_failed_call(&mut self, call: Option<Call>, depth: usize, subs: Vec<Trace>, delegate_call: bool) { fn trace_failed_call(&mut self, call: Option<Call>, subs: Vec<FlatTrace>) {
// don't trace if it's DELEGATECALL or CALLCODE. let trace = FlatTrace {
if delegate_call { trace_address: Default::default(),
return; subtraces: top_level_subtraces(&subs),
}
let trace = Trace {
depth: depth,
subs: subs,
action: Action::Call(call.expect("self.prepare_trace_call().is_some(): so we must be tracing: qed")), action: Action::Call(call.expect("self.prepare_trace_call().is_some(): so we must be tracing: qed")),
result: Res::FailedCall, result: Res::FailedCall,
}; };
debug!(target: "trace", "Traced failed call {:?}", trace);
self.traces.push(trace); self.traces.push(trace);
self.traces.extend(update_trace_address(subs));
} }
fn trace_failed_create(&mut self, create: Option<Create>, depth: usize, subs: Vec<Trace>) { fn trace_failed_create(&mut self, create: Option<Create>, subs: Vec<FlatTrace>) {
let trace = Trace { let trace = FlatTrace {
depth: depth, subtraces: top_level_subtraces(&subs),
subs: subs,
action: Action::Create(create.expect("self.prepare_trace_create().is_some(): so we must be tracing: qed")), action: Action::Create(create.expect("self.prepare_trace_create().is_some(): so we must be tracing: qed")),
result: Res::FailedCreate, result: Res::FailedCreate,
trace_address: Default::default(),
}; };
debug!(target: "trace", "Traced failed create {:?}", trace);
self.traces.push(trace); self.traces.push(trace);
self.traces.extend(update_trace_address(subs));
} }
fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address, depth: usize) { fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address) {
let trace = Trace { let trace = FlatTrace {
depth: depth, subtraces: 0,
subs: vec![],
action: Action::Suicide(Suicide { action: Action::Suicide(Suicide {
address: address, address: address,
refund_address: refund_address, refund_address: refund_address,
balance: balance, balance: balance,
}), }),
result: Res::None, result: Res::None,
trace_address: Default::default(),
}; };
debug!(target: "trace", "Traced failed suicide {:?}", trace);
self.traces.push(trace); self.traces.push(trace);
} }
@ -115,7 +154,7 @@ impl Tracer for ExecutiveTracer {
ExecutiveTracer::default() ExecutiveTracer::default()
} }
fn traces(self) -> Vec<Trace> { fn traces(self) -> Vec<FlatTrace> {
self.traces self.traces
} }
} }

View File

@ -17,12 +17,12 @@
//! Traces import request. //! Traces import request.
use util::H256; use util::H256;
use header::BlockNumber; use header::BlockNumber;
use trace::BlockTraces; use trace::FlatBlockTraces;
/// Traces import request. /// Traces import request.
pub struct ImportRequest { pub struct ImportRequest {
/// Traces to import. /// Traces to import.
pub traces: BlockTraces, pub traces: FlatBlockTraces,
/// Hash of traces block. /// Hash of traces block.
pub block_hash: H256, pub block_hash: H256,
/// Number of traces block. /// Number of traces block.

View File

@ -16,28 +16,26 @@
//! Tracing //! Tracing
mod block;
mod bloom; mod bloom;
mod config; mod config;
mod db; mod db;
mod error; mod error;
mod executive_tracer; mod executive_tracer;
pub mod flat;
mod import; mod import;
mod noop_tracer; mod noop_tracer;
pub use types::trace_types::*; pub use types::trace_types::*;
pub use self::block::BlockTraces;
pub use self::config::{Config, Switch}; pub use self::config::{Config, Switch};
pub use self::db::TraceDB; pub use self::db::TraceDB;
pub use self::error::Error; pub use self::error::Error;
pub use types::trace_types::trace::{Trace, VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff}; pub use types::trace_types::trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff};
pub use types::trace_types::flat::{FlatTrace, FlatTransactionTraces, FlatBlockTraces};
pub use self::noop_tracer::{NoopTracer, NoopVMTracer}; pub use self::noop_tracer::{NoopTracer, NoopVMTracer};
pub use self::executive_tracer::{ExecutiveTracer, ExecutiveVMTracer}; pub use self::executive_tracer::{ExecutiveTracer, ExecutiveVMTracer};
pub use types::trace_types::filter::{Filter, AddressesFilter}; pub use types::trace_types::filter::{Filter, AddressesFilter};
pub use self::import::ImportRequest; pub use self::import::ImportRequest;
pub use self::localized::LocalizedTrace; pub use self::localized::LocalizedTrace;
use util::{Bytes, Address, U256, H256}; use util::{Bytes, Address, U256, H256, DBTransaction};
use self::trace::{Call, Create}; use self::trace::{Call, Create};
use action_params::ActionParams; use action_params::ActionParams;
use header::BlockNumber; use header::BlockNumber;
@ -59,9 +57,7 @@ pub trait Tracer: Send {
call: Option<Call>, call: Option<Call>,
gas_used: U256, gas_used: U256,
output: Option<Bytes>, output: Option<Bytes>,
depth: usize, subs: Vec<FlatTrace>,
subs: Vec<Trace>,
delegate_call: bool
); );
/// Stores trace create info. /// Stores trace create info.
@ -71,24 +67,23 @@ pub trait Tracer: Send {
gas_used: U256, gas_used: U256,
code: Option<Bytes>, code: Option<Bytes>,
address: Address, address: Address,
depth: usize, subs: Vec<FlatTrace>
subs: Vec<Trace>
); );
/// Stores failed call trace. /// Stores failed call trace.
fn trace_failed_call(&mut self, call: Option<Call>, depth: usize, subs: Vec<Trace>, delegate_call: bool); fn trace_failed_call(&mut self, call: Option<Call>, subs: Vec<FlatTrace>);
/// Stores failed create trace. /// Stores failed create trace.
fn trace_failed_create(&mut self, create: Option<Create>, depth: usize, subs: Vec<Trace>); fn trace_failed_create(&mut self, create: Option<Create>, subs: Vec<FlatTrace>);
/// Stores suicide info. /// Stores suicide info.
fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address, depth: usize); fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address);
/// Spawn subtracer which will be used to trace deeper levels of execution. /// Spawn subtracer which will be used to trace deeper levels of execution.
fn subtracer(&self) -> Self where Self: Sized; fn subtracer(&self) -> Self where Self: Sized;
/// Consumes self and returns all traces. /// Consumes self and returns all traces.
fn traces(self) -> Vec<Trace>; fn traces(self) -> Vec<FlatTrace>;
} }
/// Used by executive to build VM traces. /// Used by executive to build VM traces.
@ -126,7 +121,7 @@ pub trait Database {
fn tracing_enabled(&self) -> bool; fn tracing_enabled(&self) -> bool;
/// Imports new block traces. /// Imports new block traces.
fn import(&self, request: ImportRequest); fn import(&self, batch: &DBTransaction, request: ImportRequest);
/// Returns localized trace at given position. /// Returns localized trace at given position.
fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec<usize>) -> Option<LocalizedTrace>; fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec<usize>) -> Option<LocalizedTrace>;

View File

@ -18,8 +18,8 @@
use util::{Bytes, Address, U256}; use util::{Bytes, Address, U256};
use action_params::ActionParams; use action_params::ActionParams;
use trace::{Tracer, VMTracer}; use trace::{Tracer, VMTracer, FlatTrace};
use trace::trace::{Trace, Call, Create, VMTrace}; use trace::trace::{Call, Create, VMTrace};
/// Nonoperative tracer. Does not trace anything. /// Nonoperative tracer. Does not trace anything.
pub struct NoopTracer; pub struct NoopTracer;
@ -37,32 +37,32 @@ impl Tracer for NoopTracer {
None None
} }
fn trace_call(&mut self, call: Option<Call>, _: U256, output: Option<Bytes>, _: usize, _: Vec<Trace>, _: bool) { fn trace_call(&mut self, call: Option<Call>, _: U256, output: Option<Bytes>, _: Vec<FlatTrace>) {
assert!(call.is_none(), "self.prepare_trace_call().is_none(): so we can't be tracing: qed"); assert!(call.is_none(), "self.prepare_trace_call().is_none(): so we can't be tracing: qed");
assert!(output.is_none(), "self.prepare_trace_output().is_none(): so we can't be tracing: qed"); assert!(output.is_none(), "self.prepare_trace_output().is_none(): so we can't be tracing: qed");
} }
fn trace_create(&mut self, create: Option<Create>, _: U256, code: Option<Bytes>, _: Address, _: usize, _: Vec<Trace>) { fn trace_create(&mut self, create: Option<Create>, _: U256, code: Option<Bytes>, _: Address, _: Vec<FlatTrace>) {
assert!(create.is_none(), "self.prepare_trace_create().is_none(): so we can't be tracing: qed"); assert!(create.is_none(), "self.prepare_trace_create().is_none(): so we can't be tracing: qed");
assert!(code.is_none(), "self.prepare_trace_output().is_none(): so we can't be tracing: qed"); assert!(code.is_none(), "self.prepare_trace_output().is_none(): so we can't be tracing: qed");
} }
fn trace_failed_call(&mut self, call: Option<Call>, _: usize, _: Vec<Trace>, _: bool) { fn trace_failed_call(&mut self, call: Option<Call>, _: Vec<FlatTrace>) {
assert!(call.is_none(), "self.prepare_trace_call().is_none(): so we can't be tracing: qed"); assert!(call.is_none(), "self.prepare_trace_call().is_none(): so we can't be tracing: qed");
} }
fn trace_failed_create(&mut self, create: Option<Create>, _: usize, _: Vec<Trace>) { fn trace_failed_create(&mut self, create: Option<Create>, _: Vec<FlatTrace>) {
assert!(create.is_none(), "self.prepare_trace_create().is_none(): so we can't be tracing: qed"); assert!(create.is_none(), "self.prepare_trace_create().is_none(): so we can't be tracing: qed");
} }
fn trace_suicide(&mut self, _address: Address, _balance: U256, _refund_address: Address, _depth: usize) { fn trace_suicide(&mut self, _address: Address, _balance: U256, _refund_address: Address) {
} }
fn subtracer(&self) -> Self { fn subtracer(&self) -> Self {
NoopTracer NoopTracer
} }
fn traces(self) -> Vec<Trace> { fn traces(self) -> Vec<FlatTrace> {
vec![] vec![]
} }
} }

View File

@ -18,7 +18,8 @@
use util::numbers::*; use util::numbers::*;
use util::Bytes; use util::Bytes;
use trace::{Trace, VMTrace}; use util::rlp::*;
use trace::{VMTrace, FlatTrace};
use types::log_entry::LogEntry; use types::log_entry::LogEntry;
use types::state_diff::StateDiff; use types::state_diff::StateDiff;
use ipc::binary::BinaryConvertError; use ipc::binary::BinaryConvertError;
@ -26,6 +27,43 @@ use std::fmt;
use std::mem; use std::mem;
use std::collections::VecDeque; use std::collections::VecDeque;
/// The type of the call-like instruction.
#[derive(Debug, PartialEq, Clone, Binary)]
pub enum CallType {
/// Not a CALL.
None,
/// CALL.
Call,
/// CALLCODE.
CallCode,
/// DELEGATECALL.
DelegateCall,
}
impl Encodable for CallType {
fn rlp_append(&self, s: &mut RlpStream) {
let v = match *self {
CallType::None => 0u32,
CallType::Call => 1,
CallType::CallCode => 2,
CallType::DelegateCall => 3,
};
s.append(&v);
}
}
impl Decodable for CallType {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
decoder.as_rlp().as_val().and_then(|v| Ok(match v {
0u32 => CallType::None,
1 => CallType::Call,
2 => CallType::CallCode,
3 => CallType::DelegateCall,
_ => return Err(DecoderError::Custom("Invalid value of CallType item")),
}))
}
}
/// Transaction execution receipt. /// Transaction execution receipt.
#[derive(Debug, PartialEq, Clone, Binary)] #[derive(Debug, PartialEq, Clone, Binary)]
pub struct Executed { pub struct Executed {
@ -59,7 +97,7 @@ pub struct Executed {
/// Transaction output. /// Transaction output.
pub output: Bytes, pub output: Bytes,
/// The trace of this transaction. /// The trace of this transaction.
pub trace: Option<Trace>, pub trace: Vec<FlatTrace>,
/// The VM trace of this transaction. /// The VM trace of this transaction.
pub vm_trace: Option<VMTrace>, pub vm_trace: Option<VMTrace>,
/// The state diff, if we traced it. /// The state diff, if we traced it.
@ -133,5 +171,39 @@ impl fmt::Display for ExecutionError {
} }
} }
/// Result of executing the transaction.
#[derive(PartialEq, Debug, Binary)]
pub enum ReplayError {
/// Couldn't find the transaction in the chain.
TransactionNotFound,
/// Couldn't find the transaction block's state in the chain.
StatePruned,
/// Error executing.
Execution(ExecutionError),
}
impl fmt::Display for ReplayError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::ReplayError::*;
let msg = match *self {
TransactionNotFound => "Transaction couldn't be found in the chain".into(),
StatePruned => "Couldn't find the transaction block's state in the chain".into(),
Execution(ref e) => format!("{}", e),
};
f.write_fmt(format_args!("Transaction replay error ({}).", msg))
}
}
/// Transaction execution result. /// Transaction execution result.
pub type ExecutionResult = Result<Executed, ExecutionError>; pub type ExecutionResult = Result<Executed, ExecutionError>;
#[test]
fn should_encode_and_decode_call_type() {
use util::rlp;
let original = CallType::Call;
let encoded = rlp::encode(&original);
let decoded = rlp::decode(&encoded);
assert_eq!(original, decoded);
}

View File

@ -143,6 +143,7 @@ mod tests {
use trace::flat::FlatTrace; use trace::flat::FlatTrace;
use trace::{Filter, AddressesFilter}; use trace::{Filter, AddressesFilter};
use basic_types::LogBloom; use basic_types::LogBloom;
use types::executed::CallType;
#[test] #[test]
fn empty_trace_filter_bloom_possibilities() { fn empty_trace_filter_bloom_possibilities() {
@ -285,9 +286,10 @@ mod tests {
value: 3.into(), value: 3.into(),
gas: 4.into(), gas: 4.into(),
input: vec![0x5], input: vec![0x5],
call_type: CallType::Call,
}), }),
result: Res::FailedCall, result: Res::FailedCall,
trace_address: vec![0], trace_address: vec![0].into_iter().collect(),
subtraces: 0, subtraces: 0,
}; };
@ -311,7 +313,7 @@ mod tests {
code: vec![], code: vec![],
address: 2.into(), address: 2.into(),
}), }),
trace_address: vec![0], trace_address: vec![0].into_iter().collect(),
subtraces: 0, subtraces: 0,
}; };
@ -330,7 +332,7 @@ mod tests {
balance: 3.into(), balance: 3.into(),
}), }),
result: Res::None, result: Res::None,
trace_address: vec![], trace_address: vec![].into_iter().collect(),
subtraces: 0 subtraces: 0
}; };

View File

@ -16,15 +16,17 @@
//! Flat trace module //! Flat trace module
use std::collections::VecDeque;
use std::mem;
use ipc::binary::BinaryConvertError;
use util::rlp::*; use util::rlp::*;
use trace::BlockTraces;
use basic_types::LogBloom; use basic_types::LogBloom;
use super::trace::{Trace, Action, Res}; use super::trace::{Action, Res};
/// Trace localized in vector of traces produced by a single transaction. /// Trace localized in vector of traces produced by a single transaction.
/// ///
/// Parent and children indexes refer to positions in this vector. /// Parent and children indexes refer to positions in this vector.
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone, Binary)]
pub struct FlatTrace { pub struct FlatTrace {
/// Type of action performed by a transaction. /// Type of action performed by a transaction.
pub action: Action, pub action: Action,
@ -35,7 +37,7 @@ pub struct FlatTrace {
/// Exact location of trace. /// Exact location of trace.
/// ///
/// [index in root, index in first CALL, index in second CALL, ...] /// [index in root, index in first CALL, index in second CALL, ...]
pub trace_address: Vec<usize>, pub trace_address: VecDeque<usize>,
} }
impl FlatTrace { impl FlatTrace {
@ -51,18 +53,19 @@ impl Encodable for FlatTrace {
s.append(&self.action); s.append(&self.action);
s.append(&self.result); s.append(&self.result);
s.append(&self.subtraces); s.append(&self.subtraces);
s.append(&self.trace_address); s.append(&self.trace_address.clone().into_iter().collect::<Vec<_>>());
} }
} }
impl Decodable for FlatTrace { impl Decodable for FlatTrace {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder { fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
let d = decoder.as_rlp(); let d = decoder.as_rlp();
let v: Vec<usize> = try!(d.val_at(3));
let res = FlatTrace { let res = FlatTrace {
action: try!(d.val_at(0)), action: try!(d.val_at(0)),
result: try!(d.val_at(1)), result: try!(d.val_at(1)),
subtraces: try!(d.val_at(2)), subtraces: try!(d.val_at(2)),
trace_address: try!(d.val_at(3)), trace_address: v.into_iter().collect(),
}; };
Ok(res) Ok(res)
@ -73,6 +76,12 @@ impl Decodable for FlatTrace {
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
pub struct FlatTransactionTraces(Vec<FlatTrace>); pub struct FlatTransactionTraces(Vec<FlatTrace>);
impl From<Vec<FlatTrace>> for FlatTransactionTraces {
fn from(v: Vec<FlatTrace>) -> Self {
FlatTransactionTraces(v)
}
}
impl FlatTransactionTraces { impl FlatTransactionTraces {
/// Returns bloom of all traces in the collection. /// Returns bloom of all traces in the collection.
pub fn bloom(&self) -> LogBloom { pub fn bloom(&self) -> LogBloom {
@ -102,6 +111,12 @@ impl Into<Vec<FlatTrace>> for FlatTransactionTraces {
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
pub struct FlatBlockTraces(Vec<FlatTransactionTraces>); pub struct FlatBlockTraces(Vec<FlatTransactionTraces>);
impl From<Vec<FlatTransactionTraces>> for FlatBlockTraces {
fn from(v: Vec<FlatTransactionTraces>) -> Self {
FlatBlockTraces(v)
}
}
impl FlatBlockTraces { impl FlatBlockTraces {
/// Returns bloom of all traces in the block. /// Returns bloom of all traces in the block.
pub fn bloom(&self) -> LogBloom { pub fn bloom(&self) -> LogBloom {
@ -121,139 +136,17 @@ impl Decodable for FlatBlockTraces {
} }
} }
impl From<BlockTraces> for FlatBlockTraces {
fn from(block_traces: BlockTraces) -> Self {
let traces: Vec<Trace> = block_traces.into();
let ordered = traces.into_iter()
.map(|trace| FlatBlockTraces::flatten(vec![], trace))
.map(FlatTransactionTraces)
.collect();
FlatBlockTraces(ordered)
}
}
impl Into<Vec<FlatTransactionTraces>> for FlatBlockTraces { impl Into<Vec<FlatTransactionTraces>> for FlatBlockTraces {
fn into(self) -> Vec<FlatTransactionTraces> { fn into(self) -> Vec<FlatTransactionTraces> {
self.0 self.0
} }
} }
impl FlatBlockTraces {
/// Helper function flattening nested tree structure to vector of ordered traces.
fn flatten(address: Vec<usize>, trace: Trace) -> Vec<FlatTrace> {
let subtraces = trace.subs.len();
let all_subs = trace.subs
.into_iter()
.enumerate()
.flat_map(|(index, subtrace)| {
let mut subtrace_address = address.clone();
subtrace_address.push(index);
FlatBlockTraces::flatten(subtrace_address, subtrace)
})
.collect::<Vec<_>>();
let ordered = FlatTrace {
action: trace.action,
result: trace.result,
subtraces: subtraces,
trace_address: address,
};
let mut result = vec![ordered];
result.extend(all_subs);
result
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{FlatBlockTraces, FlatTransactionTraces, FlatTrace}; use super::{FlatBlockTraces, FlatTransactionTraces, FlatTrace};
use util::{U256, Address}; use trace::trace::{Action, Res, CallResult, Call};
use trace::trace::{Action, Res, CallResult, Call, Create, Trace}; use types::executed::CallType;
use trace::BlockTraces;
#[test]
fn test_block_from() {
let trace = Trace {
depth: 2,
action: Action::Call(Call {
from: Address::from(1),
to: Address::from(2),
value: U256::from(3),
gas: U256::from(4),
input: vec![0x5]
}),
subs: vec![
Trace {
depth: 3,
action: Action::Create(Create {
from: Address::from(6),
value: U256::from(7),
gas: U256::from(8),
init: vec![0x9]
}),
subs: vec![
Trace {
depth: 3,
action: Action::Create(Create {
from: Address::from(6),
value: U256::from(7),
gas: U256::from(8),
init: vec![0x9]
}),
subs: vec![
],
result: Res::FailedCreate
},
Trace {
depth: 3,
action: Action::Create(Create {
from: Address::from(6),
value: U256::from(7),
gas: U256::from(8),
init: vec![0x9]
}),
subs: vec![
],
result: Res::FailedCreate
}
],
result: Res::FailedCreate
},
Trace {
depth: 3,
action: Action::Create(Create {
from: Address::from(6),
value: U256::from(7),
gas: U256::from(8),
init: vec![0x9]
}),
subs: vec![],
result: Res::FailedCreate,
}
],
result: Res::Call(CallResult {
gas_used: U256::from(10),
output: vec![0x11, 0x12]
})
};
let block_traces = FlatBlockTraces::from(BlockTraces::from(vec![trace]));
let transaction_traces: Vec<FlatTransactionTraces> = block_traces.into();
assert_eq!(transaction_traces.len(), 1);
let ordered_traces: Vec<FlatTrace> = transaction_traces.into_iter().nth(0).unwrap().into();
assert_eq!(ordered_traces.len(), 5);
assert_eq!(ordered_traces[0].trace_address, vec![]);
assert_eq!(ordered_traces[0].subtraces, 2);
assert_eq!(ordered_traces[1].trace_address, vec![0]);
assert_eq!(ordered_traces[1].subtraces, 2);
assert_eq!(ordered_traces[2].trace_address, vec![0, 0]);
assert_eq!(ordered_traces[2].subtraces, 0);
assert_eq!(ordered_traces[3].trace_address, vec![0, 1]);
assert_eq!(ordered_traces[3].subtraces, 0);
assert_eq!(ordered_traces[4].trace_address, vec![1]);
assert_eq!(ordered_traces[4].subtraces, 0);
}
#[test] #[test]
fn test_trace_serialization() { fn test_trace_serialization() {
@ -265,13 +158,14 @@ mod tests {
to: 2.into(), to: 2.into(),
value: 3.into(), value: 3.into(),
gas: 4.into(), gas: 4.into(),
input: vec![0x5] input: vec![0x5],
call_type: CallType::Call,
}), }),
result: Res::Call(CallResult { result: Res::Call(CallResult {
gas_used: 10.into(), gas_used: 10.into(),
output: vec![0x11, 0x12] output: vec![0x11, 0x12]
}), }),
trace_address: Vec::new(), trace_address: Default::default(),
subtraces: 0, subtraces: 0,
}; };

View File

@ -17,5 +17,6 @@
//! Types used in the public api //! Types used in the public api
pub mod filter; pub mod filter;
pub mod flat;
pub mod trace; pub mod trace;
pub mod localized; pub mod localized;

View File

@ -21,6 +21,7 @@ use util::rlp::*;
use util::sha3::Hashable; use util::sha3::Hashable;
use action_params::ActionParams; use action_params::ActionParams;
use basic_types::LogBloom; use basic_types::LogBloom;
use types::executed::CallType;
use ipc::binary::BinaryConvertError; use ipc::binary::BinaryConvertError;
use std::mem; use std::mem;
use std::collections::VecDeque; use std::collections::VecDeque;
@ -107,6 +108,8 @@ pub struct Call {
pub gas: U256, pub gas: U256,
/// The input data provided to the call. /// The input data provided to the call.
pub input: Bytes, pub input: Bytes,
/// The type of the call.
pub call_type: CallType,
} }
impl From<ActionParams> for Call { impl From<ActionParams> for Call {
@ -117,18 +120,20 @@ impl From<ActionParams> for Call {
value: p.value.value(), value: p.value.value(),
gas: p.gas, gas: p.gas,
input: p.data.unwrap_or_else(Vec::new), input: p.data.unwrap_or_else(Vec::new),
call_type: p.call_type,
} }
} }
} }
impl Encodable for Call { impl Encodable for Call {
fn rlp_append(&self, s: &mut RlpStream) { fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(5); s.begin_list(6);
s.append(&self.from); s.append(&self.from);
s.append(&self.to); s.append(&self.to);
s.append(&self.value); s.append(&self.value);
s.append(&self.gas); s.append(&self.gas);
s.append(&self.input); s.append(&self.input);
s.append(&self.call_type);
} }
} }
@ -141,6 +146,7 @@ impl Decodable for Call {
value: try!(d.val_at(2)), value: try!(d.val_at(2)),
gas: try!(d.val_at(3)), gas: try!(d.val_at(3)),
input: try!(d.val_at(4)), input: try!(d.val_at(4)),
call_type: try!(d.val_at(5)),
}; };
Ok(res) Ok(res)
@ -378,51 +384,6 @@ impl Res {
} }
} }
#[derive(Debug, Clone, PartialEq, Binary)]
/// A trace; includes a description of the action being traced and sub traces of each interior action.
pub struct Trace {
/// The number of EVM execution environments active when this action happened; 0 if it's
/// the outer action of the transaction.
pub depth: usize,
/// The action being performed.
pub action: Action,
/// The sub traces for each interior action performed as part of this call.
pub subs: Vec<Trace>,
/// The result of the performed action.
pub result: Res,
}
impl Encodable for Trace {
fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(4);
s.append(&self.depth);
s.append(&self.action);
s.append(&self.subs);
s.append(&self.result);
}
}
impl Decodable for Trace {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
let d = decoder.as_rlp();
let res = Trace {
depth: try!(d.val_at(0)),
action: try!(d.val_at(1)),
subs: try!(d.val_at(2)),
result: try!(d.val_at(3)),
};
Ok(res)
}
}
impl Trace {
/// Returns trace bloom.
pub fn bloom(&self) -> LogBloom {
self.subs.iter().fold(self.action.bloom() | self.result.bloom(), |b, s| b | s.bloom())
}
}
#[derive(Debug, Clone, PartialEq, Binary)] #[derive(Debug, Clone, PartialEq, Binary)]
/// A diff of some chunk of memory. /// A diff of some chunk of memory.
pub struct MemoryDiff { pub struct MemoryDiff {
@ -587,102 +548,3 @@ impl Decodable for VMTrace {
} }
} }
#[cfg(test)]
mod tests {
use util::{Address, U256, FixedHash};
use util::rlp::{encode, decode};
use util::sha3::Hashable;
use trace::trace::{Call, CallResult, Create, Res, Action, Trace, Suicide, CreateResult};
#[test]
fn traces_rlp() {
let trace = Trace {
depth: 2,
action: Action::Call(Call {
from: Address::from(1),
to: Address::from(2),
value: U256::from(3),
gas: U256::from(4),
input: vec![0x5]
}),
subs: vec![
Trace {
depth: 3,
action: Action::Create(Create {
from: Address::from(6),
value: U256::from(7),
gas: U256::from(8),
init: vec![0x9]
}),
subs: vec![],
result: Res::FailedCreate
}
],
result: Res::Call(CallResult {
gas_used: U256::from(10),
output: vec![0x11, 0x12]
})
};
let encoded = encode(&trace);
let decoded: Trace = decode(&encoded);
assert_eq!(trace, decoded);
}
#[test]
fn traces_bloom() {
let trace = Trace {
depth: 2,
action: Action::Call(Call {
from: Address::from(1),
to: Address::from(2),
value: U256::from(3),
gas: U256::from(4),
input: vec![0x5]
}),
subs: vec![
Trace {
depth: 3,
action: Action::Create(Create {
from: Address::from(6),
value: U256::from(7),
gas: U256::from(8),
init: vec![0x9]
}),
subs: vec![],
result: Res::Create(CreateResult {
gas_used: 10.into(),
code: vec![],
address: 15.into(),
}),
},
Trace {
depth: 3,
action: Action::Suicide(Suicide {
address: 101.into(),
refund_address: 102.into(),
balance: 0.into(),
}),
subs: vec![],
result: Res::None,
}
],
result: Res::Call(CallResult {
gas_used: U256::from(10),
output: vec![0x11, 0x12]
})
};
let bloom = trace.bloom();
// right now only addresses are bloomed
assert!(bloom.contains_bloomed(&Address::from(1).sha3()));
assert!(bloom.contains_bloomed(&Address::from(2).sha3()));
assert!(!bloom.contains_bloomed(&Address::from(20).sha3()));
assert!(bloom.contains_bloomed(&Address::from(6).sha3()));
assert!(bloom.contains_bloomed(&Address::from(15).sha3()));
assert!(bloom.contains_bloomed(&Address::from(101).sha3()));
assert!(bloom.contains_bloomed(&Address::from(102).sha3()));
assert!(!bloom.contains_bloomed(&Address::from(103).sha3()));
}
}

View File

@ -91,7 +91,7 @@ impl Transaction {
impl From<ethjson::state::Transaction> for SignedTransaction { impl From<ethjson::state::Transaction> for SignedTransaction {
fn from(t: ethjson::state::Transaction) -> Self { fn from(t: ethjson::state::Transaction) -> Self {
let to: Option<_> = t.to.into(); let to: Option<ethjson::hash::Address> = t.to.into();
Transaction { Transaction {
nonce: t.nonce.into(), nonce: t.nonce.into(),
gas_price: t.gas_price.into(), gas_price: t.gas_price.into(),
@ -108,7 +108,7 @@ impl From<ethjson::state::Transaction> for SignedTransaction {
impl From<ethjson::transaction::Transaction> for SignedTransaction { impl From<ethjson::transaction::Transaction> for SignedTransaction {
fn from(t: ethjson::transaction::Transaction) -> Self { fn from(t: ethjson::transaction::Transaction) -> Self {
let to: Option<_> = t.to.into(); let to: Option<ethjson::hash::Address> = t.to.into();
SignedTransaction { SignedTransaction {
unsigned: Transaction { unsigned: Transaction {
nonce: t.nonce.into(), nonce: t.nonce.into(),

View File

@ -15,7 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use blockchain::BlockProvider; use blockchain::BlockProvider;
use engine::Engine; use engines::Engine;
use error::Error; use error::Error;
use header::Header; use header::Header;
use super::Verifier; use super::Verifier;

View File

@ -25,7 +25,7 @@ pub use self::canon_verifier::CanonVerifier;
pub use self::noop_verifier::NoopVerifier; pub use self::noop_verifier::NoopVerifier;
/// Verifier type. /// Verifier type.
#[derive(Debug)] #[derive(Debug, PartialEq)]
pub enum VerifierType { pub enum VerifierType {
/// Verifies block normally. /// Verifies block normally.
Canon, Canon,

View File

@ -15,7 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use blockchain::BlockProvider; use blockchain::BlockProvider;
use engine::Engine; use engines::Engine;
use error::Error; use error::Error;
use header::Header; use header::Header;
use super::Verifier; use super::Verifier;

View File

@ -22,7 +22,7 @@
/// 3. Final verification against the blockchain done before enactment. /// 3. Final verification against the blockchain done before enactment.
use common::*; use common::*;
use engine::Engine; use engines::Engine;
use blockchain::*; use blockchain::*;
/// Preprocessed block data gathered in `verify_block_unordered` call /// Preprocessed block data gathered in `verify_block_unordered` call
@ -233,7 +233,7 @@ mod tests {
use error::BlockError::*; use error::BlockError::*;
use views::*; use views::*;
use blockchain::*; use blockchain::*;
use engine::*; use engines::Engine;
use spec::*; use spec::*;
use transaction::*; use transaction::*;
use tests::helpers::*; use tests::helpers::*;
@ -287,6 +287,14 @@ mod tests {
self.blocks.get(hash).cloned() self.blocks.get(hash).cloned()
} }
fn block_header_data(&self, hash: &H256) -> Option<Bytes> {
self.block(hash).map(|b| BlockView::new(&b).header_rlp().as_raw().to_vec())
}
fn block_body(&self, hash: &H256) -> Option<Bytes> {
self.block(hash).map(|b| BlockChain::block_to_body(&b))
}
/// Get the familial details concerning a block. /// Get the familial details concerning a block.
fn block_details(&self, hash: &H256) -> Option<BlockDetails> { fn block_details(&self, hash: &H256) -> Option<BlockDetails> {
self.blocks.get(hash).map(|bytes| { self.blocks.get(hash).map(|bytes| {
@ -350,7 +358,7 @@ mod tests {
gas: U256::from(30_000), gas: U256::from(30_000),
gas_price: U256::from(40_000), gas_price: U256::from(40_000),
nonce: U256::one() nonce: U256::one()
}.sign(&keypair.secret()); }.sign(keypair.secret());
let tr2 = Transaction { let tr2 = Transaction {
action: Action::Create, action: Action::Create,
@ -359,7 +367,7 @@ mod tests {
gas: U256::from(30_000), gas: U256::from(30_000),
gas_price: U256::from(40_000), gas_price: U256::from(40_000),
nonce: U256::from(2) nonce: U256::from(2)
}.sign(&keypair.secret()); }.sign(keypair.secret());
let good_transactions = [ tr1.clone(), tr2.clone() ]; let good_transactions = [ tr1.clone(), tr2.clone() ];

View File

@ -15,7 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use blockchain::BlockProvider; use blockchain::BlockProvider;
use engine::Engine; use engines::Engine;
use error::Error; use error::Error;
use header::Header; use header::Header;

View File

@ -56,6 +56,11 @@ impl<'a> BlockView<'a> {
self.rlp.val_at(0) self.rlp.val_at(0)
} }
/// Return header rlp.
pub fn header_rlp(&self) -> Rlp {
self.rlp.at(0)
}
/// Create new header view obto block head rlp. /// Create new header view obto block head rlp.
pub fn header_view(&self) -> HeaderView<'a> { pub fn header_view(&self) -> HeaderView<'a> {
HeaderView::new_from_rlp(self.rlp.at(0)) HeaderView::new_from_rlp(self.rlp.at(0))

144
ethcore/src/views/body.rs Normal file
View File

@ -0,0 +1,144 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! View onto block body rlp.
use util::*;
use header::*;
use transaction::*;
use super::{TransactionView, HeaderView};
/// View onto block rlp.
pub struct BodyView<'a> {
rlp: Rlp<'a>
}
impl<'a> BodyView<'a> {
/// Creates new view onto block from raw bytes.
pub fn new(bytes: &'a [u8]) -> BodyView<'a> {
BodyView {
rlp: Rlp::new(bytes)
}
}
/// Creates new view onto block from rlp.
pub fn new_from_rlp(rlp: Rlp<'a>) -> BodyView<'a> {
BodyView {
rlp: rlp
}
}
/// Return reference to underlaying rlp.
pub fn rlp(&self) -> &Rlp<'a> {
&self.rlp
}
/// Return List of transactions in given block.
pub fn transactions(&self) -> Vec<SignedTransaction> {
self.rlp.val_at(0)
}
/// Return List of transactions with additional localization info.
pub fn localized_transactions(&self, block_hash: &H256, block_number: BlockNumber) -> Vec<LocalizedTransaction> {
self.transactions()
.into_iter()
.enumerate()
.map(|(i, t)| LocalizedTransaction {
signed: t,
block_hash: block_hash.clone(),
block_number: block_number,
transaction_index: i
}).collect()
}
/// Return number of transactions in given block, without deserializing them.
pub fn transactions_count(&self) -> usize {
self.rlp.at(0).item_count()
}
/// Return List of transactions in given block.
pub fn transaction_views(&self) -> Vec<TransactionView> {
self.rlp.at(0).iter().map(TransactionView::new_from_rlp).collect()
}
/// Return transaction hashes.
pub fn transaction_hashes(&self) -> Vec<H256> {
self.rlp.at(0).iter().map(|rlp| rlp.as_raw().sha3()).collect()
}
/// Returns transaction at given index without deserializing unnecessary data.
pub fn transaction_at(&self, index: usize) -> Option<SignedTransaction> {
self.rlp.at(0).iter().nth(index).map(|rlp| rlp.as_val())
}
/// Returns localized transaction at given index.
pub fn localized_transaction_at(&self, block_hash: &H256, block_number: BlockNumber, index: usize) -> Option<LocalizedTransaction> {
self.transaction_at(index).map(|t| LocalizedTransaction {
signed: t,
block_hash: block_hash.clone(),
block_number: block_number,
transaction_index: index
})
}
/// Return list of uncles of given block.
pub fn uncles(&self) -> Vec<Header> {
self.rlp.val_at(1)
}
/// Return number of uncles in given block, without deserializing them.
pub fn uncles_count(&self) -> usize {
self.rlp.at(1).item_count()
}
/// Return List of transactions in given block.
pub fn uncle_views(&self) -> Vec<HeaderView> {
self.rlp.at(1).iter().map(HeaderView::new_from_rlp).collect()
}
/// Return list of uncle hashes of given block.
pub fn uncle_hashes(&self) -> Vec<H256> {
self.rlp.at(1).iter().map(|rlp| rlp.as_raw().sha3()).collect()
}
/// Return nth uncle.
pub fn uncle_at(&self, index: usize) -> Option<Header> {
self.rlp.at(1).iter().nth(index).map(|rlp| rlp.as_val())
}
/// Return nth uncle rlp.
pub fn uncle_rlp_at(&self, index: usize) -> Option<Bytes> {
self.rlp.at(1).iter().nth(index).map(|rlp| rlp.as_raw().to_vec())
}
}
#[cfg(test)]
mod tests {
use util::*;
use super::BodyView;
use blockchain::BlockChain;
#[test]
fn test_block_view() {
// that's rlp of block created with ethash engine.
let rlp = "f90261f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23f862f86002018304cb2f94ec0e71ad0a90ffe1909d27dac207f7680abba42d01801ba03a347e72953c860f32b1eb2c78a680d8734b2ea08085d949d729479796f218d5a047ea6239d9e31ccac8af3366f5ca37184d26e7646e3191a3aeb81c4cf74de500c0".from_hex().unwrap();
let body = BlockChain::block_to_body(&rlp);
let view = BodyView::new(&body);
assert_eq!(view.transactions_count(), 1);
assert_eq!(view.uncles_count(), 0);
}
}

View File

@ -19,7 +19,9 @@
mod block; mod block;
mod header; mod header;
mod transaction; mod transaction;
mod body;
pub use self::block::BlockView; pub use self::block::BlockView;
pub use self::header::HeaderView; pub use self::header::HeaderView;
pub use self::body::BodyView;
pub use self::transaction::TransactionView; pub use self::transaction::TransactionView;

View File

@ -14,16 +14,18 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::{fs, ffi, io}; use std::{fs, io};
use std::path::{PathBuf, Path}; use std::path::{PathBuf, Path};
use std::collections::HashMap; use std::collections::HashMap;
use time; use time;
use ethkey::Address; use ethkey::Address;
use {libc, json, SafeAccount, Error}; use {json, SafeAccount, Error};
use super::KeyDirectory; use super::KeyDirectory;
#[cfg(not(windows))] #[cfg(not(windows))]
fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> { fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> {
use std::ffi;
use libc;
let cstr = ffi::CString::new(file_path.to_str().unwrap()).unwrap(); let cstr = ffi::CString::new(file_path.to_str().unwrap()).unwrap();
match unsafe { libc::chmod(cstr.as_ptr(), libc::S_IWUSR | libc::S_IRUSR) } { match unsafe { libc::chmod(cstr.as_ptr(), libc::S_IWUSR | libc::S_IRUSR) } {
0 => Ok(()), 0 => Ok(()),
@ -32,7 +34,7 @@ fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> {
} }
#[cfg(windows)] #[cfg(windows)]
fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> { fn restrict_permissions_to_owner(_file_path: &Path) -> Result<(), i32> {
Ok(()) Ok(())
} }
@ -71,13 +73,14 @@ impl DiskDirectory {
let files = try!(files); let files = try!(files);
let accounts = files.into_iter() files.into_iter()
.map(json::KeyFile::load) .map(json::KeyFile::load)
.zip(paths.into_iter()) .zip(paths.into_iter())
.filter_map(|(file, path)| file.ok().map(|file| (path.clone(), SafeAccount::from_file(file, path)))) .map(|(file, path)| match file {
.collect(); Ok(file) => Ok((path, file.into())),
Err(err) => Err(Error::InvalidKeyFile(format!("{:?}: {}", path, err))),
Ok(accounts) })
.collect()
} }
} }

View File

@ -24,6 +24,7 @@ pub enum Error {
InvalidPassword, InvalidPassword,
InvalidSecret, InvalidSecret,
InvalidAccount, InvalidAccount,
InvalidKeyFile(String),
CreationFailed, CreationFailed,
EthKey(EthKeyError), EthKey(EthKeyError),
Custom(String), Custom(String),
@ -32,12 +33,13 @@ pub enum Error {
impl fmt::Display for Error { impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let s = match *self { let s = match *self {
Error::Io(ref err) => format!("{}", err), Error::Io(ref err) => err.to_string(),
Error::InvalidPassword => "Invalid password".into(), Error::InvalidPassword => "Invalid password".into(),
Error::InvalidSecret => "Invalid secret".into(), Error::InvalidSecret => "Invalid secret".into(),
Error::InvalidAccount => "Invalid account".into(), Error::InvalidAccount => "Invalid account".into(),
Error::InvalidKeyFile(ref reason) => format!("Invalid key file: {}", reason),
Error::CreationFailed => "Account creation failed".into(), Error::CreationFailed => "Account creation failed".into(),
Error::EthKey(ref err) => format!("{}", err), Error::EthKey(ref err) => err.to_string(),
Error::Custom(ref s) => s.clone(), Error::Custom(ref s) => s.clone(),
}; };

25
evmbin/Cargo.lock generated
View File

@ -159,6 +159,7 @@ name = "ethash"
version = "1.3.0" version = "1.3.0"
dependencies = [ dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"sha3 0.1.0", "sha3 0.1.0",
] ]
@ -250,8 +251,9 @@ dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)", "mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)",
"nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rocksdb 0.4.5 (git+https://github.com/ethcore/rust-rocksdb)", "rocksdb 0.4.5",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
@ -301,6 +303,7 @@ dependencies = [
"serde_codegen 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde_codegen 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
@ -622,6 +625,17 @@ name = "odds"
version = "0.2.12" version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "parking_lot"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "primal" name = "primal"
version = "0.2.3" version = "0.2.3"
@ -724,16 +738,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "rocksdb" name = "rocksdb"
version = "0.4.5" version = "0.4.5"
source = "git+https://github.com/ethcore/rust-rocksdb#9be41e05923616dfa28741c58b22776d479751e6"
dependencies = [ dependencies = [
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
"rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)", "rocksdb-sys 0.3.0",
] ]
[[package]] [[package]]
name = "rocksdb-sys" name = "rocksdb-sys"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/ethcore/rust-rocksdb#9be41e05923616dfa28741c58b22776d479751e6"
dependencies = [ dependencies = [
"gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)", "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
@ -831,6 +843,11 @@ name = "slab"
version = "0.2.0" version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "smallvec"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "solicit" name = "solicit"
version = "0.4.4" version = "0.4.4"

View File

@ -320,7 +320,7 @@ fn binary_expr_struct(
let read_expr = match fields.iter().any(|f| codegen::has_ptr(&f.ty)) { let read_expr = match fields.iter().any(|f| codegen::has_ptr(&f.ty)) {
true => { true => {
// cannot create structs with pointers // cannot create structs with pointers
quote_expr!(cx, Err(::ipc::binary::BinaryConvertError)) quote_expr!(cx, Err(::ipc::binary::BinaryConvertError::not_supported()))
}, },
false => { false => {
if value_ident.is_some() { if value_ident.is_some() {
@ -412,7 +412,7 @@ fn binary_expr_enum(
arms.iter().map(|x| x.write.clone()).collect::<Vec<ast::Arm>>(), arms.iter().map(|x| x.write.clone()).collect::<Vec<ast::Arm>>(),
arms.iter().map(|x| x.read.clone()).collect::<Vec<ast::Arm>>()); arms.iter().map(|x| x.read.clone()).collect::<Vec<ast::Arm>>());
read_arms.push(quote_arm!(cx, _ => { Err(BinaryConvertError) } )); read_arms.push(quote_arm!(cx, _ => { Err(BinaryConvertError::variant(buffer[0])) } ));
Ok(BinaryExpressions { Ok(BinaryExpressions {
size: quote_expr!(cx, 1usize + match *self { $size_arms }), size: quote_expr!(cx, 1usize + match *self { $size_arms }),
@ -530,9 +530,29 @@ fn fields_sequence(
tt.push(Token(_sp, token::Comma)); tt.push(Token(_sp, token::Comma));
tt.push(Token(_sp, token::Ident(ext_cx.ident_of("length_stack")))); tt.push(Token(_sp, token::Ident(ext_cx.ident_of("length_stack"))));
tt.push(Token(_sp, token::CloseDelim(token::Paren)));
// name member if it has resulted in the error
tt.push(Token(_sp, token::Dot));
tt.push(Token(_sp, token::Ident(ext_cx.ident_of("map_err"))));
tt.push(Token(_sp, token::OpenDelim(token::Paren)));
tt.push(Token(_sp, token::BinOp(token::Or)));
tt.push(Token(_sp, token::Ident(ext_cx.ident_of("e"))));
tt.push(Token(_sp, token::BinOp(token::Or)));
tt.push(Token(_sp, token::Ident(ext_cx.ident_of("e"))));
tt.push(Token(_sp, token::Dot));
tt.push(Token(_sp, token::Ident(ext_cx.ident_of("named"))));
tt.push(Token(_sp, token::OpenDelim(token::Paren)));
tt.push(Token(_sp, token::Literal(token::Lit::Str_(
field.ident.unwrap_or(ext_cx.ident_of(&format!("f{}", idx))).name),
None))
);
tt.push(Token(_sp, token::CloseDelim(token::Paren)));
tt.push(Token(_sp, token::CloseDelim(token::Paren)));
tt.push(Token(_sp, token::CloseDelim(token::Paren))); tt.push(Token(_sp, token::CloseDelim(token::Paren)));
tt.push(Token(_sp, token::CloseDelim(token::Paren)));
tt.push(Token(_sp, token::Comma)); tt.push(Token(_sp, token::Comma));
} }
if named_members { if named_members {
@ -573,7 +593,7 @@ fn named_fields_sequence(
tt.push(Token(_sp, token::OpenDelim(token::Brace))); tt.push(Token(_sp, token::OpenDelim(token::Brace)));
for (idx, field) in fields.iter().enumerate() { for (idx, field) in fields.iter().enumerate() {
tt.push(Token(_sp, token::Ident(field.ident.clone().unwrap()))); tt.push(Token(_sp, token::Ident(field.ident.clone().expect("function is called for named fields"))));
tt.push(Token(_sp, token::Colon)); tt.push(Token(_sp, token::Colon));
// special case for u8, it just takes byte form sequence // special case for u8, it just takes byte form sequence
@ -646,9 +666,26 @@ fn named_fields_sequence(
tt.push(Token(_sp, token::Comma)); tt.push(Token(_sp, token::Comma));
tt.push(Token(_sp, token::Ident(ext_cx.ident_of("length_stack")))); tt.push(Token(_sp, token::Ident(ext_cx.ident_of("length_stack"))));
tt.push(Token(_sp, token::CloseDelim(token::Paren))); tt.push(Token(_sp, token::CloseDelim(token::Paren)));
// name member if it has resulted in the error
tt.push(Token(_sp, token::Dot));
tt.push(Token(_sp, token::Ident(ext_cx.ident_of("map_err"))));
tt.push(Token(_sp, token::OpenDelim(token::Paren)));
tt.push(Token(_sp, token::BinOp(token::Or)));
tt.push(Token(_sp, token::Ident(ext_cx.ident_of("e"))));
tt.push(Token(_sp, token::BinOp(token::Or)));
tt.push(Token(_sp, token::Ident(ext_cx.ident_of("e"))));
tt.push(Token(_sp, token::Dot));
tt.push(Token(_sp, token::Ident(ext_cx.ident_of("named"))));
tt.push(Token(_sp, token::OpenDelim(token::Paren)));
tt.push(Token(_sp, token::Literal(token::Lit::Str_(
field.ident.unwrap_or(ext_cx.ident_of(&format!("f{}", idx))).name),
None))
);
tt.push(Token(_sp, token::CloseDelim(token::Paren)));
tt.push(Token(_sp, token::CloseDelim(token::Paren)));
tt.push(Token(_sp, token::CloseDelim(token::Paren))); tt.push(Token(_sp, token::CloseDelim(token::Paren)));
tt.push(Token(_sp, token::Comma)); tt.push(Token(_sp, token::Comma));
} }

View File

@ -24,7 +24,74 @@ use std::ops::Range;
use super::Handshake; use super::Handshake;
#[derive(Debug)] #[derive(Debug)]
pub struct BinaryConvertError; pub enum BinaryConvertErrorKind {
SizeMismatch {
expected: usize,
found: usize,
},
TargetPayloadEmpty,
UnexpectedVariant(u8),
MissingLengthValue,
InconsistentBoundaries,
NotSupported,
}
#[derive(Debug)]
pub struct BinaryConvertError {
member_tree: Vec<&'static str>,
kind: BinaryConvertErrorKind,
}
impl BinaryConvertError {
pub fn size(expected: usize, found: usize) -> BinaryConvertError {
BinaryConvertError {
member_tree: Vec::new(),
kind: BinaryConvertErrorKind::SizeMismatch {
expected: expected,
found: found,
}
}
}
pub fn empty() -> BinaryConvertError {
BinaryConvertError { member_tree: Vec::new(), kind: BinaryConvertErrorKind::TargetPayloadEmpty }
}
pub fn variant(val: u8) -> BinaryConvertError {
BinaryConvertError { member_tree: Vec::new(), kind: BinaryConvertErrorKind::UnexpectedVariant(val) }
}
pub fn length() -> BinaryConvertError {
BinaryConvertError { member_tree: Vec::new(), kind: BinaryConvertErrorKind::MissingLengthValue }
}
pub fn boundaries() -> BinaryConvertError {
BinaryConvertError { member_tree: Vec::new(), kind: BinaryConvertErrorKind::InconsistentBoundaries }
}
pub fn not_supported() -> BinaryConvertError {
BinaryConvertError { member_tree: Vec::new(), kind: BinaryConvertErrorKind::NotSupported }
}
pub fn named(mut self, name: &'static str) -> BinaryConvertError {
self.member_tree.push(name);
self
}
}
#[derive(Debug)]
pub enum BinaryError {
Serialization(BinaryConvertError),
Io(::std::io::Error),
}
impl From<::std::io::Error> for BinaryError {
fn from(err: ::std::io::Error) -> Self { BinaryError::Io(err) }
}
impl From<BinaryConvertError> for BinaryError {
fn from(err: BinaryConvertError) -> Self { BinaryError::Serialization(err) }
}
pub trait BinaryConvertable : Sized { pub trait BinaryConvertable : Sized {
fn size(&self) -> usize { fn size(&self) -> usize {
@ -36,7 +103,7 @@ pub trait BinaryConvertable : Sized {
fn from_bytes(buffer: &[u8], length_stack: &mut VecDeque<usize>) -> Result<Self, BinaryConvertError>; fn from_bytes(buffer: &[u8], length_stack: &mut VecDeque<usize>) -> Result<Self, BinaryConvertError>;
fn from_empty_bytes() -> Result<Self, BinaryConvertError> { fn from_empty_bytes() -> Result<Self, BinaryConvertError> {
Err(BinaryConvertError) Err(BinaryConvertError::size(mem::size_of::<Self>(), 0))
} }
fn len_params() -> usize { fn len_params() -> usize {
@ -50,7 +117,7 @@ impl<T> BinaryConvertable for Option<T> where T: BinaryConvertable {
} }
fn to_bytes(&self, buffer: &mut [u8], length_stack: &mut VecDeque<usize>) -> Result<(), BinaryConvertError> { fn to_bytes(&self, buffer: &mut [u8], length_stack: &mut VecDeque<usize>) -> Result<(), BinaryConvertError> {
match *self { None => Err(BinaryConvertError), Some(ref val) => val.to_bytes(buffer, length_stack) } match *self { None => Err(BinaryConvertError::empty()), Some(ref val) => val.to_bytes(buffer, length_stack) }
} }
fn from_bytes(buffer: &[u8], length_stack: &mut VecDeque<usize>) -> Result<Self, BinaryConvertError> { fn from_bytes(buffer: &[u8], length_stack: &mut VecDeque<usize>) -> Result<Self, BinaryConvertError> {
@ -77,7 +144,7 @@ impl<E: BinaryConvertable> BinaryConvertable for Result<(), E> {
fn to_bytes(&self, buffer: &mut [u8], length_stack: &mut VecDeque<usize>) -> Result<(), BinaryConvertError> { fn to_bytes(&self, buffer: &mut [u8], length_stack: &mut VecDeque<usize>) -> Result<(), BinaryConvertError> {
match *self { match *self {
Ok(_) => Err(BinaryConvertError), Ok(_) => Err(BinaryConvertError::empty()),
Err(ref e) => Ok(try!(e.to_bytes(buffer, length_stack))), Err(ref e) => Ok(try!(e.to_bytes(buffer, length_stack))),
} }
} }
@ -107,7 +174,7 @@ impl<R: BinaryConvertable> BinaryConvertable for Result<R, ()> {
fn to_bytes(&self, buffer: &mut [u8], length_stack: &mut VecDeque<usize>) -> Result<(), BinaryConvertError> { fn to_bytes(&self, buffer: &mut [u8], length_stack: &mut VecDeque<usize>) -> Result<(), BinaryConvertError> {
match *self { match *self {
Ok(ref r) => Ok(try!(r.to_bytes(buffer, length_stack))), Ok(ref r) => Ok(try!(r.to_bytes(buffer, length_stack))),
Err(_) => Err(BinaryConvertError), Err(_) => Err(BinaryConvertError::empty()),
} }
} }
@ -160,7 +227,7 @@ impl<R: BinaryConvertable, E: BinaryConvertable> BinaryConvertable for Result<R,
} }
} }
1 => Ok(Err(try!(E::from_bytes(&buffer[1..], length_stack)))), 1 => Ok(Err(try!(E::from_bytes(&buffer[1..], length_stack)))),
_ => Err(BinaryConvertError) _ => Err(BinaryConvertError::variant(buffer[0]))
} }
} }
@ -216,7 +283,7 @@ impl<K, V> BinaryConvertable for BTreeMap<K, V> where K : BinaryConvertable + Or
loop { loop {
let key_size = match K::len_params() { let key_size = match K::len_params() {
0 => mem::size_of::<K>(), 0 => mem::size_of::<K>(),
_ => try!(length_stack.pop_front().ok_or(BinaryConvertError)), _ => try!(length_stack.pop_front().ok_or(BinaryConvertError::length())),
}; };
let key = if key_size == 0 { let key = if key_size == 0 {
try!(K::from_empty_bytes()) try!(K::from_empty_bytes())
@ -227,7 +294,7 @@ impl<K, V> BinaryConvertable for BTreeMap<K, V> where K : BinaryConvertable + Or
let val_size = match V::len_params() { let val_size = match V::len_params() {
0 => mem::size_of::<V>(), 0 => mem::size_of::<V>(),
_ => try!(length_stack.pop_front().ok_or(BinaryConvertError)), _ => try!(length_stack.pop_front().ok_or(BinaryConvertError::length())),
}; };
let val = if val_size == 0 { let val = if val_size == 0 {
try!(V::from_empty_bytes()) try!(V::from_empty_bytes())
@ -239,7 +306,7 @@ impl<K, V> BinaryConvertable for BTreeMap<K, V> where K : BinaryConvertable + Or
if index == buffer.len() { break; } if index == buffer.len() { break; }
if index > buffer.len() { if index > buffer.len() {
return Err(BinaryConvertError) return Err(BinaryConvertError::boundaries())
} }
} }
@ -255,6 +322,74 @@ impl<K, V> BinaryConvertable for BTreeMap<K, V> where K : BinaryConvertable + Or
} }
} }
impl<T> BinaryConvertable for VecDeque<T> where T: BinaryConvertable {
fn size(&self) -> usize {
match T::len_params() {
0 => mem::size_of::<T>() * self.len(),
_ => self.iter().fold(0usize, |acc, t| acc + t.size()),
}
}
fn to_bytes(&self, buffer: &mut [u8], length_stack: &mut VecDeque<usize>) -> Result<(), BinaryConvertError> {
let mut offset = 0usize;
for item in self.iter() {
let next_size = match T::len_params() {
0 => mem::size_of::<T>(),
_ => { let size = item.size(); length_stack.push_back(size); size },
};
if next_size > 0 {
let item_end = offset + next_size;
try!(item.to_bytes(&mut buffer[offset..item_end], length_stack));
offset = item_end;
}
}
Ok(())
}
fn from_bytes(buffer: &[u8], length_stack: &mut VecDeque<usize>) -> Result<Self, BinaryConvertError> {
let mut index = 0;
let mut result = Self::with_capacity(
match T::len_params() {
0 => buffer.len() / mem::size_of::<T>(),
_ => 128,
});
if buffer.len() == 0 { return Ok(result); }
loop {
let next_size = match T::len_params() {
0 => mem::size_of::<T>(),
_ => try!(length_stack.pop_front().ok_or(BinaryConvertError::length())),
};
let item = if next_size == 0 {
try!(T::from_empty_bytes())
}
else {
try!(T::from_bytes(&buffer[index..index+next_size], length_stack))
};
result.push_back(item);
index = index + next_size;
if index == buffer.len() { break; }
if index + next_size > buffer.len() {
return Err(BinaryConvertError::boundaries())
}
}
Ok(result)
}
fn from_empty_bytes() -> Result<Self, BinaryConvertError> {
Ok(Self::new())
}
fn len_params() -> usize {
1
}
}
//
impl<T> BinaryConvertable for Vec<T> where T: BinaryConvertable { impl<T> BinaryConvertable for Vec<T> where T: BinaryConvertable {
fn size(&self) -> usize { fn size(&self) -> usize {
match T::len_params() { match T::len_params() {
@ -292,7 +427,7 @@ impl<T> BinaryConvertable for Vec<T> where T: BinaryConvertable {
loop { loop {
let next_size = match T::len_params() { let next_size = match T::len_params() {
0 => mem::size_of::<T>(), 0 => mem::size_of::<T>(),
_ => try!(length_stack.pop_front().ok_or(BinaryConvertError)), _ => try!(length_stack.pop_front().ok_or(BinaryConvertError::length())),
}; };
let item = if next_size == 0 { let item = if next_size == 0 {
try!(T::from_empty_bytes()) try!(T::from_empty_bytes())
@ -304,10 +439,9 @@ impl<T> BinaryConvertable for Vec<T> where T: BinaryConvertable {
index = index + next_size; index = index + next_size;
if index == buffer.len() { break; } if index == buffer.len() { break; }
if index > buffer.len() { if index + next_size > buffer.len() {
return Err(BinaryConvertError) return Err(BinaryConvertError::boundaries())
} }
} }
Ok(result) Ok(result)
@ -351,7 +485,7 @@ impl<T> BinaryConvertable for Range<T> where T: BinaryConvertable {
} }
fn from_empty_bytes() -> Result<Self, BinaryConvertError> { fn from_empty_bytes() -> Result<Self, BinaryConvertError> {
Err(BinaryConvertError) Err(BinaryConvertError::empty())
} }
fn to_bytes(&self, buffer: &mut[u8], length_stack: &mut VecDeque<usize>) -> Result<(), BinaryConvertError> { fn to_bytes(&self, buffer: &mut[u8], length_stack: &mut VecDeque<usize>) -> Result<(), BinaryConvertError> {
@ -442,7 +576,7 @@ impl BinaryConvertable for Vec<u8> {
} }
} }
pub fn deserialize_from<T, R>(r: &mut R) -> Result<T, BinaryConvertError> pub fn deserialize_from<T, R>(r: &mut R) -> Result<T, BinaryError>
where R: ::std::io::Read, where R: ::std::io::Read,
T: BinaryConvertable T: BinaryConvertable
{ {
@ -453,12 +587,15 @@ pub fn deserialize_from<T, R>(r: &mut R) -> Result<T, BinaryConvertError>
let fixed_size = mem::size_of::<T>(); let fixed_size = mem::size_of::<T>();
let mut payload_buffer = Vec::with_capacity(fixed_size); let mut payload_buffer = Vec::with_capacity(fixed_size);
unsafe { payload_buffer.set_len(fixed_size); } unsafe { payload_buffer.set_len(fixed_size); }
try!(r.read(&mut payload_buffer).map_err(|_| BinaryConvertError)); let bytes_read = try!(r.read(&mut payload_buffer));
T::from_bytes(&payload_buffer[..], &mut fake_stack) if bytes_read != mem::size_of::<T>() {
return Err(BinaryError::Serialization(BinaryConvertError::size(fixed_size, bytes_read)))
}
Ok(try!(T::from_bytes(&payload_buffer[..], &mut fake_stack)))
}, },
_ => { _ => {
let mut payload = Vec::new(); let mut payload = Vec::new();
try!(r.read_to_end(&mut payload).map_err(|_| BinaryConvertError)); try!(r.read_to_end(&mut payload));
let stack_len = try!(u64::from_bytes(&payload[0..8], &mut fake_stack)) as usize; let stack_len = try!(u64::from_bytes(&payload[0..8], &mut fake_stack)) as usize;
let mut length_stack = VecDeque::<usize>::with_capacity(stack_len); let mut length_stack = VecDeque::<usize>::with_capacity(stack_len);
@ -474,23 +611,23 @@ pub fn deserialize_from<T, R>(r: &mut R) -> Result<T, BinaryConvertError>
let size = try!(u64::from_bytes(&payload[8+stack_len*8..16+stack_len*8], &mut fake_stack)) as usize; let size = try!(u64::from_bytes(&payload[8+stack_len*8..16+stack_len*8], &mut fake_stack)) as usize;
match size { match size {
0 => { 0 => {
T::from_empty_bytes() Ok(try!(T::from_empty_bytes()))
}, },
_ => { _ => {
T::from_bytes(&payload[16+stack_len*8..], &mut length_stack) Ok(try!(T::from_bytes(&payload[16+stack_len*8..], &mut length_stack)))
} }
} }
}, },
} }
} }
pub fn deserialize<T: BinaryConvertable>(buffer: &[u8]) -> Result<T, BinaryConvertError> { pub fn deserialize<T: BinaryConvertable>(buffer: &[u8]) -> Result<T, BinaryError> {
use std::io::Cursor; use std::io::Cursor;
let mut buff = Cursor::new(buffer); let mut buff = Cursor::new(buffer);
deserialize_from::<T, _>(&mut buff) deserialize_from::<T, _>(&mut buff)
} }
pub fn serialize_into<T, W>(t: &T, w: &mut W) -> Result<(), BinaryConvertError> pub fn serialize_into<T, W>(t: &T, w: &mut W) -> Result<(), BinaryError>
where W: ::std::io::Write, where W: ::std::io::Write,
T: BinaryConvertable T: BinaryConvertable
{ {
@ -502,7 +639,7 @@ pub fn serialize_into<T, W>(t: &T, w: &mut W) -> Result<(), BinaryConvertError>
let mut buffer = Vec::with_capacity(fixed_size); let mut buffer = Vec::with_capacity(fixed_size);
unsafe { buffer.set_len(fixed_size); } unsafe { buffer.set_len(fixed_size); }
try!(t.to_bytes(&mut buffer[..], &mut fake_stack)); try!(t.to_bytes(&mut buffer[..], &mut fake_stack));
try!(w.write(&buffer[..]).map_err(|_| BinaryConvertError)); try!(w.write(&buffer[..]));
Ok(()) Ok(())
}, },
_ => { _ => {
@ -511,8 +648,8 @@ pub fn serialize_into<T, W>(t: &T, w: &mut W) -> Result<(), BinaryConvertError>
let size = t.size(); let size = t.size();
if size == 0 { if size == 0 {
try!(w.write(&size_buffer).map_err(|_| BinaryConvertError)); try!(w.write(&size_buffer));
try!(w.write(&size_buffer).map_err(|_| BinaryConvertError)); try!(w.write(&size_buffer));
return Ok(()); return Ok(());
} }
@ -522,7 +659,7 @@ pub fn serialize_into<T, W>(t: &T, w: &mut W) -> Result<(), BinaryConvertError>
let stack_len = length_stack.len(); let stack_len = length_stack.len();
try!((stack_len as u64).to_bytes(&mut size_buffer[..], &mut fake_stack)); try!((stack_len as u64).to_bytes(&mut size_buffer[..], &mut fake_stack));
try!(w.write(&size_buffer[..]).map_err(|_| BinaryConvertError)); try!(w.write(&size_buffer[..]));
if stack_len > 0 { if stack_len > 0 {
let mut header_buffer = Vec::with_capacity(stack_len * 8); let mut header_buffer = Vec::with_capacity(stack_len * 8);
unsafe { header_buffer.set_len(stack_len * 8); }; unsafe { header_buffer.set_len(stack_len * 8); };
@ -535,20 +672,20 @@ pub fn serialize_into<T, W>(t: &T, w: &mut W) -> Result<(), BinaryConvertError>
} }
idx = idx + 1; idx = idx + 1;
} }
try!(w.write(&header_buffer[..]).map_err(|_| BinaryConvertError)); try!(w.write(&header_buffer[..]));
} }
try!((size as u64).to_bytes(&mut size_buffer[..], &mut fake_stack)); try!((size as u64).to_bytes(&mut size_buffer[..], &mut fake_stack));
try!(w.write(&size_buffer[..]).map_err(|_| BinaryConvertError)); try!(w.write(&size_buffer[..]));
try!(w.write(&buffer[..]).map_err(|_| BinaryConvertError)); try!(w.write(&buffer[..]));
Ok(()) Ok(())
}, },
} }
} }
pub fn serialize<T: BinaryConvertable>(t: &T) -> Result<Vec<u8>, BinaryConvertError> { pub fn serialize<T: BinaryConvertable>(t: &T) -> Result<Vec<u8>, BinaryError> {
use std::io::Cursor; use std::io::Cursor;
let mut buff = Cursor::new(Vec::new()); let mut buff = Cursor::new(Vec::new());
try!(serialize_into(t, &mut buff)); try!(serialize_into(t, &mut buff));
@ -562,9 +699,8 @@ macro_rules! binary_fixed_size {
impl BinaryConvertable for $target_ty { impl BinaryConvertable for $target_ty {
fn from_bytes(bytes: &[u8], _length_stack: &mut VecDeque<usize>) -> Result<Self, BinaryConvertError> { fn from_bytes(bytes: &[u8], _length_stack: &mut VecDeque<usize>) -> Result<Self, BinaryConvertError> {
match bytes.len().cmp(&::std::mem::size_of::<$target_ty>()) { match bytes.len().cmp(&::std::mem::size_of::<$target_ty>()) {
::std::cmp::Ordering::Less => return Err(BinaryConvertError), ::std::cmp::Ordering::Equal => (),
::std::cmp::Ordering::Greater => return Err(BinaryConvertError), _ => return Err(BinaryConvertError::size(::std::mem::size_of::<$target_ty>(), bytes.len())),
::std::cmp::Ordering::Equal => ()
}; };
let mut res: Self = unsafe { ::std::mem::uninitialized() }; let mut res: Self = unsafe { ::std::mem::uninitialized() };
res.copy_raw(bytes); res.copy_raw(bytes);
@ -898,6 +1034,29 @@ fn serialize_btree() {
assert_eq!(res[&1u64], 5u64); assert_eq!(res[&1u64], 5u64);
} }
#[test]
fn serialize_refcell() {
use std::cell::RefCell;
let source = RefCell::new(vec![5u32, 12u32, 19u32]);
let serialized = serialize(&source).unwrap();
let deserialized = deserialize::<RefCell<Vec<u32>>>(&serialized).unwrap();
assert_eq!(source, deserialized);
}
#[test]
fn serialize_cell() {
use std::cell::Cell;
use std::str::FromStr;
let source = Cell::new(U256::from_str("01231231231239999").unwrap());
let serialized = serialize(&source).unwrap();
let deserialized = deserialize::<Cell<U256>>(&serialized).unwrap();
assert_eq!(source, deserialized);
}
#[test] #[test]
fn serialize_handshake() { fn serialize_handshake() {
use std::io::{Cursor, SeekFrom, Seek}; use std::io::{Cursor, SeekFrom, Seek};
@ -915,5 +1074,80 @@ fn serialize_handshake() {
let res = deserialize_from::<BinHandshake, _>(&mut buff).unwrap().to_semver(); let res = deserialize_from::<BinHandshake, _>(&mut buff).unwrap().to_semver();
assert_eq!(res, handshake); assert_eq!(res, handshake);
}
#[test]
fn serialize_invalid_size() {
// value
let deserialized = deserialize::<u64>(&[]);
match deserialized {
Err(BinaryError::Serialization(
BinaryConvertError {
kind: BinaryConvertErrorKind::SizeMismatch { expected: 8, found: 0 },
member_tree: _
})) => {},
other => panic!("Not a size mismatched error but: {:?}", other),
}
}
#[test]
fn serialize_boundaries() {
// value
let deserialized = deserialize::<Vec<u32>>(
&[
// payload header
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
2u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
//
0u8, 0u8, 0u8, 5u8,
0u8, 0u8, 0u8, 4u8,
1u8, 1u8, /* not 4 bytes */
]
);
match deserialized {
Err(BinaryError::Serialization(
BinaryConvertError {
kind: BinaryConvertErrorKind::InconsistentBoundaries,
member_tree: _
})) => {},
other => panic!("Not an inconsistent boundaries error but: {:?}", other),
}
}
#[test]
fn serialize_empty_try() {
// value
let mut stack = VecDeque::new();
let mut data = vec![0u8; 16];
let sample: Option<Vec<u8>> = None;
let serialized = sample.to_bytes(&mut data, &mut stack);
match serialized {
Err(BinaryConvertError {
kind: BinaryConvertErrorKind::TargetPayloadEmpty,
member_tree: _
}) => {},
other => panic!("Not an error about empty payload to be produced but: {:?}", other),
}
}
#[test]
fn serialize_not_enough_lengths() {
// value
let deserialized = deserialize::<Vec<Option<u32>>>(
&[
// payload header
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
2u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
// does not matter because no length param for the first option
0u8,
]
);
match deserialized {
Err(BinaryError::Serialization(
BinaryConvertError {
kind: BinaryConvertErrorKind::MissingLengthValue,
member_tree: _
})) => {},
other => panic!("Not an missing length param error but: {:?}", other),
}
} }

View File

@ -17,6 +17,7 @@
//! Spec params deserialization. //! Spec params deserialization.
use uint::Uint; use uint::Uint;
use hash::H256;
/// Spec params. /// Spec params.
#[derive(Debug, PartialEq, Deserialize)] #[derive(Debug, PartialEq, Deserialize)]
@ -33,6 +34,12 @@ pub struct Params {
/// Minimum gas limit. /// Minimum gas limit.
#[serde(rename="minGasLimit")] #[serde(rename="minGasLimit")]
pub min_gas_limit: Uint, pub min_gas_limit: Uint,
/// Option fork block number to check.
#[serde(rename="forkBlock")]
pub fork_block: Option<Uint>,
/// Expected fork block hash.
#[serde(rename="forkCanonHash")]
pub fork_hash: Option<H256>,
} }
#[cfg(test)] #[cfg(test)]

View File

@ -78,7 +78,9 @@ mod tests {
"frontierCompatibilityModeLimit": "0x789b0", "frontierCompatibilityModeLimit": "0x789b0",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID" : "0x2" "networkID" : "0x2",
"forkBlock": "0xffffffffffffffff",
"forkCanonHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}, },
"genesis": { "genesis": {
"seal": { "seal": {

View File

@ -36,39 +36,25 @@ use regex::Regex;
use util::RotatingLogger; use util::RotatingLogger;
use util::log::Colour; use util::log::Colour;
pub struct Settings { #[derive(Debug, PartialEq)]
pub struct Config {
pub mode: Option<String>,
pub color: bool, pub color: bool,
pub init: Option<String>,
pub file: Option<String>, pub file: Option<String>,
} }
impl Settings { impl Default for Config {
pub fn new() -> Settings { fn default() -> Self {
Settings { Config {
color: true, mode: None,
init: None, color: !cfg!(windows),
file: None, file: None,
} }
} }
pub fn init(mut self, init: String) -> Settings {
self.init = Some(init);
self
}
pub fn file(mut self, file: String) -> Settings {
self.file = Some(file);
self
}
pub fn no_color(mut self) -> Settings {
self.color = false;
self
}
} }
/// Sets up the logger /// Sets up the logger
pub fn setup_log(settings: &Settings) -> Arc<RotatingLogger> { pub fn setup_log(config: &Config) -> Result<Arc<RotatingLogger>, String> {
use rlog::*; use rlog::*;
let mut levels = String::new(); let mut levels = String::new();
@ -84,16 +70,21 @@ pub fn setup_log(settings: &Settings) -> Arc<RotatingLogger> {
builder.parse(lvl); builder.parse(lvl);
} }
if let Some(ref s) = settings.init { if let Some(ref s) = config.mode {
levels.push_str(s); levels.push_str(s);
builder.parse(s); builder.parse(s);
} }
let isatty = stderr_isatty(); let isatty = stderr_isatty();
let enable_color = settings.color && isatty; let enable_color = config.color && isatty;
let logs = Arc::new(RotatingLogger::new(levels)); let logs = Arc::new(RotatingLogger::new(levels));
let logger = logs.clone(); let logger = logs.clone();
let maybe_file = settings.file.as_ref().map(|f| File::create(f).unwrap_or_else(|_| panic!("Cannot write to log file given: {}", f)));
let maybe_file = match config.file.as_ref() {
Some(f) => Some(try!(File::create(f).map_err(|_| format!("Cannot write to log file given: {}", f)))),
None => None,
};
let format = move |record: &LogRecord| { let format = move |record: &LogRecord| {
let timestamp = time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).unwrap(); let timestamp = time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).unwrap();
@ -123,9 +114,11 @@ pub fn setup_log(settings: &Settings) -> Arc<RotatingLogger> {
ret ret
}; };
builder.format(format); builder.format(format);
builder.init().unwrap(); builder.init().unwrap();
logs
Ok(logs)
} }
fn kill_color(s: &str) -> String { fn kill_color(s: &str) -> String {

84
parity/account.rs Normal file
View File

@ -0,0 +1,84 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethcore::ethstore::{EthStore, import_accounts};
use ethcore::ethstore::dir::DiskDirectory;
use ethcore::account_provider::AccountProvider;
use helpers::{password_prompt, password_from_file};
#[derive(Debug, PartialEq)]
pub enum AccountCmd {
New(NewAccount),
List(String),
Import(ImportAccounts),
}
#[derive(Debug, PartialEq)]
pub struct NewAccount {
pub iterations: u32,
pub path: String,
pub password_file: Option<String>,
}
#[derive(Debug, PartialEq)]
pub struct ImportAccounts {
pub from: Vec<String>,
pub to: String,
}
pub fn execute(cmd: AccountCmd) -> Result<String, String> {
match cmd {
AccountCmd::New(new_cmd) => new(new_cmd),
AccountCmd::List(path) => list(path),
AccountCmd::Import(import_cmd) => import(import_cmd),
}
}
fn new(n: NewAccount) -> Result<String, String> {
let password: String = match n.password_file {
Some(file) => try!(password_from_file(file)),
None => try!(password_prompt()),
};
let dir = Box::new(DiskDirectory::create(n.path).unwrap());
let secret_store = Box::new(EthStore::open_with_iterations(dir, n.iterations).unwrap());
let acc_provider = AccountProvider::new(secret_store);
let new_account = acc_provider.new_account(&password).unwrap();
Ok(format!("{:?}", new_account))
}
fn list(path: String) -> Result<String, String> {
let dir = Box::new(DiskDirectory::create(path).unwrap());
let secret_store = Box::new(EthStore::open(dir).unwrap());
let acc_provider = AccountProvider::new(secret_store);
let accounts = acc_provider.accounts();
let result = accounts.into_iter()
.map(|a| format!("{:?}", a))
.collect::<Vec<String>>()
.join("\n");
Ok(result)
}
fn import(i: ImportAccounts) -> Result<String, String> {
let to = DiskDirectory::create(i.to).unwrap();
let mut imported = 0;
for path in &i.from {
let from = DiskDirectory::at(path);
imported += try!(import_accounts(&from, &to).map_err(|_| "Importing accounts failed.")).len();
}
Ok(format!("{}", imported))
}

286
parity/blockchain.rs Normal file
View File

@ -0,0 +1,286 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::str::{FromStr, from_utf8};
use std::{io, fs};
use std::io::{BufReader, BufRead};
use std::time::Duration;
use std::thread::sleep;
use std::path::Path;
use std::sync::Arc;
use rustc_serialize::hex::FromHex;
use ethcore_logger::{setup_log, Config as LogConfig};
use util::panics::{PanicHandler, ForwardPanic};
use util::{PayloadInfo, ToPretty};
use ethcore::service::ClientService;
use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType, BlockImportError, BlockChainClient, BlockID};
use ethcore::error::ImportError;
use ethcore::miner::Miner;
use cache::CacheConfig;
use informant::Informant;
use params::{SpecType, Pruning};
use helpers::{to_client_config, execute_upgrades};
use dir::Directories;
use fdlimit;
#[derive(Debug, PartialEq)]
pub enum DataFormat {
Hex,
Binary,
}
impl Default for DataFormat {
fn default() -> Self {
DataFormat::Binary
}
}
impl FromStr for DataFormat {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"binary" | "bin" => Ok(DataFormat::Binary),
"hex" => Ok(DataFormat::Hex),
x => Err(format!("Invalid format: {}", x))
}
}
}
#[derive(Debug, PartialEq)]
pub enum BlockchainCmd {
Import(ImportBlockchain),
Export(ExportBlockchain),
}
#[derive(Debug, PartialEq)]
pub struct ImportBlockchain {
pub spec: SpecType,
pub logger_config: LogConfig,
pub cache_config: CacheConfig,
pub dirs: Directories,
pub file_path: Option<String>,
pub format: Option<DataFormat>,
pub pruning: Pruning,
pub compaction: DatabaseCompactionProfile,
pub wal: bool,
pub mode: Mode,
pub tracing: Switch,
pub vm_type: VMType,
}
#[derive(Debug, PartialEq)]
pub struct ExportBlockchain {
pub spec: SpecType,
pub logger_config: LogConfig,
pub cache_config: CacheConfig,
pub dirs: Directories,
pub file_path: Option<String>,
pub format: Option<DataFormat>,
pub pruning: Pruning,
pub compaction: DatabaseCompactionProfile,
pub wal: bool,
pub mode: Mode,
pub tracing: Switch,
pub from_block: BlockID,
pub to_block: BlockID,
}
pub fn execute(cmd: BlockchainCmd) -> Result<String, String> {
match cmd {
BlockchainCmd::Import(import_cmd) => execute_import(import_cmd),
BlockchainCmd::Export(export_cmd) => execute_export(export_cmd),
}
}
fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
// Setup panic handler
let panic_handler = PanicHandler::new_in_arc();
// load spec file
let spec = try!(cmd.spec.spec());
// load genesis hash
let genesis_hash = spec.genesis_header().hash();
// Setup logging
let _logger = setup_log(&cmd.logger_config);
fdlimit::raise_fd_limit();
// select pruning algorithm
let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());
// prepare client_path
let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
// execute upgrades
try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
// prepare client config
let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), spec.fork_name.as_ref());
// build client
let service = try!(ClientService::start(
client_config,
spec,
Path::new(&client_path),
Arc::new(Miner::with_spec(try!(cmd.spec.spec()))),
).map_err(|e| format!("Client service error: {:?}", e)));
panic_handler.forward_from(&service);
let client = service.client();
let mut instream: Box<io::Read> = match cmd.file_path {
Some(f) => Box::new(try!(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f)))),
None => Box::new(io::stdin()),
};
const READAHEAD_BYTES: usize = 8;
let mut first_bytes: Vec<u8> = vec![0; READAHEAD_BYTES];
let mut first_read = 0;
let format = match cmd.format {
Some(format) => format,
None => {
first_read = try!(instream.read(&mut first_bytes).map_err(|_| "Error reading from the file/stream."));
match first_bytes[0] {
0xf9 => DataFormat::Binary,
_ => DataFormat::Hex,
}
}
};
let informant = Informant::new(client.clone(), None, None, cmd.logger_config.color);
let do_import = |bytes| {
while client.queue_info().is_full() { sleep(Duration::from_secs(1)); }
match client.import_block(bytes) {
Err(BlockImportError::Import(ImportError::AlreadyInChain)) => {
trace!("Skipping block already in chain.");
}
Err(e) => {
return Err(format!("Cannot import block: {:?}", e));
},
Ok(_) => {},
}
informant.tick();
Ok(())
};
match format {
DataFormat::Binary => {
loop {
let mut bytes = if first_read > 0 {first_bytes.clone()} else {vec![0; READAHEAD_BYTES]};
let n = if first_read > 0 {
first_read
} else {
try!(instream.read(&mut bytes).map_err(|_| "Error reading from the file/stream."))
};
if n == 0 { break; }
first_read = 0;
let s = try!(PayloadInfo::from(&bytes).map_err(|e| format!("Invalid RLP in the file/stream: {:?}", e))).total();
bytes.resize(s, 0);
try!(instream.read_exact(&mut bytes[n..]).map_err(|_| "Error reading from the file/stream."));
try!(do_import(bytes));
}
}
DataFormat::Hex => {
for line in BufReader::new(instream).lines() {
let s = try!(line.map_err(|_| "Error reading from the file/stream."));
let s = if first_read > 0 {from_utf8(&first_bytes).unwrap().to_owned() + &(s[..])} else {s};
first_read = 0;
let bytes = try!(s.from_hex().map_err(|_| "Invalid hex in file/stream."));
try!(do_import(bytes));
}
}
}
client.flush_queue();
Ok("Import completed.".into())
}
fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
// Setup panic handler
let panic_handler = PanicHandler::new_in_arc();
let format = cmd.format.unwrap_or_else(Default::default);
// load spec file
let spec = try!(cmd.spec.spec());
// load genesis hash
let genesis_hash = spec.genesis_header().hash();
// Setup logging
let _logger = setup_log(&cmd.logger_config);
fdlimit::raise_fd_limit();
// select pruning algorithm
let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());
// prepare client_path
let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
// execute upgrades
try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
// prepare client config
let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.wal, VMType::default(), "".into(), spec.fork_name.as_ref());
let service = try!(ClientService::start(
client_config,
spec,
Path::new(&client_path),
Arc::new(Miner::with_spec(try!(cmd.spec.spec())))
).map_err(|e| format!("Client service error: {:?}", e)));
panic_handler.forward_from(&service);
let client = service.client();
let mut out: Box<io::Write> = match cmd.file_path {
Some(f) => Box::new(try!(fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f)))),
None => Box::new(io::stdout()),
};
let from = try!(client.block_number(cmd.from_block).ok_or("From block could not be found"));
let to = try!(client.block_number(cmd.to_block).ok_or("From block could not be found"));
for i in from..(to + 1) {
let b = client.block(BlockID::Number(i)).unwrap();
match format {
DataFormat::Binary => { out.write(&b).expect("Couldn't write to stream."); }
DataFormat::Hex => { out.write_fmt(format_args!("{}", b.pretty())).expect("Couldn't write to stream."); }
}
}
Ok("Export completed.".into())
}
#[cfg(test)]
mod test {
use super::DataFormat;
#[test]
fn test_data_format_parsing() {
assert_eq!(DataFormat::Binary, "binary".parse().unwrap());
assert_eq!(DataFormat::Binary, "bin".parse().unwrap());
assert_eq!(DataFormat::Hex, "hex".parse().unwrap());
}
}

109
parity/cache.rs Normal file
View File

@ -0,0 +1,109 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::cmp::max;
const MIN_BC_CACHE_MB: u32 = 4;
const MIN_DB_CACHE_MB: u32 = 2;
const MIN_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 16;
const DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 50;
/// Configuration for application cache sizes.
/// All values are represented in MB.
#[derive(Debug, PartialEq)]
pub struct CacheConfig {
/// Size of database cache set using option `set_block_cache_size_mb`
/// 50% is blockchain
/// 25% is tracing
/// 25% is state
db: u32,
/// Size of blockchain cache.
blockchain: u32,
/// Size of transaction queue cache.
queue: u32,
}
impl Default for CacheConfig {
fn default() -> Self {
CacheConfig::new(64, 8, DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB)
}
}
impl CacheConfig {
/// Creates new cache config with cumulative size equal `total`.
pub fn new_with_total_cache_size(total: u32) -> Self {
CacheConfig {
db: total * 7 / 8,
blockchain: total / 8,
queue: DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB,
}
}
/// Creates new cache config with gitven details.
pub fn new(db: u32, blockchain: u32, queue: u32) -> Self {
CacheConfig {
db: db,
blockchain: blockchain,
queue: queue,
}
}
/// Size of db cache for blockchain.
pub fn db_blockchain_cache_size(&self) -> u32 {
max(MIN_DB_CACHE_MB, self.blockchain / 4)
}
/// Size of db cache for state.
pub fn db_state_cache_size(&self) -> u32 {
max(MIN_DB_CACHE_MB, self.db * 3 / 4)
}
/// Size of block queue size limit
pub fn queue(&self) -> u32 {
max(self.queue, MIN_BLOCK_QUEUE_SIZE_LIMIT_MB)
}
/// Size of the blockchain cache.
pub fn blockchain(&self) -> u32 {
max(self.blockchain, MIN_BC_CACHE_MB)
}
}
#[cfg(test)]
mod tests {
use super::CacheConfig;
#[test]
fn test_cache_config_constructor() {
let config = CacheConfig::new_with_total_cache_size(200);
assert_eq!(config.db, 175);
assert_eq!(config.blockchain(), 25);
assert_eq!(config.queue(), 50);
}
#[test]
fn test_cache_config_db_cache_sizes() {
let config = CacheConfig::new_with_total_cache_size(400);
assert_eq!(config.db, 350);
assert_eq!(config.db_blockchain_cache_size(), 12);
assert_eq!(config.db_state_cache_size(), 262);
}
#[test]
fn test_cache_config_default() {
assert_eq!(CacheConfig::default(), CacheConfig::new(64, 8, super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB));
}
}

View File

@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::version; use util::version;
use docopt::Docopt;
pub const USAGE: &'static str = r#" pub const USAGE: &'static str = r#"
Parity. Ethereum Client. Parity. Ethereum Client.
@ -22,6 +23,8 @@ Parity. Ethereum Client.
Copyright 2015, 2016 Ethcore (UK) Limited Copyright 2015, 2016 Ethcore (UK) Limited
Usage: Usage:
parity [options]
parity ui [options]
parity daemon <pid-file> [options] parity daemon <pid-file> [options]
parity account (new | list ) [options] parity account (new | list ) [options]
parity account import <path>... [options] parity account import <path>... [options]
@ -29,8 +32,6 @@ Usage:
parity import [ <file> ] [options] parity import [ <file> ] [options]
parity export [ <file> ] [options] parity export [ <file> ] [options]
parity signer new-token [options] parity signer new-token [options]
parity [options]
parity ui [options]
Operating Options: Operating Options:
--mode MODE Set the operating mode. MODE can be one of: --mode MODE Set the operating mode. MODE can be one of:
@ -47,8 +48,8 @@ Operating Options:
[default: 3600]. [default: 3600].
--chain CHAIN Specify the blockchain type. CHAIN may be either a --chain CHAIN Specify the blockchain type. CHAIN may be either a
JSON chain specification file or olympic, frontier, JSON chain specification file or olympic, frontier,
homestead, mainnet, morden, homestead-dogmatic, or homestead, mainnet, morden, classic or testnet
testnet [default: homestead]. [default: homestead].
-d --db-path PATH Specify the database & configuration directory path -d --db-path PATH Specify the database & configuration directory path
[default: $HOME/.parity]. [default: $HOME/.parity].
--keys-path PATH Specify the path for JSON key files to be found --keys-path PATH Specify the path for JSON key files to be found
@ -78,7 +79,8 @@ Networking Options:
--no-network Disable p2p networking. --no-network Disable p2p networking.
--port PORT Override the port on which the node should listen --port PORT Override the port on which the node should listen
[default: 30303]. [default: 30303].
--peers NUM Try to maintain that many peers [default: 25]. --min-peers NUM Try to maintain at least NUM peers [default: 25].
--max-peers NUM Allow up to that many peers [default: 50].
--nat METHOD Specify method to use for determining public --nat METHOD Specify method to use for determining public
address. Must be one of: any, none, upnp, address. Must be one of: any, none, upnp,
extip:<IP> [default: any]. extip:<IP> [default: any].
@ -105,8 +107,8 @@ API and Console Options:
--jsonrpc-apis APIS Specify the APIs available through the JSONRPC --jsonrpc-apis APIS Specify the APIs available through the JSONRPC
interface. APIS is a comma-delimited list of API interface. APIS is a comma-delimited list of API
name. Possible name are web3, eth, net, personal, name. Possible name are web3, eth, net, personal,
ethcore, ethcore_set, traces. ethcore, ethcore_set, traces, rpc.
[default: web3,eth,net,ethcore,personal,traces]. [default: web3,eth,net,ethcore,personal,traces,rpc].
--jsonrpc-hosts HOSTS List of allowed Host header values. This option will --jsonrpc-hosts HOSTS List of allowed Host header values. This option will
validate the Host header sent by the browser, it validate the Host header sent by the browser, it
is additional security against some attack is additional security against some attack
@ -201,18 +203,16 @@ Footprint Options:
fast - maintain journal overlay. Fast but 50MB used. fast - maintain journal overlay. Fast but 50MB used.
auto - use the method most recently synced or auto - use the method most recently synced or
default to fast if none synced [default: auto]. default to fast if none synced [default: auto].
--cache-pref-size BYTES Specify the preferred size of the blockchain cache in --cache-size-db MB Override database cache size [default: 64].
bytes [default: 16384]. --cache-size-blocks MB Specify the prefered size of the blockchain cache in
--cache-max-size BYTES Specify the maximum size of the blockchain cache in megabytes [default: 8].
bytes [default: 262144]. --cache-size-queue MB Specify the maximum size of memory to use for block
--queue-max-size BYTES Specify the maximum size of memory to use for block queue [default: 50].
queue [default: 52428800]. --cache-size MB Set total amount of discretionary memory to use for
--cache MEGABYTES Set total amount of discretionary memory to use for
the entire system, overrides other cache and queue the entire system, overrides other cache and queue
options. options.
--fast-and-loose Disables DB WAL, which gives a significant speed up
Database Options: but means an unclean exit is unrecoverable.
--db-cache-size MB Override RocksDB database cache size.
--db-compaction TYPE Database compaction type. TYPE may be one of: --db-compaction TYPE Database compaction type. TYPE may be one of:
ssd - suitable for SSDs and fast HDDs; ssd - suitable for SSDs and fast HDDs;
hdd - suitable for slow HDDs [default: ssd]. hdd - suitable for slow HDDs [default: ssd].
@ -239,7 +239,7 @@ Legacy Options:
Overrides the --keys-path option. Overrides the --keys-path option.
--datadir PATH Equivalent to --db-path PATH. --datadir PATH Equivalent to --db-path PATH.
--networkid INDEX Equivalent to --network-id INDEX. --networkid INDEX Equivalent to --network-id INDEX.
--maxpeers COUNT Equivalent to --peers COUNT. --peers NUM Equivalent to --min-peers NUM.
--nodekey KEY Equivalent to --node-key KEY. --nodekey KEY Equivalent to --node-key KEY.
--nodiscover Equivalent to --no-discovery. --nodiscover Equivalent to --no-discovery.
-j --jsonrpc Does nothing; JSON-RPC is on by default now. -j --jsonrpc Does nothing; JSON-RPC is on by default now.
@ -260,6 +260,7 @@ Legacy Options:
--basic-tx-usd. --basic-tx-usd.
--etherbase ADDRESS Equivalent to --author ADDRESS. --etherbase ADDRESS Equivalent to --author ADDRESS.
--extradata STRING Equivalent to --extra-data STRING. --extradata STRING Equivalent to --extra-data STRING.
--cache MB Equivalent to --cache-size MB.
Miscellaneous Options: Miscellaneous Options:
-l --logging LOGGING Specify the logging level. Must conform to the same -l --logging LOGGING Specify the logging level. Must conform to the same
@ -271,7 +272,7 @@ Miscellaneous Options:
-h --help Show this screen. -h --help Show this screen.
"#; "#;
#[derive(Debug, RustcDecodable)] #[derive(Debug, PartialEq, RustcDecodable)]
pub struct Args { pub struct Args {
pub cmd_daemon: bool, pub cmd_daemon: bool,
pub cmd_account: bool, pub cmd_account: bool,
@ -294,7 +295,6 @@ pub struct Args {
pub flag_identity: String, pub flag_identity: String,
pub flag_unlock: Option<String>, pub flag_unlock: Option<String>,
pub flag_password: Vec<String>, pub flag_password: Vec<String>,
pub flag_cache: Option<usize>,
pub flag_keys_path: String, pub flag_keys_path: String,
pub flag_keys_iterations: u32, pub flag_keys_iterations: u32,
pub flag_no_import_keys: bool, pub flag_no_import_keys: bool,
@ -303,15 +303,21 @@ pub struct Args {
pub flag_pruning: String, pub flag_pruning: String,
pub flag_tracing: String, pub flag_tracing: String,
pub flag_port: u16, pub flag_port: u16,
pub flag_peers: usize, pub flag_min_peers: u16,
pub flag_max_peers: u16,
pub flag_no_discovery: bool, pub flag_no_discovery: bool,
pub flag_nat: String, pub flag_nat: String,
pub flag_node_key: Option<String>, pub flag_node_key: Option<String>,
pub flag_reserved_peers: Option<String>, pub flag_reserved_peers: Option<String>,
pub flag_reserved_only: bool, pub flag_reserved_only: bool,
pub flag_cache_pref_size: usize,
pub flag_cache_max_size: usize, pub flag_cache_size_db: u32,
pub flag_queue_max_size: usize, pub flag_cache_size_blocks: u32,
pub flag_cache_size_queue: u32,
pub flag_cache_size: Option<u32>,
pub flag_cache: Option<u32>,
pub flag_fast_and_loose: bool,
pub flag_no_jsonrpc: bool, pub flag_no_jsonrpc: bool,
pub flag_jsonrpc_interface: String, pub flag_jsonrpc_interface: String,
pub flag_jsonrpc_port: u16, pub flag_jsonrpc_port: u16,
@ -360,7 +366,7 @@ pub struct Args {
pub flag_geth: bool, pub flag_geth: bool,
pub flag_nodekey: Option<String>, pub flag_nodekey: Option<String>,
pub flag_nodiscover: bool, pub flag_nodiscover: bool,
pub flag_maxpeers: Option<usize>, pub flag_peers: Option<u16>,
pub flag_datadir: Option<String>, pub flag_datadir: Option<String>,
pub flag_extradata: Option<String>, pub flag_extradata: Option<String>,
pub flag_etherbase: Option<String>, pub flag_etherbase: Option<String>,
@ -380,13 +386,18 @@ pub struct Args {
pub flag_dapps_off: bool, pub flag_dapps_off: bool,
pub flag_ipcpath: Option<String>, pub flag_ipcpath: Option<String>,
pub flag_ipcapi: Option<String>, pub flag_ipcapi: Option<String>,
pub flag_db_cache_size: Option<usize>,
pub flag_db_compaction: String, pub flag_db_compaction: String,
pub flag_fat_db: bool, pub flag_fat_db: bool,
} }
pub fn print_version() { impl Default for Args {
println!("\ fn default() -> Self {
Docopt::new(USAGE).unwrap().argv(&[] as &[&str]).decode().unwrap()
}
}
pub fn print_version() -> String {
format!("\
Parity Parity
version {} version {}
Copyright 2015, 2016 Ethcore (UK) Limited Copyright 2015, 2016 Ethcore (UK) Limited
@ -395,6 +406,6 @@ This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law. There is NO WARRANTY, to the extent permitted by law.
By Wood/Paronyan/Kotewicz/Drwięga/Volf.\ By Wood/Paronyan/Kotewicz/Drwięga/Volf.\
", version()); ", version())
} }

File diff suppressed because it is too large Load Diff

View File

@ -15,17 +15,17 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc; use std::sync::Arc;
use std::str::FromStr;
use std::net::SocketAddr; use std::net::SocketAddr;
use util::panics::PanicHandler; use util::panics::PanicHandler;
use die::*;
use rpc_apis; use rpc_apis;
use helpers::replace_home;
#[cfg(feature = "dapps")] #[cfg(feature = "dapps")]
pub use ethcore_dapps::Server as WebappServer; pub use ethcore_dapps::Server as WebappServer;
#[cfg(not(feature = "dapps"))] #[cfg(not(feature = "dapps"))]
pub struct WebappServer; pub struct WebappServer;
#[derive(Debug, PartialEq, Clone)]
pub struct Configuration { pub struct Configuration {
pub enabled: bool, pub enabled: bool,
pub interface: String, pub interface: String,
@ -35,18 +35,31 @@ pub struct Configuration {
pub dapps_path: String, pub dapps_path: String,
} }
impl Default for Configuration {
fn default() -> Self {
Configuration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8080,
user: None,
pass: None,
dapps_path: replace_home("$HOME/.parity/dapps"),
}
}
}
pub struct Dependencies { pub struct Dependencies {
pub panic_handler: Arc<PanicHandler>, pub panic_handler: Arc<PanicHandler>,
pub apis: Arc<rpc_apis::Dependencies>, pub apis: Arc<rpc_apis::Dependencies>,
} }
pub fn new(configuration: Configuration, deps: Dependencies) -> Option<WebappServer> { pub fn new(configuration: Configuration, deps: Dependencies) -> Result<Option<WebappServer>, String> {
if !configuration.enabled { if !configuration.enabled {
return None; return Ok(None);
} }
let url = format!("{}:{}", configuration.interface, configuration.port); let url = format!("{}:{}", configuration.interface, configuration.port);
let addr = SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid Webapps listen host/port given.", url)); let addr = try!(url.parse().map_err(|_| format!("Invalid Webapps listen host/port given: {}", url)));
let auth = configuration.user.as_ref().map(|username| { let auth = configuration.user.as_ref().map(|username| {
let password = configuration.pass.as_ref().map_or_else(|| { let password = configuration.pass.as_ref().map_or_else(|| {
@ -59,7 +72,7 @@ pub fn new(configuration: Configuration, deps: Dependencies) -> Option<WebappSer
(username.to_owned(), password) (username.to_owned(), password)
}); });
Some(setup_dapps_server(deps, configuration.dapps_path, &addr, auth)) Ok(Some(try!(setup_dapps_server(deps, configuration.dapps_path, &addr, auth))))
} }
#[cfg(not(feature = "dapps"))] #[cfg(not(feature = "dapps"))]
@ -68,8 +81,8 @@ pub fn setup_dapps_server(
_dapps_path: String, _dapps_path: String,
_url: &SocketAddr, _url: &SocketAddr,
_auth: Option<(String, String)>, _auth: Option<(String, String)>,
) -> ! { ) -> Result<WebappServer, String> {
die!("Your Parity version has been compiled without WebApps support.") Err("Your Parity version has been compiled without WebApps support.".into())
} }
#[cfg(feature = "dapps")] #[cfg(feature = "dapps")]
@ -78,7 +91,7 @@ pub fn setup_dapps_server(
dapps_path: String, dapps_path: String,
url: &SocketAddr, url: &SocketAddr,
auth: Option<(String, String)> auth: Option<(String, String)>
) -> WebappServer { ) -> Result<WebappServer, String> {
use ethcore_dapps as dapps; use ethcore_dapps as dapps;
let server = dapps::ServerBuilder::new(dapps_path); let server = dapps::ServerBuilder::new(dapps_path);
@ -93,15 +106,14 @@ pub fn setup_dapps_server(
}; };
match start_result { match start_result {
Err(dapps::ServerError::IoError(err)) => die_with_io_error("WebApps", err), Err(dapps::ServerError::IoError(err)) => Err(format!("WebApps io error: {}", err)),
Err(e) => die!("WebApps: {:?}", e), Err(e) => Err(format!("WebApps error: {:?}", e)),
Ok(server) => { Ok(server) => {
server.set_panic_handler(move || { server.set_panic_handler(move || {
deps.panic_handler.notify_all("Panic in WebApp thread.".to_owned()); deps.panic_handler.notify_all("Panic in WebApp thread.".to_owned());
}); });
server Ok(server)
}, },
} }
} }

148
parity/deprecated.rs Normal file
View File

@ -0,0 +1,148 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::fmt;
use cli::Args;
/// A deprecated CLI option, together with how (if at all) it is still honoured.
#[derive(Debug, PartialEq)]
pub enum Deprecated {
// option is accepted but has no effect (its behaviour is now the default)
DoesNothing(&'static str),
// option was renamed: (old flag, suggested replacement flag)
Replaced(&'static str, &'static str),
}
// Human-readable warning printed at startup for each deprecated flag found.
impl fmt::Display for Deprecated {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
Deprecated::DoesNothing(s) => write!(f, "Option '{}' does nothing. It's on by default", s),
Deprecated::Replaced(old, new) => write!(f, "Option '{}' is deprecated. Please use '{}' instead", old, new),
}
}
}
impl Deprecated {
	// One constructor per known deprecated flag, so each notice's text is
	// defined in exactly one place (the tests below reuse these).
	fn jsonrpc() -> Self {
		Deprecated::DoesNothing("--jsonrpc")
	}

	fn rpc() -> Self {
		Deprecated::DoesNothing("--rpc")
	}

	fn jsonrpc_off() -> Self {
		Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc")
	}

	fn webapp() -> Self {
		Deprecated::DoesNothing("--webapp")
	}

	fn dapps_off() -> Self {
		// bug fix: the suggested replacement was misspelt "--no-daps";
		// the negation flags follow the "--no-<name>" pattern ("--no-jsonrpc",
		// "--no-ipc"), and the feature is spelt "dapps" everywhere else
		Deprecated::Replaced("--dapps-off", "--no-dapps")
	}

	fn ipcdisable() -> Self {
		Deprecated::Replaced("--ipcdisable", "--no-ipc")
	}

	fn ipc_off() -> Self {
		Deprecated::Replaced("--ipc-off", "--no-ipc")
	}

	fn etherbase() -> Self {
		Deprecated::Replaced("--etherbase", "--author")
	}

	fn extradata() -> Self {
		Deprecated::Replaced("--extradata", "--extra-data")
	}
}
/// Returns a notice for every deprecated option present on the command line,
/// in a fixed, documented order.
pub fn find_deprecated(args: &Args) -> Vec<Deprecated> {
	// Pair each deprecated flag's presence with its notice, then keep the
	// notices whose flag was actually supplied.
	let candidates = vec![
		(args.flag_jsonrpc, Deprecated::jsonrpc()),
		(args.flag_rpc, Deprecated::rpc()),
		(args.flag_jsonrpc_off, Deprecated::jsonrpc_off()),
		(args.flag_webapp, Deprecated::webapp()),
		(args.flag_dapps_off, Deprecated::dapps_off()),
		(args.flag_ipcdisable, Deprecated::ipcdisable()),
		(args.flag_ipc_off, Deprecated::ipc_off()),
		(args.flag_etherbase.is_some(), Deprecated::etherbase()),
		(args.flag_extradata.is_some(), Deprecated::extradata()),
	];

	candidates.into_iter()
		.filter(|&(present, _)| present)
		.map(|(_, notice)| notice)
		.collect()
}
#[cfg(test)]
mod tests {
use cli::Args;
use super::{Deprecated, find_deprecated};
// Checks the empty case, then sets every deprecated flag at once; the
// expected vector mirrors the check order inside `find_deprecated`.
#[test]
fn test_find_deprecated() {
assert_eq!(find_deprecated(&Args::default()), vec![]);
assert_eq!(find_deprecated(&{
let mut args = Args::default();
args.flag_jsonrpc = true;
args.flag_rpc = true;
args.flag_jsonrpc_off = true;
args.flag_webapp = true;
args.flag_dapps_off = true;
args.flag_ipcdisable = true;
args.flag_ipc_off = true;
args.flag_etherbase = Some(Default::default());
args.flag_extradata = Some(Default::default());
args
}), vec![
Deprecated::jsonrpc(),
Deprecated::rpc(),
Deprecated::jsonrpc_off(),
Deprecated::webapp(),
Deprecated::dapps_off(),
Deprecated::ipcdisable(),
Deprecated::ipc_off(),
Deprecated::etherbase(),
Deprecated::extradata(),
]);
}
}

View File

@ -1,61 +0,0 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std;
use ethcore;
use ethcore::client::Error as ClientError;
use util::UtilError;
use std::process::exit;
// Formats its arguments and terminates the process via `die_with_message`.
// Never returns (exit(1) below).
#[macro_export]
macro_rules! die {
($($arg:tt)*) => (::die::die_with_message(&format!("{}", format_args!($($arg)*))));
}
// Terminates with a message appropriate to the given ethcore error; IO errors
// get friendlier wording via `die_with_io_error`.
pub fn die_with_error(module: &'static str, e: ethcore::error::Error) -> ! {
use ethcore::error::Error;
match e {
Error::Util(UtilError::StdIo(e)) => die_with_io_error(module, e),
Error::Client(ClientError::Trace(e)) => die_with_message(&format!("{}", e)),
_ => {
trace!(target: module, "{:?}", e);
die!("{}: {}", module, e);
}
}
}
// Terminates with a user-friendly message for common bind/listen IO failures;
// anything else is reported verbatim.
pub fn die_with_io_error(module: &'static str, e: std::io::Error) -> ! {
trace!(target: module, "{:?}", e);
match e.kind() {
std::io::ErrorKind::PermissionDenied => {
die!("{}: No permissions to bind to specified port.", module)
},
std::io::ErrorKind::AddrInUse => {
die!("{}: Specified address is already in use. Please make sure that nothing is listening on the same port or try using a different one.", module)
},
std::io::ErrorKind::AddrNotAvailable => {
die!("{}: Could not use specified interface or given address is invalid.", module)
},
_ => die!("{}: {}", module, e),
}
}
// Prints the error and exits with status 1.
// NOTE(review): the message goes to stdout, not stderr — confirm this is
// intended before scripts start parsing output.
pub fn die_with_message(msg: &str) -> ! {
println!("ERROR: {}", msg);
exit(1);
}

86
parity/dir.rs Normal file
View File

@ -0,0 +1,86 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::fs;
use std::path::{PathBuf, Path};
use util::{H64, H256};
use util::journaldb::Algorithm;
use helpers::replace_home;
// this const is irrelevent cause we do have migrations now,
// but we still use it for backwards compatibility
const LEGACY_CLIENT_DB_VER_STR: &'static str = "5.3";
/// Filesystem locations used by the node, each stored as an expanded path string.
#[derive(Debug, PartialEq)]
pub struct Directories {
// root data directory (databases live underneath it)
pub db: String,
// key store location
pub keys: String,
// signer (trusted token) location
pub signer: String,
// dapps content location
pub dapps: String,
}
impl Default for Directories {
	/// All four locations live under the user's `~/.parity` tree;
	/// `replace_home` expands the `$HOME` placeholder per platform.
	fn default() -> Self {
		let base = "$HOME/.parity";
		Directories {
			db: replace_home(base),
			keys: replace_home(&format!("{}/keys", base)),
			signer: replace_home(&format!("{}/signer", base)),
			dapps: replace_home(&format!("{}/dapps", base)),
		}
	}
}
impl Directories {
// Ensures every configured directory exists (mkdir -p semantics);
// returns the first IO error as a display string.
pub fn create_dirs(&self) -> Result<(), String> {
try!(fs::create_dir_all(&self.db).map_err(|e| e.to_string()));
try!(fs::create_dir_all(&self.keys).map_err(|e| e.to_string()));
try!(fs::create_dir_all(&self.signer).map_err(|e| e.to_string()));
try!(fs::create_dir_all(&self.dapps).map_err(|e| e.to_string()));
Ok(())
}
/// Get the root path for database.
/// Layout: `<db>/<genesis-prefix>[-<fork>]/v<ver>-sec-<pruning>`.
// NOTE(review): `H64::from(genesis_hash)` presumably shortens the 256-bit
// genesis hash to a 64-bit directory prefix — confirm against util's From impl.
pub fn db_version_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
let mut dir = Path::new(&self.db).to_path_buf();
dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default()));
dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
dir
}
/// Get the path for the databases given the genesis_hash and information on the databases.
// Just the version path with a trailing "db" component.
pub fn client_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
let mut dir = self.db_version_path(genesis_hash, fork_name, pruning);
dir.push("db");
dir
}
}
#[cfg(test)]
mod tests {
use super::Directories;
use helpers::replace_home;
// Pins the default directory layout under $HOME/.parity.
#[test]
fn test_default_directories() {
let expected = Directories {
db: replace_home("$HOME/.parity"),
keys: replace_home("$HOME/.parity/keys"),
signer: replace_home("$HOME/.parity/signer"),
dapps: replace_home("$HOME/.parity/dapps"),
};
assert_eq!(expected, Directories::default());
}
}

403
parity/helpers.rs Normal file
View File

@ -0,0 +1,403 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::{io, env};
use std::io::{Write, Read, BufReader, BufRead};
use std::time::Duration;
use std::path::Path;
use std::fs::File;
use util::{clean_0x, U256, Uint, Address, path, is_valid_node_url, H256, CompactionProfile};
use util::journaldb::Algorithm;
use ethcore::client::{Mode, BlockID, Switch, VMType, DatabaseCompactionProfile, ClientConfig};
use ethcore::miner::PendingSet;
use cache::CacheConfig;
use dir::Directories;
use params::Pruning;
use upgrade::upgrade;
use migration::migrate;
pub fn to_duration(s: &str) -> Result<Duration, String> {
to_seconds(s).map(Duration::from_secs)
}
/// Converts a duration keyword ("hourly", "1 day", …), a "<n><unit>" suffix
/// form ("15minutes", "2hours", …) or a bare number of seconds into seconds.
fn to_seconds(s: &str) -> Result<u64, String> {
	const MINUTE: u64 = 60;
	const HOUR: u64 = 60 * MINUTE;
	const DAY: u64 = 24 * HOUR;
	// uniform error for anything whose numeric part fails to parse
	let err = || format!("{}: Invalid duration given. See parity --help for more information.", s);
	match s {
		"twice-daily" => Ok(12 * HOUR),
		"half-hourly" => Ok(30 * MINUTE),
		"1second" | "1 second" | "second" => Ok(1),
		"1minute" | "1 minute" | "minute" => Ok(MINUTE),
		"hourly" | "1hour" | "1 hour" | "hour" => Ok(HOUR),
		"daily" | "1day" | "1 day" | "day" => Ok(DAY),
		// suffix forms: strip the unit, parse the leading count, scale
		x if x.ends_with("seconds") => x[..x.len() - 7].parse().map_err(|_| err()),
		x if x.ends_with("minutes") => x[..x.len() - 7].parse::<u64>().map_err(|_| err()).map(|n| n * MINUTE),
		x if x.ends_with("hours") => x[..x.len() - 5].parse::<u64>().map_err(|_| err()).map(|n| n * HOUR),
		x if x.ends_with("days") => x[..x.len() - 4].parse::<u64>().map_err(|_| err()).map(|n| n * DAY),
		// plain number of seconds
		x => x.parse().map_err(|_| err()),
	}
}
// Maps the --mode flag to a client `Mode`; `timeout`/`alarm` are seconds used
// by the passive/dark variants.
// NOTE(review): error text says "Invalid address for --mode" — presumably
// should read "value"; left as-is since it is a runtime string.
pub fn to_mode(s: &str, timeout: u64, alarm: u64) -> Result<Mode, String> {
match s {
"active" => Ok(Mode::Active),
"passive" => Ok(Mode::Passive(Duration::from_secs(timeout), Duration::from_secs(alarm))),
"dark" => Ok(Mode::Dark(Duration::from_secs(timeout))),
_ => Err(format!("{}: Invalid address for --mode. Must be one of active, passive or dark.", s)),
}
}
// Parses "latest", a block number, or a block hash (in that order of
// preference). The two `parse()` calls resolve to different target types via
// inference from the `BlockID` variant they feed — do not reorder.
pub fn to_block_id(s: &str) -> Result<BlockID, String> {
if s == "latest" {
Ok(BlockID::Latest)
} else if let Ok(num) = s.parse() {
Ok(BlockID::Number(num))
} else if let Ok(hash) = s.parse() {
Ok(BlockID::Hash(hash))
} else {
Err("Invalid block.".into())
}
}
// Accepts either a decimal literal or a (possibly 0x-prefixed) hex literal.
pub fn to_u256(s: &str) -> Result<U256, String> {
if let Ok(decimal) = U256::from_dec_str(s) {
Ok(decimal)
} else if let Ok(hex) = clean_0x(s).parse() {
Ok(hex)
} else {
Err(format!("Invalid numeric value: {}", s))
}
}
// Maps the pending-set CLI keyword to the miner's `PendingSet` strategy.
pub fn to_pending_set(s: &str) -> Result<PendingSet, String> {
match s {
"cheap" => Ok(PendingSet::AlwaysQueue),
"strict" => Ok(PendingSet::AlwaysSealing),
"lenient" => Ok(PendingSet::SealingOrElseQueue),
other => Err(format!("Invalid pending set value: {:?}", other)),
}
}
// Parses an optional (possibly 0x-prefixed) address; `None` yields the
// all-zero default address.
pub fn to_address(s: Option<String>) -> Result<Address, String> {
match s {
Some(ref a) => clean_0x(a).parse().map_err(|_| format!("Invalid address: {:?}", a)),
None => Ok(Address::default())
}
}
/// Parses a comma-separated list of (possibly 0x-prefixed) addresses.
/// `None` or an empty string yields an empty list; any malformed entry fails
/// the whole parse with a descriptive error.
pub fn to_addresses(s: &Option<String>) -> Result<Vec<Address>, String> {
	match *s {
		// bug fix: the guard was inverted (`adds.is_empty()`), so every
		// non-empty list silently produced no addresses while the empty
		// string was fed to the parser and errored
		Some(ref adds) if !adds.is_empty() => adds.split(',')
			.map(|a| clean_0x(a).parse().map_err(|_| format!("Invalid address: {:?}", a)))
			.collect(),
		_ => Ok(Vec::new()),
	}
}
/// Tries to parse string as a price.
///
/// Accepts any decimal literal `f32` accepts; the error message names the
/// offending input.
pub fn to_price(s: &str) -> Result<f32, String> {
	// bug fix: the original message misspelt "transaction" and never
	// interpolated the input — it printed a literal 's'
	s.parse::<f32>().map_err(|_| format!("Invalid transaction price '{}' given. Must be a decimal number.", s))
}
/// Replaces `$HOME` str with home directory path.
/// Also normalises every `/` to the platform separator afterwards.
// NOTE(review): panics if the home directory cannot be determined
// (`env::home_dir().unwrap()`) — acceptable for CLI startup, but confirm.
pub fn replace_home(arg: &str) -> String {
// the $HOME directory on mac os should be `~/Library` or `~/Library/Application Support`
let r = arg.replace("$HOME", env::home_dir().unwrap().to_str().unwrap());
r.replace("/", &::std::path::MAIN_SEPARATOR.to_string() )
}
/// Flush output buffer.
// Used after `print!` prompts so the text appears before blocking on input.
pub fn flush_stdout() {
io::stdout().flush().expect("stdout is flushable; qed");
}
/// Returns default geth ipc path.
// On Windows a fixed named pipe is returned; elsewhere the path comes from
// util's ethereum data-dir helpers (testnet vs default).
pub fn geth_ipc_path(testnet: bool) -> String {
// Windows path should not be hardcoded here.
// Instead it should be a part of path::ethereum
if cfg!(windows) {
return r"\\.\pipe\geth.ipc".to_owned();
}
if testnet {
path::ethereum::with_testnet("geth.ipc").to_str().unwrap().to_owned()
} else {
path::ethereum::with_default("geth.ipc").to_str().unwrap().to_owned()
}
}
/// Formats and returns parity ipc path.
// Windows always uses the fixed named pipe; other platforms expand $HOME.
pub fn parity_ipc_path(s: &str) -> String {
// Windows path should not be hardcoded here.
if cfg!(windows) {
return r"\\.\pipe\parity.jsonrpc".to_owned();
}
replace_home(s)
}
/// Validates and formats bootnodes option.
///
/// Splits a comma-separated enode list, validating each entry; `None` or an
/// empty string mean "no boot nodes". The first invalid entry fails the parse.
pub fn to_bootnodes(bootnodes: &Option<String>) -> Result<Vec<String>, String> {
	let list = match *bootnodes {
		Some(ref x) if !x.is_empty() => x,
		// absent or empty — nothing to validate
		_ => return Ok(vec![]),
	};
	list.split(',').map(|url| {
		if is_valid_node_url(url) {
			Ok(url.to_owned())
		} else {
			Err(format!("Invalid node address format given for a boot node: {}", url))
		}
	}).collect()
}
// Test-only baseline network configuration shared by unit tests.
#[cfg(test)]
pub fn default_network_config() -> ::util::NetworkConfiguration {
use util::{NetworkConfiguration, NonReservedPeerMode};
NetworkConfiguration {
config_path: Some(replace_home("$HOME/.parity/network")),
listen_address: Some("0.0.0.0:30303".parse().unwrap()),
public_address: None,
udp_port: None,
nat_enabled: true,
discovery_enabled: true,
boot_nodes: Vec::new(),
use_secret: None,
max_peers: 50,
min_peers: 25,
reserved_nodes: Vec::new(),
non_reserved_mode: NonReservedPeerMode::Accept,
}
}
// Assembles a `ClientConfig` from CLI-derived settings. Cache figures arrive
// in megabytes and are converted where the target field expects bytes.
#[cfg_attr(feature = "dev", allow(too_many_arguments))]
pub fn to_client_config(
cache_config: &CacheConfig,
dirs: &Directories,
genesis_hash: H256,
mode: Mode,
tracing: Switch,
pruning: Pruning,
compaction: DatabaseCompactionProfile,
wal: bool,
vm_type: VMType,
name: String,
fork_name: Option<&String>,
) -> ClientConfig {
let mut client_config = ClientConfig::default();
let mb = 1024 * 1024;
// blockchain cache hard limit, in bytes
client_config.blockchain.max_cache_size = cache_config.blockchain() as usize * mb;
// preferred blockchain cache size: 3/4 of the limit, in bytes
client_config.blockchain.pref_cache_size = cache_config.blockchain() as usize * 3 / 4 * mb;
// db blockchain cache size, in megabytes
client_config.blockchain.db_cache_size = Some(cache_config.db_blockchain_cache_size() as usize);
// db state cache size, in megabytes
client_config.db_cache_size = Some(cache_config.db_state_cache_size() as usize);
// db queue cache size, in bytes
client_config.queue.max_mem_use = cache_config.queue() as usize * mb;
client_config.mode = mode;
client_config.tracing.enabled = tracing;
// resolve the user's pruning choice against what already exists on disk
client_config.pruning = pruning.to_algorithm(dirs, genesis_hash, fork_name);
client_config.db_compaction = compaction;
client_config.db_wal = wal;
client_config.vm_type = vm_type;
client_config.name = name;
client_config
}
// Runs data-dir upgrade scripts, then database migrations for the resolved
// client path. Any failure is reported as a display string.
pub fn execute_upgrades(
dirs: &Directories,
genesis_hash: H256,
fork_name: Option<&String>,
pruning: Algorithm,
compaction_profile: CompactionProfile
) -> Result<(), String> {
match upgrade(Some(&dirs.db)) {
Ok(upgrades_applied) if upgrades_applied > 0 => {
debug!("Executed {} upgrade scripts - ok", upgrades_applied);
},
Err(e) => {
return Err(format!("Error upgrading parity data: {:?}", e));
},
_ => {},
}
let client_path = dirs.db_version_path(genesis_hash, fork_name, pruning);
migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e))
}
/// Prompts user asking for password.
/// Reads the password twice (no echo) and rejects mismatches.
pub fn password_prompt() -> Result<String, String> {
use rpassword::read_password;
println!("Please note that password is NOT RECOVERABLE.");
print!("Type password: ");
flush_stdout();
let password = read_password().unwrap();
print!("Repeat password: ");
flush_stdout();
let password_repeat = read_password().unwrap();
if password != password_repeat {
return Err("Passwords do not match!".into());
}
Ok(password)
}
/// Read a password from password file.
///
/// The file's content is returned with any trailing line ending(s) stripped,
/// so "secret\n", "secret\r\n" and "secret" all yield "secret". An empty file
/// yields an empty password.
pub fn password_from_file<P>(path: P) -> Result<String, String> where P: AsRef<Path> {
	let mut file = try!(File::open(path).map_err(|_| "Unable to open password file."));
	let mut file_content = String::new();
	try!(file.read_to_string(&mut file_content).map_err(|_| "Unable to read password file."));
	// bug fix: the original unconditionally sliced off the last byte
	// (`..len - 1`), which panics on an empty file and eats the final
	// character of files not ending in a newline
	let trimmed_len = file_content.trim_right_matches(|c| c == '\n' || c == '\r').len();
	file_content.truncate(trimmed_len);
	Ok(file_content)
}
/// Reads passwords from files. Treats each line as a separate password.
pub fn passwords_from_files(files: Vec<String>) -> Result<Vec<String>, String> {
let passwords = files.iter().map(|filename| {
let file = try!(File::open(filename).map_err(|_| format!("{} Unable to read password file. Ensure it exists and permissions are correct.", filename)));
let reader = BufReader::new(&file);
let lines = reader.lines()
.map(|l| l.unwrap())
.collect::<Vec<String>>();
Ok(lines)
}).collect::<Result<Vec<Vec<String>>, String>>();
Ok(try!(passwords).into_iter().flat_map(|x| x).collect())
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use util::{U256};
use ethcore::client::{Mode, BlockID};
use ethcore::miner::PendingSet;
use super::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_address, to_price, geth_ipc_path, to_bootnodes};
// Covers every keyword and "<n><unit>" form accepted by to_duration.
#[test]
fn test_to_duration() {
assert_eq!(to_duration("twice-daily").unwrap(), Duration::from_secs(12 * 60 * 60));
assert_eq!(to_duration("half-hourly").unwrap(), Duration::from_secs(30 * 60));
assert_eq!(to_duration("1second").unwrap(), Duration::from_secs(1));
assert_eq!(to_duration("2seconds").unwrap(), Duration::from_secs(2));
assert_eq!(to_duration("15seconds").unwrap(), Duration::from_secs(15));
assert_eq!(to_duration("1minute").unwrap(), Duration::from_secs(1 * 60));
assert_eq!(to_duration("2minutes").unwrap(), Duration::from_secs(2 * 60));
assert_eq!(to_duration("15minutes").unwrap(), Duration::from_secs(15 * 60));
assert_eq!(to_duration("hourly").unwrap(), Duration::from_secs(60 * 60));
assert_eq!(to_duration("daily").unwrap(), Duration::from_secs(24 * 60 * 60));
assert_eq!(to_duration("1hour").unwrap(), Duration::from_secs(1 * 60 * 60));
assert_eq!(to_duration("2hours").unwrap(), Duration::from_secs(2 * 60 * 60));
assert_eq!(to_duration("15hours").unwrap(), Duration::from_secs(15 * 60 * 60));
assert_eq!(to_duration("1day").unwrap(), Duration::from_secs(1 * 24 * 60 * 60));
assert_eq!(to_duration("2days").unwrap(), Duration::from_secs(2 * 24 *60 * 60));
assert_eq!(to_duration("15days").unwrap(), Duration::from_secs(15 * 24 * 60 * 60));
}
#[test]
fn test_to_mode() {
assert_eq!(to_mode("active", 0, 0).unwrap(), Mode::Active);
assert_eq!(to_mode("passive", 10, 20).unwrap(), Mode::Passive(Duration::from_secs(10), Duration::from_secs(20)));
assert_eq!(to_mode("dark", 20, 30).unwrap(), Mode::Dark(Duration::from_secs(20)));
assert!(to_mode("other", 20, 30).is_err());
}
#[test]
fn test_to_block_id() {
assert_eq!(to_block_id("latest").unwrap(), BlockID::Latest);
assert_eq!(to_block_id("0").unwrap(), BlockID::Number(0));
assert_eq!(to_block_id("2").unwrap(), BlockID::Number(2));
assert_eq!(to_block_id("15").unwrap(), BlockID::Number(15));
assert_eq!(
to_block_id("9fc84d84f6a785dc1bd5abacfcf9cbdd3b6afb80c0f799bfb2fd42c44a0c224e").unwrap(),
BlockID::Hash("9fc84d84f6a785dc1bd5abacfcf9cbdd3b6afb80c0f799bfb2fd42c44a0c224e".parse().unwrap())
);
}
#[test]
fn test_to_u256() {
assert_eq!(to_u256("0").unwrap(), U256::from(0));
assert_eq!(to_u256("11").unwrap(), U256::from(11));
assert_eq!(to_u256("0x11").unwrap(), U256::from(17));
assert!(to_u256("u").is_err())
}
#[test]
fn test_pending_set() {
assert_eq!(to_pending_set("cheap").unwrap(), PendingSet::AlwaysQueue);
assert_eq!(to_pending_set("strict").unwrap(), PendingSet::AlwaysSealing);
assert_eq!(to_pending_set("lenient").unwrap(), PendingSet::SealingOrElseQueue);
assert!(to_pending_set("othe").is_err());
}
#[test]
fn test_to_address() {
assert_eq!(
to_address(Some("0xD9A111feda3f362f55Ef1744347CDC8Dd9964a41".into())).unwrap(),
"D9A111feda3f362f55Ef1744347CDC8Dd9964a41".parse().unwrap()
);
assert_eq!(
to_address(Some("D9A111feda3f362f55Ef1744347CDC8Dd9964a41".into())).unwrap(),
"D9A111feda3f362f55Ef1744347CDC8Dd9964a41".parse().unwrap()
);
assert_eq!(to_address(None).unwrap(), Default::default());
}
#[test]
#[cfg_attr(feature = "dev", allow(float_cmp))]
fn test_to_price() {
assert_eq!(to_price("1").unwrap(), 1.0);
assert_eq!(to_price("2.3").unwrap(), 2.3);
assert_eq!(to_price("2.33").unwrap(), 2.33);
}
// Windows always maps to the fixed named pipe.
#[test]
#[cfg(windows)]
fn test_geth_ipc_path() {
assert_eq!(geth_ipc_path(true), r"\\.\pipe\geth.ipc".to_owned());
assert_eq!(geth_ipc_path(false), r"\\.\pipe\geth.ipc".to_owned());
}
#[test]
#[cfg(not(windows))]
fn test_geth_ipc_path() {
use util::path;
assert_eq!(geth_ipc_path(true), path::ethereum::with_testnet("geth.ipc").to_str().unwrap().to_owned());
assert_eq!(geth_ipc_path(false), path::ethereum::with_default("geth.ipc").to_str().unwrap().to_owned());
}
// The two-node case uses the same enode twice, so the expected list repeats it.
#[test]
fn test_to_bootnodes() {
let one_bootnode = "enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303";
let two_bootnodes = "enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303,enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303";
assert_eq!(to_bootnodes(&Some("".into())), Ok(vec![]));
assert_eq!(to_bootnodes(&None), Ok(vec![]));
assert_eq!(to_bootnodes(&Some(one_bootnode.into())), Ok(vec![one_bootnode.into()]));
assert_eq!(to_bootnodes(&Some(two_bootnodes.into())), Ok(vec![one_bootnode.into(), one_bootnode.into()]));
}
}

View File

@ -108,12 +108,12 @@ impl Informant {
info!(target: "import", "{} {} {}", info!(target: "import", "{} {} {}",
match importing { match importing {
true => format!("Syncing {} {} {} {}+{} Qed", true => format!("Syncing {} {} {} {}+{} Qed",
paint(White.bold(), format!("{:>8}", format!("#{}", chain_info.best_block_number))), paint(White.bold(), format!("{:>8}", format!("#{}", chain_info.best_block_number))),
paint(White.bold(), format!("{}", chain_info.best_block_hash)), paint(White.bold(), format!("{}", chain_info.best_block_hash)),
{ {
let last_report = match write_report.deref() { &Some(ref last_report) => last_report.clone(), _ => ClientReport::default() }; let last_report = match write_report.deref() { &Some(ref last_report) => last_report.clone(), _ => ClientReport::default() };
format!("{} blk/s {} tx/s {} Mgas/s", format!("{} blk/s {} tx/s {} Mgas/s",
paint(Yellow.bold(), format!("{:4}", ((report.blocks_imported - last_report.blocks_imported) * 1000) as u64 / elapsed.as_milliseconds())), paint(Yellow.bold(), format!("{:4}", ((report.blocks_imported - last_report.blocks_imported) * 1000) as u64 / elapsed.as_milliseconds())),
paint(Yellow.bold(), format!("{:4}", ((report.transactions_applied - last_report.transactions_applied) * 1000) as u64 / elapsed.as_milliseconds())), paint(Yellow.bold(), format!("{:4}", ((report.transactions_applied - last_report.transactions_applied) * 1000) as u64 / elapsed.as_milliseconds())),
paint(Yellow.bold(), format!("{:3}", ((report.gas_processed - last_report.gas_processed) / From::from(elapsed.as_milliseconds() * 1000)).low_u64())) paint(Yellow.bold(), format!("{:3}", ((report.gas_processed - last_report.gas_processed) / From::from(elapsed.as_milliseconds() * 1000)).low_u64()))
@ -132,7 +132,7 @@ impl Informant {
}, },
paint(Cyan.bold(), format!("{:2}", sync_info.num_active_peers)), paint(Cyan.bold(), format!("{:2}", sync_info.num_active_peers)),
paint(Cyan.bold(), format!("{:2}", sync_info.num_peers)), paint(Cyan.bold(), format!("{:2}", sync_info.num_peers)),
paint(Cyan.bold(), format!("{:2}", net_config.ideal_peers)) paint(Cyan.bold(), format!("{:2}", if sync_info.num_peers as u32 > net_config.min_peers { net_config.max_peers} else { net_config.min_peers} ))
), ),
_ => String::new(), _ => String::new(),
}, },
@ -154,13 +154,13 @@ impl Informant {
} }
impl ChainNotify for Informant { impl ChainNotify for Informant {
fn new_blocks(&self, _imported: Vec<H256>, _invalid: Vec<H256>, enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, duration: u64) { fn new_blocks(&self, imported: Vec<H256>, _invalid: Vec<H256>, _enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, duration: u64) {
let mut last_import = self.last_import.lock(); let mut last_import = self.last_import.lock();
let queue_info = self.client.queue_info(); let queue_info = self.client.queue_info();
let importing = queue_info.unverified_queue_size + queue_info.verified_queue_size > 3 let importing = queue_info.unverified_queue_size + queue_info.verified_queue_size > 3
|| self.sync.as_ref().map_or(false, |s| s.status().is_major_syncing()); || self.sync.as_ref().map_or(false, |s| s.status().is_major_syncing());
if Instant::now() > *last_import + Duration::from_secs(1) && !importing { if Instant::now() > *last_import + Duration::from_secs(1) && !importing {
if let Some(block) = enacted.last().and_then(|h| self.client.block(BlockID::Hash(h.clone()))) { if let Some(block) = imported.last().and_then(|h| self.client.block(BlockID::Hash(*h))) {
let view = BlockView::new(&block); let view = BlockView::new(&block);
let header = view.header(); let header = view.header();
let tx_count = view.transactions_count(); let tx_count = view.transactions_count();
@ -179,7 +179,7 @@ impl ChainNotify for Informant {
} }
self.skipped.store(0, AtomicOrdering::Relaxed); self.skipped.store(0, AtomicOrdering::Relaxed);
} else { } else {
self.skipped.fetch_add(enacted.len(), AtomicOrdering::Relaxed); self.skipped.fetch_add(imported.len(), AtomicOrdering::Relaxed);
} }
} }
} }

View File

@ -25,16 +25,17 @@
extern crate docopt; extern crate docopt;
extern crate num_cpus; extern crate num_cpus;
extern crate rustc_serialize; extern crate rustc_serialize;
extern crate ethcore_devtools as devtools;
#[macro_use]
extern crate ethcore_util as util; extern crate ethcore_util as util;
extern crate ethcore; extern crate ethcore;
extern crate ethsync; extern crate ethsync;
#[macro_use] #[macro_use]
extern crate log as rlog; extern crate log as rlog;
extern crate env_logger; extern crate env_logger;
extern crate ethcore_logger;
extern crate ctrlc; extern crate ctrlc;
extern crate fdlimit; extern crate fdlimit;
#[cfg(not(windows))]
extern crate daemonize;
extern crate time; extern crate time;
extern crate number_prefix; extern crate number_prefix;
extern crate rpassword; extern crate rpassword;
@ -53,15 +54,12 @@ extern crate ansi_term;
#[macro_use] #[macro_use]
extern crate lazy_static; extern crate lazy_static;
extern crate regex; extern crate regex;
extern crate ethcore_logger;
extern crate isatty; extern crate isatty;
#[cfg(feature = "dapps")] #[cfg(feature = "dapps")]
extern crate ethcore_dapps; extern crate ethcore_dapps;
mod cache;
#[macro_use]
mod die;
mod upgrade; mod upgrade;
mod rpc; mod rpc;
mod dapps; mod dapps;
@ -73,529 +71,63 @@ mod migration;
mod signer; mod signer;
mod rpc_apis; mod rpc_apis;
mod url; mod url;
mod helpers;
mod params;
mod deprecated;
mod dir;
mod modules; mod modules;
mod account;
mod blockchain;
mod presale;
mod run;
mod sync;
use std::io::{Write, Read, BufReader, BufRead}; use std::{process, env};
use std::ops::Deref;
use std::sync::Arc;
use std::path::Path;
use std::fs::File;
use std::str::{FromStr, from_utf8};
use std::thread::sleep;
use std::time::Duration;
use rustc_serialize::hex::FromHex;
use ctrlc::CtrlC;
use util::{H256, ToPretty, PayloadInfo, Bytes, Colour, version, journaldb, RotatingLogger};
use util::panics::{MayPanic, ForwardPanic, PanicHandler};
use ethcore::client::{BlockID, BlockChainClient, ClientConfig, get_db_path, BlockImportError, Mode};
use ethcore::error::{ImportError};
use ethcore::service::ClientService;
use ethcore::spec::Spec;
use ethsync::{NetworkConfiguration};
use ethcore::miner::{Miner, MinerService, ExternalMiner};
use migration::migrate;
use informant::Informant;
use util::{Mutex, Condvar};
use ethcore_logger::setup_log;
#[cfg(feature="ipc")]
use ethcore::client::ChainNotify;
use die::*;
use cli::print_version; use cli::print_version;
use rpc::RpcServer; use configuration::{Cmd, Configuration};
use signer::{SignerServer, new_token}; use deprecated::find_deprecated;
use dapps::WebappServer;
use io_handler::ClientIoHandler; fn execute(command: Cmd) -> Result<String, String> {
use configuration::{Configuration}; match command {
Cmd::Run(run_cmd) => {
try!(run::execute(run_cmd));
Ok("".into())
},
Cmd::Version => Ok(print_version()),
Cmd::Account(account_cmd) => account::execute(account_cmd),
Cmd::ImportPresaleWallet(presale_cmd) => presale::execute(presale_cmd),
Cmd::Blockchain(blockchain_cmd) => blockchain::execute(blockchain_cmd),
Cmd::SignerToken(path) => signer::new_token(path),
}
}
fn start() -> Result<String, String> {
let conf = Configuration::parse(env::args()).unwrap_or_else(|e| e.exit());
let deprecated = find_deprecated(&conf.args);
for d in deprecated {
println!("{}", d);
}
let cmd = try!(conf.into_command());
execute(cmd)
}
fn main() { fn main() {
let conf = Configuration::parse(); // just redirect to the sync::main()
execute(conf); if std::env::args().nth(1).map_or(false, |arg| arg == "sync") {
} sync::main();
fn execute(conf: Configuration) {
if conf.args.flag_version {
print_version();
return; return;
} }
if conf.args.cmd_signer { match start() {
execute_signer(conf); Ok(result) => {
return; println!("{}", result);
}
let spec = conf.spec();
let client_config = conf.client_config(&spec);
execute_upgrades(&conf, &spec, &client_config);
if conf.args.cmd_daemon {
daemonize(&conf);
}
// Setup panic handler
let panic_handler = PanicHandler::new_in_arc();
// Setup logging
let logger = setup_log(&conf.log_settings());
// Raise fdlimit
unsafe { ::fdlimit::raise_fd_limit(); }
if conf.args.cmd_account {
execute_account_cli(conf);
return;
}
if conf.args.cmd_wallet {
execute_wallet_cli(conf);
return;
}
if conf.args.cmd_export {
execute_export(conf, panic_handler);
return;
}
if conf.args.cmd_import {
execute_import(conf, panic_handler);
return;
}
execute_client(conf, spec, client_config, panic_handler, logger);
}
#[cfg(not(windows))]
fn daemonize(conf: &Configuration) {
use daemonize::Daemonize;
Daemonize::new()
.pid_file(conf.args.arg_pid_file.clone())
.chown_pid_file(true)
.start()
.unwrap_or_else(|e| die!("Couldn't daemonize; {}", e));
}
#[cfg(windows)]
fn daemonize(_conf: &Configuration) {
}
fn execute_upgrades(conf: &Configuration, spec: &Spec, client_config: &ClientConfig) {
match ::upgrade::upgrade(Some(&conf.path())) {
Ok(upgrades_applied) if upgrades_applied > 0 => {
debug!("Executed {} upgrade scripts - ok", upgrades_applied);
}, },
Err(e) => { Err(err) => {
die!("Error upgrading parity data: {:?}", e); println!("{}", err);
}, process::exit(1);
_ => {},
}
let db_path = get_db_path(Path::new(&conf.path()), client_config.pruning, spec.genesis_header().hash(), spec.fork_name.as_ref());
let result = migrate(&db_path, client_config.pruning);
if let Err(err) = result {
die_with_message(&format!("{} DB path: {}", err, db_path.to_string_lossy()));
}
}
fn execute_client(conf: Configuration, spec: Spec, client_config: ClientConfig, panic_handler: Arc<PanicHandler>, logger: Arc<RotatingLogger>) {
let mut hypervisor = modules::hypervisor();
info!("Starting {}", Colour::White.bold().paint(format!("{}", version())));
info!("Using state DB journalling strategy {}", Colour::White.bold().paint(match client_config.pruning {
journaldb::Algorithm::Archive => "archive",
journaldb::Algorithm::EarlyMerge => "light",
journaldb::Algorithm::OverlayRecent => "fast",
journaldb::Algorithm::RefCounted => "basic",
}));
// Display warning about using experimental journaldb types
match client_config.pruning {
journaldb::Algorithm::EarlyMerge | journaldb::Algorithm::RefCounted => {
warn!("Your chosen strategy is {}! You can re-run with --pruning to change.", Colour::Red.bold().paint("unstable"));
}
_ => {}
}
// Display warning about using unlock with signer
if conf.signer_enabled() && conf.args.flag_unlock.is_some() {
warn!("Using Trusted Signer and --unlock is not recommended!");
warn!("NOTE that Signer will not ask you to confirm transactions from unlocked account.");
}
let net_settings = conf.net_settings(&spec);
let sync_config = conf.sync_config(&spec);
// Secret Store
let account_service = Arc::new(conf.account_service());
// Miner
let miner = Miner::new(conf.miner_options(), conf.gas_pricer(), conf.spec(), Some(account_service.clone()));
miner.set_author(conf.author().unwrap_or_default());
miner.set_gas_floor_target(conf.gas_floor_target());
miner.set_gas_ceil_target(conf.gas_ceil_target());
miner.set_extra_data(conf.extra_data());
miner.set_transactions_limit(conf.args.flag_tx_queue_size);
// Build client
let service = ClientService::start(
client_config,
spec,
Path::new(&conf.path()),
miner.clone(),
).unwrap_or_else(|e| die_with_error("Client", e));
panic_handler.forward_from(&service);
let client = service.client();
let external_miner = Arc::new(ExternalMiner::default());
let network_settings = Arc::new(conf.network_settings());
// Sync
let (sync_provider, manage_network, chain_notify) =
modules::sync(&mut hypervisor, sync_config, NetworkConfiguration::from(net_settings), client.clone(), &conf.log_settings())
.unwrap_or_else(|e| die_with_error("Sync", e));
service.add_notify(chain_notify.clone());
// if network is active by default
if match conf.mode() { Mode::Dark(..) => false, _ => !conf.args.flag_no_network } {
chain_notify.start();
}
let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies {
signer_port: conf.signer_port(),
signer_queue: Arc::new(rpc_apis::ConfirmationsQueue::default()),
client: client.clone(),
sync: sync_provider.clone(),
net: manage_network.clone(),
secret_store: account_service.clone(),
miner: miner.clone(),
external_miner: external_miner.clone(),
logger: logger.clone(),
settings: network_settings.clone(),
allow_pending_receipt_query: !conf.args.flag_geth,
net_service: manage_network.clone(),
});
let dependencies = rpc::Dependencies {
panic_handler: panic_handler.clone(),
apis: deps_for_rpc_apis.clone(),
};
// Setup http rpc
let rpc_server = rpc::new_http(rpc::HttpConfiguration {
enabled: network_settings.rpc_enabled,
interface: conf.rpc_interface(),
port: network_settings.rpc_port,
apis: conf.rpc_apis(),
cors: conf.rpc_cors(),
hosts: conf.rpc_hosts(),
}, &dependencies);
// setup ipc rpc
let _ipc_server = rpc::new_ipc(conf.ipc_settings(), &dependencies);
debug!("IPC: {}", conf.ipc_settings());
if conf.args.flag_webapp { println!("WARNING: Flag -w/--webapp is deprecated. Dapps server is now on by default. Ignoring."); }
let dapps_server = dapps::new(dapps::Configuration {
enabled: conf.dapps_enabled(),
interface: conf.dapps_interface(),
port: conf.args.flag_dapps_port,
user: conf.args.flag_dapps_user.clone(),
pass: conf.args.flag_dapps_pass.clone(),
dapps_path: conf.directories().dapps,
}, dapps::Dependencies {
panic_handler: panic_handler.clone(),
apis: deps_for_rpc_apis.clone(),
});
// Set up a signer
let signer_server = signer::start(signer::Configuration {
enabled: conf.signer_enabled(),
port: conf.args.flag_signer_port,
signer_path: conf.directories().signer,
}, signer::Dependencies {
panic_handler: panic_handler.clone(),
apis: deps_for_rpc_apis.clone(),
});
let informant = Arc::new(Informant::new(service.client(), Some(sync_provider.clone()), Some(manage_network.clone()), conf.have_color()));
service.add_notify(informant.clone());
// Register IO handler
let io_handler = Arc::new(ClientIoHandler {
client: service.client(),
info: informant,
sync: sync_provider.clone(),
net: manage_network.clone(),
accounts: account_service.clone(),
});
service.register_io_handler(io_handler).expect("Error registering IO handler");
if conf.args.cmd_ui {
if !conf.dapps_enabled() {
die_with_message("Cannot use UI command with Dapps turned off.");
}
url::open(&format!("http://{}:{}/", conf.dapps_interface(), conf.args.flag_dapps_port));
}
// Handle exit
wait_for_exit(panic_handler, rpc_server, dapps_server, signer_server);
}
fn flush_stdout() {
::std::io::stdout().flush().expect("stdout is flushable; qed");
}
enum DataFormat {
Hex,
Binary,
}
fn execute_export(conf: Configuration, panic_handler: Arc<PanicHandler>) {
let spec = conf.spec();
let client_config = conf.client_config(&spec);
// Build client
let service = ClientService::start(
client_config, spec, Path::new(&conf.path()), Arc::new(Miner::with_spec(conf.spec()))
).unwrap_or_else(|e| die_with_error("Client", e));
panic_handler.forward_from(&service);
let client = service.client();
// we have a client!
let parse_block_id = |s: &str, arg: &str| -> u64 {
if s == "latest" {
client.chain_info().best_block_number
} else if let Ok(n) = s.parse::<u64>() {
n
} else if let Ok(h) = H256::from_str(s) {
client.block_number(BlockID::Hash(h)).unwrap_or_else(|| {
die!("Unknown block hash passed to {} parameter: {:?}", arg, s);
})
} else {
die!("Invalid {} parameter given: {:?}", arg, s);
}
};
let from = parse_block_id(&conf.args.flag_from, "--from");
let to = parse_block_id(&conf.args.flag_to, "--to");
let format = match conf.args.flag_format {
Some(x) => match x.deref() {
"binary" | "bin" => DataFormat::Binary,
"hex" => DataFormat::Hex,
x => die!("Invalid --format parameter given: {:?}", x),
},
None if conf.args.arg_file.is_none() => DataFormat::Hex,
None => DataFormat::Binary,
};
let mut out: Box<Write> = if let Some(f) = conf.args.arg_file {
Box::new(File::create(&f).unwrap_or_else(|_| die!("Cannot write to file given: {}", f)))
} else {
Box::new(::std::io::stdout())
};
for i in from..(to + 1) {
let b = client.deref().block(BlockID::Number(i)).unwrap();
match format {
DataFormat::Binary => { out.write(&b).expect("Couldn't write to stream."); }
DataFormat::Hex => { out.write_fmt(format_args!("{}", b.pretty())).expect("Couldn't write to stream."); }
} }
} }
} }
fn execute_import(conf: Configuration, panic_handler: Arc<PanicHandler>) {
let spec = conf.spec();
let client_config = conf.client_config(&spec);
// Build client
let service = ClientService::start(
client_config, spec, Path::new(&conf.path()), Arc::new(Miner::with_spec(conf.spec()))
).unwrap_or_else(|e| die_with_error("Client", e));
panic_handler.forward_from(&service);
let client = service.client();
let mut instream: Box<Read> = if let Some(ref f) = conf.args.arg_file {
let f = File::open(f).unwrap_or_else(|_| die!("Cannot open the file given: {}", f));
Box::new(f)
} else {
Box::new(::std::io::stdin())
};
const READAHEAD_BYTES: usize = 8;
let mut first_bytes: Bytes = vec![0; READAHEAD_BYTES];
let mut first_read = 0;
let format = match conf.args.flag_format {
Some(ref x) => match x.deref() {
"binary" | "bin" => DataFormat::Binary,
"hex" => DataFormat::Hex,
x => die!("Invalid --format parameter given: {:?}", x),
},
None => {
// autodetect...
first_read = instream.read(&mut(first_bytes[..])).unwrap_or_else(|_| die!("Error reading from the file/stream."));
match first_bytes[0] {
0xf9 => {
info!("Autodetected binary data format.");
DataFormat::Binary
}
_ => {
info!("Autodetected hex data format.");
DataFormat::Hex
}
}
}
};
let informant = Informant::new(client.clone(), None, None, conf.have_color());
let do_import = |bytes| {
while client.queue_info().is_full() { sleep(Duration::from_secs(1)); }
match client.import_block(bytes) {
Ok(_) => {}
Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { trace!("Skipping block already in chain."); }
Err(e) => die!("Cannot import block: {:?}", e)
}
informant.tick();
};
match format {
DataFormat::Binary => {
loop {
let mut bytes: Bytes = if first_read > 0 {first_bytes.clone()} else {vec![0; READAHEAD_BYTES]};
let n = if first_read > 0 {first_read} else {instream.read(&mut(bytes[..])).unwrap_or_else(|_| die!("Error reading from the file/stream."))};
if n == 0 { break; }
first_read = 0;
let s = PayloadInfo::from(&(bytes[..])).unwrap_or_else(|e| die!("Invalid RLP in the file/stream: {:?}", e)).total();
bytes.resize(s, 0);
instream.read_exact(&mut(bytes[READAHEAD_BYTES..])).unwrap_or_else(|_| die!("Error reading from the file/stream."));
do_import(bytes);
}
}
DataFormat::Hex => {
for line in BufReader::new(instream).lines() {
let s = line.unwrap_or_else(|_| die!("Error reading from the file/stream."));
let s = if first_read > 0 {from_utf8(&first_bytes).unwrap().to_owned() + &(s[..])} else {s};
first_read = 0;
let bytes = FromHex::from_hex(&(s[..])).unwrap_or_else(|_| die!("Invalid hex in file/stream."));
do_import(bytes);
}
}
}
while !client.queue_info().is_empty() {
sleep(Duration::from_secs(1));
informant.tick();
}
client.flush_queue();
}
fn execute_signer(conf: Configuration) {
if !conf.args.cmd_new_token {
die!("Unknown command.");
}
let path = conf.directories().signer;
let code = new_token(path).unwrap_or_else(|e| {
die!("Error generating token: {:?}", e)
});
println!("This key code will authorise your System Signer UI: {}", if conf.args.flag_no_color { code } else { format!("{}", Colour::White.bold().paint(code)) });
}
fn execute_account_cli(conf: Configuration) {
use ethcore::ethstore::{EthStore, import_accounts};
use ethcore::ethstore::dir::DiskDirectory;
use ethcore::account_provider::AccountProvider;
use rpassword::read_password;
let dir = Box::new(DiskDirectory::create(conf.keys_path()).unwrap());
let iterations = conf.keys_iterations();
let secret_store = AccountProvider::new(Box::new(EthStore::open_with_iterations(dir, iterations).unwrap()));
if conf.args.cmd_new {
println!("Please note that password is NOT RECOVERABLE.");
print!("Type password: ");
flush_stdout();
let password = read_password().unwrap();
print!("Repeat password: ");
flush_stdout();
let password_repeat = read_password().unwrap();
if password != password_repeat {
println!("Passwords do not match!");
return;
}
println!("New account address:");
let new_address = secret_store.new_account(&password).unwrap();
println!("{:?}", new_address);
return;
}
if conf.args.cmd_list {
println!("Known addresses:");
for addr in &secret_store.accounts() {
println!("{:?}", addr);
}
return;
}
if conf.args.cmd_import {
let to = DiskDirectory::create(conf.keys_path()).unwrap();
let mut imported = 0;
for path in &conf.args.arg_path {
let from = DiskDirectory::at(path);
imported += import_accounts(&from, &to).unwrap_or_else(|e| die!("Could not import accounts {}", e)).len();
}
println!("Imported {} keys", imported);
}
}
fn execute_wallet_cli(conf: Configuration) {
use ethcore::ethstore::{PresaleWallet, EthStore};
use ethcore::ethstore::dir::DiskDirectory;
use ethcore::account_provider::AccountProvider;
let wallet_path = conf.args.arg_path.first().unwrap();
let filename = conf.args.flag_password.first().unwrap();
let mut file = File::open(filename).unwrap_or_else(|_| die!("{} Unable to read password file.", filename));
let mut file_content = String::new();
file.read_to_string(&mut file_content).unwrap_or_else(|_| die!("{} Unable to read password file.", filename));
let dir = Box::new(DiskDirectory::create(conf.keys_path()).unwrap());
let iterations = conf.keys_iterations();
let store = AccountProvider::new(Box::new(EthStore::open_with_iterations(dir, iterations).unwrap()));
// remove eof
let pass = &file_content[..file_content.len() - 1];
let wallet = PresaleWallet::open(wallet_path).unwrap_or_else(|_| die!("Unable to open presale wallet."));
let kp = wallet.decrypt(pass).unwrap_or_else(|_| die!("Invalid password"));
let address = store.insert_account(kp.secret().clone(), pass).unwrap();
println!("Imported account: {}", address);
}
fn wait_for_exit(
panic_handler: Arc<PanicHandler>,
_rpc_server: Option<RpcServer>,
_dapps_server: Option<WebappServer>,
_signer_server: Option<SignerServer>
) {
let exit = Arc::new(Condvar::new());
// Handle possible exits
let e = exit.clone();
CtrlC::set_handler(move || { e.notify_all(); });
// Handle panics
let e = exit.clone();
panic_handler.on_panic(move |_reason| { e.notify_all(); });
// Wait for signal
let mutex = Mutex::new(());
exit.wait(&mut mutex.lock());
info!("Finishing work, please wait...");
}
/// Parity needs at least 1 test to generate coverage reports correctly.
#[test]
fn if_works() {
}

View File

@ -20,13 +20,18 @@ use std::io::{Read, Write, Error as IoError, ErrorKind};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::fmt::{Display, Formatter, Error as FmtError}; use std::fmt::{Display, Formatter, Error as FmtError};
use util::journaldb::Algorithm; use util::journaldb::Algorithm;
use util::migration::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError}; use util::migration::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError, Migration};
use util::kvdb::{CompactionProfile, Database, DatabaseConfig};
use ethcore::migrations; use ethcore::migrations;
use ethcore::client;
use ethcore::migrations::Extract;
/// Database is assumed to be at default version, when no version file is found. /// Database is assumed to be at default version, when no version file is found.
const DEFAULT_VERSION: u32 = 5; const DEFAULT_VERSION: u32 = 5;
/// Current version of database models. /// Current version of database models.
const CURRENT_VERSION: u32 = 7; const CURRENT_VERSION: u32 = 9;
/// First version of the consolidated database.
const CONSOLIDATION_VERSION: u32 = 9;
/// Defines how many items are migrated to the new version of database at once. /// Defines how many items are migrated to the new version of database at once.
const BATCH_SIZE: usize = 1024; const BATCH_SIZE: usize = 1024;
/// Version file name. /// Version file name.
@ -110,24 +115,10 @@ fn update_version(path: &Path) -> Result<(), Error> {
Ok(()) Ok(())
} }
/// Blocks database path. /// Consolidated database path
fn blocks_database_path(path: &Path) -> PathBuf { fn consolidated_database_path(path: &Path) -> PathBuf {
let mut blocks_path = path.to_owned();
blocks_path.push("blocks");
blocks_path
}
/// Extras database path.
fn extras_database_path(path: &Path) -> PathBuf {
let mut extras_path = path.to_owned();
extras_path.push("extras");
extras_path
}
/// State database path.
fn state_database_path(path: &Path) -> PathBuf {
let mut state_path = path.to_owned(); let mut state_path = path.to_owned();
state_path.push("state"); state_path.push("db");
state_path state_path
} }
@ -140,38 +131,56 @@ fn backup_database_path(path: &Path) -> PathBuf {
} }
/// Default migration settings. /// Default migration settings.
fn default_migration_settings() -> MigrationConfig { pub fn default_migration_settings(compaction_profile: &CompactionProfile) -> MigrationConfig {
MigrationConfig { MigrationConfig {
batch_size: BATCH_SIZE, batch_size: BATCH_SIZE,
compaction_profile: *compaction_profile,
} }
} }
/// Migrations on the blocks database. /// Migrations on the consolidated database.
fn blocks_database_migrations() -> Result<MigrationManager, Error> { fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
let manager = MigrationManager::new(default_migration_settings()); let manager = MigrationManager::new(default_migration_settings(compaction_profile));
Ok(manager) Ok(manager)
} }
/// Migrations on the extras database. /// Consolidates legacy databases into single one.
fn extras_database_migrations() -> Result<MigrationManager, Error> { fn consolidate_database(
let mut manager = MigrationManager::new(default_migration_settings()); old_db_path: PathBuf,
try!(manager.add_migration(migrations::extras::ToV6).map_err(|_| Error::MigrationImpossible)); new_db_path: PathBuf,
Ok(manager) column: Option<u32>,
} extract: Extract,
compaction_profile: &CompactionProfile) -> Result<(), Error> {
fn db_error(e: String) -> Error {
warn!("Cannot open Database for consolidation: {:?}", e);
Error::MigrationFailed
}
/// Migrations on the state database. let mut migration = migrations::ToV9::new(column, extract);
fn state_database_migrations(pruning: Algorithm) -> Result<MigrationManager, Error> { let config = default_migration_settings(compaction_profile);
let mut manager = MigrationManager::new(default_migration_settings()); let mut db_config = DatabaseConfig {
let res = match pruning { max_open_files: 64,
Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()), cache_size: None,
Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()), compaction: config.compaction_profile.clone(),
_ => return Err(Error::UnsuportedPruningMethod), columns: None,
wal: true,
}; };
try!(res.map_err(|_| Error::MigrationImpossible)); let old_path_str = try!(old_db_path.to_str().ok_or(Error::MigrationImpossible));
Ok(manager) let new_path_str = try!(new_db_path.to_str().ok_or(Error::MigrationImpossible));
let cur_db = try!(Database::open(&db_config, old_path_str).map_err(db_error));
// open new DB with proper number of columns
db_config.columns = migration.columns();
let mut new_db = try!(Database::open(&db_config, new_path_str).map_err(db_error));
// Migrate to new database (default column only)
try!(migration.migrate(&cur_db, &config, &mut new_db, None));
Ok(())
} }
/// Migrates database at given position with given migration rules. /// Migrates database at given position with given migration rules.
fn migrate_database(version: u32, db_path: PathBuf, mut migrations: MigrationManager) -> Result<(), Error> { fn migrate_database(version: u32, db_path: PathBuf, mut migrations: MigrationManager) -> Result<(), Error> {
// check if migration is needed // check if migration is needed
@ -207,23 +216,114 @@ fn exists(path: &Path) -> bool {
} }
/// Migrates the database. /// Migrates the database.
pub fn migrate(path: &Path, pruning: Algorithm) -> Result<(), Error> { pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionProfile) -> Result<(), Error> {
// read version file. // read version file.
let version = try!(current_version(path)); let version = try!(current_version(path));
// migrate the databases. // migrate the databases.
// main db directory may already exists, so let's check if we have blocks dir // main db directory may already exists, so let's check if we have blocks dir
if version < CURRENT_VERSION && exists(&blocks_database_path(path)) { if version > CURRENT_VERSION {
println!("Migrating database from version {} to {}", version, CURRENT_VERSION);
try!(migrate_database(version, blocks_database_path(path), try!(blocks_database_migrations())));
try!(migrate_database(version, extras_database_path(path), try!(extras_database_migrations())));
try!(migrate_database(version, state_database_path(path), try!(state_database_migrations(pruning))));
println!("Migration finished");
} else if version > CURRENT_VERSION {
return Err(Error::FutureDBVersion); return Err(Error::FutureDBVersion);
} }
// We are in the latest version, yay!
if version == CURRENT_VERSION {
return Ok(())
}
// Perform pre-consolidation migrations
if version < CONSOLIDATION_VERSION && exists(&legacy::blocks_database_path(path)) {
println!("Migrating database from version {} to {}", version, CONSOLIDATION_VERSION);
try!(migrate_database(version, legacy::blocks_database_path(path), try!(legacy::blocks_database_migrations(&compaction_profile))));
try!(migrate_database(version, legacy::extras_database_path(path), try!(legacy::extras_database_migrations(&compaction_profile))));
try!(migrate_database(version, legacy::state_database_path(path), try!(legacy::state_database_migrations(pruning, &compaction_profile))));
let db_path = consolidated_database_path(path);
// Remove the database dir (it shouldn't exist anyway, but it might when migration was interrupted)
let _ = fs::remove_dir_all(db_path.clone());
try!(consolidate_database(legacy::blocks_database_path(path), db_path.clone(), client::DB_COL_HEADERS, Extract::Header, &compaction_profile));
try!(consolidate_database(legacy::blocks_database_path(path), db_path.clone(), client::DB_COL_BODIES, Extract::Header, &compaction_profile));
try!(consolidate_database(legacy::extras_database_path(path), db_path.clone(), client::DB_COL_EXTRA, Extract::All, &compaction_profile));
try!(consolidate_database(legacy::state_database_path(path), db_path.clone(), client::DB_COL_STATE, Extract::All, &compaction_profile));
try!(consolidate_database(legacy::trace_database_path(path), db_path.clone(), client::DB_COL_TRACE, Extract::All, &compaction_profile));
let _ = fs::remove_dir_all(legacy::blocks_database_path(path));
let _ = fs::remove_dir_all(legacy::extras_database_path(path));
let _ = fs::remove_dir_all(legacy::state_database_path(path));
let _ = fs::remove_dir_all(legacy::trace_database_path(path));
println!("Migration finished");
}
// Further migrations
if version >= CONSOLIDATION_VERSION && version < CURRENT_VERSION && exists(&consolidated_database_path(path)) {
println!("Migrating database from version {} to {}", ::std::cmp::max(CONSOLIDATION_VERSION, version), CURRENT_VERSION);
try!(migrate_database(version, consolidated_database_path(path), try!(consolidated_database_migrations(&compaction_profile))));
println!("Migration finished");
}
// update version file. // update version file.
update_version(path) update_version(path)
} }
/// Old migrations utilities
mod legacy {
use super::*;
use std::path::{Path, PathBuf};
use util::journaldb::Algorithm;
use util::migration::{Manager as MigrationManager};
use util::kvdb::CompactionProfile;
use ethcore::migrations;
/// Blocks database path.
pub fn blocks_database_path(path: &Path) -> PathBuf {
let mut blocks_path = path.to_owned();
blocks_path.push("blocks");
blocks_path
}
/// Extras database path.
pub fn extras_database_path(path: &Path) -> PathBuf {
let mut extras_path = path.to_owned();
extras_path.push("extras");
extras_path
}
/// State database path.
pub fn state_database_path(path: &Path) -> PathBuf {
let mut state_path = path.to_owned();
state_path.push("state");
state_path
}
/// Trace database path.
pub fn trace_database_path(path: &Path) -> PathBuf {
let mut blocks_path = path.to_owned();
blocks_path.push("tracedb");
blocks_path
}
/// Migrations on the blocks database.
pub fn blocks_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
try!(manager.add_migration(migrations::blocks::V8::default()).map_err(|_| Error::MigrationImpossible));
Ok(manager)
}
/// Migrations on the extras database.
pub fn extras_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
try!(manager.add_migration(migrations::extras::ToV6).map_err(|_| Error::MigrationImpossible));
Ok(manager)
}
/// Migrations on the state database.
pub fn state_database_migrations(pruning: Algorithm, compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
let res = match pruning {
Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()),
Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()),
_ => return Err(Error::UnsuportedPruningMethod),
};
try!(res.map_err(|_| Error::MigrationImpossible));
Ok(manager)
}
}

View File

@ -23,8 +23,14 @@ use ethsync::{SyncConfig, NetworkConfiguration};
use self::no_ipc_deps::*; use self::no_ipc_deps::*;
#[cfg(feature="ipc")] #[cfg(feature="ipc")]
use self::ipc_deps::*; use self::ipc_deps::*;
use ethcore_logger::Config as LogConfig;
use ethcore_logger::Settings as LogSettings; pub mod service_urls {
pub const CLIENT: &'static str = "ipc:///tmp/parity-chain.ipc";
pub const SYNC: &'static str = "ipc:///tmp/parity-sync.ipc";
pub const SYNC_NOTIFY: &'static str = "ipc:///tmp/parity-sync-notify.ipc";
pub const NETWORK_MANAGER: &'static str = "ipc:///tmp/parity-manage-net.ipc";
}
#[cfg(not(feature="ipc"))] #[cfg(not(feature="ipc"))]
mod no_ipc_deps { mod no_ipc_deps {
@ -52,7 +58,6 @@ mod ipc_deps {
pub use ipc::binary::serialize; pub use ipc::binary::serialize;
} }
#[cfg(feature="ipc")] #[cfg(feature="ipc")]
pub fn hypervisor() -> Option<Hypervisor> { pub fn hypervisor() -> Option<Hypervisor> {
Some(Hypervisor::new()) Some(Hypervisor::new())
@ -64,7 +69,7 @@ pub fn hypervisor() -> Option<Hypervisor> {
} }
#[cfg(feature="ipc")] #[cfg(feature="ipc")]
fn sync_arguments(sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, log_settings: &LogSettings) -> BootArgs { fn sync_arguments(sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, log_settings: &LogConfig) -> BootArgs {
let service_config = ServiceConfiguration { let service_config = ServiceConfiguration {
sync: sync_cfg, sync: sync_cfg,
net: net_cfg, net: net_cfg,
@ -75,11 +80,11 @@ fn sync_arguments(sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, log_setti
// client service url and logging settings are passed in command line // client service url and logging settings are passed in command line
let mut cli_args = Vec::new(); let mut cli_args = Vec::new();
cli_args.push("ipc:///tmp/parity-chain.ipc".to_owned()); cli_args.push("sync".to_owned());
if !log_settings.color { cli_args.push("--no-color".to_owned()); } if !log_settings.color { cli_args.push("--no-color".to_owned()); }
if let Some(ref init) = log_settings.init { if let Some(ref mode) = log_settings.mode {
cli_args.push("-l".to_owned()); cli_args.push("-l".to_owned());
cli_args.push(init.to_owned()); cli_args.push(mode.to_owned());
} }
if let Some(ref file) = log_settings.file { if let Some(ref file) = log_settings.file {
cli_args.push("--log-file".to_owned()); cli_args.push("--log-file".to_owned());
@ -96,19 +101,19 @@ pub fn sync
sync_cfg: SyncConfig, sync_cfg: SyncConfig,
net_cfg: NetworkConfiguration, net_cfg: NetworkConfiguration,
_client: Arc<BlockChainClient>, _client: Arc<BlockChainClient>,
log_settings: &LogSettings, log_settings: &LogConfig,
) )
-> Result<SyncModules, ethcore::error::Error> -> Result<SyncModules, ethcore::error::Error>
{ {
let mut hypervisor = hypervisor_ref.take().expect("There should be hypervisor for ipc configuration"); let mut hypervisor = hypervisor_ref.take().expect("There should be hypervisor for ipc configuration");
hypervisor = hypervisor.module(SYNC_MODULE_ID, "sync", sync_arguments(sync_cfg, net_cfg, log_settings)); hypervisor = hypervisor.module(SYNC_MODULE_ID, "parity", sync_arguments(sync_cfg, net_cfg, log_settings));
hypervisor.start(); hypervisor.start();
hypervisor.wait_for_startup(); hypervisor.wait_for_startup();
let sync_client = init_client::<SyncClient<_>>("ipc:///tmp/parity-sync.ipc").unwrap(); let sync_client = init_client::<SyncClient<_>>(service_urls::SYNC).unwrap();
let notify_client = init_client::<ChainNotifyClient<_>>("ipc:///tmp/parity-sync-notify.ipc").unwrap(); let notify_client = init_client::<ChainNotifyClient<_>>(service_urls::SYNC_NOTIFY).unwrap();
let manage_client = init_client::<NetworkManagerClient<_>>("ipc:///tmp/parity-manage-net.ipc").unwrap(); let manage_client = init_client::<NetworkManagerClient<_>>(service_urls::NETWORK_MANAGER).unwrap();
*hypervisor_ref = Some(hypervisor); *hypervisor_ref = Some(hypervisor);
Ok((sync_client, manage_client, notify_client)) Ok((sync_client, manage_client, notify_client))
@ -121,7 +126,7 @@ pub fn sync
sync_cfg: SyncConfig, sync_cfg: SyncConfig,
net_cfg: NetworkConfiguration, net_cfg: NetworkConfiguration,
client: Arc<BlockChainClient>, client: Arc<BlockChainClient>,
_log_settings: &LogSettings, _log_settings: &LogConfig,
) )
-> Result<SyncModules, ethcore::error::Error> -> Result<SyncModules, ethcore::error::Error>
{ {

282
parity/params.rs Normal file
View File

@ -0,0 +1,282 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use util::{contents, Database, DatabaseConfig, journaldb, H256, Address, U256, version_data};
use util::journaldb::Algorithm;
use ethcore::client;
use ethcore::spec::Spec;
use ethcore::ethereum;
use ethcore::miner::{GasPricer, GasPriceCalibratorOptions};
use dir::Directories;
#[derive(Debug, PartialEq)]
pub enum SpecType {
Mainnet,
Testnet,
Olympic,
Classic,
Custom(String),
}
impl Default for SpecType {
fn default() -> Self {
SpecType::Mainnet
}
}
impl FromStr for SpecType {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let spec = match s {
"frontier" | "homestead" | "mainnet" => SpecType::Mainnet,
"frontier-dogmatic" | "homestead-dogmatic" | "classic" => SpecType::Classic,
"morden" | "testnet" => SpecType::Testnet,
"olympic" => SpecType::Olympic,
other => SpecType::Custom(other.into()),
};
Ok(spec)
}
}
impl SpecType {
pub fn spec(&self) -> Result<Spec, String> {
match *self {
SpecType::Mainnet => Ok(ethereum::new_frontier()),
SpecType::Testnet => Ok(ethereum::new_morden()),
SpecType::Olympic => Ok(ethereum::new_olympic()),
SpecType::Classic => Ok(ethereum::new_classic()),
SpecType::Custom(ref file) => Ok(Spec::load(&try!(contents(file).map_err(|_| "Could not load specification file."))))
}
}
}
#[derive(Debug, PartialEq)]
pub enum Pruning {
Specific(Algorithm),
Auto,
}
impl Default for Pruning {
fn default() -> Self {
Pruning::Auto
}
}
impl FromStr for Pruning {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"auto" => Ok(Pruning::Auto),
other => other.parse().map(Pruning::Specific),
}
}
}
impl Pruning {
pub fn to_algorithm(&self, dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm {
match *self {
Pruning::Specific(algo) => algo,
Pruning::Auto => Self::find_best_db(dirs, genesis_hash, fork_name),
}
}
fn find_best_db(dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm {
let mut algo_types = Algorithm::all_types();
// if all dbs have the same latest era, the last element is the default one
algo_types.push(Algorithm::default());
algo_types.into_iter().max_by_key(|i| {
let client_path = dirs.client_path(genesis_hash, fork_name, *i);
let config = DatabaseConfig::with_columns(client::DB_NO_OF_COLUMNS);
let db = match Database::open(&config, client_path.to_str().unwrap()) {
Ok(db) => db,
Err(_) => return 0,
};
let db = journaldb::new(Arc::new(db), *i, client::DB_COL_STATE);
trace!(target: "parity", "Looking for best DB: {} at {:?}", i, db.latest_era());
db.latest_era().unwrap_or(0)
}).unwrap()
}
}
#[derive(Debug, PartialEq)]
pub struct ResealPolicy {
pub own: bool,
pub external: bool,
}
impl Default for ResealPolicy {
fn default() -> Self {
ResealPolicy {
own: true,
external: true,
}
}
}
impl FromStr for ResealPolicy {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let (own, external) = match s {
"none" => (false, false),
"own" => (true, false),
"ext" => (false, true),
"all" => (true, true),
x => return Err(format!("Invalid reseal value: {}", x)),
};
let reseal = ResealPolicy {
own: own,
external: external,
};
Ok(reseal)
}
}
#[derive(Debug, PartialEq)]
pub struct AccountsConfig {
pub iterations: u32,
pub import_keys: bool,
pub testnet: bool,
pub password_files: Vec<String>,
pub unlocked_accounts: Vec<Address>,
}
impl Default for AccountsConfig {
fn default() -> Self {
AccountsConfig {
iterations: 10240,
import_keys: true,
testnet: false,
password_files: Vec::new(),
unlocked_accounts: Vec::new(),
}
}
}
#[derive(Debug, PartialEq)]
pub enum GasPricerConfig {
Fixed(U256),
Calibrated {
usd_per_tx: f32,
recalibration_period: Duration,
}
}
impl Default for GasPricerConfig {
fn default() -> Self {
GasPricerConfig::Calibrated {
usd_per_tx: 0.005,
recalibration_period: Duration::from_secs(3600),
}
}
}
impl Into<GasPricer> for GasPricerConfig {
fn into(self) -> GasPricer {
match self {
GasPricerConfig::Fixed(u) => GasPricer::Fixed(u),
GasPricerConfig::Calibrated { usd_per_tx, recalibration_period } => {
GasPricer::new_calibrated(GasPriceCalibratorOptions {
usd_per_tx: usd_per_tx,
recalibration_period: recalibration_period,
})
}
}
}
}
#[derive(Debug, PartialEq)]
pub struct MinerExtras {
pub author: Address,
pub extra_data: Vec<u8>,
pub gas_floor_target: U256,
pub gas_ceil_target: U256,
pub transactions_limit: usize,
}
impl Default for MinerExtras {
fn default() -> Self {
MinerExtras {
author: Default::default(),
extra_data: version_data(),
gas_floor_target: U256::from(4_700_000),
gas_ceil_target: U256::from(6_283_184),
transactions_limit: 1024,
}
}
}
#[cfg(test)]
mod tests {
use util::journaldb::Algorithm;
use super::{SpecType, Pruning, ResealPolicy};
#[test]
fn test_spec_type_parsing() {
assert_eq!(SpecType::Mainnet, "frontier".parse().unwrap());
assert_eq!(SpecType::Mainnet, "homestead".parse().unwrap());
assert_eq!(SpecType::Mainnet, "mainnet".parse().unwrap());
assert_eq!(SpecType::Testnet, "testnet".parse().unwrap());
assert_eq!(SpecType::Testnet, "morden".parse().unwrap());
assert_eq!(SpecType::Olympic, "olympic".parse().unwrap());
}
#[test]
fn test_spec_type_default() {
assert_eq!(SpecType::Mainnet, SpecType::default());
}
#[test]
fn test_pruning_parsing() {
assert_eq!(Pruning::Auto, "auto".parse().unwrap());
assert_eq!(Pruning::Specific(Algorithm::Archive), "archive".parse().unwrap());
assert_eq!(Pruning::Specific(Algorithm::EarlyMerge), "light".parse().unwrap());
assert_eq!(Pruning::Specific(Algorithm::OverlayRecent), "fast".parse().unwrap());
assert_eq!(Pruning::Specific(Algorithm::RefCounted), "basic".parse().unwrap());
}
#[test]
fn test_pruning_default() {
assert_eq!(Pruning::Auto, Pruning::default());
}
#[test]
fn test_reseal_policy_parsing() {
let none = ResealPolicy { own: false, external: false };
let own = ResealPolicy { own: true, external: false };
let ext = ResealPolicy { own: false, external: true };
let all = ResealPolicy { own: true, external: true };
assert_eq!(none, "none".parse().unwrap());
assert_eq!(own, "own".parse().unwrap());
assert_eq!(ext, "ext".parse().unwrap());
assert_eq!(all, "all".parse().unwrap());
}
#[test]
fn test_reseal_policy_default() {
let all = ResealPolicy { own: true, external: true };
assert_eq!(all, ResealPolicy::default());
}
}

43
parity/presale.rs Normal file
View File

@ -0,0 +1,43 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethcore::ethstore::{PresaleWallet, EthStore};
use ethcore::ethstore::dir::DiskDirectory;
use ethcore::account_provider::AccountProvider;
use helpers::{password_prompt, password_from_file};
#[derive(Debug, PartialEq)]
pub struct ImportWallet {
pub iterations: u32,
pub path: String,
pub wallet_path: String,
pub password_file: Option<String>,
}
pub fn execute(cmd: ImportWallet) -> Result<String, String> {
let password: String = match cmd.password_file {
Some(file) => try!(password_from_file(file)),
None => try!(password_prompt()),
};
let dir = Box::new(DiskDirectory::create(cmd.path).unwrap());
let secret_store = Box::new(EthStore::open_with_iterations(dir, cmd.iterations).unwrap());
let acc_provider = AccountProvider::new(secret_store);
let wallet = try!(PresaleWallet::open(cmd.wallet_path).map_err(|_| "Unable to open presale wallet."));
let kp = try!(wallet.decrypt(&password).map_err(|_| "Invalid password."));
let address = acc_provider.insert_account(kp.secret().clone(), &password).unwrap();
Ok(format!("{:?}", address))
}

View File

@ -14,40 +14,64 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::fmt;
use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use std::net::SocketAddr; use std::net::SocketAddr;
use util::panics::PanicHandler; use util::panics::PanicHandler;
use die::*; use ethcore_rpc::{RpcServerError, RpcServer as Server};
use jsonipc; use jsonipc;
use rpc_apis; use rpc_apis;
use std::fmt; use rpc_apis::ApiSet;
use helpers::parity_ipc_path;
pub use ethcore_rpc::Server as RpcServer; pub use jsonipc::Server as IpcServer;
use ethcore_rpc::{RpcServerError, RpcServer as Server}; pub use ethcore_rpc::Server as HttpServer;
#[derive(Debug, PartialEq)]
pub struct HttpConfiguration { pub struct HttpConfiguration {
pub enabled: bool, pub enabled: bool,
pub interface: String, pub interface: String,
pub port: u16, pub port: u16,
pub apis: String, pub apis: ApiSet,
pub cors: Option<Vec<String>>, pub cors: Option<Vec<String>>,
pub hosts: Option<Vec<String>>, pub hosts: Option<Vec<String>>,
} }
impl Default for HttpConfiguration {
fn default() -> Self {
HttpConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8545,
apis: ApiSet::UnsafeContext,
cors: None,
hosts: Some(Vec::new()),
}
}
}
#[derive(Debug, PartialEq)]
pub struct IpcConfiguration { pub struct IpcConfiguration {
pub enabled: bool, pub enabled: bool,
pub socket_addr: String, pub socket_addr: String,
pub apis: String, pub apis: ApiSet,
}
impl Default for IpcConfiguration {
fn default() -> Self {
IpcConfiguration {
enabled: true,
socket_addr: parity_ipc_path("$HOME/.parity/jsonrpc.ipc"),
apis: ApiSet::UnsafeContext,
}
}
} }
impl fmt::Display for IpcConfiguration { impl fmt::Display for IpcConfiguration {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.enabled { if self.enabled {
write!(f, "endpoint address [{}], api list [{}]", self.socket_addr, self.apis) write!(f, "endpoint address [{}], api list [{:?}]", self.socket_addr, self.apis)
} } else {
else {
write!(f, "disabled") write!(f, "disabled")
} }
} }
@ -58,22 +82,19 @@ pub struct Dependencies {
pub apis: Arc<rpc_apis::Dependencies>, pub apis: Arc<rpc_apis::Dependencies>,
} }
pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Option<RpcServer> { pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Result<Option<HttpServer>, String> {
if !conf.enabled { if !conf.enabled {
return None; return Ok(None);
} }
let apis = conf.apis.split(',').collect();
let url = format!("{}:{}", conf.interface, conf.port); let url = format!("{}:{}", conf.interface, conf.port);
let addr = SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid JSONRPC listen host/port given.", url)); let addr = try!(url.parse().map_err(|_| format!("Invalid JSONRPC listen host/port given: {}", url)));
Ok(Some(try!(setup_http_rpc_server(deps, &addr, conf.cors, conf.hosts, conf.apis))))
Some(setup_http_rpc_server(deps, &addr, conf.cors, conf.hosts, apis))
} }
fn setup_rpc_server(apis: Vec<&str>, deps: &Dependencies) -> Server { fn setup_rpc_server(apis: ApiSet, deps: &Dependencies) -> Result<Server, String> {
let apis = rpc_apis::from_str(apis);
let server = Server::new(); let server = Server::new();
rpc_apis::setup_rpc(server, deps.apis.clone(), rpc_apis::ApiSet::List(apis)) Ok(rpc_apis::setup_rpc(server, deps.apis.clone(), apis))
} }
pub fn setup_http_rpc_server( pub fn setup_http_rpc_server(
@ -81,29 +102,28 @@ pub fn setup_http_rpc_server(
url: &SocketAddr, url: &SocketAddr,
cors_domains: Option<Vec<String>>, cors_domains: Option<Vec<String>>,
allowed_hosts: Option<Vec<String>>, allowed_hosts: Option<Vec<String>>,
apis: Vec<&str>, apis: ApiSet
) -> RpcServer { ) -> Result<HttpServer, String> {
let server = setup_rpc_server(apis, dependencies); let server = try!(setup_rpc_server(apis, dependencies));
let ph = dependencies.panic_handler.clone(); let ph = dependencies.panic_handler.clone();
let start_result = server.start_http(url, cors_domains, allowed_hosts, ph); let start_result = server.start_http(url, cors_domains, allowed_hosts, ph);
match start_result { match start_result {
Err(RpcServerError::IoError(err)) => die_with_io_error("RPC", err), Err(RpcServerError::IoError(err)) => Err(format!("RPC io error: {}", err)),
Err(e) => die!("RPC: {:?}", e), Err(e) => Err(format!("RPC error: {:?}", e)),
Ok(server) => server, Ok(server) => Ok(server),
} }
} }
pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Option<jsonipc::Server> { pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result<Option<IpcServer>, String> {
if !conf.enabled { return None; } if !conf.enabled { return Ok(None); }
let apis = conf.apis.split(',').collect(); Ok(Some(try!(setup_ipc_rpc_server(deps, &conf.socket_addr, conf.apis))))
Some(setup_ipc_rpc_server(deps, &conf.socket_addr, apis))
} }
pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: Vec<&str>) -> jsonipc::Server { pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: ApiSet) -> Result<IpcServer, String> {
let server = setup_rpc_server(apis, dependencies); let server = try!(setup_rpc_server(apis, dependencies));
match server.start_ipc(addr) { match server.start_ipc(addr) {
Err(jsonipc::Error::Io(io_error)) => die_with_io_error("RPC", io_error), Err(jsonipc::Error::Io(io_error)) => Err(format!("RPC io error: {}", io_error)),
Err(any_error) => die!("RPC: {:?}", any_error), Err(any_error) => Err(format!("Rpc error: {:?}", any_error)),
Ok(server) => server Ok(server) => Ok(server)
} }
} }

View File

@ -15,20 +15,21 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::collections::HashSet;
use std::cmp::PartialEq;
use std::str::FromStr; use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use util::RotatingLogger;
use ethsync::{ManageNetwork, SyncProvider}; use util::network_settings::NetworkSettings;
use ethcore::miner::{Miner, ExternalMiner}; use ethcore::miner::{Miner, ExternalMiner};
use ethcore::client::Client; use ethcore::client::Client;
use util::RotatingLogger;
use ethcore::account_provider::AccountProvider; use ethcore::account_provider::AccountProvider;
use util::network_settings::NetworkSettings; use ethsync::{ManageNetwork, SyncProvider};
use ethcore_rpc::Extendable;
pub use ethcore_rpc::ConfirmationsQueue; pub use ethcore_rpc::ConfirmationsQueue;
use ethcore_rpc::Extendable;
#[derive(Debug, PartialEq, Clone, Eq, Hash)]
pub enum Api { pub enum Api {
Web3, Web3,
Net, Net,
@ -41,18 +42,8 @@ pub enum Api {
Rpc, Rpc,
} }
pub enum ApiError {
UnknownApi(String)
}
pub enum ApiSet {
SafeContext,
UnsafeContext,
List(Vec<Api>),
}
impl FromStr for Api { impl FromStr for Api {
type Err = ApiError; type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> { fn from_str(s: &str) -> Result<Self, Self::Err> {
use self::Api::*; use self::Api::*;
@ -67,11 +58,41 @@ impl FromStr for Api {
"ethcore_set" => Ok(EthcoreSet), "ethcore_set" => Ok(EthcoreSet),
"traces" => Ok(Traces), "traces" => Ok(Traces),
"rpc" => Ok(Rpc), "rpc" => Ok(Rpc),
e => Err(ApiError::UnknownApi(e.into())), api => Err(format!("Unknown api: {}", api))
} }
} }
} }
#[derive(Debug)]
pub enum ApiSet {
SafeContext,
UnsafeContext,
List(HashSet<Api>),
}
impl Default for ApiSet {
fn default() -> Self {
ApiSet::UnsafeContext
}
}
impl PartialEq for ApiSet {
fn eq(&self, other: &Self) -> bool {
self.list_apis() == other.list_apis()
}
}
impl FromStr for ApiSet {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
s.split(',')
.map(Api::from_str)
.collect::<Result<_, _>>()
.map(ApiSet::List)
}
}
pub struct Dependencies { pub struct Dependencies {
pub signer_port: Option<u16>, pub signer_port: Option<u16>,
pub signer_queue: Arc<ConfirmationsQueue>, pub signer_queue: Arc<ConfirmationsQueue>,
@ -106,31 +127,27 @@ fn to_modules(apis: &[Api]) -> BTreeMap<String, String> {
modules modules
} }
pub fn from_str(apis: Vec<&str>) -> Vec<Api> { impl ApiSet {
apis.into_iter() pub fn list_apis(&self) -> HashSet<Api> {
.map(Api::from_str) match *self {
.collect::<Result<Vec<Api>, ApiError>>() ApiSet::List(ref apis) => apis.clone(),
.unwrap_or_else(|e| match e { ApiSet::UnsafeContext => {
ApiError::UnknownApi(s) => die!("Unknown RPC API specified: {}", s), vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Ethcore, Api::Traces, Api::Rpc]
}) .into_iter().collect()
} },
_ => {
fn list_apis(apis: ApiSet) -> Vec<Api> { vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Signer, Api::Ethcore, Api::EthcoreSet, Api::Traces, Api::Rpc]
match apis { .into_iter().collect()
ApiSet::List(apis) => apis, },
ApiSet::UnsafeContext => { }
vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Ethcore, Api::Traces, Api::Rpc]
},
_ => {
vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Signer, Api::Ethcore, Api::EthcoreSet, Api::Traces, Api::Rpc]
},
} }
} }
pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet) -> T { pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet) -> T {
use ethcore_rpc::v1::*; use ethcore_rpc::v1::*;
let apis = list_apis(apis); // it's turned into vector, cause ont of the cases requires &[]
let apis = apis.list_apis().into_iter().collect::<Vec<_>>();
for api in &apis { for api in &apis {
match *api { match *api {
Api::Web3 => { Api::Web3 => {
@ -140,8 +157,18 @@ pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet
server.add_delegate(NetClient::new(&deps.sync).to_delegate()); server.add_delegate(NetClient::new(&deps.sync).to_delegate());
}, },
Api::Eth => { Api::Eth => {
server.add_delegate(EthClient::new(&deps.client, &deps.sync, &deps.secret_store, &deps.miner, &deps.external_miner, deps.allow_pending_receipt_query).to_delegate()); let client = EthClient::new(
server.add_delegate(EthFilterClient::new(&deps.client, &deps.miner).to_delegate()); &deps.client,
&deps.sync,
&deps.secret_store,
&deps.miner,
&deps.external_miner,
deps.allow_pending_receipt_query
);
server.add_delegate(client.to_delegate());
let filter_client = EthFilterClient::new(&deps.client, &deps.miner);
server.add_delegate(filter_client.to_delegate());
if deps.signer_port.is_some() { if deps.signer_port.is_some() {
server.add_delegate(EthSigningQueueClient::new(&deps.signer_queue, &deps.client, &deps.miner, &deps.secret_store).to_delegate()); server.add_delegate(EthSigningQueueClient::new(&deps.signer_queue, &deps.client, &deps.miner, &deps.secret_store).to_delegate());
@ -173,3 +200,46 @@ pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet
} }
server server
} }
#[cfg(test)]
mod test {
use super::{Api, ApiSet};
#[test]
fn test_api_parsing() {
assert_eq!(Api::Web3, "web3".parse().unwrap());
assert_eq!(Api::Net, "net".parse().unwrap());
assert_eq!(Api::Eth, "eth".parse().unwrap());
assert_eq!(Api::Personal, "personal".parse().unwrap());
assert_eq!(Api::Signer, "signer".parse().unwrap());
assert_eq!(Api::Ethcore, "ethcore".parse().unwrap());
assert_eq!(Api::EthcoreSet, "ethcore_set".parse().unwrap());
assert_eq!(Api::Traces, "traces".parse().unwrap());
assert_eq!(Api::Rpc, "rpc".parse().unwrap());
assert!("rp".parse::<Api>().is_err());
}
#[test]
fn test_api_set_default() {
assert_eq!(ApiSet::UnsafeContext, ApiSet::default());
}
#[test]
fn test_api_set_parsing() {
assert_eq!(ApiSet::List(vec![Api::Web3, Api::Eth].into_iter().collect()), "web3,eth".parse().unwrap());
}
#[test]
fn test_api_set_unsafe_context() {
let expected = vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Ethcore, Api::Traces, Api::Rpc]
.into_iter().collect();
assert_eq!(ApiSet::UnsafeContext.list_apis(), expected);
}
#[test]
fn test_api_set_safe_context() {
let expected = vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Signer, Api::Ethcore, Api::EthcoreSet, Api::Traces, Api::Rpc]
.into_iter().collect();
assert_eq!(ApiSet::SafeContext.list_apis(), expected);
}
}

342
parity/run.rs Normal file
View File

@ -0,0 +1,342 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::{Arc, Mutex, Condvar};
use std::path::Path;
use std::io::ErrorKind;
use ctrlc::CtrlC;
use fdlimit::raise_fd_limit;
use ethcore_logger::{Config as LogConfig, setup_log};
use util::network_settings::NetworkSettings;
use util::{Colour, version, NetworkConfiguration, U256};
use util::panics::{MayPanic, ForwardPanic, PanicHandler};
use ethcore::client::{Mode, Switch, DatabaseCompactionProfile, VMType, ChainNotify};
use ethcore::service::ClientService;
use ethcore::account_provider::AccountProvider;
use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions};
use ethsync::SyncConfig;
use informant::Informant;
#[cfg(feature="ipc")]
use ethcore::client::ChainNotify;
use rpc::{HttpServer, IpcServer, HttpConfiguration, IpcConfiguration};
use signer::SignerServer;
use dapps::WebappServer;
use io_handler::ClientIoHandler;
use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras};
use helpers::{to_client_config, execute_upgrades, passwords_from_files};
use dir::Directories;
use cache::CacheConfig;
use dapps;
use signer;
use modules;
use rpc_apis;
use rpc;
use url;
#[derive(Debug, PartialEq)]
pub struct RunCmd {
pub cache_config: CacheConfig,
pub dirs: Directories,
pub spec: SpecType,
pub pruning: Pruning,
/// Some if execution should be daemonized. Contains pid_file path.
pub daemon: Option<String>,
pub logger_config: LogConfig,
pub miner_options: MinerOptions,
pub http_conf: HttpConfiguration,
pub ipc_conf: IpcConfiguration,
pub net_conf: NetworkConfiguration,
pub network_id: Option<U256>,
pub acc_conf: AccountsConfig,
pub gas_pricer: GasPricerConfig,
pub miner_extras: MinerExtras,
pub mode: Mode,
pub tracing: Switch,
pub compaction: DatabaseCompactionProfile,
pub wal: bool,
pub vm_type: VMType,
pub enable_network: bool,
pub geth_compatibility: bool,
pub signer_port: Option<u16>,
pub net_settings: NetworkSettings,
pub dapps_conf: dapps::Configuration,
pub signer_conf: signer::Configuration,
pub ui: bool,
pub name: String,
pub custom_bootnodes: bool,
}
pub fn execute(cmd: RunCmd) -> Result<(), String> {
// create supervisor
let mut hypervisor = modules::hypervisor();
// increase max number of open files
raise_fd_limit();
// set up logger
let logger = try!(setup_log(&cmd.logger_config));
// set up panic handler
let panic_handler = PanicHandler::new_in_arc();
// create dirs used by parity
try!(cmd.dirs.create_dirs());
// load spec
let spec = try!(cmd.spec.spec());
let fork_name = spec.fork_name.clone();
// load genesis hash
let genesis_hash = spec.genesis_header().hash();
// select pruning algorithm
let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, fork_name.as_ref());
// prepare client_path
let client_path = cmd.dirs.client_path(genesis_hash, fork_name.as_ref(), algorithm);
// execute upgrades
try!(execute_upgrades(&cmd.dirs, genesis_hash, fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
// run in daemon mode
if let Some(pid_file) = cmd.daemon {
try!(daemonize(pid_file));
}
// display info about used pruning algorithm
info!("Starting {}", Colour::White.bold().paint(version()));
info!("Using state DB journalling strategy {}", Colour::White.bold().paint(algorithm.as_str()));
// display warning about using experimental journaldb alorithm
if !algorithm.is_stable() {
warn!("Your chosen strategy is {}! You can re-run with --pruning to change.", Colour::Red.bold().paint("unstable"));
}
// create sync config
let mut sync_config = SyncConfig::default();
sync_config.network_id = match cmd.network_id {
Some(id) => id,
None => spec.network_id(),
};
sync_config.fork_block = spec.fork_block().clone();
// prepare account provider
let account_provider = Arc::new(try!(prepare_account_provider(&cmd.dirs, cmd.acc_conf)));
// create miner
let miner = Miner::new(cmd.miner_options, cmd.gas_pricer.into(), spec, Some(account_provider.clone()));
miner.set_author(cmd.miner_extras.author);
miner.set_gas_floor_target(cmd.miner_extras.gas_floor_target);
miner.set_gas_ceil_target(cmd.miner_extras.gas_ceil_target);
miner.set_extra_data(cmd.miner_extras.extra_data);
miner.set_transactions_limit(cmd.miner_extras.transactions_limit);
// create client config
let client_config = to_client_config(
&cmd.cache_config,
&cmd.dirs,
genesis_hash,
cmd.mode,
cmd.tracing,
cmd.pruning,
cmd.compaction,
cmd.wal,
cmd.vm_type,
cmd.name,
fork_name.as_ref(),
);
// load spec
// TODO: make it clonable and load it only once!
let spec = try!(cmd.spec.spec());
// set up bootnodes
let mut net_conf = cmd.net_conf;
if !cmd.custom_bootnodes {
net_conf.boot_nodes = spec.nodes.clone();
}
// create client
let service = try!(ClientService::start(
client_config,
spec,
Path::new(&client_path),
miner.clone(),
).map_err(|e| format!("Client service error: {:?}", e)));
// forward panics from service
panic_handler.forward_from(&service);
// take handle to client
let client = service.client();
// create external miner
let external_miner = Arc::new(ExternalMiner::default());
// create sync object
let (sync_provider, manage_network, chain_notify) = try!(modules::sync(
&mut hypervisor, sync_config, net_conf.into(), client.clone(), &cmd.logger_config,
).map_err(|e| format!("Sync error: {}", e)));
service.add_notify(chain_notify.clone());
// start network
if cmd.enable_network {
chain_notify.start();
}
// set up dependencies for rpc servers
let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies {
signer_port: cmd.signer_port,
signer_queue: Arc::new(rpc_apis::ConfirmationsQueue::default()),
client: client.clone(),
sync: sync_provider.clone(),
net: manage_network.clone(),
secret_store: account_provider.clone(),
miner: miner.clone(),
external_miner: external_miner.clone(),
logger: logger.clone(),
settings: Arc::new(cmd.net_settings.clone()),
allow_pending_receipt_query: !cmd.geth_compatibility,
net_service: manage_network.clone()
});
let dependencies = rpc::Dependencies {
panic_handler: panic_handler.clone(),
apis: deps_for_rpc_apis.clone(),
};
// start rpc servers
let http_server = try!(rpc::new_http(cmd.http_conf, &dependencies));
let ipc_server = try!(rpc::new_ipc(cmd.ipc_conf, &dependencies));
let dapps_deps = dapps::Dependencies {
panic_handler: panic_handler.clone(),
apis: deps_for_rpc_apis.clone(),
};
// start dapps server
let dapps_server = try!(dapps::new(cmd.dapps_conf.clone(), dapps_deps));
let signer_deps = signer::Dependencies {
panic_handler: panic_handler.clone(),
apis: deps_for_rpc_apis.clone(),
};
// start signer server
let signer_server = try!(signer::start(cmd.signer_conf, signer_deps));
let informant = Arc::new(Informant::new(service.client(), Some(sync_provider.clone()), Some(manage_network.clone()), cmd.logger_config.color));
let info_notify: Arc<ChainNotify> = informant.clone();
service.add_notify(info_notify);
let io_handler = Arc::new(ClientIoHandler {
client: service.client(),
info: informant,
sync: sync_provider.clone(),
net: manage_network.clone(),
accounts: account_provider.clone(),
});
service.register_io_handler(io_handler).expect("Error registering IO handler");
// start ui
if cmd.ui {
if !cmd.dapps_conf.enabled {
return Err("Cannot use UI command with Dapps turned off.".into())
}
url::open(&format!("http://{}:{}/", cmd.dapps_conf.interface, cmd.dapps_conf.port));
}
// Handle exit
wait_for_exit(panic_handler, http_server, ipc_server, dapps_server, signer_server);
Ok(())
}
#[cfg(not(windows))]
fn daemonize(pid_file: String) -> Result<(), String> {
extern crate daemonize;
daemonize::Daemonize::new()
.pid_file(pid_file)
.chown_pid_file(true)
.start()
.map(|_| ())
.map_err(|e| format!("Couldn't daemonize; {}", e))
}
#[cfg(windows)]
fn daemonize(_pid_file: String) -> Result<(), String> {
Err("daemon is no supported on windows".into())
}
fn prepare_account_provider(dirs: &Directories, cfg: AccountsConfig) -> Result<AccountProvider, String> {
use ethcore::ethstore::{import_accounts, EthStore};
use ethcore::ethstore::dir::{GethDirectory, DirectoryType, DiskDirectory};
use ethcore::ethstore::Error;
let passwords = try!(passwords_from_files(cfg.password_files));
if cfg.import_keys {
let t = if cfg.testnet {
DirectoryType::Testnet
} else {
DirectoryType::Main
};
let from = GethDirectory::open(t);
let to = DiskDirectory::create(dirs.keys.clone()).unwrap();
match import_accounts(&from, &to) {
Ok(_) => {}
Err(Error::Io(ref io_err)) if io_err.kind() == ErrorKind::NotFound => {}
Err(err) => warn!("Import geth accounts failed. {}", err)
}
}
let dir = Box::new(DiskDirectory::create(dirs.keys.clone()).unwrap());
let account_service = AccountProvider::new(Box::new(EthStore::open_with_iterations(dir, cfg.iterations).unwrap()));
for a in cfg.unlocked_accounts {
if passwords.iter().find(|p| account_service.unlock_account_permanently(a, (*p).clone()).is_ok()).is_none() {
return Err(format!("No password given to unlock account {}. Pass the password using `--password`.", a));
}
}
Ok(account_service)
}
fn wait_for_exit(
panic_handler: Arc<PanicHandler>,
_http_server: Option<HttpServer>,
_ipc_server: Option<IpcServer>,
_dapps_server: Option<WebappServer>,
_signer_server: Option<SignerServer>
) {
let exit = Arc::new(Condvar::new());
// Handle possible exits
let e = exit.clone();
CtrlC::set_handler(move || { e.notify_all(); });
// Handle panics
let e = exit.clone();
panic_handler.on_panic(move |_reason| { e.notify_all(); });
// Wait for signal
let mutex = Mutex::new(());
let _ = exit.wait(mutex.lock().unwrap());
info!("Finishing work, please wait...");
}

View File

@ -22,28 +22,38 @@ use util::panics::{ForwardPanic, PanicHandler};
use util::path::restrict_permissions_owner; use util::path::restrict_permissions_owner;
use rpc_apis; use rpc_apis;
use ethcore_signer as signer; use ethcore_signer as signer;
use die::*; use helpers::replace_home;
pub use ethcore_signer::Server as SignerServer; pub use ethcore_signer::Server as SignerServer;
const CODES_FILENAME: &'static str = "authcodes"; const CODES_FILENAME: &'static str = "authcodes";
#[derive(Debug, PartialEq)]
pub struct Configuration { pub struct Configuration {
pub enabled: bool, pub enabled: bool,
pub port: u16, pub port: u16,
pub signer_path: String, pub signer_path: String,
} }
impl Default for Configuration {
fn default() -> Self {
Configuration {
enabled: true,
port: 8180,
signer_path: replace_home("$HOME/.parity/signer"),
}
}
}
pub struct Dependencies { pub struct Dependencies {
pub panic_handler: Arc<PanicHandler>, pub panic_handler: Arc<PanicHandler>,
pub apis: Arc<rpc_apis::Dependencies>, pub apis: Arc<rpc_apis::Dependencies>,
} }
pub fn start(conf: Configuration, deps: Dependencies) -> Option<SignerServer> { pub fn start(conf: Configuration, deps: Dependencies) -> Result<Option<SignerServer>, String> {
if !conf.enabled { if !conf.enabled {
None Ok(None)
} else { } else {
Some(do_start(conf, deps)) Ok(Some(try!(do_start(conf, deps))))
} }
} }
@ -54,7 +64,13 @@ fn codes_path(path: String) -> PathBuf {
p p
} }
pub fn new_token(path: String) -> io::Result<String> { pub fn new_token(path: String) -> Result<String, String> {
generate_new_token(path)
.map(|code| format!("This key code will authorise your System Signer UI: {}", Colour::White.bold().paint(code)))
.map_err(|err| format!("Error generating token: {:?}", err))
}
fn generate_new_token(path: String) -> io::Result<String> {
let path = codes_path(path); let path = codes_path(path);
let mut codes = try!(signer::AuthCodes::from_file(&path)); let mut codes = try!(signer::AuthCodes::from_file(&path));
let code = try!(codes.generate_new()); let code = try!(codes.generate_new());
@ -63,10 +79,10 @@ pub fn new_token(path: String) -> io::Result<String> {
Ok(code) Ok(code)
} }
fn do_start(conf: Configuration, deps: Dependencies) -> SignerServer { fn do_start(conf: Configuration, deps: Dependencies) -> Result<SignerServer, String> {
let addr = format!("127.0.0.1:{}", conf.port).parse().unwrap_or_else(|_| { let addr = try!(format!("127.0.0.1:{}", conf.port)
die!("Invalid port specified: {}", conf.port) .parse()
}); .map_err(|_| format!("Invalid port specified: {}", conf.port)));
let start_result = { let start_result = {
let server = signer::ServerBuilder::new( let server = signer::ServerBuilder::new(
@ -78,11 +94,11 @@ fn do_start(conf: Configuration, deps: Dependencies) -> SignerServer {
}; };
match start_result { match start_result {
Err(signer::ServerError::IoError(err)) => die_with_io_error("Trusted Signer", err), Err(signer::ServerError::IoError(err)) => Err(format!("Trusted Signer Error: {}", err)),
Err(e) => die!("Trusted Signer: {:?}", e), Err(e) => Err(format!("Trusted Signer Error: {:?}", e)),
Ok(server) => { Ok(server) => {
deps.panic_handler.forward_from(&server); deps.panic_handler.forward_from(&server);
server Ok(server)
}, },
} }
} }

Some files were not shown because too many files have changed in this diff Show More