diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 000000000..83aca7570
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,166 @@
+stages:
+  - build
+  - deploy
+variables:
+  GIT_DEPTH: "3"
+  SIMPLECOV: "true"
+  RUST_BACKTRACE: "1"
+cache:
+  key: "$CI_BUILD_NAME/$CI_BUILD_REF_NAME"
+  untracked: true
+linux-beta:
+  stage: build
+  image: ethcore/rust:beta
+  script:
+    - cargo build --release --verbose
+    - strip target/release/parity
+  tags:
+    - rust
+    - rust-beta
+  artifacts:
+    paths:
+    - target/release/parity
+    name: "${CI_BUILD_NAME}_parity"
+linux-stable:
+  stage: build
+  image: ethcore/rust:stable
+  script:
+    - cargo build --release --verbose
+    - strip target/release/parity
+  tags:
+    - rust
+    - rust-stable
+  artifacts:
+    paths:
+    - target/release/parity
+    name: "${CI_BUILD_NAME}_parity"
+linux-nightly:
+  stage: build
+  image: ethcore/rust:nightly
+  script:
+    - cargo build --release --verbose
+    - strip target/release/parity
+  tags:
+    - rust
+    - rust-nightly
+  artifacts:
+    paths:
+    - target/release/parity
+    name: "${CI_BUILD_NAME}_parity"
+linux-centos:
+  stage: build
+  image: ethcore/rust-centos:latest
+  script:
+    - export CXX="g++"
+    - export CC="gcc"
+    - cargo build --release --verbose
+    - strip target/release/parity
+  tags:
+    - rust
+    - rust-centos
+  artifacts:
+    paths:
+    - target/release/parity
+    name: "${CI_BUILD_NAME}_parity"
+linux-armv7:
+  stage: build
+  image: ethcore/rust-arm:latest
+  script:
+    - export CXX=arm-linux-gnueabihf-g++
+    - export CC=arm-linux-gnueabihf-gcc
+    - mkdir -p .cargo
+    - echo "[target.armv7-unknown-linux-gnueabihf]" >> .cargo/config
+    - echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config
+    - cat .cargo/config
+    - cargo build --target armv7-unknown-linux-gnueabihf --release --verbose
+    - arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity
+  tags:
+    - rust
+    - rust-arm
+  artifacts:
+    paths:
+    - target/armv7-unknown-linux-gnueabihf/release/parity
+    name: "${CI_BUILD_NAME}_parity"
+linux-arm:
+  stage: build
+  image: ethcore/rust-arm:latest
+  script:
+    - export CXX=arm-linux-gnueabihf-g++
+    - export CC=arm-linux-gnueabihf-gcc
+    - mkdir -p .cargo
+    - echo "[target.arm-unknown-linux-gnueabihf]" >> .cargo/config
+    - echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config
+    - cat .cargo/config
+    - cargo build --target arm-unknown-linux-gnueabihf --release --verbose
+    - arm-linux-gnueabihf-strip target/arm-unknown-linux-gnueabihf/release/parity
+  tags:
+    - rust
+    - rust-arm
+  artifacts:
+    paths:
+    - target/arm-unknown-linux-gnueabihf/release/parity
+    name: "${CI_BUILD_NAME}_parity"
+linux-armv6:
+  stage: build
+  image: ethcore/rust-arm:latest
+  script:
+    - export CXX=arm-linux-gnueabi-g++
+    - export CC=arm-linux-gnueabi-gcc
+    - mkdir -p .cargo
+    - echo "[target.arm-unknown-linux-gnueabi]" >> .cargo/config
+    - echo "linker= \"arm-linux-gnueabi-gcc\"" >> .cargo/config
+    - cat .cargo/config
+    - cargo build --target arm-unknown-linux-gnueabi --release --verbose
+    - arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity
+  tags:
+    - rust
+    - rust-arm
+  artifacts:
+    paths:
+    - target/arm-unknown-linux-gnueabi/release/parity
+    name: "${CI_BUILD_NAME}_parity"
+linux-aarch64:
+  stage: build
+  image: ethcore/rust-arm:latest
+  script:
+    - export CXX=aarch64-linux-gnu-g++
+    - export CC=aarch64-linux-gnu-gcc
+    - mkdir -p .cargo
+    - echo "[target.aarch64-unknown-linux-gnu]" >> .cargo/config
+    - echo "linker= \"aarch64-linux-gnu-gcc\"" >> .cargo/config
+    - cat .cargo/config
+    - cargo build --target aarch64-unknown-linux-gnu --release --verbose
+    - aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity
+  tags:
+    - rust
+    - rust-arm
+  artifacts:
+    paths:
+    - target/aarch64-unknown-linux-gnu/release/parity
+    name: "${CI_BUILD_NAME}_parity"
+darwin:
+  stage: build
+  script:
+    - cargo build --release --verbose
+  tags:
+    - osx
+  artifacts:
+    paths:
+    - target/release/parity
+    name: "${CI_BUILD_NAME}_parity"
+windows:
+  stage: build
+  script:
+    - set INCLUDE=C:\Program Files (x86)\Microsoft SDKs\Windows\v7.1A\Include;C:\vs2015\VC\include;C:\Program Files (x86)\Windows Kits\10\Include\10.0.10240.0\ucrt
+    - set LIB=C:\vs2015\VC\lib;C:\Program Files (x86)\Windows Kits\10\Lib\10.0.10240.0\ucrt\x64
+    - set RUST_BACKTRACE=1
+    - SET
+    - rustup default stable-x86_64-pc-windows-msvc
+    - cargo build --release --verbose
+  tags:
+    - rust-windows
+  artifacts:
+    paths:
+    - target/release/parity.exe
+    - target/release/parity.pdb
+    name: "${CI_BUILD_NAME}_parity"
diff --git a/.travis.yml b/.travis.yml
index d856086f9..03b4edf9d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,9 +16,11 @@ git:
 matrix:
   include:
     - rust: stable
-      env: FEATURES="--features travis-beta" RUN_TESTS="true"
+      env: RUN_TESTS="true"
     - rust: beta
-      env: FEATURES="--features travis-beta" RUN_COVERAGE="true"
+      env: RUN_COVERAGE="true"
+    - rust: stable
+      env: RUN_DOCS="true"
 
 env:
   global:
@@ -27,6 +29,7 @@ env:
     - RUST_BACKTRACE="1"
     - RUN_TESTS="false"
     - RUN_COVERAGE="false"
+    - RUN_DOCS="false"
     # GH_TOKEN for documentation
     - secure: bumJASbZSU8bxJ0EyPUJmu16AiV9EXOpyOj86Jlq/Ty9CfwGqsSXt96uDyE+OUJf34RUFQMsw0nk37/zC4lcn6kqk2wpuH3N/o85Zo/cVZY/NusBWLQqtT5VbYWsV+u2Ua4Tmmsw8yVYQhYwU2ZOejNpflL+Cs9XGgORp1L+/gMRMC2y5Se6ZhwnKPQlRJ8LGsG1dzjQULxzADIt3/zuspNBS8a2urJwlHfGMkvHDoUWCviP/GXoSqw3TZR7FmKyxE19I8n9+iSvm9+oZZquvcgfUxMHn8Gq/b44UbPvjtFOg2yam4xdWXF/RyWCHdc/R9EHorSABeCbefIsm+zcUF3/YQxwpSxM4IZEeH2rTiC7dcrsKw3XsO16xFQz5YI5Bay+CT/wTdMmJd7DdYz7Dyf+pOvcM9WOf/zorxYWSBOMYy0uzbusU2iyIghQ82s7E/Ahg+WARtPgkuTLSB5aL1oCTBKHqQscMr7lo5Ti6RpWLxEdTQMBznc+bMr+6dEtkEcG9zqc6cE9XX+ox3wTU6+HVMfQ1ltCntJ4UKcw3A6INEbw9wgocQa812CIASQ2fE+SCAbz6JxBjIAlFUnD1lUB7S8PdMPwn9plfQgKQ2A5YZqg6FnBdf0rQXIJYxQWKHXj/rBHSUCT0tHACDlzTA+EwWggvkP5AGIxRxm8jhw=
     - KCOV_CMD="./kcov-master/tmp/usr/local/bin/kcov"
@@ -61,13 +64,13 @@ install:
   )
 
 script:
-  - if [ "$RUN_TESTS" = "true" ]; then ./test.sh; fi
+  - if [ "$RUN_TESTS" = "true" ]; then ./test.sh --verbose; fi
   - if [ "$RUN_COVERAGE" = "true" ]; then ./scripts/cov.sh "$KCOV_CMD"; fi
 
 after_success: |
   [ $TRAVIS_BRANCH = master ] &&
   [ $TRAVIS_PULL_REQUEST = false ] &&
-  [ $TRAVIS_RUST_VERSION = stable ] &&
+  [ "$RUN_DOCS" = "true" ] &&
   ./scripts/doc.sh &&
   pip install --user ghp-import &&
   /home/travis/.local/bin/ghp-import -n target/doc &&
diff --git a/Cargo.lock b/Cargo.lock
index f837fdd20..3ac8595bd 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -620,7 +620,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 [[package]]
 name = "json-ipc-server"
 version = "0.2.4"
-source = "git+https://github.com/ethcore/json-ipc-server.git#902b031b8f50a59ecb4f389cbec1d264a98556bc"
+source = "git+https://github.com/ethcore/json-ipc-server.git#93c2756f669c6a1872dec1ef755a0870f40c03c3"
 dependencies = [
  "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -899,7 +899,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 [[package]]
 name = "parity-dapps"
 version = "0.6.0"
-source = "git+https://github.com/ethcore/parity-ui.git#fb88ca259fa8eda6e54d9a04b325abd9eec2818b"
+source = "git+https://github.com/ethcore/parity-ui.git#7120546d08d4d9eb648e255c04935002223d362f"
 dependencies = [
  "aster 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -913,7 +913,7 @@ dependencies = [
 [[package]]
 name = "parity-dapps-home"
 version = "0.6.0"
-source = "git+https://github.com/ethcore/parity-ui.git#fb88ca259fa8eda6e54d9a04b325abd9eec2818b"
+source = "git+https://github.com/ethcore/parity-ui.git#7120546d08d4d9eb648e255c04935002223d362f"
 dependencies = [
  "parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)",
 ]
@@ -921,7 +921,7 @@ dependencies = [
 [[package]]
 name = "parity-dapps-signer"
 version = "0.6.0"
-source = "git+https://github.com/ethcore/parity-ui.git#fb88ca259fa8eda6e54d9a04b325abd9eec2818b"
+source = "git+https://github.com/ethcore/parity-ui.git#7120546d08d4d9eb648e255c04935002223d362f"
 dependencies = [
  "parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)",
 ]
@@ -929,7 +929,7 @@ dependencies = [
 [[package]]
 name = "parity-dapps-status"
 version = "0.6.0"
-source = "git+https://github.com/ethcore/parity-ui.git#fb88ca259fa8eda6e54d9a04b325abd9eec2818b"
+source = "git+https://github.com/ethcore/parity-ui.git#7120546d08d4d9eb648e255c04935002223d362f"
 dependencies = [
  "parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)",
 ]
@@ -937,7 +937,7 @@ dependencies = [
 [[package]]
 name = "parity-dapps-wallet"
 version = "0.6.0"
-source = "git+https://github.com/ethcore/parity-ui.git#fb88ca259fa8eda6e54d9a04b325abd9eec2818b"
+source = "git+https://github.com/ethcore/parity-ui.git#7120546d08d4d9eb648e255c04935002223d362f"
 dependencies = [
  "parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)",
 ]
@@ -1094,7 +1094,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 [[package]]
 name = "rocksdb"
 version = "0.4.5"
-source = "git+https://github.com/ethcore/rust-rocksdb#dd597245bfcb621c6ffc45478e1fda0b05d2f409"
+source = "git+https://github.com/ethcore/rust-rocksdb#eadce7f74cfe92b99ce63a77af425b47857239b8"
 dependencies = [
  "libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
  "rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)",
@@ -1103,7 +1103,7 @@ dependencies = [
 [[package]]
 name = "rocksdb-sys"
 version = "0.3.0"
-source = "git+https://github.com/ethcore/rust-rocksdb#dd597245bfcb621c6ffc45478e1fda0b05d2f409"
+source = "git+https://github.com/ethcore/rust-rocksdb#eadce7f74cfe92b99ce63a77af425b47857239b8"
 dependencies = [
  "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
diff --git a/Cargo.toml b/Cargo.toml
index 6c71a1f6d..bf35a1697 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -57,18 +57,12 @@ ui = ["dapps", "ethcore-signer/ui"]
 use-precompiled-js = ["ethcore-dapps/use-precompiled-js", "ethcore-signer/use-precompiled-js"]
 dapps = ["ethcore-dapps"]
 dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"]
-travis-beta = ["ethcore/json-tests"]
-travis-nightly = ["ethcore/json-tests", "dev"]
 ipc = ["ethcore/ipc"]
 
 [[bin]]
 path = "parity/main.rs"
 name = "parity"
 
-[[bin]]
-path = "parity/sync/main.rs"
-name = "sync"
-
 [profile.release]
 debug = true
 lto = false
diff --git a/appveyor.yml b/appveyor.yml
index 26f82122f..3ffaa961e 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -31,10 +31,10 @@ install:
 build: off
 
 test_script:
-  - cargo test --verbose --release --no-default-features
+  - cargo test --verbose --release
 
 after_test:
-  - cargo build --verbose --release --no-default-features
+  - cargo build --verbose --release
   - ps: if($env:cert) { Start-FileDownload $env:cert -FileName $env:keyfile }
   - ps: if($env:cert) { signtool sign /f $env:keyfile /p $env:certpass target\release\parity.exe }
   - makensis.exe nsis\installer.nsi
diff --git a/ethcore/res/ethereum/classic.json b/ethcore/res/ethereum/classic.json
index 39e5f68c5..034c70938 100644
--- a/ethcore/res/ethereum/classic.json
+++ b/ethcore/res/ethereum/classic.json
@@ -18,7 +18,9 @@
 		"accountStartNonce": "0x00",
 		"maximumExtraDataSize": "0x20",
 		"minGasLimit": "0x1388",
-		"networkID" : "0x1"
+		"networkID" : "0x1",
+		"forkBlock": "0x1d4c00",
+		"forkCanonHash": "0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f"
 	},
 	"genesis": {
 		"seal": {
diff --git a/ethcore/res/ethereum/frontier.json b/ethcore/res/ethereum/frontier.json
index c887184f7..2f91809fc 100644
--- a/ethcore/res/ethereum/frontier.json
+++ b/ethcore/res/ethereum/frontier.json
@@ -137,7 +137,9 @@
 		"accountStartNonce": "0x00",
 		"maximumExtraDataSize": "0x20",
 		"minGasLimit": "0x1388",
-		"networkID" : "0x1"
+		"networkID" : "0x1",
+		"forkBlock": "0x1d4c00",
+		"forkCanonHash": "0x4985f5ca3d2afbec36529aa96f74de3cc10a2a4a6c44f2157a57d2c6059a11bb"
 	},
 	"genesis": {
 		"seal": {
diff --git a/ethcore/src/account.rs b/ethcore/src/account.rs
index ff7bfe70d..c8173ea28 100644
--- a/ethcore/src/account.rs
+++ b/ethcore/src/account.rs
@@ -61,8 +61,8 @@ impl Account {
 			nonce: pod.nonce,
 			storage_root: SHA3_NULL_RLP,
 			storage_overlay: RefCell::new(pod.storage.into_iter().map(|(k, v)| (k, (Filth::Dirty, v))).collect()),
-			code_hash: Some(pod.code.sha3()),
-			code_cache: pod.code,
+			code_hash: pod.code.as_ref().map(|c| c.sha3()),
+			code_cache: pod.code.as_ref().map_or_else(|| { warn!("POD account with unknown code is being created! Assuming no code."); vec![] }, |c| c.clone()),
 			filth: Filth::Dirty,
 		}
 	}
@@ -288,6 +288,16 @@ mod tests {
 	use super::*;
 	use account_db::*;
 
+	#[test]
+	fn account_compress() {
+		let raw = Account::new_basic(2.into(), 4.into()).rlp();
+		let rlp = UntrustedRlp::new(&raw);
+		let compact_vec = rlp.compress(RlpType::Snapshot).to_vec();
+		assert!(raw.len() > compact_vec.len());
+		let again_raw = UntrustedRlp::new(&compact_vec).decompress(RlpType::Snapshot);
+		assert_eq!(raw, again_raw.to_vec());
+	}
+
 	#[test]
 	fn storage_at() {
 		let mut db = MemoryDB::new();
diff --git a/ethcore/src/account_provider.rs b/ethcore/src/account_provider.rs
index 1738fb82a..6e3b9c94d 100644
--- a/ethcore/src/account_provider.rs
+++ b/ethcore/src/account_provider.rs
@@ -192,7 +192,7 @@ impl AccountProvider {
 	pub fn accounts_info(&self) -> Result<HashMap<H160, AccountMeta>, Error> {
 		let r: HashMap<H160, AccountMeta> = self.sstore.accounts()
 			.into_iter()
-			.map(|a| (H160(a.clone().into()), self.account_meta(a).unwrap_or(Default::default())))
+			.map(|a| (H160(a.clone().into()), self.account_meta(a).unwrap_or_else(|_| Default::default())))
 			.collect();
 		Ok(r)
 	}
diff --git a/ethcore/src/action_params.rs b/ethcore/src/action_params.rs
index 57100b2c5..1886c3d36 100644
--- a/ethcore/src/action_params.rs
+++ b/ethcore/src/action_params.rs
@@ -17,6 +17,7 @@
 //! Evm input params.
 use common::*;
 use ethjson;
+use types::executed::CallType;
 
 /// Transaction value
 #[derive(Clone, Debug)]
@@ -58,7 +59,10 @@ pub struct ActionParams {
 	/// Code being executed.
 	pub code: Option<Bytes>,
 	/// Input data.
-	pub data: Option<Bytes>
+	pub data: Option<Bytes>,
+	/// Type of call
+	pub call_type: CallType,
+
 }
 
 impl Default for ActionParams {
@@ -73,16 +77,18 @@ impl Default for ActionParams {
 			gas_price: U256::zero(),
 			value: ActionValue::Transfer(U256::zero()),
 			code: None,
-			data: None
+			data: None,
+			call_type: CallType::None,
 		}
 	}
 }
 
 impl From<ethjson::vm::Transaction> for ActionParams {
 	fn from(t: ethjson::vm::Transaction) -> Self {
+		let address: Address = t.address.into();
 		ActionParams {
 			code_address: Address::new(),
-			address: t.address.into(),
+			address: address,
 			sender: t.sender.into(),
 			origin: t.origin.into(),
 			code: Some(t.code.into()),
@@ -90,6 +96,7 @@ impl From<ethjson::vm::Transaction> for ActionParams {
 			gas: t.gas.into(),
 			gas_price: t.gas_price.into(),
 			value: ActionValue::Transfer(t.value.into()),
+			call_type: match address.is_zero() { true => CallType::None, false => CallType::Call }, // TODO @debris is this correct?
 		}
 	}
 }
diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs
index 2be475410..db0bac614 100644
--- a/ethcore/src/block.rs
+++ b/ethcore/src/block.rs
@@ -17,10 +17,10 @@
 //! Blockchain block.
 
 use common::*;
-use engine::*;
+use engines::Engine;
 use state::*;
 use verification::PreverifiedBlock;
-use trace::Trace;
+use trace::FlatTrace;
 use evm::Factory as EvmFactory;
 
 /// A block, encoded as it is on the block chain.
@@ -76,7 +76,7 @@ pub struct ExecutedBlock {
 	receipts: Vec<Receipt>,
 	transactions_set: HashSet<H256>,
 	state: State,
-	traces: Option<Vec<Trace>>,
+	traces: Option<Vec<Vec<FlatTrace>>>,
 }
 
 /// A set of references to `ExecutedBlock` fields that are publicly accessible.
@@ -92,7 +92,7 @@ pub struct BlockRefMut<'a> {
 	/// State.
 	pub state: &'a mut State,
 	/// Traces.
-	pub traces: &'a Option<Vec<Trace>>,
+	pub traces: &'a Option<Vec<Vec<FlatTrace>>>,
 }
 
 /// A set of immutable references to `ExecutedBlock` fields that are publicly accessible.
@@ -108,7 +108,7 @@ pub struct BlockRef<'a> {
 	/// State.
 	pub state: &'a State,
 	/// Traces.
-	pub traces: &'a Option<Vec<Trace>>,
+	pub traces: &'a Option<Vec<Vec<FlatTrace>>>,
 }
 
 impl ExecutedBlock {
@@ -169,7 +169,7 @@ pub trait IsBlock {
 	fn receipts(&self) -> &[Receipt] { &self.block().receipts }
 
 	/// Get all information concerning transaction tracing in this block.
-	fn traces(&self) -> &Option<Vec<Trace>> { &self.block().traces }
+	fn traces(&self) -> &Option<Vec<Vec<FlatTrace>>> { &self.block().traces }
 
 	/// Get all uncles in this block.
 	fn uncles(&self) -> &[Header] { &self.block().base.uncles }
@@ -337,9 +337,9 @@ impl<'x> OpenBlock<'x> {
 				self.block.transactions_set.insert(h.unwrap_or_else(||t.hash()));
 				self.block.base.transactions.push(t);
 				let t = outcome.trace;
-				self.block.traces.as_mut().map(|traces| traces.push(t.expect("self.block.traces.is_some(): so we must be tracing: qed")));
+				self.block.traces.as_mut().map(|traces| traces.push(t));
 				self.block.receipts.push(outcome.receipt);
-				Ok(&self.block.receipts.last().unwrap())
+				Ok(self.block.receipts.last().unwrap())
 			}
 			Err(x) => Err(From::from(x))
 		}
diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs
index 623fab4a6..6772c0461 100644
--- a/ethcore/src/block_queue.rs
+++ b/ethcore/src/block_queue.rs
@@ -18,10 +18,11 @@
 //! Sorts them ready for blockchain insertion.
 use std::thread::{JoinHandle, self};
 use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};
+use std::sync::{Condvar as SCondvar, Mutex as SMutex};
 use util::*;
 use verification::*;
 use error::*;
-use engine::Engine;
+use engines::Engine;
 use views::*;
 use header::*;
 use service::*;
@@ -36,7 +37,7 @@ const MIN_MEM_LIMIT: usize = 16384;
 const MIN_QUEUE_LIMIT: usize = 512;
 
 /// Block queue configuration
-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
 pub struct BlockQueueConfig {
 	/// Maximum number of blocks to keep in unverified queue.
 	/// When the limit is reached, is_full returns true.
@@ -80,12 +81,12 @@ impl BlockQueueInfo {
 pub struct BlockQueue {
 	panic_handler: Arc<PanicHandler>,
 	engine: Arc<Box<Engine>>,
-	more_to_verify: Arc<Condvar>,
+	more_to_verify: Arc<SCondvar>,
 	verification: Arc<Verification>,
 	verifiers: Vec<JoinHandle<()>>,
 	deleting: Arc<AtomicBool>,
 	ready_signal: Arc<QueueSignal>,
-	empty: Arc<Condvar>,
+	empty: Arc<SCondvar>,
 	processing: RwLock<HashSet<H256>>,
 	max_queue_size: usize,
 	max_mem_use: usize,
@@ -133,6 +134,8 @@ struct Verification {
 	verified: Mutex<VecDeque<PreverifiedBlock>>,
 	verifying: Mutex<VecDeque<VerifyingBlock>>,
 	bad: Mutex<HashSet<H256>>,
+	more_to_verify: SMutex<()>,
+	empty: SMutex<()>,
 }
 
 impl BlockQueue {
@@ -143,15 +146,18 @@ impl BlockQueue {
 			verified: Mutex::new(VecDeque::new()),
 			verifying: Mutex::new(VecDeque::new()),
 			bad: Mutex::new(HashSet::new()),
+			more_to_verify: SMutex::new(()),
+			empty: SMutex::new(()),
+		});
 
-		let more_to_verify = Arc::new(Condvar::new());
+		let more_to_verify = Arc::new(SCondvar::new());
 		let deleting = Arc::new(AtomicBool::new(false));
 		let ready_signal = Arc::new(QueueSignal {
 			deleting: deleting.clone(),
 			signalled: AtomicBool::new(false),
 			message_channel: message_channel
 		});
-		let empty = Arc::new(Condvar::new());
+		let empty = Arc::new(SCondvar::new());
 		let panic_handler = PanicHandler::new_in_arc();
 
 		let mut verifiers: Vec<JoinHandle<()>> = Vec::new();
@@ -190,17 +196,17 @@ impl BlockQueue {
 		}
 	}
 
-	fn verify(verification: Arc<Verification>, engine: Arc<Box<Engine>>, wait: Arc<Condvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<Condvar>) {
+	fn verify(verification: Arc<Verification>, engine: Arc<Box<Engine>>, wait: Arc<SCondvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<SCondvar>) {
 		while !deleting.load(AtomicOrdering::Acquire) {
 			{
-				let mut unverified = verification.unverified.lock();
+				let mut more_to_verify = verification.more_to_verify.lock().unwrap();
 
-				if unverified.is_empty() && verification.verifying.lock().is_empty() {
+				if verification.unverified.lock().is_empty() && verification.verifying.lock().is_empty() {
 					empty.notify_all();
 				}
 
-				while unverified.is_empty() && !deleting.load(AtomicOrdering::Acquire) {
-					wait.wait(&mut unverified);
+				while verification.unverified.lock().is_empty() && !deleting.load(AtomicOrdering::Acquire) {
+					more_to_verify = wait.wait(more_to_verify).unwrap();
 				}
 
 				if deleting.load(AtomicOrdering::Acquire) {
@@ -276,18 +282,18 @@ impl BlockQueue {
 
 	/// Wait for unverified queue to be empty
 	pub fn flush(&self) {
-		let mut unverified = self.verification.unverified.lock();
-		while !unverified.is_empty() || !self.verification.verifying.lock().is_empty() {
-			self.empty.wait(&mut unverified);
+		let mut lock = self.verification.empty.lock().unwrap();
+		while !self.verification.unverified.lock().is_empty() || !self.verification.verifying.lock().is_empty() {
+			lock = self.empty.wait(lock).unwrap();
 		}
 	}
 
 	/// Check if the block is currently in the queue
 	pub fn block_status(&self, hash: &H256) -> BlockStatus {
-		if self.processing.read().contains(&hash) {
+		if self.processing.read().contains(hash) {
 			return BlockStatus::Queued;
 		}
-		if self.verification.bad.lock().contains(&hash) {
+		if self.verification.bad.lock().contains(hash) {
 			return BlockStatus::Bad;
 		}
 		BlockStatus::Unknown
@@ -340,7 +346,7 @@ impl BlockQueue {
 		bad.reserve(block_hashes.len());
 		for hash in block_hashes {
 			bad.insert(hash.clone());
-			processing.remove(&hash);
+			processing.remove(hash);
 		}
 
 		let mut new_verified = VecDeque::new();
@@ -362,7 +368,7 @@ impl BlockQueue {
 		}
 		let mut processing = self.processing.write();
 		for hash in block_hashes {
-			processing.remove(&hash);
+			processing.remove(hash);
 		}
 	}
diff --git a/ethcore/src/blockchain/best_block.rs b/ethcore/src/blockchain/best_block.rs
index 00c092713..aa1e1854e 100644
--- a/ethcore/src/blockchain/best_block.rs
+++ b/ethcore/src/blockchain/best_block.rs
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity.  If not, see <http://www.gnu.org/licenses/>.
 
+use util::bytes::Bytes;
 use util::numbers::{U256,H256};
 use header::BlockNumber;
 
@@ -25,5 +26,7 @@ pub struct BestBlock {
 	/// Best block number.
 	pub number: BlockNumber,
 	/// Best block total difficulty.
-	pub total_difficulty: U256
+	pub total_difficulty: U256,
+	/// Best block uncompressed bytes
+	pub block: Bytes,
 }
diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs
index c26b91766..30bd7a5b1 100644
--- a/ethcore/src/blockchain/blockchain.rs
+++ b/ethcore/src/blockchain/blockchain.rs
@@ -31,6 +31,7 @@ use types::tree_route::TreeRoute;
 use blockchain::update::ExtrasUpdate;
 use blockchain::{CacheSize, ImportRoute, Config};
 use db::{Writable, Readable, CacheUpdatePolicy};
+use client::{DB_COL_EXTRA, DB_COL_HEADERS, DB_COL_BODIES};
 
 const LOG_BLOOMS_LEVELS: usize = 3;
 const LOG_BLOOMS_ELEMENTS_PER_INDEX: usize = 16;
@@ -58,29 +59,37 @@ pub trait BlockProvider {
 
 	/// Get the partial-header of a block.
 	fn block_header(&self, hash: &H256) -> Option<Header> {
-		self.block(hash).map(|bytes| BlockView::new(&bytes).header())
+		self.block_header_data(hash).map(|header| decode(&header))
 	}
 
+	/// Get the header RLP of a block.
+	fn block_header_data(&self, hash: &H256) -> Option<Bytes>;
+
+	/// Get the block body (uncles and transactions).
+	fn block_body(&self, hash: &H256) -> Option<Bytes>;
+
 	/// Get a list of uncles for a given block.
 	/// Returns None if block does not exist.
 	fn uncles(&self, hash: &H256) -> Option<Vec<Header>> {
-		self.block(hash).map(|bytes| BlockView::new(&bytes).uncles())
+		self.block_body(hash).map(|bytes| BodyView::new(&bytes).uncles())
 	}
 
 	/// Get a list of uncle hashes for a given block.
 	/// Returns None if block does not exist.
 	fn uncle_hashes(&self, hash: &H256) -> Option<Vec<H256>> {
-		self.block(hash).map(|bytes| BlockView::new(&bytes).uncle_hashes())
+		self.block_body(hash).map(|bytes| BodyView::new(&bytes).uncle_hashes())
 	}
 
 	/// Get the number of given block's hash.
 	fn block_number(&self, hash: &H256) -> Option<BlockNumber> {
-		self.block(hash).map(|bytes| BlockView::new(&bytes).header_view().number())
+		self.block_details(hash).map(|details| details.number)
 	}
 
 	/// Get transaction with given transaction hash.
 	fn transaction(&self, address: &TransactionAddress) -> Option<LocalizedTransaction> {
-		self.block(&address.block_hash).and_then(|bytes| BlockView::new(&bytes).localized_transaction_at(address.index))
+		self.block_body(&address.block_hash)
+			.and_then(|bytes| self.block_number(&address.block_hash)
+			.and_then(|n| BodyView::new(&bytes).localized_transaction_at(&address.block_hash, n, address.index)))
 	}
 
 	/// Get transaction receipt.
@@ -91,7 +100,9 @@ pub trait BlockProvider {
 	/// Get a list of transactions for a given block.
 	/// Returns None if block does not exist.
 	fn transactions(&self, hash: &H256) -> Option<Vec<LocalizedTransaction>> {
-		self.block(hash).map(|bytes| BlockView::new(&bytes).localized_transactions())
+		self.block_body(hash)
+			.and_then(|bytes| self.block_number(hash)
+			.map(|n| BodyView::new(&bytes).localized_transactions(hash, n)))
 	}
 
 	/// Returns reference to genesis hash.
@@ -110,7 +121,8 @@ pub trait BlockProvider {
 
 #[derive(Debug, Hash, Eq, PartialEq, Clone)]
 enum CacheID {
-	Block(H256),
+	BlockHeader(H256),
+	BlockBody(H256),
 	BlockDetails(H256),
 	BlockHashes(BlockNumber),
 	TransactionAddresses(H256),
@@ -127,7 +139,7 @@ impl bc::group::BloomGroupDatabase for BlockChain {
 	fn blooms_at(&self, position: &bc::group::GroupPosition) -> Option<BloomGroup> {
 		let position = LogGroupPosition::from(position.clone());
 		self.note_used(CacheID::BlocksBlooms(position.clone()));
-		self.extras_db.read_with_cache(&self.blocks_blooms, &position).map(Into::into)
+		self.db.read_with_cache(DB_COL_EXTRA, &self.blocks_blooms, &position).map(Into::into)
 	}
 }
 
@@ -143,7 +155,8 @@ pub struct BlockChain {
 	best_block: RwLock<BestBlock>,
 
 	// block cache
-	blocks: RwLock<HashMap<H256, Bytes>>,
+	block_headers: RwLock<HashMap<H256, Bytes>>,
+	block_bodies: RwLock<HashMap<H256, Bytes>>,
 
 	// extra caches
 	block_details: RwLock<HashMap<H256, BlockDetails>>,
@@ -152,39 +165,96 @@ pub struct BlockChain {
 	blocks_blooms: RwLock<HashMap<LogGroupPosition, BloomGroup>>,
 	block_receipts: RwLock<HashMap<H256, BlockReceipts>>,
 
-	extras_db: Database,
-	blocks_db: Database,
+	db: Arc<Database>,
 
 	cache_man: RwLock<CacheManager>,
-
-	insert_lock: Mutex<()>
 }
 
 impl BlockProvider for BlockChain {
 	/// Returns true if the given block is known
 	/// (though not necessarily a part of the canon chain).
 	fn is_known(&self, hash: &H256) -> bool {
-		self.extras_db.exists_with_cache(&self.block_details, hash)
+		self.db.exists_with_cache(DB_COL_EXTRA, &self.block_details, hash)
 	}
 
 	/// Get raw block data
 	fn block(&self, hash: &H256) -> Option<Bytes> {
+		match (self.block_header_data(hash), self.block_body(hash)) {
+			(Some(header), Some(body)) => {
+				let mut block = RlpStream::new_list(3);
+				let body_rlp = Rlp::new(&body);
+				block.append_raw(&header, 1);
+				block.append_raw(body_rlp.at(0).as_raw(), 1);
+				block.append_raw(body_rlp.at(1).as_raw(), 1);
+				Some(block.out())
+			},
+			_ => None,
+		}
+	}
+
+	/// Get block header data
+	fn block_header_data(&self, hash: &H256) -> Option<Bytes> {
+		// Check cache first
 		{
-			let read = self.blocks.read();
+			let read = self.block_headers.read();
 			if let Some(v) = read.get(hash) {
 				return Some(v.clone());
 			}
 		}
 
-		let opt = self.blocks_db.get(hash)
+		// Check if it's the best block
+		{
+			let best_block = self.best_block.read();
+			if &best_block.hash == hash {
+				return Some(Rlp::new(&best_block.block).at(0).as_raw().to_vec());
+			}
+		}
+
+		// Read from DB and populate cache
+		let opt = self.db.get(DB_COL_HEADERS, hash)
 			.expect("Low level database error. Some issue with disk?");
 
-		self.note_used(CacheID::Block(hash.clone()));
+		self.note_used(CacheID::BlockHeader(hash.clone()));
 
 		match opt {
 			Some(b) => {
-				let bytes: Bytes = b.to_vec();
-				let mut write = self.blocks.write();
+				let bytes: Bytes = UntrustedRlp::new(&b).decompress(RlpType::Blocks).to_vec();
+				let mut write = self.block_headers.write();
+				write.insert(hash.clone(), bytes.clone());
+				Some(bytes)
+			},
+			None => None
+		}
+	}
+
+	/// Get block body data
+	fn block_body(&self, hash: &H256) -> Option<Bytes> {
+		// Check cache first
+		{
+			let read = self.block_bodies.read();
+			if let Some(v) = read.get(hash) {
+				return Some(v.clone());
+			}
+		}
+
+		// Check if it's the best block
+		{
+			let best_block = self.best_block.read();
+			if &best_block.hash == hash {
+				return Some(Self::block_to_body(&best_block.block));
+			}
+		}
+
+		// Read from DB and populate cache
+		let opt = self.db.get(DB_COL_BODIES, hash)
+			.expect("Low level database error. Some issue with disk?");
+
+		self.note_used(CacheID::BlockBody(hash.clone()));
+
+		match opt {
+			Some(b) => {
+				let bytes: Bytes = UntrustedRlp::new(&b).decompress(RlpType::Blocks).to_vec();
+				let mut write = self.block_bodies.write();
 				write.insert(hash.clone(), bytes.clone());
 				Some(bytes)
 			},
@@ -195,25 +265,25 @@ impl BlockProvider for BlockChain {
 	/// Get the familial details concerning a block.
 	fn block_details(&self, hash: &H256) -> Option<BlockDetails> {
 		self.note_used(CacheID::BlockDetails(hash.clone()));
-		self.extras_db.read_with_cache(&self.block_details, hash)
+		self.db.read_with_cache(DB_COL_EXTRA, &self.block_details, hash)
 	}
 
 	/// Get the hash of given block's number.
 	fn block_hash(&self, index: BlockNumber) -> Option<H256> {
 		self.note_used(CacheID::BlockHashes(index));
-		self.extras_db.read_with_cache(&self.block_hashes, &index)
+		self.db.read_with_cache(DB_COL_EXTRA, &self.block_hashes, &index)
 	}
 
 	/// Get the address of transaction with given hash.
 	fn transaction_address(&self, hash: &H256) -> Option<TransactionAddress> {
 		self.note_used(CacheID::TransactionAddresses(hash.clone()));
-		self.extras_db.read_with_cache(&self.transaction_addresses, hash)
+		self.db.read_with_cache(DB_COL_EXTRA, &self.transaction_addresses, hash)
 	}
 
 	/// Get receipts of block with given hash.
 	fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts> {
 		self.note_used(CacheID::BlockReceipts(hash.clone()));
-		self.extras_db.read_with_cache(&self.block_receipts, hash)
+		self.db.read_with_cache(DB_COL_EXTRA, &self.block_receipts, hash)
 	}
 
 	/// Returns numbers of blocks containing given bloom.
@@ -249,27 +319,7 @@ impl<'a> Iterator for AncestryIter<'a> {
 
 impl BlockChain {
 	/// Create new instance of blockchain from given Genesis
-	pub fn new(config: Config, genesis: &[u8], path: &Path) -> BlockChain {
-		// open extras db
-		let mut extras_path = path.to_path_buf();
-		extras_path.push("extras");
-		let extras_db = match config.db_cache_size {
-			None => Database::open_default(extras_path.to_str().unwrap()).unwrap(),
-			Some(cache_size) => Database::open(
-				&DatabaseConfig::with_cache(cache_size/2),
-				extras_path.to_str().unwrap()).unwrap(),
-		};
-
-		// open blocks db
-		let mut blocks_path = path.to_path_buf();
-		blocks_path.push("blocks");
-		let blocks_db = match config.db_cache_size {
-			None => Database::open_default(blocks_path.to_str().unwrap()).unwrap(),
-			Some(cache_size) => Database::open(
-				&DatabaseConfig::with_cache(cache_size/2),
-				blocks_path.to_str().unwrap()).unwrap(),
-		};
-
+	pub fn new(config: Config, genesis: &[u8], db: Arc<Database>) -> BlockChain {
 		let mut cache_man = CacheManager{cache_usage: VecDeque::new(), in_use: HashSet::new()};
 		(0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new()));
 
@@ -281,39 +331,21 @@ impl BlockChain {
 				elements_per_index: LOG_BLOOMS_ELEMENTS_PER_INDEX,
 			},
 			best_block: RwLock::new(BestBlock::default()),
-			blocks: RwLock::new(HashMap::new()),
+			block_headers: RwLock::new(HashMap::new()),
+			block_bodies: RwLock::new(HashMap::new()),
 			block_details: RwLock::new(HashMap::new()),
 			block_hashes: RwLock::new(HashMap::new()),
 			transaction_addresses: RwLock::new(HashMap::new()),
 			blocks_blooms: RwLock::new(HashMap::new()),
 			block_receipts: RwLock::new(HashMap::new()),
-			extras_db: extras_db,
-			blocks_db: blocks_db,
+			db: db.clone(),
 			cache_man: RwLock::new(cache_man),
-			insert_lock: Mutex::new(()),
 		};
 
 		// load best block
-		let best_block_hash = match bc.extras_db.get(b"best").unwrap() {
+		let best_block_hash = match bc.db.get(DB_COL_EXTRA, b"best").unwrap() {
 			Some(best) => {
-				let new_best = H256::from_slice(&best);
-				if !bc.blocks_db.get(&new_best).unwrap().is_some() {
-					warn!("Best block {} not found", new_best.hex());
-				}
-				/* TODO: enable this once the best block issue is resolved
-				while !bc.blocks_db.get(&new_best).unwrap().is_some() {
-					match bc.rewind() {
-						Some(h) => {
-							new_best = h;
-						}
-						None => {
-							warn!("Can't rewind blockchain");
-							break;
-						}
-					}
-					info!("Restored mismatched best block. Was: {}, new: {}", H256::from_slice(&best).hex(), new_best.hex());
-				}*/
-				new_best
+				H256::from_slice(&best)
 			}
 			None => {
 				// best block does not exist
@@ -329,23 +361,32 @@ impl BlockChain {
 					children: vec![]
 				};
 
-				bc.blocks_db.put(&hash, genesis).unwrap();
-
-				let batch = DBTransaction::new();
-				batch.write(&hash, &details);
-				batch.write(&header.number(), &hash);
-				batch.put(b"best", &hash).unwrap();
-				bc.extras_db.write(batch).unwrap();
+				let batch = DBTransaction::new(&db);
+				batch.put(DB_COL_HEADERS, &hash, block.header_rlp().as_raw()).unwrap();
+				batch.put(DB_COL_BODIES, &hash, &Self::block_to_body(&genesis)).unwrap();
+				batch.write(DB_COL_EXTRA, &hash, &details);
+				batch.write(DB_COL_EXTRA, &header.number(), &hash);
+				batch.put(DB_COL_EXTRA, b"best", &hash).unwrap();
+				bc.db.write(batch).expect("Low level database error. Some issue with disk?");
 
 				hash
 			}
 		};
 
 		{
+			// Fetch best block details
+			let best_block_number = bc.block_number(&best_block_hash).unwrap();
+			let best_block_total_difficulty = bc.block_details(&best_block_hash).unwrap().total_difficulty;
+			let best_block_rlp = bc.block(&best_block_hash).unwrap();
+
+			// and write them
 			let mut best_block = bc.best_block.write();
-			best_block.number = bc.block_number(&best_block_hash).unwrap();
-			best_block.total_difficulty = bc.block_details(&best_block_hash).unwrap().total_difficulty;
-			best_block.hash = best_block_hash;
+			*best_block = BestBlock {
+				number: best_block_number,
+				total_difficulty: best_block_total_difficulty,
+				hash: best_block_hash,
+				block: best_block_rlp,
+			};
 		}
 
 		bc
@@ -354,44 +395,52 @@ impl BlockChain {
 	/// Returns true if the given parent block has given child
 	/// (though not necessarily a part of the canon chain).
 	fn is_known_child(&self, parent: &H256, hash: &H256) -> bool {
-		self.extras_db.read_with_cache(&self.block_details, parent).map_or(false, |d| d.children.contains(hash))
+		self.db.read_with_cache(DB_COL_EXTRA, &self.block_details, parent).map_or(false, |d| d.children.contains(hash))
 	}
 
 	/// Rewind to a previous block
 	#[cfg(test)]
 	fn rewind(&self) -> Option<H256> {
 		use db::Key;
-		let batch = DBTransaction::new();
+		let batch = self.db.transaction();
 		// track back to the best block we have in the blocks database
-		if let Some(best_block_hash) = self.extras_db.get(b"best").unwrap() {
+		if let Some(best_block_hash) = self.db.get(DB_COL_EXTRA, b"best").unwrap() {
 			let best_block_hash = H256::from_slice(&best_block_hash);
 			if best_block_hash == self.genesis_hash() {
 				return None;
 			}
-			if let Some(extras) = self.extras_db.read(&best_block_hash) as Option<BlockDetails> {
+			if let Some(extras) = self.db.read(DB_COL_EXTRA, &best_block_hash) as Option<BlockDetails> {
 				type DetailsKey = Key<BlockDetails, Target=H264>;
-				batch.delete(&(DetailsKey::key(&best_block_hash))).unwrap();
+				batch.delete(DB_COL_EXTRA, &(DetailsKey::key(&best_block_hash))).unwrap();
 				let hash = extras.parent;
 				let range = extras.number as bc::Number .. extras.number as bc::Number;
 				let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
 				let changes = chain.replace(&range, vec![]);
 				for (k, v) in changes.into_iter() {
-					batch.write(&LogGroupPosition::from(k), &BloomGroup::from(v));
+					batch.write(DB_COL_EXTRA, &LogGroupPosition::from(k), &BloomGroup::from(v));
 				}
-				batch.put(b"best", &hash).unwrap();
+				batch.put(DB_COL_EXTRA, b"best", &hash).unwrap();
+
+				let best_block_total_difficulty = self.block_details(&hash).unwrap().total_difficulty;
+				let best_block_rlp = self.block(&hash).unwrap();
+
 				let mut best_block = self.best_block.write();
-				best_block.number = extras.number - 1;
-				best_block.total_difficulty = self.block_details(&hash).unwrap().total_difficulty;
-				best_block.hash = hash;
+				*best_block = BestBlock {
+					number: extras.number - 1,
+					total_difficulty: best_block_total_difficulty,
+					hash: hash,
+					block: best_block_rlp,
+				};
 				// update parent extras
-				if let Some(mut details) = self.extras_db.read(&hash) as Option<BlockDetails> {
+				if let Some(mut details) = self.db.read(DB_COL_EXTRA, &hash) as Option<BlockDetails> {
 					details.children.clear();
-					batch.write(&hash, &details);
+					batch.write(DB_COL_EXTRA, &hash, &details);
 				}
-				self.extras_db.write(batch).unwrap();
+				self.db.write(batch).expect("Writing to db failed");
 				self.block_details.write().clear();
 				self.block_hashes.write().clear();
-				self.blocks.write().clear();
+				self.block_headers.write().clear();
+				self.block_bodies.write().clear();
 				self.block_receipts.write().clear();
 				return Some(hash);
 			}
@@ -498,7 +547,7 @@ impl BlockChain {
 	/// Inserts the block into backing cache database.
 	/// Expects the block to be valid and already verified.
 	/// If the block is already known, does nothing.
-	pub fn insert_block(&self, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
+	pub fn insert_block(&self, batch: &DBTransaction, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
 		// create views onto rlp
 		let block = BlockView::new(bytes);
 		let header = block.header_view();
@@ -508,45 +557,99 @@ impl BlockChain {
 			return ImportRoute::none();
 		}
 
-		let _lock = self.insert_lock.lock();
+		let block_rlp = UntrustedRlp::new(bytes);
+		let compressed_header = block_rlp.at(0).unwrap().compress(RlpType::Blocks);
+		let compressed_body = UntrustedRlp::new(&Self::block_to_body(bytes)).compress(RlpType::Blocks);
+
 		// store block in db
-		self.blocks_db.put(&hash, &bytes).unwrap();
+		batch.put(DB_COL_HEADERS, &hash, &compressed_header).unwrap();
+		batch.put(DB_COL_BODIES, &hash, &compressed_body).unwrap();
 
 		let info = self.block_info(bytes);
 
-		self.apply_update(ExtrasUpdate {
+		if let BlockLocation::BranchBecomingCanonChain(ref d) = info.location {
+			info!(target: "reorg", "Reorg to {} ({} {} {})",
+				Colour::Yellow.bold().paint(format!("#{} {}", info.number, info.hash)),
+				Colour::Red.paint(d.retracted.iter().join(" ")),
+				Colour::White.paint(format!("#{} {}", self.block_details(&d.ancestor).expect("`ancestor` is in the route; qed").number, d.ancestor)),
+				Colour::Green.paint(d.enacted.iter().join(" "))
+			);
+		}
+
+		self.apply_update(batch, ExtrasUpdate {
 			block_hashes: self.prepare_block_hashes_update(bytes, &info),
 			block_details: self.prepare_block_details_update(bytes, &info),
 			block_receipts: self.prepare_block_receipts_update(receipts, &info),
 			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
 			info: info.clone(),
+			block: bytes,
 		});
 
 		ImportRoute::from(info)
 	}
 
-	/// Applies extras update.
-	fn apply_update(&self, update: ExtrasUpdate) {
-		let batch = DBTransaction::new();
+	/// Get inserted block info which is critical to prepare extras updates.
+	fn block_info(&self, block_bytes: &[u8]) -> BlockInfo {
+		let block = BlockView::new(block_bytes);
+		let header = block.header_view();
+		let hash = block.sha3();
+		let number = header.number();
+		let parent_hash = header.parent_hash();
+		let parent_details = self.block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash));
+		let total_difficulty = parent_details.total_difficulty + header.difficulty();
+		let is_new_best = total_difficulty > self.best_block_total_difficulty();
+
+		BlockInfo {
+			hash: hash,
+			number: number,
+			total_difficulty: total_difficulty,
+			location: if is_new_best {
+				// on new best block we need to make sure that all ancestors
+				// are moved to "canon chain"
+				// find the route between old best block and the new one
+				let best_hash = self.best_block_hash();
+				let route = self.tree_route(best_hash, parent_hash);
+
+				assert_eq!(number, parent_details.number + 1);
+
+				match route.blocks.len() {
+					0 => BlockLocation::CanonChain,
+					_ => {
+						let retracted = route.blocks.iter().take(route.index).cloned().collect::<Vec<H256>>().into_iter().collect::<Vec<H256>>();
+						let enacted = route.blocks.into_iter().skip(route.index).collect::<Vec<H256>>();
+						BlockLocation::BranchBecomingCanonChain(BranchBecomingCanonChainData {
+							ancestor: route.ancestor,
+							enacted: enacted,
+							retracted: retracted,
+						})
+					}
+				}
+			} else {
+				BlockLocation::Branch
+			}
+		}
+	}
+
+	/// Applies extras update.
+	fn apply_update(&self, batch: &DBTransaction, update: ExtrasUpdate) {
 		{
 			for hash in update.block_details.keys().cloned() {
 				self.note_used(CacheID::BlockDetails(hash));
 			}
 
 			let mut write_details = self.block_details.write();
-			batch.extend_with_cache(&mut *write_details, update.block_details, CacheUpdatePolicy::Overwrite);
+			batch.extend_with_cache(DB_COL_EXTRA, &mut *write_details, update.block_details, CacheUpdatePolicy::Overwrite);
 		}
 
 		{
 			let mut write_receipts = self.block_receipts.write();
-			batch.extend_with_cache(&mut *write_receipts, update.block_receipts, CacheUpdatePolicy::Remove);
+			batch.extend_with_cache(DB_COL_EXTRA, &mut *write_receipts, update.block_receipts, CacheUpdatePolicy::Remove);
 		}
 
 		{
 			let mut write_blocks_blooms = self.blocks_blooms.write();
-			batch.extend_with_cache(&mut *write_blocks_blooms, update.blocks_blooms, CacheUpdatePolicy::Remove);
+			batch.extend_with_cache(DB_COL_EXTRA, &mut *write_blocks_blooms, update.blocks_blooms, CacheUpdatePolicy::Remove);
 		}
 
 		// These cached values must be updated last with all three locks taken to avoid
@@ -557,11 +660,12 @@ impl BlockChain {
 			match update.info.location {
 				BlockLocation::Branch => (),
 				_ => {
-					batch.put(b"best", &update.info.hash).unwrap();
+					batch.put(DB_COL_EXTRA, b"best", &update.info.hash).unwrap();
 					*best_block = BestBlock {
 						hash: update.info.hash,
 						number: update.info.number,
-						total_difficulty: update.info.total_difficulty
+						total_difficulty: update.info.total_difficulty,
+						block: update.block.to_vec(),
 					};
 				}
 			}
@@ -569,11 +673,8 @@ impl BlockChain {
 			let mut write_hashes = self.block_hashes.write();
 			let mut write_txs = self.transaction_addresses.write();
 
-			batch.extend_with_cache(&mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Remove);
-			batch.extend_with_cache(&mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Remove);
-
-			// update extras database
-			self.extras_db.write(batch).unwrap();
+			batch.extend_with_cache(DB_COL_EXTRA, &mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Remove);
+			batch.extend_with_cache(DB_COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Remove);
 		}
 	}
 
@@ -582,7 +683,7 @@ impl BlockChain {
 		if self.is_known(&first) {
 			Some(AncestryIter {
 				current: first,
-				chain: &self,
+				chain: self,
 			})
 		} else {
 			None
@@ -613,48 +714,6 @@ impl BlockChain {
 		Some(ret)
 	}
 
-	/// Get inserted block info which is critical to prepare extras updates.
-	fn block_info(&self, block_bytes: &[u8]) -> BlockInfo {
-		let block = BlockView::new(block_bytes);
-		let header = block.header_view();
-		let hash = block.sha3();
-		let number = header.number();
-		let parent_hash = header.parent_hash();
-		let parent_details = self.block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash));
-		let total_difficulty = parent_details.total_difficulty + header.difficulty();
-		let is_new_best = total_difficulty > self.best_block_total_difficulty();
-
-		BlockInfo {
-			hash: hash,
-			number: number,
-			total_difficulty: total_difficulty,
-			location: if is_new_best {
-				// on new best block we need to make sure that all ancestors
-				// are moved to "canon chain"
-				// find the route between old best block and the new one
-				let best_hash = self.best_block_hash();
-				let route = self.tree_route(best_hash, parent_hash);
-
-				assert_eq!(number, parent_details.number + 1);
-
-				match route.blocks.len() {
-					0 => BlockLocation::CanonChain,
-					_ => {
-						let retracted = route.blocks.iter().take(route.index).cloned().collect::<Vec<H256>>();
-
-						BlockLocation::BranchBecomingCanonChain(BranchBecomingCanonChainData {
-							ancestor: route.ancestor,
-							enacted: route.blocks.into_iter().skip(route.index).collect(),
-							retracted: retracted.into_iter().rev().collect(),
-						})
-					}
-				}
-			} else {
-				BlockLocation::Branch
-			}
-		}
-	}
-
 	/// This function returns modified block hashes.
 	fn prepare_block_hashes_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<BlockNumber, H256> {
 		let mut block_hashes = HashMap::new();
@@ -668,7 +727,7 @@ impl BlockChain {
 				block_hashes.insert(number, info.hash.clone());
 			},
 			BlockLocation::BranchBecomingCanonChain(ref data) => {
-				let ancestor_number = self.block_number(&data.ancestor).unwrap();
+				let ancestor_number = self.block_number(&data.ancestor).expect("Block number of ancestor is always in DB");
 				let start_number = ancestor_number + 1;
 
 				for (index, hash) in data.enacted.iter().cloned().enumerate() {
@@ -762,8 +821,8 @@ impl BlockChain {
 		let range = start_number as bc::Number..self.best_block_number() as bc::Number;
 
 		let mut blooms: Vec<bc::Bloom> = data.enacted.iter()
-			.map(|hash| self.block(hash).unwrap())
-			.map(|bytes| BlockView::new(&bytes).header_view().log_bloom())
+			.map(|hash| self.block_header_data(hash).unwrap())
+			.map(|bytes| HeaderView::new(&bytes).log_bloom())
 			.map(Bloom::from)
 			.map(Into::into)
 			.collect();
@@ -795,10 +854,16 @@ impl BlockChain {
 		self.best_block.read().total_difficulty
 	}
 
+	/// Get best block header
+	pub fn best_block_header(&self) -> Bytes {
+		let block = self.best_block.read();
+		BlockView::new(&block.block).header_view().rlp().as_raw().to_vec()
+	}
+
 	/// Get current cache size.
 	pub fn cache_size(&self) -> CacheSize {
 		CacheSize {
-			blocks: self.blocks.read().heap_size_of_children(),
+			blocks: self.block_headers.read().heap_size_of_children() + self.block_bodies.read().heap_size_of_children(),
 			block_details: self.block_details.read().heap_size_of_children(),
 			transaction_addresses: self.transaction_addresses.read().heap_size_of_children(),
 			blocks_blooms: self.blocks_blooms.read().heap_size_of_children(),
@@ -823,11 +888,23 @@ impl BlockChain {
 
 	/// Ticks our cache system and throws out any old data.
 	pub fn collect_garbage(&self) {
-		if self.cache_size().total() < self.pref_cache_size.load(AtomicOrder::Relaxed) { return; }
+		if self.cache_size().total() < self.pref_cache_size.load(AtomicOrder::Relaxed) {
+			// rotate cache
+			let mut cache_man = self.cache_man.write();
+			const AVERAGE_BYTES_PER_CACHE_ENTRY: usize = 400; //estimated
+			if cache_man.cache_usage[0].len() > self.pref_cache_size.load(AtomicOrder::Relaxed) / COLLECTION_QUEUE_SIZE / AVERAGE_BYTES_PER_CACHE_ENTRY {
+				trace!("Cache rotation, cache_size = {}", self.cache_size().total());
+				let cache = cache_man.cache_usage.pop_back().unwrap();
+				cache_man.cache_usage.push_front(cache);
+			}
+			return;
+		}
 
-		for _ in 0..COLLECTION_QUEUE_SIZE {
+		for i in 0..COLLECTION_QUEUE_SIZE {
 			{
-				let mut blocks = self.blocks.write();
+				trace!("Cache cleanup round started {}, cache_size = {}", i, self.cache_size().total());
+				let mut block_headers = self.block_headers.write();
+				let mut block_bodies = self.block_bodies.write();
 				let mut block_details = self.block_details.write();
 				let mut block_hashes = self.block_hashes.write();
 				let mut transaction_addresses = self.transaction_addresses.write();
@@ -838,7 +915,8 @@ impl BlockChain {
 				for id in cache_man.cache_usage.pop_back().unwrap().into_iter() {
 					cache_man.in_use.remove(&id);
 					match id {
-						CacheID::Block(h) => { blocks.remove(&h); },
+						CacheID::BlockHeader(h) => { block_headers.remove(&h); },
+						CacheID::BlockBody(h) => { block_bodies.remove(&h); },
 						CacheID::BlockDetails(h) => { block_details.remove(&h); }
 						CacheID::BlockHashes(h) => { block_hashes.remove(&h); }
 						CacheID::TransactionAddresses(h) => { transaction_addresses.remove(&h); }
@@ -851,32 +929,74 @@ impl BlockChain {
 
 				// TODO: handle block_hashes properly.
 				block_hashes.clear();
 
-				blocks.shrink_to_fit();
+				block_headers.shrink_to_fit();
+				block_bodies.shrink_to_fit();
 				block_details.shrink_to_fit();
 				block_hashes.shrink_to_fit();
 				transaction_addresses.shrink_to_fit();
 				blocks_blooms.shrink_to_fit();
 				block_receipts.shrink_to_fit();
 			}
+			trace!("Cache cleanup round complete {}, cache_size = {}", i, self.cache_size().total());
 			if self.cache_size().total() < self.max_cache_size.load(AtomicOrder::Relaxed) { break; }
 		}
 
 		// TODO: m_lastCollection = chrono::system_clock::now();
 	}
+
+	/// Create a block body from a block.
+	pub fn block_to_body(block: &[u8]) -> Bytes {
+		let mut body = RlpStream::new_list(2);
+		let block_rlp = Rlp::new(block);
+		body.append_raw(block_rlp.at(1).as_raw(), 1);
+		body.append_raw(block_rlp.at(2).as_raw(), 1);
+		body.out()
+	}
 }
 
 #[cfg(test)]
 mod tests {
 	#![cfg_attr(feature="dev", allow(similar_names))]
 	use std::str::FromStr;
+	use std::sync::Arc;
 	use rustc_serialize::hex::FromHex;
+	use util::{Database, DatabaseConfig};
 	use util::hash::*;
 	use util::sha3::Hashable;
+	use receipt::Receipt;
 	use blockchain::{BlockProvider, BlockChain, Config, ImportRoute};
 	use tests::helpers::*;
 	use devtools::*;
 	use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
 	use views::BlockView;
+	use client;
+
+	fn new_db(path: &str) -> Arc<Database> {
+		Arc::new(Database::open(&DatabaseConfig::with_columns(client::DB_NO_OF_COLUMNS), path).unwrap())
+	}
+
+	#[test]
+	fn should_cache_best_block() {
+		// given
+		let mut canon_chain = ChainGenerator::default();
+		let mut finalizer = BlockFinalizer::default();
+		let genesis = canon_chain.generate(&mut finalizer).unwrap();
+		let first = canon_chain.generate(&mut finalizer).unwrap();
+
+		let temp = RandomTempPath::new();
+		let db = new_db(temp.as_str());
+		let bc = BlockChain::new(Config::default(), &genesis, db.clone());
+		assert_eq!(bc.best_block_number(), 0);
+
+		// when
+		let batch = db.transaction();
+		bc.insert_block(&batch, &first, vec![]);
+		// NOTE no db.write here (we want to check if best block is cached)
+
+		// then
+		assert_eq!(bc.best_block_number(), 1);
+		assert!(bc.block(&bc.best_block_hash()).is_some(), "Best block should be queryable even without DB write.");
+	}
 
 	#[test]
 	fn basic_blockchain_insert() {
@@ -888,16 +1008,18 @@ mod tests {
 		let first_hash = BlockView::new(&first).header_view().sha3();
 
 		let temp = RandomTempPath::new();
-		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
+		let db = new_db(temp.as_str());
+		let bc = BlockChain::new(Config::default(), &genesis, db.clone());
 
 		assert_eq!(bc.genesis_hash(), genesis_hash.clone());
-		assert_eq!(bc.best_block_number(), 0);
 		assert_eq!(bc.best_block_hash(), genesis_hash.clone());
 		assert_eq!(bc.block_hash(0), Some(genesis_hash.clone()));
 		assert_eq!(bc.block_hash(1), None);
 		assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![]);
 
-		bc.insert_block(&first, vec![]);
+		let batch = db.transaction();
+		bc.insert_block(&batch, &first, vec![]);
+		db.write(batch).unwrap();
 
 		assert_eq!(bc.block_hash(0), Some(genesis_hash.clone()));
 		assert_eq!(bc.best_block_number(), 1);
@@ -916,14 +1038,17 @@ mod tests {
 		let genesis_hash = BlockView::new(&genesis).header_view().sha3();
 
 		let temp = RandomTempPath::new();
-		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
+		let db = new_db(temp.as_str());
+		let bc = BlockChain::new(Config::default(), &genesis, db.clone());
 
 		let mut block_hashes = vec![genesis_hash.clone()];
+		let batch = db.transaction();
 		for _ in 0..10 {
 			let block = canon_chain.generate(&mut finalizer).unwrap();
 			block_hashes.push(BlockView::new(&block).header_view().sha3());
-			bc.insert_block(&block, vec![]);
+			bc.insert_block(&batch, &block, vec![]);
 		}
+		db.write(batch).unwrap();
 
 		block_hashes.reverse();
@@ -948,17 +1073,21 @@ mod tests {
 		let b5a = canon_chain.generate(&mut finalizer).unwrap();
 
 		let temp = RandomTempPath::new();
-		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
-		bc.insert_block(&b1a, vec![]);
-		bc.insert_block(&b1b, vec![]);
-		bc.insert_block(&b2a, vec![]);
-		bc.insert_block(&b2b, vec![]);
-		bc.insert_block(&b3a, vec![]);
-		bc.insert_block(&b3b, vec![]);
-		bc.insert_block(&b4a, vec![]);
-		bc.insert_block(&b4b, vec![]);
-		bc.insert_block(&b5a, vec![]);
-		bc.insert_block(&b5b, vec![]);
+		let db = new_db(temp.as_str());
+		let bc = BlockChain::new(Config::default(), &genesis, db.clone());
+
+		let batch = db.transaction();
+		bc.insert_block(&batch, &b1a, vec![]);
+		bc.insert_block(&batch, &b1b, vec![]);
+		bc.insert_block(&batch, &b2a, vec![]);
+		bc.insert_block(&batch, &b2b, vec![]);
+		bc.insert_block(&batch, &b3a, vec![]);
+		bc.insert_block(&batch, &b3b, vec![]);
+		bc.insert_block(&batch, &b4a, vec![]);
+		bc.insert_block(&batch, &b4b, vec![]);
+		bc.insert_block(&batch, &b5a, vec![]);
+		bc.insert_block(&batch, &b5b, vec![]);
+		db.write(batch).unwrap();
 
 		assert_eq!(
 			[&b4b, &b3b, &b2b].iter().map(|b| BlockView::new(b).header()).collect::<Vec<_>>(),
@@ -989,11 +1118,17 @@ mod tests {
 		let best_block_hash = b3a_hash.clone();
 
 		let temp = RandomTempPath::new();
-		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
-		let ir1 = bc.insert_block(&b1, vec![]);
-		let ir2 = bc.insert_block(&b2, vec![]);
-		let ir3b = bc.insert_block(&b3b, vec![]);
-		let ir3a = bc.insert_block(&b3a, vec![]);
+		let db = new_db(temp.as_str());
+		let bc = BlockChain::new(Config::default(), &genesis, db.clone());
+
+		let batch = db.transaction();
+		let ir1 = bc.insert_block(&batch, &b1, vec![]);
+		let ir2 = bc.insert_block(&batch, &b2, vec![]);
+		let ir3b = bc.insert_block(&batch, &b3b, vec![]);
+		db.write(batch).unwrap();
+		let batch = db.transaction();
+		let ir3a = bc.insert_block(&batch, &b3a, vec![]);
+		db.write(batch).unwrap();
 
 		assert_eq!(ir1, ImportRoute {
 			enacted: vec![b1_hash],
@@ -1094,14 +1229,19 @@ mod tests {
 		let temp = RandomTempPath::new();
 		{
-			let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
+			let db = new_db(temp.as_str());
+			let bc = BlockChain::new(Config::default(), &genesis, db.clone());
 			assert_eq!(bc.best_block_hash(), genesis_hash);
-			bc.insert_block(&first, vec![]);
+			let batch = db.transaction();
+			bc.insert_block(&batch, &first, vec![]);
+			db.write(batch).unwrap();
 			assert_eq!(bc.best_block_hash(), first_hash);
 		}
 
 		{
-			let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
+			let db = new_db(temp.as_str());
+			let bc = BlockChain::new(Config::default(), &genesis, db.clone());
+
 			assert_eq!(bc.best_block_hash(), first_hash);
 		}
 	}
@@ -1154,8 +1294,11 @@ mod tests {
 		let b1_hash = H256::from_str("f53f268d23a71e85c7d6d83a9504298712b84c1a2ba220441c86eeda0bf0b6e3").unwrap();
 
 		let temp = RandomTempPath::new();
-		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
-		bc.insert_block(&b1, vec![]);
+		let db = new_db(temp.as_str());
+		let bc = BlockChain::new(Config::default(), &genesis, db.clone());
+		let batch = db.transaction();
+		bc.insert_block(&batch, &b1, vec![]);
+		db.write(batch).unwrap();
 
 		let transactions = bc.transactions(&b1_hash).unwrap();
 		assert_eq!(transactions.len(), 7);
@@ -1164,6 +1307,13 @@ mod tests {
 		}
 	}
 
+	fn insert_block(db: &Arc<Database>, bc: &BlockChain, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
+		let batch = db.transaction();
+		let res = bc.insert_block(&batch, bytes, receipts);
+		db.write(batch).unwrap();
+		res
+	}
+
 	#[test]
 	fn test_bloom_filter_simple() {
 		// TODO: From here
@@ -1185,27 +1335,28 @@ mod tests {
 		let b2a = canon_chain.with_bloom(bloom_ba.clone()).generate(&mut finalizer).unwrap();
 
 		let temp = RandomTempPath::new();
-		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
+		let db = new_db(temp.as_str());
+		let bc = BlockChain::new(Config::default(), &genesis, db.clone());
 
 		let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
 		let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
 		assert_eq!(blocks_b1, vec![]);
 		assert_eq!(blocks_b2, vec![]);
 
-		bc.insert_block(&b1, vec![]);
+		insert_block(&db, &bc, &b1, vec![]);
 		let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
 		let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
 		assert_eq!(blocks_b1, vec![1]);
 		assert_eq!(blocks_b2, vec![]);
 
-		bc.insert_block(&b2, vec![]);
+		insert_block(&db, &bc, &b2, vec![]);
 		let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
 		let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
 		assert_eq!(blocks_b1, vec![1]);
 		assert_eq!(blocks_b2, vec![2]);
 
 		// hasn't been forked yet
-		bc.insert_block(&b1a, vec![]);
+		insert_block(&db, &bc, &b1a, vec![]);
 		let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
 		let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
 		let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
@@ -1214,7 +1365,7 @@ mod tests {
 		assert_eq!(blocks_ba, vec![]);
 
 		// fork has happend
-		bc.insert_block(&b2a, vec![]);
+		insert_block(&db, &bc, &b2a, vec![]);
 		let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
 		let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
 		let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
@@ -1223,7 +1374,7 @@ mod tests {
 		assert_eq!(blocks_ba, vec![1, 2]);
 
 		// fork back
-		bc.insert_block(&b3, vec![]);
+		insert_block(&db, &bc, &b3, vec![]);
 		let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
 		let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
 		let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
@@ -1241,21 +1392,25 @@ mod tests {
 
 		let temp = RandomTempPath::new();
 		{
-			let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
+			let db = new_db(temp.as_str());
+			let bc = BlockChain::new(Config::default(), &genesis, db.clone());
 			let uncle = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
 
+			let batch = db.transaction();
 			// create a longer fork
 			for _ in 0..5 {
 				let canon_block = canon_chain.generate(&mut finalizer).unwrap();
-				bc.insert_block(&canon_block, vec![]);
+				bc.insert_block(&batch, &canon_block, vec![]);
 			}
 
 			assert_eq!(bc.best_block_number(), 5);
-			bc.insert_block(&uncle, vec![]);
+			bc.insert_block(&batch, &uncle, vec![]);
+			db.write(batch).unwrap();
 		}
 
 		// re-loading the blockchain should load the correct best block.
-		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
+		let db = new_db(temp.as_str());
+		let bc = BlockChain::new(Config::default(), &genesis, db.clone());
 		assert_eq!(bc.best_block_number(), 5);
 	}
 
@@ -1271,10 +1426,13 @@ mod tests {
 		let second_hash = BlockView::new(&second).header_view().sha3();
 
 		let temp = RandomTempPath::new();
-		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
+		let db = new_db(temp.as_str());
+		let bc = BlockChain::new(Config::default(), &genesis, db.clone());
 
-		bc.insert_block(&first, vec![]);
-		bc.insert_block(&second, vec![]);
+		let batch = db.transaction();
+		bc.insert_block(&batch, &first, vec![]);
+		bc.insert_block(&batch, &second, vec![]);
+		db.write(batch).unwrap();
 
 		assert_eq!(bc.rewind(), Some(first_hash.clone()));
 		assert!(!bc.is_known(&second_hash));
diff --git a/ethcore/src/blockchain/config.rs b/ethcore/src/blockchain/config.rs
index e063d4269..1a0ab9d42 100644
--- a/ethcore/src/blockchain/config.rs
+++ b/ethcore/src/blockchain/config.rs
@@ -17,7 +17,7 @@
 //! Blockchain configuration.
 
 /// Blockchain configuration.
-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
 pub struct Config {
 	/// Preferred cache size in bytes.
 	pub pref_cache_size: usize,
diff --git a/ethcore/src/blockchain/update.rs b/ethcore/src/blockchain/update.rs
index 029d0d377..962365338 100644
--- a/ethcore/src/blockchain/update.rs
+++ b/ethcore/src/blockchain/update.rs
@@ -6,9 +6,11 @@ use blooms::BloomGroup;
 use super::extras::{BlockDetails, BlockReceipts, TransactionAddress, LogGroupPosition};
 /// Block extras update info.
-pub struct ExtrasUpdate {
+pub struct ExtrasUpdate<'a> {
 	/// Block info.
 	pub info: BlockInfo,
+	/// Current block uncompressed rlp bytes
+	pub block: &'a [u8],
 	/// Modified block hashes.
 	pub block_hashes: HashMap<BlockNumber, H256>,
 	/// Modified block details.
diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs
index 0bb387e74..3b4a40d90 100644
--- a/ethcore/src/client/client.rs
+++ b/ethcore/src/client/client.rs
@@ -17,16 +17,16 @@ use std::collections::{HashSet, HashMap, VecDeque};
 use std::ops::Deref;
 use std::sync::{Arc, Weak};
-use std::path::{Path, PathBuf};
+use std::path::{Path};
 use std::fmt;
 use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
 use std::time::{Instant};
 use time::precise_time_ns;
 // util
-use util::{journaldb, rlp, Bytes, Stream, View, PerfTimer, Itertools, Mutex, RwLock};
+use util::{journaldb, rlp, Bytes, View, PerfTimer, Itertools, Mutex, RwLock};
 use util::journaldb::JournalDB;
-use util::rlp::{RlpStream, Rlp, UntrustedRlp};
+use util::rlp::{UntrustedRlp};
 use util::numbers::*;
 use util::panics::*;
 use util::io::*;
@@ -34,14 +34,13 @@ use util::sha3::*;
 use util::kvdb::*;
 // other
-use views::BlockView;
-use error::{ImportError, ExecutionError, BlockError, ImportResult};
+use views::{BlockView, HeaderView, BodyView};
+use error::{ImportError, ExecutionError, ReplayError, BlockError, ImportResult};
 use header::BlockNumber;
 use state::State;
 use spec::Spec;
 use basic_types::Seal;
-use engine::Engine;
-use views::HeaderView;
+use engines::Engine;
 use service::ClientIoMessage;
 use env_info::LastHashes;
 use verification;
@@ -53,8 +52,7 @@ use types::filter::Filter;
 use log_entry::LocalizedLogEntry;
 use block_queue::{BlockQueue, BlockQueueInfo};
 use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute};
-use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig,
-	DatabaseCompactionProfile, BlockChainClient, MiningBlockChainClient,
+use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig, BlockChainClient, MiningBlockChainClient,
 	TraceFilter, CallAnalytics, BlockImportError, Mode, ChainNotify};
 use client::Error as ClientError;
 use env_info::EnvInfo;
@@ -62,6 +60,7 @@ use executive::{Executive, Executed, TransactOptions, contract_address};
 use receipt::LocalizedReceipt;
 use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase};
 use trace;
+use trace::FlatTransactionTraces;
 use evm::Factory as EvmFactory;
 use miner::{Miner, MinerService};
 use util::TrieFactory;
@@ -123,6 +122,7 @@ pub struct Client {
 	chain: Arc<BlockChain>,
 	tracedb: Arc<TraceDB<BlockChain>>,
 	engine: Arc<Box<Engine>>,
+	db: Arc<Database>,
 	state_db: Mutex<Box<JournalDB>>,
 	block_queue: BlockQueue,
 	report: RwLock<ClientReport>,
@@ -141,26 +141,23 @@ pub struct Client {
 }
 const HISTORY: u64 = 1200;
-// DO NOT TOUCH THIS ANY MORE UNLESS YOU REALLY KNOW WHAT YOU'RE DOING.
-// Altering it will force a blanket DB update for *all* JournalDB-derived
-// databases.
-// Instead, add/upgrade the version string of the individual JournalDB-derived database
-// of which you actually want force an upgrade.
-const CLIENT_DB_VER_STR: &'static str = "5.3";
-
-/// Get the path for the databases given the root path and information on the databases.
-pub fn get_db_path(path: &Path, pruning: journaldb::Algorithm, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf {
-	let mut dir = path.to_path_buf();
-	dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default()));
-	//TODO: sec/fat: pruned/full versioning
-	// version here is a bit useless now, since it's controlled only be the pruning algo.
-	dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, pruning));
-	dir
-}
+// database columns
+/// Column for State
+pub const DB_COL_STATE: Option<u32> = Some(0);
+/// Column for Block headers
+pub const DB_COL_HEADERS: Option<u32> = Some(1);
+/// Column for Block bodies
+pub const DB_COL_BODIES: Option<u32> = Some(2);
+/// Column for Extras
+pub const DB_COL_EXTRA: Option<u32> = Some(3);
+/// Column for Traces
+pub const DB_COL_TRACE: Option<u32> = Some(4);
+/// Number of columns in DB
+pub const DB_NO_OF_COLUMNS: Option<u32> = Some(5);
 /// Append a path element to the given path and return the string.
-pub fn append_path(path: &Path, item: &str) -> String {
-	let mut p = path.to_path_buf();
+pub fn append_path<P>
(path: P, item: &str) -> String where P: AsRef { + let mut p = path.as_ref().to_path_buf(); p.push(item); p.to_str().unwrap().to_owned() } @@ -174,40 +171,28 @@ impl Client { miner: Arc, message_channel: IoChannel, ) -> Result, ClientError> { - let path = get_db_path(path, config.pruning, spec.genesis_header().hash(), spec.fork_name.as_ref()); + let path = path.to_path_buf(); let gb = spec.genesis_block(); - let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path)); - let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone()))); + let mut db_config = DatabaseConfig::with_columns(DB_NO_OF_COLUMNS); + db_config.cache_size = config.db_cache_size; + db_config.compaction = config.db_compaction.compaction_profile(); + db_config.wal = config.db_wal; - let mut state_db_config = match config.db_cache_size { - None => DatabaseConfig::default(), - Some(cache_size) => DatabaseConfig::with_cache(cache_size), - }; - - if config.db_compaction == DatabaseCompactionProfile::HDD { - state_db_config = state_db_config.compaction(CompactionProfile::hdd()); - } - - let mut state_db = journaldb::new( - &append_path(&path, "state"), - config.pruning, - state_db_config - ); + let db = Arc::new(Database::open(&db_config, &path.to_str().unwrap()).expect("Error opening database")); + let chain = Arc::new(BlockChain::new(config.blockchain, &gb, db.clone())); + let tracedb = Arc::new(try!(TraceDB::new(config.tracing, db.clone(), chain.clone()))); + let mut state_db = journaldb::new(db.clone(), config.pruning, DB_COL_STATE); if state_db.is_empty() && spec.ensure_db_good(state_db.as_hashdb_mut()) { - state_db.commit(0, &spec.genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); + let batch = DBTransaction::new(&db); + state_db.commit(&batch, 0, &spec.genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); + db.write(batch).expect("Error writing genesis state to state DB"); } if !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.contains(h.state_root())) { warn!("State root not found for block #{} ({})", chain.best_block_number(), chain.best_block_hash().hex()); } - /* TODO: enable this once the best block issue is resolved - while !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.contains(h.state_root())) { - warn!("State root not found for block #{} ({}), recovering...", chain.best_block_number(), chain.best_block_hash().hex()); - chain.rewind(); - }*/ - let engine = Arc::new(spec.engine); let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel.clone()); @@ -222,6 +207,7 @@ impl Client { chain: chain, tracedb: tracedb, engine: engine, + db: db, state_db: Mutex::new(state_db), block_queue: block_queue, report: RwLock::new(Default::default()), @@ -297,7 +283,7 @@ impl Client { } // Verify Block Family - let verify_family_result = self.verifier.verify_block_family(&header, &block.bytes, engine, self.chain.deref()); + let verify_family_result = self.verifier.verify_block_family(header, &block.bytes, engine, self.chain.deref()); if let Err(e) = verify_family_result { warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); return Err(()); @@ -315,7 +301,7 @@ impl Client { let last_hashes = self.build_last_hashes(header.parent_hash.clone()); let db = self.state_db.lock().boxed_clone(); - let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, 
&self.vm_factory, self.trie_factory.clone()); + let enact_result = enact_verified(block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, &self.vm_factory, self.trie_factory.clone()); if let Err(e) = enact_result { warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); return Err(()); @@ -323,7 +309,7 @@ impl Client { // Final Verification let locked_block = enact_result.unwrap(); - if let Err(e) = self.verifier.verify_block_final(&header, locked_block.block().header()) { + if let Err(e) = self.verifier.verify_block_final(header, locked_block.block().header()) { warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); return Err(()); } @@ -360,7 +346,7 @@ impl Client { /// This is triggered by a message coming from a block queue when the block is ready for insertion pub fn import_verified_blocks(&self) -> usize { let max_blocks_to_import = 64; - let (imported_blocks, import_results, invalid_blocks, original_best, imported, duration) = { + let (imported_blocks, import_results, invalid_blocks, imported, duration) = { let mut imported_blocks = Vec::with_capacity(max_blocks_to_import); let mut invalid_blocks = HashSet::new(); let mut import_results = Vec::with_capacity(max_blocks_to_import); @@ -370,8 +356,6 @@ impl Client { let start = precise_time_ns(); let blocks = self.block_queue.drain(max_blocks_to_import); - let original_best = self.chain_info().best_block_hash; - for block in blocks { let header = &block.header; if invalid_blocks.contains(&header.parent_hash) { @@ -405,7 +389,7 @@ impl Client { } } let duration_ns = precise_time_ns() - start; - (imported_blocks, import_results, invalid_blocks, original_best, imported, duration_ns) + (imported_blocks, import_results, invalid_blocks, imported, duration_ns) }; { @@ -429,10 +413,6 @@ impl Client { } } - if self.chain_info().best_block_hash != original_best { - self.miner.update_sealing(self); - } - imported } @@ -449,23 +429,30 @@ impl Client { // Commit results let receipts = block.receipts().to_owned(); - let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new)); + let traces = block.traces().clone().unwrap_or_else(Vec::new); + let traces: Vec = traces.into_iter() + .map(Into::into) + .collect(); + //let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new)); + + let batch = DBTransaction::new(&self.db); // CHECK! I *think* this is fine, even if the state_root is equal to another // already-imported block of the same number. // TODO: Prove it with a test. 
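// (Illustrative aside, not part of the patch: the pattern introduced here is "open one
// batch, stage every write, flush once", mirroring the `insert_block` test helper earlier
// in this diff. `db` and `bc` are hypothetical handles to the shared `Database` and
// `BlockChain`:
//
//     let batch = db.transaction();                  // stage all writes in memory
//     let route = bc.insert_block(&batch, &block_bytes, receipts);
//     db.write(batch).expect("DB write failed");     // one atomic flush at the end
// )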
- block.drain().commit(number, hash, ancient).expect("State DB commit failed."); + block.drain().commit(&batch, number, hash, ancient).expect("State DB commit failed."); - // And update the chain after commit to prevent race conditions - // (when something is in chain but you are not able to fetch details) - let route = self.chain.insert_block(block_data, receipts); - self.tracedb.import(TraceImportRequest { - traces: traces, + let route = self.chain.insert_block(&batch, block_data, receipts); + self.tracedb.import(&batch, TraceImportRequest { + traces: traces.into(), block_hash: hash.clone(), block_number: number, enacted: route.enacted.clone(), retracted: route.retracted.len() }); + // Final commit to the DB + self.db.write(batch).expect("State DB write failed."); + self.update_last_hashes(&parent, hash); route } @@ -484,12 +471,12 @@ impl Client { pub fn import_queued_transactions(&self, transactions: &[Bytes]) -> usize { let _timer = PerfTimer::new("import_queued_transactions"); self.queue_transactions.fetch_sub(transactions.len(), AtomicOrdering::SeqCst); - let txs = transactions.iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect(); + let txs = transactions.iter().filter_map(|bytes| UntrustedRlp::new(bytes).as_val().ok()).collect(); let results = self.miner.import_external_transactions(self, txs); results.len() } - /// Attempt to get a copy of a specific block's state. + /// Attempt to get a copy of a specific block's final state. /// /// This will not fail if given BlockID::Latest. /// Otherwise, this can fail (but may not) if the DB prunes state. @@ -520,6 +507,21 @@ impl Client { }) } + /// Attempt to get a copy of a specific block's beginning state. + /// + /// This will not fail if given BlockID::Latest. + /// Otherwise, this can fail (but may not) if the DB prunes state. + pub fn state_at_beginning(&self, id: BlockID) -> Option { + // fast path for latest state. + match id { + BlockID::Pending => self.state_at(BlockID::Latest), + id => match self.block_number(id) { + None | Some(0) => None, + Some(n) => self.state_at(BlockID::Number(n - 1)), + } + } + } + /// Get a copy of the best block's state. 
pub fn state(&self) -> State { State::from_existing( @@ -676,6 +678,46 @@ impl BlockChainClient for Client { ret } + fn replay(&self, id: TransactionID, analytics: CallAnalytics) -> Result { + let address = try!(self.transaction_address(id).ok_or(ReplayError::TransactionNotFound)); + let header_data = try!(self.block_header(BlockID::Hash(address.block_hash)).ok_or(ReplayError::StatePruned)); + let body_data = try!(self.block_body(BlockID::Hash(address.block_hash)).ok_or(ReplayError::StatePruned)); + let mut state = try!(self.state_at_beginning(BlockID::Hash(address.block_hash)).ok_or(ReplayError::StatePruned)); + let txs = BodyView::new(&body_data).transactions(); + + if address.index >= txs.len() { + return Err(ReplayError::TransactionNotFound); + } + + let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; + let view = HeaderView::new(&header_data); + let last_hashes = self.build_last_hashes(view.hash()); + let mut env_info = EnvInfo { + number: view.number(), + author: view.author(), + timestamp: view.timestamp(), + difficulty: view.difficulty(), + last_hashes: last_hashes, + gas_used: U256::zero(), + gas_limit: view.gas_limit(), + }; + for t in txs.iter().take(address.index) { + match Executive::new(&mut state, &env_info, self.engine.deref().deref(), &self.vm_factory).transact(t, Default::default()) { + Ok(x) => { env_info.gas_used = env_info.gas_used + x.gas_used; } + Err(ee) => { return Err(ReplayError::Execution(ee)) } + } + } + let t = &txs[address.index]; + let orig = state.clone(); + let mut ret = Executive::new(&mut state, &env_info, self.engine.deref().deref(), &self.vm_factory).transact(t, options); + if analytics.state_diffing { + if let Ok(ref mut x) = ret { + x.state_diff = Some(state.diff_from(orig)); + } + } + ret.map_err(|ee| ReplayError::Execution(ee)) + } + fn keep_alive(&self) { if self.mode != Mode::Active { self.wake_up(); @@ -683,24 +725,20 @@ impl BlockChainClient for Client { } } + fn best_block_header(&self) -> Bytes { + self.chain.best_block_header() + } + fn block_header(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec())) + Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_header_data(&hash)) } fn block_body(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id).and_then(|hash| { - self.chain.block(&hash).map(|bytes| { - let rlp = Rlp::new(&bytes); - let mut body = RlpStream::new_list(2); - body.append_raw(rlp.at(1).as_raw(), 1); - body.append_raw(rlp.at(2).as_raw(), 1); - body.out() - }) - }) + Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_body(&hash)) } fn block(&self, id: BlockID) -> Option { - if let &BlockID::Pending = &id { + if let BlockID::Pending = id { if let Some(block) = self.miner.pending_block() { return Some(block.rlp_bytes(Seal::Without)); } @@ -719,7 +757,7 @@ impl BlockChainClient for Client { } fn block_total_difficulty(&self, id: BlockID) -> Option { - if let &BlockID::Pending = &id { + if let BlockID::Pending = id { if let Some(block) = self.miner.pending_block() { return Some(*block.header.difficulty() + self.block_total_difficulty(BlockID::Latest).expect("blocks in chain have details; qed")); } @@ -753,13 +791,13 @@ impl BlockChainClient for Client { fn uncle(&self, id: UncleID) -> Option { let index = id.position; - self.block(id.block).and_then(|block| 
BlockView::new(&block).uncle_rlp_at(index)) + self.block_body(id.block).and_then(|body| BodyView::new(&body).uncle_rlp_at(index)) } fn transaction_receipt(&self, id: TransactionID) -> Option { - self.transaction_address(id).and_then(|address| { - let t = self.chain.block(&address.block_hash) - .and_then(|block| BlockView::new(&block).localized_transaction_at(address.index)); + self.transaction_address(id).and_then(|address| self.chain.block_number(&address.block_hash).and_then(|block_number| { + let t = self.chain.block_body(&address.block_hash) + .and_then(|block| BodyView::new(&block).localized_transaction_at(&address.block_hash, block_number, address.index)); match (t, self.chain.transaction_receipt(&address)) { (Some(tx), Some(receipt)) => { @@ -798,7 +836,7 @@ impl BlockChainClient for Client { }, _ => None } - }) + })) } fn tree_route(&self, from: &H256, to: &H256) -> Option { @@ -874,7 +912,7 @@ impl BlockChainClient for Client { blocks.into_iter() .filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash))) .filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts))) - .filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes()))) + .filter_map(|(number, hash, receipts)| self.chain.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes()))) .flat_map(|(number, hash, receipts, hashes)| { let mut log_index = 0; receipts.into_iter() @@ -1004,8 +1042,6 @@ impl MiningBlockChainClient for Client { let _timer = PerfTimer::new("import_sealed_block"); let start = precise_time_ns(); - let original_best = self.chain_info().best_block_hash; - let h = block.header().hash(); let number = block.header().number(); @@ -1013,26 +1049,19 @@ impl MiningBlockChainClient for Client { let route = self.commit_block(block, &h, &block_data); trace!(target: "client", "Imported sealed block #{} ({})", number, h); - { - let (enacted, retracted) = self.calculate_enacted_retracted(&[route]); - self.miner.chain_new_blocks(self, &[h.clone()], &[], &enacted, &retracted); - - self.notify(|notify| { - notify.new_blocks( - vec![h.clone()], - vec![], - enacted.clone(), - retracted.clone(), - vec![h.clone()], - precise_time_ns() - start, - ); - }); - } - - if self.chain_info().best_block_hash != original_best { - self.miner.update_sealing(self); - } + let (enacted, retracted) = self.calculate_enacted_retracted(&[route]); + self.miner.chain_new_blocks(self, &[h.clone()], &[], &enacted, &retracted); + self.notify(|notify| { + notify.new_blocks( + vec![h.clone()], + vec![], + enacted.clone(), + retracted.clone(), + vec![h.clone()], + precise_time_ns() - start, + ); + }); Ok(h) } } diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index 1010ce656..504ca4de7 100644 --- a/ethcore/src/client/config.rs +++ b/ethcore/src/client/config.rs @@ -14,13 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
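// (Illustrative aside, not part of the patch: with the `FromStr` impl added below, a CLI
// argument can be parsed straight into a compaction profile; `user_input` is hypothetical:
//
//     let profile: DatabaseCompactionProfile = user_input.parse().unwrap_or_default();
//     let tuning = profile.compaction_profile(); // "hdd" selects CompactionProfile::hdd()
// )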
+use std::str::FromStr; pub use std::time::Duration; pub use block_queue::BlockQueueConfig; pub use blockchain::Config as BlockChainConfig; pub use trace::{Config as TraceConfig, Switch}; pub use evm::VMType; pub use verification::VerifierType; -use util::journaldb; +use util::{journaldb, CompactionProfile}; use util::trie::TrieSpec; /// Client state db compaction profile @@ -33,7 +34,31 @@ pub enum DatabaseCompactionProfile { } impl Default for DatabaseCompactionProfile { - fn default() -> Self { DatabaseCompactionProfile::Default } + fn default() -> Self { + DatabaseCompactionProfile::Default + } +} + +impl DatabaseCompactionProfile { + /// Returns corresponding compaction profile. + pub fn compaction_profile(&self) -> CompactionProfile { + match *self { + DatabaseCompactionProfile::Default => Default::default(), + DatabaseCompactionProfile::HDD => CompactionProfile::hdd(), + } + } +} + +impl FromStr for DatabaseCompactionProfile { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "ssd" | "default" => Ok(DatabaseCompactionProfile::Default), + "hdd" => Ok(DatabaseCompactionProfile::HDD), + _ => Err("Invalid compaction profile given. Expected hdd/ssd (default).".into()), + } + } } /// Operating mode for the client. @@ -50,11 +75,13 @@ pub enum Mode { } impl Default for Mode { - fn default() -> Self { Mode::Active } + fn default() -> Self { + Mode::Active + } } /// Client configuration. Includes configs for all sub-systems. -#[derive(Debug, Default)] +#[derive(Debug, PartialEq, Default)] pub struct ClientConfig { /// Block queue configuration. pub queue: BlockQueueConfig, @@ -74,8 +101,32 @@ pub struct ClientConfig { pub db_cache_size: Option, /// State db compaction profile pub db_compaction: DatabaseCompactionProfile, + /// Should db have WAL enabled? + pub db_wal: bool, /// Operating mode pub mode: Mode, /// Type of block verifier used by client. pub verifier_type: VerifierType, } + +#[cfg(test)] +mod test { + use super::{DatabaseCompactionProfile, Mode}; + + #[test] + fn test_default_compaction_profile() { + assert_eq!(DatabaseCompactionProfile::default(), DatabaseCompactionProfile::Default); + } + + #[test] + fn test_parsing_compaction_profile() { + assert_eq!(DatabaseCompactionProfile::Default, "ssd".parse().unwrap()); + assert_eq!(DatabaseCompactionProfile::Default, "default".parse().unwrap()); + assert_eq!(DatabaseCompactionProfile::HDD, "hdd".parse().unwrap()); + } + + #[test] + fn test_mode_default() { + assert_eq!(Mode::default(), Mode::Active); + } +} diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index e0280b388..cdffe4302 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -37,7 +37,7 @@ use spec::Spec; use block_queue::BlockQueueInfo; use block::{OpenBlock, SealedBlock}; use executive::Executed; -use error::ExecutionError; +use error::{ExecutionError, ReplayError}; use trace::LocalizedTrace; /// Test client. 
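// (Illustrative aside, not part of the patch: a usage sketch for the `replay` call added to
// the client above; `client` and `tx_hash` are hypothetical and `TransactionID::Hash` is an
// assumed variant:
//
//     let analytics = CallAnalytics { transaction_tracing: true, vm_tracing: false, state_diffing: true };
//     let executed = try!(client.replay(TransactionID::Hash(tx_hash), analytics));
//     assert!(executed.state_diff.is_some()); // present because state_diffing was requested
// )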
@@ -190,7 +190,7 @@ impl TestBlockChainClient { gas_price: U256::one(), nonce: U256::zero() }; - let signed_tx = tx.sign(&keypair.secret()); + let signed_tx = tx.sign(keypair.secret()); txs.append(&signed_tx); txs.out() }, @@ -248,7 +248,8 @@ impl TestBlockChainClient { pub fn get_temp_journal_db() -> GuardedTempResult> { let temp = RandomTempPath::new(); - let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge, DatabaseConfig::default()); + let db = Database::open_default(temp.as_str()).unwrap(); + let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, None); GuardedTempResult { _temp: temp, result: Some(journal_db) @@ -292,6 +293,10 @@ impl BlockChainClient for TestBlockChainClient { Ok(self.execution_result.read().clone().unwrap()) } + fn replay(&self, _id: TransactionID, _analytics: CallAnalytics) -> Result { + Ok(self.execution_result.read().clone().unwrap()) + } + fn block_total_difficulty(&self, _id: BlockID) -> Option { Some(U256::zero()) } @@ -359,6 +364,10 @@ impl BlockChainClient for TestBlockChainClient { unimplemented!(); } + fn best_block_header(&self) -> Bytes { + self.block_header(BlockID::Hash(self.chain_info().best_block_hash)).expect("Best block always have header.") + } + fn block_header(&self, id: BlockID) -> Option { self.block_hash(id).and_then(|hash| self.blocks.read().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec())) } @@ -366,8 +375,8 @@ impl BlockChainClient for TestBlockChainClient { fn block_body(&self, id: BlockID) -> Option { self.block_hash(id).and_then(|hash| self.blocks.read().get(&hash).map(|r| { let mut stream = RlpStream::new_list(2); - stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1); - stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1); + stream.append_raw(Rlp::new(r).at(1).as_raw(), 1); + stream.append_raw(Rlp::new(r).at(2).as_raw(), 1); stream.out() })) } diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index ad4c4a193..348b90c90 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -26,7 +26,7 @@ use transaction::{LocalizedTransaction, SignedTransaction}; use log_entry::LocalizedLogEntry; use filter::Filter; use views::{BlockView}; -use error::{ImportResult, ExecutionError}; +use error::{ImportResult, ExecutionError, ReplayError}; use receipt::LocalizedReceipt; use trace::LocalizedTrace; use evm::Factory as EvmFactory; @@ -145,10 +145,7 @@ pub trait BlockChainClient : Sync + Send { fn chain_info(&self) -> BlockChainInfo; /// Get the best block header. - fn best_block_header(&self) -> Bytes { - // TODO: lock blockchain only once - self.block_header(BlockID::Hash(self.chain_info().best_block_hash)).unwrap() - } + fn best_block_header(&self) -> Bytes; /// Returns numbers of blocks containing given bloom. fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option>; @@ -160,6 +157,9 @@ pub trait BlockChainClient : Sync + Send { // TODO: should be able to accept blockchain location for call. fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result; + /// Replays a given transaction for inspection. + fn replay(&self, t: TransactionID, analytics: CallAnalytics) -> Result; + /// Returns traces matching given filter. fn filter_traces(&self, filter: TraceFilter) -> Option>; diff --git a/ethcore/src/db.rs b/ethcore/src/db.rs index 57b4cfdc6..eab0e2eb5 100644 --- a/ethcore/src/db.rs +++ b/ethcore/src/db.rs @@ -62,14 +62,14 @@ pub trait Key { /// Should be used to write value into database. 
pub trait Writable { /// Writes the value into the database. - fn write(&self, key: &Key, value: &T) where T: Encodable, R: Deref; + fn write(&self, col: Option, key: &Key, value: &T) where T: Encodable, R: Deref; /// Writes the value into the database and updates the cache. - fn write_with_cache(&self, cache: &mut Cache, key: K, value: T, policy: CacheUpdatePolicy) where + fn write_with_cache(&self, col: Option, cache: &mut Cache, key: K, value: T, policy: CacheUpdatePolicy) where K: Key + Hash + Eq, T: Encodable, R: Deref { - self.write(&key, &value); + self.write(col, &key, &value); match policy { CacheUpdatePolicy::Overwrite => { cache.insert(key, value); @@ -81,20 +81,20 @@ pub trait Writable { } /// Writes the values into the database and updates the cache. - fn extend_with_cache(&self, cache: &mut Cache, values: HashMap, policy: CacheUpdatePolicy) where + fn extend_with_cache(&self, col: Option, cache: &mut Cache, values: HashMap, policy: CacheUpdatePolicy) where K: Key + Hash + Eq, T: Encodable, R: Deref { match policy { CacheUpdatePolicy::Overwrite => { for (key, value) in values.into_iter() { - self.write(&key, &value); + self.write(col, &key, &value); cache.insert(key, value); } }, CacheUpdatePolicy::Remove => { for (key, value) in &values { - self.write(key, value); + self.write(col, key, value); cache.remove(key); } }, @@ -105,12 +105,12 @@ pub trait Writable { /// Should be used to read values from database. pub trait Readable { /// Returns value for given key. - fn read(&self, key: &Key) -> Option where + fn read(&self, col: Option, key: &Key) -> Option where T: Decodable, R: Deref; /// Returns value for given key either in cache or in database. - fn read_with_cache(&self, cache: &RwLock, key: &K) -> Option where + fn read_with_cache(&self, col: Option, cache: &RwLock, key: &K) -> Option where K: Key + Eq + Hash + Clone, T: Clone + Decodable, C: Cache { @@ -121,7 +121,7 @@ pub trait Readable { } } - self.read(key).map(|value: T|{ + self.read(col, key).map(|value: T|{ let mut write = cache.write(); write.insert(key.clone(), value.clone()); value @@ -129,10 +129,10 @@ pub trait Readable { } /// Returns true if given value exists. - fn exists(&self, key: &Key) -> bool where R: Deref; + fn exists(&self, col: Option, key: &Key) -> bool where R: Deref; /// Returns true if given value exists either in cache or in database. 
- fn exists_with_cache(&self, cache: &RwLock, key: &K) -> bool where + fn exists_with_cache(&self, col: Option, cache: &RwLock, key: &K) -> bool where K: Eq + Hash + Key, R: Deref, C: Cache { @@ -143,13 +143,13 @@ pub trait Readable { } } - self.exists::(key) + self.exists::(col, key) } } impl Writable for DBTransaction { - fn write(&self, key: &Key, value: &T) where T: Encodable, R: Deref { - let result = self.put(&key.key(), &encode(value)); + fn write(&self, col: Option, key: &Key, value: &T) where T: Encodable, R: Deref { + let result = self.put(col, &key.key(), &encode(value)); if let Err(err) = result { panic!("db put failed, key: {:?}, err: {:?}", &key.key() as &[u8], err); } @@ -157,8 +157,8 @@ impl Writable for DBTransaction { } impl Readable for Database { - fn read(&self, key: &Key) -> Option where T: Decodable, R: Deref { - let result = self.get(&key.key()); + fn read(&self, col: Option, key: &Key) -> Option where T: Decodable, R: Deref { + let result = self.get(col, &key.key()); match result { Ok(option) => option.map(|v| decode(&v)), @@ -168,8 +168,8 @@ impl Readable for Database { } } - fn exists(&self, key: &Key) -> bool where R: Deref { - let result = self.get(&key.key()); + fn exists(&self, col: Option, key: &Key) -> bool where R: Deref { + let result = self.get(col, &key.key()); match result { Ok(v) => v.is_some(), diff --git a/ethcore/src/basic_authority.rs b/ethcore/src/engines/basic_authority.rs similarity index 97% rename from ethcore/src/basic_authority.rs rename to ethcore/src/engines/basic_authority.rs index b5b199c82..525b825b9 100644 --- a/ethcore/src/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -19,8 +19,8 @@ use common::*; use account_provider::AccountProvider; use block::*; -use spec::{CommonParams, Spec}; -use engine::*; +use spec::CommonParams; +use engines::Engine; use evm::Schedule; use ethjson; @@ -176,16 +176,16 @@ impl Header { } } -/// Create a new test chain spec with `BasicAuthority` consensus engine. -pub fn new_test_authority() -> Spec { Spec::load(include_bytes!("../res/test_authority.json")) } - #[cfg(test)] mod tests { - use super::*; use common::*; use block::*; use tests::helpers::*; use account_provider::AccountProvider; + use spec::Spec; + + /// Create a new test chain spec with `BasicAuthority` consensus engine. + fn new_test_authority() -> Spec { Spec::load(include_bytes!("../../res/test_authority.json")) } #[test] fn has_valid_metadata() { diff --git a/ethcore/src/engine.rs b/ethcore/src/engines/mod.rs similarity index 97% rename from ethcore/src/engine.rs rename to ethcore/src/engines/mod.rs index e78f84c13..25feed74d 100644 --- a/ethcore/src/engine.rs +++ b/ethcore/src/engines/mod.rs @@ -14,7 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! Consensus engine specification +//! Consensus engine specification and basic implementations. 
+ +mod null_engine; +mod basic_authority; + +pub use self::null_engine::NullEngine; +pub use self::basic_authority::BasicAuthority; use common::*; use account_provider::AccountProvider; diff --git a/ethcore/src/null_engine.rs b/ethcore/src/engines/null_engine.rs similarity index 93% rename from ethcore/src/null_engine.rs rename to ethcore/src/engines/null_engine.rs index a760bea93..aebf7d1bf 100644 --- a/ethcore/src/null_engine.rs +++ b/ethcore/src/engines/null_engine.rs @@ -17,12 +17,12 @@ use std::collections::BTreeMap; use util::hash::Address; use builtin::Builtin; -use engine::Engine; +use engines::Engine; use spec::CommonParams; use evm::Schedule; use env_info::EnvInfo; -/// An engine which does not provide any consensus mechanism. +/// An engine which does not provide any consensus mechanism and does not seal blocks. pub struct NullEngine { params: CommonParams, builtins: BTreeMap, diff --git a/ethcore/src/error.rs b/ethcore/src/error.rs index 26a645322..efcbd65e2 100644 --- a/ethcore/src/error.rs +++ b/ethcore/src/error.rs @@ -22,7 +22,7 @@ use basic_types::LogBloom; use client::Error as ClientError; use ipc::binary::{BinaryConvertError, BinaryConvertable}; use types::block_import_error::BlockImportError; -pub use types::executed::ExecutionError; +pub use types::executed::{ExecutionError, ReplayError}; #[derive(Debug, PartialEq, Clone)] /// Errors concerning transaction processing. diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index 802b68e82..2a0f84c23 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -18,7 +18,7 @@ use ethash::{quick_get_difficulty, EthashManager, H256 as EH256}; use common::*; use block::*; use spec::CommonParams; -use engine::*; +use engines::Engine; use evm::Schedule; use ethjson; diff --git a/ethcore/src/evm/ext.rs b/ethcore/src/evm/ext.rs index 0aaa4dac6..ffc1887de 100644 --- a/ethcore/src/evm/ext.rs +++ b/ethcore/src/evm/ext.rs @@ -18,6 +18,7 @@ use util::common::*; use evm::{self, Schedule}; +use types::executed::CallType; use env_info::*; /// Result of externalities create function. @@ -69,13 +70,15 @@ pub trait Ext { /// and true if subcall was successfull. #[cfg_attr(feature="dev", allow(too_many_arguments))] fn call(&mut self, - gas: &U256, - sender_address: &Address, - receive_address: &Address, - value: Option, - data: &[u8], - code_address: &Address, - output: &mut [u8]) -> MessageCallResult; + gas: &U256, + sender_address: &Address, + receive_address: &Address, + value: Option, + data: &[u8], + code_address: &Address, + output: &mut [u8], + call_type: CallType + ) -> MessageCallResult; /// Returns code at given address fn extcode(&self, address: &Address) -> Bytes; diff --git a/ethcore/src/evm/factory.rs b/ethcore/src/evm/factory.rs index 082b9d050..50c384a99 100644 --- a/ethcore/src/evm/factory.rs +++ b/ethcore/src/evm/factory.rs @@ -21,7 +21,7 @@ use std::fmt; use evm::Evm; use util::{U256, Uint}; -#[derive(Debug, Clone)] +#[derive(Debug, PartialEq, Clone)] /// Type of EVM to use. 
pub enum VMType { /// JIT EVM diff --git a/ethcore/src/evm/interpreter/mod.rs b/ethcore/src/evm/interpreter/mod.rs index a43592c6d..84e416c15 100644 --- a/ethcore/src/evm/interpreter/mod.rs +++ b/ethcore/src/evm/interpreter/mod.rs @@ -38,6 +38,7 @@ use self::memory::Memory; use std::marker::PhantomData; use common::*; +use types::executed::CallType; use super::instructions::{self, Instruction, InstructionInfo}; use evm::{self, MessageCallResult, ContractCreateResult, GasLeft, CostType}; @@ -96,13 +97,13 @@ impl evm::Evm for Interpreter { self.mem.clear(); let code = ¶ms.code.as_ref().unwrap(); - let valid_jump_destinations = self.find_jump_destinations(&code); + let valid_jump_destinations = self.find_jump_destinations(code); let mut gasometer = Gasometer::::new(try!(Cost::from_u256(params.gas))); let mut stack = VecStack::with_capacity(ext.schedule().stack_limit, U256::zero()); let mut reader = CodeReader { position: 0, - code: &code + code: code }; let infos = &*instructions::INSTRUCTIONS; @@ -274,7 +275,7 @@ impl Interpreter { return Ok(InstructionResult::Ok); } - let create_result = ext.create(&gas.as_u256(), &endowment, &contract_code); + let create_result = ext.create(&gas.as_u256(), &endowment, contract_code); return match create_result { ContractCreateResult::Created(address, gas_left) => { stack.push(address_to_u256(address)); @@ -311,16 +312,16 @@ impl Interpreter { }); // Get sender & receive addresses, check if we have balance - let (sender_address, receive_address, has_balance) = match instruction { + let (sender_address, receive_address, has_balance, call_type) = match instruction { instructions::CALL => { let has_balance = ext.balance(¶ms.address) >= value.unwrap(); - (¶ms.address, &code_address, has_balance) + (¶ms.address, &code_address, has_balance, CallType::Call) }, instructions::CALLCODE => { let has_balance = ext.balance(¶ms.address) >= value.unwrap(); - (¶ms.address, ¶ms.address, has_balance) + (¶ms.address, ¶ms.address, has_balance, CallType::CallCode) }, - instructions::DELEGATECALL => (¶ms.sender, ¶ms.address, true), + instructions::DELEGATECALL => (¶ms.sender, ¶ms.address, true, CallType::DelegateCall), _ => panic!(format!("Unexpected instruction {} in CALL branch.", instruction)) }; @@ -335,7 +336,7 @@ impl Interpreter { // and we don't want to copy let input = unsafe { ::std::mem::transmute(self.mem.read_slice(in_off, in_size)) }; let output = self.mem.writeable_slice(out_off, out_size); - ext.call(&call_gas.as_u256(), sender_address, receive_address, value, input, &code_address, output) + ext.call(&call_gas.as_u256(), sender_address, receive_address, value, input, &code_address, output, call_type) }; return match call_result { diff --git a/ethcore/src/evm/tests.rs b/ethcore/src/evm/tests.rs index 573ffd3b4..bdb1f1ddb 100644 --- a/ethcore/src/evm/tests.rs +++ b/ethcore/src/evm/tests.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . use common::*; +use types::executed::CallType; use evm::{self, Ext, Schedule, Factory, GasLeft, VMType, ContractCreateResult, MessageCallResult}; use std::fmt::Debug; @@ -36,7 +37,7 @@ pub struct FakeCall { receive_address: Option
<Address>,
 	value: Option<U256>,
 	data: Bytes,
-	code_address: Option<Address>
+	code_address: Option<Address>
, } /// Fake externalities test structure. @@ -119,7 +120,9 @@ impl Ext for FakeExt { value: Option, data: &[u8], code_address: &Address, - _output: &mut [u8]) -> MessageCallResult { + _output: &mut [u8], + _call_type: CallType + ) -> MessageCallResult { self.calls.insert(FakeCall { call_type: FakeCallType::Call, diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index ef6af43b3..9a48f9d4a 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -17,11 +17,12 @@ //! Transaction Execution environment. use common::*; use state::*; -use engine::*; +use engines::Engine; +use types::executed::CallType; use evm::{self, Ext, Factory, Finalize}; use externalities::*; use substate::*; -use trace::{Trace, Tracer, NoopTracer, ExecutiveTracer, VMTrace, VMTracer, ExecutiveVMTracer, NoopVMTracer}; +use trace::{FlatTrace, Tracer, NoopTracer, ExecutiveTracer, VMTrace, VMTracer, ExecutiveVMTracer, NoopVMTracer}; use crossbeam; pub use types::executed::{Executed, ExecutionResult}; @@ -39,6 +40,7 @@ pub fn contract_address(address: &Address, nonce: &U256) -> Address { } /// Transaction execution options. +#[derive(Default)] pub struct TransactOptions { /// Enable call tracing. pub tracing: bool, @@ -173,6 +175,7 @@ impl<'a> Executive<'a> { value: ActionValue::Transfer(t.value), code: Some(t.data.clone()), data: None, + call_type: CallType::None, }; (self.create(params, &mut substate, &mut tracer, &mut vm_tracer), vec![]) }, @@ -187,6 +190,7 @@ impl<'a> Executive<'a> { value: ActionValue::Transfer(t.value), code: self.state.code(address), data: Some(t.data.clone()), + call_type: CallType::Call, }; // TODO: move output upstream let mut out = vec![]; @@ -195,7 +199,7 @@ impl<'a> Executive<'a> { }; // finalize here! - Ok(try!(self.finalize(t, substate, gas_left, output, tracer.traces().pop(), vm_tracer.drain()))) + Ok(try!(self.finalize(t, substate, gas_left, output, tracer.traces(), vm_tracer.drain()))) } fn exec_vm( @@ -248,8 +252,6 @@ impl<'a> Executive<'a> { } trace!("Executive::call(params={:?}) self.env_info={:?}", params, self.info); - let delegate_call = params.code_address != params.address; - if self.engine.is_builtin(¶ms.code_address) { // if destination is builtin, try to execute it @@ -274,9 +276,7 @@ impl<'a> Executive<'a> { trace_info, cost, trace_output, - self.depth, - vec![], - delegate_call + vec![] ); } @@ -285,7 +285,7 @@ impl<'a> Executive<'a> { // just drain the whole gas self.state.revert_snapshot(); - tracer.trace_failed_call(trace_info, self.depth, vec![], delegate_call); + tracer.trace_failed_call(trace_info, vec![]); Err(evm::Error::OutOfGas) } @@ -317,11 +317,9 @@ impl<'a> Executive<'a> { trace_info, gas - gas_left, trace_output, - self.depth, - traces, - delegate_call + traces ), - _ => tracer.trace_failed_call(trace_info, self.depth, traces, delegate_call), + _ => tracer.trace_failed_call(trace_info, traces), }; trace!(target: "executive", "substate={:?}; unconfirmed_substate={:?}\n", substate, unconfirmed_substate); @@ -333,7 +331,7 @@ impl<'a> Executive<'a> { // otherwise it's just a basic transaction, only do tracing, if necessary. 
self.state.clear_snapshot(); - tracer.trace_call(trace_info, U256::zero(), trace_output, self.depth, vec![], delegate_call); + tracer.trace_call(trace_info, U256::zero(), trace_output, vec![]); Ok(params.gas) } } @@ -370,7 +368,7 @@ impl<'a> Executive<'a> { let gas = params.gas; let created = params.address.clone(); - let mut subvmtracer = vm_tracer.prepare_subtrace(¶ms.code.as_ref().expect("two ways into create (Externalities::create and Executive::transact_with_tracer); both place `Some(...)` `code` in `params`; qed")); + let mut subvmtracer = vm_tracer.prepare_subtrace(params.code.as_ref().expect("two ways into create (Externalities::create and Executive::transact_with_tracer); both place `Some(...)` `code` in `params`; qed")); let res = { self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::InitContract(trace_output.as_mut()), &mut subtracer, &mut subvmtracer) @@ -384,10 +382,9 @@ impl<'a> Executive<'a> { gas - gas_left, trace_output, created, - self.depth, subtracer.traces() ), - _ => tracer.trace_failed_create(trace_info, self.depth, subtracer.traces()) + _ => tracer.trace_failed_create(trace_info, subtracer.traces()) }; self.enact_result(&res, substate, unconfirmed_substate); @@ -401,7 +398,7 @@ impl<'a> Executive<'a> { substate: Substate, result: evm::Result, output: Bytes, - trace: Option, + trace: Vec, vm_trace: Option ) -> ExecutionResult { let schedule = self.engine.schedule(self.info); @@ -493,8 +490,9 @@ mod tests { use substate::*; use tests::helpers::*; use trace::trace; - use trace::{Trace, Tracer, NoopTracer, ExecutiveTracer}; + use trace::{FlatTrace, Tracer, NoopTracer, ExecutiveTracer}; use trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff, VMTracer, NoopVMTracer, ExecutiveVMTracer}; + use types::executed::CallType; #[test] fn test_contract_address() { @@ -628,6 +626,7 @@ mod tests { params.gas = U256::from(100_000); params.code = Some(code.clone()); params.value = ActionValue::Transfer(U256::from(100)); + params.call_type = CallType::Call; let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); state.add_balance(&sender, &U256::from(100)); @@ -645,35 +644,37 @@ mod tests { assert_eq!(gas_left, U256::from(44_752)); - let expected_trace = vec![ Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + subtraces: 1, action: trace::Action::Call(trace::Call { from: "cd1722f3947def4cf144679da39c4c32bdc35681".into(), to: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(), value: 100.into(), gas: 100000.into(), input: vec![], + call_type: CallType::Call, }), result: trace::Res::Call(trace::CallResult { gas_used: U256::from(55_248), output: vec![], }), - subs: vec![Trace { - depth: 1, - action: trace::Action::Create(trace::Create { - from: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(), - value: 23.into(), - gas: 67979.into(), - init: vec![96, 16, 128, 96, 12, 96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53, 85] - }), - result: trace::Res::Create(trace::CreateResult { - gas_used: U256::from(3224), - address: Address::from_str("c6d80f262ae5e0f164e5fde365044d7ada2bfa34").unwrap(), - code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53] - }), - subs: vec![] - }] + }, FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Create(trace::Create { + from: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(), + value: 23.into(), + gas: 67979.into(), + init: vec![96, 16, 128, 96, 12, 
96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53, 85] + }), + result: trace::Res::Create(trace::CreateResult { + gas_used: U256::from(3224), + address: Address::from_str("c6d80f262ae5e0f164e5fde365044d7ada2bfa34").unwrap(), + code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53] + }), }]; + assert_eq!(tracer.traces(), expected_trace); let expected_vm_trace = VMTrace { @@ -751,8 +752,9 @@ mod tests { assert_eq!(gas_left, U256::from(96_776)); - let expected_trace = vec![Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + subtraces: 0, action: trace::Action::Create(trace::Create { from: params.sender, value: 100.into(), @@ -764,8 +766,8 @@ mod tests { address: params.address, code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53] }), - subs: vec![] }]; + assert_eq!(tracer.traces(), expected_trace); let expected_vm_trace = VMTrace { @@ -1009,7 +1011,7 @@ mod tests { gas: U256::from(100_000), gas_price: U256::zero(), nonce: U256::zero() - }.sign(&keypair.secret()); + }.sign(keypair.secret()); let sender = t.sender().unwrap(); let contract = contract_address(&sender, &U256::zero()); @@ -1076,7 +1078,7 @@ mod tests { gas: U256::from(100_000), gas_price: U256::zero(), nonce: U256::one() - }.sign(&keypair.secret()); + }.sign(keypair.secret()); let sender = t.sender().unwrap(); let mut state_result = get_temp_state(); @@ -1109,7 +1111,7 @@ mod tests { gas: U256::from(80_001), gas_price: U256::zero(), nonce: U256::zero() - }.sign(&keypair.secret()); + }.sign(keypair.secret()); let sender = t.sender().unwrap(); let mut state_result = get_temp_state(); @@ -1144,7 +1146,7 @@ mod tests { gas: U256::from(100_000), gas_price: U256::one(), nonce: U256::zero() - }.sign(&keypair.secret()); + }.sign(keypair.secret()); let sender = t.sender().unwrap(); let mut state_result = get_temp_state(); diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index 6dea52037..25fed0176 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -17,10 +17,11 @@ //! Transaction Execution environment. use common::*; use state::*; -use engine::*; +use engines::Engine; use executive::*; use evm::{self, Schedule, Ext, ContractCreateResult, MessageCallResult, Factory}; use substate::*; +use types::executed::CallType; use trace::{Tracer, VMTracer}; /// Policy for handling output data on `RETURN` opcode. 
@@ -148,6 +149,7 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT value: ActionValue::Transfer(*value), code: Some(code.to_vec()), data: None, + call_type: CallType::None, }; self.state.inc_nonce(&self.origin_info.address); @@ -170,7 +172,8 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT value: Option, data: &[u8], code_address: &Address, - output: &mut [u8] + output: &mut [u8], + call_type: CallType ) -> MessageCallResult { trace!(target: "externalities", "call"); @@ -184,6 +187,7 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT gas_price: self.origin_info.gas_price, code: self.state.code(code_address), data: Some(data.to_vec()), + call_type: call_type, }; if let Some(value) = value { @@ -263,7 +267,7 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT self.state.transfer_balance(&address, refund_address, &balance); } - self.tracer.trace_suicide(address, balance, refund_address.clone(), self.depth + 1); + self.tracer.trace_suicide(address, balance, refund_address.clone()); self.substate.suicides.insert(address); } @@ -272,7 +276,7 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT } fn env_info(&self) -> &EnvInfo { - &self.env_info + self.env_info } fn depth(&self) -> usize { @@ -296,13 +300,14 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT mod tests { use common::*; use state::*; - use engine::*; + use engines::Engine; use evm::{Ext}; use substate::*; use tests::helpers::*; use devtools::GuardedTempResult; use super::*; use trace::{NoopTracer, NoopVMTracer}; + use types::executed::CallType; fn get_test_origin() -> OriginInfo { OriginInfo { @@ -421,7 +426,9 @@ mod tests { Some(U256::from_str("0000000000000000000000000000000000000000000000000000000000150000").unwrap()), &[], &Address::new(), - &mut output); + &mut output, + CallType::Call + ); } #[test] @@ -455,7 +462,7 @@ mod tests { { let vm_factory = Default::default(); let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer); - ext.suicide(&refund_account); + ext.suicide(refund_account); } assert_eq!(setup.sub_state.suicides.len(), 1); diff --git a/ethcore/src/header.rs b/ethcore/src/header.rs index d5272ce2e..43b4537c4 100644 --- a/ethcore/src/header.rs +++ b/ethcore/src/header.rs @@ -234,7 +234,7 @@ impl Header { s.append(&self.extra_data); if let Seal::With = with_seal { for b in &self.seal { - s.append_raw(&b, 1); + s.append_raw(b, 1); } } } diff --git a/ethcore/src/json_tests/executive.rs b/ethcore/src/json_tests/executive.rs index 28dde2a38..adba16703 100644 --- a/ethcore/src/json_tests/executive.rs +++ b/ethcore/src/json_tests/executive.rs @@ -17,11 +17,12 @@ use super::test_common::*; use state::*; use executive::*; -use engine::*; +use engines::Engine; use evm; use evm::{Schedule, Ext, Factory, Finalize, VMType, ContractCreateResult, MessageCallResult}; use externalities::*; use substate::*; +use types::executed::CallType; use tests::helpers::*; use ethjson; use trace::{Tracer, NoopTracer}; @@ -37,7 +38,7 @@ struct CallCreate { impl From for CallCreate { fn from(c: ethjson::vm::Call) -> Self { - let dst: Option<_> = c.destination.into(); + let dst: Option = c.destination.into(); CallCreate { data: c.data.into(), destination: dst.map(Into::into), @@ -109,13 +110,15 @@ impl<'a, T, V> Ext for 
TestExt<'a, T, V> where T: Tracer, V: VMTracer { } fn call(&mut self, - gas: &U256, - _sender_address: &Address, - receive_address: &Address, - value: Option, - data: &[u8], - _code_address: &Address, - _output: &mut [u8]) -> MessageCallResult { + gas: &U256, + _sender_address: &Address, + receive_address: &Address, + value: Option, + data: &[u8], + _code_address: &Address, + _output: &mut [u8], + _call_type: CallType + ) -> MessageCallResult { self.callcreates.push(CallCreate { data: data.to_vec(), destination: Some(receive_address.clone()), diff --git a/ethcore/src/json_tests/transaction.rs b/ethcore/src/json_tests/transaction.rs index 673ff8650..7c9a3327e 100644 --- a/ethcore/src/json_tests/transaction.rs +++ b/ethcore/src/json_tests/transaction.rs @@ -49,7 +49,7 @@ fn do_json_test(json_data: &[u8]) -> Vec { fail_unless(t.gas_price == tx.gas_price.into()); fail_unless(t.nonce == tx.nonce.into()); fail_unless(t.value == tx.value.into()); - let to: Option<_> = tx.to.into(); + let to: Option = tx.to.into(); let to: Option
= to.map(Into::into); match t.action { Action::Call(dest) => fail_unless(Some(dest) == to), diff --git a/ethcore/src/json_tests/trie.rs b/ethcore/src/json_tests/trie.rs index 2d23ff7d2..e62fd01b3 100644 --- a/ethcore/src/json_tests/trie.rs +++ b/ethcore/src/json_tests/trie.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see . use ethjson; -use util::{H256, MemoryDB, TrieMut, TrieSpec, TrieFactory}; +use util::{H256, MemoryDB, TrieSpec, TrieFactory}; fn test_trie(json: &[u8], trie: TrieSpec) -> Vec { let tests = ethjson::trie::Test::load(json).unwrap(); diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index b8de63adb..3c6f319a1 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -15,23 +15,20 @@ // along with Parity. If not, see . #![warn(missing_docs)] +#![cfg_attr(feature="benches", feature(test))] #![cfg_attr(feature="dev", feature(plugin))] #![cfg_attr(feature="dev", plugin(clippy))] -// Clippy config -// TODO [todr] not really sure +// Clippy settings +// Most of the time much more readable #![cfg_attr(feature="dev", allow(needless_range_loop))] // Shorter than if-else #![cfg_attr(feature="dev", allow(match_bool))] -// Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. +// Keeps consistency (all lines with `.clone()`). #![cfg_attr(feature="dev", allow(clone_on_copy))] -// In most cases it expresses function flow better -#![cfg_attr(feature="dev", allow(if_not_else))] // TODO [todr] a lot of warnings to be fixed -#![cfg_attr(feature="dev", allow(needless_borrow))] #![cfg_attr(feature="dev", allow(assign_op_pattern))] -#![cfg_attr(feature="benches", feature(test))] //! Ethcore library //! @@ -102,7 +99,7 @@ extern crate ethcore_devtools as devtools; #[cfg(feature = "jit" )] extern crate evmjit; pub mod account_provider; -pub mod basic_authority; +pub mod engines; pub mod block; pub mod block_queue; pub mod client; @@ -114,7 +111,6 @@ pub mod trace; pub mod spec; pub mod views; pub mod pod_state; -pub mod engine; pub mod migrations; pub mod miner; pub mod snapshot; @@ -130,7 +126,6 @@ mod pod_account; mod state; mod account; mod account_db; -mod null_engine; mod builtin; mod substate; mod executive; diff --git a/ethcore/src/migrations/blocks/mod.rs b/ethcore/src/migrations/blocks/mod.rs new file mode 100644 index 000000000..7253208bb --- /dev/null +++ b/ethcore/src/migrations/blocks/mod.rs @@ -0,0 +1,21 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Blocks database migrations. + +mod v8; + +pub use self::v8::V8; diff --git a/ethcore/src/migrations/blocks/v8.rs b/ethcore/src/migrations/blocks/v8.rs new file mode 100644 index 000000000..798be0790 --- /dev/null +++ b/ethcore/src/migrations/blocks/v8.rs @@ -0,0 +1,37 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! This migration compresses the state db. + +use util::migration::{SimpleMigration, Progress}; +use util::rlp::{Compressible, UntrustedRlp, View, RlpType}; + +/// Compressing migration. +#[derive(Default)] +pub struct V8(Progress); + +impl SimpleMigration for V8 { + fn version(&self) -> u32 { + 8 + } + + fn columns(&self) -> Option { None } + + fn simple_migrate(&mut self, key: Vec, value: Vec) -> Option<(Vec, Vec)> { + self.0.tick(); + Some((key,UntrustedRlp::new(&value).compress(RlpType::Blocks).to_vec())) + } +} diff --git a/ethcore/src/migrations/extras/v6.rs b/ethcore/src/migrations/extras/v6.rs index af2d0389b..9b746b9d2 100644 --- a/ethcore/src/migrations/extras/v6.rs +++ b/ethcore/src/migrations/extras/v6.rs @@ -34,9 +34,10 @@ impl ToV6 { } impl SimpleMigration for ToV6 { - fn version(&self) -> u32 { - 6 - } + + fn columns(&self) -> Option { None } + + fn version(&self) -> u32 { 6 } fn simple_migrate(&mut self, key: Vec, value: Vec) -> Option<(Vec, Vec)> { diff --git a/ethcore/src/migrations/mod.rs b/ethcore/src/migrations/mod.rs index 6d86a122f..5c0c6f420 100644 --- a/ethcore/src/migrations/mod.rs +++ b/ethcore/src/migrations/mod.rs @@ -1,4 +1,25 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + //! Database migrations. -pub mod extras; pub mod state; +pub mod blocks; +pub mod extras; + +mod v9; +pub use self::v9::ToV9; +pub use self::v9::Extract; diff --git a/ethcore/src/migrations/state/v7.rs b/ethcore/src/migrations/state/v7.rs index faa289bd7..036ff707c 100644 --- a/ethcore/src/migrations/state/v7.rs +++ b/ethcore/src/migrations/state/v7.rs @@ -22,7 +22,7 @@ use std::collections::HashMap; use util::Bytes; use util::hash::{Address, FixedHash, H256}; use util::kvdb::Database; -use util::migration::{Batch, Config, Error, Migration, SimpleMigration}; +use util::migration::{Batch, Config, Error, Migration, SimpleMigration, Progress}; use util::rlp::{decode, Rlp, RlpStream, Stream, View}; use util::sha3::Hashable; @@ -63,19 +63,16 @@ fn attempt_migrate(mut key_h: H256, val: &[u8]) -> Option { /// Version for `ArchiveDB`. 
#[derive(Default)] -pub struct ArchiveV7(usize); +pub struct ArchiveV7(Progress); impl SimpleMigration for ArchiveV7 { - fn version(&self) -> u32 { - 7 - } + + fn columns(&self) -> Option { None } + + fn version(&self) -> u32 { 7 } fn simple_migrate(&mut self, key: Vec, value: Vec) -> Option<(Vec, Vec)> { - self.0 += 1; - if self.0 == 100_000 { - self.0 = 0; - flush!("."); - } + self.0.tick(); if key.len() != 32 { // metadata key, ignore. @@ -109,7 +106,7 @@ impl OverlayRecentV7 { // walk all journal entries in the database backwards. // find migrations for any possible inserted keys. fn walk_journal(&mut self, source: &Database) -> Result<(), Error> { - if let Some(val) = try!(source.get(V7_LATEST_ERA_KEY).map_err(Error::Custom)) { + if let Some(val) = try!(source.get(None, V7_LATEST_ERA_KEY).map_err(Error::Custom)) { let mut era = decode::(&val); loop { let mut index: usize = 0; @@ -120,7 +117,7 @@ impl OverlayRecentV7 { r.out() }; - if let Some(journal_raw) = try!(source.get(&entry_key).map_err(Error::Custom)) { + if let Some(journal_raw) = try!(source.get(None, &entry_key).map_err(Error::Custom)) { let rlp = Rlp::new(&journal_raw); // migrate all inserted keys. @@ -153,7 +150,7 @@ impl OverlayRecentV7 { // replace all possible inserted/deleted keys with their migrated counterparts // and commit the altered entries. fn migrate_journal(&self, source: &Database, mut batch: Batch, dest: &mut Database) -> Result<(), Error> { - if let Some(val) = try!(source.get(V7_LATEST_ERA_KEY).map_err(Error::Custom)) { + if let Some(val) = try!(source.get(None, V7_LATEST_ERA_KEY).map_err(Error::Custom)) { try!(batch.insert(V7_LATEST_ERA_KEY.into(), val.to_owned(), dest)); let mut era = decode::(&val); @@ -166,7 +163,7 @@ impl OverlayRecentV7 { r.out() }; - if let Some(journal_raw) = try!(source.get(&entry_key).map_err(Error::Custom)) { + if let Some(journal_raw) = try!(source.get(None, &entry_key).map_err(Error::Custom)) { let rlp = Rlp::new(&journal_raw); let id: H256 = rlp.val_at(0); let mut inserted_keys: Vec<(H256, Bytes)> = Vec::new(); @@ -221,22 +218,25 @@ impl OverlayRecentV7 { } impl Migration for OverlayRecentV7 { + + fn columns(&self) -> Option { None } + fn version(&self) -> u32 { 7 } // walk all records in the database, attempting to migrate any possible and // keeping records of those that we do. then migrate the journal using // this information. - fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database) -> Result<(), Error> { - let mut batch = Batch::new(config); + fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> { + let mut batch = Batch::new(config, col); // check version metadata. - match try!(source.get(V7_VERSION_KEY).map_err(Error::Custom)) { + match try!(source.get(None, V7_VERSION_KEY).map_err(Error::Custom)) { Some(ref version) if decode::(&*version) == DB_VERSION => {} _ => return Err(Error::MigrationImpossible), // missing or wrong version } let mut count = 0; - for (key, value) in source.iter() { + for (key, value) in source.iter(None) { count += 1; if count == 100_000 { count = 0; diff --git a/ethcore/src/migrations/v9.rs b/ethcore/src/migrations/v9.rs new file mode 100644 index 000000000..0c8e77588 --- /dev/null +++ b/ethcore/src/migrations/v9.rs @@ -0,0 +1,82 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see <http://www.gnu.org/licenses/>. + + +//! This migration consolidates all databases into a single one using Column Families. + +use util::{Rlp, RlpStream, View, Stream}; +use util::kvdb::Database; +use util::migration::{Batch, Config, Error, Migration, Progress}; + +/// Which part of the block to preserve. +pub enum Extract { + /// Extract block header RLP. + Header, + /// Extract block body RLP. + Body, + /// Don't change the value. + All, +} + +/// Consolidation of the extras/block/state databases into a single one. +pub struct ToV9 { + progress: Progress, + column: Option<u32>, + extract: Extract, +} + +impl ToV9 { + /// Creates a new V9 migration and assigns all `(key, value)` pairs from the `source` DB to the given Column Family. + pub fn new(column: Option<u32>, extract: Extract) -> Self { + ToV9 { + progress: Progress::default(), + column: column, + extract: extract, + } + } +} + +impl Migration for ToV9 { + + fn columns(&self) -> Option<u32> { Some(5) } + + fn version(&self) -> u32 { 9 } + + fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> { + let mut batch = Batch::new(config, self.column); + + for (key, value) in source.iter(col) { + self.progress.tick(); + match self.extract { + Extract::Header => { + try!(batch.insert(key.to_vec(), Rlp::new(&value).at(0).as_raw().to_vec(), dest)) + }, + Extract::Body => { + let mut body = RlpStream::new_list(2); + let block_rlp = Rlp::new(&value); + body.append_raw(block_rlp.at(1).as_raw(), 1); + body.append_raw(block_rlp.at(2).as_raw(), 1); + try!(batch.insert(key.to_vec(), body.out(), dest)) + }, + Extract::All => { + try!(batch.insert(key.to_vec(), value.to_vec(), dest)) + } + } + } + + batch.commit(dest) + } +} diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 97ba6c082..1ebae894e 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -15,7 +15,6 @@ // along with Parity. If not, see <http://www.gnu.org/licenses/>. use rayon::prelude::*; -use std::sync::atomic::{self, AtomicBool}; use std::time::{Instant, Duration}; use util::*; @@ -29,14 +28,14 @@ use error::*; use transaction::SignedTransaction; use receipt::Receipt; use spec::Spec; -use engine::Engine; +use engines::Engine; use miner::{MinerService, MinerStatus, TransactionQueue, AccountDetails, TransactionOrigin}; use miner::work_notify::WorkPoster; use client::TransactionImportResult; use miner::price_info::PriceInfo; /// Different possible definitions for pending transaction set. -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub enum PendingSet { /// Always just the transactions in the queue. These have had only cheap checks. AlwaysQueue, @@ -48,7 +47,7 @@ } /// Configures the behaviour of the miner. -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub struct MinerOptions { /// URLs to notify when there is new work.
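The `Progress` type used via `self.progress.tick()` here (and in the v7/v8 migrations) lives in `util::migration` and is not shown in this patch. A plausible reconstruction, matching the hand-rolled counting it replaces in v7.rs (a dot every 100,000 keys as a liveness signal); the real type may differ in detail and uses Parity's `flush!` macro:

use std::io::{self, Write};

// Sketch of `Progress`, reconstructed from the replaced counting logic.
#[derive(Default)]
pub struct Progress(usize);

impl Progress {
    pub fn tick(&mut self) {
        self.0 += 1;
        if self.0 == 100_000 {
            self.0 = 0;
            print!(".");
            io::stdout().flush().expect("could not flush stdout");
        }
    }
}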
pub new_work_notify: Vec, @@ -77,12 +76,12 @@ impl Default for MinerOptions { MinerOptions { new_work_notify: vec![], force_sealing: false, - reseal_on_external_tx: true, + reseal_on_external_tx: false, reseal_on_own_tx: true, tx_gas_limit: !U256::zero(), tx_queue_size: 1024, pending_set: PendingSet::AlwaysQueue, - reseal_min_period: Duration::from_secs(0), + reseal_min_period: Duration::from_secs(2), work_queue_size: 20, enable_resubmission: true, } @@ -90,6 +89,7 @@ impl Default for MinerOptions { } /// Options for the dynamic gas price recalibrator. +#[derive(Debug, PartialEq)] pub struct GasPriceCalibratorOptions { /// Base transaction price to match against. pub usd_per_tx: f32, @@ -98,9 +98,9 @@ pub struct GasPriceCalibratorOptions { } /// The gas price validator variant for a `GasPricer`. +#[derive(Debug, PartialEq)] pub struct GasPriceCalibrator { options: GasPriceCalibratorOptions, - next_calibration: Instant, } @@ -128,6 +128,7 @@ impl GasPriceCalibrator { } /// Struct to look after updating the acceptable gas price of a miner. +#[derive(Debug, PartialEq)] pub enum GasPricer { /// A fixed gas price in terms of Wei - always the argument given. Fixed(U256), @@ -157,15 +158,20 @@ impl GasPricer { } } +struct SealingWork { + queue: UsingQueue, + enabled: bool, +} + /// Keeps track of transactions using priority queue and holds currently mined block. pub struct Miner { // NOTE [ToDr] When locking always lock in this order! transaction_queue: Arc>, - sealing_work: Mutex>, + sealing_work: Mutex, // for sealing... options: MinerOptions, - sealing_enabled: AtomicBool, + next_allowed_reseal: Mutex, sealing_block_last_request: Mutex, gas_range_target: RwLock<(U256, U256)>, @@ -184,10 +190,9 @@ impl Miner { Miner { transaction_queue: Arc::new(Mutex::new(TransactionQueue::new())), options: Default::default(), - sealing_enabled: AtomicBool::new(false), next_allowed_reseal: Mutex::new(Instant::now()), sealing_block_last_request: Mutex::new(0), - sealing_work: Mutex::new(UsingQueue::new(20)), + sealing_work: Mutex::new(SealingWork{queue: UsingQueue::new(20), enabled: false}), gas_range_target: RwLock::new((U256::zero(), U256::zero())), author: RwLock::new(Address::default()), extra_data: RwLock::new(Vec::new()), @@ -204,10 +209,9 @@ impl Miner { let txq = Arc::new(Mutex::new(TransactionQueue::with_limits(options.tx_queue_size, options.tx_gas_limit))); Arc::new(Miner { transaction_queue: txq, - sealing_enabled: AtomicBool::new(options.force_sealing || !options.new_work_notify.is_empty()), next_allowed_reseal: Mutex::new(Instant::now()), sealing_block_last_request: Mutex::new(0), - sealing_work: Mutex::new(UsingQueue::new(options.work_queue_size)), + sealing_work: Mutex::new(SealingWork{queue: UsingQueue::new(options.work_queue_size), enabled: options.force_sealing || !options.new_work_notify.is_empty()}), gas_range_target: RwLock::new((U256::zero(), U256::zero())), author: RwLock::new(Address::default()), extra_data: RwLock::new(Vec::new()), @@ -229,12 +233,12 @@ impl Miner { /// Get `Some` `clone()` of the current pending block's state or `None` if we're not sealing. pub fn pending_state(&self) -> Option { - self.sealing_work.lock().peek_last_ref().map(|b| b.block().fields().state.clone()) + self.sealing_work.lock().queue.peek_last_ref().map(|b| b.block().fields().state.clone()) } /// Get `Some` `clone()` of the current pending block's state or `None` if we're not sealing. 
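The new `next_allowed_reseal` / `reseal_min_period` pair implements a simple rate limit on resealing. A self-contained sketch of the idea with a toy type (in `Miner` the `Instant` sits behind a `Mutex` and the check is spread across methods):

use std::time::{Duration, Instant};

// Toy reseal rate limiter; names are illustrative, not Parity's.
pub struct ResealTimer {
    next_allowed: Instant,
    min_period: Duration,
}

impl ResealTimer {
    pub fn new(min_period: Duration) -> Self {
        ResealTimer { next_allowed: Instant::now(), min_period: min_period }
    }

    /// True if a reseal may happen now; if so, also schedule the next
    /// allowed reseal one `min_period` from now, as `update_sealing` does.
    pub fn try_reseal(&mut self) -> bool {
        let now = Instant::now();
        if now >= self.next_allowed {
            self.next_allowed = now + self.min_period;
            true
        } else {
            false
        }
    }
}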
pub fn pending_block(&self) -> Option { - self.sealing_work.lock().peek_last_ref().map(|b| b.base().clone()) + self.sealing_work.lock().queue.peek_last_ref().map(|b| b.base().clone()) } /// Prepares new block for sealing including top transactions from queue. @@ -256,7 +260,7 @@ impl Miner { let (transactions, mut open_block, original_work_hash) = { let transactions = {self.transaction_queue.lock().top_transactions()}; let mut sealing_work = self.sealing_work.lock(); - let last_work_hash = sealing_work.peek_last_ref().map(|pb| pb.block().fields().header.hash()); + let last_work_hash = sealing_work.queue.peek_last_ref().map(|pb| pb.block().fields().header.hash()); let best_hash = chain.best_block_header().sha3(); /* // check to see if last ClosedBlock in would_seals is actually same parent block. @@ -266,7 +270,7 @@ impl Miner { // otherwise, leave everything alone. // otherwise, author a fresh block. */ - let open_block = match sealing_work.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) { + let open_block = match sealing_work.queue.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) { Some(old_block) => { trace!(target: "miner", "Already have previous work; updating and returning"); // add transactions to old_block @@ -357,7 +361,7 @@ impl Miner { let (work, is_new) = { let mut sealing_work = self.sealing_work.lock(); - let last_work_hash = sealing_work.peek_last_ref().map(|pb| pb.block().fields().header.hash()); + let last_work_hash = sealing_work.queue.peek_last_ref().map(|pb| pb.block().fields().header.hash()); trace!(target: "miner", "Checking whether we need to reseal: orig={:?} last={:?}, this={:?}", original_work_hash, last_work_hash, block.block().fields().header.hash()); let (work, is_new) = if last_work_hash.map_or(true, |h| h != block.block().fields().header.hash()) { trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash()); @@ -365,16 +369,16 @@ impl Miner { let number = block.block().fields().header.number(); let difficulty = *block.block().fields().header.difficulty(); let is_new = original_work_hash.map_or(true, |h| block.block().fields().header.hash() != h); - sealing_work.push(block); + sealing_work.queue.push(block); // If push notifications are enabled we assume all work items are used. 
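`UsingQueue` is defined elsewhere in the codebase; to make the two operations used above concrete, here is a toy approximation. The real type also tracks items handed out via `use_last_ref`/`get_used_if`, which this sketch omits, and its exact semantics may differ:

// Toy stand-in for `UsingQueue`, covering only `push`, `peek_last_ref`
// and `pop_if` as used in `prepare_sealing`.
pub struct ToyQueue<T> {
    items: Vec<T>,
}

impl<T> ToyQueue<T> {
    pub fn push(&mut self, item: T) { self.items.push(item); }

    /// Borrow the most recently pushed item, if any.
    pub fn peek_last_ref(&self) -> Option<&T> { self.items.last() }

    /// Take the newest item matching `predicate`, discarding anything
    /// pushed after it (here: reuse the pending block whose parent is
    /// still the best block).
    pub fn pop_if<P>(&mut self, predicate: P) -> Option<T> where P: Fn(&T) -> bool {
        match self.items.iter().rposition(|i| predicate(i)) {
            Some(pos) => {
                self.items.truncate(pos + 1);
                self.items.pop()
            }
            None => None,
        }
    }
}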
if self.work_poster.is_some() && is_new { - sealing_work.use_last_ref(); + sealing_work.queue.use_last_ref(); } (Some((pow_hash, difficulty, number)), is_new) } else { (None, false) }; - trace!(target: "miner", "prepare_sealing: leaving (last={:?})", sealing_work.peek_last_ref().map(|b| b.block().fields().header.hash())); + trace!(target: "miner", "prepare_sealing: leaving (last={:?})", sealing_work.queue.peek_last_ref().map(|b| b.block().fields().header.hash())); (work, is_new) }; if is_new { @@ -391,14 +395,22 @@ impl Miner { /// Returns true if we had to prepare new pending block fn enable_and_prepare_sealing(&self, chain: &MiningBlockChainClient) -> bool { trace!(target: "miner", "enable_and_prepare_sealing: entering"); - let have_work = self.sealing_work.lock().peek_last_ref().is_some(); - trace!(target: "miner", "enable_and_prepare_sealing: have_work={}", have_work); - if !have_work { + let prepare_new = { + let mut sealing_work = self.sealing_work.lock(); + let have_work = sealing_work.queue.peek_last_ref().is_some(); + trace!(target: "miner", "enable_and_prepare_sealing: have_work={}", have_work); + if !have_work { + sealing_work.enabled = true; + true + } else { + false + } + }; + if prepare_new { // -------------------------------------------------------------------------- // | NOTE Code below requires transaction_queue and sealing_work locks. | // | Make sure to release the locks before calling that method. | // -------------------------------------------------------------------------- - self.sealing_enabled.store(true, atomic::Ordering::Relaxed); self.prepare_sealing(chain); } let mut sealing_block_last_request = self.sealing_block_last_request.lock(); @@ -408,8 +420,8 @@ impl Miner { *sealing_block_last_request = best_number; } - // Return if - !have_work + // Return if we restarted + prepare_new } fn add_transactions_to_queue(&self, chain: &MiningBlockChainClient, transactions: Vec, origin: TransactionOrigin, transaction_queue: &mut TransactionQueue) -> @@ -448,13 +460,13 @@ impl MinerService for Miner { MinerStatus { transactions_in_pending_queue: status.pending, transactions_in_future_queue: status.future, - transactions_in_pending_block: sealing_work.peek_last_ref().map_or(0, |b| b.transactions().len()), + transactions_in_pending_block: sealing_work.queue.peek_last_ref().map_or(0, |b| b.transactions().len()), } } fn call(&self, chain: &MiningBlockChainClient, t: &SignedTransaction, analytics: CallAnalytics) -> Result { let sealing_work = self.sealing_work.lock(); - match sealing_work.peek_last_ref() { + match sealing_work.queue.peek_last_ref() { Some(work) => { let block = work.block(); @@ -501,7 +513,7 @@ impl MinerService for Miner { fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 { let sealing_work = self.sealing_work.lock(); - sealing_work.peek_last_ref().map_or_else( + sealing_work.queue.peek_last_ref().map_or_else( || chain.latest_balance(address), |b| b.block().fields().state.balance(address) ) @@ -509,7 +521,7 @@ impl MinerService for Miner { fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> H256 { let sealing_work = self.sealing_work.lock(); - sealing_work.peek_last_ref().map_or_else( + sealing_work.queue.peek_last_ref().map_or_else( || chain.latest_storage_at(address, position), |b| b.block().fields().state.storage_at(address, position) ) @@ -517,12 +529,12 @@ impl MinerService for Miner { fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 { let sealing_work = 
self.sealing_work.lock(); - sealing_work.peek_last_ref().map_or_else(|| chain.latest_nonce(address), |b| b.block().fields().state.nonce(address)) + sealing_work.queue.peek_last_ref().map_or_else(|| chain.latest_nonce(address), |b| b.block().fields().state.nonce(address)) } fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option { let sealing_work = self.sealing_work.lock(); - sealing_work.peek_last_ref().map_or_else(|| chain.code(address), |b| b.block().fields().state.code(address)) + sealing_work.queue.peek_last_ref().map_or_else(|| chain.code(address), |b| b.block().fields().state.code(address)) } fn set_author(&self, author: Address) { @@ -671,8 +683,8 @@ impl MinerService for Miner { let queue = self.transaction_queue.lock(); let sw = self.sealing_work.lock(); // TODO: should only use the sealing_work when it's current (it could be an old block) - let sealing_set = match self.sealing_enabled.load(atomic::Ordering::Relaxed) { - true => sw.peek_last_ref(), + let sealing_set = match sw.enabled { + true => sw.queue.peek_last_ref(), false => None, }; match (&self.options.pending_set, sealing_set) { @@ -684,8 +696,8 @@ impl MinerService for Miner { fn pending_transactions_hashes(&self) -> Vec { let queue = self.transaction_queue.lock(); let sw = self.sealing_work.lock(); - let sealing_set = match self.sealing_enabled.load(atomic::Ordering::Relaxed) { - true => sw.peek_last_ref(), + let sealing_set = match sw.enabled { + true => sw.queue.peek_last_ref(), false => None, }; match (&self.options.pending_set, sealing_set) { @@ -697,8 +709,8 @@ impl MinerService for Miner { fn transaction(&self, hash: &H256) -> Option { let queue = self.transaction_queue.lock(); let sw = self.sealing_work.lock(); - let sealing_set = match self.sealing_enabled.load(atomic::Ordering::Relaxed) { - true => sw.peek_last_ref(), + let sealing_set = match sw.enabled { + true => sw.queue.peek_last_ref(), false => None, }; match (&self.options.pending_set, sealing_set) { @@ -708,7 +720,8 @@ impl MinerService for Miner { } fn pending_receipts(&self) -> BTreeMap { - match (self.sealing_enabled.load(atomic::Ordering::Relaxed), self.sealing_work.lock().peek_last_ref()) { + let sealing_work = self.sealing_work.lock(); + match (sealing_work.enabled, sealing_work.queue.peek_last_ref()) { (true, Some(pending)) => { let hashes = pending.transactions() .iter() @@ -727,27 +740,43 @@ impl MinerService for Miner { } fn update_sealing(&self, chain: &MiningBlockChainClient) { - if self.sealing_enabled.load(atomic::Ordering::Relaxed) { - let current_no = chain.chain_info().best_block_number; - let has_local_transactions = self.transaction_queue.lock().has_local_pending_transactions(); - let last_request = *self.sealing_block_last_request.lock(); - let should_disable_sealing = !self.forced_sealing() - && !has_local_transactions - && current_no > last_request - && current_no - last_request > SEALING_TIMEOUT_IN_BLOCKS; + trace!(target: "miner", "update_sealing"); + let requires_reseal = { + let mut sealing_work = self.sealing_work.lock(); + if sealing_work.enabled { + trace!(target: "miner", "update_sealing: sealing enabled"); + let current_no = chain.chain_info().best_block_number; + let has_local_transactions = self.transaction_queue.lock().has_local_pending_transactions(); + let last_request = *self.sealing_block_last_request.lock(); + let should_disable_sealing = !self.forced_sealing() + && !has_local_transactions + && current_no > last_request + && current_no - last_request > SEALING_TIMEOUT_IN_BLOCKS; - if 
should_disable_sealing { - trace!(target: "miner", "Miner sleeping (current {}, last {})", current_no, last_request); - self.sealing_enabled.store(false, atomic::Ordering::Relaxed); - self.sealing_work.lock().reset(); + trace!(target: "miner", "update_sealing: should_disable_sealing={}; current_no={}, last_request={}", should_disable_sealing, current_no, last_request); + + if should_disable_sealing { + trace!(target: "miner", "Miner sleeping (current {}, last {})", current_no, last_request); + sealing_work.enabled = false; + sealing_work.queue.reset(); + false + } else { + // sealing enabled and we don't want to sleep. + *self.next_allowed_reseal.lock() = Instant::now() + self.options.reseal_min_period; + true + } } else { - *self.next_allowed_reseal.lock() = Instant::now() + self.options.reseal_min_period; - // -------------------------------------------------------------------------- - // | NOTE Code below requires transaction_queue and sealing_work locks. | - // | Make sure to release the locks before calling that method. | - // -------------------------------------------------------------------------- - self.prepare_sealing(chain); + // sealing is disabled. + false } + }; + + if requires_reseal { + // -------------------------------------------------------------------------- + // | NOTE Code below requires transaction_queue and sealing_work locks. | + // | Make sure to release the locks before calling that method. | + // -------------------------------------------------------------------------- + self.prepare_sealing(chain); } } @@ -756,13 +785,13 @@ impl MinerService for Miner { self.enable_and_prepare_sealing(chain); trace!(target: "miner", "map_sealing_work: sealing prepared"); let mut sealing_work = self.sealing_work.lock(); - let ret = sealing_work.use_last_ref(); + let ret = sealing_work.queue.use_last_ref(); trace!(target: "miner", "map_sealing_work: leaving use_last_ref={:?}", ret.as_ref().map(|b| b.block().fields().header.hash())); ret.map(f) } fn submit_seal(&self, chain: &MiningBlockChainClient, pow_hash: H256, seal: Vec) -> Result<(), Error> { - let result = if let Some(b) = self.sealing_work.lock().get_used_if(if self.options.enable_resubmission { GetAction::Clone } else { GetAction::Take }, |b| &b.hash() == &pow_hash) { + let result = if let Some(b) = self.sealing_work.lock().queue.get_used_if(if self.options.enable_resubmission { GetAction::Clone } else { GetAction::Take }, |b| &b.hash() == &pow_hash) { b.lock().try_seal(self.engine(), seal).or_else(|_| { warn!(target: "miner", "Mined solution rejected: Invalid."); Err(Error::PowInvalid) @@ -781,6 +810,8 @@ impl MinerService for Miner { } fn chain_new_blocks(&self, chain: &MiningBlockChainClient, _imported: &[H256], _invalid: &[H256], enacted: &[H256], retracted: &[H256]) { + trace!(target: "miner", "chain_new_blocks"); + fn fetch_transactions(chain: &MiningBlockChainClient, hash: &H256) -> Vec { let block = chain .block(BlockID::Hash(*hash)) @@ -836,11 +867,13 @@ impl MinerService for Miner { }); } - // -------------------------------------------------------------------------- - // | NOTE Code below requires transaction_queue and sealing_work locks. | - // | Make sure to release the locks before calling that method. | - // -------------------------------------------------------------------------- - self.update_sealing(chain); + if enacted.len() > 0 { + // -------------------------------------------------------------------------- + // | NOTE Code below requires transaction_queue and sealing_work locks. 
| + // | Make sure to release the locks before calling that method. | + // -------------------------------------------------------------------------- + self.update_sealing(chain); + } } } diff --git a/ethcore/src/miner/work_notify.rs b/ethcore/src/miner/work_notify.rs index b9952e14b..557f02f31 100644 --- a/ethcore/src/miner/work_notify.rs +++ b/ethcore/src/miner/work_notify.rs @@ -35,7 +35,7 @@ pub struct WorkPoster { impl WorkPoster { pub fn new(urls: &[String]) -> Self { let urls = urls.into_iter().filter_map(|u| { - match Url::parse(&u) { + match Url::parse(u) { Ok(url) => Some(url), Err(e) => { warn!("Error parsing URL {} : {}", u, e); diff --git a/ethcore/src/pod_account.rs b/ethcore/src/pod_account.rs index d04833cab..833ae9b6b 100644 --- a/ethcore/src/pod_account.rs +++ b/ethcore/src/pod_account.rs @@ -28,8 +28,8 @@ pub struct PodAccount { pub balance: U256, /// The nonce of the account. pub nonce: U256, - /// The code of the account. - pub code: Bytes, + /// The code of the account or `None` in the special case that it is unknown. + pub code: Option, /// The storage of the account. pub storage: BTreeMap, } @@ -38,7 +38,7 @@ impl PodAccount { /// Construct new object. #[cfg(test)] pub fn new(balance: U256, nonce: U256, code: Bytes, storage: BTreeMap) -> PodAccount { - PodAccount { balance: balance, nonce: nonce, code: code, storage: storage } + PodAccount { balance: balance, nonce: nonce, code: Some(code), storage: storage } } /// Convert Account to a PodAccount. @@ -48,7 +48,7 @@ impl PodAccount { balance: *acc.balance(), nonce: *acc.nonce(), storage: acc.storage_overlay().iter().fold(BTreeMap::new(), |mut m, (k, &(_, ref v))| {m.insert(k.clone(), v.clone()); m}), - code: acc.code().unwrap().to_vec(), + code: acc.code().map(|x| x.to_vec()), } } @@ -58,14 +58,15 @@ impl PodAccount { stream.append(&self.nonce); stream.append(&self.balance); stream.append(&sec_trie_root(self.storage.iter().map(|(k, v)| (k.to_vec(), encode(&U256::from(v.as_slice())).to_vec())).collect())); - stream.append(&self.code.sha3()); + stream.append(&self.code.as_ref().unwrap_or(&vec![]).sha3()); stream.out() } /// Place additional data into given hash DB. 
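The point of changing `code` to `Option<Bytes>` above, made explicit: the new type separates "account known to have no code" from "code unknown because it was never cached", which a bare `Bytes` could not express. A self-contained sketch (toy alias; in Parity `Bytes` comes from `util`):

pub type Bytes = Vec<u8>;

// The three states `Option<Bytes>` distinguishes for account code.
pub fn describe_code(code: &Option<Bytes>) -> &'static str {
    match *code {
        None => "code unknown: not in cache, so it cannot be hashed or diffed",
        Some(ref c) if c.is_empty() => "known to have no code",
        Some(_) => "known code: safe to hash, insert into the db, and diff",
    }
}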
pub fn insert_additional(&self, db: &mut AccountDBMut) { - if !self.code.is_empty() { - db.insert(&self.code); + match self.code { + Some(ref c) if !c.is_empty() => { db.insert(c); } + _ => {} } let mut r = H256::new(); let mut t = SecTrieDBMut::new(db, &mut r); @@ -80,7 +81,7 @@ impl From for PodAccount { PodAccount { balance: a.balance.into(), nonce: a.nonce.into(), - code: a.code.into(), + code: Some(a.code.into()), storage: a.storage.into_iter().map(|(key, value)| { let key: U256 = key.into(); let value: U256 = value.into(); @@ -95,7 +96,7 @@ impl From for PodAccount { PodAccount { balance: a.balance.map_or_else(U256::zero, Into::into), nonce: a.nonce.map_or_else(U256::zero, Into::into), - code: vec![], + code: Some(vec![]), storage: BTreeMap::new() } } @@ -103,7 +104,13 @@ impl From for PodAccount { impl fmt::Display for PodAccount { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "(bal={}; nonce={}; code={} bytes, #{}; storage={} items)", self.balance, self.nonce, self.code.len(), self.code.sha3(), self.storage.len()) + write!(f, "(bal={}; nonce={}; code={} bytes, #{}; storage={} items)", + self.balance, + self.nonce, + self.code.as_ref().map_or(0, |c| c.len()), + self.code.as_ref().map_or_else(H256::new, |c| c.sha3()), + self.storage.len() + ) } } @@ -114,13 +121,13 @@ pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option Some(AccountDiff { balance: Diff::Born(x.balance), nonce: Diff::Born(x.nonce), - code: Diff::Born(x.code.clone()), + code: Diff::Born(x.code.as_ref().expect("account is newly created; newly created accounts must be given code; all caches should remain in place; qed").clone()), storage: x.storage.iter().map(|(k, v)| (k.clone(), Diff::Born(v.clone()))).collect(), }), (Some(x), None) => Some(AccountDiff { balance: Diff::Died(x.balance), nonce: Diff::Died(x.nonce), - code: Diff::Died(x.code.clone()), + code: Diff::Died(x.code.as_ref().expect("account is deleted; only way to delete account is running SUICIDE; account must have had own code cached to make operation; all caches should remain in place; qed").clone()), storage: x.storage.iter().map(|(k, v)| (k.clone(), Diff::Died(v.clone()))).collect(), }), (Some(pre), Some(post)) => { @@ -130,11 +137,14 @@ pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option Diff::new(pre_code, post_code), + _ => Diff::Same, + }, storage: storage.into_iter().map(|k| (k.clone(), Diff::new( - pre.storage.get(&k).cloned().unwrap_or_else(H256::new), - post.storage.get(&k).cloned().unwrap_or_else(H256::new) + pre.storage.get(k).cloned().unwrap_or_else(H256::new), + post.storage.get(k).cloned().unwrap_or_else(H256::new) ))).collect(), }; if r.balance.is_same() && r.nonce.is_same() && r.code.is_same() && r.storage.is_empty() { @@ -156,7 +166,7 @@ mod test { #[test] fn existence() { - let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: vec![], storage: map![]}; + let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: Some(vec![]), storage: map![]}; assert_eq!(diff_pod(Some(&a), Some(&a)), None); assert_eq!(diff_pod(None, Some(&a)), Some(AccountDiff{ balance: Diff::Born(69.into()), @@ -168,8 +178,8 @@ mod test { #[test] fn basic() { - let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: vec![], storage: map![]}; - let b = PodAccount{balance: 42.into(), nonce: 1.into(), code: vec![], storage: map![]}; + let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: Some(vec![]), storage: map![]}; + let b = PodAccount{balance: 42.into(), nonce: 1.into(), 
code: Some(vec![]), storage: map![]}; assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff { balance: Diff::Changed(69.into(), 42.into()), nonce: Diff::Changed(0.into(), 1.into()), @@ -180,8 +190,8 @@ mod test { #[test] fn code() { - let a = PodAccount{balance: 0.into(), nonce: 0.into(), code: vec![], storage: map![]}; - let b = PodAccount{balance: 0.into(), nonce: 1.into(), code: vec![0], storage: map![]}; + let a = PodAccount{balance: 0.into(), nonce: 0.into(), code: Some(vec![]), storage: map![]}; + let b = PodAccount{balance: 0.into(), nonce: 1.into(), code: Some(vec![0]), storage: map![]}; assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff { balance: Diff::Same, nonce: Diff::Changed(0.into(), 1.into()), @@ -195,13 +205,13 @@ mod test { let a = PodAccount { balance: 0.into(), nonce: 0.into(), - code: vec![], + code: Some(vec![]), storage: map_into![1 => 1, 2 => 2, 3 => 3, 4 => 4, 5 => 0, 6 => 0, 7 => 0] }; let b = PodAccount { balance: 0.into(), nonce: 0.into(), - code: vec![], + code: Some(vec![]), storage: map_into![1 => 1, 2 => 3, 3 => 0, 5 => 0, 7 => 7, 8 => 0, 9 => 9] }; assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff { diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index e0aae18bc..6e22d9dd7 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -138,7 +138,7 @@ impl IoHandler for ClientIoHandler { fn message(&self, _io: &IoContext, net_message: &ClientIoMessage) { match *net_message { ClientIoMessage::BlockVerified => { self.client.import_verified_blocks(); } - ClientIoMessage::NewTransactions(ref transactions) => { self.client.import_queued_transactions(&transactions); } + ClientIoMessage::NewTransactions(ref transactions) => { self.client.import_queued_transactions(transactions); } _ => {} // ignore other messages } } @@ -175,7 +175,7 @@ mod tests { let service = ClientService::start( ClientConfig::default(), get_test_spec(), - &temp_path.as_path(), + temp_path.as_path(), Arc::new(Miner::with_spec(get_test_spec())), ); assert!(service.is_ok()); diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 6a9d4ba76..47d95722a 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -27,9 +27,10 @@ use error::Error; use ids::BlockID; use views::{BlockView, HeaderView}; -use util::{Bytes, Hashable, HashDB, JournalDB, snappy, TrieDB, TrieDBMut, TrieMut}; +use util::{Bytes, Hashable, HashDB, JournalDB, snappy, TrieDB, TrieDBMut, TrieMut, DBTransaction}; +use util::error::UtilError; use util::hash::{FixedHash, H256}; -use util::rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View}; +use util::rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View, Compressible, RlpType}; use self::account::Account; use self::block::AbridgedBlock; @@ -261,7 +262,8 @@ pub fn chunk_state(db: &HashDB, root: &H256, path: &Path) -> Result, E let account_db = AccountDB::from_hash(db, account_key_hash); let fat_rlp = try!(account.to_fat_rlp(&account_db)); - try!(chunker.push(account_key, fat_rlp)); + let compressed_rlp = UntrustedRlp::new(&fat_rlp).compress(RlpType::Snapshot).to_vec(); + try!(chunker.push(account_key, compressed_rlp)); } if chunker.cur_size != 0 { @@ -358,7 +360,9 @@ impl StateRebuilder { try!(rebuild_account_trie(db.as_hashdb_mut(), account_chunk, out_pairs_chunk)); // commit the db changes we made in this thread. 
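The snapshot path above now compresses each account's fat RLP before chunking and decompresses it during rebuild, so correctness depends on the round-trip being exact. A sketch of that invariant with identity stand-ins (the real pair is `Compressible::compress(RlpType::Snapshot)` and `decompress` from `util::rlp`):

fn compress(data: &[u8]) -> Vec<u8> { data.to_vec() }   // stand-in, not the real codec
fn decompress(data: &[u8]) -> Vec<u8> { data.to_vec() } // stand-in, not the real codec

#[test]
fn snapshot_compression_round_trips() {
    let fat_rlp = vec![0xc2u8, 0x01, 0x02]; // any RLP bytes
    assert_eq!(decompress(&compress(&fat_rlp)), fat_rlp);
}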
- try!(db.commit(0, &H256::zero(), None)); + let batch = DBTransaction::new(&db.backing()); + try!(db.commit(&batch, 0, &H256::zero(), None)); + try!(db.backing().write(batch).map_err(UtilError::SimpleString)); Ok(()) }); @@ -387,7 +391,9 @@ impl StateRebuilder { } } - try!(self.db.commit(0, &H256::zero(), None)); + let batch = DBTransaction::new(&self.db.backing()); + try!(self.db.commit(&batch, 0, &H256::zero(), None)); + try!(self.db.backing().write(batch).map_err(|e| Error::Util(e.into()))); Ok(()) } @@ -400,7 +406,8 @@ fn rebuild_account_trie(db: &mut HashDB, account_chunk: &[&[u8]], out_chunk: &mu let account_rlp = UntrustedRlp::new(account_pair); let hash: H256 = try!(account_rlp.val_at(0)); - let fat_rlp = try!(account_rlp.at(1)); + let decompressed = try!(account_rlp.at(1)).decompress(RlpType::Snapshot); + let fat_rlp = UntrustedRlp::new(&decompressed[..]); let thin_rlp = { let mut acct_db = AccountDBMut::from_hash(db.as_hashdb_mut(), hash); diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index 0e61b34c5..6a9223a1c 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -17,14 +17,12 @@ //! Parameters for a block chain. use common::*; -use engine::*; +use engines::{Engine, NullEngine, BasicAuthority}; use pod_state::*; -use null_engine::*; use account_db::*; use super::genesis::Genesis; use super::seal::Generic as GenericSeal; use ethereum; -use basic_authority::BasicAuthority; use ethjson; /// Parameters common to all engines. @@ -38,6 +36,8 @@ pub struct CommonParams { pub network_id: U256, /// Minimum gas limit. pub min_gas_limit: U256, + /// Fork block to check. + pub fork_block: Option<(BlockNumber, H256)>, } impl From for CommonParams { @@ -47,6 +47,7 @@ impl From for CommonParams { maximum_extra_data_size: p.maximum_extra_data_size.into(), network_id: p.network_id.into(), min_gas_limit: p.min_gas_limit.into(), + fork_block: if let (Some(n), Some(h)) = (p.fork_block, p.fork_hash) { Some((n.into(), h.into())) } else { None }, } } } @@ -151,6 +152,9 @@ impl Spec { /// Get the configured Network ID. pub fn network_id(&self) -> U256 { self.params.network_id } + /// Get the configured network fork block. + pub fn fork_block(&self) -> Option<(BlockNumber, H256)> { self.params.fork_block } + /// Get the header of the genesis block. pub fn genesis_header(&self) -> Header { Header { diff --git a/ethcore/src/state.rs b/ethcore/src/state.rs index effda5f0f..45a88d519 100644 --- a/ethcore/src/state.rs +++ b/ethcore/src/state.rs @@ -15,11 +15,11 @@ // along with Parity. If not, see . use common::*; -use engine::Engine; +use engines::Engine; use executive::{Executive, TransactOptions}; use evm::Factory as EvmFactory; use account_db::*; -use trace::Trace; +use trace::FlatTrace; use pod_account::*; use pod_state::{self, PodState}; use types::state_diff::StateDiff; @@ -29,7 +29,7 @@ pub struct ApplyOutcome { /// The receipt for the applied transaction. pub receipt: Receipt, /// The trace for the applied transaction, if None if tracing is disabled. - pub trace: Option, + pub trace: Vec, } /// Result type for the execution ("application") of a transaction. 
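The `fork_block` accessor added to `Spec` above exposes a `(number, hash)` checkpoint for the configured fork. A hedged sketch of the kind of check it enables; `hash_at` is a hypothetical lookup standing in for a chain or peer query:

pub type BlockNumber = u64;
pub type H256 = [u8; 32]; // toy hash type for the sketch

// Compatible if the hash at the fork height matches the expected one;
// an unknown height (not yet synced that far) counts as compatible.
pub fn on_expected_fork<F>(fork_block: &Option<(BlockNumber, H256)>, hash_at: F) -> bool
    where F: Fn(BlockNumber) -> Option<H256>
{
    match *fork_block {
        Some((number, expected)) => hash_at(number).map_or(true, |h| h == expected),
        None => true,
    }
}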
@@ -122,7 +122,7 @@ impl State { fn insert_cache(&self, address: &Address, account: Option) { if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() { - if !snapshot.contains_key(&address) { + if !snapshot.contains_key(address) { snapshot.insert(address.clone(), self.cache.borrow_mut().insert(address.clone(), account)); return; } @@ -132,7 +132,7 @@ impl State { fn note_cache(&self, address: &Address) { if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() { - if !snapshot.contains_key(&address) { + if !snapshot.contains_key(address) { snapshot.insert(address.clone(), self.cache.borrow().get(address).cloned()); } } @@ -151,7 +151,7 @@ impl State { /// Create a new contract at address `contract`. If there is already an account at the address /// it will have its code reset, ready for `init_code()`. pub fn new_contract(&mut self, contract: &Address, balance: U256) { - self.insert_cache(&contract, Some(Account::new_contract(balance, self.account_start_nonce))); + self.insert_cache(contract, Some(Account::new_contract(balance, self.account_start_nonce))); } /// Remove an existing account. @@ -162,7 +162,7 @@ impl State { /// Determine whether an account exists. pub fn exists(&self, a: &Address) -> bool { let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); - self.cache.borrow().get(&a).unwrap_or(&None).is_some() || db.contains(&a) + self.cache.borrow().get(a).unwrap_or(&None).is_some() || db.contains(a) } /// Get the balance of account `a`. @@ -329,7 +329,7 @@ impl State { let have_key = self.cache.borrow().contains_key(a); if !have_key { let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); - self.insert_cache(a, db.get(&a).map(Account::from_rlp)) + self.insert_cache(a, db.get(a).map(Account::from_rlp)) } if require_code { if let Some(ref mut account) = self.cache.borrow_mut().get_mut(a).unwrap().as_mut() { @@ -350,7 +350,7 @@ impl State { let have_key = self.cache.borrow().contains_key(a); if !have_key { let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); - self.insert_cache(a, db.get(&a).map(Account::from_rlp)) + self.insert_cache(a, db.get(a).map(Account::from_rlp)) } else { self.note_cache(a); } @@ -402,7 +402,8 @@ use spec::*; use transaction::*; use util::log::init_log; use trace::trace; -use trace::trace::{Trace}; +use trace::FlatTrace; +use types::executed::CallType; #[test] fn should_apply_create_transaction() { @@ -427,8 +428,9 @@ fn should_apply_create_transaction() { state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); let vm_factory = Default::default(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); - let expected_trace = Some(Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + subtraces: 0, action: trace::Action::Create(trace::Create { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), value: 100.into(), @@ -440,8 +442,7 @@ fn should_apply_create_transaction() { address: Address::from_str("8988167e088c87cd314df6d3c2b83da5acb93ace").unwrap(), code: vec![96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53] }), - subs: vec![] - }); + }]; assert_eq!(result.trace, expected_trace); } @@ -488,8 +489,8 @@ fn should_trace_failed_create_transaction() { state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); let vm_factory = Default::default(); let result = state.apply(&info, &engine, &vm_factory, &t, 
true).unwrap(); - let expected_trace = Some(Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), action: trace::Action::Create(trace::Create { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), value: 100.into(), @@ -497,8 +498,8 @@ fn should_trace_failed_create_transaction() { init: vec![91, 96, 0, 86], }), result: trace::Res::FailedCreate, - subs: vec![] - }); + subtraces: 0 + }]; assert_eq!(result.trace, expected_trace); } @@ -527,21 +528,22 @@ fn should_trace_call_transaction() { state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); let vm_factory = Default::default(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); - let expected_trace = Some(Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), action: trace::Action::Call(trace::Call { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), to: 0xa.into(), value: 100.into(), gas: 79000.into(), input: vec![], + call_type: CallType::Call, }), result: trace::Res::Call(trace::CallResult { gas_used: U256::from(3), output: vec![] }), - subs: vec![] - }); + subtraces: 0, + }]; assert_eq!(result.trace, expected_trace); } @@ -569,21 +571,22 @@ fn should_trace_basic_call_transaction() { state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); let vm_factory = Default::default(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); - let expected_trace = Some(Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), action: trace::Action::Call(trace::Call { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), to: 0xa.into(), value: 100.into(), gas: 79000.into(), input: vec![], + call_type: CallType::Call, }), result: trace::Res::Call(trace::CallResult { gas_used: U256::from(0), output: vec![] }), - subs: vec![] - }); + subtraces: 0, + }]; assert_eq!(result.trace, expected_trace); } @@ -611,21 +614,24 @@ fn should_trace_call_transaction_to_builtin() { let vm_factory = Default::default(); let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap(); - assert_eq!(result.trace, Some(Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), action: trace::Action::Call(trace::Call { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), to: "0000000000000000000000000000000000000001".into(), value: 0.into(), gas: 79_000.into(), input: vec![], + call_type: CallType::Call, }), result: trace::Res::Call(trace::CallResult { gas_used: U256::from(3000), output: vec![] }), - subs: vec![] - })); + subtraces: 0, + }]; + + assert_eq!(result.trace, expected_trace); } #[test] @@ -652,21 +658,23 @@ fn should_not_trace_subcall_transaction_to_builtin() { let vm_factory = Default::default(); let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap(); - let expected_trace = Some(Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), action: trace::Action::Call(trace::Call { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), to: 0xa.into(), value: 0.into(), gas: 79000.into(), input: vec![], + call_type: CallType::Call, }), result: trace::Res::Call(trace::CallResult { gas_used: U256::from(28_061), output: vec![] }), - subs: vec![] - }); + subtraces: 0, + }]; + assert_eq!(result.trace, expected_trace); } @@ -695,21 +703,38 @@ fn should_not_trace_callcode() { let vm_factory = Default::default(); let result = state.apply(&info, engine.deref(), 
&vm_factory, &t, true).unwrap(); - let expected_trace = Some(Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + subtraces: 1, action: trace::Action::Call(trace::Call { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), to: 0xa.into(), value: 0.into(), gas: 79000.into(), input: vec![], + call_type: CallType::Call, }), result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(64), + gas_used: 64.into(), output: vec![] }), - subs: vec![] - }); + }, FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Call(trace::Call { + from: 0xa.into(), + to: 0xa.into(), + value: 0.into(), + gas: 4096.into(), + input: vec![], + call_type: CallType::CallCode, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: 3.into(), + output: vec![], + }), + }]; + assert_eq!(result.trace, expected_trace); } @@ -741,21 +766,38 @@ fn should_not_trace_delegatecall() { let vm_factory = Default::default(); let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap(); - let expected_trace = Some(Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + subtraces: 1, action: trace::Action::Call(trace::Call { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), to: 0xa.into(), value: 0.into(), gas: 79000.into(), input: vec![], + call_type: CallType::Call, }), result: trace::Res::Call(trace::CallResult { gas_used: U256::from(61), output: vec![] }), - subs: vec![] - }); + }, FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Call(trace::Call { + from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), + to: 0xa.into(), + value: 0.into(), + gas: 32768.into(), + input: vec![], + call_type: CallType::DelegateCall, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: 3.into(), + output: vec![], + }), + }]; + assert_eq!(result.trace, expected_trace); } @@ -783,20 +825,19 @@ fn should_trace_failed_call_transaction() { state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); let vm_factory = Default::default(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); - let expected_trace = Some(Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), action: trace::Action::Call(trace::Call { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), to: 0xa.into(), value: 100.into(), gas: 79000.into(), input: vec![], + call_type: CallType::Call, }), result: trace::Res::FailedCall, - subs: vec![] - }); - - println!("trace: {:?}", result.trace); + subtraces: 0, + }]; assert_eq!(result.trace, expected_trace); } @@ -826,35 +867,38 @@ fn should_trace_call_with_subcall_transaction() { state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); let vm_factory = Default::default(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); - let expected_trace = Some(Trace { - depth: 0, + + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + subtraces: 1, action: trace::Action::Call(trace::Call { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), to: 0xa.into(), value: 100.into(), gas: 79000.into(), input: vec![], + call_type: CallType::Call, }), result: trace::Res::Call(trace::CallResult { gas_used: U256::from(69), output: vec![] }), - subs: vec![Trace { - depth: 1, - action: trace::Action::Call(trace::Call { - from: 0xa.into(), - to: 0xb.into(), - value: 0.into(), - gas: 78934.into(), - 
input: vec![], - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(3), - output: vec![] - }), - subs: vec![] - }] - }); + }, FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Call(trace::Call { + from: 0xa.into(), + to: 0xb.into(), + value: 0.into(), + gas: 78934.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(3), + output: vec![] + }), + }]; assert_eq!(result.trace, expected_trace); } @@ -883,32 +927,34 @@ fn should_trace_call_with_basic_subcall_transaction() { state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); let vm_factory = Default::default(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); - let expected_trace = Some(Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + subtraces: 1, action: trace::Action::Call(trace::Call { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), to: 0xa.into(), value: 100.into(), gas: 79000.into(), input: vec![], + call_type: CallType::Call, }), result: trace::Res::Call(trace::CallResult { gas_used: U256::from(31761), output: vec![] }), - subs: vec![Trace { - depth: 1, - action: trace::Action::Call(trace::Call { - from: 0xa.into(), - to: 0xb.into(), - value: 69.into(), - gas: 2300.into(), - input: vec![], - }), - result: trace::Res::Call(trace::CallResult::default()), - subs: vec![] - }] - }); + }, FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Call(trace::Call { + from: 0xa.into(), + to: 0xb.into(), + value: 69.into(), + gas: 2300.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult::default()), + }]; assert_eq!(result.trace, expected_trace); } @@ -937,21 +983,22 @@ fn should_not_trace_call_with_invalid_basic_subcall_transaction() { state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); let vm_factory = Default::default(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); - let expected_trace = Some(Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + subtraces: 0, action: trace::Action::Call(trace::Call { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), to: 0xa.into(), value: 100.into(), gas: 79000.into(), input: vec![], + call_type: CallType::Call, }), result: trace::Res::Call(trace::CallResult { gas_used: U256::from(31761), output: vec![] }), - subs: vec![] - }); + }]; assert_eq!(result.trace, expected_trace); } @@ -981,32 +1028,34 @@ fn should_trace_failed_subcall_transaction() { state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); let vm_factory = Default::default(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); - let expected_trace = Some(Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + subtraces: 1, action: trace::Action::Call(trace::Call { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), to: 0xa.into(), value: 100.into(), gas: 79000.into(), input: vec![], + call_type: CallType::Call, }), result: trace::Res::Call(trace::CallResult { gas_used: U256::from(79_000), output: vec![] }), - subs: vec![Trace { - depth: 1, - action: trace::Action::Call(trace::Call { - from: 0xa.into(), - to: 0xb.into(), - value: 0.into(), - gas: 78934.into(), - input: vec![], - }), - result: trace::Res::FailedCall, - subs: vec![] - }] - }); + }, 
FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Call(trace::Call { + from: 0xa.into(), + to: 0xb.into(), + value: 0.into(), + gas: 78934.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::FailedCall, + }]; assert_eq!(result.trace, expected_trace); } @@ -1037,49 +1086,52 @@ fn should_trace_call_with_subcall_with_subcall_transaction() { state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); let vm_factory = Default::default(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); - let expected_trace = Some(Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + subtraces: 1, action: trace::Action::Call(trace::Call { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), to: 0xa.into(), value: 100.into(), gas: 79000.into(), input: vec![], + call_type: CallType::Call, }), result: trace::Res::Call(trace::CallResult { gas_used: U256::from(135), output: vec![] }), - subs: vec![Trace { - depth: 1, - action: trace::Action::Call(trace::Call { - from: 0xa.into(), - to: 0xb.into(), - value: 0.into(), - gas: 78934.into(), - input: vec![], - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(69), - output: vec![] - }), - subs: vec![Trace { - depth: 2, - action: trace::Action::Call(trace::Call { - from: 0xb.into(), - to: 0xc.into(), - value: 0.into(), - gas: 78868.into(), - input: vec![], - }), - result: trace::Res::Call(trace::CallResult { - gas_used: U256::from(3), - output: vec![] - }), - subs: vec![] - }] - }] - }); + }, FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 1, + action: trace::Action::Call(trace::Call { + from: 0xa.into(), + to: 0xb.into(), + value: 0.into(), + gas: 78934.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(69), + output: vec![] + }), + }, FlatTrace { + trace_address: vec![0, 0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Call(trace::Call { + from: 0xb.into(), + to: 0xc.into(), + value: 0.into(), + gas: 78868.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(3), + output: vec![] + }), + }]; assert_eq!(result.trace, expected_trace); } @@ -1110,46 +1162,50 @@ fn should_trace_failed_subcall_with_subcall_transaction() { state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); let vm_factory = Default::default(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); - let expected_trace = Some(Trace { - depth: 0, + + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + subtraces: 1, action: trace::Action::Call(trace::Call { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), to: 0xa.into(), value: 100.into(), gas: 79000.into(), input: vec![], + call_type: CallType::Call, }), result: trace::Res::Call(trace::CallResult { gas_used: U256::from(79_000), output: vec![] - }), - subs: vec![Trace { - depth: 1, + }) + }, FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 1, action: trace::Action::Call(trace::Call { - from: 0xa.into(), - to: 0xb.into(), - value: 0.into(), - gas: 78934.into(), - input: vec![], - }), - result: trace::Res::FailedCall, - subs: vec![Trace { - depth: 2, - action: trace::Action::Call(trace::Call { - from: 0xb.into(), - to: 0xc.into(), - value: 0.into(), - gas: 78868.into(), - input: vec![], - }), - result: 
trace::Res::Call(trace::CallResult { - gas_used: U256::from(3), - output: vec![] - }), - subs: vec![] - }] - }] - }); + from: 0xa.into(), + to: 0xb.into(), + value: 0.into(), + gas: 78934.into(), + input: vec![], + call_type: CallType::Call, + }), + result: trace::Res::FailedCall, + }, FlatTrace { + trace_address: vec![0, 0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Call(trace::Call { + from: 0xb.into(), + to: 0xc.into(), + value: 0.into(), + gas: 78868.into(), + call_type: CallType::Call, + input: vec![], + }), + result: trace::Res::Call(trace::CallResult { + gas_used: U256::from(3), + output: vec![] + }), + }]; assert_eq!(result.trace, expected_trace); } @@ -1179,30 +1235,32 @@ fn should_trace_suicide() { state.add_balance(t.sender().as_ref().unwrap(), &100.into()); let vm_factory = Default::default(); let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); - let expected_trace = Some(Trace { - depth: 0, + let expected_trace = vec![FlatTrace { + trace_address: Default::default(), + subtraces: 1, action: trace::Action::Call(trace::Call { from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(), to: 0xa.into(), value: 100.into(), gas: 79000.into(), input: vec![], + call_type: CallType::Call, }), result: trace::Res::Call(trace::CallResult { gas_used: 3.into(), output: vec![] }), - subs: vec![Trace { - depth: 1, - action: trace::Action::Suicide(trace::Suicide { - address: 0xa.into(), - refund_address: 0xb.into(), - balance: 150.into(), - }), - result: trace::Res::None, - subs: vec![] - }] - }); + }, FlatTrace { + trace_address: vec![0].into_iter().collect(), + subtraces: 0, + action: trace::Action::Suicide(trace::Suicide { + address: 0xa.into(), + refund_address: 0xb.into(), + balance: 150.into(), + }), + result: trace::Res::None, + }]; + assert_eq!(result.trace, expected_trace); } diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index 6e94d2ee3..b88c773c2 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -14,14 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
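The trace tests above all follow the same translation: the old nested `Trace { depth, subs }` values become a flat, depth-first list in which `trace_address` is the path of child indices from the root and `subtraces` counts direct children. A self-contained sketch with toy types (the real `FlatTrace` also carries the action and result):

#[derive(Debug, PartialEq)]
pub struct Nested { pub subs: Vec<Nested> }

#[derive(Debug, PartialEq)]
pub struct Flat { pub trace_address: Vec<usize>, pub subtraces: usize }

// Depth-first flattening of a nested call trace.
pub fn flatten(trace: &Nested, address: Vec<usize>, out: &mut Vec<Flat>) {
    out.push(Flat { trace_address: address.clone(), subtraces: trace.subs.len() });
    for (i, sub) in trace.subs.iter().enumerate() {
        let mut child_address = address.clone();
        child_address.push(i);
        flatten(sub, child_address, out);
    }
}

#[test]
fn flattens_depth_first() {
    // a call containing a subcall which itself makes a subcall
    let root = Nested { subs: vec![Nested { subs: vec![Nested { subs: vec![] }] }] };
    let mut out = Vec::new();
    flatten(&root, Vec::new(), &mut out);
    // matches the `trace_address` values in the tests: [], [0], [0, 0]
    assert_eq!(out, vec![
        Flat { trace_address: vec![], subtraces: 1 },
        Flat { trace_address: vec![0], subtraces: 1 },
        Flat { trace_address: vec![0, 0], subtraces: 0 },
    ]);
}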
-use client::{BlockChainClient, Client, ClientConfig}; +use client::{self, BlockChainClient, Client, ClientConfig}; use common::*; use spec::*; use block::{OpenBlock, Drain}; use blockchain::{BlockChain, Config as BlockChainConfig}; use state::*; use evm::Schedule; -use engine::*; +use engines::Engine; use ethereum; use devtools::*; use miner::Miner; @@ -246,12 +246,23 @@ pub fn get_test_client_with_blocks(blocks: Vec) -> GuardedTempResult Arc { + Arc::new( + Database::open(&DatabaseConfig::with_columns(client::DB_NO_OF_COLUMNS), path) + .expect("Opening database for tests should always work.") + ) +} + pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult { let temp = RandomTempPath::new(); - let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), temp.as_path()); + let db = new_db(temp.as_str()); + let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone()); + + let batch = db.transaction(); for block_order in 1..block_number { - bc.insert_block(&create_unverifiable_block(block_order, bc.best_block_hash()), vec![]); + bc.insert_block(&batch, &create_unverifiable_block(block_order, bc.best_block_hash()), vec![]); } + db.write(batch).unwrap(); GuardedTempResult:: { _temp: temp, @@ -261,10 +272,15 @@ pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult GuardedTempResult { let temp = RandomTempPath::new(); - let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), temp.as_path()); + let db = new_db(temp.as_str()); + let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone()); + + + let batch = db.transaction(); for block_order in 1..block_number { - bc.insert_block(&create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]); + bc.insert_block(&batch, &create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]); } + db.write(batch).unwrap(); GuardedTempResult:: { _temp: temp, @@ -274,7 +290,8 @@ pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> GuardedTempRes pub fn generate_dummy_empty_blockchain() -> GuardedTempResult { let temp = RandomTempPath::new(); - let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), temp.as_path()); + let db = new_db(temp.as_str()); + let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone()); GuardedTempResult:: { _temp: temp, @@ -284,7 +301,8 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult { pub fn get_temp_journal_db() -> GuardedTempResult> { let temp = RandomTempPath::new(); - let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge, DatabaseConfig::default()); + let journal_db = get_temp_journal_db_in(temp.as_path()); + GuardedTempResult { _temp: temp, result: Some(journal_db) @@ -294,6 +312,7 @@ pub fn get_temp_journal_db() -> GuardedTempResult> { pub fn get_temp_state() -> GuardedTempResult { let temp = RandomTempPath::new(); let journal_db = get_temp_journal_db_in(temp.as_path()); + GuardedTempResult { _temp: temp, result: Some(State::new(journal_db, U256::from(0), Default::default())), @@ -301,7 +320,8 @@ pub fn get_temp_state() -> GuardedTempResult { } pub fn get_temp_journal_db_in(path: &Path) -> Box { - journaldb::new(path.to_str().unwrap(), journaldb::Algorithm::EarlyMerge, DatabaseConfig::default()) + let db = 
new_db(path.to_str().expect("Only valid utf8 paths for tests.")); + journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, None) } pub fn get_temp_state_in(path: &Path) -> State { diff --git a/ethcore/src/trace/block.rs b/ethcore/src/trace/block.rs deleted file mode 100644 index bc53f77e2..000000000 --- a/ethcore/src/trace/block.rs +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2015, 2016 Ethcore (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use util::rlp::*; -use basic_types::LogBloom; -use super::Trace; - -/// Traces created by transactions from the same block. -#[derive(Clone)] -pub struct BlockTraces(Vec); - -impl From> for BlockTraces { - fn from(traces: Vec) -> Self { - BlockTraces(traces) - } -} - -impl Into> for BlockTraces { - fn into(self) -> Vec { - self.0 - } -} - -impl Decodable for BlockTraces { - fn decode(decoder: &D) -> Result where D: Decoder { - let traces = try!(Decodable::decode(decoder)); - let block_traces = BlockTraces(traces); - Ok(block_traces) - } -} - -impl Encodable for BlockTraces { - fn rlp_append(&self, s: &mut RlpStream) { - Encodable::rlp_append(&self.0, s) - } -} - -impl BlockTraces { - /// Returns bloom of all traces in given block. - pub fn bloom(&self) -> LogBloom { - self.0.iter() - .fold(LogBloom::default(), |acc, trace| acc | trace.bloom()) - } -} - diff --git a/ethcore/src/trace/config.rs b/ethcore/src/trace/config.rs index 2876baa28..1c4646817 100644 --- a/ethcore/src/trace/config.rs +++ b/ethcore/src/trace/config.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . //! Traces config. +use std::str::FromStr; use bloomchain::Config as BloomConfig; use trace::Error; @@ -29,6 +30,25 @@ pub enum Switch { Auto, } +impl Default for Switch { + fn default() -> Self { + Switch::Auto + } +} + +impl FromStr for Switch { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "on" => Ok(Switch::On), + "off" => Ok(Switch::Off), + "auto" => Ok(Switch::Auto), + other => Err(format!("Invalid switch value: {}", other)) + } + } +} + impl Switch { /// Tries to turn old switch to new value. pub fn turn_to(&self, to: Switch) -> Result { @@ -41,7 +61,7 @@ impl Switch { } /// Traces config. -#[derive(Debug, Clone)] +#[derive(Debug, PartialEq, Clone)] pub struct Config { /// Indicates if tracing should be enabled or not. /// If it's None, it will be automatically configured. 
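The helper changes above capture this PR's storage refactor in miniature: instead of each subsystem opening its own RocksDB in a subdirectory, one column-oriented database is opened up front (new_db), shared as an Arc, and every write goes through a transaction that is flushed atomically with db.write(batch). A toy in-memory stand-in for util::Database, purely to show the shape of that pattern; the types and method signatures here are illustrative assumptions, not the real API:

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

// Column identifier, mirroring the Option<u32>-style columns of the real DB.
type Col = Option<u32>;

// Writes are staged here and applied atomically by Database::write.
#[derive(Default)]
struct Transaction {
    ops: Vec<(Col, Vec<u8>, Vec<u8>)>,
}

impl Transaction {
    fn put(&mut self, col: Col, key: &[u8], value: &[u8]) {
        self.ops.push((col, key.to_vec(), value.to_vec()));
    }
}

#[derive(Default)]
struct Database {
    columns: Mutex<HashMap<(Col, Vec<u8>), Vec<u8>>>,
}

impl Database {
    fn write(&self, batch: Transaction) {
        let mut columns = self.columns.lock().unwrap();
        for (col, key, value) in batch.ops {
            columns.insert((col, key), value);
        }
    }

    fn get(&self, col: Col, key: &[u8]) -> Option<Vec<u8>> {
        self.columns.lock().unwrap().get(&(col, key.to_vec())).cloned()
    }
}

fn main() {
    let db = Arc::new(Database::default());
    // Several subsystems can stage writes into one batch before it hits disk.
    let mut batch = Transaction::default();
    batch.put(Some(4), b"enabled", &[0x1]);
    batch.put(Some(4), b"version", b"1.0");
    db.write(batch);
    assert_eq!(db.get(Some(4), b"enabled"), Some(vec![0x1]));
}

The payoff is that logically related writes from different modules (blockchain, traces, extras) ride in a single batch and either all land or none do.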
@@ -55,7 +75,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { Config { - enabled: Switch::Auto, + enabled: Switch::default(), blooms: BloomConfig { levels: 3, elements_per_index: 16, @@ -64,3 +84,20 @@ impl Default for Config { } } } + +#[cfg(test)] +mod tests { + use super::Switch; + + #[test] + fn test_switch_parsing() { + assert_eq!(Switch::On, "on".parse().unwrap()); + assert_eq!(Switch::Off, "off".parse().unwrap()); + assert_eq!(Switch::Auto, "auto".parse().unwrap()); + } + + #[test] + fn test_switch_default() { + assert_eq!(Switch::default(), Switch::Auto); + } +} diff --git a/ethcore/src/trace/db.rs b/ethcore/src/trace/db.rs index edd7dd189..c0dab5d17 100644 --- a/ethcore/src/trace/db.rs +++ b/ethcore/src/trace/db.rs @@ -18,15 +18,15 @@ use std::ops::{Deref, DerefMut}; use std::collections::HashMap; use std::sync::Arc; -use std::path::Path; use bloomchain::{Number, Config as BloomConfig}; use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup}; -use util::{H256, H264, Database, DatabaseConfig, DBTransaction, RwLock}; +use util::{H256, H264, Database, DBTransaction, RwLock}; use header::BlockNumber; use trace::{LocalizedTrace, Config, Switch, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras, Error}; use db::{Key, Writable, Readable, CacheUpdatePolicy}; use blooms; use super::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces}; +use client::DB_COL_TRACE; const TRACE_DB_VER: &'static [u8] = b"1.0"; @@ -94,7 +94,7 @@ pub struct TraceDB where T: DatabaseExtras { traces: RwLock>, blooms: RwLock>, // db - tracesdb: Database, + tracesdb: Arc, // config, bloom_config: BloomConfig, // tracing enabled @@ -106,24 +106,15 @@ pub struct TraceDB where T: DatabaseExtras { impl BloomGroupDatabase for TraceDB where T: DatabaseExtras { fn blooms_at(&self, position: &GroupPosition) -> Option { let position = TraceGroupPosition::from(position.clone()); - self.tracesdb.read_with_cache(&self.blooms, &position).map(Into::into) + self.tracesdb.read_with_cache(DB_COL_TRACE, &self.blooms, &position).map(Into::into) } } impl TraceDB where T: DatabaseExtras { /// Creates new instance of `TraceDB`. - pub fn new(config: Config, path: &Path, extras: Arc) -> Result { - let mut tracedb_path = path.to_path_buf(); - tracedb_path.push("tracedb"); - let tracesdb = match config.db_cache_size { - None => Database::open_default(tracedb_path.to_str().unwrap()).unwrap(), - Some(db_cache) => Database::open( - &DatabaseConfig::with_cache(db_cache), - tracedb_path.to_str().unwrap()).unwrap(), - }; - + pub fn new(config: Config, tracesdb: Arc, extras: Arc) -> Result { // check if in previously tracing was enabled - let old_tracing = match tracesdb.get(b"enabled").unwrap() { + let old_tracing = match tracesdb.get(DB_COL_TRACE, b"enabled").unwrap() { Some(ref value) if value as &[u8] == &[0x1] => Switch::On, Some(ref value) if value as &[u8] == &[0x0] => Switch::Off, Some(_) => { panic!("tracesdb is corrupted") }, @@ -137,8 +128,10 @@ impl TraceDB where T: DatabaseExtras { false => [0x0] }; - tracesdb.put(b"enabled", &encoded_tracing).unwrap(); - tracesdb.put(b"version", TRACE_DB_VER).unwrap(); + let batch = DBTransaction::new(&tracesdb); + batch.put(DB_COL_TRACE, b"enabled", &encoded_tracing).unwrap(); + batch.put(DB_COL_TRACE, b"version", TRACE_DB_VER).unwrap(); + tracesdb.write(batch).unwrap(); let db = TraceDB { traces: RwLock::new(HashMap::new()), @@ -154,7 +147,7 @@ impl TraceDB where T: DatabaseExtras { /// Returns traces for block with hash. 
fn traces(&self, block_hash: &H256) -> Option { - self.tracesdb.read_with_cache(&self.traces, block_hash) + self.tracesdb.read_with_cache(DB_COL_TRACE, &self.traces, block_hash) } /// Returns vector of transaction traces for given block. @@ -197,7 +190,7 @@ impl TraceDB where T: DatabaseExtras { action: trace.action, result: trace.result, subtraces: trace.subtraces, - trace_address: trace.trace_address, + trace_address: trace.trace_address.into_iter().collect(), transaction_number: tx_number, transaction_hash: tx_hash.clone(), block_number: block_number, @@ -217,20 +210,18 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { /// Traces of import request's enacted blocks are expected to be already in database /// or to be the currently inserted trace. - fn import(&self, request: ImportRequest) { + fn import(&self, batch: &DBTransaction, request: ImportRequest) { // fast return if tracing is disabled if !self.tracing_enabled() { return; } - let batch = DBTransaction::new(); - // at first, let's insert new block traces { let mut traces = self.traces.write(); // it's important to use overwrite here, // cause this value might be queried by hash later - batch.write_with_cache(traces.deref_mut(), request.block_hash, request.traces.into(), CacheUpdatePolicy::Overwrite); + batch.write_with_cache(DB_COL_TRACE, traces.deref_mut(), request.block_hash, request.traces, CacheUpdatePolicy::Overwrite); } // now let's rebuild the blooms @@ -256,19 +247,18 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { .collect::>(); let mut blooms = self.blooms.write(); - batch.extend_with_cache(blooms.deref_mut(), blooms_to_insert, CacheUpdatePolicy::Remove); + batch.extend_with_cache(DB_COL_TRACE, blooms.deref_mut(), blooms_to_insert, CacheUpdatePolicy::Remove); } - - self.tracesdb.write(batch).unwrap(); } fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec) -> Option { + let trace_position_deq = trace_position.into_iter().collect(); self.extras.block_hash(block_number) .and_then(|block_hash| self.transactions_traces(&block_hash) .and_then(|traces| traces.into_iter().nth(tx_position)) .map(Into::>::into) // this may and should be optimized - .and_then(|traces| traces.into_iter().find(|trace| trace.trace_address == trace_position)) + .and_then(|traces| traces.into_iter().find(|trace| trace.trace_address == trace_position_deq)) .map(|trace| { let tx_hash = self.extras.transaction_hash(block_number, tx_position) .expect("Expected to find transaction hash. 
Database is probably corrupted"); @@ -277,7 +267,7 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { action: trace.action, result: trace.result, subtraces: trace.subtraces, - trace_address: trace.trace_address, + trace_address: trace.trace_address.into_iter().collect(), transaction_number: tx_position, transaction_hash: tx_hash, block_number: block_number, @@ -301,7 +291,7 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { action: trace.action, result: trace.result, subtraces: trace.subtraces, - trace_address: trace.trace_address, + trace_address: trace.trace_address.into_iter().collect(), transaction_number: tx_position, transaction_hash: tx_hash.clone(), block_number: block_number, @@ -328,7 +318,7 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { action: trace.action, result: trace.result, subtraces: trace.subtraces, - trace_address: trace.trace_address, + trace_address: trace.trace_address.into_iter().collect(), transaction_number: tx_position, transaction_hash: tx_hash.clone(), block_number: block_number, @@ -361,12 +351,15 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { mod tests { use std::collections::HashMap; use std::sync::Arc; - use util::{Address, U256, H256}; + use util::{Address, U256, H256, Database, DatabaseConfig, DBTransaction}; use devtools::RandomTempPath; use header::BlockNumber; - use trace::{Config, Switch, TraceDB, Database, DatabaseExtras, ImportRequest}; - use trace::{BlockTraces, Trace, Filter, LocalizedTrace, AddressesFilter}; + use trace::{Config, Switch, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest}; + use trace::{Filter, LocalizedTrace, AddressesFilter}; use trace::trace::{Call, Action, Res}; + use trace::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces}; + use client::DB_NO_OF_COLUMNS; + use types::executed::CallType; struct NoopExtras; @@ -405,28 +398,33 @@ mod tests { } } + fn new_db(path: &str) -> Arc { + Arc::new(Database::open(&DatabaseConfig::with_columns(DB_NO_OF_COLUMNS), path).unwrap()) + } + #[test] fn test_reopening_db_with_tracing_off() { let temp = RandomTempPath::new(); + let db = new_db(temp.as_str()); let mut config = Config::default(); // set autotracing config.enabled = Switch::Auto; { - let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); assert_eq!(tracedb.tracing_enabled(), false); } { - let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); assert_eq!(tracedb.tracing_enabled(), false); } config.enabled = Switch::Off; { - let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); assert_eq!(tracedb.tracing_enabled(), false); } } @@ -434,32 +432,33 @@ mod tests { #[test] fn test_reopening_db_with_tracing_on() { let temp = RandomTempPath::new(); + let db = new_db(temp.as_str()); let mut config = Config::default(); // set tracing on config.enabled = Switch::On; { - let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); assert_eq!(tracedb.tracing_enabled(), true); } { - let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); + let tracedb = 
TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); assert_eq!(tracedb.tracing_enabled(), true); } config.enabled = Switch::Auto; { - let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); assert_eq!(tracedb.tracing_enabled(), true); } config.enabled = Switch::Off; { - let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); assert_eq!(tracedb.tracing_enabled(), false); } } @@ -468,34 +467,36 @@ mod tests { #[should_panic] fn test_invalid_reopening_db() { let temp = RandomTempPath::new(); + let db = new_db(temp.as_str()); let mut config = Config::default(); // set tracing on config.enabled = Switch::Off; { - let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); assert_eq!(tracedb.tracing_enabled(), true); } config.enabled = Switch::On; - TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); // should panic! + TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); // should panic! } fn create_simple_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest { ImportRequest { - traces: BlockTraces::from(vec![Trace { - depth: 0, + traces: FlatBlockTraces::from(vec![FlatTransactionTraces::from(vec![FlatTrace { + trace_address: Default::default(), + subtraces: 0, action: Action::Call(Call { - from: Address::from(1), - to: Address::from(2), - value: U256::from(3), - gas: U256::from(4), + from: 1.into(), + to: 2.into(), + value: 3.into(), + gas: 4.into(), input: vec![], + call_type: CallType::Call, }), result: Res::FailedCall, - subs: vec![], - }]), + }])]), block_hash: block_hash.clone(), block_number: block_number, enacted: vec![block_hash], @@ -511,6 +512,7 @@ mod tests { value: U256::from(3), gas: U256::from(4), input: vec![], + call_type: CallType::Call, }), result: Res::FailedCall, trace_address: vec![], @@ -526,6 +528,7 @@ mod tests { #[test] fn test_import() { let temp = RandomTempPath::new(); + let db = Arc::new(Database::open(&DatabaseConfig::with_columns(DB_NO_OF_COLUMNS), temp.as_str()).unwrap()); let mut config = Config::default(); config.enabled = Switch::On; let block_0 = H256::from(0xa1); @@ -539,11 +542,13 @@ mod tests { extras.transaction_hashes.insert(0, vec![tx_0.clone()]); extras.transaction_hashes.insert(1, vec![tx_1.clone()]); - let tracedb = TraceDB::new(config, temp.as_path(), Arc::new(extras)).unwrap(); + let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)).unwrap(); // import block 0 let request = create_simple_import_request(0, block_0.clone()); - tracedb.import(request); + let batch = DBTransaction::new(&db); + tracedb.import(&batch, request); + db.write(batch).unwrap(); let filter = Filter { range: (0..0), @@ -557,7 +562,9 @@ mod tests { // import block 1 let request = create_simple_import_request(1, block_1.clone()); - tracedb.import(request); + let batch = DBTransaction::new(&db); + tracedb.import(&batch, request); + db.write(batch).unwrap(); let filter = Filter { range: (0..1), diff --git a/ethcore/src/trace/executive_tracer.rs b/ethcore/src/trace/executive_tracer.rs index af8183c0a..a64664095 100644 --- a/ethcore/src/trace/executive_tracer.rs +++ b/ethcore/src/trace/executive_tracer.rs @@ -18,13 +18,53 @@ 
use util::{Bytes, Address, U256}; use action_params::ActionParams; -use trace::trace::{Trace, Call, Create, Action, Res, CreateResult, CallResult, VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff, Suicide}; -use trace::{Tracer, VMTracer}; +use trace::trace::{Call, Create, Action, Res, CreateResult, CallResult, VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff, Suicide}; +use trace::{Tracer, VMTracer, FlatTrace}; /// Simple executive tracer. Traces all calls and creates. Ignores delegatecalls. #[derive(Default)] pub struct ExecutiveTracer { - traces: Vec<Trace>, + traces: Vec<FlatTrace>, +} + +fn top_level_subtraces(traces: &[FlatTrace]) -> usize { + traces.iter().filter(|t| t.trace_address.is_empty()).count() +} + +fn update_trace_address(traces: Vec<FlatTrace>) -> Vec<FlatTrace> { + // input traces are expected to be ordered like + // [] + // [0] + // [0, 0] + // [0, 1] + // [] + // [0] + // + // so they can be transformed to + // + // [0] + // [0, 0] + // [0, 0, 0] + // [0, 0, 1] + // [1] + // [1, 0] + let mut top_subtrace_index = 0; + let mut subtrace_subtraces_left = 0; + traces.into_iter().map(|mut trace| { + let is_top_subtrace = trace.trace_address.is_empty(); + trace.trace_address.push_front(top_subtrace_index); + + if is_top_subtrace { + subtrace_subtraces_left = trace.subtraces; + } else { + subtrace_subtraces_left -= 1; + } + + if subtrace_subtraces_left == 0 { + top_subtrace_index += 1; + } + trace + }).collect() } impl Tracer for ExecutiveTracer { @@ -40,74 +80,73 @@ impl Tracer for ExecutiveTracer { Some(vec![]) } - fn trace_call(&mut self, call: Option<Call>, gas_used: U256, output: Option<Bytes>, depth: usize, subs: Vec<Trace>, delegate_call: bool) { - // don't trace if it's DELEGATECALL or CALLCODE. - if delegate_call { - return; - } - - let trace = Trace { - depth: depth, - subs: subs, + fn trace_call(&mut self, call: Option<Call>, gas_used: U256, output: Option<Bytes>, subs: Vec<FlatTrace>) { + let trace = FlatTrace { + trace_address: Default::default(), + subtraces: top_level_subtraces(&subs), action: Action::Call(call.expect("self.prepare_trace_call().is_some(): so we must be tracing: qed")), result: Res::Call(CallResult { gas_used: gas_used, output: output.expect("self.prepare_trace_output().is_some(): so we must be tracing: qed") - }) + }), }; + debug!(target: "trace", "Traced call {:?}", trace); self.traces.push(trace); + self.traces.extend(update_trace_address(subs)); } - fn trace_create(&mut self, create: Option<Create>, gas_used: U256, code: Option<Bytes>, address: Address, depth: usize, subs: Vec<Trace>) { - let trace = Trace { - depth: depth, - subs: subs, + fn trace_create(&mut self, create: Option<Create>, gas_used: U256, code: Option<Bytes>, address: Address, subs: Vec<FlatTrace>) { + let trace = FlatTrace { + subtraces: top_level_subtraces(&subs), action: Action::Create(create.expect("self.prepare_trace_create().is_some(): so we must be tracing: qed")), result: Res::Create(CreateResult { gas_used: gas_used, code: code.expect("self.prepare_trace_output.is_some(): so we must be tracing: qed"), address: address - }) + }), + trace_address: Default::default(), }; + debug!(target: "trace", "Traced create {:?}", trace); self.traces.push(trace); + self.traces.extend(update_trace_address(subs)); } - fn trace_failed_call(&mut self, call: Option<Call>, depth: usize, subs: Vec<Trace>, delegate_call: bool) { - // don't trace if it's DELEGATECALL or CALLCODE.
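update_trace_address is the heart of the flattening change: a subtracer returns its traces addressed relative to itself, and the parent splices them in by prefixing each with the index of the top-level subtrace it belongs to. A self-contained re-statement with a reduced trace type (only the fields the function reads), exercised on a two-level example:

use std::collections::VecDeque;

// Reduced trace carrying just the addressing fields the function touches.
#[derive(Debug)]
struct Flat {
    trace_address: VecDeque<usize>,
    subtraces: usize,
}

// Same bookkeeping as the function above: walk the subtraces in order and
// start a new top-level index once the current top subtrace has exhausted
// its children.
fn update_trace_address(traces: Vec<Flat>) -> Vec<Flat> {
    let mut top_subtrace_index = 0;
    let mut subtrace_subtraces_left = 0;
    traces.into_iter().map(|mut trace| {
        let is_top_subtrace = trace.trace_address.is_empty();
        trace.trace_address.push_front(top_subtrace_index);
        if is_top_subtrace {
            subtrace_subtraces_left = trace.subtraces;
        } else {
            subtrace_subtraces_left -= 1;
        }
        if subtrace_subtraces_left == 0 {
            top_subtrace_index += 1;
        }
        trace
    }).collect()
}

fn main() {
    // A subcall with one nested call, followed by a second sibling subcall:
    // [], [0], [] becomes [0], [0, 0], [1].
    let input = vec![
        Flat { trace_address: VecDeque::new(), subtraces: 1 },
        Flat { trace_address: vec![0].into_iter().collect(), subtraces: 0 },
        Flat { trace_address: VecDeque::new(), subtraces: 0 },
    ];
    let addrs: Vec<Vec<usize>> = update_trace_address(input)
        .into_iter()
        .map(|t| t.trace_address.into_iter().collect())
        .collect();
    assert_eq!(addrs, vec![vec![0], vec![0, 0], vec![1]]);
}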
- if delegate_call { - return; - } - - let trace = Trace { - depth: depth, - subs: subs, + fn trace_failed_call(&mut self, call: Option, subs: Vec) { + let trace = FlatTrace { + trace_address: Default::default(), + subtraces: top_level_subtraces(&subs), action: Action::Call(call.expect("self.prepare_trace_call().is_some(): so we must be tracing: qed")), result: Res::FailedCall, }; + debug!(target: "trace", "Traced failed call {:?}", trace); self.traces.push(trace); + self.traces.extend(update_trace_address(subs)); } - fn trace_failed_create(&mut self, create: Option, depth: usize, subs: Vec) { - let trace = Trace { - depth: depth, - subs: subs, + fn trace_failed_create(&mut self, create: Option, subs: Vec) { + let trace = FlatTrace { + subtraces: top_level_subtraces(&subs), action: Action::Create(create.expect("self.prepare_trace_create().is_some(): so we must be tracing: qed")), result: Res::FailedCreate, + trace_address: Default::default(), }; + debug!(target: "trace", "Traced failed create {:?}", trace); self.traces.push(trace); + self.traces.extend(update_trace_address(subs)); } - fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address, depth: usize) { - let trace = Trace { - depth: depth, - subs: vec![], + fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address) { + let trace = FlatTrace { + subtraces: 0, action: Action::Suicide(Suicide { address: address, refund_address: refund_address, balance: balance, }), result: Res::None, + trace_address: Default::default(), }; + debug!(target: "trace", "Traced failed suicide {:?}", trace); self.traces.push(trace); } @@ -115,7 +154,7 @@ impl Tracer for ExecutiveTracer { ExecutiveTracer::default() } - fn traces(self) -> Vec { + fn traces(self) -> Vec { self.traces } } diff --git a/ethcore/src/trace/import.rs b/ethcore/src/trace/import.rs index a6b4a29bb..7da3e5fe2 100644 --- a/ethcore/src/trace/import.rs +++ b/ethcore/src/trace/import.rs @@ -17,12 +17,12 @@ //! Traces import request. use util::H256; use header::BlockNumber; -use trace::BlockTraces; +use trace::FlatBlockTraces; /// Traces import request. pub struct ImportRequest { /// Traces to import. - pub traces: BlockTraces, + pub traces: FlatBlockTraces, /// Hash of traces block. pub block_hash: H256, /// Number of traces block. diff --git a/ethcore/src/trace/mod.rs b/ethcore/src/trace/mod.rs index 67fec2b97..277227729 100644 --- a/ethcore/src/trace/mod.rs +++ b/ethcore/src/trace/mod.rs @@ -16,28 +16,26 @@ //! 
Tracing -mod block; mod bloom; mod config; mod db; mod error; mod executive_tracer; -pub mod flat; mod import; mod noop_tracer; pub use types::trace_types::*; -pub use self::block::BlockTraces; pub use self::config::{Config, Switch}; pub use self::db::TraceDB; pub use self::error::Error; -pub use types::trace_types::trace::{Trace, VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff}; +pub use types::trace_types::trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff}; +pub use types::trace_types::flat::{FlatTrace, FlatTransactionTraces, FlatBlockTraces}; pub use self::noop_tracer::{NoopTracer, NoopVMTracer}; pub use self::executive_tracer::{ExecutiveTracer, ExecutiveVMTracer}; pub use types::trace_types::filter::{Filter, AddressesFilter}; pub use self::import::ImportRequest; pub use self::localized::LocalizedTrace; -use util::{Bytes, Address, U256, H256}; +use util::{Bytes, Address, U256, H256, DBTransaction}; use self::trace::{Call, Create}; use action_params::ActionParams; use header::BlockNumber; @@ -59,9 +57,7 @@ pub trait Tracer: Send { call: Option, gas_used: U256, output: Option, - depth: usize, - subs: Vec, - delegate_call: bool + subs: Vec, ); /// Stores trace create info. @@ -71,24 +67,23 @@ pub trait Tracer: Send { gas_used: U256, code: Option, address: Address, - depth: usize, - subs: Vec + subs: Vec ); /// Stores failed call trace. - fn trace_failed_call(&mut self, call: Option, depth: usize, subs: Vec, delegate_call: bool); + fn trace_failed_call(&mut self, call: Option, subs: Vec); /// Stores failed create trace. - fn trace_failed_create(&mut self, create: Option, depth: usize, subs: Vec); + fn trace_failed_create(&mut self, create: Option, subs: Vec); /// Stores suicide info. - fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address, depth: usize); + fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address); /// Spawn subtracer which will be used to trace deeper levels of execution. fn subtracer(&self) -> Self where Self: Sized; /// Consumes self and returns all traces. - fn traces(self) -> Vec; + fn traces(self) -> Vec; } /// Used by executive to build VM traces. @@ -126,7 +121,7 @@ pub trait Database { fn tracing_enabled(&self) -> bool; /// Imports new block traces. - fn import(&self, request: ImportRequest); + fn import(&self, batch: &DBTransaction, request: ImportRequest); /// Returns localized trace at given position. fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec) -> Option; diff --git a/ethcore/src/trace/noop_tracer.rs b/ethcore/src/trace/noop_tracer.rs index 290fb2367..9ae8e2561 100644 --- a/ethcore/src/trace/noop_tracer.rs +++ b/ethcore/src/trace/noop_tracer.rs @@ -18,8 +18,8 @@ use util::{Bytes, Address, U256}; use action_params::ActionParams; -use trace::{Tracer, VMTracer}; -use trace::trace::{Trace, Call, Create, VMTrace}; +use trace::{Tracer, VMTracer, FlatTrace}; +use trace::trace::{Call, Create, VMTrace}; /// Nonoperative tracer. Does not trace anything. 
pub struct NoopTracer; @@ -37,32 +37,32 @@ impl Tracer for NoopTracer { None } - fn trace_call(&mut self, call: Option, _: U256, output: Option, _: usize, _: Vec, _: bool) { + fn trace_call(&mut self, call: Option, _: U256, output: Option, _: Vec) { assert!(call.is_none(), "self.prepare_trace_call().is_none(): so we can't be tracing: qed"); assert!(output.is_none(), "self.prepare_trace_output().is_none(): so we can't be tracing: qed"); } - fn trace_create(&mut self, create: Option, _: U256, code: Option, _: Address, _: usize, _: Vec) { + fn trace_create(&mut self, create: Option, _: U256, code: Option, _: Address, _: Vec) { assert!(create.is_none(), "self.prepare_trace_create().is_none(): so we can't be tracing: qed"); assert!(code.is_none(), "self.prepare_trace_output().is_none(): so we can't be tracing: qed"); } - fn trace_failed_call(&mut self, call: Option, _: usize, _: Vec, _: bool) { + fn trace_failed_call(&mut self, call: Option, _: Vec) { assert!(call.is_none(), "self.prepare_trace_call().is_none(): so we can't be tracing: qed"); } - fn trace_failed_create(&mut self, create: Option, _: usize, _: Vec) { + fn trace_failed_create(&mut self, create: Option, _: Vec) { assert!(create.is_none(), "self.prepare_trace_create().is_none(): so we can't be tracing: qed"); } - fn trace_suicide(&mut self, _address: Address, _balance: U256, _refund_address: Address, _depth: usize) { + fn trace_suicide(&mut self, _address: Address, _balance: U256, _refund_address: Address) { } fn subtracer(&self) -> Self { NoopTracer } - fn traces(self) -> Vec { + fn traces(self) -> Vec { vec![] } } diff --git a/ethcore/src/types/executed.rs b/ethcore/src/types/executed.rs index 293a427f7..efc4da9e2 100644 --- a/ethcore/src/types/executed.rs +++ b/ethcore/src/types/executed.rs @@ -18,7 +18,8 @@ use util::numbers::*; use util::Bytes; -use trace::{Trace, VMTrace}; +use util::rlp::*; +use trace::{VMTrace, FlatTrace}; use types::log_entry::LogEntry; use types::state_diff::StateDiff; use ipc::binary::BinaryConvertError; @@ -26,6 +27,43 @@ use std::fmt; use std::mem; use std::collections::VecDeque; +/// The type of the call-like instruction. +#[derive(Debug, PartialEq, Clone, Binary)] +pub enum CallType { + /// Not a CALL. + None, + /// CALL. + Call, + /// CALLCODE. + CallCode, + /// DELEGATECALL. + DelegateCall, +} + +impl Encodable for CallType { + fn rlp_append(&self, s: &mut RlpStream) { + let v = match *self { + CallType::None => 0u32, + CallType::Call => 1, + CallType::CallCode => 2, + CallType::DelegateCall => 3, + }; + s.append(&v); + } +} + +impl Decodable for CallType { + fn decode(decoder: &D) -> Result where D: Decoder { + decoder.as_rlp().as_val().and_then(|v| Ok(match v { + 0u32 => CallType::None, + 1 => CallType::Call, + 2 => CallType::CallCode, + 3 => CallType::DelegateCall, + _ => return Err(DecoderError::Custom("Invalid value of CallType item")), + })) + } +} + /// Transaction execution receipt. #[derive(Debug, PartialEq, Clone, Binary)] pub struct Executed { @@ -59,7 +97,7 @@ pub struct Executed { /// Transaction output. pub output: Bytes, /// The trace of this transaction. - pub trace: Option, + pub trace: Vec, /// The VM trace of this transaction. pub vm_trace: Option, /// The state diff, if we traced it. @@ -133,5 +171,39 @@ impl fmt::Display for ExecutionError { } } +/// Result of executing the transaction. +#[derive(PartialEq, Debug, Binary)] +pub enum ReplayError { + /// Couldn't find the transaction in the chain. 
+ TransactionNotFound, + /// Couldn't find the transaction block's state in the chain. + StatePruned, + /// Error executing. + Execution(ExecutionError), +} + +impl fmt::Display for ReplayError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::ReplayError::*; + + let msg = match *self { + TransactionNotFound => "Transaction couldn't be found in the chain".into(), + StatePruned => "Couldn't find the transaction block's state in the chain".into(), + Execution(ref e) => format!("{}", e), + }; + + f.write_fmt(format_args!("Transaction replay error ({}).", msg)) + } +} + /// Transaction execution result. pub type ExecutionResult = Result; + +#[test] +fn should_encode_and_decode_call_type() { + use util::rlp; + let original = CallType::Call; + let encoded = rlp::encode(&original); + let decoded = rlp::decode(&encoded); + assert_eq!(original, decoded); +} diff --git a/ethcore/src/types/trace_types/filter.rs b/ethcore/src/types/trace_types/filter.rs index 481cdf274..8b9357cac 100644 --- a/ethcore/src/types/trace_types/filter.rs +++ b/ethcore/src/types/trace_types/filter.rs @@ -143,6 +143,7 @@ mod tests { use trace::flat::FlatTrace; use trace::{Filter, AddressesFilter}; use basic_types::LogBloom; + use types::executed::CallType; #[test] fn empty_trace_filter_bloom_possibilities() { @@ -285,9 +286,10 @@ mod tests { value: 3.into(), gas: 4.into(), input: vec![0x5], + call_type: CallType::Call, }), result: Res::FailedCall, - trace_address: vec![0], + trace_address: vec![0].into_iter().collect(), subtraces: 0, }; @@ -311,7 +313,7 @@ mod tests { code: vec![], address: 2.into(), }), - trace_address: vec![0], + trace_address: vec![0].into_iter().collect(), subtraces: 0, }; @@ -330,7 +332,7 @@ mod tests { balance: 3.into(), }), result: Res::None, - trace_address: vec![], + trace_address: vec![].into_iter().collect(), subtraces: 0 }; diff --git a/ethcore/src/trace/flat.rs b/ethcore/src/types/trace_types/flat.rs similarity index 52% rename from ethcore/src/trace/flat.rs rename to ethcore/src/types/trace_types/flat.rs index 1e39e940c..ad1e9bace 100644 --- a/ethcore/src/trace/flat.rs +++ b/ethcore/src/types/trace_types/flat.rs @@ -16,15 +16,17 @@ //! Flat trace module +use std::collections::VecDeque; +use std::mem; +use ipc::binary::BinaryConvertError; use util::rlp::*; -use trace::BlockTraces; use basic_types::LogBloom; -use super::trace::{Trace, Action, Res}; +use super::trace::{Action, Res}; /// Trace localized in vector of traces produced by a single transaction. /// /// Parent and children indexes refer to positions in this vector. -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Binary)] pub struct FlatTrace { /// Type of action performed by a transaction. pub action: Action, @@ -35,7 +37,7 @@ pub struct FlatTrace { /// Exact location of trace. /// /// [index in root, index in first CALL, index in second CALL, ...] 
- pub trace_address: Vec<usize>, + pub trace_address: VecDeque<usize>, } impl FlatTrace { @@ -51,18 +53,19 @@ impl Encodable for FlatTrace { s.append(&self.action); s.append(&self.result); s.append(&self.subtraces); - s.append(&self.trace_address); + s.append(&self.trace_address.clone().into_iter().collect::<Vec<usize>>()); } } impl Decodable for FlatTrace { fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder { let d = decoder.as_rlp(); + let v: Vec<usize> = try!(d.val_at(3)); let res = FlatTrace { action: try!(d.val_at(0)), result: try!(d.val_at(1)), subtraces: try!(d.val_at(2)), - trace_address: try!(d.val_at(3)), + trace_address: v.into_iter().collect(), }; Ok(res) @@ -73,6 +76,12 @@ impl Decodable for FlatTrace { #[derive(Debug, PartialEq, Clone)] pub struct FlatTransactionTraces(Vec<FlatTrace>); +impl From<Vec<FlatTrace>> for FlatTransactionTraces { + fn from(v: Vec<FlatTrace>) -> Self { + FlatTransactionTraces(v) + } +} + impl FlatTransactionTraces { /// Returns bloom of all traces in the collection. pub fn bloom(&self) -> LogBloom { @@ -102,6 +111,12 @@ impl Into<Vec<FlatTrace>> for FlatTransactionTraces { #[derive(Debug, PartialEq, Clone)] pub struct FlatBlockTraces(Vec<FlatTransactionTraces>); +impl From<Vec<FlatTransactionTraces>> for FlatBlockTraces { + fn from(v: Vec<FlatTransactionTraces>) -> Self { + FlatBlockTraces(v) + } +} + impl FlatBlockTraces { /// Returns bloom of all traces in the block. pub fn bloom(&self) -> LogBloom { @@ -121,139 +136,17 @@ impl Decodable for FlatBlockTraces { } } -impl From<BlockTraces> for FlatBlockTraces { - fn from(block_traces: BlockTraces) -> Self { - let traces: Vec<Trace> = block_traces.into(); - let ordered = traces.into_iter() - .map(|trace| FlatBlockTraces::flatten(vec![], trace)) - .map(FlatTransactionTraces) - .collect(); - FlatBlockTraces(ordered) - } -} - impl Into<Vec<FlatTransactionTraces>> for FlatBlockTraces { fn into(self) -> Vec<FlatTransactionTraces> { self.0 } } -impl FlatBlockTraces { - /// Helper function flattening nested tree structure to vector of ordered traces.
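The encode/decode pair above makes a round trip through Vec because the rlp helpers used here handle Vec but not VecDeque. The conversion in isolation, outside any RLP machinery:

use std::collections::VecDeque;

fn main() {
    let address: VecDeque<usize> = vec![0, 1, 0].into_iter().collect();
    // Encode side: copy the deque out into a Vec the encoder understands.
    let as_vec: Vec<usize> = address.clone().into_iter().collect();
    // Decode side: collect the decoded Vec back into a VecDeque.
    let restored: VecDeque<usize> = as_vec.into_iter().collect();
    assert_eq!(address, restored);
}

VecDeque is used at all because update_trace_address needs cheap push_front when prefixing parent indexes onto a subtrace's address.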
- fn flatten(address: Vec, trace: Trace) -> Vec { - let subtraces = trace.subs.len(); - let all_subs = trace.subs - .into_iter() - .enumerate() - .flat_map(|(index, subtrace)| { - let mut subtrace_address = address.clone(); - subtrace_address.push(index); - FlatBlockTraces::flatten(subtrace_address, subtrace) - }) - .collect::>(); - - let ordered = FlatTrace { - action: trace.action, - result: trace.result, - subtraces: subtraces, - trace_address: address, - }; - - let mut result = vec![ordered]; - result.extend(all_subs); - result - } -} - #[cfg(test)] mod tests { use super::{FlatBlockTraces, FlatTransactionTraces, FlatTrace}; - use util::{U256, Address}; - use trace::trace::{Action, Res, CallResult, Call, Create, Trace}; - use trace::BlockTraces; - - #[test] - fn test_block_from() { - let trace = Trace { - depth: 2, - action: Action::Call(Call { - from: Address::from(1), - to: Address::from(2), - value: U256::from(3), - gas: U256::from(4), - input: vec![0x5] - }), - subs: vec![ - Trace { - depth: 3, - action: Action::Create(Create { - from: Address::from(6), - value: U256::from(7), - gas: U256::from(8), - init: vec![0x9] - }), - subs: vec![ - Trace { - depth: 3, - action: Action::Create(Create { - from: Address::from(6), - value: U256::from(7), - gas: U256::from(8), - init: vec![0x9] - }), - subs: vec![ - ], - result: Res::FailedCreate - }, - Trace { - depth: 3, - action: Action::Create(Create { - from: Address::from(6), - value: U256::from(7), - gas: U256::from(8), - init: vec![0x9] - }), - subs: vec![ - ], - result: Res::FailedCreate - } - ], - result: Res::FailedCreate - }, - Trace { - depth: 3, - action: Action::Create(Create { - from: Address::from(6), - value: U256::from(7), - gas: U256::from(8), - init: vec![0x9] - }), - subs: vec![], - result: Res::FailedCreate, - } - ], - result: Res::Call(CallResult { - gas_used: U256::from(10), - output: vec![0x11, 0x12] - }) - }; - - let block_traces = FlatBlockTraces::from(BlockTraces::from(vec![trace])); - let transaction_traces: Vec = block_traces.into(); - assert_eq!(transaction_traces.len(), 1); - let ordered_traces: Vec = transaction_traces.into_iter().nth(0).unwrap().into(); - assert_eq!(ordered_traces.len(), 5); - assert_eq!(ordered_traces[0].trace_address, vec![]); - assert_eq!(ordered_traces[0].subtraces, 2); - assert_eq!(ordered_traces[1].trace_address, vec![0]); - assert_eq!(ordered_traces[1].subtraces, 2); - assert_eq!(ordered_traces[2].trace_address, vec![0, 0]); - assert_eq!(ordered_traces[2].subtraces, 0); - assert_eq!(ordered_traces[3].trace_address, vec![0, 1]); - assert_eq!(ordered_traces[3].subtraces, 0); - assert_eq!(ordered_traces[4].trace_address, vec![1]); - assert_eq!(ordered_traces[4].subtraces, 0); - } + use trace::trace::{Action, Res, CallResult, Call}; + use types::executed::CallType; #[test] fn test_trace_serialization() { @@ -265,13 +158,14 @@ mod tests { to: 2.into(), value: 3.into(), gas: 4.into(), - input: vec![0x5] + input: vec![0x5], + call_type: CallType::Call, }), result: Res::Call(CallResult { gas_used: 10.into(), output: vec![0x11, 0x12] }), - trace_address: Vec::new(), + trace_address: Default::default(), subtraces: 0, }; diff --git a/ethcore/src/types/trace_types/mod.rs b/ethcore/src/types/trace_types/mod.rs index db429a8f4..7b5c93790 100644 --- a/ethcore/src/types/trace_types/mod.rs +++ b/ethcore/src/types/trace_types/mod.rs @@ -17,5 +17,6 @@ //! 
Types used in the public api pub mod filter; +pub mod flat; pub mod trace; pub mod localized; diff --git a/ethcore/src/types/trace_types/trace.rs b/ethcore/src/types/trace_types/trace.rs index 2cff2240c..ddd64af21 100644 --- a/ethcore/src/types/trace_types/trace.rs +++ b/ethcore/src/types/trace_types/trace.rs @@ -21,6 +21,7 @@ use util::rlp::*; use util::sha3::Hashable; use action_params::ActionParams; use basic_types::LogBloom; +use types::executed::CallType; use ipc::binary::BinaryConvertError; use std::mem; use std::collections::VecDeque; @@ -107,6 +108,8 @@ pub struct Call { pub gas: U256, /// The input data provided to the call. pub input: Bytes, + /// The type of the call. + pub call_type: CallType, } impl From for Call { @@ -117,18 +120,20 @@ impl From for Call { value: p.value.value(), gas: p.gas, input: p.data.unwrap_or_else(Vec::new), + call_type: p.call_type, } } } impl Encodable for Call { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(5); + s.begin_list(6); s.append(&self.from); s.append(&self.to); s.append(&self.value); s.append(&self.gas); s.append(&self.input); + s.append(&self.call_type); } } @@ -141,6 +146,7 @@ impl Decodable for Call { value: try!(d.val_at(2)), gas: try!(d.val_at(3)), input: try!(d.val_at(4)), + call_type: try!(d.val_at(5)), }; Ok(res) @@ -378,51 +384,6 @@ impl Res { } } -#[derive(Debug, Clone, PartialEq, Binary)] -/// A trace; includes a description of the action being traced and sub traces of each interior action. -pub struct Trace { - /// The number of EVM execution environments active when this action happened; 0 if it's - /// the outer action of the transaction. - pub depth: usize, - /// The action being performed. - pub action: Action, - /// The sub traces for each interior action performed as part of this call. - pub subs: Vec, - /// The result of the performed action. - pub result: Res, -} - -impl Encodable for Trace { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(4); - s.append(&self.depth); - s.append(&self.action); - s.append(&self.subs); - s.append(&self.result); - } -} - -impl Decodable for Trace { - fn decode(decoder: &D) -> Result where D: Decoder { - let d = decoder.as_rlp(); - let res = Trace { - depth: try!(d.val_at(0)), - action: try!(d.val_at(1)), - subs: try!(d.val_at(2)), - result: try!(d.val_at(3)), - }; - - Ok(res) - } -} - -impl Trace { - /// Returns trace bloom. - pub fn bloom(&self) -> LogBloom { - self.subs.iter().fold(self.action.bloom() | self.result.bloom(), |b, s| b | s.bloom()) - } -} - #[derive(Debug, Clone, PartialEq, Binary)] /// A diff of some chunk of memory. 
pub struct MemoryDiff { @@ -587,102 +548,3 @@ impl Decodable for VMTrace { } } -#[cfg(test)] -mod tests { - use util::{Address, U256, FixedHash}; - use util::rlp::{encode, decode}; - use util::sha3::Hashable; - use trace::trace::{Call, CallResult, Create, Res, Action, Trace, Suicide, CreateResult}; - - #[test] - fn traces_rlp() { - let trace = Trace { - depth: 2, - action: Action::Call(Call { - from: Address::from(1), - to: Address::from(2), - value: U256::from(3), - gas: U256::from(4), - input: vec![0x5] - }), - subs: vec![ - Trace { - depth: 3, - action: Action::Create(Create { - from: Address::from(6), - value: U256::from(7), - gas: U256::from(8), - init: vec![0x9] - }), - subs: vec![], - result: Res::FailedCreate - } - ], - result: Res::Call(CallResult { - gas_used: U256::from(10), - output: vec![0x11, 0x12] - }) - }; - - let encoded = encode(&trace); - let decoded: Trace = decode(&encoded); - assert_eq!(trace, decoded); - } - - #[test] - fn traces_bloom() { - let trace = Trace { - depth: 2, - action: Action::Call(Call { - from: Address::from(1), - to: Address::from(2), - value: U256::from(3), - gas: U256::from(4), - input: vec![0x5] - }), - subs: vec![ - Trace { - depth: 3, - action: Action::Create(Create { - from: Address::from(6), - value: U256::from(7), - gas: U256::from(8), - init: vec![0x9] - }), - subs: vec![], - result: Res::Create(CreateResult { - gas_used: 10.into(), - code: vec![], - address: 15.into(), - }), - }, - Trace { - depth: 3, - action: Action::Suicide(Suicide { - address: 101.into(), - refund_address: 102.into(), - balance: 0.into(), - }), - subs: vec![], - result: Res::None, - } - ], - result: Res::Call(CallResult { - gas_used: U256::from(10), - output: vec![0x11, 0x12] - }) - }; - - let bloom = trace.bloom(); - - // right now only addresses are bloomed - assert!(bloom.contains_bloomed(&Address::from(1).sha3())); - assert!(bloom.contains_bloomed(&Address::from(2).sha3())); - assert!(!bloom.contains_bloomed(&Address::from(20).sha3())); - assert!(bloom.contains_bloomed(&Address::from(6).sha3())); - assert!(bloom.contains_bloomed(&Address::from(15).sha3())); - assert!(bloom.contains_bloomed(&Address::from(101).sha3())); - assert!(bloom.contains_bloomed(&Address::from(102).sha3())); - assert!(!bloom.contains_bloomed(&Address::from(103).sha3())); - } -} diff --git a/ethcore/src/types/transaction.rs b/ethcore/src/types/transaction.rs index a8e1c66e4..ebb82c528 100644 --- a/ethcore/src/types/transaction.rs +++ b/ethcore/src/types/transaction.rs @@ -91,7 +91,7 @@ impl Transaction { impl From for SignedTransaction { fn from(t: ethjson::state::Transaction) -> Self { - let to: Option<_> = t.to.into(); + let to: Option = t.to.into(); Transaction { nonce: t.nonce.into(), gas_price: t.gas_price.into(), @@ -108,7 +108,7 @@ impl From for SignedTransaction { impl From for SignedTransaction { fn from(t: ethjson::transaction::Transaction) -> Self { - let to: Option<_> = t.to.into(); + let to: Option = t.to.into(); SignedTransaction { unsigned: Transaction { nonce: t.nonce.into(), diff --git a/ethcore/src/verification/canon_verifier.rs b/ethcore/src/verification/canon_verifier.rs index e0ebf1b7c..cc6bc448a 100644 --- a/ethcore/src/verification/canon_verifier.rs +++ b/ethcore/src/verification/canon_verifier.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see . 
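The transaction.rs tweak above (shown with its element type stripped; the annotated type is Option<Address> in the change) swaps an inferred Option<_> for an explicit target type on an Into conversion. The motivation, reduced to a toy where all names are hypothetical:

// Sketch of why the explicit annotation helps: .into() needs a concrete
// target type. MaybeAddress stands in for the ethjson "address or create"
// field; it mirrors nothing real beyond the conversion shape.
struct MaybeAddress(Option<u64>);

impl From<MaybeAddress> for Option<u64> {
    fn from(m: MaybeAddress) -> Self {
        m.0
    }
}

fn main() {
    let raw = MaybeAddress(Some(7));
    // `let to = raw.into();` would not compile: the compiler cannot pick the
    // conversion target on its own. Spelling out the type resolves it.
    let to: Option<u64> = raw.into();
    assert_eq!(to, Some(7));
}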
use blockchain::BlockProvider; -use engine::Engine; +use engines::Engine; use error::Error; use header::Header; use super::Verifier; diff --git a/ethcore/src/verification/mod.rs b/ethcore/src/verification/mod.rs index 10aee21f4..53c38a6b0 100644 --- a/ethcore/src/verification/mod.rs +++ b/ethcore/src/verification/mod.rs @@ -25,7 +25,7 @@ pub use self::canon_verifier::CanonVerifier; pub use self::noop_verifier::NoopVerifier; /// Verifier type. -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub enum VerifierType { /// Verifies block normally. Canon, diff --git a/ethcore/src/verification/noop_verifier.rs b/ethcore/src/verification/noop_verifier.rs index 99d1d594c..fb798be46 100644 --- a/ethcore/src/verification/noop_verifier.rs +++ b/ethcore/src/verification/noop_verifier.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see . use blockchain::BlockProvider; -use engine::Engine; +use engines::Engine; use error::Error; use header::Header; use super::Verifier; diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index c6b75416b..ed094a1d2 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -22,7 +22,7 @@ /// 3. Final verification against the blockchain done before enactment. use common::*; -use engine::Engine; +use engines::Engine; use blockchain::*; /// Preprocessed block data gathered in `verify_block_unordered` call @@ -233,7 +233,7 @@ mod tests { use error::BlockError::*; use views::*; use blockchain::*; - use engine::*; + use engines::Engine; use spec::*; use transaction::*; use tests::helpers::*; @@ -287,6 +287,14 @@ mod tests { self.blocks.get(hash).cloned() } + fn block_header_data(&self, hash: &H256) -> Option { + self.block(hash).map(|b| BlockView::new(&b).header_rlp().as_raw().to_vec()) + } + + fn block_body(&self, hash: &H256) -> Option { + self.block(hash).map(|b| BlockChain::block_to_body(&b)) + } + /// Get the familial details concerning a block. fn block_details(&self, hash: &H256) -> Option { self.blocks.get(hash).map(|bytes| { @@ -350,7 +358,7 @@ mod tests { gas: U256::from(30_000), gas_price: U256::from(40_000), nonce: U256::one() - }.sign(&keypair.secret()); + }.sign(keypair.secret()); let tr2 = Transaction { action: Action::Create, @@ -359,7 +367,7 @@ mod tests { gas: U256::from(30_000), gas_price: U256::from(40_000), nonce: U256::from(2) - }.sign(&keypair.secret()); + }.sign(keypair.secret()); let good_transactions = [ tr1.clone(), tr2.clone() ]; diff --git a/ethcore/src/verification/verifier.rs b/ethcore/src/verification/verifier.rs index 5db81a4eb..7f57407f7 100644 --- a/ethcore/src/verification/verifier.rs +++ b/ethcore/src/verification/verifier.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see . use blockchain::BlockProvider; -use engine::Engine; +use engines::Engine; use error::Error; use header::Header; diff --git a/ethcore/src/views/block.rs b/ethcore/src/views/block.rs index 42fd52a20..fdcae383b 100644 --- a/ethcore/src/views/block.rs +++ b/ethcore/src/views/block.rs @@ -56,6 +56,11 @@ impl<'a> BlockView<'a> { self.rlp.val_at(0) } + /// Return header rlp. + pub fn header_rlp(&self) -> Rlp { + self.rlp.at(0) + } + /// Create new header view obto block head rlp. pub fn header_view(&self) -> HeaderView<'a> { HeaderView::new_from_rlp(self.rlp.at(0)) diff --git a/ethcore/src/views/body.rs b/ethcore/src/views/body.rs new file mode 100644 index 000000000..8f1295f31 --- /dev/null +++ b/ethcore/src/views/body.rs @@ -0,0 +1,144 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. 
+// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see <http://www.gnu.org/licenses/>. + +//! View onto block body rlp. + +use util::*; +use header::*; +use transaction::*; +use super::{TransactionView, HeaderView}; + +/// View onto block body rlp. +pub struct BodyView<'a> { + rlp: Rlp<'a> +} + +impl<'a> BodyView<'a> { + /// Creates new view onto block body from raw bytes. + pub fn new(bytes: &'a [u8]) -> BodyView<'a> { + BodyView { + rlp: Rlp::new(bytes) + } + } + + /// Creates new view onto block body from rlp. + pub fn new_from_rlp(rlp: Rlp<'a>) -> BodyView<'a> { + BodyView { + rlp: rlp + } + } + + /// Return reference to underlying rlp. + pub fn rlp(&self) -> &Rlp<'a> { + &self.rlp + } + + /// Return list of transactions in given block. + pub fn transactions(&self) -> Vec<SignedTransaction> { + self.rlp.val_at(0) + } + + /// Return list of transactions with additional localization info. + pub fn localized_transactions(&self, block_hash: &H256, block_number: BlockNumber) -> Vec<LocalizedTransaction> { + self.transactions() + .into_iter() + .enumerate() + .map(|(i, t)| LocalizedTransaction { + signed: t, + block_hash: block_hash.clone(), + block_number: block_number, + transaction_index: i + }).collect() + } + + /// Return number of transactions in given block, without deserializing them. + pub fn transactions_count(&self) -> usize { + self.rlp.at(0).item_count() + } + + /// Return list of transaction views in given block. + pub fn transaction_views(&self) -> Vec<TransactionView> { + self.rlp.at(0).iter().map(TransactionView::new_from_rlp).collect() + } + + /// Return transaction hashes. + pub fn transaction_hashes(&self) -> Vec<H256> { + self.rlp.at(0).iter().map(|rlp| rlp.as_raw().sha3()).collect() + } + + /// Returns transaction at given index without deserializing unnecessary data. + pub fn transaction_at(&self, index: usize) -> Option<SignedTransaction> { + self.rlp.at(0).iter().nth(index).map(|rlp| rlp.as_val()) + } + + /// Returns localized transaction at given index. + pub fn localized_transaction_at(&self, block_hash: &H256, block_number: BlockNumber, index: usize) -> Option<LocalizedTransaction> { + self.transaction_at(index).map(|t| LocalizedTransaction { + signed: t, + block_hash: block_hash.clone(), + block_number: block_number, + transaction_index: index + }) + } + + /// Return list of uncles of given block. + pub fn uncles(&self) -> Vec<Header>
{ + self.rlp.val_at(1) } + + /// Return number of uncles in given block, without deserializing them. + pub fn uncles_count(&self) -> usize { + self.rlp.at(1).item_count() + } + + /// Return list of uncle header views in given block. + pub fn uncle_views(&self) -> Vec<HeaderView> { + self.rlp.at(1).iter().map(HeaderView::new_from_rlp).collect() + } + + /// Return list of uncle hashes of given block. + pub fn uncle_hashes(&self) -> Vec<H256> { + self.rlp.at(1).iter().map(|rlp| rlp.as_raw().sha3()).collect() + } + + /// Return nth uncle. + pub fn uncle_at(&self, index: usize) -> Option<Header>
{ + self.rlp.at(1).iter().nth(index).map(|rlp| rlp.as_val()) + } + + /// Return nth uncle rlp. + pub fn uncle_rlp_at(&self, index: usize) -> Option { + self.rlp.at(1).iter().nth(index).map(|rlp| rlp.as_raw().to_vec()) + } +} + +#[cfg(test)] +mod tests { + use util::*; + use super::BodyView; + use blockchain::BlockChain; + + #[test] + fn test_block_view() { + // that's rlp of block created with ethash engine. + let rlp = "f90261f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23f862f86002018304cb2f94ec0e71ad0a90ffe1909d27dac207f7680abba42d01801ba03a347e72953c860f32b1eb2c78a680d8734b2ea08085d949d729479796f218d5a047ea6239d9e31ccac8af3366f5ca37184d26e7646e3191a3aeb81c4cf74de500c0".from_hex().unwrap(); + let body = BlockChain::block_to_body(&rlp); + let view = BodyView::new(&body); + assert_eq!(view.transactions_count(), 1); + assert_eq!(view.uncles_count(), 0); + } +} + diff --git a/ethcore/src/views/mod.rs b/ethcore/src/views/mod.rs index c0102be3d..e8267e15a 100644 --- a/ethcore/src/views/mod.rs +++ b/ethcore/src/views/mod.rs @@ -19,7 +19,9 @@ mod block; mod header; mod transaction; +mod body; pub use self::block::BlockView; pub use self::header::HeaderView; +pub use self::body::BodyView; pub use self::transaction::TransactionView; diff --git a/ethstore/src/dir/disk.rs b/ethstore/src/dir/disk.rs index b94fe5214..ef898a3ad 100644 --- a/ethstore/src/dir/disk.rs +++ b/ethstore/src/dir/disk.rs @@ -14,16 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
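BodyView above pairs with BlockChain::block_to_body from the test: a full block is the RLP list [header, transactions, uncles], and the stored body keeps only the last two items, which is what lets header queries skip deserializing transaction data entirely. A sketch of that split over a toy value type standing in for RLP items (illustrative only):

// Toy stand-in for RLP values: either a byte string or a list of items.
#[derive(Debug, Clone, PartialEq)]
enum Item {
    Data(Vec<u8>),
    List(Vec<Item>),
}

fn block_to_body(block: &Item) -> Item {
    match *block {
        // Drop parts[0] (the header); keep transactions and uncles.
        Item::List(ref parts) if parts.len() == 3 => Item::List(parts[1..].to_vec()),
        _ => panic!("malformed block"),
    }
}

fn main() {
    let header = Item::Data(b"header".to_vec());
    let txs = Item::List(vec![Item::Data(b"tx0".to_vec())]);
    let uncles = Item::List(vec![]);
    let block = Item::List(vec![header, txs.clone(), uncles.clone()]);
    assert_eq!(block_to_body(&block), Item::List(vec![txs, uncles]));
}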
-use std::{fs, ffi, io}; +use std::{fs, io}; use std::path::{PathBuf, Path}; use std::collections::HashMap; use time; use ethkey::Address; -use {libc, json, SafeAccount, Error}; +use {json, SafeAccount, Error}; use super::KeyDirectory; #[cfg(not(windows))] fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> { + use std::ffi; + use libc; let cstr = ffi::CString::new(file_path.to_str().unwrap()).unwrap(); match unsafe { libc::chmod(cstr.as_ptr(), libc::S_IWUSR | libc::S_IRUSR) } { 0 => Ok(()), @@ -32,7 +34,7 @@ fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> { } #[cfg(windows)] -fn restrict_permissions_to_owner(file_path: &Path) -> Result<(), i32> { +fn restrict_permissions_to_owner(_file_path: &Path) -> Result<(), i32> { Ok(()) } @@ -71,13 +73,14 @@ impl DiskDirectory { let files = try!(files); - let accounts = files.into_iter() + files.into_iter() .map(json::KeyFile::load) .zip(paths.into_iter()) - .filter_map(|(file, path)| file.ok().map(|file| (path.clone(), SafeAccount::from_file(file, path)))) - .collect(); - - Ok(accounts) + .map(|(file, path)| match file { + Ok(file) => Ok((path, file.into())), + Err(err) => Err(Error::InvalidKeyFile(format!("{:?}: {}", path, err))), + }) + .collect() } } diff --git a/ethstore/src/error.rs b/ethstore/src/error.rs index 8066fe10c..781c5ac7a 100644 --- a/ethstore/src/error.rs +++ b/ethstore/src/error.rs @@ -24,6 +24,7 @@ pub enum Error { InvalidPassword, InvalidSecret, InvalidAccount, + InvalidKeyFile(String), CreationFailed, EthKey(EthKeyError), Custom(String), @@ -32,12 +33,13 @@ pub enum Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { let s = match *self { - Error::Io(ref err) => format!("{}", err), + Error::Io(ref err) => err.to_string(), Error::InvalidPassword => "Invalid password".into(), Error::InvalidSecret => "Invalid secret".into(), Error::InvalidAccount => "Invalid account".into(), + Error::InvalidKeyFile(ref reason) => format!("Invalid key file: {}", reason), Error::CreationFailed => "Account creation failed".into(), - Error::EthKey(ref err) => format!("{}", err), + Error::EthKey(ref err) => err.to_string(), Error::Custom(ref s) => s.clone(), }; diff --git a/evmbin/Cargo.lock b/evmbin/Cargo.lock index 14c6d9bcb..f135b3b0b 100644 --- a/evmbin/Cargo.lock +++ b/evmbin/Cargo.lock @@ -159,6 +159,7 @@ name = "ethash" version = "1.3.0" dependencies = [ "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", "primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "sha3 0.1.0", ] @@ -250,8 +251,9 @@ dependencies = [ "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)", "nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rocksdb 0.4.5 (git+https://github.com/ethcore/rust-rocksdb)", + "rocksdb 0.4.5", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -301,6 +303,7 @@ dependencies = [ "serde_codegen 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.7.1 
(registry+https://github.com/rust-lang/crates.io-index)", "syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -622,6 +625,17 @@ name = "odds" version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "parking_lot" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "primal" version = "0.2.3" @@ -724,16 +738,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "rocksdb" version = "0.4.5" -source = "git+https://github.com/ethcore/rust-rocksdb#9be41e05923616dfa28741c58b22776d479751e6" dependencies = [ "libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", - "rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)", + "rocksdb-sys 0.3.0", ] [[package]] name = "rocksdb-sys" version = "0.3.0" -source = "git+https://github.com/ethcore/rust-rocksdb#9be41e05923616dfa28741c58b22776d479751e6" dependencies = [ "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", @@ -831,6 +843,11 @@ name = "slab" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "smallvec" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "solicit" version = "0.4.4" diff --git a/ipc/codegen/src/serialization.rs b/ipc/codegen/src/serialization.rs index 60b54edd4..b67a81326 100644 --- a/ipc/codegen/src/serialization.rs +++ b/ipc/codegen/src/serialization.rs @@ -320,7 +320,7 @@ fn binary_expr_struct( let read_expr = match fields.iter().any(|f| codegen::has_ptr(&f.ty)) { true => { // cannot create structs with pointers - quote_expr!(cx, Err(::ipc::binary::BinaryConvertError)) + quote_expr!(cx, Err(::ipc::binary::BinaryConvertError::not_supported())) }, false => { if value_ident.is_some() { @@ -412,7 +412,7 @@ fn binary_expr_enum( arms.iter().map(|x| x.write.clone()).collect::>(), arms.iter().map(|x| x.read.clone()).collect::>()); - read_arms.push(quote_arm!(cx, _ => { Err(BinaryConvertError) } )); + read_arms.push(quote_arm!(cx, _ => { Err(BinaryConvertError::variant(buffer[0])) } )); Ok(BinaryExpressions { size: quote_expr!(cx, 1usize + match *self { $size_arms }), @@ -530,9 +530,29 @@ fn fields_sequence( tt.push(Token(_sp, token::Comma)); tt.push(Token(_sp, token::Ident(ext_cx.ident_of("length_stack")))); + tt.push(Token(_sp, token::CloseDelim(token::Paren))); + + // name member if it has resulted in the error + tt.push(Token(_sp, token::Dot)); + tt.push(Token(_sp, token::Ident(ext_cx.ident_of("map_err")))); + + tt.push(Token(_sp, token::OpenDelim(token::Paren))); + tt.push(Token(_sp, token::BinOp(token::Or))); + tt.push(Token(_sp, token::Ident(ext_cx.ident_of("e")))); + tt.push(Token(_sp, token::BinOp(token::Or))); + tt.push(Token(_sp, token::Ident(ext_cx.ident_of("e")))); + tt.push(Token(_sp, token::Dot)); + tt.push(Token(_sp, token::Ident(ext_cx.ident_of("named")))); + tt.push(Token(_sp, 
token::OpenDelim(token::Paren))); + tt.push(Token(_sp, token::Literal(token::Lit::Str_( + field.ident.unwrap_or(ext_cx.ident_of(&format!("f{}", idx))).name), + None)) + ); + tt.push(Token(_sp, token::CloseDelim(token::Paren))); + tt.push(Token(_sp, token::CloseDelim(token::Paren))); tt.push(Token(_sp, token::CloseDelim(token::Paren))); - tt.push(Token(_sp, token::CloseDelim(token::Paren))); + tt.push(Token(_sp, token::Comma)); } if named_members { @@ -573,7 +593,7 @@ fn named_fields_sequence( tt.push(Token(_sp, token::OpenDelim(token::Brace))); for (idx, field) in fields.iter().enumerate() { - tt.push(Token(_sp, token::Ident(field.ident.clone().unwrap()))); + tt.push(Token(_sp, token::Ident(field.ident.clone().expect("function is called for named fields")))); tt.push(Token(_sp, token::Colon)); // special case for u8, it just takes byte form sequence @@ -646,9 +666,26 @@ fn named_fields_sequence( tt.push(Token(_sp, token::Comma)); tt.push(Token(_sp, token::Ident(ext_cx.ident_of("length_stack")))); - - tt.push(Token(_sp, token::CloseDelim(token::Paren))); + + // name member if it has resulted in the error + tt.push(Token(_sp, token::Dot)); + tt.push(Token(_sp, token::Ident(ext_cx.ident_of("map_err")))); + tt.push(Token(_sp, token::OpenDelim(token::Paren))); + tt.push(Token(_sp, token::BinOp(token::Or))); + tt.push(Token(_sp, token::Ident(ext_cx.ident_of("e")))); + tt.push(Token(_sp, token::BinOp(token::Or))); + tt.push(Token(_sp, token::Ident(ext_cx.ident_of("e")))); + tt.push(Token(_sp, token::Dot)); + tt.push(Token(_sp, token::Ident(ext_cx.ident_of("named")))); + tt.push(Token(_sp, token::OpenDelim(token::Paren))); + tt.push(Token(_sp, token::Literal(token::Lit::Str_( + field.ident.unwrap_or(ext_cx.ident_of(&format!("f{}", idx))).name), + None)) + ); + tt.push(Token(_sp, token::CloseDelim(token::Paren))); + tt.push(Token(_sp, token::CloseDelim(token::Paren))); + tt.push(Token(_sp, token::CloseDelim(token::Paren))); tt.push(Token(_sp, token::Comma)); } diff --git a/ipc/rpc/src/binary.rs b/ipc/rpc/src/binary.rs index 00c5ac9e6..81cd8cf7f 100644 --- a/ipc/rpc/src/binary.rs +++ b/ipc/rpc/src/binary.rs @@ -24,7 +24,74 @@ use std::ops::Range; use super::Handshake; #[derive(Debug)] -pub struct BinaryConvertError; +pub enum BinaryConvertErrorKind { + SizeMismatch { + expected: usize, + found: usize, + }, + TargetPayloadEmpty, + UnexpectedVariant(u8), + MissingLengthValue, + InconsistentBoundaries, + NotSupported, +} + +#[derive(Debug)] +pub struct BinaryConvertError { + member_tree: Vec<&'static str>, + kind: BinaryConvertErrorKind, +} + +impl BinaryConvertError { + pub fn size(expected: usize, found: usize) -> BinaryConvertError { + BinaryConvertError { + member_tree: Vec::new(), + kind: BinaryConvertErrorKind::SizeMismatch { + expected: expected, + found: found, + } + } + } + + pub fn empty() -> BinaryConvertError { + BinaryConvertError { member_tree: Vec::new(), kind: BinaryConvertErrorKind::TargetPayloadEmpty } + } + + pub fn variant(val: u8) -> BinaryConvertError { + BinaryConvertError { member_tree: Vec::new(), kind: BinaryConvertErrorKind::UnexpectedVariant(val) } + } + + pub fn length() -> BinaryConvertError { + BinaryConvertError { member_tree: Vec::new(), kind: BinaryConvertErrorKind::MissingLengthValue } + } + + pub fn boundaries() -> BinaryConvertError { + BinaryConvertError { member_tree: Vec::new(), kind: BinaryConvertErrorKind::InconsistentBoundaries } + } + + pub fn not_supported() -> BinaryConvertError { + BinaryConvertError { member_tree: Vec::new(), kind: 
BinaryConvertErrorKind::NotSupported }
+	}
+
+	pub fn named(mut self, name: &'static str) -> BinaryConvertError {
+		self.member_tree.push(name);
+		self
+	}
+}
+
+#[derive(Debug)]
+pub enum BinaryError {
+	Serialization(BinaryConvertError),
+	Io(::std::io::Error),
+}
+
+impl From<::std::io::Error> for BinaryError {
+	fn from(err: ::std::io::Error) -> Self { BinaryError::Io(err) }
+}
+
+impl From<BinaryConvertError> for BinaryError {
+	fn from(err: BinaryConvertError) -> Self { BinaryError::Serialization(err) }
+}
 
 pub trait BinaryConvertable : Sized {
 	fn size(&self) -> usize {
@@ -36,7 +103,7 @@ pub trait BinaryConvertable : Sized {
 	fn from_bytes(buffer: &[u8], length_stack: &mut VecDeque<usize>) -> Result<Self, BinaryConvertError>;
 
 	fn from_empty_bytes() -> Result<Self, BinaryConvertError> {
-		Err(BinaryConvertError)
+		Err(BinaryConvertError::size(mem::size_of::<Self>(), 0))
 	}
 
 	fn len_params() -> usize {
@@ -50,7 +117,7 @@ impl<T> BinaryConvertable for Option<T> where T: BinaryConvertable {
 	}
 
 	fn to_bytes(&self, buffer: &mut [u8], length_stack: &mut VecDeque<usize>) -> Result<(), BinaryConvertError> {
-		match *self { None => Err(BinaryConvertError), Some(ref val) => val.to_bytes(buffer, length_stack) }
+		match *self { None => Err(BinaryConvertError::empty()), Some(ref val) => val.to_bytes(buffer, length_stack) }
 	}
 
 	fn from_bytes(buffer: &[u8], length_stack: &mut VecDeque<usize>) -> Result<Self, BinaryConvertError> {
@@ -77,7 +144,7 @@ impl<E: BinaryConvertable> BinaryConvertable for Result<(), E> {
 
 	fn to_bytes(&self, buffer: &mut [u8], length_stack: &mut VecDeque<usize>) -> Result<(), BinaryConvertError> {
 		match *self {
-			Ok(_) => Err(BinaryConvertError),
+			Ok(_) => Err(BinaryConvertError::empty()),
 			Err(ref e) => Ok(try!(e.to_bytes(buffer, length_stack))),
 		}
 	}
@@ -107,7 +174,7 @@ impl<R: BinaryConvertable> BinaryConvertable for Result<R, ()> {
 	fn to_bytes(&self, buffer: &mut [u8], length_stack: &mut VecDeque<usize>) -> Result<(), BinaryConvertError> {
 		match *self {
 			Ok(ref r) => Ok(try!(r.to_bytes(buffer, length_stack))),
-			Err(_) => Err(BinaryConvertError),
+			Err(_) => Err(BinaryConvertError::empty()),
 		}
 	}
@@ -160,7 +227,7 @@ impl<R: BinaryConvertable, E: BinaryConvertable> BinaryConvertable for Result<R, E> {
 			1 => Ok(Err(try!(E::from_bytes(&buffer[1..], length_stack)))),
-			_ => Err(BinaryConvertError)
+			_ => Err(BinaryConvertError::variant(buffer[0]))
 		}
 	}
@@ -216,7 +283,7 @@ impl<K, V> BinaryConvertable for BTreeMap<K, V> where K : BinaryConvertable + Ord
 		loop {
 			let key_size = match K::len_params() {
 				0 => mem::size_of::<K>(),
-				_ => try!(length_stack.pop_front().ok_or(BinaryConvertError)),
+				_ => try!(length_stack.pop_front().ok_or(BinaryConvertError::length())),
 			};
 			let key = if key_size == 0 {
 				try!(K::from_empty_bytes())
@@ -227,7 +294,7 @@ impl<K, V> BinaryConvertable for BTreeMap<K, V> where K : BinaryConvertable + Ord
 			let val_size = match V::len_params() {
 				0 => mem::size_of::<V>(),
-				_ => try!(length_stack.pop_front().ok_or(BinaryConvertError)),
+				_ => try!(length_stack.pop_front().ok_or(BinaryConvertError::length())),
 			};
 			let val = if val_size == 0 {
 				try!(V::from_empty_bytes())
@@ -239,7 +306,7 @@ impl<K, V> BinaryConvertable for BTreeMap<K, V> where K : BinaryConvertable + Ord
 			if index == buffer.len() { break; }
 
 			if index > buffer.len() {
-				return Err(BinaryConvertError)
+				return Err(BinaryConvertError::boundaries())
 			}
 		}
@@ -255,6 +322,74 @@ impl<K, V> BinaryConvertable for BTreeMap<K, V> where K : BinaryConvertable + Ord
 	}
 }
 
+impl<T> BinaryConvertable for VecDeque<T> where T: BinaryConvertable {
+	fn size(&self) -> usize {
+		match T::len_params() {
+			0 => mem::size_of::<T>() * self.len(),
+			_ => self.iter().fold(0usize, |acc, t| acc + t.size()),
+		}
+	}
+
+	fn to_bytes(&self, buffer: &mut [u8], length_stack: &mut VecDeque<usize>) -> Result<(), BinaryConvertError> {
+		let mut offset = 0usize;
+		for item in self.iter() {
+			let next_size = match T::len_params() {
+				0 => mem::size_of::<T>(),
+				_ => { let size = item.size(); length_stack.push_back(size); size },
+			};
+			if next_size > 0 {
+				let item_end = offset + next_size;
+				try!(item.to_bytes(&mut buffer[offset..item_end], length_stack));
+				offset = item_end;
+			}
+		}
+		Ok(())
+	}
+
+	fn from_bytes(buffer: &[u8], length_stack: &mut VecDeque<usize>) -> Result<Self, BinaryConvertError> {
+		let mut index = 0;
+		let mut result = Self::with_capacity(
+			match T::len_params() {
+				0 => buffer.len() / mem::size_of::<T>(),
+				_ => 128,
+			});
+
+		if buffer.len() == 0 { return Ok(result); }
+
+		loop {
+			let next_size = match T::len_params() {
+				0 => mem::size_of::<T>(),
+				_ => try!(length_stack.pop_front().ok_or(BinaryConvertError::length())),
+			};
+			let item = if next_size == 0 {
+				try!(T::from_empty_bytes())
+			}
+			else {
+				try!(T::from_bytes(&buffer[index..index+next_size], length_stack))
+			};
+			result.push_back(item);
+
+			index = index + next_size;
+			if index == buffer.len() { break; }
+			if index + next_size > buffer.len() {
+				return Err(BinaryConvertError::boundaries())
+			}
+		}
+
+		Ok(result)
+	}
+
+	fn from_empty_bytes() -> Result<Self, BinaryConvertError> {
+		Ok(Self::new())
+	}
+
+	fn len_params() -> usize {
+		1
+	}
+}
+
+//
+
 impl<T> BinaryConvertable for Vec<T> where T: BinaryConvertable {
 	fn size(&self) -> usize {
 		match T::len_params() {
@@ -292,7 +427,7 @@ impl<T> BinaryConvertable for Vec<T> where T: BinaryConvertable {
 		loop {
 			let next_size = match T::len_params() {
 				0 => mem::size_of::<T>(),
-				_ => try!(length_stack.pop_front().ok_or(BinaryConvertError)),
+				_ => try!(length_stack.pop_front().ok_or(BinaryConvertError::length())),
 			};
 			let item = if next_size == 0 {
 				try!(T::from_empty_bytes())
@@ -304,10 +439,9 @@ impl<T> BinaryConvertable for Vec<T> where T: BinaryConvertable {
 
 			index = index + next_size;
 			if index == buffer.len() { break; }
-			if index > buffer.len() {
-				return Err(BinaryConvertError)
+			if index + next_size > buffer.len() {
+				return Err(BinaryConvertError::boundaries())
 			}
-
 		}
 
 		Ok(result)
@@ -351,7 +485,7 @@ impl<T> BinaryConvertable for Range<T> where T: BinaryConvertable {
 	}
 
 	fn from_empty_bytes() -> Result<Self, BinaryConvertError> {
-		Err(BinaryConvertError)
+		Err(BinaryConvertError::empty())
 	}
 
 	fn to_bytes(&self, buffer: &mut[u8], length_stack: &mut VecDeque<usize>) -> Result<(), BinaryConvertError> {
@@ -442,7 +576,7 @@ impl BinaryConvertable for Vec<u8> {
 	}
 }
 
-pub fn deserialize_from<T, R>(r: &mut R) -> Result<T, BinaryConvertError>
+pub fn deserialize_from<T, R>(r: &mut R) -> Result<T, BinaryError>
 	where R: ::std::io::Read,
 		T: BinaryConvertable
 {
@@ -453,12 +587,15 @@ pub fn deserialize_from<T, R>(r: &mut R) -> Result<T, BinaryConvertError>
 			let fixed_size = mem::size_of::<T>();
 			let mut payload_buffer = Vec::with_capacity(fixed_size);
 			unsafe { payload_buffer.set_len(fixed_size); }
-			try!(r.read(&mut payload_buffer).map_err(|_| BinaryConvertError));
-			T::from_bytes(&payload_buffer[..], &mut fake_stack)
+			let bytes_read = try!(r.read(&mut payload_buffer));
+			if bytes_read != mem::size_of::<T>() {
+				return Err(BinaryError::Serialization(BinaryConvertError::size(fixed_size, bytes_read)))
+			}
+			Ok(try!(T::from_bytes(&payload_buffer[..], &mut fake_stack)))
 		},
 		_ => {
 			let mut payload = Vec::new();
-			try!(r.read_to_end(&mut payload).map_err(|_| BinaryConvertError));
+			try!(r.read_to_end(&mut payload));
 
 			let stack_len = try!(u64::from_bytes(&payload[0..8], &mut fake_stack)) as usize;
 			let mut length_stack = VecDeque::<usize>::with_capacity(stack_len);
@@ -474,23 +611,23 @@ pub fn deserialize_from<T, R>(r: &mut R) -> Result<T, BinaryConvertError>
 			let size = try!(u64::from_bytes(&payload[8+stack_len*8..16+stack_len*8], &mut fake_stack)) as usize;
 			match size {
 				0 => {
-					T::from_empty_bytes()
+					Ok(try!(T::from_empty_bytes()))
 				},
 				_ => {
-					T::from_bytes(&payload[16+stack_len*8..], &mut length_stack)
+					Ok(try!(T::from_bytes(&payload[16+stack_len*8..], &mut length_stack)))
 				}
 			}
 		},
 	}
 }
 
-pub fn deserialize<T: BinaryConvertable>(buffer: &[u8]) -> Result<T, BinaryConvertError> {
+pub fn deserialize<T: BinaryConvertable>(buffer: &[u8]) -> Result<T, BinaryError> {
 	use std::io::Cursor;
 	let mut buff = Cursor::new(buffer);
 	deserialize_from::<T, _>(&mut buff)
 }
 
-pub fn serialize_into<T, W>(t: &T, w: &mut W) -> Result<(), BinaryConvertError>
+pub fn serialize_into<T, W>(t: &T, w: &mut W) -> Result<(), BinaryError>
 	where W: ::std::io::Write,
 		T: BinaryConvertable
 {
@@ -502,7 +639,7 @@ pub fn serialize_into<T, W>(t: &T, w: &mut W) -> Result<(), BinaryConvertError>
 			let mut buffer = Vec::with_capacity(fixed_size);
 			unsafe { buffer.set_len(fixed_size); }
 			try!(t.to_bytes(&mut buffer[..], &mut fake_stack));
-			try!(w.write(&buffer[..]).map_err(|_| BinaryConvertError));
+			try!(w.write(&buffer[..]));
 			Ok(())
 		},
 		_ => {
@@ -511,8 +648,8 @@ pub fn serialize_into<T, W>(t: &T, w: &mut W) -> Result<(), BinaryConvertError>
 
 			let size = t.size();
 			if size == 0 {
-				try!(w.write(&size_buffer).map_err(|_| BinaryConvertError));
-				try!(w.write(&size_buffer).map_err(|_| BinaryConvertError));
+				try!(w.write(&size_buffer));
+				try!(w.write(&size_buffer));
 				return Ok(());
 			}
@@ -522,7 +659,7 @@ pub fn serialize_into<T, W>(t: &T, w: &mut W) -> Result<(), BinaryConvertError>
 
 			let stack_len = length_stack.len();
 			try!((stack_len as u64).to_bytes(&mut size_buffer[..], &mut fake_stack));
-			try!(w.write(&size_buffer[..]).map_err(|_| BinaryConvertError));
+			try!(w.write(&size_buffer[..]));
 
 			if stack_len > 0 {
 				let mut header_buffer = Vec::with_capacity(stack_len * 8);
 				unsafe { header_buffer.set_len(stack_len * 8); };
@@ -535,20 +672,20 @@ pub fn serialize_into<T, W>(t: &T, w: &mut W) -> Result<(), BinaryConvertError>
 					}
 					idx = idx + 1;
 				}
-				try!(w.write(&header_buffer[..]).map_err(|_| BinaryConvertError));
+				try!(w.write(&header_buffer[..]));
 			}
 
 			try!((size as u64).to_bytes(&mut size_buffer[..], &mut fake_stack));
-			try!(w.write(&size_buffer[..]).map_err(|_| BinaryConvertError));
+			try!(w.write(&size_buffer[..]));
 
-			try!(w.write(&buffer[..]).map_err(|_| BinaryConvertError));
+			try!(w.write(&buffer[..]));
 
 			Ok(())
 		},
 	}
 }
 
-pub fn serialize<T: BinaryConvertable>(t: &T) -> Result<Vec<u8>, BinaryConvertError> {
+pub fn serialize<T: BinaryConvertable>(t: &T) -> Result<Vec<u8>, BinaryError> {
 	use std::io::Cursor;
 	let mut buff = Cursor::new(Vec::new());
 	try!(serialize_into(t, &mut buff));
@@ -562,9 +699,8 @@ macro_rules! binary_fixed_size {
 	impl BinaryConvertable for $target_ty {
 		fn from_bytes(bytes: &[u8], _length_stack: &mut VecDeque<usize>) -> Result<Self, BinaryConvertError> {
 			match bytes.len().cmp(&::std::mem::size_of::<$target_ty>()) {
-				::std::cmp::Ordering::Less => return Err(BinaryConvertError),
-				::std::cmp::Ordering::Greater => return Err(BinaryConvertError),
-				::std::cmp::Ordering::Equal => ()
+				::std::cmp::Ordering::Equal => (),
+				_ => return Err(BinaryConvertError::size(::std::mem::size_of::<$target_ty>(), bytes.len())),
 			};
 			let mut res: Self = unsafe { ::std::mem::uninitialized() };
 			res.copy_raw(bytes);
@@ -898,6 +1034,29 @@ fn serialize_btree() {
 	assert_eq!(res[&1u64], 5u64);
 }
 
+#[test]
+fn serialize_refcell() {
+	use std::cell::RefCell;
+
+	let source = RefCell::new(vec![5u32, 12u32, 19u32]);
+	let serialized = serialize(&source).unwrap();
+	let deserialized = deserialize::<RefCell<Vec<u32>>>(&serialized).unwrap();
+
+	assert_eq!(source, deserialized);
+}
+
+#[test]
+fn serialize_cell() {
+	use std::cell::Cell;
+	use std::str::FromStr;
+
+	let source = Cell::new(U256::from_str("01231231231239999").unwrap());
+	let serialized = serialize(&source).unwrap();
+	let deserialized = deserialize::<Cell<U256>>(&serialized).unwrap();
+
+	assert_eq!(source, deserialized);
+}
+
 #[test]
 fn serialize_handshake() {
 	use std::io::{Cursor, SeekFrom, Seek};
@@ -915,5 +1074,80 @@ fn serialize_handshake() {
 	let res = deserialize_from::<BinHandshake, _>(&mut buff).unwrap().to_semver();
 
 	assert_eq!(res, handshake);
-
+}
+
+#[test]
+fn serialize_invalid_size() {
+	// value
+	let deserialized = deserialize::<u64>(&[]);
+	match deserialized {
+		Err(BinaryError::Serialization(
+			BinaryConvertError {
+				kind: BinaryConvertErrorKind::SizeMismatch { expected: 8, found: 0 },
+				member_tree: _
+			})) => {},
+		other => panic!("Not a size mismatched error but: {:?}", other),
+	}
+}
+
+#[test]
+fn serialize_boundaries() {
+	// value
+	let deserialized = deserialize::<Vec<u32>>(
+		&[
+			// payload header
+			0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
+			2u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
+			//
+			0u8, 0u8, 0u8, 5u8,
+			0u8, 0u8, 0u8, 4u8,
+			1u8, 1u8, /* not 4 bytes */
+		]
+	);
+	match deserialized {
+		Err(BinaryError::Serialization(
+			BinaryConvertError {
+				kind: BinaryConvertErrorKind::InconsistentBoundaries,
+				member_tree: _
+			})) => {},
+		other => panic!("Not an inconsistent boundaries error but: {:?}", other),
+	}
+}
+
+#[test]
+fn serialize_empty_try() {
+	// value
+	let mut stack = VecDeque::new();
+	let mut data = vec![0u8; 16];
+	let sample: Option<Vec<u8>> = None;
+	let serialized = sample.to_bytes(&mut data, &mut stack);
+	match serialized {
+		Err(BinaryConvertError {
+			kind: BinaryConvertErrorKind::TargetPayloadEmpty,
+			member_tree: _
+		}) => {},
+		other => panic!("Not an error about empty payload to be produced but: {:?}", other),
+	}
+}
+
+#[test]
+fn serialize_not_enough_lengths() {
+	// value
+	let deserialized = deserialize::<Vec<Option<u32>>>(
+		&[
+			// payload header
+			0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
+			2u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
+			// does not matter because no length param for the first option
+			0u8,
+		]
+	);
+	match deserialized {
+		Err(BinaryError::Serialization(
+			BinaryConvertError {
+				kind: BinaryConvertErrorKind::MissingLengthValue,
+				member_tree: _
+			})) => {},
+		other => panic!("Not a missing length param error but: {:?}", other),
+	}
 }
diff --git a/json/src/spec/params.rs b/json/src/spec/params.rs
index 8a953bb89..62c63d6b5 100644
--- a/json/src/spec/params.rs
+++ b/json/src/spec/params.rs
@@ -17,6 +17,7 @@
 //! Spec params deserialization.
 
 use uint::Uint;
+use hash::H256;
 
 /// Spec params.
 #[derive(Debug, PartialEq, Deserialize)]
@@ -33,6 +34,12 @@ pub struct Params {
 	/// Minimum gas limit.
 	#[serde(rename="minGasLimit")]
 	pub min_gas_limit: Uint,
+	/// Optional fork block number to check.
+	#[serde(rename="forkBlock")]
+	pub fork_block: Option<Uint>,
+	/// Expected fork block hash.
+	#[serde(rename="forkCanonHash")]
+	pub fork_hash: Option<H256>,
 }
 
 #[cfg(test)]
diff --git a/json/src/spec/spec.rs b/json/src/spec/spec.rs
index da37ba7dd..27c27ce0a 100644
--- a/json/src/spec/spec.rs
+++ b/json/src/spec/spec.rs
@@ -78,7 +78,9 @@ mod tests {
 			"frontierCompatibilityModeLimit": "0x789b0",
 			"maximumExtraDataSize": "0x20",
 			"minGasLimit": "0x1388",
-			"networkID" : "0x2"
+			"networkID" : "0x2",
+			"forkBlock": "0xffffffffffffffff",
+			"forkCanonHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
 		},
 		"genesis": {
 			"seal": {
diff --git a/logger/src/lib.rs b/logger/src/lib.rs
index 521c3a2d7..2a6c0bb35 100644
--- a/logger/src/lib.rs
+++ b/logger/src/lib.rs
@@ -36,39 +36,25 @@ use regex::Regex;
 use util::RotatingLogger;
 use util::log::Colour;
 
-pub struct Settings {
+#[derive(Debug, PartialEq)]
+pub struct Config {
+	pub mode: Option<String>,
 	pub color: bool,
-	pub init: Option<String>,
 	pub file: Option<String>,
 }
 
-impl Settings {
-	pub fn new() -> Settings {
-		Settings {
-			color: true,
-			init: None,
+impl Default for Config {
+	fn default() -> Self {
+		Config {
+			mode: None,
+			color: !cfg!(windows),
 			file: None,
 		}
 	}
-
-	pub fn init(mut self, init: String) -> Settings {
-		self.init = Some(init);
-		self
-	}
-
-	pub fn file(mut self, file: String) -> Settings {
-		self.file = Some(file);
-		self
-	}
-
-	pub fn no_color(mut self) -> Settings {
-		self.color = false;
-		self
-	}
 }
 
 /// Sets up the logger
-pub fn setup_log(settings: &Settings) -> Arc<RotatingLogger> {
+pub fn setup_log(config: &Config) -> Result<Arc<RotatingLogger>, String> {
 	use rlog::*;
 
 	let mut levels = String::new();
@@ -84,16 +70,21 @@ pub fn setup_log(config: &Config) -> Result<Arc<RotatingLogger>, String> {
 		builder.parse(lvl);
 	}
 
-	if let Some(ref s) = settings.init {
+	if let Some(ref s) = config.mode {
 		levels.push_str(s);
 		builder.parse(s);
 	}
 
 	let isatty = stderr_isatty();
-	let enable_color = settings.color && isatty;
+	let enable_color = config.color && isatty;
 	let logs = Arc::new(RotatingLogger::new(levels));
 	let logger = logs.clone();
-	let maybe_file = settings.file.as_ref().map(|f| File::create(f).unwrap_or_else(|_| panic!("Cannot write to log file given: {}", f)));
+
+	let maybe_file = match config.file.as_ref() {
+		Some(f) => Some(try!(File::create(f).map_err(|_| format!("Cannot write to log file given: {}", f)))),
+		None => None,
+	};
+
 	let format = move |record: &LogRecord| {
 		let timestamp = time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).unwrap();
@@ -123,9 +114,11 @@ pub fn setup_log(config: &Config) -> Result<Arc<RotatingLogger>, String> {
 		ret
 	};
 
+
 	builder.format(format);
 	builder.init().unwrap();
-	logs
+
+	Ok(logs)
 }
 
 fn kill_color(s: &str) -> String {
diff --git a/parity/account.rs b/parity/account.rs
new file mode 100644
index 000000000..3c4a5dd74
--- /dev/null
+++ b/parity/account.rs
@@ -0,0 +1,84 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use ethcore::ethstore::{EthStore, import_accounts};
+use ethcore::ethstore::dir::DiskDirectory;
+use ethcore::account_provider::AccountProvider;
+use helpers::{password_prompt, password_from_file};
+
+#[derive(Debug, PartialEq)]
+pub enum AccountCmd {
+	New(NewAccount),
+	List(String),
+	Import(ImportAccounts),
+}
+
+#[derive(Debug, PartialEq)]
+pub struct NewAccount {
+	pub iterations: u32,
+	pub path: String,
+	pub password_file: Option<String>,
+}
+
+#[derive(Debug, PartialEq)]
+pub struct ImportAccounts {
+	pub from: Vec<String>,
+	pub to: String,
+}
+
+pub fn execute(cmd: AccountCmd) -> Result<String, String> {
+	match cmd {
+		AccountCmd::New(new_cmd) => new(new_cmd),
+		AccountCmd::List(path) => list(path),
+		AccountCmd::Import(import_cmd) => import(import_cmd),
+	}
+}
+
+fn new(n: NewAccount) -> Result<String, String> {
+	let password: String = match n.password_file {
+		Some(file) => try!(password_from_file(file)),
+		None => try!(password_prompt()),
+	};
+
+	let dir = Box::new(DiskDirectory::create(n.path).unwrap());
+	let secret_store = Box::new(EthStore::open_with_iterations(dir, n.iterations).unwrap());
+	let acc_provider = AccountProvider::new(secret_store);
+	let new_account = acc_provider.new_account(&password).unwrap();
+	Ok(format!("{:?}", new_account))
+}
+
+fn list(path: String) -> Result<String, String> {
+	let dir = Box::new(DiskDirectory::create(path).unwrap());
+	let secret_store = Box::new(EthStore::open(dir).unwrap());
+	let acc_provider = AccountProvider::new(secret_store);
+	let accounts = acc_provider.accounts();
+	let result = accounts.into_iter()
+		.map(|a| format!("{:?}", a))
+		.collect::<Vec<String>>()
+		.join("\n");
+
+	Ok(result)
+}
+
+fn import(i: ImportAccounts) -> Result<String, String> {
+	let to = DiskDirectory::create(i.to).unwrap();
+	let mut imported = 0;
+	for path in &i.from {
+		let from = DiskDirectory::at(path);
+		imported += try!(import_accounts(&from, &to).map_err(|_| "Importing accounts failed.")).len();
+	}
+	Ok(format!("{}", imported))
+}
diff --git a/parity/blockchain.rs b/parity/blockchain.rs
new file mode 100644
index 000000000..90ca809c1
--- /dev/null
+++ b/parity/blockchain.rs
@@ -0,0 +1,286 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
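A quick aside on the account module just shown: each subcommand is reduced to a plain data structure and handed to `account::execute`, which returns the text to print. A minimal sketch of driving it directly — the path and iteration count below are hypothetical, and error handling is collapsed into the returned `Result` exactly as above:

	use account::{AccountCmd, NewAccount, execute};

	fn create_key_demo() -> Result<String, String> {
		// Equivalent of `parity account new` with an interactive password
		// prompt, since no --password file is supplied.
		let cmd = AccountCmd::New(NewAccount {
			iterations: 10240,                    // hypothetical --keys-iterations value
			path: "/tmp/parity-demo-keys".into(), // hypothetical keys directory
			password_file: None,
		});
		execute(cmd) // Ok(_) carries the formatted address of the new account
	}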
+
+use std::str::{FromStr, from_utf8};
+use std::{io, fs};
+use std::io::{Read, Write, BufReader, BufRead};
+use std::time::Duration;
+use std::thread::sleep;
+use std::path::Path;
+use std::sync::Arc;
+use rustc_serialize::hex::FromHex;
+use ethcore_logger::{setup_log, Config as LogConfig};
+use util::panics::{PanicHandler, ForwardPanic};
+use util::{PayloadInfo, ToPretty};
+use ethcore::service::ClientService;
+use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType, BlockImportError, BlockChainClient, BlockID};
+use ethcore::error::ImportError;
+use ethcore::miner::Miner;
+use cache::CacheConfig;
+use informant::Informant;
+use params::{SpecType, Pruning};
+use helpers::{to_client_config, execute_upgrades};
+use dir::Directories;
+use fdlimit;
+
+#[derive(Debug, PartialEq)]
+pub enum DataFormat {
+	Hex,
+	Binary,
+}
+
+impl Default for DataFormat {
+	fn default() -> Self {
+		DataFormat::Binary
+	}
+}
+
+impl FromStr for DataFormat {
+	type Err = String;
+
+	fn from_str(s: &str) -> Result<Self, Self::Err> {
+		match s {
+			"binary" | "bin" => Ok(DataFormat::Binary),
+			"hex" => Ok(DataFormat::Hex),
+			x => Err(format!("Invalid format: {}", x))
+		}
+	}
+}
+
+#[derive(Debug, PartialEq)]
+pub enum BlockchainCmd {
+	Import(ImportBlockchain),
+	Export(ExportBlockchain),
+}
+
+#[derive(Debug, PartialEq)]
+pub struct ImportBlockchain {
+	pub spec: SpecType,
+	pub logger_config: LogConfig,
+	pub cache_config: CacheConfig,
+	pub dirs: Directories,
+	pub file_path: Option<String>,
+	pub format: Option<DataFormat>,
+	pub pruning: Pruning,
+	pub compaction: DatabaseCompactionProfile,
+	pub wal: bool,
+	pub mode: Mode,
+	pub tracing: Switch,
+	pub vm_type: VMType,
+}
+
+#[derive(Debug, PartialEq)]
+pub struct ExportBlockchain {
+	pub spec: SpecType,
+	pub logger_config: LogConfig,
+	pub cache_config: CacheConfig,
+	pub dirs: Directories,
+	pub file_path: Option<String>,
+	pub format: Option<DataFormat>,
+	pub pruning: Pruning,
+	pub compaction: DatabaseCompactionProfile,
+	pub wal: bool,
+	pub mode: Mode,
+	pub tracing: Switch,
+	pub from_block: BlockID,
+	pub to_block: BlockID,
+}
+
+pub fn execute(cmd: BlockchainCmd) -> Result<String, String> {
+	match cmd {
+		BlockchainCmd::Import(import_cmd) => execute_import(import_cmd),
+		BlockchainCmd::Export(export_cmd) => execute_export(export_cmd),
+	}
+}
+
+fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
+	// Setup panic handler
+	let panic_handler = PanicHandler::new_in_arc();
+
+	// load spec file
+	let spec = try!(cmd.spec.spec());
+
+	// load genesis hash
+	let genesis_hash = spec.genesis_header().hash();
+
+	// Setup logging
+	let _logger = setup_log(&cmd.logger_config);
+
+	fdlimit::raise_fd_limit();
+
+	// select pruning algorithm
+	let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());
+
+	// prepare client_path
+	let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
+
+	// execute upgrades
+	try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
+
+	// prepare client config
+	let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), spec.fork_name.as_ref());
+
+	// build client
+	let service = try!(ClientService::start(
+		client_config,
+		spec,
+		Path::new(&client_path),
+		Arc::new(Miner::with_spec(try!(cmd.spec.spec()))),
+	).map_err(|e| format!("Client service error: {:?}", e)));
+
+	panic_handler.forward_from(&service);
+	let client = service.client();
+
+	let mut instream: Box<io::Read> = match cmd.file_path {
+		Some(f) => Box::new(try!(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f)))),
+		None => Box::new(io::stdin()),
+	};
+
+	const READAHEAD_BYTES: usize = 8;
+
+	let mut first_bytes: Vec<u8> = vec![0; READAHEAD_BYTES];
+	let mut first_read = 0;
+
+	let format = match cmd.format {
+		Some(format) => format,
+		None => {
+			first_read = try!(instream.read(&mut first_bytes).map_err(|_| "Error reading from the file/stream."));
+			match first_bytes[0] {
+				0xf9 => DataFormat::Binary,
+				_ => DataFormat::Hex,
+			}
+		}
+	};
+
+	let informant = Informant::new(client.clone(), None, None, cmd.logger_config.color);
+
+	let do_import = |bytes| {
+		while client.queue_info().is_full() { sleep(Duration::from_secs(1)); }
+		match client.import_block(bytes) {
+			Err(BlockImportError::Import(ImportError::AlreadyInChain)) => {
+				trace!("Skipping block already in chain.");
+			}
+			Err(e) => {
+				return Err(format!("Cannot import block: {:?}", e));
+			},
+			Ok(_) => {},
+		}
+		informant.tick();
+		Ok(())
+	};
+
+
+	match format {
+		DataFormat::Binary => {
+			loop {
+				let mut bytes = if first_read > 0 {first_bytes.clone()} else {vec![0; READAHEAD_BYTES]};
+				let n = if first_read > 0 {
+					first_read
+				} else {
+					try!(instream.read(&mut bytes).map_err(|_| "Error reading from the file/stream."))
+				};
+				if n == 0 { break; }
+				first_read = 0;
+				let s = try!(PayloadInfo::from(&bytes).map_err(|e| format!("Invalid RLP in the file/stream: {:?}", e))).total();
+				bytes.resize(s, 0);
+				try!(instream.read_exact(&mut bytes[n..]).map_err(|_| "Error reading from the file/stream."));
+				try!(do_import(bytes));
+			}
+		}
+		DataFormat::Hex => {
+			for line in BufReader::new(instream).lines() {
+				let s = try!(line.map_err(|_| "Error reading from the file/stream."));
+				let s = if first_read > 0 {from_utf8(&first_bytes).unwrap().to_owned() + &(s[..])} else {s};
+				first_read = 0;
+				let bytes = try!(s.from_hex().map_err(|_| "Invalid hex in file/stream."));
+				try!(do_import(bytes));
+			}
+		}
+	}
+	client.flush_queue();
+
+	Ok("Import completed.".into())
+}
+
+fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
+	// Setup panic handler
+	let panic_handler = PanicHandler::new_in_arc();
+
+	let format = cmd.format.unwrap_or_else(Default::default);
+
+	// load spec file
+	let spec = try!(cmd.spec.spec());
+
+	// load genesis hash
+	let genesis_hash = spec.genesis_header().hash();
+
+	// Setup logging
+	let _logger = setup_log(&cmd.logger_config);
+
+	fdlimit::raise_fd_limit();
+
+	// select pruning algorithm
+	let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());
+
+	// prepare client_path
+	let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
+
+	// execute upgrades
+	try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
+
+	// prepare client config
+	let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.wal, VMType::default(), "".into(), spec.fork_name.as_ref());
+
+	let service = try!(ClientService::start(
+		client_config,
+		spec,
+		Path::new(&client_path),
+		Arc::new(Miner::with_spec(try!(cmd.spec.spec())))
+	).map_err(|e| format!("Client service error: {:?}", e)));
+
+	panic_handler.forward_from(&service);
+	let client = service.client();
+
+	let mut out: Box<io::Write> = match cmd.file_path {
+		Some(f) => Box::new(try!(fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f)))),
+		None => Box::new(io::stdout()),
+	};
+
+	let from = try!(client.block_number(cmd.from_block).ok_or("From block could not be found"));
+	let to = try!(client.block_number(cmd.to_block).ok_or("To block could not be found"));
+
+	for i in from..(to + 1) {
+		let b = client.block(BlockID::Number(i)).unwrap();
+		match format {
+			DataFormat::Binary => { out.write(&b).expect("Couldn't write to stream."); }
+			DataFormat::Hex => { out.write_fmt(format_args!("{}", b.pretty())).expect("Couldn't write to stream."); }
+		}
+	}
+
+	Ok("Export completed.".into())
+}
+
+#[cfg(test)]
+mod test {
+	use super::DataFormat;
+
+	#[test]
+	fn test_data_format_parsing() {
+		assert_eq!(DataFormat::Binary, "binary".parse().unwrap());
+		assert_eq!(DataFormat::Binary, "bin".parse().unwrap());
+		assert_eq!(DataFormat::Hex, "hex".parse().unwrap());
+	}
+}
diff --git a/parity/cache.rs b/parity/cache.rs
new file mode 100644
index 000000000..45f1cb5f5
--- /dev/null
+++ b/parity/cache.rs
@@ -0,0 +1,109 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::cmp::max;
+
+const MIN_BC_CACHE_MB: u32 = 4;
+const MIN_DB_CACHE_MB: u32 = 2;
+const MIN_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 16;
+const DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 50;
+
+/// Configuration for application cache sizes.
+/// All values are represented in MB.
+#[derive(Debug, PartialEq)]
+pub struct CacheConfig {
+	/// Size of database cache set using option `set_block_cache_size_mb`
+	/// 50% is blockchain
+	/// 25% is tracing
+	/// 25% is state
+	db: u32,
+	/// Size of blockchain cache.
+	blockchain: u32,
+	/// Size of transaction queue cache.
+	queue: u32,
+}
+
+impl Default for CacheConfig {
+	fn default() -> Self {
+		CacheConfig::new(64, 8, DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB)
+	}
+}
+
+impl CacheConfig {
+	/// Creates new cache config with cumulative size equal `total`.
+	pub fn new_with_total_cache_size(total: u32) -> Self {
+		CacheConfig {
+			db: total * 7 / 8,
+			blockchain: total / 8,
+			queue: DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB,
+		}
+	}
+
+	/// Creates new cache config with given details.
+	pub fn new(db: u32, blockchain: u32, queue: u32) -> Self {
+		CacheConfig {
+			db: db,
+			blockchain: blockchain,
+			queue: queue,
+		}
+	}
+
+	/// Size of db cache for blockchain.
+	pub fn db_blockchain_cache_size(&self) -> u32 {
+		max(MIN_DB_CACHE_MB, self.blockchain / 4)
+	}
+
+	/// Size of db cache for state.
+	pub fn db_state_cache_size(&self) -> u32 {
+		max(MIN_DB_CACHE_MB, self.db * 3 / 4)
+	}
+
+	/// Size limit of the block queue.
+	pub fn queue(&self) -> u32 {
+		max(self.queue, MIN_BLOCK_QUEUE_SIZE_LIMIT_MB)
+	}
+
+	/// Size of the blockchain cache.
+	pub fn blockchain(&self) -> u32 {
+		max(self.blockchain, MIN_BC_CACHE_MB)
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::CacheConfig;
+
+	#[test]
+	fn test_cache_config_constructor() {
+		let config = CacheConfig::new_with_total_cache_size(200);
+		assert_eq!(config.db, 175);
+		assert_eq!(config.blockchain(), 25);
+		assert_eq!(config.queue(), 50);
+	}
+
+	#[test]
+	fn test_cache_config_db_cache_sizes() {
+		let config = CacheConfig::new_with_total_cache_size(400);
+		assert_eq!(config.db, 350);
+		assert_eq!(config.db_blockchain_cache_size(), 12);
+		assert_eq!(config.db_state_cache_size(), 262);
+	}
+
+	#[test]
+	fn test_cache_config_default() {
+		assert_eq!(CacheConfig::default(), CacheConfig::new(64, 8, super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB));
+	}
+}
diff --git a/parity/cli.rs b/parity/cli.rs
index 60aca8310..d3627fda2 100644
--- a/parity/cli.rs
+++ b/parity/cli.rs
@@ -15,6 +15,7 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 use util::version;
+use docopt::Docopt;
 
 pub const USAGE: &'static str = r#"
 Parity. Ethereum Client.
@@ -22,6 +23,8 @@ Parity. Ethereum Client.
   Copyright 2015, 2016 Ethcore (UK) Limited
 
 Usage:
+  parity [options]
+  parity ui [options]
   parity daemon <pid-file> [options]
   parity account (new | list ) [options]
  parity account import <path>... [options]
  parity wallet import <path> [options]
  parity import [ <file> ] [options]
  parity export [ <file> ] [options]
  parity signer new-token [options]
-  parity [options]
-  parity ui [options]
 
 Operating Options:
   --mode MODE              Set the operating mode. MODE can be one of:
@@ -47,8 +48,8 @@ Operating Options:
                            [default: 3600].
   --chain CHAIN            Specify the blockchain type. CHAIN may be either a
                            JSON chain specification file or olympic, frontier,
-                           homestead, mainnet, morden, homestead-dogmatic, or
-                           testnet [default: homestead].
+                           homestead, mainnet, morden, classic or testnet
+                           [default: homestead].
   -d --db-path PATH        Specify the database & configuration directory path
                            [default: $HOME/.parity].
   --keys-path PATH         Specify the path for JSON key files to be found
@@ -78,7 +79,8 @@ Networking Options:
   --no-network             Disable p2p networking.
   --port PORT              Override the port on which the node should listen
                            [default: 30303].
-  --peers NUM              Try to maintain that many peers [default: 25].
+  --min-peers NUM          Try to maintain at least NUM peers [default: 25].
+  --max-peers NUM          Allow up to that many peers [default: 50].
   --nat METHOD             Specify method to use for determining public
                            address. Must be one of: any, none, upnp,
                            extip:<IP> [default: any].
@@ -105,8 +107,8 @@ API and Console Options:
   --jsonrpc-apis APIS      Specify the APIs available through the JSONRPC
                            interface. APIS is a comma-delimited list of API
                            names. Possible names are web3, eth, net, personal,
-                           ethcore, ethcore_set, traces.
-                           [default: web3,eth,net,ethcore,personal,traces].
+                           ethcore, ethcore_set, traces, rpc.
+                           [default: web3,eth,net,ethcore,personal,traces,rpc].
   --jsonrpc-hosts HOSTS    List of allowed Host header values. This option will
                            validate the Host header sent by the browser, it is
                            additional security against some attack
@@ -201,18 +203,16 @@ Footprint Options:
                            fast - maintain journal overlay. Fast but 50MB used.
                            auto - use the method most recently synced or
                            default to fast if none synced [default: auto].
-  --cache-pref-size BYTES  Specify the preferred size of the blockchain cache in
-                           bytes [default: 16384].
-  --cache-max-size BYTES   Specify the maximum size of the blockchain cache in
-                           bytes [default: 262144].
-  --queue-max-size BYTES   Specify the maximum size of memory to use for block
-                           queue [default: 52428800].
-  --cache MEGABYTES        Set total amount of discretionary memory to use for
+  --cache-size-db MB       Override database cache size [default: 64].
+  --cache-size-blocks MB   Specify the preferred size of the blockchain cache in
+                           megabytes [default: 8].
+  --cache-size-queue MB    Specify the maximum size of memory to use for block
+                           queue [default: 50].
+  --cache-size MB          Set total amount of discretionary memory to use for
                            the entire system, overrides other cache and queue
                            options.
-
-Database Options:
-  --db-cache-size MB       Override RocksDB database cache size.
+  --fast-and-loose         Disables DB WAL, which gives a significant speed up
+                           but means an unclean exit is unrecoverable.
   --db-compaction TYPE     Database compaction type. TYPE may be one of:
                            ssd - suitable for SSDs and fast HDDs;
                            hdd - suitable for slow HDDs [default: ssd].
@@ -239,7 +239,7 @@ Legacy Options:
                            Overrides the --keys-path option.
   --datadir PATH           Equivalent to --db-path PATH.
   --networkid INDEX        Equivalent to --network-id INDEX.
-  --maxpeers COUNT         Equivalent to --peers COUNT.
+  --peers NUM              Equivalent to --min-peers NUM.
   --nodekey KEY            Equivalent to --node-key KEY.
   --nodiscover             Equivalent to --no-discovery.
   -j --jsonrpc             Does nothing; JSON-RPC is on by default now.
@@ -260,6 +260,7 @@ Legacy Options:
                            --basic-tx-usd.
   --etherbase ADDRESS      Equivalent to --author ADDRESS.
   --extradata STRING       Equivalent to --extra-data STRING.
+  --cache MB               Equivalent to --cache-size MB.
 
 Miscellaneous Options:
   -l --logging LOGGING     Specify the logging level. Must conform to the same
@@ -271,7 +272,7 @@ Miscellaneous Options:
   -h --help                Show this screen.
 "#;
 
-#[derive(Debug, RustcDecodable)]
+#[derive(Debug, PartialEq, RustcDecodable)]
 pub struct Args {
 	pub cmd_daemon: bool,
 	pub cmd_account: bool,
@@ -294,7 +295,6 @@ pub struct Args {
 	pub flag_identity: String,
 	pub flag_unlock: Option<String>,
 	pub flag_password: Vec<String>,
-	pub flag_cache: Option<usize>,
 	pub flag_keys_path: String,
 	pub flag_keys_iterations: u32,
 	pub flag_no_import_keys: bool,
@@ -303,15 +303,21 @@ pub struct Args {
 	pub flag_pruning: String,
 	pub flag_tracing: String,
 	pub flag_port: u16,
-	pub flag_peers: usize,
+	pub flag_min_peers: u16,
+	pub flag_max_peers: u16,
 	pub flag_no_discovery: bool,
 	pub flag_nat: String,
 	pub flag_node_key: Option<String>,
 	pub flag_reserved_peers: Option<String>,
 	pub flag_reserved_only: bool,
-	pub flag_cache_pref_size: usize,
-	pub flag_cache_max_size: usize,
-	pub flag_queue_max_size: usize,
+
+	pub flag_cache_size_db: u32,
+	pub flag_cache_size_blocks: u32,
+	pub flag_cache_size_queue: u32,
+	pub flag_cache_size: Option<u32>,
+	pub flag_cache: Option<u32>,
+	pub flag_fast_and_loose: bool,
+
 	pub flag_no_jsonrpc: bool,
 	pub flag_jsonrpc_interface: String,
 	pub flag_jsonrpc_port: u16,
@@ -360,7 +366,7 @@ pub struct Args {
 	pub flag_geth: bool,
 	pub flag_nodekey: Option<String>,
 	pub flag_nodiscover: bool,
-	pub flag_maxpeers: Option<usize>,
+	pub flag_peers: Option<u16>,
 	pub flag_datadir: Option<String>,
 	pub flag_extradata: Option<String>,
 	pub flag_etherbase: Option<String>,
@@ -380,13 +386,18 @@ pub struct Args {
 	pub flag_dapps_off: bool,
 	pub flag_ipcpath: Option<String>,
 	pub flag_ipcapi: Option<String>,
-	pub flag_db_cache_size: Option<usize>,
 	pub flag_db_compaction: String,
 	pub flag_fat_db: bool,
 }
 
-pub fn print_version() {
-	println!("\
+impl Default for Args {
+	fn default() -> Self {
+		Docopt::new(USAGE).unwrap().argv(&[] as &[&str]).decode().unwrap()
+	}
+}
+
+pub fn print_version() -> String {
+	format!("\
 Parity version {}
 Copyright 2015, 2016 Ethcore (UK) Limited
@@ -395,6 +406,6 @@ This is free software: you are free to change and redistribute it.
 There is NO WARRANTY, to the extent permitted by law.
 
 By Wood/Paronyan/Kotewicz/Drwięga/Volf.\
-", version());
+", version())
 }
diff --git a/parity/configuration.rs b/parity/configuration.rs
index ce9b7d679..81b3cb6c6 100644
--- a/parity/configuration.rs
+++ b/parity/configuration.rs
@@ -14,57 +14,233 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-use std::env;
-use std::fs::File;
 use std::time::Duration;
-use std::io::{BufRead, BufReader};
-use std::net::{SocketAddr, IpAddr};
+use std::io::Read;
+use std::net::SocketAddr;
 use std::path::PathBuf;
+use std::cmp::max;
 use cli::{USAGE, Args};
-use docopt::Docopt;
-
-use die::*;
-use util::*;
-use util::log::Colour::*;
-use ethcore::account_provider::AccountProvider;
+use docopt::{Docopt, Error as DocoptError};
+use util::{Hashable, NetworkConfiguration, U256, Uint, is_valid_node_url, Bytes, version_data, Secret, Address};
 use util::network_settings::NetworkSettings;
-use ethcore::client::{append_path, get_db_path, Mode, ClientConfig, DatabaseCompactionProfile, Switch, VMType};
-use ethcore::miner::{MinerOptions, PendingSet, GasPricer, GasPriceCalibratorOptions};
-use ethcore::ethereum;
-use ethcore::spec::Spec;
-use ethsync::SyncConfig;
-use rpc::IpcConfiguration;
-use ethcore_logger::Settings as LogSettings;
+use util::log::Colour;
+use ethcore::client::{VMType, Mode};
+use ethcore::miner::MinerOptions;
 
-pub struct Configuration {
-	pub args: Args
+use rpc::{IpcConfiguration, HttpConfiguration};
+use cache::CacheConfig;
+use helpers::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_price, replace_home,
+geth_ipc_path, parity_ipc_path, to_bootnodes, to_addresses, to_address};
+use params::{ResealPolicy, AccountsConfig, GasPricerConfig, MinerExtras};
+use ethcore_logger::Config as LogConfig;
+use dir::Directories;
+use dapps::Configuration as DappsConfiguration;
+use signer::Configuration as SignerConfiguration;
+use run::RunCmd;
+use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain};
+use presale::ImportWallet;
+use account::{AccountCmd, NewAccount, ImportAccounts};
+
+#[derive(Debug, PartialEq)]
+pub enum Cmd {
+	Run(RunCmd),
+	Version,
+	Account(AccountCmd),
+	ImportPresaleWallet(ImportWallet),
+	Blockchain(BlockchainCmd),
+	SignerToken(String),
 }
 
-pub struct Directories {
-	pub keys: String,
-	pub db: String,
-	pub dapps: String,
-	pub signer: String,
+#[derive(Debug, PartialEq)]
+pub struct Configuration {
+	pub args: Args,
 }
 
 impl Configuration {
-	pub fn parse() -> Self {
-		Configuration {
-			args: Docopt::new(USAGE).and_then(|d| d.decode()).unwrap_or_else(|e| e.exit()),
+	pub fn parse<S, I>(command: I) -> Result<Self, DocoptError> where I: IntoIterator<Item=S>, S: AsRef<str> {
+		let args = try!(Docopt::new(USAGE).and_then(|d| d.argv(command).decode()));
+
+		let config = Configuration {
+			args: args,
+		};
+
+		Ok(config)
+	}
+
+	pub fn into_command(self) -> Result<Cmd, String> {
+		let dirs = self.directories();
+		let pruning = try!(self.args.flag_pruning.parse());
+		let vm_type = try!(self.vm_type());
+		let mode = try!(to_mode(&self.args.flag_mode, self.args.flag_mode_timeout, self.args.flag_mode_alarm));
+		let miner_options = try!(self.miner_options());
+		let logger_config = self.logger_config();
+		let http_conf = try!(self.http_config());
+		let ipc_conf = try!(self.ipc_config());
+		let net_conf = try!(self.net_config());
+		let network_id = try!(self.network_id());
+		let cache_config = self.cache_config();
+		let spec = try!(self.chain().parse());
+		let tracing = try!(self.args.flag_tracing.parse());
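		// An illustrative aside on the cache_config computed above: when a
		// single --cache-size (or legacy --cache) total is given, CacheConfig
		// splits it 7/8 to the database and 1/8 to the blockchain cache and
		// keeps the 50 MB queue default, mirroring the unit test in
		// parity/cache.rs:
		//
		//     let cfg = CacheConfig::new_with_total_cache_size(200);
		//     // cfg.db == 175, cfg.blockchain() == 25, cfg.queue() == 50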
+ let compaction = try!(self.args.flag_db_compaction.parse()); + let wal = !self.args.flag_fast_and_loose; + let enable_network = self.enable_network(&mode); + let geth_compatibility = self.args.flag_geth; + let signer_port = self.signer_port(); + let dapps_conf = self.dapps_config(); + let signer_conf = self.signer_config(); + + let cmd = if self.args.flag_version { + Cmd::Version + } else if self.args.cmd_signer { + Cmd::SignerToken(dirs.signer) + } else if self.args.cmd_account { + let account_cmd = if self.args.cmd_new { + let new_acc = NewAccount { + iterations: self.args.flag_keys_iterations, + path: dirs.keys, + password_file: self.args.flag_password.first().cloned(), + }; + AccountCmd::New(new_acc) + } else if self.args.cmd_list { + AccountCmd::List(dirs.keys) + } else if self.args.cmd_import { + let import_acc = ImportAccounts { + from: self.args.arg_path.clone(), + to: dirs.keys, + }; + AccountCmd::Import(import_acc) + } else { + unreachable!(); + }; + Cmd::Account(account_cmd) + } else if self.args.cmd_wallet { + let presale_cmd = ImportWallet { + iterations: self.args.flag_keys_iterations, + path: dirs.keys, + wallet_path: self.args.arg_path.first().unwrap().clone(), + password_file: self.args.flag_password.first().cloned(), + }; + Cmd::ImportPresaleWallet(presale_cmd) + } else if self.args.cmd_import { + let import_cmd = ImportBlockchain { + spec: spec, + logger_config: logger_config, + cache_config: cache_config, + dirs: dirs, + file_path: self.args.arg_file.clone(), + format: None, + pruning: pruning, + compaction: compaction, + wal: wal, + mode: mode, + tracing: tracing, + vm_type: vm_type, + }; + Cmd::Blockchain(BlockchainCmd::Import(import_cmd)) + } else if self.args.cmd_export { + let export_cmd = ExportBlockchain { + spec: spec, + logger_config: logger_config, + cache_config: cache_config, + dirs: dirs, + file_path: self.args.arg_file.clone(), + format: None, + pruning: pruning, + compaction: compaction, + wal: wal, + mode: mode, + tracing: tracing, + from_block: try!(to_block_id(&self.args.flag_from)), + to_block: try!(to_block_id(&self.args.flag_to)), + }; + Cmd::Blockchain(BlockchainCmd::Export(export_cmd)) + } else { + let daemon = if self.args.cmd_daemon { + Some(self.args.arg_pid_file.clone()) + } else { + None + }; + + let run_cmd = RunCmd { + cache_config: cache_config, + dirs: dirs, + spec: spec, + pruning: pruning, + daemon: daemon, + logger_config: logger_config, + miner_options: miner_options, + http_conf: http_conf, + ipc_conf: ipc_conf, + net_conf: net_conf, + network_id: network_id, + acc_conf: try!(self.accounts_config()), + gas_pricer: try!(self.gas_pricer_config()), + miner_extras: try!(self.miner_extras()), + mode: mode, + tracing: tracing, + compaction: compaction, + wal: wal, + vm_type: vm_type, + enable_network: enable_network, + geth_compatibility: geth_compatibility, + signer_port: signer_port, + net_settings: self.network_settings(), + dapps_conf: dapps_conf, + signer_conf: signer_conf, + ui: self.args.cmd_ui, + name: self.args.flag_identity, + custom_bootnodes: self.args.flag_bootnodes.is_some(), + }; + Cmd::Run(run_cmd) + }; + + Ok(cmd) + } + + fn enable_network(&self, mode: &Mode) -> bool { + match *mode { + Mode::Dark(_) => false, + _ => !self.args.flag_no_network, } } - pub fn mode(&self) -> Mode { - match &(self.args.flag_mode[..]) { - "active" => Mode::Active, - "passive" => Mode::Passive(Duration::from_secs(self.args.flag_mode_timeout), Duration::from_secs(self.args.flag_mode_alarm)), - "dark" => 
Mode::Dark(Duration::from_secs(self.args.flag_mode_timeout)), - _ => die!("{}: Invalid address for --mode. Must be one of active, passive or dark.", self.args.flag_mode), + fn vm_type(&self) -> Result { + if self.args.flag_jitvm { + VMType::jit().ok_or("Parity is built without the JIT EVM.".into()) + } else { + Ok(VMType::Interpreter) } } - fn net_port(&self) -> u16 { - self.args.flag_port + fn miner_extras(&self) -> Result { + let extras = MinerExtras { + author: try!(self.author()), + extra_data: try!(self.extra_data()), + gas_floor_target: try!(to_u256(&self.args.flag_gas_floor_target)), + gas_ceil_target: try!(to_u256(&self.args.flag_gas_cap)), + transactions_limit: self.args.flag_tx_queue_size, + }; + + Ok(extras) + } + + fn author(&self) -> Result { + to_address(self.args.flag_etherbase.clone().or(self.args.flag_author.clone())) + } + + fn cache_config(&self) -> CacheConfig { + match self.args.flag_cache_size.or(self.args.flag_cache) { + Some(size) => CacheConfig::new_with_total_cache_size(size), + None => CacheConfig::new(self.args.flag_cache_size_db, self.args.flag_cache_size_blocks, self.args.flag_cache_size_queue), + } + } + + fn logger_config(&self) -> LogConfig { + LogConfig { + mode: self.args.flag_logging.clone(), + color: !self.args.flag_no_color && !cfg!(windows), + file: self.args.flag_log_file.clone(), + } } fn chain(&self) -> String { @@ -76,361 +252,177 @@ impl Configuration { } fn max_peers(&self) -> u32 { - self.args.flag_maxpeers.unwrap_or(self.args.flag_peers) as u32 + let peers = self.args.flag_max_peers as u32; + max(self.min_peers(), peers) } - fn decode_u256(d: &str, argument: &str) -> U256 { - U256::from_dec_str(d).unwrap_or_else(|_| - U256::from_str(clean_0x(d)).unwrap_or_else(|_| - die!("{}: Invalid numeric value for {}. Must be either a decimal or a hex number.", d, argument) - ) - ) + fn min_peers(&self) -> u32 { + self.args.flag_peers.unwrap_or(self.args.flag_min_peers) as u32 } fn work_notify(&self) -> Vec { self.args.flag_notify_work.as_ref().map_or_else(Vec::new, |s| s.split(',').map(|s| s.to_owned()).collect()) } - pub fn miner_options(&self) -> MinerOptions { - let (own, ext) = match self.args.flag_reseal_on_txs.as_str() { - "none" => (false, false), - "own" => (true, false), - "ext" => (false, true), - "all" => (true, true), - x => die!("{}: Invalid value for --reseal option. Use --help for more information.", x) + fn accounts_config(&self) -> Result { + let cfg = AccountsConfig { + iterations: self.args.flag_keys_iterations, + import_keys: !self.args.flag_no_import_keys, + testnet: self.args.flag_testnet, + password_files: self.args.flag_password.clone(), + unlocked_accounts: try!(to_addresses(&self.args.flag_unlock)), }; - MinerOptions { + + Ok(cfg) + } + + fn miner_options(&self) -> Result { + let reseal = try!(self.args.flag_reseal_on_txs.parse::()); + + let options = MinerOptions { new_work_notify: self.work_notify(), force_sealing: self.args.flag_force_sealing, - reseal_on_external_tx: ext, - reseal_on_own_tx: own, - tx_gas_limit: self.args.flag_tx_gas_limit.as_ref().map_or(!U256::zero(), |d| Self::decode_u256(d, "--tx-gas-limit")), - tx_queue_size: self.args.flag_tx_queue_size, - pending_set: match self.args.flag_relay_set.as_str() { - "cheap" => PendingSet::AlwaysQueue, - "strict" => PendingSet::AlwaysSealing, - "lenient" => PendingSet::SealingOrElseQueue, - x => die!("{}: Invalid value for --relay-set option. 
Use --help for more information.", x) + reseal_on_external_tx: reseal.external, + reseal_on_own_tx: reseal.own, + tx_gas_limit: match self.args.flag_tx_gas_limit { + Some(ref d) => try!(to_u256(d)), + None => U256::max_value(), }, + tx_queue_size: self.args.flag_tx_queue_size, + pending_set: try!(to_pending_set(&self.args.flag_relay_set)), reseal_min_period: Duration::from_millis(self.args.flag_reseal_min_period), work_queue_size: self.args.flag_work_queue_size, enable_resubmission: !self.args.flag_remove_solved, - } - } - - pub fn author(&self) -> Option
{ - self.args.flag_etherbase.as_ref() - .or(self.args.flag_author.as_ref()) - .map(|d| Address::from_str(clean_0x(d)).unwrap_or_else(|_| { - die!("{}: Invalid address for --author. Must be 40 hex characters, with or without the 0x at the beginning.", d) - })) - } - - pub fn gas_floor_target(&self) -> U256 { - let d = &self.args.flag_gas_floor_target; - U256::from_dec_str(d).unwrap_or_else(|_| { - die!("{}: Invalid target gas floor given. Must be a decimal unsigned 256-bit number.", d) - }) - } - - pub fn gas_ceil_target(&self) -> U256 { - let d = &self.args.flag_gas_cap; - U256::from_dec_str(d).unwrap_or_else(|_| { - die!("{}: Invalid target gas ceiling given. Must be a decimal unsigned 256-bit number.", d) - }) - } - - fn to_duration(s: &str) -> Duration { - let bad = |_| { - die!("{}: Invalid duration given. See parity --help for more information.", s) }; - Duration::from_secs(match s { - "twice-daily" => 12 * 60 * 60, - "half-hourly" => 30 * 60, - "1second" | "1 second" | "second" => 1, - "1minute" | "1 minute" | "minute" => 60, - "hourly" | "1hour" | "1 hour" | "hour" => 60 * 60, - "daily" | "1day" | "1 day" | "day" => 24 * 60 * 60, - x if x.ends_with("seconds") => FromStr::from_str(&x[0..x.len() - 7]).unwrap_or_else(bad), - x if x.ends_with("minutes") => FromStr::from_str(&x[0..x.len() - 7]).unwrap_or_else(bad) * 60, - x if x.ends_with("hours") => FromStr::from_str(&x[0..x.len() - 5]).unwrap_or_else(bad) * 60 * 60, - x if x.ends_with("days") => FromStr::from_str(&x[0..x.len() - 4]).unwrap_or_else(bad) * 24 * 60 * 60, - x => FromStr::from_str(x).unwrap_or_else(bad), - }) + + Ok(options) } - pub fn gas_pricer(&self) -> GasPricer { - match self.args.flag_gasprice.as_ref() { - Some(d) => { - GasPricer::Fixed(U256::from_dec_str(d).unwrap_or_else(|_| { - die!("{}: Invalid gas price given. Must be a decimal unsigned 256-bit number.", d) - })) - } - _ => { - let usd_per_tx: f32 = FromStr::from_str(&self.args.flag_usd_per_tx).unwrap_or_else(|_| { - die!("{}: Invalid basic transaction price given in USD. Must be a decimal number.", self.args.flag_usd_per_tx) - }); - match self.args.flag_usd_per_eth.as_str() { - "auto" => { - GasPricer::new_calibrated(GasPriceCalibratorOptions { - usd_per_tx: usd_per_tx, - recalibration_period: Self::to_duration(self.args.flag_price_update_period.as_str()), - }) - }, - x => { - let usd_per_eth: f32 = FromStr::from_str(x).unwrap_or_else(|_| die!("{}: Invalid ether price given in USD. 
Must be a decimal number.", x)); - let wei_per_usd: f32 = 1.0e18 / usd_per_eth; - let gas_per_tx: f32 = 21000.0; - let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx; - info!("Using a fixed conversion rate of Ξ1 = {} ({} wei/gas)", White.bold().paint(format!("US${}", usd_per_eth)), Yellow.bold().paint(format!("{}", wei_per_gas))); - GasPricer::Fixed(U256::from_dec_str(&format!("{:.0}", wei_per_gas)).unwrap()) - } - } - } + fn signer_config(&self) -> SignerConfiguration { + SignerConfiguration { + enabled: self.signer_enabled(), + port: self.args.flag_signer_port, + signer_path: self.directories().signer, } } - pub fn extra_data(&self) -> Bytes { + fn dapps_config(&self) -> DappsConfiguration { + DappsConfiguration { + enabled: self.dapps_enabled(), + interface: self.dapps_interface(), + port: self.args.flag_dapps_port, + user: self.args.flag_dapps_user.clone(), + pass: self.args.flag_dapps_pass.clone(), + dapps_path: self.directories().dapps, + } + } + + fn gas_pricer_config(&self) -> Result { + if let Some(d) = self.args.flag_gasprice.as_ref() { + return Ok(GasPricerConfig::Fixed(try!(to_u256(d)))); + } + + let usd_per_tx = try!(to_price(&self.args.flag_usd_per_tx)); + if "auto" == self.args.flag_usd_per_eth.as_str() { + return Ok(GasPricerConfig::Calibrated { + usd_per_tx: usd_per_tx, + recalibration_period: try!(to_duration(self.args.flag_price_update_period.as_str())), + }); + } + + let usd_per_eth = try!(to_price(&self.args.flag_usd_per_eth)); + let wei_per_usd: f32 = 1.0e18 / usd_per_eth; + let gas_per_tx: f32 = 21000.0; + let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx; + + info!( + "Using a fixed conversion rate of Ξ1 = {} ({} wei/gas)", + Colour::White.bold().paint(format!("US${}", usd_per_eth)), + Colour::Yellow.bold().paint(format!("{}", wei_per_gas)) + ); + + Ok(GasPricerConfig::Fixed(U256::from_dec_str(&format!("{:.0}", wei_per_gas)).unwrap())) + } + + fn extra_data(&self) -> Result { match self.args.flag_extradata.as_ref().or(self.args.flag_extra_data.as_ref()) { - Some(ref x) if x.len() <= 32 => x.as_bytes().to_owned(), - None => version_data(), - Some(ref x) => { die!("{}: Extra data must be at most 32 characters.", x); } + Some(ref x) if x.len() <= 32 => Ok(x.as_bytes().to_owned()), + None => Ok(version_data()), + Some(_) => Err("Extra data must be at most 32 characters".into()), } } - pub fn spec(&self) -> Spec { - match self.chain().as_str() { - "frontier" | "homestead" | "mainnet" => ethereum::new_frontier(), - "frontier-dogmatic" | "homestead-dogmatic" | "classic" => ethereum::new_classic(), - "morden" | "testnet" => ethereum::new_morden(), - "olympic" => ethereum::new_olympic(), - f => Spec::load(contents(f).unwrap_or_else(|_| { - die!("{}: Couldn't read chain specification file. 
Sure it exists?", f)
-			}).as_ref()),
-		}
-	}
-
-	pub fn normalize_enode(e: &str) -> Option<String> {
-		if is_valid_node_url(e) {
-			Some(e.to_owned())
-		} else {
-			None
-		}
-	}
-
-	pub fn init_nodes(&self, spec: &Spec) -> Vec<String> {
-		match self.args.flag_bootnodes {
-			Some(ref x) if !x.is_empty() => x.split(',').map(|s| {
-				Self::normalize_enode(s).unwrap_or_else(|| {
-					die!("{}: Invalid node address format given for a boot node.", s)
-				})
-			}).collect(),
-			Some(_) => Vec::new(),
-			None => spec.nodes().to_owned(),
-		}
-	}
-
-	pub fn init_reserved_nodes(&self) -> Vec<String> {
+	fn init_reserved_nodes(&self) -> Result<Vec<String>, String> {
 		use std::fs::File;
 
-		if let Some(ref path) = self.args.flag_reserved_peers {
-			let mut buffer = String::new();
-			let mut node_file = File::open(path).unwrap_or_else(|e| {
-				die!("Error opening reserved nodes file: {}", e);
-			});
-			node_file.read_to_string(&mut buffer).expect("Error reading reserved node file");
-			buffer.lines().map(|s| {
-				Self::normalize_enode(s).unwrap_or_else(|| {
-					die!("{}: Invalid node address format given for a reserved node.", s);
-				})
-			}).collect()
-		} else {
-			Vec::new()
+		match self.args.flag_reserved_peers {
+			Some(ref path) => {
+				let mut buffer = String::new();
+				let mut node_file = try!(File::open(path).map_err(|e| format!("Error opening reserved nodes file: {}", e)));
+				try!(node_file.read_to_string(&mut buffer).map_err(|_| "Error reading reserved node file"));
+				let lines = buffer.lines().map(|s| s.trim().to_owned()).filter(|s| s.len() > 0).collect::<Vec<_>>();
+				if let Some(invalid) = lines.iter().find(|s| !is_valid_node_url(s)) {
+					return Err(format!("Invalid node address format given for a reserved node: {}", invalid));
+				}
+				Ok(lines)
+			},
+			None => Ok(Vec::new())
 		}
 	}
 
-	pub fn net_addresses(&self) -> (Option<SocketAddr>, Option<SocketAddr>) {
-		let port = self.net_port();
-		let listen_address = Some(SocketAddr::new(IpAddr::from_str("0.0.0.0").unwrap(), port));
+	fn net_addresses(&self) -> Result<(Option<SocketAddr>, Option<SocketAddr>), String> {
+		let port = self.args.flag_port;
+		let listen_address = Some(SocketAddr::new("0.0.0.0".parse().unwrap(), port));
 		let public_address = if self.args.flag_nat.starts_with("extip:") {
 			let host = &self.args.flag_nat[6..];
-			let host = IpAddr::from_str(host).unwrap_or_else(|_| die!("Invalid host given with `--nat extip:{}`", host));
+			let host = try!(host.parse().map_err(|_| format!("Invalid host given with `--nat extip:{}`", host)));
 			Some(SocketAddr::new(host, port))
 		} else {
 			None
 		};
-		(listen_address, public_address)
+		Ok((listen_address, public_address))
 	}
 
-	pub fn net_settings(&self, spec: &Spec) -> NetworkConfiguration {
+	fn net_config(&self) -> Result<NetworkConfiguration, String> {
 		let mut ret = NetworkConfiguration::new();
 		ret.nat_enabled = self.args.flag_nat == "any" || self.args.flag_nat == "upnp";
-		ret.boot_nodes = self.init_nodes(spec);
-		let (listen, public) = self.net_addresses();
+		ret.boot_nodes = try!(to_bootnodes(&self.args.flag_bootnodes));
+		let (listen, public) = try!(self.net_addresses());
 		ret.listen_address = listen;
 		ret.public_address = public;
-		ret.use_secret = self.args.flag_node_key.as_ref().map(|s| Secret::from_str(s).unwrap_or_else(|_| s.sha3()));
+		ret.use_secret = self.args.flag_node_key.as_ref().map(|s| s.parse::<Secret>().unwrap_or_else(|_| s.sha3()));
 		ret.discovery_enabled = !self.args.flag_no_discovery && !self.args.flag_nodiscover;
-		ret.ideal_peers = self.max_peers();
-		let mut net_path = PathBuf::from(&self.path());
+		ret.max_peers = self.max_peers();
+		ret.min_peers = self.min_peers();
+		let mut net_path = PathBuf::from(self.directories().db);
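		// Note on the peer bounds assigned above: min_peers() prefers the
		// legacy --peers value when present and falls back to --min-peers,
		// while max_peers() is clamped with max(min_peers, --max-peers), so
		// a contradictory `--min-peers 40 --max-peers 25` resolves to
		// min = 40, max = 40 rather than an inverted range.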
net_path.push("network"); ret.config_path = Some(net_path.to_str().unwrap().to_owned()); - ret.reserved_nodes = self.init_reserved_nodes(); + ret.reserved_nodes = try!(self.init_reserved_nodes()); if self.args.flag_reserved_only { ret.non_reserved_mode = ::util::network::NonReservedPeerMode::Deny; } - ret + Ok(ret) } - fn find_best_db(&self, spec: &Spec) -> Option { - let mut ret = None; - let mut latest_era = None; - let jdb_types = [journaldb::Algorithm::Archive, journaldb::Algorithm::EarlyMerge, journaldb::Algorithm::OverlayRecent, journaldb::Algorithm::RefCounted]; - for i in jdb_types.into_iter() { - let db = journaldb::new(&append_path(&get_db_path(Path::new(&self.path()), *i, spec.genesis_header().hash(), spec.fork_name.as_ref()), "state"), *i, kvdb::DatabaseConfig::default()); - trace!(target: "parity", "Looking for best DB: {} at {:?}", i, db.latest_era()); - match (latest_era, db.latest_era()) { - (Some(best), Some(this)) if best >= this => {} - (_, None) => {} - (_, Some(this)) => { - latest_era = Some(this); - ret = Some(*i); - } - } - } - ret - } - - pub fn pruning_algorithm(&self, spec: &Spec) -> journaldb::Algorithm { - match self.args.flag_pruning.as_str() { - "archive" => journaldb::Algorithm::Archive, - "light" => journaldb::Algorithm::EarlyMerge, - "fast" => journaldb::Algorithm::OverlayRecent, - "basic" => journaldb::Algorithm::RefCounted, - "auto" => self.find_best_db(spec).unwrap_or(journaldb::Algorithm::OverlayRecent), - _ => { die!("Invalid pruning method given."); } + fn network_id(&self) -> Result, String> { + let net_id = self.args.flag_network_id.as_ref().or(self.args.flag_networkid.as_ref()); + match net_id { + Some(id) => Ok(Some(try!(to_u256(id)))), + None => Ok(None), } } - pub fn client_config(&self, spec: &Spec) -> ClientConfig { - let mut client_config = ClientConfig::default(); - - client_config.mode = self.mode(); - - match self.args.flag_cache { - Some(mb) => { - client_config.blockchain.max_cache_size = mb * 1024 * 1024; - client_config.blockchain.pref_cache_size = client_config.blockchain.max_cache_size * 3 / 4; - } - None => { - client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size; - client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; - } - } - // forced blockchain (blocks + extras) db cache size if provided - client_config.blockchain.db_cache_size = self.args.flag_db_cache_size.and_then(|cs| Some(cs / 2)); - - client_config.tracing.enabled = match self.args.flag_tracing.as_str() { - "auto" => Switch::Auto, - "on" => Switch::On, - "off" => Switch::Off, - _ => { die!("Invalid tracing method given!") } - }; - // forced trace db cache size if provided - client_config.tracing.db_cache_size = self.args.flag_db_cache_size.and_then(|cs| Some(cs / 4)); - - client_config.pruning = self.pruning_algorithm(spec); - - if self.args.flag_fat_db { - if let journaldb::Algorithm::Archive = client_config.pruning { - client_config.trie_spec = TrieSpec::Fat; - } else { - die!("Fatdb is not supported. 
- - // forced state db cache size if provided - client_config.db_cache_size = self.args.flag_db_cache_size.and_then(|cs| Some(cs / 4)); - - // compaction profile - client_config.db_compaction = match self.args.flag_db_compaction.as_str() { - "ssd" => DatabaseCompactionProfile::Default, - "hdd" => DatabaseCompactionProfile::HDD, - _ => { die!("Invalid compaction profile given (--db-compaction argument), expected hdd/ssd (default)."); } - }; - - if self.args.flag_jitvm { - client_config.vm_type = VMType::jit().unwrap_or_else(|| die!("Parity is built without the JIT EVM.")) - } - - trace!(target: "parity", "Using pruning strategy of {}", client_config.pruning); - client_config.name = self.args.flag_identity.clone(); - client_config.queue.max_mem_use = self.args.flag_queue_max_size; - client_config - } - - pub fn sync_config(&self, spec: &Spec) -> SyncConfig { - let mut sync_config = SyncConfig::default(); - sync_config.network_id = self.args.flag_network_id.as_ref().or(self.args.flag_networkid.as_ref()).map_or(spec.network_id(), |id| { - U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --network-id/--networkid", id)) - }); - sync_config - } - - pub fn account_service(&self) -> AccountProvider { - use ethcore::ethstore::{import_accounts, EthStore}; - use ethcore::ethstore::dir::{GethDirectory, DirectoryType, DiskDirectory}; - - // Secret Store - let passwords = self.args.flag_password.iter().flat_map(|filename| { - BufReader::new(&File::open(filename).unwrap_or_else(|_| die!("{} Unable to read password file. Ensure it exists and permissions are correct.", filename))) - .lines() - .map(|l| l.unwrap()) - .collect::<Vec<_>>() - .into_iter() - }).collect::<Vec<_>>(); - - if !self.args.flag_no_import_keys { - let dir_type = if self.args.flag_testnet { - DirectoryType::Testnet - } else { - DirectoryType::Main - }; - - let from = GethDirectory::open(dir_type); - let to = DiskDirectory::create(self.keys_path()).unwrap(); - // ignore error, cause geth may not exist - let _ = import_accounts(&from, &to); - } - - let dir = Box::new(DiskDirectory::create(self.keys_path()).unwrap()); - let iterations = self.keys_iterations(); - let account_service = AccountProvider::new(Box::new(EthStore::open_with_iterations(dir, iterations).unwrap())); - - if let Some(ref unlocks) = self.args.flag_unlock { - for d in unlocks.split(',') { - let a = Address::from_str(clean_0x(d)).unwrap_or_else(|_| { - die!("{}: Invalid address for --unlock. Must be 40 hex characters, without the 0x at the beginning.", d) - }); - if passwords.iter().find(|p| account_service.unlock_account_permanently(a, (*p).clone()).is_ok()).is_none() { - die!("No password given to unlock account {}. 
Pass the password using `--password`.", a); - } - } - } - account_service - } - - pub fn rpc_apis(&self) -> String { + fn rpc_apis(&self) -> String { self.args.flag_rpcapi.clone().unwrap_or(self.args.flag_jsonrpc_apis.clone()) } - pub fn rpc_cors(&self) -> Option<Vec<String>> { + fn rpc_cors(&self) -> Option<Vec<String>> { let cors = self.args.flag_jsonrpc_cors.clone().or(self.args.flag_rpccorsdomain.clone()); cors.map(|c| c.split(',').map(|s| s.to_owned()).collect()) } - pub fn rpc_hosts(&self) -> Option<Vec<String>> { + fn rpc_hosts(&self) -> Option<Vec<String>> { match self.args.flag_jsonrpc_hosts.as_ref() { "none" => return Some(Vec::new()), "all" => return None, @@ -440,65 +432,55 @@ impl Configuration { Some(hosts) } - fn geth_ipc_path(&self) -> String { - if cfg!(windows) { - r"\\.\pipe\geth.ipc".to_owned() - } else { - match self.args.flag_testnet { - true => path::ethereum::with_testnet("geth.ipc"), - false => path::ethereum::with_default("geth.ipc"), - }.to_str().unwrap().to_owned() - } - } - - pub fn keys_iterations(&self) -> u32 { - self.args.flag_keys_iterations - } - - pub fn ipc_settings(&self) -> IpcConfiguration { - IpcConfiguration { + fn ipc_config(&self) -> Result<IpcConfiguration, String> { + let conf = IpcConfiguration { enabled: !(self.args.flag_ipcdisable || self.args.flag_ipc_off || self.args.flag_no_ipc), socket_addr: self.ipc_path(), - apis: self.args.flag_ipcapi.clone().unwrap_or(self.args.flag_ipc_apis.clone()), - } + apis: try!(self.args.flag_ipcapi.clone().unwrap_or(self.args.flag_ipc_apis.clone()).parse()), + }; + + Ok(conf) } - pub fn network_settings(&self) -> NetworkSettings { - if self.args.flag_jsonrpc { println!("WARNING: Flag -j/--json-rpc is deprecated. JSON-RPC is now on by default. Ignoring."); } + fn http_config(&self) -> Result<HttpConfiguration, String> { + let conf = HttpConfiguration { + enabled: !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc, + interface: self.rpc_interface(), + port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), + apis: try!(self.rpc_apis().parse()), + hosts: self.rpc_hosts(), + cors: self.rpc_cors(), + }; + + Ok(conf) + } + + fn network_settings(&self) -> NetworkSettings { NetworkSettings { name: self.args.flag_identity.clone(), chain: self.chain(), max_peers: self.max_peers(), - network_port: self.net_port(), + min_peers: self.min_peers(), + network_port: self.args.flag_port, rpc_enabled: !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc, rpc_interface: self.args.flag_rpcaddr.clone().unwrap_or(self.args.flag_jsonrpc_interface.clone()), rpc_port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), } } - pub fn directories(&self) -> Directories { - let db_path = Configuration::replace_home( - self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path)); - ::std::fs::create_dir_all(&db_path).unwrap_or_else(|e| die_with_io_error("main", e)); + fn directories(&self) -> Directories { + let db_path = replace_home(self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path)); - let keys_path = Configuration::replace_home( + let keys_path = replace_home( if self.args.flag_testnet { "$HOME/.parity/testnet_keys" } else { &self.args.flag_keys_path } ); - ::std::fs::create_dir_all(&keys_path).unwrap_or_else(|e| die_with_io_error("main", e)); - let dapps_path = Configuration::replace_home(&self.args.flag_dapps_path); - ::std::fs::create_dir_all(&dapps_path).unwrap_or_else(|e| die_with_io_error("main", e)); - let signer_path = Configuration::replace_home(&self.args.flag_signer_path); - ::std::fs::create_dir_all(&signer_path).unwrap_or_else(|e| die_with_io_error("main", e)); - 
if self.args.flag_geth { - let geth_path = path::ethereum::default(); - ::std::fs::create_dir_all(geth_path.as_path()).unwrap_or_else( - |e| die!("Error while attempting to create '{}' for geth mode: {}", &geth_path.to_str().unwrap(), e)); - } + let dapps_path = replace_home(&self.args.flag_dapps_path); + let signer_path = replace_home(&self.args.flag_signer_path); Directories { keys: keys_path, @@ -508,33 +490,15 @@ impl Configuration { } } - pub fn keys_path(&self) -> String { - self.directories().keys - } - - pub fn path(&self) -> String { - self.directories().db - } - - fn replace_home(arg: &str) -> String { - arg.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) - } - fn ipc_path(&self) -> String { if self.args.flag_geth { - self.geth_ipc_path() - } else if cfg!(windows) { - r"\\.\pipe\parity.jsonrpc".to_owned() + geth_ipc_path(self.args.flag_testnet) } else { - Configuration::replace_home(&self.args.flag_ipcpath.clone().unwrap_or(self.args.flag_ipc_path.clone())) + parity_ipc_path(&self.args.flag_ipcpath.clone().unwrap_or(self.args.flag_ipc_path.clone())) } } - pub fn have_color(&self) -> bool { - !self.args.flag_no_color && !cfg!(windows) - } - - pub fn signer_port(&self) -> Option<u16> { + fn signer_port(&self) -> Option<u16> { if !self.signer_enabled() { None } else { @@ -542,7 +506,7 @@ impl Configuration { } } - pub fn rpc_interface(&self) -> String { + fn rpc_interface(&self) -> String { match self.network_settings().rpc_interface.as_str() { "all" => "0.0.0.0", "local" => "127.0.0.1", @@ -550,18 +514,18 @@ }.into() } - pub fn dapps_interface(&self) -> String { + fn dapps_interface(&self) -> String { match self.args.flag_dapps_interface.as_str() { "local" => "127.0.0.1", x => x, }.into() } - pub fn dapps_enabled(&self) -> bool { + fn dapps_enabled(&self) -> bool { !self.args.flag_dapps_off && !self.args.flag_no_dapps && cfg!(feature = "dapps") } - pub fn signer_enabled(&self) -> bool { + fn signer_enabled(&self) -> bool { if self.args.flag_force_signer { return true; } @@ -570,21 +534,7 @@ self.args.flag_geth || self.args.flag_no_signer; - return !signer_disabled; - } - - pub fn log_settings(&self) -> LogSettings { - let mut settings = LogSettings::new(); - if self.args.flag_no_color || cfg!(windows) { - settings = settings.no_color(); - } - if let Some(ref init) = self.args.flag_logging { - settings = settings.init(init.to_owned()) - } - if let Some(ref file) = self.args.flag_log_file { - settings = settings.file(file.to_owned()) - } - settings + !signer_disabled } } @@ -594,6 +544,18 @@ mod tests { use cli::USAGE; use docopt::Docopt; use util::network_settings::NetworkSettings; + use ethcore::client::{VMType, BlockID}; + use helpers::{replace_home, default_network_config}; + use run::RunCmd; + use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain}; + use presale::ImportWallet; + use account::{AccountCmd, NewAccount, ImportAccounts}; + use devtools::{RandomTempPath}; + use std::io::Write; + use std::fs::{File, create_dir}; + + #[derive(Debug, PartialEq)] + struct TestPasswordReader(&'static str); fn parse(args: &[&str]) -> Configuration { Configuration { @@ -601,6 +563,140 @@ } } + #[test] + fn test_command_version() { + let args = vec!["parity", "--version"]; + let conf = Configuration::parse(args).unwrap(); + assert_eq!(conf.into_command().unwrap(), Cmd::Version); + } + + #[test] + fn test_command_account_new() { + let args = vec!["parity", "account", "new"]; + let conf = Configuration::parse(args).unwrap(); 
+ assert_eq!(conf.into_command().unwrap(), Cmd::Account(AccountCmd::New(NewAccount { + iterations: 10240, + path: replace_home("$HOME/.parity/keys"), + password_file: None, + }))); + } + + #[test] + fn test_command_account_list() { + let args = vec!["parity", "account", "list"]; + let conf = Configuration::parse(args).unwrap(); + assert_eq!(conf.into_command().unwrap(), Cmd::Account( + AccountCmd::List(replace_home("$HOME/.parity/keys"))) + ); + } + + #[test] + fn test_command_account_import() { + let args = vec!["parity", "account", "import", "my_dir", "another_dir"]; + let conf = Configuration::parse(args).unwrap(); + assert_eq!(conf.into_command().unwrap(), Cmd::Account(AccountCmd::Import(ImportAccounts { + from: vec!["my_dir".into(), "another_dir".into()], + to: replace_home("$HOME/.parity/keys"), + }))); + } + + #[test] + fn test_command_wallet_import() { + let args = vec!["parity", "wallet", "import", "my_wallet.json", "--password", "pwd"]; + let conf = Configuration::parse(args).unwrap(); + assert_eq!(conf.into_command().unwrap(), Cmd::ImportPresaleWallet(ImportWallet { + iterations: 10240, + path: replace_home("$HOME/.parity/keys"), + wallet_path: "my_wallet.json".into(), + password_file: Some("pwd".into()), + })); + } + + #[test] + fn test_command_blockchain_import() { + let args = vec!["parity", "import", "blockchain.json"]; + let conf = Configuration::parse(args).unwrap(); + assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Import(ImportBlockchain { + spec: Default::default(), + logger_config: Default::default(), + cache_config: Default::default(), + dirs: Default::default(), + file_path: Some("blockchain.json".into()), + format: None, + pruning: Default::default(), + compaction: Default::default(), + wal: true, + mode: Default::default(), + tracing: Default::default(), + vm_type: VMType::Interpreter, + }))); + } + + #[test] + fn test_command_blockchain_export() { + let args = vec!["parity", "export", "blockchain.json"]; + let conf = Configuration::parse(args).unwrap(); + assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain { + spec: Default::default(), + logger_config: Default::default(), + cache_config: Default::default(), + dirs: Default::default(), + file_path: Some("blockchain.json".into()), + pruning: Default::default(), + format: Default::default(), + compaction: Default::default(), + wal: true, + mode: Default::default(), + tracing: Default::default(), + from_block: BlockID::Number(1), + to_block: BlockID::Latest, + }))); + } + + #[test] + fn test_command_signer_new_token() { + let args = vec!["parity", "signer", "new-token"]; + let conf = Configuration::parse(args).unwrap(); + let expected = replace_home("$HOME/.parity/signer"); + assert_eq!(conf.into_command().unwrap(), Cmd::SignerToken(expected)); + } + + #[test] + fn test_run_cmd() { + let args = vec!["parity"]; + let conf = Configuration::parse(args).unwrap(); + assert_eq!(conf.into_command().unwrap(), Cmd::Run(RunCmd { + cache_config: Default::default(), + dirs: Default::default(), + spec: Default::default(), + pruning: Default::default(), + daemon: None, + logger_config: Default::default(), + miner_options: Default::default(), + http_conf: Default::default(), + ipc_conf: Default::default(), + net_conf: default_network_config(), + network_id: None, + acc_conf: Default::default(), + gas_pricer: Default::default(), + miner_extras: Default::default(), + mode: Default::default(), + tracing: Default::default(), + compaction: Default::default(), + wal: 
true, + vm_type: Default::default(), + enable_network: true, + geth_compatibility: false, + signer_port: Some(8180), + net_settings: Default::default(), + dapps_conf: Default::default(), + signer_conf: Default::default(), + ui: false, + name: "".into(), + custom_bootnodes: false, + })); + } + #[test] fn should_parse_network_settings() { // given @@ -612,7 +708,8 @@ assert_eq!(conf.network_settings(), NetworkSettings { name: "testname".to_owned(), chain: "morden".to_owned(), - max_peers: 25, + max_peers: 50, + min_peers: 25, network_port: 30303, rpc_enabled: true, rpc_interface: "local".to_owned(), @@ -691,5 +788,16 @@ // then assert_eq!(conf0.signer_enabled(), false); } + + #[test] + fn should_not_bail_on_empty_line_in_reserved_peers() { + let temp = RandomTempPath::new(); + create_dir(temp.as_str().to_owned()).unwrap(); + let filename = temp.as_str().to_owned() + "/peers"; + File::create(filename.clone()).unwrap().write_all(b" \n\t\n").unwrap(); + let args = vec!["parity", "--reserved-peers", &filename]; + let conf = Configuration::parse(args).unwrap(); + assert!(conf.init_reserved_nodes().is_ok()); + } } diff --git a/parity/dapps.rs b/parity/dapps.rs index 917c59fc6..9fb01a30a 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -15,17 +15,17 @@ // along with Parity. If not, see <http://www.gnu.org/licenses/>. use std::sync::Arc; -use std::str::FromStr; use std::net::SocketAddr; use util::panics::PanicHandler; -use die::*; use rpc_apis; +use helpers::replace_home; #[cfg(feature = "dapps")] pub use ethcore_dapps::Server as WebappServer; #[cfg(not(feature = "dapps"))] pub struct WebappServer; +#[derive(Debug, PartialEq, Clone)] pub struct Configuration { pub enabled: bool, pub interface: String, @@ -35,18 +35,31 @@ pub struct Configuration { pub dapps_path: String, } +impl Default for Configuration { + fn default() -> Self { + Configuration { + enabled: true, + interface: "127.0.0.1".into(), + port: 8080, + user: None, + pass: None, + dapps_path: replace_home("$HOME/.parity/dapps"), + } + } +} + pub struct Dependencies { pub panic_handler: Arc<PanicHandler>, pub apis: Arc<rpc_apis::Dependencies>, } -pub fn new(configuration: Configuration, deps: Dependencies) -> Option<WebappServer> { +pub fn new(configuration: Configuration, deps: Dependencies) -> Result<Option<WebappServer>, String> { if !configuration.enabled { - return None; + return Ok(None); } let url = format!("{}:{}", configuration.interface, configuration.port); - let addr = SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid Webapps listen host/port given.", url)); + let addr = try!(url.parse().map_err(|_| format!("Invalid Webapps listen host/port given: {}", url))); let auth = configuration.user.as_ref().map(|username| { let password = configuration.pass.as_ref().map_or_else(|| { @@ -59,7 +72,7 @@ pub fn new(configuration: Configuration, deps: Dependencies) -> Option<WebappServer>, -) -> ! { - die!("Your Parity version has been compiled without WebApps support.") +) -> Result<WebappServer, String> { + Err("Your Parity version has been compiled without WebApps support.".into()) }
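Editor's note: the `#[cfg(feature = "dapps")]` pair above is the usual Cargo feature-gate pattern: one real implementation and one stub with the same signature, so callers compile either way. A self-contained sketch of the same shape (the feature name `demo` and the function are hypothetical):

```rust
// Real implementation, only compiled when the Cargo feature is enabled.
#[cfg(feature = "demo")]
fn start_server() -> Result<String, String> {
    Ok("server started".into())
}

// Stub with the identical signature, so callers compile either way;
// it returns an Err rather than aborting, matching the die!() -> Result
// change in the diff above.
#[cfg(not(feature = "demo"))]
fn start_server() -> Result<String, String> {
    Err("compiled without the demo feature".into())
}

fn main() {
    match start_server() {
        Ok(msg) => println!("{}", msg),
        Err(e) => println!("ERROR: {}", e),
    }
}
```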
{ - die!("Your Parity version has been compiled without WebApps support.") +) -> Result { + Err("Your Parity version has been compiled without WebApps support.".into()) } #[cfg(feature = "dapps")] @@ -78,7 +91,7 @@ pub fn setup_dapps_server( dapps_path: String, url: &SocketAddr, auth: Option<(String, String)> -) -> WebappServer { +) -> Result { use ethcore_dapps as dapps; let server = dapps::ServerBuilder::new(dapps_path); @@ -93,15 +106,14 @@ pub fn setup_dapps_server( }; match start_result { - Err(dapps::ServerError::IoError(err)) => die_with_io_error("WebApps", err), - Err(e) => die!("WebApps: {:?}", e), + Err(dapps::ServerError::IoError(err)) => Err(format!("WebApps io error: {}", err)), + Err(e) => Err(format!("WebApps error: {:?}", e)), Ok(server) => { server.set_panic_handler(move || { deps.panic_handler.notify_all("Panic in WebApp thread.".to_owned()); }); - server + Ok(server) }, } - } diff --git a/parity/deprecated.rs b/parity/deprecated.rs new file mode 100644 index 000000000..5d3a74913 --- /dev/null +++ b/parity/deprecated.rs @@ -0,0 +1,148 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::fmt; +use cli::Args; + +#[derive(Debug, PartialEq)] +pub enum Deprecated { + DoesNothing(&'static str), + Replaced(&'static str, &'static str), +} + +impl fmt::Display for Deprecated { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + Deprecated::DoesNothing(s) => write!(f, "Option '{}' does nothing. It's on by default", s), + Deprecated::Replaced(old, new) => write!(f, "Option '{}' is deprecated. 
Please use '{}' instead", old, new), + } + } +} + +impl Deprecated { + fn jsonrpc() -> Self { + Deprecated::DoesNothing("--jsonrpc") + } + + fn rpc() -> Self { + Deprecated::DoesNothing("--rpc") + } + + fn jsonrpc_off() -> Self { + Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc") + } + + fn webapp() -> Self { + Deprecated::DoesNothing("--webapp") + } + + fn dapps_off() -> Self { + Deprecated::Replaced("--dapps-off", "--no-dapps") + } + + fn ipcdisable() -> Self { + Deprecated::Replaced("--ipcdisable", "--no-ipc") + } + + fn ipc_off() -> Self { + Deprecated::Replaced("--ipc-off", "--no-ipc") + } + + fn etherbase() -> Self { + Deprecated::Replaced("--etherbase", "--author") + } + + fn extradata() -> Self { + Deprecated::Replaced("--extradata", "--extra-data") + } +} + +pub fn find_deprecated(args: &Args) -> Vec<Deprecated> { + let mut result = vec![]; + + if args.flag_jsonrpc { + result.push(Deprecated::jsonrpc()); + } + + if args.flag_rpc { + result.push(Deprecated::rpc()); + } + + if args.flag_jsonrpc_off { + result.push(Deprecated::jsonrpc_off()); + } + + if args.flag_webapp { + result.push(Deprecated::webapp()) + } + + if args.flag_dapps_off { + result.push(Deprecated::dapps_off()); + } + + if args.flag_ipcdisable { + result.push(Deprecated::ipcdisable()); + } + + if args.flag_ipc_off { + result.push(Deprecated::ipc_off()); + } + + if args.flag_etherbase.is_some() { + result.push(Deprecated::etherbase()); + } + + if args.flag_extradata.is_some() { + result.push(Deprecated::extradata()); + } + + result +} + +#[cfg(test)] +mod tests { + use cli::Args; + use super::{Deprecated, find_deprecated}; + + #[test] + fn test_find_deprecated() { + assert_eq!(find_deprecated(&Args::default()), vec![]); + assert_eq!(find_deprecated(&{ + let mut args = Args::default(); + args.flag_jsonrpc = true; + args.flag_rpc = true; + args.flag_jsonrpc_off = true; + args.flag_webapp = true; + args.flag_dapps_off = true; + args.flag_ipcdisable = true; + args.flag_ipc_off = true; + args.flag_etherbase = Some(Default::default()); + args.flag_extradata = Some(Default::default()); + args + }), vec![ + Deprecated::jsonrpc(), + Deprecated::rpc(), + Deprecated::jsonrpc_off(), + Deprecated::webapp(), + Deprecated::dapps_off(), + Deprecated::ipcdisable(), + Deprecated::ipc_off(), + Deprecated::etherbase(), + Deprecated::extradata(), + ]); + } +} 
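Editor's note: `find_deprecated` is deliberately a pure function over the parsed `Args`, which is what makes the table-driven unit test above possible. A trimmed, runnable sketch of the same shape, with a two-flag stand-in for Parity's full `Args` struct:

```rust
use std::fmt;

// Two-flag stand-in for Parity's full Args struct.
#[derive(Default)]
struct Args {
    flag_jsonrpc: bool,
    flag_etherbase: Option<String>,
}

#[derive(Debug, PartialEq)]
enum Deprecated {
    DoesNothing(&'static str),
    Replaced(&'static str, &'static str),
}

impl fmt::Display for Deprecated {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Deprecated::DoesNothing(s) => write!(f, "Option '{}' does nothing. It's on by default", s),
            Deprecated::Replaced(old, new) => write!(f, "Option '{}' is deprecated. Please use '{}' instead", old, new),
        }
    }
}

// Pure over the arguments: easy to unit-test, no side effects.
fn find_deprecated(args: &Args) -> Vec<Deprecated> {
    let mut result = vec![];
    if args.flag_jsonrpc {
        result.push(Deprecated::DoesNothing("--jsonrpc"));
    }
    if args.flag_etherbase.is_some() {
        result.push(Deprecated::Replaced("--etherbase", "--author"));
    }
    result
}

fn main() {
    let args = Args { flag_jsonrpc: true, ..Default::default() };
    // Warnings are printed but never fatal, mirroring start() in main.rs.
    for d in find_deprecated(&args) {
        println!("{}", d);
    }
}
```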
diff --git a/parity/die.rs b/parity/die.rs deleted file mode 100644 index 80b31f619..000000000 --- a/parity/die.rs +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2015, 2016 Ethcore (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see <http://www.gnu.org/licenses/>. - -use std; -use ethcore; -use ethcore::client::Error as ClientError; -use util::UtilError; -use std::process::exit; - -#[macro_export] -macro_rules! die { - ($($arg:tt)*) => (::die::die_with_message(&format!("{}", format_args!($($arg)*)))); -} - -pub fn die_with_error(module: &'static str, e: ethcore::error::Error) -> ! { - use ethcore::error::Error; - - match e { - Error::Util(UtilError::StdIo(e)) => die_with_io_error(module, e), - Error::Client(ClientError::Trace(e)) => die_with_message(&format!("{}", e)), - _ => { - trace!(target: module, "{:?}", e); - die!("{}: {}", module, e); - } - } -} - -pub fn die_with_io_error(module: &'static str, e: std::io::Error) -> ! { - trace!(target: module, "{:?}", e); - - match e.kind() { - std::io::ErrorKind::PermissionDenied => { - die!("{}: No permissions to bind to specified port.", module) - }, - std::io::ErrorKind::AddrInUse => { - die!("{}: Specified address is already in use. Please make sure that nothing is listening on the same port or try using a different one.", module) - }, - std::io::ErrorKind::AddrNotAvailable => { - die!("{}: Could not use specified interface or given address is invalid.", module) - }, - _ => die!("{}: {}", module, e), - } -} - -pub fn die_with_message(msg: &str) -> ! { - println!("ERROR: {}", msg); - exit(1); -} diff --git a/parity/dir.rs b/parity/dir.rs new file mode 100644 index 000000000..bb92e1277 --- /dev/null +++ b/parity/dir.rs @@ -0,0 +1,86 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see <http://www.gnu.org/licenses/>. + +use std::fs; +use std::path::{PathBuf, Path}; +use util::{H64, H256}; +use util::journaldb::Algorithm; +use helpers::replace_home; + +// this const is irrelevant because we have migrations now, +// but we still use it for backwards compatibility +const LEGACY_CLIENT_DB_VER_STR: &'static str = "5.3"; + +#[derive(Debug, PartialEq)] +pub struct Directories { + pub db: String, + pub keys: String, + pub signer: String, + pub dapps: String, +} + +impl Default for Directories { + fn default() -> Self { + Directories { + db: replace_home("$HOME/.parity"), + keys: replace_home("$HOME/.parity/keys"), + signer: replace_home("$HOME/.parity/signer"), + dapps: replace_home("$HOME/.parity/dapps"), + } + } +} + +impl Directories { + pub fn create_dirs(&self) -> Result<(), String> { + try!(fs::create_dir_all(&self.db).map_err(|e| e.to_string())); + try!(fs::create_dir_all(&self.keys).map_err(|e| e.to_string())); + try!(fs::create_dir_all(&self.signer).map_err(|e| e.to_string())); + try!(fs::create_dir_all(&self.dapps).map_err(|e| e.to_string())); + Ok(()) + } + + /// Get the root path for database + pub fn db_version_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf { + let mut dir = Path::new(&self.db).to_path_buf(); + dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default())); + dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str())); + dir + } + + /// Get the path for the databases given the genesis_hash and information on the databases. 
+ pub fn client_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf { + let mut dir = self.db_version_path(genesis_hash, fork_name, pruning); + dir.push("db"); + dir + } +} + +#[cfg(test)] +mod tests { + use super::Directories; + use helpers::replace_home; + + #[test] + fn test_default_directories() { + let expected = Directories { + db: replace_home("$HOME/.parity"), + keys: replace_home("$HOME/.parity/keys"), + signer: replace_home("$HOME/.parity/signer"), + dapps: replace_home("$HOME/.parity/dapps"), + }; + assert_eq!(expected, Directories::default()); + } +} diff --git a/parity/helpers.rs b/parity/helpers.rs new file mode 100644 index 000000000..8e032f13c --- /dev/null +++ b/parity/helpers.rs @@ -0,0 +1,403 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see <http://www.gnu.org/licenses/>. + +use std::{io, env}; +use std::io::{Write, Read, BufReader, BufRead}; +use std::time::Duration; +use std::path::Path; +use std::fs::File; +use util::{clean_0x, U256, Uint, Address, path, is_valid_node_url, H256, CompactionProfile}; +use util::journaldb::Algorithm; +use ethcore::client::{Mode, BlockID, Switch, VMType, DatabaseCompactionProfile, ClientConfig}; +use ethcore::miner::PendingSet; +use cache::CacheConfig; +use dir::Directories; +use params::Pruning; +use upgrade::upgrade; +use migration::migrate; + +pub fn to_duration(s: &str) -> Result<Duration, String> { + to_seconds(s).map(Duration::from_secs) +} + +fn to_seconds(s: &str) -> Result<u64, String> { + let bad = |_| { + format!("{}: Invalid duration given. See parity --help for more information.", s) + }; + + match s { + "twice-daily" => Ok(12 * 60 * 60), + "half-hourly" => Ok(30 * 60), + "1second" | "1 second" | "second" => Ok(1), + "1minute" | "1 minute" | "minute" => Ok(60), + "hourly" | "1hour" | "1 hour" | "hour" => Ok(60 * 60), + "daily" | "1day" | "1 day" | "day" => Ok(24 * 60 * 60), + x if x.ends_with("seconds") => x[0..x.len() - 7].parse().map_err(bad), + x if x.ends_with("minutes") => x[0..x.len() - 7].parse::<u64>().map_err(bad).map(|x| x * 60), + x if x.ends_with("hours") => x[0..x.len() - 5].parse::<u64>().map_err(bad).map(|x| x * 60 * 60), + x if x.ends_with("days") => x[0..x.len() - 4].parse::<u64>().map_err(bad).map(|x| x * 24 * 60 * 60), + x => x.parse().map_err(bad), + } +} + +pub fn to_mode(s: &str, timeout: u64, alarm: u64) -> Result<Mode, String> { + match s { + "active" => Ok(Mode::Active), + "passive" => Ok(Mode::Passive(Duration::from_secs(timeout), Duration::from_secs(alarm))), + "dark" => Ok(Mode::Dark(Duration::from_secs(timeout))), + _ => Err(format!("{}: Invalid value for --mode. Must be one of active, passive or dark.", s)), + } +}
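Editor's note: `to_seconds` mixes a word table with suffix-stripping; the suffix arms work because, e.g., `"seconds"` is 7 characters long, so `x[0..x.len() - 7]` is the numeric prefix. A compact, runnable sketch of the same idea (word forms like `"hourly"` omitted):

```rust
// Parse "15seconds", "2minutes", "3hours", "4days", or a bare number of
// seconds, using suffix-stripping like Parity's to_seconds helper.
fn to_seconds(s: &str) -> Result<u64, String> {
    let suffixes: &[(&str, u64)] = &[("seconds", 1), ("minutes", 60), ("hours", 3600), ("days", 86400)];
    for &(suffix, mult) in suffixes {
        if let Some(prefix) = s.strip_suffix(suffix) {
            return prefix
                .parse::<u64>()
                .map(|n| n * mult)
                .map_err(|_| format!("{}: Invalid duration given.", s));
        }
    }
    s.parse().map_err(|_| format!("{}: Invalid duration given.", s))
}

fn main() {
    assert_eq!(to_seconds("15seconds"), Ok(15));
    assert_eq!(to_seconds("2hours"), Ok(7200));
    assert!(to_seconds("soon").is_err());
}
```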
+pub fn to_block_id(s: &str) -> Result<BlockID, String> { + if s == "latest" { + Ok(BlockID::Latest) + } else if let Ok(num) = s.parse() { + Ok(BlockID::Number(num)) + } else if let Ok(hash) = s.parse() { + Ok(BlockID::Hash(hash)) + } else { + Err("Invalid block.".into()) + } +} + +pub fn to_u256(s: &str) -> Result<U256, String> { + if let Ok(decimal) = U256::from_dec_str(s) { + Ok(decimal) + } else if let Ok(hex) = clean_0x(s).parse() { + Ok(hex) + } else { + Err(format!("Invalid numeric value: {}", s)) + } +} + +pub fn to_pending_set(s: &str) -> Result<PendingSet, String> { + match s { + "cheap" => Ok(PendingSet::AlwaysQueue), + "strict" => Ok(PendingSet::AlwaysSealing), + "lenient" => Ok(PendingSet::SealingOrElseQueue), + other => Err(format!("Invalid pending set value: {:?}", other)), + } +} + +pub fn to_address(s: Option<String>) -> Result<Address, String> { + match s { + Some(ref a) => clean_0x(a).parse().map_err(|_| format!("Invalid address: {:?}", a)), + None => Ok(Address::default()) + } +} + +pub fn to_addresses(s: &Option<String>) -> Result<Vec<Address>, String> { + match *s { + Some(ref adds) if !adds.is_empty() => adds.split(',') + .map(|a| clean_0x(a).parse().map_err(|_| format!("Invalid address: {:?}", a))) + .collect(), + _ => Ok(Vec::new()), + } +} + +/// Tries to parse string as a price. +pub fn to_price(s: &str) -> Result<f32, String> { + s.parse::<f32>().map_err(|_| format!("Invalid transaction price '{}' given. Must be a decimal number.", s)) +} + +/// Replaces `$HOME` str with home directory path. +pub fn replace_home(arg: &str) -> String { + // the $HOME directory on mac os should be `~/Library` or `~/Library/Application Support` + let r = arg.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()); + r.replace("/", &::std::path::MAIN_SEPARATOR.to_string()) +} + +/// Flush output buffer. +pub fn flush_stdout() { + io::stdout().flush().expect("stdout is flushable; qed"); +} + +/// Returns default geth ipc path. +pub fn geth_ipc_path(testnet: bool) -> String { + // Windows path should not be hardcoded here. + // Instead it should be a part of path::ethereum + if cfg!(windows) { + return r"\\.\pipe\geth.ipc".to_owned(); + } + + if testnet { + path::ethereum::with_testnet("geth.ipc").to_str().unwrap().to_owned() + } else { + path::ethereum::with_default("geth.ipc").to_str().unwrap().to_owned() + } +} + +/// Formats and returns parity ipc path. +pub fn parity_ipc_path(s: &str) -> String { + // Windows path should not be hardcoded here. + if cfg!(windows) { + return r"\\.\pipe\parity.jsonrpc".to_owned(); + } + + replace_home(s) +}
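Editor's note: `to_u256` accepts either decimal or 0x-prefixed hex, trying decimal first. The same two-step fallback, sketched over `u64` so it runs without the `util` crate (`clean_0x` simply strips a leading `0x`):

```rust
fn clean_0x(s: &str) -> &str {
    if s.starts_with("0x") { &s[2..] } else { s }
}

// Try decimal first, then hex, mirroring to_u256 above.
fn to_u64(s: &str) -> Result<u64, String> {
    if let Ok(decimal) = s.parse::<u64>() {
        Ok(decimal)
    } else if let Ok(hex) = u64::from_str_radix(clean_0x(s), 16) {
        Ok(hex)
    } else {
        Err(format!("Invalid numeric value: {}", s))
    }
}

fn main() {
    assert_eq!(to_u64("11"), Ok(11));
    assert_eq!(to_u64("0x11"), Ok(17));
    assert!(to_u64("u").is_err());
}
```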
+/// Validates and formats bootnodes option. +pub fn to_bootnodes(bootnodes: &Option<String>) -> Result<Vec<String>, String> { + match *bootnodes { + Some(ref x) if !x.is_empty() => x.split(',').map(|s| { + if is_valid_node_url(s) { + Ok(s.to_owned()) + } else { + Err(format!("Invalid node address format given for a boot node: {}", s)) + } + }).collect(), + Some(_) => Ok(vec![]), + None => Ok(vec![]) + } +} + +#[cfg(test)] +pub fn default_network_config() -> ::util::NetworkConfiguration { + use util::{NetworkConfiguration, NonReservedPeerMode}; + NetworkConfiguration { + config_path: Some(replace_home("$HOME/.parity/network")), + listen_address: Some("0.0.0.0:30303".parse().unwrap()), + public_address: None, + udp_port: None, + nat_enabled: true, + discovery_enabled: true, + boot_nodes: Vec::new(), + use_secret: None, + max_peers: 50, + min_peers: 25, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Accept, + } +} + +#[cfg_attr(feature = "dev", allow(too_many_arguments))] +pub fn to_client_config( + cache_config: &CacheConfig, + dirs: &Directories, + genesis_hash: H256, + mode: Mode, + tracing: Switch, + pruning: Pruning, + compaction: DatabaseCompactionProfile, + wal: bool, + vm_type: VMType, + name: String, + fork_name: Option<&String>, + ) -> ClientConfig { + let mut client_config = ClientConfig::default(); + + let mb = 1024 * 1024; + // in bytes + client_config.blockchain.max_cache_size = cache_config.blockchain() as usize * mb; + // in bytes + client_config.blockchain.pref_cache_size = cache_config.blockchain() as usize * 3 / 4 * mb; + // db blockchain cache size, in megabytes + client_config.blockchain.db_cache_size = Some(cache_config.db_blockchain_cache_size() as usize); + // db state cache size, in megabytes + client_config.db_cache_size = Some(cache_config.db_state_cache_size() as usize); + // db queue cache size, in bytes + client_config.queue.max_mem_use = cache_config.queue() as usize * mb; + + client_config.mode = mode; + client_config.tracing.enabled = tracing; + client_config.pruning = pruning.to_algorithm(dirs, genesis_hash, fork_name); + client_config.db_compaction = compaction; + client_config.db_wal = wal; + client_config.vm_type = vm_type; + client_config.name = name; + client_config +} + +pub fn execute_upgrades( + dirs: &Directories, + genesis_hash: H256, + fork_name: Option<&String>, + pruning: Algorithm, + compaction_profile: CompactionProfile +) -> Result<(), String> { + + match upgrade(Some(&dirs.db)) { + Ok(upgrades_applied) if upgrades_applied > 0 => { + debug!("Executed {} upgrade scripts - ok", upgrades_applied); + }, + Err(e) => { + return Err(format!("Error upgrading parity data: {:?}", e)); + }, + _ => {}, + } + + let client_path = dirs.db_version_path(genesis_hash, fork_name, pruning); + migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e)) +} + +/// Prompts user asking for password. +pub fn password_prompt() -> Result<String, String> { + use rpassword::read_password; + + println!("Please note that password is NOT RECOVERABLE."); + print!("Type password: "); + flush_stdout(); + + let password = read_password().unwrap(); + + print!("Repeat password: "); + flush_stdout(); + + let password_repeat = read_password().unwrap(); + + if password != password_repeat { + return Err("Passwords do not match!".into()); + } + + Ok(password) +} + +/// Read a password from password file. +pub fn password_from_file<P>(path: P) -> Result<String, String> where P: AsRef<Path> { + let mut file = try!(File::open(path).map_err(|_| "Unable to open password file.")); + let mut file_content = String::new(); + try!(file.read_to_string(&mut file_content).map_err(|_| "Unable to read password file.")); + // strip the trailing newline + Ok((&file_content[..file_content.len() - 1]).to_owned()) +} + +/// Reads passwords from files. Treats each line as a separate password. +pub fn passwords_from_files(files: Vec<String>) -> Result<Vec<String>, String> { + let passwords = files.iter().map(|filename| { + let file = try!(File::open(filename).map_err(|_| format!("{} Unable to read password file. Ensure it exists and permissions are correct.", filename))); + let reader = BufReader::new(&file); + let lines = reader.lines() + .map(|l| l.unwrap()) + .collect::<Vec<String>>(); + Ok(lines) + }).collect::<Result<Vec<Vec<String>>, String>>(); + Ok(try!(passwords).into_iter().flat_map(|x| x).collect()) +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + use util::{U256}; + use ethcore::client::{Mode, BlockID}; + use ethcore::miner::PendingSet; + use super::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_address, to_price, geth_ipc_path, to_bootnodes}; + + #[test] + fn test_to_duration() { + assert_eq!(to_duration("twice-daily").unwrap(), Duration::from_secs(12 * 60 * 60)); + assert_eq!(to_duration("half-hourly").unwrap(), Duration::from_secs(30 * 60)); + assert_eq!(to_duration("1second").unwrap(), Duration::from_secs(1)); + assert_eq!(to_duration("2seconds").unwrap(), Duration::from_secs(2)); + assert_eq!(to_duration("15seconds").unwrap(), Duration::from_secs(15)); + assert_eq!(to_duration("1minute").unwrap(), Duration::from_secs(1 * 60)); + assert_eq!(to_duration("2minutes").unwrap(), Duration::from_secs(2 * 60)); + assert_eq!(to_duration("15minutes").unwrap(), Duration::from_secs(15 * 60)); + assert_eq!(to_duration("hourly").unwrap(), Duration::from_secs(60 * 60)); + assert_eq!(to_duration("daily").unwrap(), Duration::from_secs(24 * 60 * 60)); + assert_eq!(to_duration("1hour").unwrap(), Duration::from_secs(1 * 60 * 60)); + assert_eq!(to_duration("2hours").unwrap(), Duration::from_secs(2 * 60 * 60)); + assert_eq!(to_duration("15hours").unwrap(), Duration::from_secs(15 * 60 * 60)); + assert_eq!(to_duration("1day").unwrap(), Duration::from_secs(1 * 24 * 60 * 60)); + assert_eq!(to_duration("2days").unwrap(), Duration::from_secs(2 * 24 * 60 * 60)); + assert_eq!(to_duration("15days").unwrap(), Duration::from_secs(15 * 24 * 60 * 60)); + } + + #[test] + fn test_to_mode() { + assert_eq!(to_mode("active", 0, 0).unwrap(), Mode::Active); + assert_eq!(to_mode("passive", 10, 20).unwrap(), Mode::Passive(Duration::from_secs(10), Duration::from_secs(20))); + assert_eq!(to_mode("dark", 20, 30).unwrap(), Mode::Dark(Duration::from_secs(20))); + assert!(to_mode("other", 20, 30).is_err()); + } + + #[test] + fn test_to_block_id() { + assert_eq!(to_block_id("latest").unwrap(), BlockID::Latest); + assert_eq!(to_block_id("0").unwrap(), BlockID::Number(0)); + assert_eq!(to_block_id("2").unwrap(), BlockID::Number(2)); + assert_eq!(to_block_id("15").unwrap(), BlockID::Number(15)); + assert_eq!( + to_block_id("9fc84d84f6a785dc1bd5abacfcf9cbdd3b6afb80c0f799bfb2fd42c44a0c224e").unwrap(), + BlockID::Hash("9fc84d84f6a785dc1bd5abacfcf9cbdd3b6afb80c0f799bfb2fd42c44a0c224e".parse().unwrap()) + ); + } + + #[test] + fn test_to_u256() { + assert_eq!(to_u256("0").unwrap(), U256::from(0)); + assert_eq!(to_u256("11").unwrap(), U256::from(11)); + assert_eq!(to_u256("0x11").unwrap(), U256::from(17)); + assert!(to_u256("u").is_err()) + } + 
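Editor's note: `password_from_file` above slices off the final byte to drop the trailing newline, which would panic on an empty file. A gentler std-only sketch trims instead; same intent, and the file name in `main` is purely illustrative:

```rust
use std::fs::File;
use std::io::Read;
use std::path::Path;

// Like password_from_file above, but trims the trailing newline instead of
// slicing off the last byte, so an empty file or a file without a final
// newline cannot panic.
fn password_from_file<P: AsRef<Path>>(path: P) -> Result<String, String> {
    let mut file = File::open(path).map_err(|_| "Unable to open password file.".to_owned())?;
    let mut content = String::new();
    file.read_to_string(&mut content).map_err(|_| "Unable to read password file.".to_owned())?;
    Ok(content.trim_end_matches(&['\r', '\n'][..]).to_owned())
}

fn main() {
    match password_from_file("password.txt") {
        Ok(pwd) => println!("password of {} bytes read", pwd.len()),
        Err(e) => println!("{}", e),
    }
}
```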
#[test] + fn test_pending_set() { + assert_eq!(to_pending_set("cheap").unwrap(), PendingSet::AlwaysQueue); + assert_eq!(to_pending_set("strict").unwrap(), PendingSet::AlwaysSealing); + assert_eq!(to_pending_set("lenient").unwrap(), PendingSet::SealingOrElseQueue); + assert!(to_pending_set("othe").is_err()); + } + + #[test] + fn test_to_address() { + assert_eq!( + to_address(Some("0xD9A111feda3f362f55Ef1744347CDC8Dd9964a41".into())).unwrap(), + "D9A111feda3f362f55Ef1744347CDC8Dd9964a41".parse().unwrap() + ); + assert_eq!( + to_address(Some("D9A111feda3f362f55Ef1744347CDC8Dd9964a41".into())).unwrap(), + "D9A111feda3f362f55Ef1744347CDC8Dd9964a41".parse().unwrap() + ); + assert_eq!(to_address(None).unwrap(), Default::default()); + } + + #[test] + #[cfg_attr(feature = "dev", allow(float_cmp))] + fn test_to_price() { + assert_eq!(to_price("1").unwrap(), 1.0); + assert_eq!(to_price("2.3").unwrap(), 2.3); + assert_eq!(to_price("2.33").unwrap(), 2.33); + } + + #[test] + #[cfg(windows)] + fn test_geth_ipc_path() { + assert_eq!(geth_ipc_path(true), r"\\.\pipe\geth.ipc".to_owned()); + assert_eq!(geth_ipc_path(false), r"\\.\pipe\geth.ipc".to_owned()); + } + + #[test] + #[cfg(not(windows))] + fn test_geth_ipc_path() { + use util::path; + assert_eq!(geth_ipc_path(true), path::ethereum::with_testnet("geth.ipc").to_str().unwrap().to_owned()); + assert_eq!(geth_ipc_path(false), path::ethereum::with_default("geth.ipc").to_str().unwrap().to_owned()); + } + + #[test] + fn test_to_bootnodes() { + let one_bootnode = "enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303"; + let two_bootnodes = "enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303,enode://e731347db0521f3476e6bbbb83375dcd7133a1601425ebd15fd10f3835fd4c304fba6282087ca5a0deeafadf0aa0d4fd56c3323331901c1f38bd181c283e3e35@128.199.55.137:30303"; + + assert_eq!(to_bootnodes(&Some("".into())), Ok(vec![])); + assert_eq!(to_bootnodes(&None), Ok(vec![])); + assert_eq!(to_bootnodes(&Some(one_bootnode.into())), Ok(vec![one_bootnode.into()])); + assert_eq!(to_bootnodes(&Some(two_bootnodes.into())), Ok(vec![one_bootnode.into(), one_bootnode.into()])); + } +} + diff --git a/parity/informant.rs b/parity/informant.rs index 9d5c7bf27..fed1c46d1 100644 --- a/parity/informant.rs +++ b/parity/informant.rs @@ -108,12 +108,12 @@ impl Informant { info!(target: "import", "{} {} {}", match importing { - true => format!("Syncing {} {} {} {}+{} Qed", + true => format!("Syncing {} {} {} {}+{} Qed", paint(White.bold(), format!("{:>8}", format!("#{}", chain_info.best_block_number))), paint(White.bold(), format!("{}", chain_info.best_block_hash)), { let last_report = match write_report.deref() { &Some(ref last_report) => last_report.clone(), _ => ClientReport::default() }; - format!("{} blk/s {} tx/s {} Mgas/s", + format!("{} blk/s {} tx/s {} Mgas/s", paint(Yellow.bold(), format!("{:4}", ((report.blocks_imported - last_report.blocks_imported) * 1000) as u64 / elapsed.as_milliseconds())), paint(Yellow.bold(), format!("{:4}", ((report.transactions_applied - last_report.transactions_applied) * 1000) as u64 / elapsed.as_milliseconds())), paint(Yellow.bold(), format!("{:3}", ((report.gas_processed - last_report.gas_processed) / From::from(elapsed.as_milliseconds() * 1000)).low_u64())) @@ -132,7 +132,7 @@ impl Informant { }, paint(Cyan.bold(), format!("{:2}", 
sync_info.num_active_peers)), paint(Cyan.bold(), format!("{:2}", sync_info.num_peers)), - paint(Cyan.bold(), format!("{:2}", net_config.ideal_peers)) + paint(Cyan.bold(), format!("{:2}", if sync_info.num_peers as u32 > net_config.min_peers { net_config.max_peers } else { net_config.min_peers })) ), _ => String::new(), }, @@ -154,13 +154,13 @@ impl Informant { } impl ChainNotify for Informant { - fn new_blocks(&self, _imported: Vec<H256>, _invalid: Vec<H256>, enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, duration: u64) { + fn new_blocks(&self, imported: Vec<H256>, _invalid: Vec<H256>, _enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, duration: u64) { let mut last_import = self.last_import.lock(); let queue_info = self.client.queue_info(); let importing = queue_info.unverified_queue_size + queue_info.verified_queue_size > 3 || self.sync.as_ref().map_or(false, |s| s.status().is_major_syncing()); if Instant::now() > *last_import + Duration::from_secs(1) && !importing { - if let Some(block) = enacted.last().and_then(|h| self.client.block(BlockID::Hash(h.clone()))) { + if let Some(block) = imported.last().and_then(|h| self.client.block(BlockID::Hash(*h))) { let view = BlockView::new(&block); let header = view.header(); let tx_count = view.transactions_count(); @@ -179,7 +179,7 @@ impl ChainNotify for Informant { } self.skipped.store(0, AtomicOrdering::Relaxed); } else { - self.skipped.fetch_add(enacted.len(), AtomicOrdering::Relaxed); + self.skipped.fetch_add(imported.len(), AtomicOrdering::Relaxed); } } }
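Editor's note: with `ideal_peers` split into `min_peers`/`max_peers`, the informant's peer column now shows a context-dependent ceiling: once the node is past its minimum peer target it displays the hard maximum instead. A tiny runnable sketch of just that selection (names are illustrative):

```rust
// Display ceiling for the "x/y peers" column: below the minimum target,
// show the target itself; above it, show the hard maximum.
fn peer_ceiling(connected: u32, min_peers: u32, max_peers: u32) -> u32 {
    if connected > min_peers { max_peers } else { min_peers }
}

fn main() {
    assert_eq!(peer_ceiling(10, 25, 50), 25); // still below target: show 25
    assert_eq!(peer_ceiling(30, 25, 50), 50); // above target: show 50
}
```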
diff --git a/parity/main.rs b/parity/main.rs index 8406c3768..51951d115 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -25,16 +25,17 @@ extern crate docopt; extern crate num_cpus; extern crate rustc_serialize; +extern crate ethcore_devtools as devtools; +#[macro_use] extern crate ethcore_util as util; extern crate ethcore; extern crate ethsync; #[macro_use] extern crate log as rlog; extern crate env_logger; +extern crate ethcore_logger; extern crate ctrlc; extern crate fdlimit; -#[cfg(not(windows))] -extern crate daemonize; extern crate time; extern crate number_prefix; extern crate rpassword; @@ -53,15 +54,12 @@ extern crate ansi_term; #[macro_use] extern crate lazy_static; extern crate regex; -extern crate ethcore_logger; extern crate isatty; #[cfg(feature = "dapps")] extern crate ethcore_dapps; - -#[macro_use] -mod die; +mod cache; mod upgrade; mod rpc; mod dapps; @@ -73,529 +71,63 @@ mod migration; mod signer; mod rpc_apis; mod url; +mod helpers; +mod params; +mod deprecated; +mod dir; mod modules; +mod account; +mod blockchain; +mod presale; +mod run; +mod sync; -use std::io::{Write, Read, BufReader, BufRead}; -use std::ops::Deref; -use std::sync::Arc; -use std::path::Path; -use std::fs::File; -use std::str::{FromStr, from_utf8}; -use std::thread::sleep; -use std::time::Duration; -use rustc_serialize::hex::FromHex; -use ctrlc::CtrlC; -use util::{H256, ToPretty, PayloadInfo, Bytes, Colour, version, journaldb, RotatingLogger}; -use util::panics::{MayPanic, ForwardPanic, PanicHandler}; -use ethcore::client::{BlockID, BlockChainClient, ClientConfig, get_db_path, BlockImportError, Mode}; -use ethcore::error::{ImportError}; -use ethcore::service::ClientService; -use ethcore::spec::Spec; -use ethsync::{NetworkConfiguration}; -use ethcore::miner::{Miner, MinerService, ExternalMiner}; -use migration::migrate; -use informant::Informant; -use util::{Mutex, Condvar}; -use ethcore_logger::setup_log; -#[cfg(feature="ipc")] -use ethcore::client::ChainNotify; - -use die::*; +use std::{process, env}; use cli::print_version; -use rpc::RpcServer; -use signer::{SignerServer, new_token}; -use dapps::WebappServer; -use io_handler::ClientIoHandler; -use configuration::{Configuration}; +use configuration::{Cmd, Configuration}; +use deprecated::find_deprecated; + +fn execute(command: Cmd) -> Result<String, String> { + match command { + Cmd::Run(run_cmd) => { + try!(run::execute(run_cmd)); + Ok("".into()) + }, + Cmd::Version => Ok(print_version()), + Cmd::Account(account_cmd) => account::execute(account_cmd), + Cmd::ImportPresaleWallet(presale_cmd) => presale::execute(presale_cmd), + Cmd::Blockchain(blockchain_cmd) => blockchain::execute(blockchain_cmd), + Cmd::SignerToken(path) => signer::new_token(path), + } +} + +fn start() -> Result<String, String> { + let conf = Configuration::parse(env::args()).unwrap_or_else(|e| e.exit()); + + let deprecated = find_deprecated(&conf.args); + for d in deprecated { + println!("{}", d); + } + + let cmd = try!(conf.into_command()); + execute(cmd) +} fn main() { - let conf = Configuration::parse(); - execute(conf); -} - -fn execute(conf: Configuration) { - if conf.args.flag_version { - print_version(); + // just redirect to the sync::main() + if std::env::args().nth(1).map_or(false, |arg| arg == "sync") { + sync::main(); return; } + + match start() { + Ok(result) => { + println!("{}", result); + }, + Err(err) => { + println!("{}", err); + process::exit(1); + } + } + } - if conf.args.cmd_signer { - execute_signer(conf); - return; - } - - let spec = conf.spec(); - let client_config = conf.client_config(&spec); - - execute_upgrades(&conf, &spec, &client_config); - - if conf.args.cmd_daemon { - daemonize(&conf); - } - - // Setup panic handler - let panic_handler = PanicHandler::new_in_arc(); - // Setup logging - let logger = setup_log(&conf.log_settings()); - // Raise fdlimit - unsafe { ::fdlimit::raise_fd_limit(); } - - if conf.args.cmd_account { - execute_account_cli(conf); - return; - } - - if conf.args.cmd_wallet { - execute_wallet_cli(conf); - return; - } - - if conf.args.cmd_export { - execute_export(conf, panic_handler); - return; - } - - if conf.args.cmd_import { - execute_import(conf, panic_handler); - return; - } - - execute_client(conf, spec, client_config, panic_handler, logger); -} - -#[cfg(not(windows))] -fn daemonize(conf: &Configuration) { - use daemonize::Daemonize; - Daemonize::new() - .pid_file(conf.args.arg_pid_file.clone()) - .chown_pid_file(true) - .start() - .unwrap_or_else(|e| die!("Couldn't daemonize; {}", e)); -} - -#[cfg(windows)] -fn daemonize(_conf: &Configuration) { -} - -fn execute_upgrades(conf: &Configuration, spec: &Spec, client_config: &ClientConfig) { - match ::upgrade::upgrade(Some(&conf.path())) { - Ok(upgrades_applied) if upgrades_applied > 0 => { - debug!("Executed {} upgrade scripts - ok", upgrades_applied); - }, - Err(e) => { - die!("Error upgrading parity data: {:?}", e); - }, - _ => {}, - } - - let db_path = get_db_path(Path::new(&conf.path()), client_config.pruning, spec.genesis_header().hash(), spec.fork_name.as_ref()); - let result = migrate(&db_path, client_config.pruning); - if let Err(err) = result { - die_with_message(&format!("{} DB path: {}", err, db_path.to_string_lossy())); - } -} - -fn execute_client(conf: Configuration, spec: Spec, client_config: ClientConfig, panic_handler: Arc<PanicHandler>, logger: Arc<RotatingLogger>) { - let mut hypervisor = modules::hypervisor(); - - info!("Starting {}", Colour::White.bold().paint(format!("{}", version()))); - info!("Using state DB journalling strategy {}", Colour::White.bold().paint(match client_config.pruning { - journaldb::Algorithm::Archive => "archive", - journaldb::Algorithm::EarlyMerge => "light", - journaldb::Algorithm::OverlayRecent => "fast", - journaldb::Algorithm::RefCounted => "basic", - })); - 
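Editor's note: the new `main` funnels everything through `start() -> Result<String, String>`: deprecation warnings are printed, the command runs, and only at the very end does the process choose between `println!` and `exit(1)`. A std-only sketch of that shape, with command parsing stubbed out (the command name and version string are made up):

```rust
use std::{env, process};

// Stand-in for Configuration::parse + into_command + execute.
fn start() -> Result<String, String> {
    match env::args().nth(1).as_deref() {
        Some("version") => Ok("example/v0.0".into()),
        other => Err(format!("Unknown command: {:?}", other)),
    }
}

fn main() {
    match start() {
        Ok(result) => println!("{}", result),
        Err(err) => {
            println!("{}", err);
            process::exit(1);
        }
    }
}
```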
- // Display warning about using experimental journaldb types - match client_config.pruning { - journaldb::Algorithm::EarlyMerge | journaldb::Algorithm::RefCounted => { - warn!("Your chosen strategy is {}! You can re-run with --pruning to change.", Colour::Red.bold().paint("unstable")); - } - _ => {} - } - - // Display warning about using unlock with signer - if conf.signer_enabled() && conf.args.flag_unlock.is_some() { - warn!("Using Trusted Signer and --unlock is not recommended!"); - warn!("NOTE that Signer will not ask you to confirm transactions from unlocked account."); - } - - let net_settings = conf.net_settings(&spec); - let sync_config = conf.sync_config(&spec); - - // Secret Store - let account_service = Arc::new(conf.account_service()); - - // Miner - let miner = Miner::new(conf.miner_options(), conf.gas_pricer(), conf.spec(), Some(account_service.clone())); - miner.set_author(conf.author().unwrap_or_default()); - miner.set_gas_floor_target(conf.gas_floor_target()); - miner.set_gas_ceil_target(conf.gas_ceil_target()); - miner.set_extra_data(conf.extra_data()); - miner.set_transactions_limit(conf.args.flag_tx_queue_size); - - // Build client - let service = ClientService::start( - client_config, - spec, - Path::new(&conf.path()), - miner.clone(), - ).unwrap_or_else(|e| die_with_error("Client", e)); - - panic_handler.forward_from(&service); - let client = service.client(); - - let external_miner = Arc::new(ExternalMiner::default()); - let network_settings = Arc::new(conf.network_settings()); - - // Sync - let (sync_provider, manage_network, chain_notify) = - modules::sync(&mut hypervisor, sync_config, NetworkConfiguration::from(net_settings), client.clone(), &conf.log_settings()) - .unwrap_or_else(|e| die_with_error("Sync", e)); - - service.add_notify(chain_notify.clone()); - - // if network is active by default - if match conf.mode() { Mode::Dark(..) => false, _ => !conf.args.flag_no_network } { - chain_notify.start(); - } - - let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies { - signer_port: conf.signer_port(), - signer_queue: Arc::new(rpc_apis::ConfirmationsQueue::default()), - client: client.clone(), - sync: sync_provider.clone(), - net: manage_network.clone(), - secret_store: account_service.clone(), - miner: miner.clone(), - external_miner: external_miner.clone(), - logger: logger.clone(), - settings: network_settings.clone(), - allow_pending_receipt_query: !conf.args.flag_geth, - net_service: manage_network.clone(), - }); - - let dependencies = rpc::Dependencies { - panic_handler: panic_handler.clone(), - apis: deps_for_rpc_apis.clone(), - }; - - // Setup http rpc - let rpc_server = rpc::new_http(rpc::HttpConfiguration { - enabled: network_settings.rpc_enabled, - interface: conf.rpc_interface(), - port: network_settings.rpc_port, - apis: conf.rpc_apis(), - cors: conf.rpc_cors(), - hosts: conf.rpc_hosts(), - }, &dependencies); - - // setup ipc rpc - let _ipc_server = rpc::new_ipc(conf.ipc_settings(), &dependencies); - debug!("IPC: {}", conf.ipc_settings()); - - if conf.args.flag_webapp { println!("WARNING: Flag -w/--webapp is deprecated. Dapps server is now on by default. 
Ignoring."); } - let dapps_server = dapps::new(dapps::Configuration { - enabled: conf.dapps_enabled(), - interface: conf.dapps_interface(), - port: conf.args.flag_dapps_port, - user: conf.args.flag_dapps_user.clone(), - pass: conf.args.flag_dapps_pass.clone(), - dapps_path: conf.directories().dapps, - }, dapps::Dependencies { - panic_handler: panic_handler.clone(), - apis: deps_for_rpc_apis.clone(), - }); - - // Set up a signer - let signer_server = signer::start(signer::Configuration { - enabled: conf.signer_enabled(), - port: conf.args.flag_signer_port, - signer_path: conf.directories().signer, - }, signer::Dependencies { - panic_handler: panic_handler.clone(), - apis: deps_for_rpc_apis.clone(), - }); - - let informant = Arc::new(Informant::new(service.client(), Some(sync_provider.clone()), Some(manage_network.clone()), conf.have_color())); - service.add_notify(informant.clone()); - // Register IO handler - let io_handler = Arc::new(ClientIoHandler { - client: service.client(), - info: informant, - sync: sync_provider.clone(), - net: manage_network.clone(), - accounts: account_service.clone(), - }); - service.register_io_handler(io_handler).expect("Error registering IO handler"); - - if conf.args.cmd_ui { - if !conf.dapps_enabled() { - die_with_message("Cannot use UI command with Dapps turned off."); - } - url::open(&format!("http://{}:{}/", conf.dapps_interface(), conf.args.flag_dapps_port)); - } - - // Handle exit - wait_for_exit(panic_handler, rpc_server, dapps_server, signer_server); -} - -fn flush_stdout() { - ::std::io::stdout().flush().expect("stdout is flushable; qed"); -} - -enum DataFormat { - Hex, - Binary, -} - -fn execute_export(conf: Configuration, panic_handler: Arc) { - let spec = conf.spec(); - let client_config = conf.client_config(&spec); - - // Build client - let service = ClientService::start( - client_config, spec, Path::new(&conf.path()), Arc::new(Miner::with_spec(conf.spec())) - ).unwrap_or_else(|e| die_with_error("Client", e)); - - panic_handler.forward_from(&service); - let client = service.client(); - - // we have a client! 
- let parse_block_id = |s: &str, arg: &str| -> u64 { - if s == "latest" { - client.chain_info().best_block_number - } else if let Ok(n) = s.parse::() { - n - } else if let Ok(h) = H256::from_str(s) { - client.block_number(BlockID::Hash(h)).unwrap_or_else(|| { - die!("Unknown block hash passed to {} parameter: {:?}", arg, s); - }) - } else { - die!("Invalid {} parameter given: {:?}", arg, s); - } - }; - let from = parse_block_id(&conf.args.flag_from, "--from"); - let to = parse_block_id(&conf.args.flag_to, "--to"); - let format = match conf.args.flag_format { - Some(x) => match x.deref() { - "binary" | "bin" => DataFormat::Binary, - "hex" => DataFormat::Hex, - x => die!("Invalid --format parameter given: {:?}", x), - }, - None if conf.args.arg_file.is_none() => DataFormat::Hex, - None => DataFormat::Binary, - }; - - let mut out: Box = if let Some(f) = conf.args.arg_file { - Box::new(File::create(&f).unwrap_or_else(|_| die!("Cannot write to file given: {}", f))) - } else { - Box::new(::std::io::stdout()) - }; - - for i in from..(to + 1) { - let b = client.deref().block(BlockID::Number(i)).unwrap(); - match format { - DataFormat::Binary => { out.write(&b).expect("Couldn't write to stream."); } - DataFormat::Hex => { out.write_fmt(format_args!("{}", b.pretty())).expect("Couldn't write to stream."); } + Err(err) => { + println!("{}", err); + process::exit(1); } } } -fn execute_import(conf: Configuration, panic_handler: Arc) { - let spec = conf.spec(); - let client_config = conf.client_config(&spec); - - // Build client - let service = ClientService::start( - client_config, spec, Path::new(&conf.path()), Arc::new(Miner::with_spec(conf.spec())) - ).unwrap_or_else(|e| die_with_error("Client", e)); - - panic_handler.forward_from(&service); - let client = service.client(); - - let mut instream: Box = if let Some(ref f) = conf.args.arg_file { - let f = File::open(f).unwrap_or_else(|_| die!("Cannot open the file given: {}", f)); - Box::new(f) - } else { - Box::new(::std::io::stdin()) - }; - - const READAHEAD_BYTES: usize = 8; - - let mut first_bytes: Bytes = vec![0; READAHEAD_BYTES]; - let mut first_read = 0; - - let format = match conf.args.flag_format { - Some(ref x) => match x.deref() { - "binary" | "bin" => DataFormat::Binary, - "hex" => DataFormat::Hex, - x => die!("Invalid --format parameter given: {:?}", x), - }, - None => { - // autodetect... 
- first_read = instream.read(&mut(first_bytes[..])).unwrap_or_else(|_| die!("Error reading from the file/stream.")); - match first_bytes[0] { - 0xf9 => { - info!("Autodetected binary data format."); - DataFormat::Binary - } - _ => { - info!("Autodetected hex data format."); - DataFormat::Hex - } - } - } - }; - - let informant = Informant::new(client.clone(), None, None, conf.have_color()); - - let do_import = |bytes| { - while client.queue_info().is_full() { sleep(Duration::from_secs(1)); } - match client.import_block(bytes) { - Ok(_) => {} - Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { trace!("Skipping block already in chain."); } - Err(e) => die!("Cannot import block: {:?}", e) - } - informant.tick(); - }; - - match format { - DataFormat::Binary => { - loop { - let mut bytes: Bytes = if first_read > 0 {first_bytes.clone()} else {vec![0; READAHEAD_BYTES]}; - let n = if first_read > 0 {first_read} else {instream.read(&mut(bytes[..])).unwrap_or_else(|_| die!("Error reading from the file/stream."))}; - if n == 0 { break; } - first_read = 0; - let s = PayloadInfo::from(&(bytes[..])).unwrap_or_else(|e| die!("Invalid RLP in the file/stream: {:?}", e)).total(); - bytes.resize(s, 0); - instream.read_exact(&mut(bytes[READAHEAD_BYTES..])).unwrap_or_else(|_| die!("Error reading from the file/stream.")); - do_import(bytes); - } - } - DataFormat::Hex => { - for line in BufReader::new(instream).lines() { - let s = line.unwrap_or_else(|_| die!("Error reading from the file/stream.")); - let s = if first_read > 0 {from_utf8(&first_bytes).unwrap().to_owned() + &(s[..])} else {s}; - first_read = 0; - let bytes = FromHex::from_hex(&(s[..])).unwrap_or_else(|_| die!("Invalid hex in file/stream.")); - do_import(bytes); - } - } - } - while !client.queue_info().is_empty() { - sleep(Duration::from_secs(1)); - informant.tick(); - } - client.flush_queue(); -} - -fn execute_signer(conf: Configuration) { - if !conf.args.cmd_new_token { - die!("Unknown command."); - } - - let path = conf.directories().signer; - let code = new_token(path).unwrap_or_else(|e| { - die!("Error generating token: {:?}", e) - }); - println!("This key code will authorise your System Signer UI: {}", if conf.args.flag_no_color { code } else { format!("{}", Colour::White.bold().paint(code)) }); -} - -fn execute_account_cli(conf: Configuration) { - use ethcore::ethstore::{EthStore, import_accounts}; - use ethcore::ethstore::dir::DiskDirectory; - use ethcore::account_provider::AccountProvider; - use rpassword::read_password; - - let dir = Box::new(DiskDirectory::create(conf.keys_path()).unwrap()); - let iterations = conf.keys_iterations(); - let secret_store = AccountProvider::new(Box::new(EthStore::open_with_iterations(dir, iterations).unwrap())); - - if conf.args.cmd_new { - println!("Please note that password is NOT RECOVERABLE."); - print!("Type password: "); - flush_stdout(); - let password = read_password().unwrap(); - print!("Repeat password: "); - flush_stdout(); - let password_repeat = read_password().unwrap(); - if password != password_repeat { - println!("Passwords do not match!"); - return; - } - println!("New account address:"); - let new_address = secret_store.new_account(&password).unwrap(); - println!("{:?}", new_address); - return; - } - - if conf.args.cmd_list { - println!("Known addresses:"); - for addr in &secret_store.accounts() { - println!("{:?}", addr); - } - return; - } - - if conf.args.cmd_import { - let to = DiskDirectory::create(conf.keys_path()).unwrap(); - let mut imported = 0; - for path in 
&conf.args.arg_path { - let from = DiskDirectory::at(path); - imported += import_accounts(&from, &to).unwrap_or_else(|e| die!("Could not import accounts {}", e)).len(); - } - println!("Imported {} keys", imported); - } -} - -fn execute_wallet_cli(conf: Configuration) { - use ethcore::ethstore::{PresaleWallet, EthStore}; - use ethcore::ethstore::dir::DiskDirectory; - use ethcore::account_provider::AccountProvider; - - let wallet_path = conf.args.arg_path.first().unwrap(); - let filename = conf.args.flag_password.first().unwrap(); - let mut file = File::open(filename).unwrap_or_else(|_| die!("{} Unable to read password file.", filename)); - let mut file_content = String::new(); - file.read_to_string(&mut file_content).unwrap_or_else(|_| die!("{} Unable to read password file.", filename)); - - let dir = Box::new(DiskDirectory::create(conf.keys_path()).unwrap()); - let iterations = conf.keys_iterations(); - let store = AccountProvider::new(Box::new(EthStore::open_with_iterations(dir, iterations).unwrap())); - - // remove eof - let pass = &file_content[..file_content.len() - 1]; - let wallet = PresaleWallet::open(wallet_path).unwrap_or_else(|_| die!("Unable to open presale wallet.")); - let kp = wallet.decrypt(pass).unwrap_or_else(|_| die!("Invalid password")); - let address = store.insert_account(kp.secret().clone(), pass).unwrap(); - - println!("Imported account: {}", address); -} - -fn wait_for_exit( - panic_handler: Arc, - _rpc_server: Option, - _dapps_server: Option, - _signer_server: Option - ) { - let exit = Arc::new(Condvar::new()); - - // Handle possible exits - let e = exit.clone(); - CtrlC::set_handler(move || { e.notify_all(); }); - - // Handle panics - let e = exit.clone(); - panic_handler.on_panic(move |_reason| { e.notify_all(); }); - - // Wait for signal - let mutex = Mutex::new(()); - exit.wait(&mut mutex.lock()); - info!("Finishing work, please wait..."); -} - -/// Parity needs at least 1 test to generate coverage reports correctly. -#[test] -fn if_works() { -} diff --git a/parity/migration.rs b/parity/migration.rs index 40cc369e3..b073b8dbf 100644 --- a/parity/migration.rs +++ b/parity/migration.rs @@ -20,13 +20,18 @@ use std::io::{Read, Write, Error as IoError, ErrorKind}; use std::path::{Path, PathBuf}; use std::fmt::{Display, Formatter, Error as FmtError}; use util::journaldb::Algorithm; -use util::migration::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError}; +use util::migration::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError, Migration}; +use util::kvdb::{CompactionProfile, Database, DatabaseConfig}; use ethcore::migrations; +use ethcore::client; +use ethcore::migrations::Extract; /// Database is assumed to be at default version, when no version file is found. const DEFAULT_VERSION: u32 = 5; /// Current version of database models. -const CURRENT_VERSION: u32 = 7; +const CURRENT_VERSION: u32 = 9; +/// First version of the consolidated database. +const CONSOLIDATION_VERSION: u32 = 9; /// Defines how many items are migrated to the new version of database at once. const BATCH_SIZE: usize = 1024; /// Version file name. @@ -110,24 +115,10 @@ fn update_version(path: &Path) -> Result<(), Error> { Ok(()) } -/// Blocks database path. -fn blocks_database_path(path: &Path) -> PathBuf { - let mut blocks_path = path.to_owned(); - blocks_path.push("blocks"); - blocks_path -} - -/// Extras database path. 
-fn extras_database_path(path: &Path) -> PathBuf { - let mut extras_path = path.to_owned(); - extras_path.push("extras"); - extras_path -} - -/// State database path. -fn state_database_path(path: &Path) -> PathBuf { +/// Consolidated database path. +fn consolidated_database_path(path: &Path) -> PathBuf { let mut state_path = path.to_owned(); - state_path.push("state"); + state_path.push("db"); state_path } @@ -140,38 +131,56 @@ fn backup_database_path(path: &Path) -> PathBuf { } /// Default migration settings. -fn default_migration_settings() -> MigrationConfig { +pub fn default_migration_settings(compaction_profile: &CompactionProfile) -> MigrationConfig { MigrationConfig { batch_size: BATCH_SIZE, + compaction_profile: *compaction_profile, } } -/// Migrations on the blocks database. -fn blocks_database_migrations() -> Result { - let manager = MigrationManager::new(default_migration_settings()); +/// Migrations on the consolidated database. +fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> Result { + let manager = MigrationManager::new(default_migration_settings(compaction_profile)); Ok(manager) } -/// Migrations on the extras database. -fn extras_database_migrations() -> Result { - let mut manager = MigrationManager::new(default_migration_settings()); - try!(manager.add_migration(migrations::extras::ToV6).map_err(|_| Error::MigrationImpossible)); - Ok(manager) -} +/// Consolidates legacy databases into a single one. +fn consolidate_database( + old_db_path: PathBuf, + new_db_path: PathBuf, + column: Option<u32>, + extract: Extract, + compaction_profile: &CompactionProfile) -> Result<(), Error> { + fn db_error(e: String) -> Error { + warn!("Cannot open Database for consolidation: {:?}", e); + Error::MigrationFailed + } -/// Migrations on the state database. -fn state_database_migrations(pruning: Algorithm) -> Result { - let mut manager = MigrationManager::new(default_migration_settings()); - let res = match pruning { - Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()), - Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()), - _ => return Err(Error::UnsuportedPruningMethod), + let mut migration = migrations::ToV9::new(column, extract); + let config = default_migration_settings(compaction_profile); + let mut db_config = DatabaseConfig { + max_open_files: 64, + cache_size: None, + compaction: config.compaction_profile.clone(), + columns: None, + wal: true, }; - try!(res.map_err(|_| Error::MigrationImpossible)); - Ok(manager) + let old_path_str = try!(old_db_path.to_str().ok_or(Error::MigrationImpossible)); + let new_path_str = try!(new_db_path.to_str().ok_or(Error::MigrationImpossible)); + + let cur_db = try!(Database::open(&db_config, old_path_str).map_err(db_error)); + // open new DB with proper number of columns + db_config.columns = migration.columns(); + let mut new_db = try!(Database::open(&db_config, new_path_str).map_err(db_error)); + + // Migrate to new database (default column only) + try!(migration.migrate(&cur_db, &config, &mut new_db, None)); + + Ok(()) } + /// Migrates database at given position with given migration rules. fn migrate_database(version: u32, db_path: PathBuf, mut migrations: MigrationManager) -> Result<(), Error> { // check if migration is needed @@ -207,23 +216,114 @@ fn exists(path: &Path) -> bool { } /// Migrates the database.
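+/// Versions below the consolidation version first migrate the legacy +/// blocks/extras/state databases in place, then merge them into the single +/// multi-column database; newer versions migrate the consolidated database +/// directly.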
-pub fn migrate(path: &Path, pruning: Algorithm) -> Result<(), Error> { +pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionProfile) -> Result<(), Error> { // read version file. let version = try!(current_version(path)); // migrate the databases. // main db directory may already exist, so let's check if we have blocks dir - if version < CURRENT_VERSION && exists(&blocks_database_path(path)) { - println!("Migrating database from version {} to {}", version, CURRENT_VERSION); - try!(migrate_database(version, blocks_database_path(path), try!(blocks_database_migrations()))); - try!(migrate_database(version, extras_database_path(path), try!(extras_database_migrations()))); - try!(migrate_database(version, state_database_path(path), try!(state_database_migrations(pruning)))); - println!("Migration finished"); - } else if version > CURRENT_VERSION { + if version > CURRENT_VERSION { return Err(Error::FutureDBVersion); } + // We are in the latest version, yay! + if version == CURRENT_VERSION { + return Ok(()) + } + + // Perform pre-consolidation migrations + if version < CONSOLIDATION_VERSION && exists(&legacy::blocks_database_path(path)) { + println!("Migrating database from version {} to {}", version, CONSOLIDATION_VERSION); + try!(migrate_database(version, legacy::blocks_database_path(path), try!(legacy::blocks_database_migrations(&compaction_profile)))); + try!(migrate_database(version, legacy::extras_database_path(path), try!(legacy::extras_database_migrations(&compaction_profile)))); + try!(migrate_database(version, legacy::state_database_path(path), try!(legacy::state_database_migrations(pruning, &compaction_profile)))); + let db_path = consolidated_database_path(path); + // Remove the database dir (it shouldn't exist anyway, but it might when migration was interrupted) + let _ = fs::remove_dir_all(db_path.clone()); + try!(consolidate_database(legacy::blocks_database_path(path), db_path.clone(), client::DB_COL_HEADERS, Extract::Header, &compaction_profile)); + try!(consolidate_database(legacy::blocks_database_path(path), db_path.clone(), client::DB_COL_BODIES, Extract::Body, &compaction_profile)); + try!(consolidate_database(legacy::extras_database_path(path), db_path.clone(), client::DB_COL_EXTRA, Extract::All, &compaction_profile)); + try!(consolidate_database(legacy::state_database_path(path), db_path.clone(), client::DB_COL_STATE, Extract::All, &compaction_profile)); + try!(consolidate_database(legacy::trace_database_path(path), db_path.clone(), client::DB_COL_TRACE, Extract::All, &compaction_profile)); + let _ = fs::remove_dir_all(legacy::blocks_database_path(path)); + let _ = fs::remove_dir_all(legacy::extras_database_path(path)); + let _ = fs::remove_dir_all(legacy::state_database_path(path)); + let _ = fs::remove_dir_all(legacy::trace_database_path(path)); + println!("Migration finished"); + } + + // Further migrations + if version >= CONSOLIDATION_VERSION && version < CURRENT_VERSION && exists(&consolidated_database_path(path)) { + println!("Migrating database from version {} to {}", ::std::cmp::max(CONSOLIDATION_VERSION, version), CURRENT_VERSION); + try!(migrate_database(version, consolidated_database_path(path), try!(consolidated_database_migrations(&compaction_profile)))); + println!("Migration finished"); + } + // update version file.
update_version(path) } +/// Old migrations utilities +mod legacy { + use super::*; + use std::path::{Path, PathBuf}; + use util::journaldb::Algorithm; + use util::migration::{Manager as MigrationManager}; + use util::kvdb::CompactionProfile; + use ethcore::migrations; + + /// Blocks database path. + pub fn blocks_database_path(path: &Path) -> PathBuf { + let mut blocks_path = path.to_owned(); + blocks_path.push("blocks"); + blocks_path + } + + /// Extras database path. + pub fn extras_database_path(path: &Path) -> PathBuf { + let mut extras_path = path.to_owned(); + extras_path.push("extras"); + extras_path + } + + /// State database path. + pub fn state_database_path(path: &Path) -> PathBuf { + let mut state_path = path.to_owned(); + state_path.push("state"); + state_path + } + + /// Trace database path. + pub fn trace_database_path(path: &Path) -> PathBuf { + let mut blocks_path = path.to_owned(); + blocks_path.push("tracedb"); + blocks_path + } + + /// Migrations on the blocks database. + pub fn blocks_database_migrations(compaction_profile: &CompactionProfile) -> Result { + let mut manager = MigrationManager::new(default_migration_settings(compaction_profile)); + try!(manager.add_migration(migrations::blocks::V8::default()).map_err(|_| Error::MigrationImpossible)); + Ok(manager) + } + + /// Migrations on the extras database. + pub fn extras_database_migrations(compaction_profile: &CompactionProfile) -> Result { + let mut manager = MigrationManager::new(default_migration_settings(compaction_profile)); + try!(manager.add_migration(migrations::extras::ToV6).map_err(|_| Error::MigrationImpossible)); + Ok(manager) + } + + /// Migrations on the state database. + pub fn state_database_migrations(pruning: Algorithm, compaction_profile: &CompactionProfile) -> Result { + let mut manager = MigrationManager::new(default_migration_settings(compaction_profile)); + let res = match pruning { + Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()), + Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()), + _ => return Err(Error::UnsuportedPruningMethod), + }; + + try!(res.map_err(|_| Error::MigrationImpossible)); + Ok(manager) + } +} diff --git a/parity/modules.rs b/parity/modules.rs index f7b14dd54..4e2e22ffb 100644 --- a/parity/modules.rs +++ b/parity/modules.rs @@ -23,8 +23,14 @@ use ethsync::{SyncConfig, NetworkConfiguration}; use self::no_ipc_deps::*; #[cfg(feature="ipc")] use self::ipc_deps::*; +use ethcore_logger::Config as LogConfig; -use ethcore_logger::Settings as LogSettings; +pub mod service_urls { + pub const CLIENT: &'static str = "ipc:///tmp/parity-chain.ipc"; + pub const SYNC: &'static str = "ipc:///tmp/parity-sync.ipc"; + pub const SYNC_NOTIFY: &'static str = "ipc:///tmp/parity-sync-notify.ipc"; + pub const NETWORK_MANAGER: &'static str = "ipc:///tmp/parity-manage-net.ipc"; +} #[cfg(not(feature="ipc"))] mod no_ipc_deps { @@ -52,7 +58,6 @@ mod ipc_deps { pub use ipc::binary::serialize; } - #[cfg(feature="ipc")] pub fn hypervisor() -> Option { Some(Hypervisor::new()) @@ -64,7 +69,7 @@ pub fn hypervisor() -> Option { } #[cfg(feature="ipc")] -fn sync_arguments(sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, log_settings: &LogSettings) -> BootArgs { +fn sync_arguments(sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, log_settings: &LogConfig) -> BootArgs { let service_config = ServiceConfiguration { sync: sync_cfg, net: net_cfg, @@ -75,11 +80,11 @@ fn sync_arguments(sync_cfg: SyncConfig, net_cfg: 
NetworkConfiguration, log_setti // client service url and logging settings are passed in command line let mut cli_args = Vec::new(); - cli_args.push("ipc:///tmp/parity-chain.ipc".to_owned()); + cli_args.push("sync".to_owned()); if !log_settings.color { cli_args.push("--no-color".to_owned()); } - if let Some(ref init) = log_settings.init { + if let Some(ref mode) = log_settings.mode { cli_args.push("-l".to_owned()); - cli_args.push(init.to_owned()); + cli_args.push(mode.to_owned()); } if let Some(ref file) = log_settings.file { cli_args.push("--log-file".to_owned()); @@ -96,19 +101,19 @@ pub fn sync sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, _client: Arc, - log_settings: &LogSettings, + log_settings: &LogConfig, ) -> Result { let mut hypervisor = hypervisor_ref.take().expect("There should be hypervisor for ipc configuration"); - hypervisor = hypervisor.module(SYNC_MODULE_ID, "sync", sync_arguments(sync_cfg, net_cfg, log_settings)); + hypervisor = hypervisor.module(SYNC_MODULE_ID, "parity", sync_arguments(sync_cfg, net_cfg, log_settings)); hypervisor.start(); hypervisor.wait_for_startup(); - let sync_client = init_client::>("ipc:///tmp/parity-sync.ipc").unwrap(); - let notify_client = init_client::>("ipc:///tmp/parity-sync-notify.ipc").unwrap(); - let manage_client = init_client::>("ipc:///tmp/parity-manage-net.ipc").unwrap(); + let sync_client = init_client::>(service_urls::SYNC).unwrap(); + let notify_client = init_client::>(service_urls::SYNC_NOTIFY).unwrap(); + let manage_client = init_client::>(service_urls::NETWORK_MANAGER).unwrap(); *hypervisor_ref = Some(hypervisor); Ok((sync_client, manage_client, notify_client)) @@ -121,7 +126,7 @@ pub fn sync sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, client: Arc, - _log_settings: &LogSettings, + _log_settings: &LogConfig, ) -> Result { diff --git a/parity/params.rs b/parity/params.rs new file mode 100644 index 000000000..6e105f524 --- /dev/null +++ b/parity/params.rs @@ -0,0 +1,282 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
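+//! Typed values parsed from command-line options: chain spec selection, +//! pruning strategy, reseal policy, account, gas-pricer and miner settings.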
+ +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; +use util::{contents, Database, DatabaseConfig, journaldb, H256, Address, U256, version_data}; +use util::journaldb::Algorithm; +use ethcore::client; +use ethcore::spec::Spec; +use ethcore::ethereum; +use ethcore::miner::{GasPricer, GasPriceCalibratorOptions}; +use dir::Directories; + +#[derive(Debug, PartialEq)] +pub enum SpecType { + Mainnet, + Testnet, + Olympic, + Classic, + Custom(String), +} + +impl Default for SpecType { + fn default() -> Self { + SpecType::Mainnet + } +} + +impl FromStr for SpecType { + type Err = String; + + fn from_str(s: &str) -> Result { + let spec = match s { + "frontier" | "homestead" | "mainnet" => SpecType::Mainnet, + "frontier-dogmatic" | "homestead-dogmatic" | "classic" => SpecType::Classic, + "morden" | "testnet" => SpecType::Testnet, + "olympic" => SpecType::Olympic, + other => SpecType::Custom(other.into()), + }; + Ok(spec) + } +} + +impl SpecType { + pub fn spec(&self) -> Result { + match *self { + SpecType::Mainnet => Ok(ethereum::new_frontier()), + SpecType::Testnet => Ok(ethereum::new_morden()), + SpecType::Olympic => Ok(ethereum::new_olympic()), + SpecType::Classic => Ok(ethereum::new_classic()), + SpecType::Custom(ref file) => Ok(Spec::load(&try!(contents(file).map_err(|_| "Could not load specification file.")))) + } + } +} + +#[derive(Debug, PartialEq)] +pub enum Pruning { + Specific(Algorithm), + Auto, +} + +impl Default for Pruning { + fn default() -> Self { + Pruning::Auto + } +} + +impl FromStr for Pruning { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "auto" => Ok(Pruning::Auto), + other => other.parse().map(Pruning::Specific), + } + } +} + +impl Pruning { + pub fn to_algorithm(&self, dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm { + match *self { + Pruning::Specific(algo) => algo, + Pruning::Auto => Self::find_best_db(dirs, genesis_hash, fork_name), + } + } + + fn find_best_db(dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm { + let mut algo_types = Algorithm::all_types(); + + // if all dbs have the same latest era, the last element is the default one + algo_types.push(Algorithm::default()); + + algo_types.into_iter().max_by_key(|i| { + let client_path = dirs.client_path(genesis_hash, fork_name, *i); + let config = DatabaseConfig::with_columns(client::DB_NO_OF_COLUMNS); + let db = match Database::open(&config, client_path.to_str().unwrap()) { + Ok(db) => db, + Err(_) => return 0, + }; + let db = journaldb::new(Arc::new(db), *i, client::DB_COL_STATE); + trace!(target: "parity", "Looking for best DB: {} at {:?}", i, db.latest_era()); + db.latest_era().unwrap_or(0) + }).unwrap() + } +} + +#[derive(Debug, PartialEq)] +pub struct ResealPolicy { + pub own: bool, + pub external: bool, +} + +impl Default for ResealPolicy { + fn default() -> Self { + ResealPolicy { + own: true, + external: true, + } + } +} + +impl FromStr for ResealPolicy { + type Err = String; + + fn from_str(s: &str) -> Result { + let (own, external) = match s { + "none" => (false, false), + "own" => (true, false), + "ext" => (false, true), + "all" => (true, true), + x => return Err(format!("Invalid reseal value: {}", x)), + }; + + let reseal = ResealPolicy { + own: own, + external: external, + }; + + Ok(reseal) + } +} + +#[derive(Debug, PartialEq)] +pub struct AccountsConfig { + pub iterations: u32, + pub import_keys: bool, + pub testnet: bool, + pub password_files: Vec, + pub unlocked_accounts: Vec
<Address>
, +} + +impl Default for AccountsConfig { + fn default() -> Self { + AccountsConfig { + iterations: 10240, + import_keys: true, + testnet: false, + password_files: Vec::new(), + unlocked_accounts: Vec::new(), + } + } +} + +#[derive(Debug, PartialEq)] +pub enum GasPricerConfig { + Fixed(U256), + Calibrated { + usd_per_tx: f32, + recalibration_period: Duration, + } +} + +impl Default for GasPricerConfig { + fn default() -> Self { + GasPricerConfig::Calibrated { + usd_per_tx: 0.005, + recalibration_period: Duration::from_secs(3600), + } + } +} + +impl Into for GasPricerConfig { + fn into(self) -> GasPricer { + match self { + GasPricerConfig::Fixed(u) => GasPricer::Fixed(u), + GasPricerConfig::Calibrated { usd_per_tx, recalibration_period } => { + GasPricer::new_calibrated(GasPriceCalibratorOptions { + usd_per_tx: usd_per_tx, + recalibration_period: recalibration_period, + }) + } + } + } +} + +#[derive(Debug, PartialEq)] +pub struct MinerExtras { + pub author: Address, + pub extra_data: Vec, + pub gas_floor_target: U256, + pub gas_ceil_target: U256, + pub transactions_limit: usize, +} + +impl Default for MinerExtras { + fn default() -> Self { + MinerExtras { + author: Default::default(), + extra_data: version_data(), + gas_floor_target: U256::from(4_700_000), + gas_ceil_target: U256::from(6_283_184), + transactions_limit: 1024, + } + } +} + +#[cfg(test)] +mod tests { + use util::journaldb::Algorithm; + use super::{SpecType, Pruning, ResealPolicy}; + + #[test] + fn test_spec_type_parsing() { + assert_eq!(SpecType::Mainnet, "frontier".parse().unwrap()); + assert_eq!(SpecType::Mainnet, "homestead".parse().unwrap()); + assert_eq!(SpecType::Mainnet, "mainnet".parse().unwrap()); + assert_eq!(SpecType::Testnet, "testnet".parse().unwrap()); + assert_eq!(SpecType::Testnet, "morden".parse().unwrap()); + assert_eq!(SpecType::Olympic, "olympic".parse().unwrap()); + } + + #[test] + fn test_spec_type_default() { + assert_eq!(SpecType::Mainnet, SpecType::default()); + } + + #[test] + fn test_pruning_parsing() { + assert_eq!(Pruning::Auto, "auto".parse().unwrap()); + assert_eq!(Pruning::Specific(Algorithm::Archive), "archive".parse().unwrap()); + assert_eq!(Pruning::Specific(Algorithm::EarlyMerge), "light".parse().unwrap()); + assert_eq!(Pruning::Specific(Algorithm::OverlayRecent), "fast".parse().unwrap()); + assert_eq!(Pruning::Specific(Algorithm::RefCounted), "basic".parse().unwrap()); + } + + #[test] + fn test_pruning_default() { + assert_eq!(Pruning::Auto, Pruning::default()); + } + + #[test] + fn test_reseal_policy_parsing() { + let none = ResealPolicy { own: false, external: false }; + let own = ResealPolicy { own: true, external: false }; + let ext = ResealPolicy { own: false, external: true }; + let all = ResealPolicy { own: true, external: true }; + assert_eq!(none, "none".parse().unwrap()); + assert_eq!(own, "own".parse().unwrap()); + assert_eq!(ext, "ext".parse().unwrap()); + assert_eq!(all, "all".parse().unwrap()); + } + + #[test] + fn test_reseal_policy_default() { + let all = ResealPolicy { own: true, external: true }; + assert_eq!(all, ResealPolicy::default()); + } +} diff --git a/parity/presale.rs b/parity/presale.rs new file mode 100644 index 000000000..51d9cd37f --- /dev/null +++ b/parity/presale.rs @@ -0,0 +1,43 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use ethcore::ethstore::{PresaleWallet, EthStore}; +use ethcore::ethstore::dir::DiskDirectory; +use ethcore::account_provider::AccountProvider; +use helpers::{password_prompt, password_from_file}; + +#[derive(Debug, PartialEq)] +pub struct ImportWallet { + pub iterations: u32, + pub path: String, + pub wallet_path: String, + pub password_file: Option, +} + +pub fn execute(cmd: ImportWallet) -> Result { + let password: String = match cmd.password_file { + Some(file) => try!(password_from_file(file)), + None => try!(password_prompt()), + }; + + let dir = Box::new(DiskDirectory::create(cmd.path).unwrap()); + let secret_store = Box::new(EthStore::open_with_iterations(dir, cmd.iterations).unwrap()); + let acc_provider = AccountProvider::new(secret_store); + let wallet = try!(PresaleWallet::open(cmd.wallet_path).map_err(|_| "Unable to open presale wallet.")); + let kp = try!(wallet.decrypt(&password).map_err(|_| "Invalid password.")); + let address = acc_provider.insert_account(kp.secret().clone(), &password).unwrap(); + Ok(format!("{:?}", address)) +} diff --git a/parity/rpc.rs b/parity/rpc.rs index 2b0599962..b30529c1c 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -14,40 +14,64 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
- -use std::str::FromStr; +use std::fmt; use std::sync::Arc; use std::net::SocketAddr; use util::panics::PanicHandler; -use die::*; +use ethcore_rpc::{RpcServerError, RpcServer as Server}; use jsonipc; use rpc_apis; -use std::fmt; +use rpc_apis::ApiSet; +use helpers::parity_ipc_path; -pub use ethcore_rpc::Server as RpcServer; -use ethcore_rpc::{RpcServerError, RpcServer as Server}; +pub use jsonipc::Server as IpcServer; +pub use ethcore_rpc::Server as HttpServer; +#[derive(Debug, PartialEq)] pub struct HttpConfiguration { pub enabled: bool, pub interface: String, pub port: u16, - pub apis: String, + pub apis: ApiSet, pub cors: Option>, pub hosts: Option>, } +impl Default for HttpConfiguration { + fn default() -> Self { + HttpConfiguration { + enabled: true, + interface: "127.0.0.1".into(), + port: 8545, + apis: ApiSet::UnsafeContext, + cors: None, + hosts: Some(Vec::new()), + } + } +} + +#[derive(Debug, PartialEq)] pub struct IpcConfiguration { pub enabled: bool, pub socket_addr: String, - pub apis: String, + pub apis: ApiSet, +} + +impl Default for IpcConfiguration { + fn default() -> Self { + IpcConfiguration { + enabled: true, + socket_addr: parity_ipc_path("$HOME/.parity/jsonrpc.ipc"), + apis: ApiSet::UnsafeContext, + } + } } impl fmt::Display for IpcConfiguration { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.enabled { - write!(f, "endpoint address [{}], api list [{}]", self.socket_addr, self.apis) - } - else { + write!(f, "endpoint address [{}], api list [{:?}]", self.socket_addr, self.apis) + } else { write!(f, "disabled") } } @@ -58,22 +82,19 @@ pub struct Dependencies { pub apis: Arc, } -pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Option { +pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Result, String> { if !conf.enabled { - return None; + return Ok(None); } - let apis = conf.apis.split(',').collect(); let url = format!("{}:{}", conf.interface, conf.port); - let addr = SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid JSONRPC listen host/port given.", url)); - - Some(setup_http_rpc_server(deps, &addr, conf.cors, conf.hosts, apis)) + let addr = try!(url.parse().map_err(|_| format!("Invalid JSONRPC listen host/port given: {}", url))); + Ok(Some(try!(setup_http_rpc_server(deps, &addr, conf.cors, conf.hosts, conf.apis)))) } -fn setup_rpc_server(apis: Vec<&str>, deps: &Dependencies) -> Server { - let apis = rpc_apis::from_str(apis); +fn setup_rpc_server(apis: ApiSet, deps: &Dependencies) -> Result { let server = Server::new(); - rpc_apis::setup_rpc(server, deps.apis.clone(), rpc_apis::ApiSet::List(apis)) + Ok(rpc_apis::setup_rpc(server, deps.apis.clone(), apis)) } pub fn setup_http_rpc_server( @@ -81,29 +102,28 @@ pub fn setup_http_rpc_server( url: &SocketAddr, cors_domains: Option>, allowed_hosts: Option>, - apis: Vec<&str>, -) -> RpcServer { - let server = setup_rpc_server(apis, dependencies); + apis: ApiSet +) -> Result { + let server = try!(setup_rpc_server(apis, dependencies)); let ph = dependencies.panic_handler.clone(); let start_result = server.start_http(url, cors_domains, allowed_hosts, ph); match start_result { - Err(RpcServerError::IoError(err)) => die_with_io_error("RPC", err), - Err(e) => die!("RPC: {:?}", e), - Ok(server) => server, + Err(RpcServerError::IoError(err)) => Err(format!("RPC io error: {}", err)), + Err(e) => Err(format!("RPC error: {:?}", e)), + Ok(server) => Ok(server), } } -pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Option { - if !conf.enabled { return None; } - let 
apis = conf.apis.split(',').collect(); - Some(setup_ipc_rpc_server(deps, &conf.socket_addr, apis)) +pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result, String> { + if !conf.enabled { return Ok(None); } + Ok(Some(try!(setup_ipc_rpc_server(deps, &conf.socket_addr, conf.apis)))) } -pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: Vec<&str>) -> jsonipc::Server { - let server = setup_rpc_server(apis, dependencies); +pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: ApiSet) -> Result { + let server = try!(setup_rpc_server(apis, dependencies)); match server.start_ipc(addr) { - Err(jsonipc::Error::Io(io_error)) => die_with_io_error("RPC", io_error), - Err(any_error) => die!("RPC: {:?}", any_error), - Ok(server) => server + Err(jsonipc::Error::Io(io_error)) => Err(format!("RPC io error: {}", io_error)), + Err(any_error) => Err(format!("Rpc error: {:?}", any_error)), + Ok(server) => Ok(server) } } diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index c2146795b..66a59a86b 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -15,20 +15,21 @@ // along with Parity. If not, see . use std::collections::BTreeMap; +use std::collections::HashSet; +use std::cmp::PartialEq; use std::str::FromStr; use std::sync::Arc; - -use ethsync::{ManageNetwork, SyncProvider}; +use util::RotatingLogger; +use util::network_settings::NetworkSettings; use ethcore::miner::{Miner, ExternalMiner}; use ethcore::client::Client; -use util::RotatingLogger; use ethcore::account_provider::AccountProvider; -use util::network_settings::NetworkSettings; - +use ethsync::{ManageNetwork, SyncProvider}; +use ethcore_rpc::Extendable; pub use ethcore_rpc::ConfirmationsQueue; -use ethcore_rpc::Extendable; +#[derive(Debug, PartialEq, Clone, Eq, Hash)] pub enum Api { Web3, Net, @@ -41,18 +42,8 @@ pub enum Api { Rpc, } -pub enum ApiError { - UnknownApi(String) -} - -pub enum ApiSet { - SafeContext, - UnsafeContext, - List(Vec), -} - impl FromStr for Api { - type Err = ApiError; + type Err = String; fn from_str(s: &str) -> Result { use self::Api::*; @@ -67,11 +58,41 @@ impl FromStr for Api { "ethcore_set" => Ok(EthcoreSet), "traces" => Ok(Traces), "rpc" => Ok(Rpc), - e => Err(ApiError::UnknownApi(e.into())), + api => Err(format!("Unknown api: {}", api)) } } } +#[derive(Debug)] +pub enum ApiSet { + SafeContext, + UnsafeContext, + List(HashSet), +} + +impl Default for ApiSet { + fn default() -> Self { + ApiSet::UnsafeContext + } +} + +impl PartialEq for ApiSet { + fn eq(&self, other: &Self) -> bool { + self.list_apis() == other.list_apis() + } +} + +impl FromStr for ApiSet { + type Err = String; + + fn from_str(s: &str) -> Result { + s.split(',') + .map(Api::from_str) + .collect::>() + .map(ApiSet::List) + } +} + pub struct Dependencies { pub signer_port: Option, pub signer_queue: Arc, @@ -106,31 +127,27 @@ fn to_modules(apis: &[Api]) -> BTreeMap { modules } -pub fn from_str(apis: Vec<&str>) -> Vec { - apis.into_iter() - .map(Api::from_str) - .collect::, ApiError>>() - .unwrap_or_else(|e| match e { - ApiError::UnknownApi(s) => die!("Unknown RPC API specified: {}", s), - }) -} - -fn list_apis(apis: ApiSet) -> Vec { - match apis { - ApiSet::List(apis) => apis, - ApiSet::UnsafeContext => { - vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Ethcore, Api::Traces, Api::Rpc] - }, - _ => { - vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Signer, Api::Ethcore, Api::EthcoreSet, Api::Traces, Api::Rpc] - }, +impl ApiSet { + pub fn list_apis(&self) -> HashSet { + match 
*self { + ApiSet::List(ref apis) => apis.clone(), + ApiSet::UnsafeContext => { + vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Ethcore, Api::Traces, Api::Rpc] + .into_iter().collect() + }, + _ => { + vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Signer, Api::Ethcore, Api::EthcoreSet, Api::Traces, Api::Rpc] + .into_iter().collect() + }, + } } } pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet) -> T { use ethcore_rpc::v1::*; - let apis = list_apis(apis); + // turned into a vector, because one of the cases requires &[] + let apis = apis.list_apis().into_iter().collect::<Vec<_>>(); for api in &apis { match *api { Api::Web3 => { @@ -140,8 +157,18 @@ pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet server.add_delegate(NetClient::new(&deps.sync).to_delegate()); }, Api::Eth => { - server.add_delegate(EthClient::new(&deps.client, &deps.sync, &deps.secret_store, &deps.miner, &deps.external_miner, deps.allow_pending_receipt_query).to_delegate()); - server.add_delegate(EthFilterClient::new(&deps.client, &deps.miner).to_delegate()); + let client = EthClient::new( + &deps.client, + &deps.sync, + &deps.secret_store, + &deps.miner, + &deps.external_miner, + deps.allow_pending_receipt_query + ); + server.add_delegate(client.to_delegate()); + + let filter_client = EthFilterClient::new(&deps.client, &deps.miner); + server.add_delegate(filter_client.to_delegate()); if deps.signer_port.is_some() { server.add_delegate(EthSigningQueueClient::new(&deps.signer_queue, &deps.client, &deps.miner, &deps.secret_store).to_delegate()); @@ -173,3 +200,46 @@ pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet } server } + +#[cfg(test)] +mod test { + use super::{Api, ApiSet}; + + #[test] + fn test_api_parsing() { + assert_eq!(Api::Web3, "web3".parse().unwrap()); + assert_eq!(Api::Net, "net".parse().unwrap()); + assert_eq!(Api::Eth, "eth".parse().unwrap()); + assert_eq!(Api::Personal, "personal".parse().unwrap()); + assert_eq!(Api::Signer, "signer".parse().unwrap()); + assert_eq!(Api::Ethcore, "ethcore".parse().unwrap()); + assert_eq!(Api::EthcoreSet, "ethcore_set".parse().unwrap()); + assert_eq!(Api::Traces, "traces".parse().unwrap()); + assert_eq!(Api::Rpc, "rpc".parse().unwrap()); + assert!("rp".parse::<Api>().is_err()); + } + + #[test] + fn test_api_set_default() { + assert_eq!(ApiSet::UnsafeContext, ApiSet::default()); + } + + #[test] + fn test_api_set_parsing() { + assert_eq!(ApiSet::List(vec![Api::Web3, Api::Eth].into_iter().collect()), "web3,eth".parse().unwrap()); + } + + #[test] + fn test_api_set_unsafe_context() { + let expected = vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Ethcore, Api::Traces, Api::Rpc] + .into_iter().collect(); + assert_eq!(ApiSet::UnsafeContext.list_apis(), expected); + } + + #[test] + fn test_api_set_safe_context() { + let expected = vec![Api::Web3, Api::Net, Api::Eth, Api::Personal, Api::Signer, Api::Ethcore, Api::EthcoreSet, Api::Traces, Api::Rpc] + .into_iter().collect(); + assert_eq!(ApiSet::SafeContext.list_apis(), expected); + } +} diff --git a/parity/run.rs b/parity/run.rs new file mode 100644 index 000000000..45b844ef0 --- /dev/null +++ b/parity/run.rs @@ -0,0 +1,342 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version.
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::{Arc, Mutex, Condvar}; +use std::path::Path; +use std::io::ErrorKind; +use ctrlc::CtrlC; +use fdlimit::raise_fd_limit; +use ethcore_logger::{Config as LogConfig, setup_log}; +use util::network_settings::NetworkSettings; +use util::{Colour, version, NetworkConfiguration, U256}; +use util::panics::{MayPanic, ForwardPanic, PanicHandler}; +use ethcore::client::{Mode, Switch, DatabaseCompactionProfile, VMType, ChainNotify}; +use ethcore::service::ClientService; +use ethcore::account_provider::AccountProvider; +use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions}; +use ethsync::SyncConfig; +use informant::Informant; +#[cfg(feature="ipc")] +use ethcore::client::ChainNotify; + +use rpc::{HttpServer, IpcServer, HttpConfiguration, IpcConfiguration}; +use signer::SignerServer; +use dapps::WebappServer; +use io_handler::ClientIoHandler; +use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras}; +use helpers::{to_client_config, execute_upgrades, passwords_from_files}; +use dir::Directories; +use cache::CacheConfig; +use dapps; +use signer; +use modules; +use rpc_apis; +use rpc; +use url; + +#[derive(Debug, PartialEq)] +pub struct RunCmd { + pub cache_config: CacheConfig, + pub dirs: Directories, + pub spec: SpecType, + pub pruning: Pruning, + /// Some if execution should be daemonized. Contains pid_file path. + pub daemon: Option, + pub logger_config: LogConfig, + pub miner_options: MinerOptions, + pub http_conf: HttpConfiguration, + pub ipc_conf: IpcConfiguration, + pub net_conf: NetworkConfiguration, + pub network_id: Option, + pub acc_conf: AccountsConfig, + pub gas_pricer: GasPricerConfig, + pub miner_extras: MinerExtras, + pub mode: Mode, + pub tracing: Switch, + pub compaction: DatabaseCompactionProfile, + pub wal: bool, + pub vm_type: VMType, + pub enable_network: bool, + pub geth_compatibility: bool, + pub signer_port: Option, + pub net_settings: NetworkSettings, + pub dapps_conf: dapps::Configuration, + pub signer_conf: signer::Configuration, + pub ui: bool, + pub name: String, + pub custom_bootnodes: bool, +} + +pub fn execute(cmd: RunCmd) -> Result<(), String> { + // create supervisor + let mut hypervisor = modules::hypervisor(); + + // increase max number of open files + raise_fd_limit(); + + // set up logger + let logger = try!(setup_log(&cmd.logger_config)); + + // set up panic handler + let panic_handler = PanicHandler::new_in_arc(); + + // create dirs used by parity + try!(cmd.dirs.create_dirs()); + + // load spec + let spec = try!(cmd.spec.spec()); + let fork_name = spec.fork_name.clone(); + + // load genesis hash + let genesis_hash = spec.genesis_header().hash(); + + // select pruning algorithm + let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, fork_name.as_ref()); + + // prepare client_path + let client_path = cmd.dirs.client_path(genesis_hash, fork_name.as_ref(), algorithm); + + // execute upgrades + try!(execute_upgrades(&cmd.dirs, genesis_hash, fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile())); + + // run in daemon mode + if let Some(pid_file) = cmd.daemon { + try!(daemonize(pid_file)); + } + + // display info about used pruning algorithm + 
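+ // (with --pruning=auto, Pruning::to_algorithm selects the algorithm whose + // existing database has the highest latest era; see parity/params.rs)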
info!("Starting {}", Colour::White.bold().paint(version())); + info!("Using state DB journalling strategy {}", Colour::White.bold().paint(algorithm.as_str())); + + // display warning about using experimental journaldb algorithm + if !algorithm.is_stable() { + warn!("Your chosen strategy is {}! You can re-run with --pruning to change.", Colour::Red.bold().paint("unstable")); + } + + // create sync config + let mut sync_config = SyncConfig::default(); + sync_config.network_id = match cmd.network_id { + Some(id) => id, + None => spec.network_id(), + }; + sync_config.fork_block = spec.fork_block().clone(); + + // prepare account provider + let account_provider = Arc::new(try!(prepare_account_provider(&cmd.dirs, cmd.acc_conf))); + + // create miner + let miner = Miner::new(cmd.miner_options, cmd.gas_pricer.into(), spec, Some(account_provider.clone())); + miner.set_author(cmd.miner_extras.author); + miner.set_gas_floor_target(cmd.miner_extras.gas_floor_target); + miner.set_gas_ceil_target(cmd.miner_extras.gas_ceil_target); + miner.set_extra_data(cmd.miner_extras.extra_data); + miner.set_transactions_limit(cmd.miner_extras.transactions_limit); + + // create client config + let client_config = to_client_config( + &cmd.cache_config, + &cmd.dirs, + genesis_hash, + cmd.mode, + cmd.tracing, + cmd.pruning, + cmd.compaction, + cmd.wal, + cmd.vm_type, + cmd.name, + fork_name.as_ref(), + ); + + // load spec + // TODO: make it clonable and load it only once! + let spec = try!(cmd.spec.spec()); + + // set up bootnodes + let mut net_conf = cmd.net_conf; + if !cmd.custom_bootnodes { + net_conf.boot_nodes = spec.nodes.clone(); + } + + // create client + let service = try!(ClientService::start( + client_config, + spec, + Path::new(&client_path), + miner.clone(), + ).map_err(|e| format!("Client service error: {:?}", e))); + + // forward panics from service + panic_handler.forward_from(&service); + + // take handle to client + let client = service.client(); + + // create external miner + let external_miner = Arc::new(ExternalMiner::default()); + + // create sync object + let (sync_provider, manage_network, chain_notify) = try!(modules::sync( + &mut hypervisor, sync_config, net_conf.into(), client.clone(), &cmd.logger_config, + ).map_err(|e| format!("Sync error: {}", e))); + + service.add_notify(chain_notify.clone()); + + // start network + if cmd.enable_network { + chain_notify.start(); + } + + // set up dependencies for rpc servers + let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies { + signer_port: cmd.signer_port, + signer_queue: Arc::new(rpc_apis::ConfirmationsQueue::default()), + client: client.clone(), + sync: sync_provider.clone(), + net: manage_network.clone(), + secret_store: account_provider.clone(), + miner: miner.clone(), + external_miner: external_miner.clone(), + logger: logger.clone(), + settings: Arc::new(cmd.net_settings.clone()), + allow_pending_receipt_query: !cmd.geth_compatibility, + net_service: manage_network.clone() + }); + + let dependencies = rpc::Dependencies { + panic_handler: panic_handler.clone(), + apis: deps_for_rpc_apis.clone(), + }; + + // start rpc servers + let http_server = try!(rpc::new_http(cmd.http_conf, &dependencies)); + let ipc_server = try!(rpc::new_ipc(cmd.ipc_conf, &dependencies)); + + let dapps_deps = dapps::Dependencies { + panic_handler: panic_handler.clone(), + apis: deps_for_rpc_apis.clone(), + }; + + // start dapps server + let dapps_server = try!(dapps::new(cmd.dapps_conf.clone(), dapps_deps)); + + let signer_deps = signer::Dependencies { + panic_handler:
panic_handler.clone(), + apis: deps_for_rpc_apis.clone(), + }; + + // start signer server + let signer_server = try!(signer::start(cmd.signer_conf, signer_deps)); + + let informant = Arc::new(Informant::new(service.client(), Some(sync_provider.clone()), Some(manage_network.clone()), cmd.logger_config.color)); + let info_notify: Arc<ChainNotify> = informant.clone(); + service.add_notify(info_notify); + let io_handler = Arc::new(ClientIoHandler { + client: service.client(), + info: informant, + sync: sync_provider.clone(), + net: manage_network.clone(), + accounts: account_provider.clone(), + }); + service.register_io_handler(io_handler).expect("Error registering IO handler"); + + // start ui + if cmd.ui { + if !cmd.dapps_conf.enabled { + return Err("Cannot use UI command with Dapps turned off.".into()) + } + url::open(&format!("http://{}:{}/", cmd.dapps_conf.interface, cmd.dapps_conf.port)); + } + + // Handle exit + wait_for_exit(panic_handler, http_server, ipc_server, dapps_server, signer_server); + + Ok(()) +} + +#[cfg(not(windows))] +fn daemonize(pid_file: String) -> Result<(), String> { + extern crate daemonize; + + daemonize::Daemonize::new() + .pid_file(pid_file) + .chown_pid_file(true) + .start() + .map(|_| ()) + .map_err(|e| format!("Couldn't daemonize: {}", e)) +} + +#[cfg(windows)] +fn daemonize(_pid_file: String) -> Result<(), String> { + Err("daemon is not supported on windows".into()) +} + +fn prepare_account_provider(dirs: &Directories, cfg: AccountsConfig) -> Result<AccountProvider, String> { + use ethcore::ethstore::{import_accounts, EthStore}; + use ethcore::ethstore::dir::{GethDirectory, DirectoryType, DiskDirectory}; + use ethcore::ethstore::Error; + + let passwords = try!(passwords_from_files(cfg.password_files)); + + if cfg.import_keys { + let t = if cfg.testnet { + DirectoryType::Testnet + } else { + DirectoryType::Main + }; + + let from = GethDirectory::open(t); + let to = DiskDirectory::create(dirs.keys.clone()).unwrap(); + match import_accounts(&from, &to) { + Ok(_) => {} + Err(Error::Io(ref io_err)) if io_err.kind() == ErrorKind::NotFound => {} + Err(err) => warn!("Importing Geth accounts failed: {}", err) + } + } + + let dir = Box::new(DiskDirectory::create(dirs.keys.clone()).unwrap()); + let account_service = AccountProvider::new(Box::new(EthStore::open_with_iterations(dir, cfg.iterations).unwrap())); + + for a in cfg.unlocked_accounts { + if passwords.iter().find(|p| account_service.unlock_account_permanently(a, (*p).clone()).is_ok()).is_none() { + return Err(format!("No password given to unlock account {}.
Pass the password using `--password`.", a)); + } + } + + Ok(account_service) +} + +fn wait_for_exit( + panic_handler: Arc, + _http_server: Option, + _ipc_server: Option, + _dapps_server: Option, + _signer_server: Option + ) { + let exit = Arc::new(Condvar::new()); + + // Handle possible exits + let e = exit.clone(); + CtrlC::set_handler(move || { e.notify_all(); }); + + // Handle panics + let e = exit.clone(); + panic_handler.on_panic(move |_reason| { e.notify_all(); }); + + // Wait for signal + let mutex = Mutex::new(()); + let _ = exit.wait(mutex.lock().unwrap()); + info!("Finishing work, please wait..."); +} diff --git a/parity/signer.rs b/parity/signer.rs index 4cf9b006d..d85e6e3e3 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -22,28 +22,38 @@ use util::panics::{ForwardPanic, PanicHandler}; use util::path::restrict_permissions_owner; use rpc_apis; use ethcore_signer as signer; -use die::*; - +use helpers::replace_home; pub use ethcore_signer::Server as SignerServer; const CODES_FILENAME: &'static str = "authcodes"; +#[derive(Debug, PartialEq)] pub struct Configuration { pub enabled: bool, pub port: u16, pub signer_path: String, } +impl Default for Configuration { + fn default() -> Self { + Configuration { + enabled: true, + port: 8180, + signer_path: replace_home("$HOME/.parity/signer"), + } + } +} + pub struct Dependencies { pub panic_handler: Arc, pub apis: Arc, } -pub fn start(conf: Configuration, deps: Dependencies) -> Option { +pub fn start(conf: Configuration, deps: Dependencies) -> Result, String> { if !conf.enabled { - None + Ok(None) } else { - Some(do_start(conf, deps)) + Ok(Some(try!(do_start(conf, deps)))) } } @@ -54,7 +64,13 @@ fn codes_path(path: String) -> PathBuf { p } -pub fn new_token(path: String) -> io::Result { +pub fn new_token(path: String) -> Result { + generate_new_token(path) + .map(|code| format!("This key code will authorise your System Signer UI: {}", Colour::White.bold().paint(code))) + .map_err(|err| format!("Error generating token: {:?}", err)) +} + +fn generate_new_token(path: String) -> io::Result { let path = codes_path(path); let mut codes = try!(signer::AuthCodes::from_file(&path)); let code = try!(codes.generate_new()); @@ -63,10 +79,10 @@ pub fn new_token(path: String) -> io::Result { Ok(code) } -fn do_start(conf: Configuration, deps: Dependencies) -> SignerServer { - let addr = format!("127.0.0.1:{}", conf.port).parse().unwrap_or_else(|_| { - die!("Invalid port specified: {}", conf.port) - }); +fn do_start(conf: Configuration, deps: Dependencies) -> Result { + let addr = try!(format!("127.0.0.1:{}", conf.port) + .parse() + .map_err(|_| format!("Invalid port specified: {}", conf.port))); let start_result = { let server = signer::ServerBuilder::new( @@ -78,11 +94,11 @@ fn do_start(conf: Configuration, deps: Dependencies) -> SignerServer { }; match start_result { - Err(signer::ServerError::IoError(err)) => die_with_io_error("Trusted Signer", err), - Err(e) => die!("Trusted Signer: {:?}", e), + Err(signer::ServerError::IoError(err)) => Err(format!("Trusted Signer Error: {}", err)), + Err(e) => Err(format!("Trusted Signer Error: {:?}", e)), Ok(server) => { deps.panic_handler.forward_from(&server); - server + Ok(server) }, } } diff --git a/parity/sync/main.rs b/parity/sync.rs similarity index 70% rename from parity/sync/main.rs rename to parity/sync.rs index 272248785..447f3678c 100644 --- a/parity/sync/main.rs +++ b/parity/sync.rs @@ -16,18 +16,9 @@ //! 
Parity sync service -extern crate ethcore_ipc_nano as nanoipc; -extern crate ethcore_ipc_hypervisor as hypervisor; -extern crate ethcore_ipc as ipc; -extern crate ctrlc; -#[macro_use] extern crate log; -extern crate ethsync; -extern crate rustc_serialize; -extern crate docopt; -extern crate ethcore; -extern crate ethcore_util as util; -extern crate ethcore_logger; - +use nanoipc; +use ipc; +use std; use std::sync::Arc; use hypervisor::{HypervisorServiceClient, SYNC_MODULE_ID, HYPERVISOR_IPC_URL}; use ctrlc::CtrlC; @@ -37,14 +28,13 @@ use ethcore::client::{RemoteClient, ChainNotify}; use ethsync::{SyncProvider, EthSync, ManageNetwork, ServiceConfiguration}; use std::thread; use nanoipc::IpcInterface; - -use ethcore_logger::Settings as LogSettings; -use ethcore_logger::setup_log; +use modules::service_urls; +use ethcore_logger::{Config as LogConfig, setup_log}; const USAGE: &'static str = " Ethcore sync service Usage: - sync [options] + parity sync [options] Options: -l --logging LOGGING Specify the logging level. Must conform to the same @@ -56,25 +46,18 @@ Usage: #[derive(Debug, RustcDecodable)] struct Args { - arg_client_url: String, flag_logging: Option<String>, flag_log_file: Option<String>, flag_no_color: bool, } impl Args { - pub fn log_settings(&self) -> LogSettings { - let mut settings = LogSettings::new(); - if self.flag_no_color || cfg!(windows) { - settings = settings.no_color(); + pub fn log_settings(&self) -> LogConfig { + LogConfig { + color: !(self.flag_no_color || cfg!(windows)), + mode: self.flag_logging.clone(), + file: self.flag_log_file.clone(), } - if let Some(ref init) = self.flag_logging { - settings = settings.init(init.to_owned()) - } - if let Some(ref file) = self.flag_log_file { - settings = settings.file(file.to_owned()) - } - settings } } @@ -90,29 +73,29 @@ fn run_service(addr: &str, stop_guard: Arc(&buffer).expect("Failed deserializing initialisation payload"); - let remote_client = nanoipc::init_client::>(&args.arg_client_url).unwrap(); + let remote_client = nanoipc::init_client::>(service_urls::CLIENT).unwrap(); remote_client.handshake().unwrap(); let stop = Arc::new(AtomicBool::new(false)); let sync = EthSync::new(service_config.sync, remote_client.service().clone(), service_config.net).unwrap(); - run_service("ipc:///tmp/parity-sync.ipc", stop.clone(), sync.clone() as Arc); - run_service("ipc:///tmp/parity-manage-net.ipc", stop.clone(), sync.clone() as Arc); - run_service("ipc:///tmp/parity-sync-notify.ipc", stop.clone(), sync.clone() as Arc); + run_service(service_urls::SYNC, stop.clone(), sync.clone() as Arc); + run_service(service_urls::NETWORK_MANAGER, stop.clone(), sync.clone() as Arc); + run_service(service_urls::SYNC_NOTIFY, stop.clone(), sync.clone() as Arc); + let hypervisor_client = nanoipc::init_client::>(HYPERVISOR_IPC_URL).unwrap(); hypervisor_client.handshake().unwrap(); diff --git a/rpc/src/v1/impls/traces.rs b/rpc/src/v1/impls/traces.rs index 71176df27..9ea045b0f 100644 --- a/rpc/src/v1/impls/traces.rs +++ b/rpc/src/v1/impls/traces.rs @@ -18,14 +18,21 @@ use std::sync::{Weak, Arc}; use jsonrpc_core::*; -use std::collections::BTreeMap; -//use util::H256; +use util::rlp::{UntrustedRlp, View}; use ethcore::client::{BlockChainClient, CallAnalytics, TransactionID, TraceId}; use ethcore::miner::MinerService; use ethcore::transaction::{Transaction as EthTransaction, SignedTransaction, Action}; use v1::traits::Traces; use v1::helpers::CallRequest as CRequest; -use v1::types::{TraceFilter, LocalizedTrace, Trace, BlockNumber, Index, CallRequest, Bytes, StateDiff,
VMTrace, H256}; +use v1::types::{TraceFilter, LocalizedTrace, BlockNumber, Index, CallRequest, Bytes, TraceResults, H256}; + +fn to_call_analytics(flags: Vec) -> CallAnalytics { + CallAnalytics { + transaction_tracing: flags.contains(&("trace".to_owned())), + vm_tracing: flags.contains(&("vmTrace".to_owned())), + state_diffing: flags.contains(&("stateDiff".to_owned())), + } +} /// Traces api implementation. pub struct TracesClient where C: BlockChainClient, M: MinerService { @@ -115,33 +122,40 @@ impl Traces for TracesClient where C: BlockChainClient + 'static, M: fn call(&self, params: Params) -> Result { try!(self.active()); - trace!(target: "jsonrpc", "call: {:?}", params); from_params(params) .and_then(|(request, flags)| { let request = CallRequest::into(request); - let flags: Vec = flags; - let analytics = CallAnalytics { - transaction_tracing: flags.contains(&("trace".to_owned())), - vm_tracing: flags.contains(&("vmTrace".to_owned())), - state_diffing: flags.contains(&("stateDiff".to_owned())), - }; let signed = try!(self.sign_call(request)); - let r = take_weak!(self.client).call(&signed, analytics); - if let Ok(executed) = r { - // TODO maybe add other stuff to this? - let mut ret = map!["output".to_owned() => to_value(&Bytes(executed.output)).unwrap()]; - if let Some(trace) = executed.trace { - ret.insert("trace".to_owned(), to_value(&Trace::from(trace)).unwrap()); - } - if let Some(vm_trace) = executed.vm_trace { - ret.insert("vmTrace".to_owned(), to_value(&VMTrace::from(vm_trace)).unwrap()); - } - if let Some(state_diff) = executed.state_diff { - ret.insert("stateDiff".to_owned(), to_value(&StateDiff::from(state_diff)).unwrap()); - } - return Ok(Value::Object(ret)) + match take_weak!(self.client).call(&signed, to_call_analytics(flags)) { + Ok(e) => to_value(&TraceResults::from(e)), + _ => Ok(Value::Null), + } + }) + } + + fn raw_transaction(&self, params: Params) -> Result { + try!(self.active()); + from_params::<(Bytes, _)>(params) + .and_then(|(raw_transaction, flags)| { + let raw_transaction = raw_transaction.to_vec(); + match UntrustedRlp::new(&raw_transaction).as_val() { + Ok(signed) => match take_weak!(self.client).call(&signed, to_call_analytics(flags)) { + Ok(e) => to_value(&TraceResults::from(e)), + _ => Ok(Value::Null), + }, + Err(_) => Err(Error::invalid_params()), + } + }) + } + + fn replay_transaction(&self, params: Params) -> Result { + try!(self.active()); + from_params::<(H256, _)>(params) + .and_then(|(transaction_hash, flags)| { + match take_weak!(self.client).replay(TransactionID::Hash(transaction_hash.into()), to_call_analytics(flags)) { + Ok(e) => to_value(&TraceResults::from(e)), + _ => Ok(Value::Null), } - Ok(Value::Null) }) } } diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index b27bff574..b04e48133 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -367,7 +367,7 @@ fn rpc_eth_pending_transaction_by_hash() { tester.miner.pending_transactions.lock().insert(H256::zero(), tx); } - let response = r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"creates":null,"from":"0x0f65fe9276bc9a24ae7083ae28e2660ef72df99e","gas":"0x5208","gasPrice":"0x01","hash":"0x41df922fd0d4766fcc02e161f8295ec28522f329ae487f14d811e4b64c8d6e31","input":"0x","nonce":"0x00","to":"0x095e7baea6a6c7c4c2dfeb977efac326af552d87","transactionIndex":null,"value":"0x0a"},"id":1}"#; + let response = 
r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"creates":null,"from":"0x0f65fe9276bc9a24ae7083ae28e2660ef72df99e","gas":"0x5208","gasPrice":"0x01","hash":"0x41df922fd0d4766fcc02e161f8295ec28522f329ae487f14d811e4b64c8d6e31","input":"0x","nonce":"0x00","raw":"0xf85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804","to":"0x095e7baea6a6c7c4c2dfeb977efac326af552d87","transactionIndex":null,"value":"0x0a"},"id":1}"#; let request = r#"{ "jsonrpc": "2.0", "method": "eth_getTransactionByHash", @@ -431,7 +431,7 @@ fn rpc_eth_call() { logs: vec![], contracts_created: vec![], output: vec![0x12, 0x34, 0xff], - trace: None, + trace: vec![], vm_trace: None, state_diff: None, }); @@ -466,7 +466,7 @@ fn rpc_eth_call_default_block() { logs: vec![], contracts_created: vec![], output: vec![0x12, 0x34, 0xff], - trace: None, + trace: vec![], vm_trace: None, state_diff: None, }); @@ -500,7 +500,7 @@ fn rpc_eth_estimate_gas() { logs: vec![], contracts_created: vec![], output: vec![0x12, 0x34, 0xff], - trace: None, + trace: vec![], vm_trace: None, state_diff: None, }); @@ -535,7 +535,7 @@ fn rpc_eth_estimate_gas_default_block() { logs: vec![], contracts_created: vec![], output: vec![0x12, 0x34, 0xff], - trace: None, + trace: vec![], vm_trace: None, state_diff: None, }); diff --git a/rpc/src/v1/tests/mocked/ethcore.rs b/rpc/src/v1/tests/mocked/ethcore.rs index 5b88e8756..73875d131 100644 --- a/rpc/src/v1/tests/mocked/ethcore.rs +++ b/rpc/src/v1/tests/mocked/ethcore.rs @@ -39,6 +39,7 @@ fn settings() -> Arc { Arc::new(NetworkSettings { name: "mynode".to_owned(), chain: "testchain".to_owned(), + min_peers: 25, max_peers: 25, network_port: 30303, rpc_enabled: true, diff --git a/rpc/src/v1/traits/traces.rs b/rpc/src/v1/traits/traces.rs index 45fa916be..64d16c5b4 100644 --- a/rpc/src/v1/traits/traces.rs +++ b/rpc/src/v1/traits/traces.rs @@ -35,6 +35,12 @@ pub trait Traces: Sized + Send + Sync + 'static { /// Executes the given call and returns a number of possible traces for it. fn call(&self, _: Params) -> Result; + /// Executes the given raw transaction and returns a number of possible traces for it. + fn raw_transaction(&self, _: Params) -> Result; + + /// Executes the transaction with the given hash and returns a number of possible traces for it. + fn replay_transaction(&self, _: Params) -> Result; + /// Should be used to convert object to io delegate. 
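Note: the `to_call_analytics` helper above gives `trace_call`, `trace_rawTransaction` and `trace_replayTransaction` one shared way of turning the RPC's list of string flags into analytics options. A self-contained sketch of that mapping (CallAnalytics is re-declared locally here instead of being imported from ethcore, and the JSON-RPC plumbing is omitted):

    #[derive(Debug, PartialEq)]
    struct CallAnalytics {
        transaction_tracing: bool,
        vm_tracing: bool,
        state_diffing: bool,
    }

    fn to_call_analytics(flags: Vec<String>) -> CallAnalytics {
        CallAnalytics {
            transaction_tracing: flags.contains(&"trace".to_owned()),
            vm_tracing: flags.contains(&"vmTrace".to_owned()),
            state_diffing: flags.contains(&"stateDiff".to_owned()),
        }
    }

    fn main() {
        // Unknown flags are simply ignored; absent flags default to false.
        let analytics = to_call_analytics(vec!["trace".into(), "stateDiff".into()]);
        assert_eq!(analytics, CallAnalytics {
            transaction_tracing: true,
            vm_tracing: false,
            state_diffing: true,
        });
    }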
fn to_delegate(self) -> IoDelegate { let mut delegate = IoDelegate::new(Arc::new(self)); @@ -43,6 +49,8 @@ pub trait Traces: Sized + Send + Sync + 'static { delegate.add_method("trace_transaction", Traces::transaction_traces); delegate.add_method("trace_block", Traces::block_traces); delegate.add_method("trace_call", Traces::call); + delegate.add_method("trace_rawTransaction", Traces::raw_transaction); + delegate.add_method("trace_replayTransaction", Traces::replay_transaction); delegate } diff --git a/rpc/src/v1/types/block.rs b/rpc/src/v1/types/block.rs index c1de9a276..08fe37c61 100644 --- a/rpc/src/v1/types/block.rs +++ b/rpc/src/v1/types/block.rs @@ -103,7 +103,7 @@ mod tests { fn test_serialize_block_transactions() { let t = BlockTransactions::Full(vec![Transaction::default()]); let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"[{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x00","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x00","gasPrice":"0x00","gas":"0x00","input":"0x","creates":null}]"#); + assert_eq!(serialized, r#"[{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x00","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x00","gasPrice":"0x00","gas":"0x00","input":"0x","creates":null,"raw":"0x"}]"#); let t = BlockTransactions::Hashes(vec![H256::default().into()]); let serialized = serde_json::to_string(&t).unwrap(); diff --git a/rpc/src/v1/types/mod.rs.in b/rpc/src/v1/types/mod.rs.in index ecbe6d9a0..75f78906b 100644 --- a/rpc/src/v1/types/mod.rs.in +++ b/rpc/src/v1/types/mod.rs.in @@ -42,6 +42,6 @@ pub use self::transaction::Transaction; pub use self::transaction_request::{TransactionRequest, TransactionConfirmation, TransactionModification}; pub use self::call_request::CallRequest; pub use self::receipt::Receipt; -pub use self::trace::{Trace, LocalizedTrace, StateDiff, VMTrace}; +pub use self::trace::{LocalizedTrace, TraceResults}; pub use self::trace_filter::TraceFilter; pub use self::uint::U256; diff --git a/rpc/src/v1/types/trace.rs b/rpc/src/v1/types/trace.rs index 187a9a9b0..b48fb88bf 100644 --- a/rpc/src/v1/types/trace.rs +++ b/rpc/src/v1/types/trace.rs @@ -17,10 +17,12 @@ use std::collections::BTreeMap; use serde::{Serialize, Serializer}; use ethcore::trace::trace; -use ethcore::trace::{Trace as EthTrace, LocalizedTrace as EthLocalizedTrace}; +use ethcore::trace::{FlatTrace, LocalizedTrace as EthLocalizedTrace}; use ethcore::trace as et; use ethcore::state_diff; use ethcore::account_diff; +use ethcore::executed; +use ethcore::client::Executed; use util::Uint; use v1::types::{Bytes, H160, H256, U256}; @@ -193,6 +195,7 @@ impl From for AccountDiff { } } +#[derive(Debug)] /// Serde-friendly `StateDiff` shadow. pub struct StateDiff(BTreeMap); @@ -233,6 +236,34 @@ impl From for Create { } } +/// Call type. 
+#[derive(Debug, Serialize)] +pub enum CallType { + /// None + #[serde(rename="none")] + None, + /// Call + #[serde(rename="call")] + Call, + /// Call code + #[serde(rename="callcode")] + CallCode, + /// Delegate call + #[serde(rename="delegatecall")] + DelegateCall, +} + +impl From for CallType { + fn from(c: executed::CallType) -> Self { + match c { + executed::CallType::None => CallType::None, + executed::CallType::Call => CallType::Call, + executed::CallType::CallCode => CallType::CallCode, + executed::CallType::DelegateCall => CallType::DelegateCall, + } + } +} + /// Call response #[derive(Debug, Serialize)] pub struct Call { @@ -246,6 +277,9 @@ gas: U256, /// Input data input: Bytes, + /// The type of the call. + #[serde(rename="callType")] + call_type: CallType, } impl From for Call { @@ -256,6 +290,7 @@ value: c.value.into(), gas: c.gas.into(), input: c.input.into(), + call_type: c.call_type.into(), } } } @@ -423,23 +458,50 @@ impl From for LocalizedTrace { /// Trace #[derive(Debug, Serialize)] pub struct Trace { - /// Depth within the call trace tree. - depth: usize, + /// Trace address + #[serde(rename="traceAddress")] + trace_address: Vec, + /// Subtraces + subtraces: U256, /// Action action: Action, /// Result result: Res, - /// Subtraces - subtraces: Vec, } -impl From for Trace { - fn from(t: EthTrace) -> Self { +impl From for Trace { + fn from(t: FlatTrace) -> Self { Trace { - depth: t.depth.into(), + trace_address: t.trace_address.into_iter().map(Into::into).collect(), + subtraces: t.subtraces.into(), action: t.action.into(), result: t.result.into(), - subtraces: t.subs.into_iter().map(Into::into).collect(), + } + } +} + +#[derive(Debug, Serialize)] +/// Results of executing a transaction with tracing enabled. +pub struct TraceResults { + /// The output of the call/create + pub output: Vec, + /// The transaction trace. + pub trace: Vec, + /// The virtual machine trace. + #[serde(rename="vmTrace")] + pub vm_trace: Option, + /// The state diff. 
+ #[serde(rename="stateDiff")] + pub state_diff: Option, +} + +impl From for TraceResults { + fn from(t: Executed) -> Self { + TraceResults { + output: t.output.into(), + trace: t.trace.into_iter().map(Into::into).collect(), + vm_trace: t.vm_trace.map(Into::into), + state_diff: t.state_diff.map(Into::into), } } } @@ -451,6 +513,18 @@ mod tests { use v1::types::{Bytes, U256, H256, H160}; use super::*; + #[test] + fn should_serialize_trace_results() { + let r = TraceResults { + output: vec![0x60], + trace: vec![], + vm_trace: None, + state_diff: None, + }; + let serialized = serde_json::to_string(&r).unwrap(); + assert_eq!(serialized, r#"{"output":[96],"trace":[],"vmTrace":null,"stateDiff":null}"#); + } + #[test] fn test_trace_serialize() { let t = LocalizedTrace { @@ -460,6 +534,7 @@ mod tests { value: U256::from(6), gas: U256::from(7), input: Bytes::new(vec![0x12, 0x34]), + call_type: CallType::Call, }), result: Res::Call(CallResult { gas_used: U256::from(8), @@ -473,7 +548,7 @@ mod tests { block_hash: H256::from(14), }; let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"action":{"call":{"from":"0x0000000000000000000000000000000000000004","to":"0x0000000000000000000000000000000000000005","value":"0x06","gas":"0x07","input":"0x1234"}},"result":{"call":{"gasUsed":"0x08","output":"0x5678"}},"traceAddress":["0x0a"],"subtraces":"0x01","transactionPosition":"0x0b","transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":"0x0d","blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); + assert_eq!(serialized, r#"{"action":{"call":{"from":"0x0000000000000000000000000000000000000004","to":"0x0000000000000000000000000000000000000005","value":"0x06","gas":"0x07","input":"0x1234","callType":{"call":[]}}},"result":{"call":{"gasUsed":"0x08","output":"0x5678"}},"traceAddress":["0x0a"],"subtraces":"0x01","transactionPosition":"0x0b","transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":"0x0d","blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); } #[test] @@ -549,6 +624,7 @@ mod tests { value: U256::from(3), gas: U256::from(4), input: vec![0x12, 0x34].into(), + call_type: CallType::Call, }), Action::Create(Create { from: H160::from(5), value: U256::from(6), @@ -557,7 +633,7 @@ mod tests { })]; let serialized = serde_json::to_string(&actions).unwrap(); - assert_eq!(serialized, r#"[{"call":{"from":"0x0000000000000000000000000000000000000001","to":"0x0000000000000000000000000000000000000002","value":"0x03","gas":"0x04","input":"0x1234"}},{"create":{"from":"0x0000000000000000000000000000000000000005","value":"0x06","gas":"0x07","init":"0x5678"}}]"#); + assert_eq!(serialized, r#"[{"call":{"from":"0x0000000000000000000000000000000000000001","to":"0x0000000000000000000000000000000000000002","value":"0x03","gas":"0x04","input":"0x1234","callType":{"call":[]}}},{"create":{"from":"0x0000000000000000000000000000000000000005","value":"0x06","gas":"0x07","init":"0x5678"}}]"#); } #[test] diff --git a/rpc/src/v1/types/transaction.rs b/rpc/src/v1/types/transaction.rs index cb554b172..812c006e4 100644 --- a/rpc/src/v1/types/transaction.rs +++ b/rpc/src/v1/types/transaction.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
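Note: the `Trace` rework above replaces the old recursive shape (`depth` plus nested `subs`) with ethcore's flat representation, where each entry carries its position in the call tree as a `trace_address` path and a `subtraces` count. A minimal sketch of that flattening idea, using plain stand-in types rather than the real `FlatTrace` (which also carries the action and result):

    struct Node { children: Vec<Node> }
    struct Flat { trace_address: Vec<usize>, subtraces: usize }

    // Depth-first walk: each node is emitted with the path of child indices
    // that leads to it, so consumers can rebuild the tree without recursion.
    fn flatten(node: &Node, addr: Vec<usize>, out: &mut Vec<Flat>) {
        out.push(Flat { trace_address: addr.clone(), subtraces: node.children.len() });
        for (i, child) in node.children.iter().enumerate() {
            let mut child_addr = addr.clone();
            child_addr.push(i);
            flatten(child, child_addr, out);
        }
    }

    fn main() {
        // A root call with two subcalls; the first subcall nests one more.
        let tree = Node { children: vec![
            Node { children: vec![Node { children: vec![] }] },
            Node { children: vec![] },
        ] };
        let mut out = Vec::new();
        flatten(&tree, Vec::new(), &mut out);
        let addrs: Vec<Vec<usize>> = out.iter().map(|f| f.trace_address.clone()).collect();
        assert_eq!(addrs, vec![vec![], vec![0], vec![0, 0], vec![1]]);
        assert_eq!(out[0].subtraces, 2);
    }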
+use util::rlp::encode; use ethcore::contract_address; use ethcore::transaction::{LocalizedTransaction, Action, SignedTransaction}; use v1::types::{Bytes, H160, H256, U256}; @@ -49,6 +50,8 @@ pub struct Transaction { pub input: Bytes, /// Creates contract pub creates: Option, + /// Raw transaction data + pub raw: Bytes, } impl From for Transaction { @@ -72,6 +75,7 @@ impl From for Transaction { Action::Create => Some(contract_address(&t.sender().unwrap(), &t.nonce).into()), Action::Call(_) => None, }, + raw: encode(&t.signed).to_vec().into(), } } } @@ -97,6 +101,7 @@ impl From for Transaction { Action::Create => Some(contract_address(&t.sender().unwrap(), &t.nonce).into()), Action::Call(_) => None, }, + raw: encode(&t).to_vec().into(), } } } @@ -110,7 +115,7 @@ mod tests { fn test_transaction_serialize() { let t = Transaction::default(); let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x00","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x00","gasPrice":"0x00","gas":"0x00","input":"0x","creates":null}"#); + assert_eq!(serialized, r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x00","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x00","gasPrice":"0x00","gas":"0x00","input":"0x","creates":null,"raw":"0x"}"#); } } diff --git a/scripts/cov.sh b/scripts/cov.sh index 49ef97c09..036cf9c80 100755 --- a/scripts/cov.sh +++ b/scripts/cov.sh @@ -37,7 +37,9 @@ src/tests,\ util/json-tests,\ util/src/network/tests,\ ethcore/src/evm/tests,\ -ethstore/tests\ +ethstore/tests,\ +target/debug/build,\ +target/release/build\ " rm -rf $KCOV_TARGET diff --git a/scripts/targets.sh b/scripts/targets.sh index ee743f840..331353c3f 100644 --- a/scripts/targets.sh +++ b/scripts/targets.sh @@ -11,4 +11,5 @@ export TARGETS=" -p ethkey \ -p ethstore \ -p ethsync \ + -p ethcore-ipc \ -p parity" diff --git a/sync/src/api.rs b/sync/src/api.rs index 99429c232..5c0e028e3 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -20,6 +20,7 @@ use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, Peer NetworkConfiguration as BasicNetworkConfiguration, NonReservedPeerMode}; use util::{TimerToken, U256, H256, UtilError, Secret, Populatable}; use ethcore::client::{BlockChainClient, ChainNotify}; +use ethcore::header::BlockNumber; use io::NetSyncIo; use chain::{ChainSync, SyncStatus}; use std::net::{SocketAddr, AddrParseError}; @@ -38,6 +39,8 @@ pub struct SyncConfig { pub max_download_ahead_blocks: usize, /// Network ID pub network_id: U256, + /// Fork block to check + pub fork_block: Option<(BlockNumber, H256)>, } impl Default for SyncConfig { @@ -45,6 +48,7 @@ impl Default for SyncConfig { SyncConfig { max_download_ahead_blocks: 20000, network_id: U256::from(1), + fork_block: None, } } } @@ -228,8 +232,10 @@ pub struct NetworkConfiguration { pub boot_nodes: Vec, /// Use provided node key instead of default pub use_secret: Option, - /// Number of connected peers to maintain - pub ideal_peers: u32, + /// Max number of connected peers to maintain + pub max_peers: u32, + /// Min number of connected peers to maintain + pub min_peers: u32, /// List of reserved node addresses. pub reserved_nodes: Vec, /// The non-reserved peer mode. 
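Note: the `SyncConfig` extension above makes the fork checkpoint an optional `(block number, hash)` pair, absent by default. A condensed sketch with stand-in types (no ethcore/util imports):

    type BlockNumber = u64;
    #[derive(Debug)]
    struct H256([u8; 32]); // stand-in for util::H256

    #[derive(Debug)]
    struct SyncConfig {
        max_download_ahead_blocks: usize,
        network_id: u64,
        fork_block: Option<(BlockNumber, H256)>,
    }

    impl Default for SyncConfig {
        fn default() -> Self {
            SyncConfig {
                max_download_ahead_blocks: 20_000,
                network_id: 1,
                fork_block: None, // no fork check unless a checkpoint is configured
            }
        }
    }

    fn main() {
        let mut config = SyncConfig::default();
        // Hypothetical checkpoint; real code would carry the known fork hash.
        config.fork_block = Some((1_920_000, H256([0u8; 32])));
        println!("{:?}", config);
    }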
@@ -249,7 +255,8 @@ impl NetworkConfiguration { discovery_enabled: self.discovery_enabled, boot_nodes: self.boot_nodes, use_secret: self.use_secret, - ideal_peers: self.ideal_peers, + max_peers: self.max_peers, + min_peers: self.min_peers, reserved_nodes: self.reserved_nodes, non_reserved_mode: if self.allow_non_reserved { NonReservedPeerMode::Accept } else { NonReservedPeerMode::Deny }, }) @@ -267,7 +274,8 @@ impl From for NetworkConfiguration { discovery_enabled: other.discovery_enabled, boot_nodes: other.boot_nodes, use_secret: other.use_secret, - ideal_peers: other.ideal_peers, + max_peers: other.max_peers, + min_peers: other.min_peers, reserved_nodes: other.reserved_nodes, allow_non_reserved: match other.non_reserved_mode { NonReservedPeerMode::Accept => true, _ => false } , } diff --git a/sync/src/blocks.rs b/sync/src/blocks.rs index b48085d43..e691f8b76 100644 --- a/sync/src/blocks.rs +++ b/sync/src/blocks.rs @@ -120,13 +120,22 @@ impl BlockCollection { if let Some(head) = head { match self.blocks.get(&head) { Some(block) if block.body.is_none() && !self.downloading_bodies.contains(&head) => { + self.downloading_bodies.insert(head.clone()); needed_bodies.push(head.clone()); } _ => (), } } } - self.downloading_bodies.extend(needed_bodies.iter()); + for h in self.header_ids.values() { + if needed_bodies.len() >= count { + break; + } + if !self.downloading_bodies.contains(h) { + needed_bodies.push(h.clone()); + self.downloading_bodies.insert(h.clone()); + } + } needed_bodies } @@ -286,6 +295,7 @@ impl BlockCollection { self.parents.insert(info.parent_hash.clone(), hash.clone()); self.blocks.insert(hash.clone(), block); + trace!(target: "sync", "New header: {}", hash.hex()); Ok(hash) } diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 8a2e75383..1eb1bf2a4 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -111,11 +111,11 @@ const MAX_NODE_DATA_TO_SEND: usize = 1024; const MAX_RECEIPTS_TO_SEND: usize = 1024; const MAX_RECEIPTS_HEADERS_TO_SEND: usize = 256; const MAX_HEADERS_TO_REQUEST: usize = 128; -const MAX_BODIES_TO_REQUEST: usize = 64; +const MAX_BODIES_TO_REQUEST: usize = 128; const MIN_PEERS_PROPAGATION: usize = 4; const MAX_PEERS_PROPAGATION: usize = 128; const MAX_PEER_LAG_PROPAGATION: BlockNumber = 20; -const SUBCHAIN_SIZE: usize = 64; +const SUBCHAIN_SIZE: usize = 256; const MAX_ROUND_PARENTS: usize = 32; const MAX_NEW_HASHES: usize = 64; const MAX_TX_TO_IMPORT: usize = 512; @@ -137,6 +137,7 @@ const RECEIPTS_PACKET: u8 = 0x10; const HEADERS_TIMEOUT_SEC: f64 = 15f64; const BODIES_TIMEOUT_SEC: f64 = 5f64; +const FORK_HEADER_TIMEOUT_SEC: f64 = 3f64; #[derive(Copy, Clone, Eq, PartialEq, Debug)] /// Sync state @@ -191,6 +192,7 @@ impl SyncStatus { /// Peer data type requested enum PeerAsking { Nothing, + ForkHeader, BlockHeaders, BlockBodies, Heads, @@ -221,6 +223,14 @@ struct PeerInfo { ask_time: f64, /// Pending request is expired and result should be ignored expired: bool, + /// Peer fork confirmed + confirmed: bool, +} + +impl PeerInfo { + fn is_available(&self) -> bool { + self.confirmed && !self.expired + } } /// Blockchain sync handler. 
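Note: `PeerInfo::is_available` above gates every sync decision on fork confirmation, and the `ForkHeader` handshake in the hunks that follow asks each newly connected peer for the single header at the configured fork block, confirming the peer on an empty reply (it may simply be behind) or on the expected hash, and disconnecting it on a mismatch. A condensed sketch of both pieces, with a stand-in hash type in place of RLP and sha3:

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct H256(u64); // stand-in for util::H256

    struct PeerInfo { expired: bool, confirmed: bool }

    impl PeerInfo {
        fn is_available(&self) -> bool {
            self.confirmed && !self.expired
        }
    }

    // The ForkHeader reply check: an empty response confirms (the peer may
    // simply be behind); a single header confirms only on the expected hash.
    fn check_fork(peer: &mut PeerInfo, expected: H256, headers: &[H256]) -> bool {
        match headers {
            [] => { peer.confirmed = true; true }
            [h] if *h == expected => { peer.confirmed = true; true }
            _ => false, // mismatch: caller disconnects the peer
        }
    }

    fn main() {
        let expected = H256(0xf0);
        let mut peer = PeerInfo { expired: false, confirmed: false };
        assert!(!peer.is_available()); // not confirmed yet: excluded from sync
        assert!(check_fork(&mut peer, expected, &[H256(0xf0)]));
        assert!(peer.is_available());
        let mut bad = PeerInfo { expired: false, confirmed: false };
        assert!(!check_fork(&mut bad, expected, &[H256(0xbad)]));
    }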
@@ -254,6 +264,8 @@ pub struct ChainSync { round_parents: VecDeque<(H256, H256)>, /// Network ID network_id: U256, + /// Optional fork block to check + fork_block: Option<(BlockNumber, H256)>, } type RlpResponseResult = Result, PacketDecodeError>; @@ -277,6 +289,7 @@ impl ChainSync { round_parents: VecDeque::new(), _max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks), network_id: config.network_id, + fork_block: config.fork_block, }; sync.reset(); sync @@ -293,8 +306,8 @@ highest_block_number: self.highest_block.map(|n| max(n, self.last_imported_block)), blocks_received: if self.last_imported_block > self.starting_block { self.last_imported_block - self.starting_block } else { 0 }, blocks_total: match self.highest_block { Some(x) if x > self.starting_block => x - self.starting_block, _ => 0 }, - num_peers: self.peers.len(), - num_active_peers: self.peers.values().filter(|p| p.asking != PeerAsking::Nothing).count(), + num_peers: self.peers.values().filter(|p| p.confirmed).count(), + num_active_peers: self.peers.values().filter(|p| p.confirmed && p.asking != PeerAsking::Nothing).count(), mem_used: self.blocks.heap_size() + self.peers.heap_size_of_children() @@ -316,7 +329,7 @@ p.asking_blocks.clear(); p.asking_hash = None; // mark any pending requests as expired - if p.asking != PeerAsking::Nothing { + if p.asking != PeerAsking::Nothing && p.confirmed { p.expired = true; } } @@ -370,6 +383,7 @@ asking_hash: None, ask_time: 0f64, expired: false, + confirmed: self.fork_block.is_none(), }; trace!(target: "sync", "New peer {} (protocol: {}, network: {:?}, difficulty: {:?}, latest:{}, genesis:{})", peer_id, peer.protocol_version, peer.network_id, peer.difficulty, peer.latest_hash, peer.genesis); @@ -397,18 +411,43 @@ self.peers.insert(peer_id.clone(), peer); self.active_peers.insert(peer_id.clone()); debug!(target: "sync", "Connected {}:{}", peer_id, io.peer_info(peer_id)); - self.sync_peer(io, peer_id, false); + if let Some((fork_block, _)) = self.fork_block { + self.request_headers_by_number(io, peer_id, fork_block, 1, 0, false, PeerAsking::ForkHeader); + } else { + self.sync_peer(io, peer_id, false); + } Ok(()) } #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] /// Called by peer once it has new block headers during sync fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + let confirmed = match self.peers.get_mut(&peer_id) { + Some(ref mut peer) if peer.asking == PeerAsking::ForkHeader => { + let item_count = r.item_count(); + if item_count == 0 || (item_count == 1 && try!(r.at(0)).as_raw().sha3() == self.fork_block.unwrap().1) { + trace!(target: "sync", "{}: Confirmed peer", peer_id); + peer.asking = PeerAsking::Nothing; + peer.confirmed = true; + true + } else { + trace!(target: "sync", "{}: Fork mismatch", peer_id); + io.disconnect_peer(peer_id); + false + } + }, + _ => false, + }; + if confirmed { + self.sync_peer(io, peer_id, false); + return Ok(()); + } + self.clear_peer_download(peer_id); - let expected_hash = self.peers.get(&peer_id).and_then(|p| p.asking_hash); let expected_asking = if self.state == SyncState::ChainHead { PeerAsking::Heads } else { PeerAsking::BlockHeaders }; + let expected_hash = self.peers.get(&peer_id).and_then(|p| p.asking_hash); if !self.reset_peer_asking(peer_id, expected_asking) || expected_hash.is_none() { - trace!(target: "sync", "Ignored unexpected headers"); + trace!(target: 
"sync", "{}: Ignored unexpected headers", peer_id); self.continue_sync(io); return Ok(()); } @@ -474,14 +513,14 @@ // Disable the peer for this syncing round if it gives invalid chain if !valid_response { - trace!(target: "sync", "{} Deactivated for invalid headers response", peer_id); - self.deactivate_peer(io, peer_id); + trace!(target: "sync", "{} Disabled for invalid headers response", peer_id); + io.disable_peer(peer_id); } if headers.is_empty() { // Peer does not have any new subchain heads, deactivate it and try with another - trace!(target: "sync", "{} Deactivated for no data", peer_id); - self.deactivate_peer(io, peer_id); + trace!(target: "sync", "{} Disabled for no data", peer_id); + io.disable_peer(peer_id); } match self.state { SyncState::ChainHead => { @@ -692,7 +731,8 @@ /// Resume downloading fn continue_sync(&mut self, io: &mut SyncIo) { - let mut peers: Vec<(PeerId, U256)> = self.peers.iter().map(|(k, p)| (*k, p.difficulty.unwrap_or_else(U256::zero))).collect(); + let mut peers: Vec<(PeerId, U256)> = self.peers.iter().filter_map(|(k, p)| + if p.is_available() { Some((*k, p.difficulty.unwrap_or_else(U256::zero))) } else { None }).collect(); thread_rng().shuffle(&mut peers); //TODO: sort by rating trace!(target: "sync", "Syncing with {}/{} peers", self.active_peers.len(), peers.len()); for (p, _) in peers { @@ -700,7 +740,7 @@ self.sync_peer(io, p, false); } } - if self.state != SyncState::Waiting && !self.peers.values().any(|p| p.asking != PeerAsking::Nothing && !p.expired) { + if self.state != SyncState::Waiting && !self.peers.values().any(|p| p.asking != PeerAsking::Nothing && p.is_available()) { self.complete_sync(); } } @@ -726,7 +766,7 @@ } let (peer_latest, peer_difficulty) = { let peer = self.peers.get_mut(&peer_id).unwrap(); - if peer.asking != PeerAsking::Nothing { + if peer.asking != PeerAsking::Nothing || !peer.is_available() { return; } if self.state == SyncState::Waiting { @@ -754,7 +794,9 @@ // Request subchain headers trace!(target: "sync", "Starting sync with better chain"); let last = self.last_imported_hash.clone(); - self.request_headers_by_hash(io, peer_id, &last, SUBCHAIN_SIZE, MAX_HEADERS_TO_REQUEST - 1, false, PeerAsking::Heads); + // Request MAX_HEADERS_TO_REQUEST - 2 headers apart so that + // MAX_HEADERS_TO_REQUEST would include headers for neighbouring subchains + self.request_headers_by_hash(io, peer_id, &last, SUBCHAIN_SIZE, MAX_HEADERS_TO_REQUEST - 2, false, PeerAsking::Heads); }, SyncState::Blocks | SyncState::NewBlocks => { if io.chain().block_status(BlockID::Hash(peer_latest)) == BlockStatus::Unknown { @@ -922,6 +964,17 @@ .asking_hash = Some(h.clone()); } + /// Request headers from a peer by block number + #[cfg_attr(feature="dev", allow(too_many_arguments))] + fn request_headers_by_number(&mut self, sync: &mut SyncIo, peer_id: PeerId, n: BlockNumber, count: usize, skip: usize, reverse: bool, asking: PeerAsking) { + trace!(target: "sync", "{} <- GetBlockHeaders: {} entries starting from {}", peer_id, count, n); + let mut rlp = RlpStream::new_list(4); + rlp.append(&n); + rlp.append(&count); + rlp.append(&skip); + rlp.append(&if reverse {1u32} else {0u32}); + self.send_request(sync, peer_id, asking, GET_BLOCK_HEADERS_PACKET, rlp.out()); + } /// Request block bodies from a peer fn request_bodies(&mut self, sync: &mut SyncIo, peer_id: PeerId, hashes: Vec) { let mut rlp = RlpStream::new_list(hashes.len()); @@ -975,6 +1028,10 @@ 
if !io.is_chain_queue_empty() { return Ok(()); } + if !self.peers.get(&peer_id).map_or(false, |p| p.confirmed) { + trace!(target: "sync", "{} Ignoring transactions from unconfirmed/unknown peer", peer_id); + return Ok(()); + } let mut item_count = r.item_count(); trace!(target: "sync", "{} -> Transactions ({} entries)", peer_id, item_count); @@ -1210,6 +1266,7 @@ PeerAsking::BlockHeaders | PeerAsking::Heads => (tick - peer.ask_time) > HEADERS_TIMEOUT_SEC, PeerAsking::BlockBodies => (tick - peer.ask_time) > BODIES_TIMEOUT_SEC, PeerAsking::Nothing => false, + PeerAsking::ForkHeader => (tick - peer.ask_time) > FORK_HEADER_TIMEOUT_SEC, }; if timeout { trace!(target:"sync", "Timeout {}", peer_id); @@ -1627,6 +1684,7 @@ asking_hash: None, ask_time: 0f64, expired: false, + confirmed: false, }); sync } diff --git a/sync/src/tests/chain.rs b/sync/src/tests/chain.rs index e15d804e2..2a84b0f99 100644 --- a/sync/src/tests/chain.rs +++ b/sync/src/tests/chain.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see <http://www.gnu.org/licenses/>. use util::*; -use ethcore::client::{BlockChainClient, BlockID, EachBlockWith}; +use ethcore::client::{TestBlockChainClient, BlockChainClient, BlockID, EachBlockWith}; use chain::{SyncState}; use super::helpers::*; @@ -95,6 +95,25 @@ fn forked() { assert_eq!(net.peer(2).chain.numbers.read().deref(), &peer1_chain); } +#[test] +fn net_hard_fork() { + ::env_logger::init().ok(); + let ref_client = TestBlockChainClient::new(); + ref_client.add_blocks(50, EachBlockWith::Uncle); + { + let mut net = TestNet::new_with_fork(2, Some((50, ref_client.block_hash(BlockID::Number(50)).unwrap()))); + net.peer_mut(0).chain.add_blocks(100, EachBlockWith::Uncle); + net.sync(); + assert_eq!(net.peer(1).chain.chain_info().best_block_number, 100); + } + { + let mut net = TestNet::new_with_fork(2, Some((50, ref_client.block_hash(BlockID::Number(50)).unwrap()))); + net.peer_mut(0).chain.add_blocks(100, EachBlockWith::Nothing); + net.sync(); + assert_eq!(net.peer(1).chain.chain_info().best_block_number, 0); + } +} + #[test] fn restart() { let mut net = TestNet::new(3); diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index d5fda2e70..8e4511e54 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -16,6 +16,7 @@ use util::*; use ethcore::client::{TestBlockChainClient, BlockChainClient}; +use ethcore::header::BlockNumber; use io::SyncIo; use chain::ChainSync; use ::SyncConfig; @@ -89,13 +90,19 @@ pub struct TestNet { impl TestNet { pub fn new(n: usize) -> TestNet { + Self::new_with_fork(n, None) + } + + pub fn new_with_fork(n: usize, fork: Option<(BlockNumber, H256)>) -> TestNet { let mut net = TestNet { peers: Vec::new(), started: false, }; for _ in 0..n { let chain = TestBlockChainClient::new(); - let sync = ChainSync::new(SyncConfig::default(), &chain); + let mut config = SyncConfig::default(); + config.fork_block = fork; + let sync = ChainSync::new(config, &chain); net.peers.push(TestPeer { sync: RwLock::new(sync), chain: chain, diff --git a/test.sh b/test.sh index af51fd3fd..18a7bb6b6 100755 --- a/test.sh +++ b/test.sh @@ -14,5 +14,5 @@ case $1 in esac . ./scripts/targets.sh -cargo test --release --verbose $FEATURES $TARGETS $1 \ +cargo test --release $FEATURES $TARGETS $1 \ diff --git a/util/bigint/src/hash.rs b/util/bigint/src/hash.rs index 9c5081b93..3658f3489 100644 --- a/util/bigint/src/hash.rs +++ b/util/bigint/src/hash.rs @@ -62,7 +62,7 @@ pub trait FixedHash: Sized + FromStr + Default + DerefMut { /// Return `s` without the `0x` at the beginning of it, if any. 
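Note: the `clean_0x` rewrite just below is behaviour-preserving; `str::starts_with` simply replaces the manual length check and slice comparison. A standalone copy:

    pub fn clean_0x(s: &str) -> &str {
        if s.starts_with("0x") { &s[2..] } else { s }
    }

    fn main() {
        assert_eq!(clean_0x("0x1234"), "1234");
        assert_eq!(clean_0x("1234"), "1234");
        assert_eq!(clean_0x("0"), "0");
    }

The `from_dec_str` correction further below is in the same spirit but does change behaviour: the old predicate `b < 48 && b > 57` can never be true for any byte, so the validation silently accepted every input, while `!value.bytes().all(|b| b >= 48 && b <= 57)` actually rejects non-digit characters, as the new `"0x11"` test demonstrates.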
pub fn clean_0x(s: &str) -> &str { - if s.len() >= 2 && &s[0..2] == "0x" { + if s.starts_with("0x") { &s[2..] } else { s @@ -436,13 +436,13 @@ macro_rules! impl_hash { } } - impl<'a> From<&'a str> for $from { - fn from(s: &'a str) -> $from { - use std::str::FromStr; + impl From<&'static str> for $from { + fn from(s: &'static str) -> $from { + let s = clean_0x(s); if s.len() % 2 == 1 { - $from::from_str(&("0".to_owned() + &(clean_0x(s).to_owned()))[..]).unwrap_or_else(|_| $from::new()) + $from::from_str(&("0".to_owned() + s)).unwrap() } else { - $from::from_str(clean_0x(s)).unwrap_or_else(|_| $from::new()) + $from::from_str(s).unwrap() } } } @@ -620,8 +620,6 @@ mod tests { assert_eq!(H64::from(0x1234567890abcdef), H64::from("0x1234567890abcdef")); assert_eq!(H64::from(0x1234567890abcdef), H64::from("1234567890abcdef")); assert_eq!(H64::from(0x234567890abcdef), H64::from("0x234567890abcdef")); - // too short. - assert_eq!(H64::from(0), H64::from("0x34567890abcdef")); } #[test] diff --git a/util/bigint/src/uint.rs b/util/bigint/src/uint.rs index e3c27f80d..766fa33d3 100644 --- a/util/bigint/src/uint.rs +++ b/util/bigint/src/uint.rs @@ -565,7 +565,7 @@ macro_rules! construct_uint { impl Uint for $name { fn from_dec_str(value: &str) -> Result { - if value.bytes().any(|b| b < 48 && b > 57) { + if !value.bytes().all(|b| b >= 48 && b <= 57) { return Err(FromDecStrErr::InvalidCharacter) } @@ -1788,6 +1788,7 @@ mod tests { assert_eq!(U256::from_dec_str("10").unwrap(), U256::from(10u64)); assert_eq!(U256::from_dec_str("1024").unwrap(), U256::from(1024u64)); assert_eq!(U256::from_dec_str("115792089237316195423570985008687907853269984665640564039457584007913129639936"), Err(FromDecStrErr::InvalidLength)); + assert_eq!(U256::from_dec_str("0x11"), Err(FromDecStrErr::InvalidCharacter)); } #[test] diff --git a/util/fdlimit/src/lib.rs b/util/fdlimit/src/lib.rs index e659bb8c9..92c403058 100644 --- a/util/fdlimit/src/lib.rs +++ b/util/fdlimit/src/lib.rs @@ -1,18 +1,19 @@ // Copyright 2015, 2016 Ethcore (UK) Ltd. // This file is part of Parity. -// +// // Parity is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// +// // Parity is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -// +// // You should have received a copy of the GNU General Public License // along with Parity. 
If not, see <http://www.gnu.org/licenses/>. -extern crate libc; + extern crate libc; -pub mod raise_fd_limit; +mod raise_fd_limit; pub use raise_fd_limit::raise_fd_limit; diff --git a/util/fdlimit/src/raise_fd_limit.rs b/util/fdlimit/src/raise_fd_limit.rs index 92127da35..d0539fda9 100644 --- a/util/fdlimit/src/raise_fd_limit.rs +++ b/util/fdlimit/src/raise_fd_limit.rs @@ -15,70 +15,74 @@ /// #[cfg(any(target_os = "macos", target_os = "ios"))] #[allow(non_camel_case_types)] -pub unsafe fn raise_fd_limit() { - use libc; - use std::cmp; - use std::io; - use std::mem::size_of_val; - use std::ptr::null_mut; +pub fn raise_fd_limit() { + use libc; + use std::cmp; + use std::io; + use std::mem::size_of_val; + use std::ptr::null_mut; - static CTL_KERN: libc::c_int = 1; - static KERN_MAXFILESPERPROC: libc::c_int = 29; + unsafe { + static CTL_KERN: libc::c_int = 1; + static KERN_MAXFILESPERPROC: libc::c_int = 29; - // The strategy here is to fetch the current resource limits, read the - // kern.maxfilesperproc sysctl value, and bump the soft resource limit for - // maxfiles up to the sysctl value. + // The strategy here is to fetch the current resource limits, read the + // kern.maxfilesperproc sysctl value, and bump the soft resource limit for + // maxfiles up to the sysctl value. - // Fetch the kern.maxfilesperproc value - let mut mib: [libc::c_int; 2] = [CTL_KERN, KERN_MAXFILESPERPROC]; - let mut maxfiles: libc::c_int = 0; - let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t; - if libc::sysctl(&mut mib[0], 2, &mut maxfiles as *mut _ as *mut _, &mut size, - null_mut(), 0) != 0 { - let err = io::Error::last_os_error(); - panic!("raise_fd_limit: error calling sysctl: {}", err); - } + // Fetch the kern.maxfilesperproc value + let mut mib: [libc::c_int; 2] = [CTL_KERN, KERN_MAXFILESPERPROC]; + let mut maxfiles: libc::c_int = 0; + let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t; + if libc::sysctl(&mut mib[0], 2, &mut maxfiles as *mut _ as *mut _, &mut size, + null_mut(), 0) != 0 { + let err = io::Error::last_os_error(); + panic!("raise_fd_limit: error calling sysctl: {}", err); + } - // Fetch the current resource limits - let mut rlim = libc::rlimit{rlim_cur: 0, rlim_max: 0}; - if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) != 0 { - let err = io::Error::last_os_error(); - panic!("raise_fd_limit: error calling getrlimit: {}", err); - } + // Fetch the current resource limits + let mut rlim = libc::rlimit{rlim_cur: 0, rlim_max: 0}; + if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) != 0 { + let err = io::Error::last_os_error(); + panic!("raise_fd_limit: error calling getrlimit: {}", err); + } - // Bump the soft limit to the smaller of kern.maxfilesperproc and the hard - // limit - rlim.rlim_cur = cmp::min(maxfiles as libc::rlim_t, rlim.rlim_max); + // Bump the soft limit to the smaller of kern.maxfilesperproc and the hard + // limit + rlim.rlim_cur = cmp::min(maxfiles as libc::rlim_t, rlim.rlim_max); - // Set our newly-increased resource limit - if libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) != 0 { - let err = io::Error::last_os_error(); - panic!("raise_fd_limit: error calling setrlimit: {}", err); - } + // Set our newly-increased resource limit + if libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) != 0 { + let err = io::Error::last_os_error(); + panic!("raise_fd_limit: error calling setrlimit: {}", err); + } + } } #[cfg(any(target_os = "linux"))] #[allow(non_camel_case_types)] -pub unsafe fn raise_fd_limit() { - use libc; - use std::io; +pub fn raise_fd_limit() { + use libc; + use std::io; - // 
Fetch the current resource limits - let mut rlim = libc::rlimit{rlim_cur: 0, rlim_max: 0}; - if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) != 0 { - let err = io::Error::last_os_error(); - panic!("raise_fd_limit: error calling getrlimit: {}", err); - } + unsafe { + // Fetch the current resource limits + let mut rlim = libc::rlimit{rlim_cur: 0, rlim_max: 0}; + if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) != 0 { + let err = io::Error::last_os_error(); + panic!("raise_fd_limit: error calling getrlimit: {}", err); + } - // Set soft limit to hard limit - rlim.rlim_cur = rlim.rlim_max; + // Set soft limit to hard limit + rlim.rlim_cur = rlim.rlim_max; - // Set our newly-increased resource limit - if libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) != 0 { - let err = io::Error::last_os_error(); - panic!("raise_fd_limit: error calling setrlimit: {}", err); - } + // Set our newly-increased resource limit + if libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) != 0 { + let err = io::Error::last_os_error(); + panic!("raise_fd_limit: error calling setrlimit: {}", err); + } + } } #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "linux")))] -pub unsafe fn raise_fd_limit() {} +pub fn raise_fd_limit() {} diff --git a/util/src/bytes.rs b/util/src/bytes.rs index 7426609d6..dfd21fd90 100644 --- a/util/src/bytes.rs +++ b/util/src/bytes.rs @@ -195,7 +195,7 @@ pub trait Populatable { /// If `d` is smaller, will leave some bytes untouched. fn copy_raw(&mut self, d: &[u8]) { use std::io::Write; - self.as_slice_mut().write(&d).unwrap(); + self.as_slice_mut().write(d).unwrap(); } /// Copies the raw representation of an object `d` to `self`, overwriting as necessary. diff --git a/util/src/crypto.rs b/util/src/crypto.rs index c9b05a97f..9943f872f 100644 --- a/util/src/crypto.rs +++ b/util/src/crypto.rs @@ -275,7 +275,7 @@ pub mod ecdh { let publ = try!(key::PublicKey::from_slice(context, &pdata)); // no way to create SecretKey from raw byte array. 
let sec: &key::SecretKey = unsafe { ::std::mem::transmute(secret) }; - let shared = ecdh::SharedSecret::new_raw(context, &publ, &sec); + let shared = ecdh::SharedSecret::new_raw(context, &publ, sec); let mut s = crypto::Secret::new(); s.copy_from_slice(&shared[0..32]); diff --git a/util/src/error.rs b/util/src/error.rs index f5048445b..bc57cfc55 100644 --- a/util/src/error.rs +++ b/util/src/error.rs @@ -82,7 +82,7 @@ impl fmt::Display for UtilError { UtilError::BaseData(ref err) => f.write_fmt(format_args!("{}", err)), UtilError::Network(ref err) => f.write_fmt(format_args!("{}", err)), UtilError::Decoder(ref err) => f.write_fmt(format_args!("{}", err)), - UtilError::SimpleString(ref msg) => f.write_str(&msg), + UtilError::SimpleString(ref msg) => f.write_str(msg), UtilError::BadSize => f.write_str("Bad input size."), UtilError::Snappy(ref err) => f.write_fmt(format_args!("{}", err)), } diff --git a/util/src/io/service.rs b/util/src/io/service.rs index bfd63b04c..6e7dad4bb 100644 --- a/util/src/io/service.rs +++ b/util/src/io/service.rs @@ -25,7 +25,8 @@ use io::{IoError, IoHandler}; use io::worker::{Worker, Work, WorkType}; use panics::*; -use parking_lot::{Condvar, RwLock, Mutex}; +use parking_lot::{RwLock}; +use std::sync::{Condvar as SCondvar, Mutex as SMutex}; /// Timer ID pub type TimerToken = usize; @@ -169,7 +170,7 @@ pub struct IoManager where Message: Send + Sync { handlers: Slab>, HandlerId>, workers: Vec, worker_channel: chase_lev::Worker>, - work_ready: Arc, + work_ready: Arc, } impl IoManager where Message: Send + Sync + Clone + 'static { @@ -177,8 +178,8 @@ impl IoManager where Message: Send + Sync + Clone + 'static { pub fn start(panic_handler: Arc, event_loop: &mut EventLoop>) -> Result<(), UtilError> { let (worker, stealer) = chase_lev::deque(); let num_workers = 4; - let work_ready_mutex = Arc::new(Mutex::new(())); - let work_ready = Arc::new(Condvar::new()); + let work_ready_mutex = Arc::new(SMutex::new(())); + let work_ready = Arc::new(SCondvar::new()); let workers = (0..num_workers).map(|i| Worker::new( i, diff --git a/util/src/io/worker.rs b/util/src/io/worker.rs index a81cdca08..c96bdf007 100644 --- a/util/src/io/worker.rs +++ b/util/src/io/worker.rs @@ -23,7 +23,7 @@ use io::service::{HandlerId, IoChannel, IoContext}; use io::{IoHandler}; use panics::*; -use parking_lot::{Condvar, Mutex}; +use std::sync::{Condvar as SCondvar, Mutex as SMutex}; pub enum WorkType { Readable, @@ -44,9 +44,9 @@ pub struct Work { /// Sorts them ready for blockchain insertion. 
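Note: the `work_loop` change below swaps parking_lot's Condvar for `std::sync::Condvar`, whose `wait` consumes the `MutexGuard` and returns it instead of taking `&mut lock`. A self-contained sketch of the resulting wait/shutdown pattern (the work-stealing part is omitted):

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;

    fn main() {
        let wait = Arc::new(Condvar::new());
        let wait_mutex = Arc::new(Mutex::new(()));
        let deleting = Arc::new(AtomicBool::new(false));

        let (w, m, d) = (wait.clone(), wait_mutex.clone(), deleting.clone());
        let worker = thread::spawn(move || loop {
            {
                let lock = m.lock().unwrap();
                if d.load(Ordering::Acquire) { return; }
                let _ = w.wait(lock); // guard is moved in and dropped on wake
            }
            if d.load(Ordering::Acquire) { return; }
            // ... steal and run queued work here ...
        });

        // Shutdown: take the mutex first so the notification cannot be missed.
        let guard = wait_mutex.lock().unwrap();
        deleting.store(true, Ordering::Release);
        wait.notify_all();
        drop(guard);
        worker.join().unwrap();
    }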
pub struct Worker { thread: Option>, - wait: Arc, + wait: Arc, deleting: Arc, - wait_mutex: Arc>, + wait_mutex: Arc>, } impl Worker { @@ -54,8 +54,8 @@ pub fn new(index: usize, stealer: chase_lev::Stealer>, channel: IoChannel, - wait: Arc, - wait_mutex: Arc>, + wait: Arc, + wait_mutex: Arc>, panic_handler: Arc ) -> Worker where Message: Send + Sync + Clone + 'static { @@ -77,17 +77,17 @@ } fn work_loop(stealer: chase_lev::Stealer>, - channel: IoChannel, wait: Arc, - wait_mutex: Arc>, + channel: IoChannel, wait: Arc, + wait_mutex: Arc>, deleting: Arc) where Message: Send + Sync + Clone + 'static { loop { { - let mut lock = wait_mutex.lock(); + let lock = wait_mutex.lock().unwrap(); if deleting.load(AtomicOrdering::Acquire) { return; } - wait.wait(&mut lock); + let _ = wait.wait(lock); } if deleting.load(AtomicOrdering::Acquire) { @@ -123,7 +123,7 @@ impl Worker { impl Drop for Worker { fn drop(&mut self) { trace!(target: "shutdown", "[IoWorker] Closing..."); - let _ = self.wait_mutex.lock(); + let _ = self.wait_mutex.lock().unwrap(); self.deleting.store(true, AtomicOrdering::Release); self.wait.notify_all(); let thread = mem::replace(&mut self.thread, None).unwrap(); diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index 4bf377cf7..b8c5d1664 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -20,9 +20,9 @@ use common::*; use rlp::*; use hashdb::*; use memorydb::*; -use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, VERSION_KEY}; +use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; use super::traits::JournalDB; -use kvdb::{Database, DBTransaction, DatabaseConfig}; +use kvdb::{Database, DBTransaction}; #[cfg(test)] use std::env; @@ -30,9 +30,6 @@ /// Would be nice to use rocksdb columns for this eventually. const AUX_FLAG: u8 = 255; -/// Database version. -const DB_VERSION : u32 = 0x103; - /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay /// and latent-removal semantics. 
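Note: the journaldb changes that follow share one pattern: the backing `Database` becomes a shared `Arc` opened by the caller, and `commit` stages writes into a caller-supplied transaction instead of creating and flushing its own, so several components can contribute to one atomic batch. A reduced sketch with local stand-ins for the kvdb types:

    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};

    #[derive(Default)]
    struct Database { data: Mutex<HashMap<Vec<u8>, Vec<u8>>> }

    #[derive(Default)]
    struct DBTransaction { ops: Mutex<Vec<(Vec<u8>, Vec<u8>)>> }

    impl DBTransaction {
        fn put(&self, key: &[u8], value: &[u8]) {
            self.ops.lock().unwrap().push((key.to_vec(), value.to_vec()));
        }
    }

    impl Database {
        fn write(&self, batch: DBTransaction) {
            let mut data = self.data.lock().unwrap();
            for (k, v) in batch.ops.into_inner().unwrap() {
                data.insert(k, v);
            }
        }
    }

    struct ArchiveDB { backing: Arc<Database> }

    impl ArchiveDB {
        // Stage journal writes into the caller's batch; nothing hits disk here.
        fn commit(&self, batch: &DBTransaction, era: u64) {
            batch.put(b"latest-era", &era.to_le_bytes());
        }
        // Convenience path mirroring the commit_batch calls in the updated tests.
        fn commit_batch(&self, era: u64) {
            let batch = DBTransaction::default();
            self.commit(&batch, era);
            self.backing.write(batch);
        }
    }

    fn main() {
        let db = ArchiveDB { backing: Arc::new(Database::default()) };
        db.commit_batch(7);
        assert!(db.backing.data.lock().unwrap().contains_key(&b"latest-era"[..]));
    }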
/// @@ -44,28 +41,18 @@ pub struct ArchiveDB { overlay: MemoryDB, backing: Arc, latest_era: Option, + column: Option, } impl ArchiveDB { /// Create a new instance from file - pub fn new(path: &str, config: DatabaseConfig) -> ArchiveDB { - let backing = Database::open(&config, path).unwrap_or_else(|e| { - panic!("Error opening state db: {}", e); - }); - if !backing.is_empty() { - match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::(&v))) { - Ok(Some(DB_VERSION)) => {}, - v => panic!("Incompatible DB version, expected {}, got {:?}; to resolve, remove {} and restart.", DB_VERSION, v, path) - } - } else { - backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); - } - - let latest_era = backing.get(&LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::(&val)); + pub fn new(backing: Arc, col: Option) -> ArchiveDB { + let latest_era = backing.get(col, &LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::(&val)); ArchiveDB { overlay: MemoryDB::new(), - backing: Arc::new(backing), + backing: backing, latest_era: latest_era, + column: col, } } @@ -74,18 +61,19 @@ impl ArchiveDB { fn new_temp() -> ArchiveDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); - Self::new(dir.to_str().unwrap(), DatabaseConfig::default()) + let backing = Arc::new(Database::open_default(dir.to_str().unwrap()).unwrap()); + Self::new(backing, None) } fn payload(&self, key: &H256) -> Option { - self.backing.get(key).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) + self.backing.get(self.column, key).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) } } impl HashDB for ArchiveDB { fn keys(&self) -> HashMap { let mut ret: HashMap = HashMap::new(); - for (key, _) in self.backing.iter() { + for (key, _) in self.backing.iter(self.column) { let h = H256::from_slice(key.deref()); ret.insert(h, 1); } @@ -140,7 +128,7 @@ impl HashDB for ArchiveDB { let mut db_hash = hash.to_vec(); db_hash.push(AUX_FLAG); - self.backing.get(&db_hash) + self.backing.get(self.column, &db_hash) .expect("Low-level database error. Some issue with your hard disk?") .map(|v| v.to_vec()) } @@ -156,6 +144,7 @@ impl JournalDB for ArchiveDB { overlay: self.overlay.clone(), backing: self.backing.clone(), latest_era: self.latest_era, + column: self.column.clone(), }) } @@ -167,8 +156,7 @@ impl JournalDB for ArchiveDB { self.latest_era.is_none() } - fn commit(&mut self, now: u64, _: &H256, _: Option<(u64, H256)>) -> Result { - let batch = DBTransaction::new(); + fn commit(&mut self, batch: &DBTransaction, now: u64, _id: &H256, _end: Option<(u64, H256)>) -> Result { let mut inserts = 0usize; let mut deletes = 0usize; @@ -176,7 +164,7 @@ impl JournalDB for ArchiveDB { let (key, (value, rc)) = i; if rc > 0 { assert!(rc == 1); - batch.put(&key, &value).expect("Low-level database error. Some issue with your hard disk?"); + batch.put(self.column, &key, &value).expect("Low-level database error. Some issue with your hard disk?"); inserts += 1; } if rc < 0 { @@ -187,24 +175,27 @@ impl JournalDB for ArchiveDB { for (mut key, value) in self.overlay.drain_aux().into_iter() { key.push(AUX_FLAG); - batch.put(&key, &value).expect("Low-level database error. Some issue with your hard disk?"); + batch.put(self.column, &key, &value).expect("Low-level database error. 
Some issue with your hard disk?"); } if self.latest_era.map_or(true, |e| now > e) { - try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + try!(batch.put(self.column, &LATEST_ERA_KEY, &encode(&now))); self.latest_era = Some(now); } - try!(self.backing.write(batch)); Ok((inserts + deletes) as u32) } fn latest_era(&self) -> Option { self.latest_era } fn state(&self, id: &H256) -> Option { - self.backing.get_by_prefix(&id[0..DB_PREFIX_LEN]).map(|b| b.to_vec()) + self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.to_vec()) } fn is_pruned(&self) -> bool { false } + + fn backing(&self) -> &Arc { + &self.backing + } } #[cfg(test)] @@ -216,7 +207,7 @@ mod tests { use super::*; use hashdb::*; use journaldb::traits::JournalDB; - use kvdb::DatabaseConfig; + use kvdb::Database; #[test] fn insert_same_in_fork() { @@ -224,18 +215,18 @@ mod tests { let mut jdb = ArchiveDB::new_temp(); let x = jdb.insert(b"X"); - jdb.commit(1, &b"1".sha3(), None).unwrap(); - jdb.commit(2, &b"2".sha3(), None).unwrap(); - jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); - jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); jdb.remove(&x); - jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); let x = jdb.insert(b"X"); - jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); - jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); - jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + jdb.commit_batch(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + jdb.commit_batch(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); assert!(jdb.contains(&x)); } @@ -245,16 +236,16 @@ mod tests { // history is 3 let mut jdb = ArchiveDB::new_temp(); let h = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.contains(&h)); jdb.remove(&h); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); assert!(jdb.contains(&h)); - jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); assert!(jdb.contains(&h)); - jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.contains(&h)); - jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); } #[test] @@ -264,29 +255,29 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.remove(&foo); jdb.remove(&bar); let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); assert!(jdb.contains(&baz)); let foo = jdb.insert(b"foo"); jdb.remove(&baz); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, 
&b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&baz)); jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.contains(&foo)); - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); } #[test] @@ -296,22 +287,22 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.remove(&foo); let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); jdb.remove(&bar); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); assert!(jdb.contains(&baz)); - jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.contains(&foo)); } @@ -321,16 +312,16 @@ mod tests { let mut jdb = ArchiveDB::new_temp(); let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.contains(&foo)); jdb.remove(&foo); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); jdb.insert(b"foo"); assert!(jdb.contains(&foo)); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.contains(&foo)); - jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); assert!(jdb.contains(&foo)); } @@ -338,19 +329,24 @@ mod tests { fn fork_same_key() { // history is 1 let mut jdb = ArchiveDB::new_temp(); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); let foo = jdb.insert(b"foo"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); jdb.insert(b"foo"); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.contains(&foo)); - jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); assert!(jdb.contains(&foo)); } + fn new_db(dir: &Path) -> ArchiveDB { + let db = Database::open_default(dir.to_str().unwrap()).unwrap(); + ArchiveDB::new(Arc::new(db), None) + } + #[test] fn reopen() { let mut dir = ::std::env::temp_dir(); @@ -358,25 +354,25 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); foo }; { - let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); jdb.remove(&foo); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + 
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); } { - let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); } } @@ -386,27 +382,27 @@ mod tests { dir.push(H32::random().hex()); let foo = { - let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); // history is 1 let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); // foo is ancient history. jdb.insert(b"foo"); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); foo }; { - let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.contains(&foo)); jdb.remove(&foo); - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); - jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); } } @@ -415,23 +411,23 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); let (foo, _, _) = { - let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); jdb.remove(&foo); let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); jdb.remove(&bar); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); (foo, bar, baz) }; { - let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); - jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + let mut jdb = new_db(&dir); + jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.contains(&foo)); } } @@ -441,14 +437,14 @@ mod tests { let temp = ::devtools::RandomTempPath::new(); let key = { - let mut jdb = ArchiveDB::new(temp.as_str(), DatabaseConfig::default()); + let mut jdb = new_db(temp.as_path().as_path()); let key = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); key }; { - let jdb = ArchiveDB::new(temp.as_str(), DatabaseConfig::default()); + let jdb = new_db(temp.as_path().as_path()); let state = jdb.state(&key); assert!(state.is_some()); } diff --git a/util/src/journaldb/earlymergedb.rs b/util/src/journaldb/earlymergedb.rs index 45e9202b4..908c5cadf 100644 --- a/util/src/journaldb/earlymergedb.rs +++ b/util/src/journaldb/earlymergedb.rs @@ -20,9 +20,9 @@ use common::*; use rlp::*; use hashdb::*; use memorydb::*; -use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, VERSION_KEY}; +use 
super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; use super::traits::JournalDB; -use kvdb::{Database, DBTransaction, DatabaseConfig}; +use kvdb::{Database, DBTransaction}; #[cfg(test)] use std::env; @@ -66,33 +66,22 @@ pub struct EarlyMergeDB { backing: Arc<Database>, refs: Option<Arc<RwLock<HashMap<H256, RefInfo>>>>, latest_era: Option<u64>, + column: Option<u32>, } -const DB_VERSION : u32 = 0x003; const PADDING : [u8; 10] = [ 0u8; 10 ]; impl EarlyMergeDB { /// Create a new instance from file - pub fn new(path: &str, config: DatabaseConfig) -> EarlyMergeDB { - let backing = Database::open(&config, path).unwrap_or_else(|e| { - panic!("Error opening state db: {}", e); - }); - if !backing.is_empty() { - match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) { - Ok(Some(DB_VERSION)) => {}, - v => panic!("Incompatible DB version, expected {}, got {:?}; to resolve, remove {} and restart.", DB_VERSION, v, path) - } - } else { - backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); - } - - let (latest_era, refs) = EarlyMergeDB::read_refs(&backing); + pub fn new(backing: Arc<Database>, col: Option<u32>) -> EarlyMergeDB { + let (latest_era, refs) = EarlyMergeDB::read_refs(&backing, col); let refs = Some(Arc::new(RwLock::new(refs))); EarlyMergeDB { overlay: MemoryDB::new(), - backing: Arc::new(backing), + backing: backing, refs: refs, latest_era: latest_era, + column: col, } } @@ -101,7 +90,8 @@ impl EarlyMergeDB { fn new_temp() -> EarlyMergeDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); - Self::new(dir.to_str().unwrap(), DatabaseConfig::default()) + let backing = Arc::new(Database::open_default(dir.to_str().unwrap()).unwrap()); + Self::new(backing, None) } fn morph_key(key: &H256, index: u8) -> Bytes { @@ -111,13 +101,13 @@ impl EarlyMergeDB { } // The next three are valid only as long as there is an insert operation of `key` in the journal. - fn set_already_in(batch: &DBTransaction, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); } - fn reset_already_in(batch: &DBTransaction, key: &H256) { batch.delete(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); } - fn is_already_in(backing: &Database, key: &H256) -> bool { - backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some() + fn set_already_in(batch: &DBTransaction, col: Option<u32>, key: &H256) { batch.put(col, &Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); } + fn reset_already_in(batch: &DBTransaction, col: Option<u32>, key: &H256) { batch.delete(col, &Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); } + fn is_already_in(backing: &Database, col: Option<u32>, key: &H256) -> bool { + backing.get(col, &Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some() } - fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, refs: &mut HashMap<H256, RefInfo>, batch: &DBTransaction, trace: bool) { + fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>, batch: &DBTransaction, trace: bool) { for &(ref h, ref d) in inserts { if let Some(c) = refs.get_mut(h) { // already counting. increment. @@ -129,9 +119,9 @@ impl EarlyMergeDB { } // this is the first entry for this node in the journal. - if backing.get(h).expect("Low-level database error. 
Some issue with your hard disk?").is_some() { + if backing.get(col, h).expect("Low-level database error. Some issue with your hard disk?").is_some() { // already in the backing DB. start counting, and remember it was already in. - Self::set_already_in(batch, &h); + Self::set_already_in(batch, col, h); refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: true}); if trace { trace!(target: "jdb.fine", " insert({}): New to queue, in DB: Recording and inserting into queue", h); @@ -141,8 +131,8 @@ } // Gets removed when a key leaves the journal, so should never be set when we're placing a new key. //Self::reset_already_in(&h); - assert!(!Self::is_already_in(backing, &h)); - batch.put(h, d).expect("Low-level database error. Some issue with your hard disk?"); + assert!(!Self::is_already_in(backing, col, &h)); + batch.put(col, h, d).expect("Low-level database error. Some issue with your hard disk?"); refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: false}); if trace { trace!(target: "jdb.fine", " insert({}): New to queue, not in DB: Inserting into queue and DB", h); @@ -150,7 +140,7 @@ } } - fn replay_keys(inserts: &[H256], backing: &Database, refs: &mut HashMap<H256, RefInfo>) { + fn replay_keys(inserts: &[H256], backing: &Database, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>) { trace!(target: "jdb.fine", "replay_keys: inserts={:?}, refs={:?}", inserts, refs); for h in inserts { if let Some(c) = refs.get_mut(h) { @@ -161,12 +151,12 @@ // this is the first entry for this node in the journal. // it is initialised to 1 if it was already in. - refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: Self::is_already_in(backing, h)}); + refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: Self::is_already_in(backing, col, h)}); } trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs); } - fn remove_keys(deletes: &[H256], refs: &mut HashMap<H256, RefInfo>, batch: &DBTransaction, from: RemoveFrom, trace: bool) { + fn remove_keys(deletes: &[H256], refs: &mut HashMap<H256, RefInfo>, batch: &DBTransaction, col: Option<u32>, from: RemoveFrom, trace: bool) { // with a remove on {queue_refs: 1, in_archive: true}, we have two options: // - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive) // - convert to {queue_refs: 0, in_archive: true} (i.e. remove it from the conceptual queue) @@ -178,7 +168,7 @@ if let Some(c) = refs.get_mut(h) { if c.in_archive && from == RemoveFrom::Archive { c.in_archive = false; - Self::reset_already_in(batch, h); + Self::reset_already_in(batch, col, h); if trace { trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Reducing to queue only and recording", h); } @@ -196,14 +186,14 @@ match n { Some(RefInfo{queue_refs: 1, in_archive: true}) => { refs.remove(h); - Self::reset_already_in(batch, h); + Self::reset_already_in(batch, col, h); if trace { trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Removing from queue and leaving in archive", h); } } Some(RefInfo{queue_refs: 1, in_archive: false}) => { refs.remove(h); - batch.delete(h).expect("Low-level database error. Some issue with your hard disk?"); + batch.delete(col, h).expect("Low-level database error. Some issue with your hard disk?"); if trace { trace!(target: "jdb.fine", " remove({}): Not in archive, only 1 ref in queue: Removing from queue and DB", h); } @@ -211,7 +201,7 @@ None => { // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. //assert!(!Self::is_already_in(db, &h)); - batch.delete(h).expect("Low-level database error. Some issue with your hard disk?"); + batch.delete(col, h).expect("Low-level database error. Some issue with your hard disk?"); if trace { trace!(target: "jdb.fine", " remove({}): Not in queue - MUST BE IN ARCHIVE: Removing from DB", h); }
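Note: the new col parameter above only routes each read/write to a database column; the reference-counting semantics of insert_keys/remove_keys are unchanged. As a reading aid, here is a standalone, simplified model of the queue_refs/in_archive bookkeeping those functions maintain (an illustration, not code from the patch; it ignores the RemoveFrom distinction and the on-disk already-in marker):

use std::collections::HashMap;

#[derive(Debug, PartialEq)]
struct RefInfo { queue_refs: usize, in_archive: bool }

// First journalled insert of a key records whether the backing DB already held it;
// later inserts of the same key just bump the queue count.
fn insert_key(refs: &mut HashMap<u64, RefInfo>, h: u64, already_in_backing: bool) {
    let info = refs.entry(h).or_insert(RefInfo { queue_refs: 0, in_archive: already_in_backing });
    info.queue_refs += 1;
}

// Returns true when the caller should also delete the key from the backing DB.
fn remove_key(refs: &mut HashMap<u64, RefInfo>, h: u64) -> bool {
    match refs.remove(&h) {
        Some(RefInfo { queue_refs: 1, in_archive: true }) => false, // leave the archived copy
        Some(RefInfo { queue_refs: 1, in_archive: false }) => true, // last queue ref: delete
        Some(mut info) => { info.queue_refs -= 1; refs.insert(h, info); false }
        None => true, // never journalled, so it must be archived: delete directly
    }
}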
@@ -223,7 +213,7 @@ #[cfg(test)] fn can_reconstruct_refs(&self) -> bool { - let (latest_era, reconstructed) = Self::read_refs(&self.backing); + let (latest_era, reconstructed) = Self::read_refs(&self.backing, self.column); let refs = self.refs.as_ref().unwrap().write(); if *refs != reconstructed || latest_era != self.latest_era { let clean_refs = refs.iter().filter_map(|(k, v)| if reconstructed.get(k) == Some(v) {None} else {Some((k.clone(), v.clone()))}).collect::<HashMap<_, _>>(); @@ -236,18 +226,18 @@ } fn payload(&self, key: &H256) -> Option<Bytes> { - self.backing.get(key).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) + self.backing.get(self.column, key).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) } - fn read_refs(db: &Database) -> (Option<u64>, HashMap<H256, RefInfo>) { + fn read_refs(db: &Database, col: Option<u32>) -> (Option<u64>, HashMap<H256, RefInfo>) { let mut refs = HashMap::new(); let mut latest_era = None; - if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { + if let Some(val) = db.get(col, &LATEST_ERA_KEY).expect("Low-level database error.") { let mut era = decode::<u64>(&val); latest_era = Some(era); loop { let mut index = 0usize; - while let Some(rlp_data) = db.get({ + while let Some(rlp_data) = db.get(col, { let mut r = RlpStream::new_list(3); r.append(&era); r.append(&index); @@ -256,7 +246,7 @@ }).expect("Low-level database error.") { let rlp = Rlp::new(&rlp_data); let inserts: Vec<H256> = rlp.val_at(1); - Self::replay_keys(&inserts, db, &mut refs); + Self::replay_keys(&inserts, db, col, &mut refs); index += 1; }; if index == 0 || era == 0 { @@ -267,12 +257,12 @@ } (latest_era, refs) } - } +} impl HashDB for EarlyMergeDB { fn keys(&self) -> HashMap<H256, i32> { let mut ret: HashMap<H256, i32> = HashMap::new(); - for (key, _) in self.backing.iter() { + for (key, _) in self.backing.iter(self.column) { let h = H256::from_slice(key.deref()); ret.insert(h, 1); } @@ -321,11 +311,16 @@ impl JournalDB for EarlyMergeDB { backing: self.backing.clone(), refs: self.refs.clone(), latest_era: self.latest_era.clone(), + column: self.column.clone(), }) } fn is_empty(&self) -> bool { - self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() + self.backing.get(self.column, &LATEST_ERA_KEY).expect("Low level database error").is_none() + } + + fn backing(&self) -> &Arc<Database> { + &self.backing } fn latest_era(&self) -> Option<u64> { self.latest_era } @@ -338,11 +333,11 @@ } fn state(&self, id: &H256) -> Option<Bytes> { - self.backing.get_by_prefix(&id[0..DB_PREFIX_LEN]).map(|b| b.to_vec()) + self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.to_vec()) } #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] - fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> { + fn commit(&mut self, batch: &DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> { // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
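Note: this signature change is the heart of the patch: commit no longer creates and flushes its own transaction but appends to a caller-supplied batch, so all of a block's writes, across every column, can be flushed in one atomic database write. The commit_batch used throughout the tests is the convenience wrapper; its definition lives in journaldb::traits, outside this diff, but given the new backing() accessor it is presumably along these lines (hypothetical sketch, not the actual trait code):

fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
    let batch = DBTransaction::new();                        // one transaction per commit
    let records = try!(self.commit(&batch, now, id, end));   // journal into the batch
    try!(self.backing().write(batch));                       // single atomic flush
    Ok(records)
}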
@@ -389,13 +384,12 @@ impl JournalDB for EarlyMergeDB { // record new commit's details. let mut refs = self.refs.as_ref().unwrap().write(); - let batch = DBTransaction::new(); let trace = false; { let mut index = 0usize; let mut last; - while try!(self.backing.get({ + while try!(self.backing.get(self.column, { let mut r = RlpStream::new_list(3); r.append(&now); r.append(&index); @@ -436,15 +430,15 @@ r.begin_list(inserts.len()); inserts.iter().foreach(|&(k, _)| {r.append(&k);}); r.append(&removes); - Self::insert_keys(&inserts, &self.backing, &mut refs, &batch, trace); + Self::insert_keys(&inserts, &self.backing, self.column, &mut refs, &batch, trace); if trace { let ins = inserts.iter().map(|&(k, _)| k).collect::<Vec<_>>(); trace!(target: "jdb.ops", " Inserts: {:?}", ins); trace!(target: "jdb.ops", " Deletes: {:?}", removes); } - try!(batch.put(&last, r.as_raw())); + try!(batch.put(self.column, &last, r.as_raw())); if self.latest_era.map_or(true, |e| now > e) { - try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + try!(batch.put(self.column, &LATEST_ERA_KEY, &encode(&now))); self.latest_era = Some(now); } } @@ -453,7 +447,7 @@ if let Some((end_era, canon_id)) = end { let mut index = 0usize; let mut last; - while let Some(rlp_data) = try!(self.backing.get({ + while let Some(rlp_data) = try!(self.backing.get(self.column, { let mut r = RlpStream::new_list(3); r.append(&end_era); r.append(&index); @@ -470,7 +464,7 @@ if trace { trace!(target: "jdb.ops", " Expunging: {:?}", deletes); } - Self::remove_keys(&deletes, &mut refs, &batch, RemoveFrom::Archive, trace); + Self::remove_keys(&deletes, &mut refs, &batch, self.column, RemoveFrom::Archive, trace); if trace { trace!(target: "jdb.ops", " Finalising: {:?}", inserts); @@ -488,7 +482,7 @@ } Some( RefInfo{queue_refs: x, in_archive: false} ) => { // must set already in; , - Self::set_already_in(&batch, k); + Self::set_already_in(&batch, self.column, k); refs.insert(k.clone(), RefInfo{ queue_refs: x - 1, in_archive: true }); } Some( RefInfo{in_archive: true, ..} ) => { @@ -502,10 +496,10 @@ if trace { trace!(target: "jdb.ops", " Reverting: {:?}", inserts); } - Self::remove_keys(&inserts, &mut refs, &batch, RemoveFrom::Queue, trace); + Self::remove_keys(&inserts, &mut refs, &batch, self.column, RemoveFrom::Queue, trace); } - try!(batch.delete(&last)); + try!(batch.delete(self.column, &last)); index += 1; } if trace { @@ -513,10 +507,6 @@ } } - try!(self.backing.write(batch)); - - // Comment out for now. TODO: automatically enable in tests. 
- if trace { trace!(target: "jdb", "OK: {:?}", refs.clone()); } @@ -535,7 +525,7 @@ mod tests { use super::super::traits::JournalDB; use hashdb::*; use log::init_log; - use kvdb::DatabaseConfig; + use kvdb::{Database, DatabaseConfig}; #[test] fn insert_same_in_fork() { @@ -543,25 +533,25 @@ mod tests { let mut jdb = EarlyMergeDB::new_temp(); let x = jdb.insert(b"X"); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&x); - jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); let x = jdb.insert(b"X"); - jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + jdb.commit_batch(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + jdb.commit_batch(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&x)); @@ -571,17 +561,17 @@ mod tests { fn insert_older_era() { let mut jdb = EarlyMergeDB::new_temp(); let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0a".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0a".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let bar = jdb.insert(b"bar"); - jdb.commit(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); - jdb.commit(0, &b"0b".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0b".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); @@ -592,20 +582,20 @@ mod tests { // history is 3 let mut jdb = EarlyMergeDB::new_temp(); let h = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); jdb.remove(&h); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); - jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); - jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); - jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((1, 
b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&h)); } @@ -617,7 +607,7 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); @@ -625,7 +615,7 @@ mod tests { jdb.remove(&foo); jdb.remove(&bar); let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); @@ -633,20 +623,20 @@ mod tests { let foo = jdb.insert(b"foo"); jdb.remove(&baz); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&bar)); assert!(jdb.contains(&baz)); jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&bar)); assert!(!jdb.contains(&baz)); - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); assert!(!jdb.contains(&bar)); @@ -660,25 +650,25 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.remove(&foo); let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); assert!(jdb.contains(&baz)); - jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&baz)); @@ -691,115 +681,113 @@ mod tests { let mut jdb = EarlyMergeDB::new_temp(); let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); jdb.remove(&foo); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); assert!(jdb.contains(&foo)); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); - jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); } #[test] fn fork_same_key_one() { - let mut dir = 
::std::env::temp_dir(); - dir.push(H32::random().hex()); - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + let mut jdb = EarlyMergeDB::new_temp(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); - jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); } #[test] fn fork_same_key_other() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); - - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + let mut jdb = EarlyMergeDB::new_temp(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); - jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); } #[test] fn fork_ins_del_ins() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); - - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + let mut jdb = EarlyMergeDB::new_temp(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, 
&b"3b".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap(); + jdb.commit_batch(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } + fn new_db(path: &Path) -> EarlyMergeDB { + let config = DatabaseConfig::with_columns(Some(1)); + let backing = Arc::new(Database::open(&config, path.to_str().unwrap()).unwrap()); + EarlyMergeDB::new(backing, Some(0)) + } + #[test] fn reopen() { let mut dir = ::std::env::temp_dir(); @@ -807,27 +795,27 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); foo }; { - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); jdb.remove(&foo); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } { - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); } @@ -836,145 +824,136 @@ mod tests { #[test] fn insert_delete_insert_delete_insert_expunge() { init_log(); - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = EarlyMergeDB::new_temp(); // history is 4 let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), None).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); // expunge foo - jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } #[test] fn forked_insert_delete_insert_delete_insert_expunge() { init_log(); - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); - - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = EarlyMergeDB::new_temp(); // history is 4 let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, 
&b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(1, &b"1a".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1a".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(1, &b"1b".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1b".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(2, &b"2a".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2a".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(2, &b"2b".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2b".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(3, &b"3a".sha3(), None).unwrap(); + jdb.commit_batch(3, &b"3a".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(3, &b"3b".sha3(), None).unwrap(); + jdb.commit_batch(3, &b"3b".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); // expunge foo - jdb.commit(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap(); + jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } #[test] fn broken_assert() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); + let mut jdb = EarlyMergeDB::new_temp(); - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 1 let foo = jdb.insert(b"foo"); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. 
jdb.remove(&foo); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN + jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); jdb.remove(&foo); - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + jdb.commit_batch(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); } #[test] fn reopen_test() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); + let mut jdb = EarlyMergeDB::new_temp(); - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 4 let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(3, &b"3".sha3(), None).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); jdb.remove(&bar); - jdb.commit(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); jdb.insert(b"bar"); - jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -988,45 +967,48 @@ mod tests { let foo = b"foo".sha3(); { - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); // history is 1 jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. 
jdb.remove(&foo); - jdb.commit(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); jdb.insert(b"foo"); - jdb.commit(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); // incantation to reopen the db - }; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + }; { + let mut jdb = new_db(&dir); jdb.remove(&foo); - jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); // incantation to reopen the db - }; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + }; { + let mut jdb = new_db(&dir); - jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); // incantation to reopen the db - }; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + }; { + let mut jdb = new_db(&dir); - jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); + jdb.commit_batch(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); } @@ -1037,26 +1019,26 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); let (foo, bar, baz) = { - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); (foo, bar, baz) }; { - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); - jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + let mut jdb = new_db(&dir); + jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&baz)); diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs index 15508c8ba..bb79ed9a8 100644 --- a/util/src/journaldb/mod.rs +++ b/util/src/journaldb/mod.rs @@ -17,7 +17,7 @@ //! `JournalDB` interface and implementation. use common::*; -use kvdb::DatabaseConfig; +use kvdb::Database; /// Export the journaldb module. pub mod traits; @@ -30,7 +30,7 @@ mod refcounteddb; pub use self::traits::JournalDB; /// A journal database algorithm. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, PartialEq, Clone, Copy)] pub enum Algorithm { /// Keep all keys forever. 
Archive, @@ -60,28 +60,128 @@ impl Default for Algorithm { fn default() -> Algorithm { Algorithm::OverlayRecent } } +impl FromStr for Algorithm { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s { + "archive" => Ok(Algorithm::Archive), + "light" => Ok(Algorithm::EarlyMerge), + "fast" => Ok(Algorithm::OverlayRecent), + "basic" => Ok(Algorithm::RefCounted), + e => Err(format!("Invalid algorithm: {}", e)), + } + } +} + +impl Algorithm { + /// Returns static str describing journal database algorithm. + pub fn as_str(&self) -> &'static str { + match *self { + Algorithm::Archive => "archive", + Algorithm::EarlyMerge => "light", + Algorithm::OverlayRecent => "fast", + Algorithm::RefCounted => "basic", + } + } + + /// Returns static str describing journal database algorithm. + pub fn as_internal_name_str(&self) -> &'static str { + match *self { + Algorithm::Archive => "archive", + Algorithm::EarlyMerge => "earlymerge", + Algorithm::OverlayRecent => "overlayrecent", + Algorithm::RefCounted => "refcounted", + } + } + + /// Returns true if pruning strategy is stable + pub fn is_stable(&self) -> bool { + match *self { + Algorithm::Archive | Algorithm::OverlayRecent => true, + _ => false, + } + } + + /// Returns all algorithm types. + pub fn all_types() -> Vec<Algorithm> { + vec![Algorithm::Archive, Algorithm::EarlyMerge, Algorithm::OverlayRecent, Algorithm::RefCounted] + } +} + impl fmt::Display for Algorithm { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", match self { - &Algorithm::Archive => "archive", - &Algorithm::EarlyMerge => "earlymerge", - &Algorithm::OverlayRecent => "overlayrecent", - &Algorithm::RefCounted => "refcounted", - }) + write!(f, "{}", self.as_str()) } } /// Create a new `JournalDB` trait object. -pub fn new(path: &str, algorithm: Algorithm, config: DatabaseConfig) -> Box<JournalDB> { +pub fn new(backing: Arc<Database>, algorithm: Algorithm, col: Option<u32>) -> Box<JournalDB> { match algorithm { - Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path, config)), - Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(path, config)), - Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(path, config)), - Algorithm::RefCounted => Box::new(refcounteddb::RefCountedDB::new(path, config)), + Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(backing, col)), + Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(backing, col)), + Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(backing, col)), + Algorithm::RefCounted => Box::new(refcounteddb::RefCountedDB::new(backing, col)), } } // all keys must be at least 12 bytes const DB_PREFIX_LEN : usize = 12; const LATEST_ERA_KEY : [u8; DB_PREFIX_LEN] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; -const VERSION_KEY : [u8; DB_PREFIX_LEN] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; + +#[cfg(test)] +mod tests { + use super::Algorithm; + + #[test] + fn test_journal_algorithm_parsing() { + assert_eq!(Algorithm::Archive, "archive".parse().unwrap()); + assert_eq!(Algorithm::EarlyMerge, "light".parse().unwrap()); + assert_eq!(Algorithm::OverlayRecent, "fast".parse().unwrap()); + assert_eq!(Algorithm::RefCounted, "basic".parse().unwrap()); + } + + #[test] + fn test_journal_algorithm_printing() { + assert_eq!(Algorithm::Archive.to_string(), "archive".to_owned()); + assert_eq!(Algorithm::EarlyMerge.to_string(), "light".to_owned()); + assert_eq!(Algorithm::OverlayRecent.to_string(), "fast".to_owned()); + assert_eq!(Algorithm::RefCounted.to_string(), "basic".to_owned()); + } + + #[test] + fn test_journal_algorithm_is_stable() { + assert!(Algorithm::Archive.is_stable()); + assert!(Algorithm::OverlayRecent.is_stable()); + assert!(!Algorithm::EarlyMerge.is_stable()); + assert!(!Algorithm::RefCounted.is_stable()); + } + + #[test] + fn test_journal_algorithm_default() { + assert_eq!(Algorithm::default(), Algorithm::OverlayRecent); + } + + #[test] + fn test_journal_algorithm_all_types() { + // compiling should fail if some cases are not covered + let mut archive = 0; + let mut earlymerge = 0; + let mut overlayrecent = 0; + let mut refcounted = 0; + + for a in &Algorithm::all_types() { + match *a { + Algorithm::Archive => archive += 1, + Algorithm::EarlyMerge => earlymerge += 1, + Algorithm::OverlayRecent => overlayrecent += 1, + Algorithm::RefCounted => refcounted += 1, + } + } + + assert_eq!(archive, 1); + assert_eq!(earlymerge, 1); + assert_eq!(overlayrecent, 1); + assert_eq!(refcounted, 1); + } +} diff --git a/util/src/journaldb/overlayrecentdb.rs b/util/src/journaldb/overlayrecentdb.rs index a4d3005a8..186fd4040 100644 --- a/util/src/journaldb/overlayrecentdb.rs +++ b/util/src/journaldb/overlayrecentdb.rs @@ -20,8 +20,8 @@ use common::*; use rlp::*; use hashdb::*; use memorydb::*; -use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, VERSION_KEY}; -use kvdb::{Database, DBTransaction, DatabaseConfig}; +use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; +use kvdb::{Database, DBTransaction}; #[cfg(test)] use std::env; use super::JournalDB; @@ -61,6 +61,7 @@ pub struct OverlayRecentDB { transaction_overlay: MemoryDB, backing: Arc<Database>, journal_overlay: Arc<RwLock<JournalOverlay>>, + column: Option<u32>, } #[derive(PartialEq)] @@ -89,38 +90,22 @@ impl Clone for OverlayRecentDB { transaction_overlay: self.transaction_overlay.clone(), backing: self.backing.clone(), journal_overlay: self.journal_overlay.clone(), + column: self.column.clone(), } } } -const DB_VERSION : u32 = 0x203; const PADDING : [u8; 10] = [ 0u8; 10 ]; impl OverlayRecentDB { - /// Create a new instance from file - pub fn new(path: &str, config: DatabaseConfig) -> OverlayRecentDB { - Self::from_prefs(path, config) - } - - /// Create a new instance from file - pub fn from_prefs(path: &str, config: DatabaseConfig) -> OverlayRecentDB { - let backing = Database::open(&config, path).unwrap_or_else(|e| { - panic!("Error opening state db: {}", e); - }); - if !backing.is_empty() { - match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) { - Ok(Some(DB_VERSION)) => {} - v => panic!("Incompatible DB version, expected {}, got {:?}; to resolve, remove {} and restart.", DB_VERSION, v, path) - } - } else { - backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); - } - - let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&backing))); + /// Create a new instance. 
+ pub fn new(backing: Arc<Database>, col: Option<u32>) -> OverlayRecentDB { + let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&backing, col))); OverlayRecentDB { transaction_overlay: MemoryDB::new(), - backing: Arc::new(backing), + backing: backing, journal_overlay: journal_overlay, + column: col, } } @@ -129,31 +114,32 @@ pub fn new_temp() -> OverlayRecentDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); - Self::new(dir.to_str().unwrap(), DatabaseConfig::default()) + let backing = Arc::new(Database::open_default(dir.to_str().unwrap()).unwrap()); + Self::new(backing, None) } #[cfg(test)] fn can_reconstruct_refs(&self) -> bool { - let reconstructed = Self::read_overlay(&self.backing); + let reconstructed = Self::read_overlay(&self.backing, self.column); let journal_overlay = self.journal_overlay.read(); *journal_overlay == reconstructed } fn payload(&self, key: &H256) -> Option<Bytes> { - self.backing.get(key).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) + self.backing.get(self.column, key).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) } - fn read_overlay(db: &Database) -> JournalOverlay { + fn read_overlay(db: &Database, col: Option<u32>) -> JournalOverlay { let mut journal = HashMap::new(); let mut overlay = MemoryDB::new(); let mut count = 0; let mut latest_era = None; - if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { + if let Some(val) = db.get(col, &LATEST_ERA_KEY).expect("Low-level database error.") { let mut era = decode::<u64>(&val); latest_era = Some(era); loop { let mut index = 0usize; - while let Some(rlp_data) = db.get({ + while let Some(rlp_data) = db.get(col, { let mut r = RlpStream::new_list(3); r.append(&era); r.append(&index); @@ -193,7 +179,7 @@ #[inline] fn to_short_key(key: &H256) -> H256 { let mut k = H256::new(); - &mut k[0..DB_PREFIX_LEN].copy_from_slice(&key[0..DB_PREFIX_LEN]); + k[0..DB_PREFIX_LEN].copy_from_slice(&key[0..DB_PREFIX_LEN]); k } }
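Note: taken together, the mod.rs factory and the constructor changes above mean callers now open one shared kvdb::Database and hand each subsystem a column. A minimal wiring sketch using only calls that appear in this diff (the column count and index are illustrative, and error handling is elided):

fn wiring_demo(path: &str) {
    let config = DatabaseConfig::with_columns(Some(1));
    let backing = Arc::new(Database::open(&config, path).unwrap());
    let mut journal = journaldb::new(backing.clone(), Algorithm::OverlayRecent, Some(0));
    // One atomic batch per block, mirroring the commit() bodies in this file:
    let batch = DBTransaction::new();
    journal.commit(&batch, 0, &b"0".sha3(), None).unwrap();
    backing.write(batch).unwrap();
    // The Algorithm naming round-trip added in mod.rs: FromStr and Display now agree.
    let a: Algorithm = "fast".parse().unwrap();
    assert_eq!(a.to_string(), "fast");
    assert_eq!(a.as_internal_name_str(), "overlayrecent");
}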
@@ -212,21 +198,24 @@ impl JournalDB for OverlayRecentDB { } fn is_empty(&self) -> bool { - self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() + self.backing.get(self.column, &LATEST_ERA_KEY).expect("Low level database error").is_none() + } + + fn backing(&self) -> &Arc<Database> { + &self.backing } fn latest_era(&self) -> Option<u64> { self.journal_overlay.read().latest_era } fn state(&self, key: &H256) -> Option<Bytes> { let v = self.journal_overlay.read().backing_overlay.get(&OverlayRecentDB::to_short_key(key)).map(|v| v.to_vec()); - v.or_else(|| self.backing.get_by_prefix(&key[0..DB_PREFIX_LEN]).map(|b| b.to_vec())) + v.or_else(|| self.backing.get_by_prefix(self.column, &key[0..DB_PREFIX_LEN]).map(|b| b.to_vec())) } - fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> { + fn commit(&mut self, batch: &DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> { // record new commit's details. trace!("commit: #{} ({}), end era: {:?}", now, id, end); let mut journal_overlay = self.journal_overlay.write(); - let batch = DBTransaction::new(); { let mut r = RlpStream::new_list(3); let mut tx = self.transaction_overlay.drain(); @@ -249,9 +238,9 @@ k.append(&now); k.append(&index); k.append(&&PADDING[..]); - try!(batch.put(&k.drain(), r.as_raw())); + try!(batch.put(self.column, &k.drain(), r.as_raw())); if journal_overlay.latest_era.map_or(true, |e| now > e) { - try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + try!(batch.put(self.column, &LATEST_ERA_KEY, &encode(&now))); journal_overlay.latest_era = Some(now); } journal_overlay.journal.entry(now).or_insert_with(Vec::new).push(JournalEntry { id: id.clone(), insertions: inserted_keys, deletions: removed_keys }); @@ -271,7 +260,7 @@ r.append(&end_era); r.append(&index); r.append(&&PADDING[..]); - try!(batch.delete(&r.drain())); + try!(batch.delete(self.column, &r.drain())); trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): +{} -{} entries", end_era, index, journal.id, canon_id, journal.insertions.len(), journal.deletions.len()); { if canon_id == journal.id { @@ -290,7 +279,7 @@ } // apply canon inserts first for (k, v) in canon_insertions { - try!(batch.put(&k, &v)); + try!(batch.put(self.column, &k, &v)); } // update the overlay for k in overlay_deletions { @@ -299,13 +288,12 @@ // apply canon deletions for k in canon_deletions { if !journal_overlay.backing_overlay.contains(&OverlayRecentDB::to_short_key(&k)) { - try!(batch.delete(&k)); + try!(batch.delete(self.column, &k)); } } } journal_overlay.journal.remove(&end_era); } - try!(self.backing.write(batch)); Ok(0) } @@ -314,7 +302,7 @@ impl HashDB for OverlayRecentDB { fn keys(&self) -> HashMap<H256, i32> { let mut ret: HashMap<H256, i32> = HashMap::new(); - for (key, _) in self.backing.iter() { + for (key, _) in self.backing.iter(self.column) { let h = H256::from_slice(key.deref()); ret.insert(h, 1); } @@ -374,7 +362,12 @@ use hashdb::*; use log::init_log; use journaldb::JournalDB; - use kvdb::DatabaseConfig; + use kvdb::Database; + + fn new_db(path: &Path) -> OverlayRecentDB { + let backing = Arc::new(Database::open_default(path.to_str().unwrap()).unwrap()); + OverlayRecentDB::new(backing, None) + } #[test] fn insert_same_in_fork() { // history is 1 let mut jdb = OverlayRecentDB::new_temp(); let x = jdb.insert(b"X"); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&x); - jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); let x = jdb.insert(b"X"); - jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(4, &b"1003b".sha3(), Some((2, 
b"2".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + jdb.commit_batch(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + jdb.commit_batch(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&x)); @@ -411,20 +404,20 @@ mod tests { // history is 3 let mut jdb = OverlayRecentDB::new_temp(); let h = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); jdb.remove(&h); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); - jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); - jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); - jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&h)); } @@ -436,7 +429,7 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); @@ -444,7 +437,7 @@ mod tests { jdb.remove(&foo); jdb.remove(&bar); let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); @@ -452,20 +445,20 @@ mod tests { let foo = jdb.insert(b"foo"); jdb.remove(&baz); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&bar)); assert!(jdb.contains(&baz)); jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&bar)); assert!(!jdb.contains(&baz)); - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); assert!(!jdb.contains(&bar)); @@ -479,25 +472,25 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.remove(&foo); let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + 
jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); assert!(jdb.contains(&baz)); - jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&baz)); @@ -510,112 +503,105 @@ mod tests { let mut jdb = OverlayRecentDB::new_temp(); let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); jdb.remove(&foo); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); assert!(jdb.contains(&foo)); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); - jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); } #[test] fn fork_same_key_one() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); - - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + let mut jdb = OverlayRecentDB::new_temp(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); - jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); } #[test] fn fork_same_key_other() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); + let mut jdb = OverlayRecentDB::new_temp(); - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); - jdb.commit(2, 
&b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); } #[test] fn fork_ins_del_ins() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); + let mut jdb = OverlayRecentDB::new_temp(); - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap(); + jdb.commit_batch(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -626,27 +612,27 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); foo }; { - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); jdb.remove(&foo); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } { - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); } @@ -655,145 +641,133 @@ mod tests { #[test] fn insert_delete_insert_delete_insert_expunge() { init_log(); - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); - - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = OverlayRecentDB::new_temp(); // history is 4 let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, 
&b"1".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), None).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); // expunge foo - jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } #[test] fn forked_insert_delete_insert_delete_insert_expunge() { init_log(); - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); - - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = OverlayRecentDB::new_temp(); // history is 4 let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(1, &b"1a".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1a".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(1, &b"1b".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1b".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(2, &b"2a".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2a".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(2, &b"2b".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2b".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(3, &b"3a".sha3(), None).unwrap(); + jdb.commit_batch(3, &b"3a".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit(3, &b"3b".sha3(), None).unwrap(); + jdb.commit_batch(3, &b"3b".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); // expunge foo - jdb.commit(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap(); + jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } #[test] fn broken_assert() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); + let mut jdb = OverlayRecentDB::new_temp(); - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); - // history is 1 let foo = jdb.insert(b"foo"); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. 
jdb.remove(&foo); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN + jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); jdb.remove(&foo); - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + jdb.commit_batch(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); } #[test] fn reopen_test() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); - - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = OverlayRecentDB::new_temp(); // history is 4 let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(3, &b"3".sha3(), None).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); jdb.remove(&bar); - jdb.commit(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); jdb.insert(b"bar"); - jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -807,45 +781,48 @@ mod tests { let foo = b"foo".sha3(); { - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); // history is 1 jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. 
jdb.remove(&foo); - jdb.commit(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); jdb.insert(b"foo"); - jdb.commit(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); // incantation to reopen the db - }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + }; { + let mut jdb = new_db(&dir); jdb.remove(&foo); - jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); // incantation to reopen the db - }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + }; { + let mut jdb = new_db(&dir); - jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); // incantation to reopen the db - }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + }; { + let mut jdb = new_db(&dir); - jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); + jdb.commit_batch(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); } @@ -856,26 +833,26 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); let (foo, bar, baz) = { - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); + let mut jdb = new_db(&dir); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); (foo, bar, baz) }; { - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); - jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + let mut jdb = new_db(&dir); + jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&baz)); @@ -887,17 +864,17 @@ mod tests { fn insert_older_era() { let mut jdb = OverlayRecentDB::new_temp(); let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0a".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0a".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let bar = jdb.insert(b"bar"); - jdb.commit(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); - jdb.commit(0, &b"0b".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0b".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); 
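Note: the tests in this file now construct databases through test-only helpers (`new_temp()`, `new_db(&dir)`) rather than the removed path-based constructor. The body of `new_db` is outside this hunk; a minimal sketch consistent with the new `OverlayRecentDB::new(backing, col)` signature would be:

    #[cfg(test)]
    fn new_db(path: &Path) -> OverlayRecentDB {
        // Open a plain RocksDB instance and hand it to the journal DB;
        // `None` selects the default column, matching `new_temp()` above.
        let backing = Arc::new(Database::open_default(path.to_str().unwrap()).unwrap());
        OverlayRecentDB::new(backing, None)
    }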
assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); diff --git a/util/src/journaldb/refcounteddb.rs b/util/src/journaldb/refcounteddb.rs index b50fc2a72..2fcbd4851 100644 --- a/util/src/journaldb/refcounteddb.rs +++ b/util/src/journaldb/refcounteddb.rs @@ -20,9 +20,9 @@ use common::*; use rlp::*; use hashdb::*; use overlaydb::*; -use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, VERSION_KEY}; +use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; use super::traits::JournalDB; -use kvdb::{Database, DBTransaction, DatabaseConfig}; +use kvdb::{Database, DBTransaction}; #[cfg(test)] use std::env; @@ -39,35 +39,23 @@ pub struct RefCountedDB { latest_era: Option, inserts: Vec, removes: Vec, + column: Option, } -const DB_VERSION : u32 = 0x200; const PADDING : [u8; 10] = [ 0u8; 10 ]; impl RefCountedDB { /// Create a new instance given a `backing` database. - pub fn new(path: &str, config: DatabaseConfig) -> RefCountedDB { - let backing = Database::open(&config, path).unwrap_or_else(|e| { - panic!("Error opening state db: {}", e); - }); - if !backing.is_empty() { - match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::(&v))) { - Ok(Some(DB_VERSION)) => {}, - v => panic!("Incompatible DB version, expected {}, got {:?}; to resolve, remove {} and restart.", DB_VERSION, v, path) - } - } else { - backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); - } - - let backing = Arc::new(backing); - let latest_era = backing.get(&LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::(&val)); + pub fn new(backing: Arc, col: Option) -> RefCountedDB { + let latest_era = backing.get(col, &LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::(&val)); RefCountedDB { - forward: OverlayDB::new_with_arc(backing.clone()), + forward: OverlayDB::new(backing.clone(), col), backing: backing, inserts: vec![], removes: vec![], latest_era: latest_era, + column: col, } } @@ -76,7 +64,8 @@ impl RefCountedDB { fn new_temp() -> RefCountedDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); - Self::new(dir.to_str().unwrap(), DatabaseConfig::default()) + let backing = Arc::new(Database::open_default(dir.to_str().unwrap()).unwrap()); + Self::new(backing, None) } } @@ -97,6 +86,7 @@ impl JournalDB for RefCountedDB { latest_era: self.latest_era, inserts: self.inserts.clone(), removes: self.removes.clone(), + column: self.column.clone(), }) } @@ -108,13 +98,17 @@ impl JournalDB for RefCountedDB { self.latest_era.is_none() } + fn backing(&self) -> &Arc { + &self.backing + } + fn latest_era(&self) -> Option { self.latest_era } fn state(&self, id: &H256) -> Option { - self.backing.get_by_prefix(&id[0..DB_PREFIX_LEN]).map(|b| b.to_vec()) + self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.to_vec()) } - fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + fn commit(&mut self, batch: &DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] @@ -128,12 +122,11 @@ impl JournalDB for RefCountedDB { // of its inserts otherwise. // record new commit's details. 
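Note: the journal key built in the loop below is the RLP of `[era, index, padding]`. Pulled out on its own (a sketch using names already in scope in this file, and assuming the usual `PADDING` suffix defined above):

    // Key for journal entry `index` of era `now`; PADDING keeps all journal
    // keys the same length so prefix lookups stay well-behaved.
    let mut r = RlpStream::new_list(3);
    r.append(&now);           // era (block number being committed)
    r.append(&index);         // position of this entry within the era
    r.append(&&PADDING[..]);  // 10 zero bytes
    let key = r.drain();      // raw bytes used for the get/put/delete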
- let batch = DBTransaction::new(); { let mut index = 0usize; let mut last; - while try!(self.backing.get({ + while try!(self.backing.get(self.column, { let mut r = RlpStream::new_list(3); r.append(&now); r.append(&index); @@ -148,7 +141,7 @@ impl JournalDB for RefCountedDB { r.append(id); r.append(&self.inserts); r.append(&self.removes); - try!(batch.put(&last, r.as_raw())); + try!(batch.put(self.column, &last, r.as_raw())); trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, index, id, self.inserts, self.removes); @@ -156,7 +149,7 @@ impl JournalDB for RefCountedDB { self.removes.clear(); if self.latest_era.map_or(true, |e| now > e) { - try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + try!(batch.put(self.column, &LATEST_ERA_KEY, &encode(&now))); self.latest_era = Some(now); } } @@ -167,7 +160,7 @@ impl JournalDB for RefCountedDB { let mut last; while let Some(rlp_data) = { // trace!(target: "rcdb", "checking for journal #{}.{}", end_era, index); - try!(self.backing.get({ + try!(self.backing.get(self.column, { let mut r = RlpStream::new_list(3); r.append(&end_era); r.append(&index); @@ -183,13 +176,12 @@ impl JournalDB for RefCountedDB { for i in &to_remove { self.forward.remove(i); } - try!(batch.delete(&last)); + try!(batch.delete(self.column, &last)); index += 1; } } let r = try!(self.forward.commit_to_batch(&batch)); - try!(self.backing.write(batch)); Ok(r) } } @@ -209,16 +201,16 @@ mod tests { // history is 3 let mut jdb = RefCountedDB::new_temp(); let h = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.contains(&h)); jdb.remove(&h); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); assert!(jdb.contains(&h)); - jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); assert!(jdb.contains(&h)); - jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.contains(&h)); - jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(!jdb.contains(&h)); } @@ -228,16 +220,16 @@ mod tests { let mut jdb = RefCountedDB::new_temp(); assert_eq!(jdb.latest_era(), None); let h = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert_eq!(jdb.latest_era(), Some(0)); jdb.remove(&h); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); assert_eq!(jdb.latest_era(), Some(1)); - jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); assert_eq!(jdb.latest_era(), Some(2)); - jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); assert_eq!(jdb.latest_era(), Some(3)); - jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); assert_eq!(jdb.latest_era(), Some(4)); } @@ -248,32 +240,32 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.remove(&foo); jdb.remove(&bar); let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1".sha3(), Some((0, 
b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); assert!(jdb.contains(&baz)); let foo = jdb.insert(b"foo"); jdb.remove(&baz); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&bar)); assert!(jdb.contains(&baz)); jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&bar)); assert!(!jdb.contains(&baz)); - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(!jdb.contains(&foo)); assert!(!jdb.contains(&bar)); assert!(!jdb.contains(&baz)); @@ -286,22 +278,22 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.remove(&foo); let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); jdb.remove(&bar); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); assert!(jdb.contains(&baz)); - jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&baz)); assert!(!jdb.contains(&bar)); diff --git a/util/src/journaldb/traits.rs b/util/src/journaldb/traits.rs index 53fd17a62..8fd597349 100644 --- a/util/src/journaldb/traits.rs +++ b/util/src/journaldb/traits.rs @@ -18,6 +18,7 @@ use common::*; use hashdb::*; +use kvdb::{Database, DBTransaction}; /// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually /// exclusive actions. @@ -36,11 +37,22 @@ pub trait JournalDB : HashDB + Send + Sync { /// Commit all recent insert operations and canonical historical commits' removals from the /// old era to the backing database, reverting any non-canonical historical commit's inserts. - fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result; + fn commit(&mut self, batch: &DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result; /// State data query fn state(&self, _id: &H256) -> Option; /// Whether this database is pruned. fn is_pruned(&self) -> bool { true } + + /// Get backing database. 
+ fn backing(&self) -> &Arc; + + #[cfg(test)] + /// Commit all changes in a single batch + fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + let batch = self.backing().transaction(); + let res = try!(self.commit(&batch, now, id, end)); + self.backing().write(batch).map(|_| res).map_err(Into::into) + } } diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 93dc760c4..a87796324 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -18,7 +18,7 @@ use std::default::Default; use rocksdb::{DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBVector, DBIterator, - Options, DBCompactionStyle, BlockBasedOptions, Direction, Cache}; + Options, DBCompactionStyle, BlockBasedOptions, Direction, Cache, Column}; const DB_BACKGROUND_FLUSHES: i32 = 2; const DB_BACKGROUND_COMPACTIONS: i32 = 2; @@ -26,32 +26,31 @@ const DB_BACKGROUND_COMPACTIONS: i32 = 2; /// Write transaction. Batches a sequence of put/delete operations for efficiency. pub struct DBTransaction { batch: WriteBatch, -} - -impl Default for DBTransaction { - fn default() -> Self { - DBTransaction::new() - } + cfs: Vec, } impl DBTransaction { /// Create new transaction. - pub fn new() -> DBTransaction { - DBTransaction { batch: WriteBatch::new() } + pub fn new(db: &Database) -> DBTransaction { + DBTransaction { + batch: WriteBatch::new(), + cfs: db.cfs.clone(), + } } /// Insert a key-value pair in the transaction. Any existing value value will be overwritten upon write. - pub fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String> { - self.batch.put(key, value) + pub fn put(&self, col: Option, key: &[u8], value: &[u8]) -> Result<(), String> { + col.map_or_else(|| self.batch.put(key, value), |c| self.batch.put_cf(self.cfs[c as usize], key, value)) } /// Delete value by key. - pub fn delete(&self, key: &[u8]) -> Result<(), String> { - self.batch.delete(key) + pub fn delete(&self, col: Option, key: &[u8]) -> Result<(), String> { + col.map_or_else(|| self.batch.delete(key), |c| self.batch.delete_cf(self.cfs[c as usize], key)) } } /// Compaction profile for the database settings +#[derive(Clone, Copy)] pub struct CompactionProfile { /// L0-L1 target file size pub initial_file_size: u64, @@ -61,16 +60,18 @@ pub struct CompactionProfile { pub write_rate_limit: Option, } -impl CompactionProfile { +impl Default for CompactionProfile { /// Default profile suitable for most storage - pub fn default() -> CompactionProfile { + fn default() -> CompactionProfile { CompactionProfile { initial_file_size: 32 * 1024 * 1024, file_size_multiplier: 2, write_rate_limit: None, } } +} +impl CompactionProfile { /// Slow hdd compaction profile pub fn hdd() -> CompactionProfile { CompactionProfile { @@ -82,6 +83,7 @@ impl CompactionProfile { } /// Database configuration +#[derive(Clone, Copy)] pub struct DatabaseConfig { /// Max number of open files. pub max_open_files: i32, @@ -89,22 +91,18 @@ pub struct DatabaseConfig { pub cache_size: Option, /// Compaction profile pub compaction: CompactionProfile, + /// Set number of columns + pub columns: Option, + /// Should we keep WAL enabled? 
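Note: with `commit` now taking a caller-supplied batch, every write path follows the same three-step shape as the `commit_batch` test helper above. As a sketch (inside a function returning `Result`, with `jdb`, `now`, `id` and `end` already in hand):

    // Build one transaction, stage the journal records into it, then make
    // the whole commit durable with a single atomic write to the backing DB.
    let batch = jdb.backing().transaction();
    let changed = try!(jdb.commit(&batch, now, &id, end));
    try!(jdb.backing().write(batch));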
+ pub wal: bool, } impl DatabaseConfig { - /// Database with default settings and specified cache size - pub fn with_cache(cache_size: usize) -> DatabaseConfig { - DatabaseConfig { - cache_size: Some(cache_size), - max_open_files: 256, - compaction: CompactionProfile::default(), - } - } - - /// Modify the compaction profile - pub fn compaction(mut self, profile: CompactionProfile) -> Self { - self.compaction = profile; - self + /// Create new `DatabaseConfig` with default parameters and specified set of columns. + pub fn with_columns(columns: Option) -> Self { + let mut config = Self::default(); + config.columns = columns; + config } } @@ -112,8 +110,10 @@ impl Default for DatabaseConfig { fn default() -> DatabaseConfig { DatabaseConfig { cache_size: None, - max_open_files: 256, + max_open_files: 1024, compaction: CompactionProfile::default(), + columns: None, + wal: true, } } } @@ -135,6 +135,7 @@ impl<'a> Iterator for DatabaseIterator { pub struct Database { db: DB, write_opts: WriteOptions, + cfs: Vec, } impl Database { @@ -168,10 +169,37 @@ impl Database { opts.set_block_based_table_factory(&block_opts); } - let write_opts = WriteOptions::new(); - //write_opts.disable_wal(true); // TODO: make sure this is safe + let mut write_opts = WriteOptions::new(); + if !config.wal { + write_opts.disable_wal(true); + } - let db = match DB::open(&opts, path) { + let mut cfs: Vec = Vec::new(); + let db = match config.columns { + Some(columns) => { + let cfnames: Vec<_> = (0..columns).map(|c| format!("col{}", c)).collect(); + let cfnames: Vec<&str> = cfnames.iter().map(|n| n as &str).collect(); + match DB::open_cf(&opts, path, &cfnames) { + Ok(db) => { + cfs = cfnames.iter().map(|n| db.cf_handle(n).unwrap()).collect(); + assert!(cfs.len() == columns as usize); + Ok(db) + } + Err(_) => { + // retry and create CFs + match DB::open_cf(&opts, path, &[]) { + Ok(mut db) => { + cfs = cfnames.iter().map(|n| db.create_cf(n, &opts).unwrap()).collect(); + Ok(db) + }, + err @ Err(_) => err, + } + } + } + }, + None => DB::open(&opts, path) + }; + let db = match db { Ok(db) => db, Err(ref s) if s.starts_with("Corruption:") => { info!("{}", s); @@ -181,17 +209,12 @@ impl Database { }, Err(s) => { return Err(s); } }; - Ok(Database { db: db, write_opts: write_opts, }) + Ok(Database { db: db, write_opts: write_opts, cfs: cfs }) } - /// Insert a key-value pair in the transaction. Any existing value value will be overwritten. - pub fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String> { - self.db.put_opt(key, value, &self.write_opts) - } - - /// Delete value by key. - pub fn delete(&self, key: &[u8]) -> Result<(), String> { - self.db.delete_opt(key, &self.write_opts) + /// Creates new transaction for this database. + pub fn transaction(&self) -> DBTransaction { + DBTransaction::new(self) } /// Commit transaction to database. @@ -200,13 +223,14 @@ impl Database { } /// Get value by key. - pub fn get(&self, key: &[u8]) -> Result, String> { - self.db.get(key) + pub fn get(&self, col: Option, key: &[u8]) -> Result, String> { + col.map_or_else(|| self.db.get(key), |c| self.db.get_cf(self.cfs[c as usize], key)) } /// Get value by partial key. Prefix size should match configured prefix size. 
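Note: `Database::open` now drives column-family setup: with `columns: Some(n)` it tries `DB::open_cf` against `col0..col{n-1}` and, on a fresh database, falls back to opening with no column families and creating them one by one. From the caller's side the whole dance reduces to the following sketch (the temp path is a stand-in):

    // Open a DB with two column families plus the default column.
    let db = Database::open(&DatabaseConfig::with_columns(Some(2)), "/tmp/cols-example").unwrap();
    let batch = db.transaction();
    batch.put(Some(0), b"key", b"in col0").unwrap();
    batch.put(None, b"key", b"in the default column").unwrap();
    db.write(batch).unwrap();
    assert_eq!(db.get(Some(0), b"key").unwrap().unwrap().deref(), b"in col0");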
- pub fn get_by_prefix(&self, prefix: &[u8]) -> Option> { - let mut iter = self.db.iterator(IteratorMode::From(prefix, Direction::Forward)); + pub fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { + let mut iter = col.map_or_else(|| self.db.iterator(IteratorMode::From(prefix, Direction::Forward)), + |c| self.db.iterator_cf(self.cfs[c as usize], IteratorMode::From(prefix, Direction::Forward)).unwrap()); match iter.next() { // TODO: use prefix_same_as_start read option (not availabele in C API currently) Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] { Some(v) } else { None }, @@ -215,13 +239,14 @@ impl Database { } /// Check if there is anything in the database. - pub fn is_empty(&self) -> bool { - self.db.iterator(IteratorMode::Start).next().is_none() + pub fn is_empty(&self, col: Option) -> bool { + self.iter(col).next().is_none() } - /// Check if there is anything in the database. - pub fn iter(&self) -> DatabaseIterator { - DatabaseIterator { iter: self.db.iterator(IteratorMode::Start) } + /// Get database iterator. + pub fn iter(&self, col: Option) -> DatabaseIterator { + col.map_or_else(|| DatabaseIterator { iter: self.db.iterator(IteratorMode::Start) }, + |c| DatabaseIterator { iter: self.db.iterator_cf(self.cfs[c as usize], IteratorMode::Start).unwrap() }) } } @@ -240,39 +265,46 @@ mod tests { let key2 = H256::from_str("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); let key3 = H256::from_str("01c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); - db.put(&key1, b"cat").unwrap(); - db.put(&key2, b"dog").unwrap(); + let batch = db.transaction(); + batch.put(None, &key1, b"cat").unwrap(); + batch.put(None, &key2, b"dog").unwrap(); + db.write(batch).unwrap(); - assert_eq!(db.get(&key1).unwrap().unwrap().deref(), b"cat"); + assert_eq!(db.get(None, &key1).unwrap().unwrap().deref(), b"cat"); - let contents: Vec<_> = db.iter().collect(); + let contents: Vec<_> = db.iter(None).collect(); assert_eq!(contents.len(), 2); assert_eq!(&*contents[0].0, key1.deref()); assert_eq!(&*contents[0].1, b"cat"); assert_eq!(&*contents[1].0, key2.deref()); assert_eq!(&*contents[1].1, b"dog"); - db.delete(&key1).unwrap(); - assert!(db.get(&key1).unwrap().is_none()); - db.put(&key1, b"cat").unwrap(); + let batch = db.transaction(); + batch.delete(None, &key1).unwrap(); + db.write(batch).unwrap(); - let transaction = DBTransaction::new(); - transaction.put(&key3, b"elephant").unwrap(); - transaction.delete(&key1).unwrap(); + assert!(db.get(None, &key1).unwrap().is_none()); + + let batch = db.transaction(); + batch.put(None, &key1, b"cat").unwrap(); + db.write(batch).unwrap(); + + let transaction = db.transaction(); + transaction.put(None, &key3, b"elephant").unwrap(); + transaction.delete(None, &key1).unwrap(); db.write(transaction).unwrap(); - assert!(db.get(&key1).unwrap().is_none()); - assert_eq!(db.get(&key3).unwrap().unwrap().deref(), b"elephant"); + assert!(db.get(None, &key1).unwrap().is_none()); + assert_eq!(db.get(None, &key3).unwrap().unwrap().deref(), b"elephant"); - assert_eq!(db.get_by_prefix(&key3).unwrap().deref(), b"elephant"); - assert_eq!(db.get_by_prefix(&key2).unwrap().deref(), b"dog"); + assert_eq!(db.get_by_prefix(None, &key3).unwrap().deref(), b"elephant"); + assert_eq!(db.get_by_prefix(None, &key2).unwrap().deref(), b"dog"); } #[test] fn kvdb() { let path = RandomTempPath::create_dir(); let smoke = Database::open_default(path.as_path().to_str().unwrap()).unwrap(); - assert!(smoke.is_empty()); + 
assert!(smoke.is_empty(None)); test_db(&DatabaseConfig::default()); } } - diff --git a/util/src/lib.rs b/util/src/lib.rs index 7bdbb43ff..e56f3fe92 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -19,20 +19,16 @@ #![cfg_attr(feature="dev", plugin(clippy))] // Clippy settings -// TODO [todr] not really sure +// Most of the time much more readable #![cfg_attr(feature="dev", allow(needless_range_loop))] // Shorter than if-else #![cfg_attr(feature="dev", allow(match_bool))] // We use that to be more explicit about handled cases #![cfg_attr(feature="dev", allow(match_same_arms))] -// Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. +// Keeps consistency (all lines with `.clone()`). #![cfg_attr(feature="dev", allow(clone_on_copy))] -// In most cases it expresses function flow better -#![cfg_attr(feature="dev", allow(if_not_else))] // TODO [todr] a lot of warnings to be fixed -#![cfg_attr(feature="dev", allow(needless_borrow))] #![cfg_attr(feature="dev", allow(assign_op_pattern))] -#![cfg_attr(feature="dev", allow(unnecessary_operation))] //! Ethcore-util library diff --git a/util/src/migration/mod.rs b/util/src/migration/mod.rs index d71d26885..6072041a3 100644 --- a/util/src/migration/mod.rs +++ b/util/src/migration/mod.rs @@ -29,12 +29,15 @@ use ::kvdb::{CompactionProfile, Database, DatabaseConfig, DBTransaction}; pub struct Config { /// Defines how many elements should be migrated at once. pub batch_size: usize, + /// Database compaction profile. + pub compaction_profile: CompactionProfile, } impl Default for Config { fn default() -> Self { Config { batch_size: 1024, + compaction_profile: Default::default(), } } } @@ -43,14 +46,16 @@ impl Default for Config { pub struct Batch { inner: BTreeMap, Vec>, batch_size: usize, + column: Option, } impl Batch { /// Make a new batch with the given config. - pub fn new(config: &Config) -> Self { + pub fn new(config: &Config, col: Option) -> Self { Batch { inner: BTreeMap::new(), batch_size: config.batch_size, + column: col, } } @@ -67,10 +72,10 @@ impl Batch { pub fn commit(&mut self, dest: &mut Database) -> Result<(), Error> { if self.inner.is_empty() { return Ok(()) } - let transaction = DBTransaction::new(); + let transaction = DBTransaction::new(dest); for keypair in &self.inner { - try!(transaction.put(&keypair.0, &keypair.1).map_err(Error::Custom)); + try!(transaction.put(self.column, &keypair.0, &keypair.1).map_err(Error::Custom)); } self.inner.clear(); @@ -99,14 +104,18 @@ impl From<::std::io::Error> for Error { /// A generalized migration from the given db to a destination db. pub trait Migration: 'static { + /// Number of columns in database after the migration. + fn columns(&self) -> Option; /// Version of the database after the migration. fn version(&self) -> u32; /// Migrate a source to a destination. - fn migrate(&mut self, source: &Database, config: &Config, destination: &mut Database) -> Result<(), Error>; + fn migrate(&mut self, source: &Database, config: &Config, destination: &mut Database, col: Option) -> Result<(), Error>; } /// A simple migration over key-value pairs. pub trait SimpleMigration: 'static { + /// Number of columns in database after the migration. + fn columns(&self) -> Option; /// Version of database after the migration. fn version(&self) -> u32; /// Should migrate existing object to new database. 
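Note: a `Batch` is now pinned to one column for its whole lifetime, so a migration touching several columns needs one batch per column. The typical flow inside a `migrate` implementation looks like this (sketch; `config`, `col`, `source` and `dest` are whatever the migration already has in hand):

    let mut batch = Batch::new(config, col);  // every write targets `col`
    for (key, value) in source.iter(col) {
        // `insert` flushes to `dest` automatically every `batch_size` pairs.
        try!(batch.insert(key.to_vec(), value.to_vec(), dest));
    }
    try!(batch.commit(dest));                 // flush the remainder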
@@ -115,12 +124,14 @@ pub trait SimpleMigration: 'static { } impl Migration for T { + fn columns(&self) -> Option { SimpleMigration::columns(self) } + fn version(&self) -> u32 { SimpleMigration::version(self) } - fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database) -> Result<(), Error> { - let mut batch = Batch::new(config); + fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> { + let mut batch = Batch::new(config, col); - for (key, value) in source.iter() { + for (key, value) in source.iter(col) { if let Some((key, value)) = self.simple_migrate(key.to_vec(), value.to_vec()) { try!(batch.insert(key, value, dest)); } @@ -180,12 +191,11 @@ impl Manager { /// Adds new migration rules. pub fn add_migration(&mut self, migration: T) -> Result<(), Error> where T: Migration { - let version_match = match self.migrations.last() { - Some(last) => last.version() + 1 == migration.version(), + let is_new = match self.migrations.last() { + Some(last) => migration.version() > last.version(), None => true, }; - - match version_match { + match is_new { true => Ok(self.migrations.push(Box::new(migration))), false => Err(Error::CannotAddMigration), } @@ -195,11 +205,15 @@ impl Manager { /// and producing a path where the final migration lives. pub fn execute(&mut self, old_path: &Path, version: u32) -> Result { let config = self.config.clone(); - let migrations = try!(self.migrations_from(version).ok_or(Error::MigrationImpossible)); - let db_config = DatabaseConfig { + let columns = self.no_of_columns_at(version); + let migrations = self.migrations_from(version); + if migrations.is_empty() { return Err(Error::MigrationImpossible) }; + let mut db_config = DatabaseConfig { max_open_files: 64, cache_size: None, - compaction: CompactionProfile::default(), + compaction: config.compaction_profile, + columns: columns, + wal: true, }; let db_root = database_path(old_path); @@ -209,14 +223,28 @@ impl Manager { // start with the old db. let old_path_str = try!(old_path.to_str().ok_or(Error::MigrationImpossible)); let mut cur_db = try!(Database::open(&db_config, old_path_str).map_err(Error::Custom)); + for migration in migrations { + // Change number of columns in new db + let current_columns = db_config.columns; + db_config.columns = migration.columns(); + // open the target temporary database. temp_path = temp_idx.path(&db_root); let temp_path_str = try!(temp_path.to_str().ok_or(Error::MigrationImpossible)); let mut new_db = try!(Database::open(&db_config, temp_path_str).map_err(Error::Custom)); // perform the migration from cur_db to new_db. - try!(migration.migrate(&cur_db, &config, &mut new_db)); + match current_columns { + // migrate only default column + None => try!(migration.migrate(&cur_db, &config, &mut new_db, None)), + Some(v) => { + // Migrate all columns in previous DB + for col in 0..v { + try!(migration.migrate(&cur_db, &config, &mut new_db, Some(col))) + } + } + } // next iteration, we will migrate from this db into the other temp. cur_db = new_db; temp_idx.swap(); @@ -235,10 +263,42 @@ impl Manager { } } - fn migrations_from(&mut self, version: u32) -> Option<&mut [Box]> { - // index of the first required migration - let position = self.migrations.iter().position(|m| m.version() == version + 1); - position.map(move |p| &mut self.migrations[p..]) + /// Find all needed migrations. 
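Note: a migration that introduces column families only has to report the new column count from `columns()`; `Manager::execute` then reopens the target database with that many columns before running it. A hypothetical example (not part of this patch) targeting three columns:

    // Hypothetical migration that switches the DB to three columns at version 5.
    struct ToColumns;
    impl SimpleMigration for ToColumns {
        fn columns(&self) -> Option<u32> { Some(3) }
        fn version(&self) -> u32 { 5 }
        fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
            Some((key, value)) // carry each pair over unchanged
        }
    }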
+ fn migrations_from(&mut self, version: u32) -> Vec<&mut Box> { + self.migrations.iter_mut().filter(|m| m.version() > version).collect() + } + + fn no_of_columns_at(&self, version: u32) -> Option { + let migration = self.migrations.iter().find(|m| m.version() == version); + match migration { + Some(m) => m.columns(), + None => None + } } } +/// Prints a dot every `max` ticks +pub struct Progress { + current: usize, + max: usize, +} + +impl Default for Progress { + fn default() -> Self { + Progress { + current: 0, + max: 100_000, + } + } +} + +impl Progress { + /// Tick progress meter. + pub fn tick(&mut self) { + self.current += 1; + if self.current == self.max { + self.current = 0; + flush!("."); + } + } +} diff --git a/util/src/migration/tests.rs b/util/src/migration/tests.rs index 58d2c9008..8eec87c21 100644 --- a/util/src/migration/tests.rs +++ b/util/src/migration/tests.rs @@ -20,7 +20,7 @@ use common::*; use migration::{Config, SimpleMigration, Manager}; -use kvdb::{Database, DBTransaction}; +use kvdb::Database; use devtools::RandomTempPath; use std::path::PathBuf; @@ -35,9 +35,9 @@ fn db_path(path: &Path) -> PathBuf { fn make_db(path: &Path, pairs: BTreeMap, Vec>) { let db = Database::open_default(path.to_str().unwrap()).expect("failed to open temp database"); { - let transaction = DBTransaction::new(); + let transaction = db.transaction(); for (k, v) in pairs { - transaction.put(&k, &v).expect("failed to add pair to transaction"); + transaction.put(None, &k, &v).expect("failed to add pair to transaction"); } db.write(transaction).expect("failed to write db transaction"); @@ -49,7 +49,7 @@ fn verify_migration(path: &Path, pairs: BTreeMap, Vec>) { let db = Database::open_default(path.to_str().unwrap()).unwrap(); for (k, v) in pairs { - let x = db.get(&k).unwrap().unwrap(); + let x = db.get(None, &k).unwrap().unwrap(); assert_eq!(&x[..], &v[..]); } @@ -58,9 +58,9 @@ fn verify_migration(path: &Path, pairs: BTreeMap, Vec>) { struct Migration0; impl SimpleMigration for Migration0 { - fn version(&self) -> u32 { - 1 - } + fn columns(&self) -> Option { None } + + fn version(&self) -> u32 { 1 } fn simple_migrate(&mut self, key: Vec, value: Vec) -> Option<(Vec, Vec)> { let mut key = key; @@ -74,9 +74,9 @@ impl SimpleMigration for Migration0 { struct Migration1; impl SimpleMigration for Migration1 { - fn version(&self) -> u32 { - 2 - } + fn columns(&self) -> Option { None } + + fn version(&self) -> u32 { 2 } fn simple_migrate(&mut self, key: Vec, _value: Vec) -> Option<(Vec, Vec)> { Some((key, vec![])) @@ -109,6 +109,18 @@ fn no_migration_needed() { manager.execute(&db_path, 1).unwrap(); } +#[test] +#[should_panic] +fn wrong_adding_order() { + let dir = RandomTempPath::create_dir(); + let db_path = db_path(dir.as_path()); + let mut manager = Manager::new(Config::default()); + make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); + + manager.add_migration(Migration1).unwrap(); + manager.add_migration(Migration0).unwrap(); +} + #[test] fn multiple_migrations() { let dir = RandomTempPath::create_dir(); @@ -139,6 +151,34 @@ fn second_migration() { verify_migration(&end_path, expected); } +#[test] +fn first_and_noop_migration() { + let dir = RandomTempPath::create_dir(); + let db_path = db_path(dir.as_path()); + let mut manager = Manager::new(Config::default()); + make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); + let expected = map![vec![0x11] => vec![0x22], vec![1, 0x11] => vec![1, 0x22]]; + + manager.add_migration(Migration0).unwrap(); + let end_path = 
manager.execute(&db_path, 0).unwrap(); + + verify_migration(&end_path, expected); +} + +#[test] +fn noop_and_second_migration() { + let dir = RandomTempPath::create_dir(); + let db_path = db_path(dir.as_path()); + let mut manager = Manager::new(Config::default()); + make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]); + let expected = map![vec![] => vec![], vec![1] => vec![]]; + + manager.add_migration(Migration1).unwrap(); + let end_path = manager.execute(&db_path, 0).unwrap(); + + verify_migration(&end_path, expected); +} + #[test] fn is_migration_needed() { let mut manager = Manager::new(Config::default()); diff --git a/util/src/network/connection.rs b/util/src/network/connection.rs index 9963d94b7..cbc9f39d7 100644 --- a/util/src/network/connection.rs +++ b/util/src/network/connection.rs @@ -77,7 +77,7 @@ impl GenericConnection { /// Readable IO handler. Called when there is some data to be read. pub fn readable(&mut self) -> io::Result> { if self.rec_size == 0 || self.rec_buf.len() >= self.rec_size { - warn!(target:"network", "Unexpected connection read"); + return Ok(None); } let sock_ref = ::by_ref(&mut self.socket); loop { @@ -355,7 +355,7 @@ impl EncryptedConnection { self.encoder.encrypt(&mut RefReadBuffer::new(&header), &mut RefWriteBuffer::new(&mut packet), false).expect("Invalid length or padding"); EncryptedConnection::update_mac(&mut self.egress_mac, &mut self.mac_encoder, &packet[0..16]); self.egress_mac.clone().finalize(&mut packet[16..32]); - self.encoder.encrypt(&mut RefReadBuffer::new(&payload), &mut RefWriteBuffer::new(&mut packet[32..(32 + len)]), padding == 0).expect("Invalid length or padding"); + self.encoder.encrypt(&mut RefReadBuffer::new(payload), &mut RefWriteBuffer::new(&mut packet[32..(32 + len)]), padding == 0).expect("Invalid length or padding"); if padding != 0 { let pad = [0u8; 16]; self.encoder.encrypt(&mut RefReadBuffer::new(&pad[0..padding]), &mut RefWriteBuffer::new(&mut packet[(32 + len)..(32 + len + padding)]), true).expect("Invalid length or padding"); diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs index 5670c4cfa..d71ff1252 100644 --- a/util/src/network/discovery.rs +++ b/util/src/network/discovery.rs @@ -167,7 +167,7 @@ impl Discovery { } fn clear_ping(&mut self, id: &NodeId) { - let mut bucket = self.node_buckets.get_mut(Discovery::distance(&self.id, &id) as usize).unwrap(); + let mut bucket = self.node_buckets.get_mut(Discovery::distance(&self.id, id) as usize).unwrap(); if let Some(node) = bucket.nodes.iter_mut().find(|n| &n.address.id == id) { node.timeout = None; } @@ -438,7 +438,7 @@ impl Discovery { } let mut packets = Discovery::prepare_neighbours_packets(&nearest); for p in packets.drain(..) { - self.send_packet(PACKET_NEIGHBOURS, &from, &p); + self.send_packet(PACKET_NEIGHBOURS, from, &p); } trace!(target: "discovery", "Sent {} Neighbours to {:?}", nearest.len(), &from); Ok(None) diff --git a/util/src/network/handshake.rs b/util/src/network/handshake.rs index 179309a6f..162ccc618 100644 --- a/util/src/network/handshake.rs +++ b/util/src/network/handshake.rs @@ -128,31 +128,27 @@ impl Handshake { /// Readable IO handler. Drives the state change. 
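Note: the `Progress` helper added to `migration/mod.rs` above is a plain tick counter; wiring it into any long-running loop is a one-liner per iteration (sketch):

    let mut progress = Progress::default(); // one dot per 100_000 ticks
    for _pair in source.iter(None) {
        // ... migrate the pair ...
        progress.tick(); // prints "." and resets whenever `max` is reached
    }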
pub fn readable(&mut self, io: &IoContext, host: &HostInfo) -> Result<(), UtilError> where Message: Send + Clone { if !self.expired() { - match self.state { - HandshakeState::New => {} - HandshakeState::ReadingAuth => { - if let Some(data) = try!(self.connection.readable()) { + while let Some(data) = try!(self.connection.readable()) { + match self.state { + HandshakeState::New => {}, + HandshakeState::StartSession => {}, + HandshakeState::ReadingAuth => { try!(self.read_auth(io, host.secret(), &data)); - }; - }, - HandshakeState::ReadingAuthEip8 => { - if let Some(data) = try!(self.connection.readable()) { + }, + HandshakeState::ReadingAuthEip8 => { try!(self.read_auth_eip8(io, host.secret(), &data)); - }; - }, - HandshakeState::ReadingAck => { - if let Some(data) = try!(self.connection.readable()) { + }, + HandshakeState::ReadingAck => { try!(self.read_ack(host.secret(), &data)); - }; - }, - HandshakeState::ReadingAckEip8 => { - if let Some(data) = try!(self.connection.readable()) { + }, + HandshakeState::ReadingAckEip8 => { try!(self.read_ack_eip8(host.secret(), &data)); - }; - }, - HandshakeState::StartSession => { + }, + } + if self.state == HandshakeState::StartSession { io.clear_timer(self.connection.token).ok(); - }, + break; + } } } Ok(()) @@ -178,9 +174,9 @@ impl Handshake { /// Parse, validate and confirm auth message fn read_auth(&mut self, io: &IoContext, secret: &Secret, data: &[u8]) -> Result<(), UtilError> where Message: Send + Clone { - trace!(target:"network", "Received handshake auth from {:?}", self.connection.remote_addr_str()); + trace!(target: "network", "Received handshake auth from {:?}", self.connection.remote_addr_str()); if data.len() != V4_AUTH_PACKET_SIZE { - debug!(target:"net", "Wrong auth packet size"); + debug!(target: "network", "Wrong auth packet size"); return Err(From::from(NetworkError::BadProtocol)); } self.auth_cipher = data.to_vec(); @@ -197,7 +193,7 @@ impl Handshake { // Try to interpret as EIP-8 packet let total = (((data[0] as u16) << 8 | (data[1] as u16)) as usize) + 2; if total < V4_AUTH_PACKET_SIZE { - debug!(target:"net", "Wrong EIP8 auth packet size"); + debug!(target: "network", "Wrong EIP8 auth packet size"); return Err(From::from(NetworkError::BadProtocol)); } let rest = total - data.len(); @@ -209,7 +205,7 @@ impl Handshake { } fn read_auth_eip8(&mut self, io: &IoContext, secret: &Secret, data: &[u8]) -> Result<(), UtilError> where Message: Send + Clone { - trace!(target:"network", "Received EIP8 handshake auth from {:?}", self.connection.remote_addr_str()); + trace!(target: "network", "Received EIP8 handshake auth from {:?}", self.connection.remote_addr_str()); self.auth_cipher.extend_from_slice(data); let auth = try!(ecies::decrypt(secret, &self.auth_cipher[0..2], &self.auth_cipher[2..])); let rlp = UntrustedRlp::new(&auth); @@ -224,9 +220,9 @@ impl Handshake { /// Parse and validate ack message fn read_ack(&mut self, secret: &Secret, data: &[u8]) -> Result<(), UtilError> { - trace!(target:"network", "Received handshake ack from {:?}", self.connection.remote_addr_str()); + trace!(target: "network", "Received handshake ack from {:?}", self.connection.remote_addr_str()); if data.len() != V4_ACK_PACKET_SIZE { - debug!(target:"net", "Wrong ack packet size"); + debug!(target: "network", "Wrong ack packet size"); return Err(From::from(NetworkError::BadProtocol)); } self.ack_cipher = data.to_vec(); @@ -240,7 +236,7 @@ impl Handshake { // Try to interpret as EIP-8 packet let total = (((data[0] as u16) << 8 | (data[1] as u16)) as usize) + 2; 
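Note: the rewritten `readable` above drains every frame the connection has buffered instead of handling at most one per readiness event, and bails out as soon as the handshake reaches `StartSession`. The control flow, reduced to a self-contained toy (all names here are stand-ins, not the real handshake types):

    #[derive(PartialEq)]
    enum State { ReadingAuth, ReadingAck, StartSession }

    fn drive(frames: &mut Vec<Vec<u8>>, state: &mut State) {
        // `frames.pop()` plays the role of `connection.readable()`.
        while let Some(_frame) = frames.pop() {
            *state = match *state {
                State::ReadingAuth => State::ReadingAck,    // read_auth(..)
                State::ReadingAck => State::StartSession,   // read_ack(..)
                State::StartSession => State::StartSession, // ignore extras
            };
            if *state == State::StartSession {
                break; // session established: stop consuming, clear the timer
            }
        }
    }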
if total < V4_ACK_PACKET_SIZE {
- debug!(target:"net", "Wrong EIP8 ack packet size");
+ debug!(target: "network", "Wrong EIP8 ack packet size");
return Err(From::from(NetworkError::BadProtocol));
}
let rest = total - data.len();
@@ -252,7 +248,7 @@ impl Handshake {
}
fn read_ack_eip8(&mut self, secret: &Secret, data: &[u8]) -> Result<(), UtilError> {
- trace!(target:"network", "Received EIP8 handshake auth from {:?}", self.connection.remote_addr_str());
+ trace!(target: "network", "Received EIP8 handshake ack from {:?}", self.connection.remote_addr_str());
self.ack_cipher.extend_from_slice(data);
let ack = try!(ecies::decrypt(secret, &self.ack_cipher[0..2], &self.ack_cipher[2..]));
let rlp = UntrustedRlp::new(&ack);
@@ -265,7 +261,7 @@ impl Handshake {
/// Sends auth message
fn write_auth(&mut self, io: &IoContext, secret: &Secret, public: &Public) -> Result<(), UtilError> where Message: Send + Clone {
- trace!(target:"network", "Sending handshake auth to {:?}", self.connection.remote_addr_str());
+ trace!(target: "network", "Sending handshake auth to {:?}", self.connection.remote_addr_str());
let mut data = [0u8; /*Signature::SIZE*/ 65 + /*H256::SIZE*/ 32 + /*Public::SIZE*/ 64 + /*H256::SIZE*/ 32 + 1]; //TODO: use associated constants
let len = data.len();
{
@@ -292,7 +288,7 @@ impl Handshake {
/// Sends ack message
fn write_ack(&mut self, io: &IoContext) -> Result<(), UtilError> where Message: Send + Clone {
- trace!(target:"network", "Sending handshake ack to {:?}", self.connection.remote_addr_str());
+ trace!(target: "network", "Sending handshake ack to {:?}", self.connection.remote_addr_str());
let mut data = [0u8; 1 + /*Public::SIZE*/ 64 + /*H256::SIZE*/ 32]; //TODO: use associated constants
let len = data.len();
{
@@ -311,7 +307,7 @@ impl Handshake {
/// Sends EIP8 ack message
fn write_ack_eip8(&mut self, io: &IoContext) -> Result<(), UtilError> where Message: Send + Clone {
- trace!(target:"network", "Sending EIP8 handshake ack to {:?}", self.connection.remote_addr_str());
+ trace!(target: "network", "Sending EIP8 handshake ack to {:?}", self.connection.remote_addr_str());
let mut rlp = RlpStream::new_list(3);
rlp.append(self.ecdhe.public());
rlp.append(&self.nonce);
diff --git a/util/src/network/host.rs b/util/src/network/host.rs
index e986c6020..7b1baf97a 100644
--- a/util/src/network/host.rs
+++ b/util/src/network/host.rs
@@ -51,7 +51,7 @@ const MAX_HANDSHAKES: usize = 80;
const MAX_HANDSHAKES_PER_ROUND: usize = 32;
const MAINTENANCE_TIMEOUT: u64 = 1000;
-#[derive(Debug, Clone)]
+#[derive(Debug, PartialEq, Clone)]
/// Network service configuration
pub struct NetworkConfiguration {
/// Directory path to store network configuration. None means nothing will be saved
pub config_path: Option,
/// IP address to listen for incoming connections. Listen to all connections by default
pub listen_address: Option,
/// IP address to advertise. Detected automatically if none.
pub public_address: Option,
/// Port for UDP connections, same as TCP by default
pub udp_port: Option,
/// Enable NAT configuration
pub nat_enabled: bool,
/// Enable discovery
pub discovery_enabled: bool,
/// List of initial node addresses
pub boot_nodes: Vec,
/// Use provided node key instead of default
pub use_secret: Option,
- /// Number of connected peers to maintain
- pub ideal_peers: u32,
+ /// Minimum number of connected peers to maintain
+ pub min_peers: u32,
+ /// Maximum allowed number of peers
+ pub max_peers: u32,
/// List of reserved node addresses.
pub reserved_nodes: Vec,
/// The non-reserved peer mode. 
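Note: the single `ideal_peers` knob is split into a floor and a ceiling: `connect_peers` keeps dialing out until `min_peers` sessions exist, while incoming non-reserved connections are rejected once `max_peers` is reached (both call sites are below). Configuring the new pair, assuming the usual constructor (sketch):

    let mut config = NetworkConfiguration::new();
    config.min_peers = 25; // dial out until at least this many sessions exist
    config.max_peers = 50; // still accept incoming peers up to this hard cap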
@@ -96,7 +98,8 @@ impl NetworkConfiguration { discovery_enabled: true, boot_nodes: Vec::new(), use_secret: None, - ideal_peers: 25, + min_peers: 25, + max_peers: 50, reserved_nodes: Vec::new(), non_reserved_mode: NonReservedPeerMode::Accept, } @@ -355,11 +358,11 @@ impl Host { let keys = if let Some(ref secret) = config.use_secret { KeyPair::from_secret(secret.clone()).unwrap() } else { - config.config_path.clone().and_then(|ref p| load_key(&Path::new(&p))) + config.config_path.clone().and_then(|ref p| load_key(Path::new(&p))) .map_or_else(|| { let key = KeyPair::create().unwrap(); if let Some(path) = config.config_path.clone() { - save_key(&Path::new(&path), &key.secret()); + save_key(Path::new(&path), key.secret()); } key }, @@ -597,19 +600,19 @@ impl Host { } fn connect_peers(&self, io: &IoContext) { - let (ideal_peers, mut pin) = { + let (min_peers, mut pin) = { let info = self.info.read(); if info.capabilities.is_empty() { return; } let config = &info.config; - (config.ideal_peers, config.non_reserved_mode == NonReservedPeerMode::Deny) + (config.min_peers, config.non_reserved_mode == NonReservedPeerMode::Deny) }; let session_count = self.session_count(); let reserved_nodes = self.reserved_nodes.read(); - if session_count >= ideal_peers as usize + reserved_nodes.len() { + if session_count >= min_peers as usize + reserved_nodes.len() { // check if all pinned nodes are connected. if reserved_nodes.iter().all(|n| self.have_session(n) && self.connecting_to(n)) { return; @@ -767,12 +770,12 @@ impl Host { self.num_sessions.fetch_add(1, AtomicOrdering::SeqCst); if !s.info.originated { let session_count = self.session_count(); - let (ideal_peers, reserved_only) = { + let (max_peers, reserved_only) = { let info = self.info.read(); - (info.config.ideal_peers, info.config.non_reserved_mode == NonReservedPeerMode::Deny) + (info.config.max_peers, info.config.non_reserved_mode == NonReservedPeerMode::Deny) }; - if session_count >= ideal_peers as usize || reserved_only { + if session_count >= max_peers as usize || reserved_only { // only proceed if the connecting peer is reserved. if !self.reserved_nodes.read().contains(s.id().unwrap()) { s.disconnect(io, DisconnectReason::TooManyPeers); @@ -1099,7 +1102,7 @@ fn save_key(path: &Path, key: &Secret) { return; } }; - if let Err(e) = restrict_permissions_owner(&path) { + if let Err(e) = restrict_permissions_owner(path) { warn!(target: "network", "Failed to modify permissions of the file (chmod: {})", e); } if let Err(e) = file.write(&key.hex().into_bytes()) { diff --git a/util/src/network/ip_utils.rs b/util/src/network/ip_utils.rs index 27ff29737..276c3fec0 100644 --- a/util/src/network/ip_utils.rs +++ b/util/src/network/ip_utils.rs @@ -16,17 +16,12 @@ // Based on original work by David Levy https://raw.githubusercontent.com/dlevy47/rust-interfaces -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; use std::io; use igd::{PortMappingProtocol, search_gateway_from_timeout}; use std::time::Duration; use network::node_table::{NodeEndpoint}; -pub enum IpAddr{ - V4(Ipv4Addr), - V6(Ipv6Addr), -} - /// Socket address extension for rustc beta. To be replaces with now unstable API pub trait SocketAddrExt { /// Returns true for the special 'unspecified' address 0.0.0.0. 
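Note: `NetworkSettings` gaining `Clone` and a `Default` impl means callers can use struct-update syntax instead of spelling out every field (sketch):

    // Override just the peer counts; everything else takes the defaults above
    // (chain "homestead", port 30303, RPC on 8545, ...).
    let settings = NetworkSettings {
        min_peers: 10,
        max_peers: 20,
        ..Default::default()
    };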
@@ -66,8 +61,7 @@ mod getinterfaces { use std::{mem, io, ptr}; use libc::{AF_INET, AF_INET6}; use libc::{getifaddrs, freeifaddrs, ifaddrs, sockaddr, sockaddr_in, sockaddr_in6}; - use std::net::{Ipv4Addr, Ipv6Addr}; - use super::IpAddr; + use std::net::{Ipv4Addr, Ipv6Addr, IpAddr}; fn convert_sockaddr (sa: *mut sockaddr) -> Option { if sa == ptr::null_mut() { return None; } diff --git a/util/src/network/session.rs b/util/src/network/session.rs index c19dfbcf8..d90ecb062 100644 --- a/util/src/network/session.rs +++ b/util/src/network/session.rs @@ -128,7 +128,7 @@ impl Session { nonce: &H256, stats: Arc, host: &HostInfo) -> Result where Message: Send + Clone { let originated = id.is_some(); - let mut handshake = Handshake::new(token, id, socket, &nonce, stats).expect("Can't create handshake"); + let mut handshake = Handshake::new(token, id, socket, nonce, stats).expect("Can't create handshake"); try!(handshake.start(io, host, originated)); Ok(Session { state: State::Handshake(handshake), @@ -313,7 +313,7 @@ impl Session { self.connection().token() } - fn read_packet(&mut self, io: &IoContext, packet: Packet, host: &HostInfo) -> Result + fn read_packet(&mut self, io: &IoContext, packet: Packet, host: &HostInfo) -> Result where Message: Send + Sync + Clone { if packet.data.len() < 2 { return Err(From::from(NetworkError::BadProtocol)); @@ -381,7 +381,7 @@ impl Session { self.send(io, rlp) } - fn read_hello(&mut self, io: &IoContext, rlp: &UntrustedRlp, host: &HostInfo) -> Result<(), UtilError> + fn read_hello(&mut self, io: &IoContext, rlp: &UntrustedRlp, host: &HostInfo) -> Result<(), UtilError> where Message: Send + Sync + Clone { let protocol = try!(rlp.val_at::(0)); let client_version = try!(rlp.val_at::(1)); diff --git a/util/src/network_settings.rs b/util/src/network_settings.rs index 7f02272b6..9e590c5e7 100644 --- a/util/src/network_settings.rs +++ b/util/src/network_settings.rs @@ -16,13 +16,15 @@ //! Structure to hold network settings configured from CLI /// Networking & RPC settings -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub struct NetworkSettings { /// Node name pub name: String, /// Name of the chain we are connected to pub chain: String, - /// Ideal number of peers + /// Min number of peers + pub min_peers: u32, + /// Max number of peers pub max_peers: u32, /// Networking port pub network_port: u16, @@ -34,3 +36,17 @@ pub struct NetworkSettings { pub rpc_port: u16, } +impl Default for NetworkSettings { + fn default() -> Self { + NetworkSettings { + name: "".into(), + chain: "homestead".into(), + min_peers: 25, + max_peers: 50, + network_port: 30303, + rpc_enabled: true, + rpc_interface: "local".into(), + rpc_port: 8545 + } + } +} diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index 63ec3dd50..7c2ab3b71 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -24,7 +24,6 @@ use hashdb::*; use memorydb::*; use std::ops::*; use std::sync::*; -use std::env; use std::collections::HashMap; use kvdb::{Database, DBTransaction}; @@ -40,22 +39,29 @@ use kvdb::{Database, DBTransaction}; pub struct OverlayDB { overlay: MemoryDB, backing: Arc, + column: Option, } impl OverlayDB { /// Create a new instance of OverlayDB given a `backing` database. - pub fn new(backing: Database) -> OverlayDB { Self::new_with_arc(Arc::new(backing)) } - - /// Create a new instance of OverlayDB given a `backing` database. 
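Note: `OverlayDB` now always borrows a shared `Arc` around the backing `Database` plus a target column, which lets several overlays (and the journal DBs above) sit on one physical RocksDB instance. Typical usage after this change, mirroring the reworked tests below (sketch; the temp path is a stand-in):

    let backing = Arc::new(Database::open_default("/tmp/overlay-example").unwrap());
    let mut overlay = OverlayDB::new(backing.clone(), None);
    let key = overlay.insert(b"foo");         // staged in the in-memory overlay
    let batch = backing.transaction();
    overlay.commit_to_batch(&batch).unwrap(); // move staged refs/values into the batch
    backing.write(batch).unwrap();            // one atomic write to disk
    assert!(overlay.contains(&key));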
- pub fn new_with_arc(backing: Arc) -> OverlayDB { - OverlayDB{ overlay: MemoryDB::new(), backing: backing } + pub fn new(backing: Arc, col: Option) -> OverlayDB { + OverlayDB{ overlay: MemoryDB::new(), backing: backing, column: col } } /// Create a new instance of OverlayDB with an anonymous temporary database. + #[cfg(test)] pub fn new_temp() -> OverlayDB { - let mut dir = env::temp_dir(); + let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - Self::new(Database::open_default(dir.to_str().unwrap()).unwrap()) + Self::new(Arc::new(Database::open_default(dir.to_str().unwrap()).unwrap()), None) + } + + /// Commit all operations in a single batch. + #[cfg(test)] + pub fn commit(&mut self) -> Result { + let batch = self.backing.transaction(); + let res = try!(self.commit_to_batch(&batch)); + self.backing.write(batch).map(|_| res).map_err(|e| e.into()) } /// Commit all operations to given batch. @@ -88,91 +94,16 @@ impl OverlayDB { Ok(ret) } - /// Commit all memory operations to the backing database. - /// - /// Returns either an error or the number of items changed in the backing database. - /// - /// Will return an error if the number of `remove()`s ever exceeds the number of - /// `insert()`s for any key. This will leave the database in an undeterminate - /// state. Don't ever let it happen. - /// - /// # Example - /// ``` - /// extern crate ethcore_util; - /// use ethcore_util::hashdb::*; - /// use ethcore_util::overlaydb::*; - /// fn main() { - /// let mut m = OverlayDB::new_temp(); - /// let key = m.insert(b"foo"); // insert item. - /// assert!(m.contains(&key)); // key exists (in memory). - /// assert_eq!(m.commit().unwrap(), 1); // 1 item changed. - /// assert!(m.contains(&key)); // key still exists (in backing). - /// m.remove(&key); // delete item. - /// assert!(!m.contains(&key)); // key "doesn't exist" (though still does in backing). - /// m.remove(&key); // oh dear... more removes than inserts for the key... - /// //m.commit().unwrap(); // this commit/unwrap would cause a panic. - /// m.revert(); // revert both removes. - /// assert!(m.contains(&key)); // key now still exists. - /// } - /// ``` - pub fn commit(&mut self) -> Result { - let mut ret = 0u32; - let mut deletes = 0usize; - for i in self.overlay.drain().into_iter() { - let (key, (value, rc)) = i; - if rc != 0 { - match self.payload(&key) { - Some(x) => { - let (back_value, back_rc) = x; - let total_rc: i32 = back_rc as i32 + rc; - if total_rc < 0 { - return Err(From::from(BaseDataError::NegativelyReferencedHash(key))); - } - deletes += if self.put_payload(&key, (back_value, total_rc as u32)) {1} else {0}; - } - None => { - if rc < 0 { - return Err(From::from(BaseDataError::NegativelyReferencedHash(key))); - } - self.put_payload(&key, (value, rc as u32)); - } - }; - ret += 1; - } - } - trace!("OverlayDB::commit() deleted {} nodes", deletes); - Ok(ret) - } - /// Revert all operations on this object (i.e. `insert()`s and `remove()`s) since the /// last `commit()`. - /// - /// # Example - /// ``` - /// extern crate ethcore_util; - /// use ethcore_util::hashdb::*; - /// use ethcore_util::overlaydb::*; - /// fn main() { - /// let mut m = OverlayDB::new_temp(); - /// let foo = m.insert(b"foo"); // insert foo. - /// m.commit().unwrap(); // commit - new operations begin here... - /// let bar = m.insert(b"bar"); // insert bar. - /// m.remove(&foo); // remove foo. - /// assert!(!m.contains(&foo)); // foo is gone. - /// assert!(m.contains(&bar)); // bar is here. - /// m.revert(); // revert the last two operations. 
-	///   assert!(m.contains(&foo));            // foo is here.
-	///   assert!(!m.contains(&bar));            // bar is gone.
-	/// }
-	/// ```
 	pub fn revert(&mut self) { self.overlay.clear(); }
 
 	/// Get the number of references that would be committed.
-	pub fn commit_refs(&self, key: &H256) -> i32 { self.overlay.raw(&key).map_or(0, |&(_, refs)| refs) }
+	pub fn commit_refs(&self, key: &H256) -> i32 { self.overlay.raw(key).map_or(0, |&(_, refs)| refs) }
 
 	/// Get the refs and value of the given key.
 	fn payload(&self, key: &H256) -> Option<(Bytes, u32)> {
-		self.backing.get(key)
+		self.backing.get(self.column, key)
 			.expect("Low-level database error. Some issue with your hard disk?")
 			.map(|d| {
 				let r = Rlp::new(&d);
@@ -186,24 +117,10 @@ impl OverlayDB {
 			let mut s = RlpStream::new_list(2);
 			s.append(&payload.1);
 			s.append(&payload.0);
-			batch.put(key, s.as_raw()).expect("Low-level database error. Some issue with your hard disk?");
+			batch.put(self.column, key, s.as_raw()).expect("Low-level database error. Some issue with your hard disk?");
 			false
 		} else {
-			batch.delete(key).expect("Low-level database error. Some issue with your hard disk?");
-			true
-		}
-	}
-
-	/// Put the refs and value of the given key, possibly deleting it from the db.
-	fn put_payload(&self, key: &H256, payload: (Bytes, u32)) -> bool {
-		if payload.1 > 0 {
-			let mut s = RlpStream::new_list(2);
-			s.append(&payload.1);
-			s.append(&payload.0);
-			self.backing.put(key, s.as_raw()).expect("Low-level database error. Some issue with your hard disk?");
-			false
-		} else {
-			self.backing.delete(key).expect("Low-level database error. Some issue with your hard disk?");
+			batch.delete(self.column, key).expect("Low-level database error. Some issue with your hard disk?");
 			true
 		}
 	}
@@ -212,7 +129,7 @@ impl OverlayDB {
 impl HashDB for OverlayDB {
 	fn keys(&self) -> HashMap<H256, i32> {
 		let mut ret: HashMap<H256, i32> = HashMap::new();
-		for (key, _) in self.backing.iter() {
+		for (key, _) in self.backing.iter(self.column) {
 			let h = H256::from_slice(key.deref());
 			let r = self.payload(&h).unwrap().1;
 			ret.insert(h, r as i32);
@@ -274,6 +191,22 @@ impl HashDB for OverlayDB {
 	fn remove(&mut self, key: &H256) { self.overlay.remove(key); }
 }
 
+#[test]
+fn overlaydb_revert() {
+	let mut m = OverlayDB::new_temp();
+	let foo = m.insert(b"foo");          // insert foo.
+	let batch = m.backing.transaction();
+	m.commit_to_batch(&batch).unwrap();  // commit - new operations begin here...
+	m.backing.write(batch).unwrap();
+	let bar = m.insert(b"bar");          // insert bar.
+	m.remove(&foo);                      // remove foo.
+	assert!(!m.contains(&foo));          // foo is gone.
+	assert!(m.contains(&bar));           // bar is here.
+	m.revert();                          // revert the last two operations.
+	assert!(m.contains(&foo));           // foo is here.
+	assert!(!m.contains(&bar));          // bar is gone.
+}
+
 #[test]
 fn overlaydb_overlay_insert_and_remove() {
 	let mut trie = OverlayDB::new_temp();
@@ -366,14 +299,18 @@ fn overlaydb_complex() {
 fn playpen() {
 	use std::fs;
 	{
-		let db: Database = Database::open_default("/tmp/test").unwrap();
-		db.put(b"test", b"test2").unwrap();
-		match db.get(b"test") {
+		let db = Database::open_default("/tmp/test").unwrap();
+		let batch = db.transaction();
+		batch.put(None, b"test", b"test2").unwrap();
+		db.write(batch).unwrap();
+		match db.get(None, b"test") {
 			Ok(Some(value)) => println!("Got value {:?}", value.deref()),
 			Ok(None) => println!("No value for that key"),
 			Err(..) => println!("Gah"),
 		}
-		db.delete(b"test").unwrap();
+		let batch = db.transaction();
+		batch.delete(None, b"test").unwrap();
+		db.write(batch).unwrap();
 	}
 	fs::remove_dir_all("/tmp/test").unwrap();
 }
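What the deleted doc-example demonstrated still holds for the batch-based API: the committed reference count of a key is the backing count plus the overlay delta, and a negative total is the `NegativelyReferencedHash` error. A toy, self-contained sketch of that invariant (stand-in types, not the real `OverlayDB`/`MemoryDB`):

```rust
use std::collections::HashMap;

// Toy stand-ins: overlay holds signed refcount deltas, backing holds committed counts.
struct ToyOverlayDb {
    overlay: HashMap<String, i32>,
    backing: HashMap<String, u32>,
}

impl ToyOverlayDb {
    // Mirrors the invariant enforced by commit_to_batch: the total refcount
    // (backing + overlay delta) must never go negative.
    fn commit(&mut self) -> Result<u32, String> {
        let mut changed = 0u32;
        for (key, delta) in self.overlay.drain() {
            if delta == 0 { continue; }
            let backed = *self.backing.get(&key).unwrap_or(&0) as i32;
            let total = backed + delta;
            if total < 0 {
                return Err(format!("negatively referenced hash: {}", key));
            }
            if total == 0 {
                self.backing.remove(&key);
            } else {
                self.backing.insert(key, total as u32);
            }
            changed += 1;
        }
        Ok(changed)
    }
}

fn main() {
    let mut db = ToyOverlayDb { overlay: HashMap::new(), backing: HashMap::new() };
    db.overlay.insert("foo".into(), 1);  // one insert
    assert_eq!(db.commit(), Ok(1));
    db.overlay.insert("foo".into(), -2); // two removes: more than ever inserted
    assert!(db.commit().is_err());       // the "don't ever let it happen" case
}
```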
diff --git a/util/src/rlp/commonrlps.rs b/util/src/rlp/commonrlps.rs
new file mode 100644
index 000000000..670657224
--- /dev/null
+++ b/util/src/rlp/commonrlps.rs
@@ -0,0 +1,106 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Contains RLPs used for compression.
+
+use rlp::rlpcompression::InvalidRlpSwapper;
+
+lazy_static! {
+	/// Swapper for snapshot compression.
+	pub static ref SNAPSHOT_RLP_SWAPPER: InvalidRlpSwapper<'static> = InvalidRlpSwapper::new(EMPTY_RLPS, INVALID_RLPS);
+}
+
+lazy_static! {
+	/// Swapper with common long RLPs; up to 127 can be added.
+	pub static ref BLOCKS_RLP_SWAPPER: InvalidRlpSwapper<'static> = InvalidRlpSwapper::new(COMMON_RLPS, INVALID_RLPS);
+}
+
+static EMPTY_RLPS: &'static [&'static [u8]] = &[
+	// RLP of SHA3_NULL_RLP
+	&[160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33],
+	// RLP of SHA3_EMPTY
+	&[160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112]
+];
+
+static COMMON_RLPS: &'static [&'static [u8]] = &[
+	// RLP of SHA3_NULL_RLP
+	&[160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33],
+	// RLP of SHA3_EMPTY
+	&[160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112],
+	// Other RLPs found in blocks DB using the test below.
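+	// (Editorial note, hedged interpretation: in RLP a leading 160, i.e. 0xa0 = 0x80 + 32,
+	// introduces a 32-byte string such as a hash, and a leading 148, i.e. 0x94 = 0x80 + 20,
+	// introduces a 20-byte string such as an address; the entries below are therefore
+	// recurring hashes, addresses and a long all-zero run observed in the database scan.)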
+ &[160, 29, 204, 77, 232, 222, 199, 93, 122, 171, 133, 181, 103, 182, 204, 212, 26, 211, 18, 69, 27, 148, 138, 116, 19, 240, 161, 66, 253, 64, 212, 147, 71], + &[148, 50, 190, 52, 59, 148, 248, 96, 18, 77, 196, 254, 226, 120, 253, 203, 211, 140, 16, 45, 136], + &[148, 82, 188, 68, 213, 55, 131, 9, 238, 42, 191, 21, 57, 191, 113, 222, 27, 125, 123, 227, 181], + &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] +]; + +static INVALID_RLPS: &'static [&'static [u8]] = &[&[0x81, 0x0], &[0x81, 0x1], &[0x81, 0x2], &[0x81, 0x3], &[0x81, 0x4], &[0x81, 0x5], &[0x81, 0x6], &[0x81, 0x7], &[0x81, 0x8], &[0x81, 0x9], &[0x81, 0xa], &[0x81, 0xb], &[0x81, 0xc], &[0x81, 0xd], &[0x81, 0xe], &[0x81, 0xf], &[0x81, 0x10], &[0x81, 0x11], &[0x81, 0x12], &[0x81, 0x13], &[0x81, 0x14], &[0x81, 0x15], &[0x81, 0x16], &[0x81, 0x17], &[0x81, 0x18], &[0x81, 0x19], &[0x81, 0x1a], &[0x81, 0x1b], &[0x81, 0x1c], &[0x81, 0x1d], &[0x81, 0x1e], &[0x81, 0x1f], &[0x81, 0x20], &[0x81, 0x21], &[0x81, 0x22], &[0x81, 0x23], &[0x81, 0x24], &[0x81, 0x25], &[0x81, 0x26], &[0x81, 0x27], &[0x81, 0x28], &[0x81, 0x29], &[0x81, 0x2a], &[0x81, 0x2b], &[0x81, 0x2c], &[0x81, 0x2d], &[0x81, 0x2e], &[0x81, 0x2f], &[0x81, 0x30], &[0x81, 0x31], &[0x81, 0x32], &[0x81, 0x33], &[0x81, 0x34], &[0x81, 0x35], &[0x81, 0x36], &[0x81, 0x37], &[0x81, 0x38], &[0x81, 0x39], &[0x81, 0x3a], &[0x81, 0x3b], &[0x81, 0x3c], &[0x81, 0x3d], &[0x81, 0x3e], &[0x81, 0x3f], &[0x81, 0x40], &[0x81, 0x41], &[0x81, 0x42], &[0x81, 0x43], &[0x81, 0x44], &[0x81, 0x45], &[0x81, 0x46], &[0x81, 0x47], &[0x81, 0x48], &[0x81, 0x49], &[0x81, 0x4a], &[0x81, 0x4b], &[0x81, 0x4c], &[0x81, 0x4d], &[0x81, 0x4e], &[0x81, 0x4f], &[0x81, 0x50], &[0x81, 0x51], &[0x81, 0x52], &[0x81, 0x53], &[0x81, 0x54], &[0x81, 0x55], &[0x81, 0x56], &[0x81, 0x57], &[0x81, 0x58], &[0x81, 0x59], &[0x81, 0x5a], &[0x81, 0x5b], &[0x81, 0x5c], &[0x81, 0x5d], &[0x81, 0x5e], &[0x81, 0x5f], &[0x81, 0x60], &[0x81, 0x61], &[0x81, 0x62], &[0x81, 0x63], &[0x81, 0x64], &[0x81, 0x65], &[0x81, 0x66], &[0x81, 0x67], &[0x81, 0x68], &[0x81, 0x69], &[0x81, 0x6a], &[0x81, 0x6b], &[0x81, 0x6c], &[0x81, 0x6d], &[0x81, 0x6e], &[0x81, 0x6f], &[0x81, 0x70], &[0x81, 0x71], &[0x81, 0x72], &[0x81, 0x73], &[0x81, 0x74], &[0x81, 0x75], &[0x81, 0x76], &[0x81, 0x77], &[0x81, 0x78], &[0x81, 0x79], &[0x81, 0x7a], &[0x81, 0x7b], &[0x81, 0x7c], &[0x81, 0x7d], &[0x81, 0x7e]]; + +#[cfg(test)] +mod tests { + #[test] + #[ignore] + fn analyze_db() { + use rlp::{UntrustedRlp, View}; + use std::collections::HashMap; + use kvdb::*; + + let path = "db path".to_string(); + let values: Vec<_> = Database::open_default(&path).unwrap().iter(Some(2)).map(|(_, v)| v).collect(); + let mut rlp_counts: HashMap<_, u32> = HashMap::new(); + let mut rlp_sizes: HashMap<_, u32> = HashMap::new(); + + fn flat_rlp<'a>(acc: &mut Vec>, rlp: UntrustedRlp<'a>) { + match 
rlp.is_data() {
+				true => if rlp.size()>=70 {
+					match rlp.data() {
+						Ok(x) => flat_rlp(acc, UntrustedRlp::new(x)),
+						_ => acc.push(rlp),
+					}
+				} else {
+					acc.push(rlp);
+				},
+				false => for r in rlp.iter() { flat_rlp(acc, r); },
+			}
+		}
+
+		fn space_saving(bytes: &[u8]) -> u32 {
+			let l = bytes.len() as u32;
+			match l >= 2 {
+				true => l-2,
+				false => 0,
+			}
+		}
+
+		for v in values.iter() {
+			let rlp = UntrustedRlp::new(&v);
+			let mut flat = Vec::new();
+			flat_rlp(&mut flat, rlp);
+			for r in flat.iter() {
+				*rlp_counts.entry(r.as_raw()).or_insert(0) += 1;
+				*rlp_sizes.entry(r.as_raw()).or_insert(0) += space_saving(r.as_raw());
+			}
+		}
+		let mut size_vec: Vec<_> = rlp_sizes.iter().collect();
+		size_vec.sort_by(|a, b| b.1.cmp(a.1));
+
+		// Exclude rare large RLPs.
+		for v in size_vec.iter().filter(|v| rlp_counts.get(v.0).unwrap()>&100).take(20) {
+			println!("{:?}, {:?}", v, rlp_counts.get(v.0).unwrap());
+		}
+		println!("DONE");
+	}
+}
diff --git a/util/src/rlp/mod.rs b/util/src/rlp/mod.rs
index 4be76dd3d..4dc14c8a3 100644
--- a/util/src/rlp/mod.rs
+++ b/util/src/rlp/mod.rs
@@ -51,16 +51,19 @@ mod rlperrors;
 mod rlpin;
 mod untrusted_rlp;
 mod rlpstream;
+mod rlpcompression;
+mod commonrlps;
 mod bytes;
 
 #[cfg(test)]
 mod tests;
 
 pub use self::rlperrors::DecoderError;
-pub use self::rlptraits::{Decoder, Decodable, View, Stream, Encodable, Encoder, RlpEncodable, RlpDecodable};
+pub use self::rlptraits::{Decoder, Decodable, View, Stream, Encodable, Encoder, RlpEncodable, RlpDecodable, Compressible};
 pub use self::untrusted_rlp::{UntrustedRlp, UntrustedRlpIterator, PayloadInfo, Prototype};
 pub use self::rlpin::{Rlp, RlpIterator};
-pub use self::rlpstream::{RlpStream};
+pub use self::rlpstream::RlpStream;
+pub use self::rlpcompression::RlpType;
 pub use elastic_array::ElasticArray1024;
 
 use super::hash::H256;
diff --git a/util/src/rlp/rlpcompression.rs b/util/src/rlp/rlpcompression.rs
new file mode 100644
index 000000000..a9e74addc
--- /dev/null
+++ b/util/src/rlp/rlpcompression.rs
@@ -0,0 +1,245 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
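An editorial aside before the implementation: the scheme relies on byte strings that can never occur in canonical RLP. A single byte below 0x80 encodes as itself, so a two-byte sequence `[0x81, b]` with `b < 0x80` is invalid (this is exactly the `RlpInvalidIndirection` check in untrusted_rlp.rs further down) and is free to act as a short placeholder. A hedged, standalone sketch of the substitution idea on plain byte strings, using a toy type rather than the crate's `InvalidRlpSwapper`:

```rust
use std::collections::HashMap;

// Toy bidirectional substitution table; assumes the "invalid" codes can never
// occur in well-formed input, so decompression is unambiguous.
struct ToySwapper {
    forward: HashMap<Vec<u8>, Vec<u8>>,  // common long sequence -> short invalid code
    backward: HashMap<Vec<u8>, Vec<u8>>, // short invalid code -> common long sequence
}

impl ToySwapper {
    fn new(common: &[&[u8]], invalid: &[&[u8]]) -> ToySwapper {
        let mut forward = HashMap::new();
        let mut backward = HashMap::new();
        for (c, i) in common.iter().zip(invalid.iter()) {
            forward.insert(c.to_vec(), i.to_vec());
            backward.insert(i.to_vec(), c.to_vec());
        }
        ToySwapper { forward: forward, backward: backward }
    }

    fn compress(&self, item: &[u8]) -> Vec<u8> {
        self.forward.get(item).cloned().unwrap_or_else(|| item.to_vec())
    }

    fn decompress(&self, item: &[u8]) -> Vec<u8> {
        self.backward.get(item).cloned().unwrap_or_else(|| item.to_vec())
    }
}

fn main() {
    // 0x83 "cat": a three-byte string; [0x81, 0x00] is invalid, hence reusable.
    let swapper = ToySwapper::new(&[&[0x83, b'c', b'a', b't']], &[&[0x81, 0x00]]);
    let compressed = swapper.compress(&[0x83, b'c', b'a', b't']);
    assert_eq!(compressed, vec![0x81, 0x00]);
    assert_eq!(swapper.decompress(&compressed), vec![0x83, b'c', b'a', b't']);
}
```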
+
+use rlp::{UntrustedRlp, View, Compressible, encode, ElasticArray1024, Stream, RlpStream};
+use rlp::commonrlps::{BLOCKS_RLP_SWAPPER, SNAPSHOT_RLP_SWAPPER};
+use std::collections::HashMap;
+
+/// Stores RLPs used for compression.
+pub struct InvalidRlpSwapper<'a> {
+	invalid_to_valid: HashMap<&'a [u8], &'a [u8]>,
+	valid_to_invalid: HashMap<&'a [u8], &'a [u8]>,
+}
+
+impl<'a> InvalidRlpSwapper<'a> {
+	/// Construct a swapper from a list of common RLPs.
+	pub fn new(rlps_to_swap: &[&'a [u8]], invalid_rlps: &[&'a [u8]]) -> Self {
+		if rlps_to_swap.len() > 0x7e {
+			panic!("Invalid usage, only 127 RLPs can be swappable.");
+		}
+		let mut invalid_to_valid = HashMap::new();
+		let mut valid_to_invalid = HashMap::new();
+		for (&rlp, &invalid) in rlps_to_swap.iter().zip(invalid_rlps.iter()) {
+			invalid_to_valid.insert(invalid, rlp);
+			valid_to_invalid.insert(rlp, invalid);
+		}
+		InvalidRlpSwapper {
+			invalid_to_valid: invalid_to_valid,
+			valid_to_invalid: valid_to_invalid
+		}
+	}
+	/// Get a valid RLP corresponding to an invalid one.
+	fn get_valid(&self, invalid_rlp: &[u8]) -> Option<&[u8]> {
+		self.invalid_to_valid.get(invalid_rlp).map(|r| r.clone())
+	}
+	/// Get an invalid RLP corresponding to a valid one.
+	fn get_invalid(&self, valid_rlp: &[u8]) -> Option<&[u8]> {
+		self.valid_to_invalid.get(valid_rlp).map(|r| r.clone())
+	}
+}
+
+/// Type of RLP indicating its origin database.
+pub enum RlpType {
+	/// RLP used in blocks database.
+	Blocks,
+	/// RLP used in snapshots.
+	Snapshot,
+}
+
+fn to_elastic(slice: &[u8]) -> ElasticArray1024<u8> {
+	let mut out = ElasticArray1024::new();
+	out.append_slice(slice);
+	out
+}
+
+fn map_rlp<F>(rlp: &UntrustedRlp, f: F) -> Option<ElasticArray1024<u8>> where
+	F: Fn(&UntrustedRlp) -> Option<ElasticArray1024<u8>> {
+	match rlp.iter()
+		.fold((false, RlpStream::new_list(rlp.item_count())),
+		|(is_some, mut acc), subrlp| {
+			let new = f(&subrlp);
+			if let Some(ref insert) = new {
+				acc.append_raw(&insert[..], 1);
+			} else {
+				acc.append_raw(subrlp.as_raw(), 1);
+			}
+			(is_some || new.is_some(), acc)
+		}) {
+		(true, s) => Some(s.drain()),
+		_ => None,
+	}
+}
+
+/// Replace common RLPs with invalid shorter ones.
+fn simple_compress(rlp: &UntrustedRlp, swapper: &InvalidRlpSwapper) -> ElasticArray1024<u8> {
+	if rlp.is_data() {
+		to_elastic(swapper.get_invalid(rlp.as_raw()).unwrap_or(rlp.as_raw()))
+	} else {
+		map_rlp(rlp, |r| Some(simple_compress(r, swapper))).unwrap_or(to_elastic(rlp.as_raw()))
+	}
+}
+
+/// Recover valid RLP from a compressed form.
+fn simple_decompress(rlp: &UntrustedRlp, swapper: &InvalidRlpSwapper) -> ElasticArray1024<u8> {
+	if rlp.is_data() {
+		to_elastic(swapper.get_valid(rlp.as_raw()).unwrap_or(rlp.as_raw()))
+	} else {
+		map_rlp(rlp, |r| Some(simple_decompress(r, swapper))).unwrap_or(to_elastic(rlp.as_raw()))
+	}
+}
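The `deep_*` functions below additionally recurse into byte payloads that themselves parse as RLP; a compressed inner payload is re-emitted inside a two-item list tagged with the reserved invalid code `[0x81, 0x7f]`, which the decompressor looks for. A hedged toy sketch of just that framing step; raw concatenation stands in for the real two-item RLP list:

```rust
// [0x81, 0x7f] is itself an invalid single-byte encoding, so it can serve as
// an unambiguous marker for "the rest of this payload was deep-compressed".
const MARKER: [u8; 2] = [0x81, 0x7f];

// Wrap a compressed inner payload so the decompressor can recognise it.
fn wrap(compressed_inner: &[u8]) -> Vec<u8> {
    let mut out = MARKER.to_vec();
    out.extend_from_slice(compressed_inner);
    out
}

// Recover the inner payload, or None if this is ordinary uncompressed data.
fn unwrap(data: &[u8]) -> Option<&[u8]> {
    if data.starts_with(&MARKER) { Some(&data[2..]) } else { None }
}

fn main() {
    let framed = wrap(&[0x81, 0x00]);
    assert_eq!(unwrap(&framed), Some(&[0x81, 0x00][..]));
    assert_eq!(unwrap(&[0xc0]), None); // an empty list carries no marker
}
```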
+
+/// Replace common RLPs with invalid shorter ones; `None` if no compression is achieved.
+/// Tries to compress the data inside.
+fn deep_compress(rlp: &UntrustedRlp, swapper: &InvalidRlpSwapper) -> Option<ElasticArray1024<u8>> {
+	let simple_swap = ||
+		swapper.get_invalid(rlp.as_raw()).map(|b| to_elastic(&b));
+	if rlp.is_data() {
+		// Try to treat the inside as RLP.
+		return match rlp.payload_info() {
+			// Shortest decompressed account is 70, so simply try to swap the value.
+			Ok(ref p) if p.value_len < 70 => simple_swap(),
+			_ => {
+				if let Ok(d) = rlp.data() {
+					let internal_rlp = UntrustedRlp::new(d);
+					if let Some(new_d) = deep_compress(&internal_rlp, swapper) {
+						// If compressed, put it in a special list whose first element is the invalid code.
+						let mut rlp = RlpStream::new_list(2);
+						rlp.append_raw(&[0x81, 0x7f], 1);
+						rlp.append_raw(&new_d[..], 1);
+						return Some(rlp.drain());
+					}
+				}
+				simple_swap()
+			},
+		};
+	}
+	// Iterate through the RLP while checking if it has been compressed.
+	map_rlp(rlp, |r| deep_compress(r, swapper))
+}
+
+/// Recover valid RLP from a compressed form; `None` if no decompression is achieved.
+/// Tries to decompress the compressed data inside.
+fn deep_decompress(rlp: &UntrustedRlp, swapper: &InvalidRlpSwapper) -> Option<ElasticArray1024<u8>> {
+	let simple_swap = ||
+		swapper.get_valid(rlp.as_raw()).map(|b| to_elastic(&b));
+	// Simply decompress data.
+	if rlp.is_data() { return simple_swap(); }
+	match rlp.item_count() {
+		// Look for the special compressed list, which contains nested data.
+		2 if rlp.at(0).map(|r| r.as_raw() == &[0x81, 0x7f]).unwrap_or(false) =>
+			rlp.at(1).ok().map_or(simple_swap(),
+			|r| deep_decompress(&r, swapper).map(|d| { let v = d.to_vec(); encode(&v) })),
+		// Iterate through the RLP while checking if it has been compressed.
+		_ => map_rlp(rlp, |r| deep_decompress(r, swapper)),
+	}
+}
+
+impl<'a> Compressible for UntrustedRlp<'a> {
+	type DataType = RlpType;
+
+	fn compress(&self, t: RlpType) -> ElasticArray1024<u8> {
+		match t {
+			RlpType::Snapshot => simple_compress(self, &SNAPSHOT_RLP_SWAPPER),
+			RlpType::Blocks => deep_compress(self, &BLOCKS_RLP_SWAPPER).unwrap_or(to_elastic(self.as_raw())),
+		}
+	}
+
+	fn decompress(&self, t: RlpType) -> ElasticArray1024<u8> {
+		match t {
+			RlpType::Snapshot => simple_decompress(self, &SNAPSHOT_RLP_SWAPPER),
+			RlpType::Blocks => deep_decompress(self, &BLOCKS_RLP_SWAPPER).unwrap_or(to_elastic(self.as_raw())),
+		}
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use rlp::{UntrustedRlp, Compressible, View, RlpType};
+	use rlp::rlpcompression::InvalidRlpSwapper;
+
+	#[test]
+	fn invalid_rlp_swapper() {
+		let to_swap: &[&[u8]] = &[&[0x83, b'c', b'a', b't'], &[0x83, b'd', b'o', b'g']];
+		let invalid_rlp: &[&[u8]] = &[&[0x81, 0x00], &[0x81, 0x01]];
+		let swapper = InvalidRlpSwapper::new(to_swap, invalid_rlp);
+		assert_eq!(Some(invalid_rlp[0]), swapper.get_invalid(&[0x83, b'c', b'a', b't']));
+		assert_eq!(None, swapper.get_invalid(&[0x83, b'b', b'a', b't']));
+		assert_eq!(Some(to_swap[1]), swapper.get_valid(invalid_rlp[1]));
+	}
+
+	#[test]
+	fn simple_compression() {
+		let basic_account_rlp = vec![248, 68, 4, 2, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112];
+		let rlp = UntrustedRlp::new(&basic_account_rlp);
+		let compressed = rlp.compress(RlpType::Snapshot).to_vec();
+		assert_eq!(compressed, vec![198, 4, 2, 129, 0, 129, 1]);
+		let compressed_rlp = UntrustedRlp::new(&compressed);
+		assert_eq!(compressed_rlp.decompress(RlpType::Snapshot).to_vec(), basic_account_rlp);
+	}
+
+	#[test]
+	fn data_compression() {
+		let data_basic_account_rlp = vec![184, 70, 248, 68, 4, 2, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112];
+		let data_rlp = UntrustedRlp::new(&data_basic_account_rlp);
+		let compressed = data_rlp.compress(RlpType::Blocks).to_vec();
+		assert_eq!(compressed, vec![201, 129, 127, 198, 4, 2, 129, 0,
129, 1]);
+		let compressed_rlp = UntrustedRlp::new(&compressed);
+		assert_eq!(compressed_rlp.decompress(RlpType::Blocks).to_vec(), data_basic_account_rlp);
+	}
+
+	#[test]
+	fn nested_list_rlp() {
+		let nested_basic_account_rlp = vec![228, 4, 226, 2, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33];
+		let nested_rlp = UntrustedRlp::new(&nested_basic_account_rlp);
+		let compressed = nested_rlp.compress(RlpType::Blocks).to_vec();
+		assert_eq!(compressed, vec![197, 4, 195, 2, 129, 0]);
+		let compressed_rlp = UntrustedRlp::new(&compressed);
+		assert_eq!(compressed_rlp.decompress(RlpType::Blocks).to_vec(), nested_basic_account_rlp);
+		let compressed = nested_rlp.compress(RlpType::Snapshot).to_vec();
+		assert_eq!(compressed, vec![197, 4, 195, 2, 129, 0]);
+		let compressed_rlp = UntrustedRlp::new(&compressed);
+		assert_eq!(compressed_rlp.decompress(RlpType::Snapshot).to_vec(), nested_basic_account_rlp);
+	}
+
+	#[test]
+	fn malformed_rlp() {
+		let malformed = vec![248, 81, 128, 128, 128, 128, 128, 160, 12, 51, 241, 93, 69, 218, 74, 138, 79, 115, 227, 44, 216, 81, 46, 132, 85, 235, 96, 45, 252, 48, 181, 29, 75, 141, 217, 215, 86, 160, 109, 130, 160, 140, 36, 93, 200, 109, 215, 100, 241, 246, 99, 135, 92, 168, 149, 170, 114, 9, 143, 4, 93, 25, 76, 54, 176, 119, 230, 170, 154, 105, 47, 121, 10, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128];
+		let malformed_rlp = UntrustedRlp::new(&malformed);
+		assert_eq!(malformed_rlp.decompress(RlpType::Blocks).to_vec(), malformed);
+	}
+
+	#[test]
+	#[ignore]
+	fn test_compression() {
+		use kvdb::*;
+		let path = "db to test".to_string();
+		let values: Vec<_> = Database::open_default(&path).unwrap().iter(Some(2)).map(|(_, v)| v).collect();
+		let mut decomp_size = 0;
+		let mut comp_size = 0;
+
+		for v in values.iter() {
+			let rlp = UntrustedRlp::new(&v);
+			let compressed = rlp.compress(RlpType::Blocks).to_vec();
+			comp_size += compressed.len();
+			let decompressed = rlp.decompress(RlpType::Blocks).to_vec();
+			decomp_size += decompressed.len();
+		}
+		println!("Decompressed bytes {:?}, compressed bytes: {:?}", decomp_size, comp_size);
+		assert!(decomp_size > comp_size);
+	}
+}
diff --git a/util/src/rlp/rlptraits.rs b/util/src/rlp/rlptraits.rs
index f29511d87..1e6ef5917 100644
--- a/util/src/rlp/rlptraits.rs
+++ b/util/src/rlp/rlptraits.rs
@@ -26,8 +26,8 @@ use sha3::*;
 /// Type is able to decode RLP.
 pub trait Decoder: Sized {
 	/// Read a value from the RLP into a given type.
-	fn read_value<T, F>(&self, f: F) -> Result<T, DecoderError>
-		where F: FnOnce(&[u8]) -> Result<T, DecoderError>;
+	fn read_value<T, F>(&self, f: &F) -> Result<T, DecoderError>
+		where F: Fn(&[u8]) -> Result<T, DecoderError>;
 
 	/// Get underlying `UntrustedRLP` object.
 	fn as_rlp(&self) -> &UntrustedRlp;
@@ -63,7 +63,7 @@ pub trait View<'a, 'view>: Sized {
 	/// Creates a new instance of `Rlp` reader
 	fn new(bytes: &'a [u8]) -> Self;
 
-	/// The raw data of the RLP.
+	/// The raw data of the RLP as a slice.
 	///
 	/// ```rust
 	/// extern crate ethcore_util as util;
@@ -365,3 +365,14 @@ pub trait Stream: Sized {
 	/// panic! if stream is not finished.
 	fn out(self) -> Vec<u8>;
 }
+
+/// Trait for compressing and decompressing RLP by replacement of common terms.
+pub trait Compressible: Sized {
+	/// Indicates the origin of RLP to be compressed.
+	type DataType;
+
+	/// Compress given RLP type using appropriate methods.
+	fn compress(&self, t: Self::DataType) -> ElasticArray1024<u8>;
+	/// Decompress given RLP type using appropriate methods.
+	fn decompress(&self, t: Self::DataType) -> ElasticArray1024<u8>;
+}
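A usage sketch of the new trait, mirroring the round-trip assertions in the rlpcompression tests above. The `ethcore_util::rlp` import path is an assumption based on this commit's crate layout, and the helper name is hypothetical:

```rust
extern crate ethcore_util;

use ethcore_util::rlp::{Compressible, RlpType, UntrustedRlp, View};

// Compression must be lossless: compressing and then decompressing any
// well-formed RLP has to reproduce the original bytes.
fn assert_roundtrip(raw: &[u8]) {
    let compressed = UntrustedRlp::new(raw).compress(RlpType::Blocks).to_vec();
    let restored = UntrustedRlp::new(&compressed).decompress(RlpType::Blocks).to_vec();
    assert_eq!(restored, raw);
}

fn main() {
    // An empty list (0xc0) contains nothing to swap, so it survives unchanged.
    assert_roundtrip(&[0xc0]);
}
```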
diff --git a/util/src/rlp/untrusted_rlp.rs b/util/src/rlp/untrusted_rlp.rs
index a55bb0f3b..fdf584211 100644
--- a/util/src/rlp/untrusted_rlp.rs
+++ b/util/src/rlp/untrusted_rlp.rs
@@ -55,6 +55,18 @@ pub struct PayloadInfo {
 	pub value_len: usize,
 }
 
+fn calculate_payload_info(header_bytes: &[u8], len_of_len: usize) -> Result<PayloadInfo, DecoderError> {
+	let header_len = 1 + len_of_len;
+	match header_bytes.get(1) {
+		Some(&0) => return Err(DecoderError::RlpDataLenWithZeroPrefix),
+		None => return Err(DecoderError::RlpIsTooShort),
+		_ => (),
+	}
+	if header_bytes.len() < header_len { return Err(DecoderError::RlpIsTooShort); }
+	let value_len = try!(usize::from_bytes(&header_bytes[1..header_len]));
+	Ok(PayloadInfo::new(header_len, value_len))
+}
+
 impl PayloadInfo {
 	fn new(header_len: usize, value_len: usize) -> PayloadInfo {
 		PayloadInfo {
@@ -68,28 +80,22 @@ impl PayloadInfo {
 
 	/// Create a new object from the given bytes RLP. The bytes
 	pub fn from(header_bytes: &[u8]) -> Result<PayloadInfo, DecoderError> {
-		Ok(match header_bytes.first().cloned() {
-			None => return Err(DecoderError::RlpIsTooShort),
-			Some(0...0x7f) => PayloadInfo::new(0, 1),
-			Some(l @ 0x80...0xb7) => PayloadInfo::new(1, l as usize - 0x80),
+		match header_bytes.first().cloned() {
+			None => Err(DecoderError::RlpIsTooShort),
+			Some(0...0x7f) => Ok(PayloadInfo::new(0, 1)),
+			Some(l @ 0x80...0xb7) => Ok(PayloadInfo::new(1, l as usize - 0x80)),
 			Some(l @ 0xb8...0xbf) => {
 				let len_of_len = l as usize - 0xb7;
-				let header_len = 1 + len_of_len;
-				if header_bytes[1] == 0 { return Err(DecoderError::RlpDataLenWithZeroPrefix); }
-				let value_len = try!(usize::from_bytes(&header_bytes[1..header_len]));
-				PayloadInfo::new(header_len, value_len)
+				calculate_payload_info(header_bytes, len_of_len)
 			}
-			Some(l @ 0xc0...0xf7) => PayloadInfo::new(1, l as usize - 0xc0),
+			Some(l @ 0xc0...0xf7) => Ok(PayloadInfo::new(1, l as usize - 0xc0)),
 			Some(l @ 0xf8...0xff) => {
 				let len_of_len = l as usize - 0xf7;
-				let header_len = 1 + len_of_len;
-				let value_len = try!(usize::from_bytes(&header_bytes[1..header_len]));
-				if header_bytes[1] == 0 { return Err(DecoderError::RlpListLenWithZeroPrefix); }
-				PayloadInfo::new(header_len, value_len)
+				calculate_payload_info(header_bytes, len_of_len)
 			},
 			// we cant reach this place, but rust requires _ to be implemented
 			_ => { unreachable!(); }
-		})
+		}
 	}
 }
@@ -190,8 +196,8 @@ impl<'a, 'view> View<'a, 'view> for UntrustedRlp<'a> where 'a: 'view {
 
 	fn size(&self) -> usize {
 		match self.is_data() {
-			// we can safely unwrap (?) cause its data
-			true => BasicDecoder::payload_info(self.bytes).unwrap().value_len,
+			// TODO: No panic on malformed data, but ideally would Err on no PayloadInfo.
+			true => BasicDecoder::payload_info(self.bytes).map(|b| b.value_len).unwrap_or(0),
 			false => 0
 		}
 	}
@@ -342,15 +348,15 @@ impl<'a> BasicDecoder<'a> {
 }
 
 impl<'a> Decoder for BasicDecoder<'a> {
-	fn read_value<T, F>(&self, f: F) -> Result<T, DecoderError>
-		where F: FnOnce(&[u8]) -> Result<T, DecoderError> {
+	fn read_value<T, F>(&self, f: &F) -> Result<T, DecoderError>
+		where F: Fn(&[u8]) -> Result<T, DecoderError> {
 
 		let bytes = self.rlp.as_raw();
 
 		match bytes.first().cloned() {
-			// rlp is too short
+			// RLP is too short.
 			None => Err(DecoderError::RlpIsTooShort),
-			// single byt value
+			// Single byte value.
 			Some(l @ 0...0x7f) => Ok(try!(f(&[l]))),
 			// 0-55 bytes
 			Some(l @ 0x80...0xb7) => {
@@ -362,10 +368,9 @@ impl<'a> Decoder for BasicDecoder<'a> {
 				if l == 0x81 && d[0] < 0x80 {
 					return Err(DecoderError::RlpInvalidIndirection);
 				}
-
 				Ok(try!(f(d)))
 			},
-			// longer than 55 bytes
+			// Longer than 55 bytes.
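+			// (Editorial worked example, following the RLP spec: for l = 0xb8 the
+			// header is [0xb8, value_len], i.e. one extra length byte, so a 70-byte
+			// string is encoded as [0xb8, 70, ...payload...] and its value begins
+			// at offset 2, matching begin_of_value = 1 + len_of_len below.)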
 			Some(l @ 0xb8...0xbf) => {
 				let len_of_len = l as usize - 0xb7;
 				let begin_of_value = 1 as usize + len_of_len;
@@ -380,7 +385,7 @@ impl<'a> Decoder for BasicDecoder<'a> {
 				}
 				Ok(try!(f(&bytes[begin_of_value..last_index_of_value])))
 			}
-			// we are reading value, not a list!
+			// We are reading value, not a list!
 			_ => Err(DecoderError::RlpExpectedToBeData)
 		}
 	}
@@ -396,9 +401,7 @@ impl<'a> Decoder for BasicDecoder<'a> {
 
 impl<T> Decodable for T where T: FromBytes {
 	fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
-		decoder.read_value(| bytes | {
-			Ok(try!(T::from_bytes(bytes)))
-		})
+		decoder.read_value(&|bytes: &[u8]| Ok(try!(T::from_bytes(bytes))))
 	}
 }
 
@@ -416,11 +419,7 @@ impl<T> Decodable for Option<T> where T: Decodable {
 
 impl Decodable for Vec<u8> {
 	fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
-		decoder.read_value(| bytes | {
-			let mut res = vec![];
-			res.extend_from_slice(bytes);
-			Ok(res)
-		})
+		decoder.read_value(&|bytes: &[u8]| Ok(bytes.to_vec()))
 	}
 }
 
@@ -489,11 +488,14 @@ impl RlpDecodable for u8 {
 	}
 }
 
-#[test]
-fn test_rlp_display() {
-	use rustc_serialize::hex::FromHex;
-	let data = "f84d0589010efbef67941f79b2a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470".from_hex().unwrap();
-	let rlp = UntrustedRlp::new(&data);
-	assert_eq!(format!("{}", rlp), "[\"0x05\", \"0x010efbef67941f79b2\", \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\", \"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470\"]");
+#[cfg(test)]
+mod tests {
+	use rlp::{UntrustedRlp, View};
+	#[test]
+	fn test_rlp_display() {
+		use rustc_serialize::hex::FromHex;
+		let data = "f84d0589010efbef67941f79b2a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470".from_hex().unwrap();
+		let rlp = UntrustedRlp::new(&data);
+		assert_eq!(format!("{}", rlp), "[\"0x05\", \"0x010efbef67941f79b2\", \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\", \"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470\"]");
+	}
 }
-
diff --git a/util/src/trie/mod.rs b/util/src/trie/mod.rs
index d608863cd..7881240b5 100644
--- a/util/src/trie/mod.rs
+++ b/util/src/trie/mod.rs
@@ -63,7 +63,7 @@ impl fmt::Display for TrieError {
 }
 
 /// Trie types
-#[derive(Debug, Clone)]
+#[derive(Debug, PartialEq, Clone)]
 pub enum TrieSpec {
 	/// Generic trie.
 	Generic,
diff --git a/util/src/trie/standardmap.rs b/util/src/trie/standardmap.rs
index 216d29ad0..28e4c76f7 100644
--- a/util/src/trie/standardmap.rs
+++ b/util/src/trie/standardmap.rs
@@ -106,7 +106,7 @@ impl StandardMap {
 			Alphabet::All => Self::random_bytes(self.min_key, self.journal_key, seed),
 			Alphabet::Low => Self::random_word(low, self.min_key, self.journal_key, seed),
 			Alphabet::Mid => Self::random_word(mid, self.min_key, self.journal_key, seed),
-			Alphabet::Custom(ref a) => Self::random_word(&a, self.min_key, self.journal_key, seed),
+			Alphabet::Custom(ref a) => Self::random_word(a, self.min_key, self.journal_key, seed),
 		};
 		let v = match self.value_mode {
 			ValueMode::Mirror => k.clone(),
diff --git a/util/src/trie/triedb.rs b/util/src/trie/triedb.rs
index a50904cc1..9feeb0370 100644
--- a/util/src/trie/triedb.rs
+++ b/util/src/trie/triedb.rs
@@ -132,7 +132,7 @@ impl<'db> TrieDB<'db> {
 	/// Get the data of the root node.
 	fn root_data(&self) -> &[u8] {
-		self.db.get(&self.root).expect("Trie root not found!")
+		self.db.get(self.root).expect("Trie root not found!")
 	}
 
 	/// Get the root node as a `Node`.
@@ -184,7 +184,7 @@ impl<'db> TrieDB<'db> {
 	/// Return optional data for a key given as a `NibbleSlice`. Returns `None` if no data exists.
 	fn do_lookup<'a, 'key>(&'a self, key: &NibbleSlice<'key>) -> Option<&'a [u8]> where 'a: 'key {
 		let root_rlp = self.root_data();
-		self.get_from_node(&root_rlp, key)
+		self.get_from_node(root_rlp, key)
 	}
 
 	/// Recursible function to retrieve the value given a `node` and a partial `key`. `None` if no
@@ -340,7 +340,7 @@ impl<'db> Trie for TrieDB<'db> {
 		Box::new(TrieDB::iter(self))
 	}
 
-	fn root(&self) -> &H256 { &self.root }
+	fn root(&self) -> &H256 { self.root }
 
 	fn contains(&self, key: &[u8]) -> bool {
 		self.get(key).is_some()
@@ -354,7 +354,7 @@ impl<'db> Trie for TrieDB<'db> {
 impl<'db> fmt::Debug for TrieDB<'db> {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		try!(writeln!(f, "c={:?} [", self.hash_count));
-		let root_rlp = self.db.get(&self.root).expect("Trie root not found!");
+		let root_rlp = self.db.get(self.root).expect("Trie root not found!");
 		try!(self.fmt_all(Node::decoded(root_rlp), f, 0));
 		writeln!(f, "]")
 	}
@@ -373,7 +373,7 @@ fn iterator() {
 	{
 		let mut t = TrieDBMut::new(&mut memdb, &mut root);
 		for x in &d {
-			t.insert(&x, &x);
+			t.insert(x, x);
 		}
 	}
 	assert_eq!(d.iter().map(|i| i.to_vec()).collect::<Vec<_>>(), TrieDB::new(&memdb, &root).unwrap().iter().map(|x| x.0).collect::<Vec<_>>());
diff --git a/util/src/trie/triedbmut.rs b/util/src/trie/triedbmut.rs
index c50a66cc8..68d56f4d8 100644
--- a/util/src/trie/triedbmut.rs
+++ b/util/src/trie/triedbmut.rs
@@ -401,7 +401,7 @@ impl<'a> TrieDBMut<'a> {
 	/// Return optional data for a key given as a `NibbleSlice`. Returns `None` if no data exists.
 	fn do_db_lookup<'x, 'key>(&'x self, hash: &H256, key: NibbleSlice<'key>) -> Option<&'x [u8]> where 'x: 'key {
-		self.db.get(hash).and_then(|node_rlp| self.get_from_db_node(&node_rlp, key))
+		self.db.get(hash).and_then(|node_rlp| self.get_from_db_node(node_rlp, key))
 	}
 
 	/// Recursible function to retrieve the value given a `node` and a partial `key`. `None` if no
@@ -868,7 +868,7 @@ impl<'a> TrieDBMut<'a> {
 impl<'a> TrieMut for TrieDBMut<'a> {
 	fn root(&mut self) -> &H256 {
 		self.commit();
-		&self.root
+		self.root
 	}
 
 	fn is_empty(&self) -> bool {
@@ -938,7 +938,7 @@ mod tests {
 		for i in 0..v.len() {
 			let key: &[u8] = &v[i].0;
 			let val: &[u8] = &v[i].1;
-			t.insert(&key, &val);
+			t.insert(key, val);
 		}
 		t
 	}
@@ -946,7 +946,7 @@ mod tests {
 	fn unpopulate_trie<'db>(t: &mut TrieDBMut<'db>, v: &[(Vec<u8>, Vec<u8>)]) {
 		for i in v {
 			let key: &[u8] = &i.0;
-			t.remove(&key);
+			t.remove(key);
 		}
 	}
diff --git a/util/src/triehash.rs b/util/src/triehash.rs
index e95c3b5f9..f49b588d4 100644
--- a/util/src/triehash.rs
+++ b/util/src/triehash.rs
@@ -213,7 +213,7 @@ fn hash256rlp(input: &[(Vec<u8>, Vec<u8>)], pre_len: usize, stream: &mut RlpStream
 		.skip(1)
 		// get minimum number of shared nibbles between first and each successive
 		.fold(key.len(), | acc, &(ref k, _) | {
-			cmp::min(key.shared_prefix_len(&k), acc)
+			cmp::min(key.shared_prefix_len(k), acc)
 		});
 
 	// if shared prefix is higher than current prefix append its
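For context on this final hunk: the fold computes the longest prefix shared by a whole run of sorted keys as the minimum of each later key's shared prefix with the first key. A standalone sketch of the same computation on byte slices; `shared_prefix_len` below is a local stand-in for the `NibbleSlice` method of the same name:

```rust
use std::cmp;

// Local stand-in for NibbleSlice::shared_prefix_len, on bytes instead of nibbles.
fn shared_prefix_len(a: &[u8], b: &[u8]) -> usize {
    a.iter().zip(b.iter()).take_while(|&(x, y)| x == y).count()
}

fn main() {
    let keys: [&[u8]; 3] = [b"apple", b"apply", b"apricot"];
    let first = keys[0];
    // Same shape as the fold above: start from the first key's full length,
    // then take the minimum shared prefix against every later key.
    let shared = keys.iter()
        .skip(1)
        .fold(first.len(), |acc, k| cmp::min(shared_prefix_len(first, k), acc));
    assert_eq!(shared, 2); // the whole group shares only "ap"
}
```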