From 3153d12bd9997f223a8a04ec40e8db21fb161663 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 7 Mar 2016 11:40:44 +0100 Subject: [PATCH 1/8] feature enabled when compiling without --release --- Cargo.toml | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9b8ec6405..d1094a110 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,15 +12,22 @@ rustc-serialize = "0.3" docopt = "0.6" time = "0.1" ctrlc = { git = "https://github.com/tomusdrw/rust-ctrlc.git" } -clippy = { version = "0.0.44", optional = true } -ethcore-util = { path = "util" } -ethcore = { path = "ethcore" } -ethsync = { path = "sync" } -ethcore-rpc = { path = "rpc", optional = true } fdlimit = { path = "util/fdlimit" } daemonize = "0.2" -ethcore-devtools = { path = "devtools" } number_prefix = "0.2" +clippy = { version = "0.0.44", optional = true } + +ethcore = { path = "ethcore" } +ethcore-util = { path = "util" } +ethsync = { path = "sync" } +ethcore-devtools = { path = "devtools" } +ethcore-rpc = { path = "rpc", optional = true } + +[dev-dependencies] +ethcore = { path = "ethcore", features = ["dev"]} +ethcore-util = { path = "util", features = ["dev"] } +ethsync = { path = "sync", features = ["dev"] } +ethcore-rpc = { path = "rpc", features = ["dev"]} [features] default = ["rpc"] From cbc2c0cf0c76bf12361641c6825e21e82bdec7ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 7 Mar 2016 14:33:00 +0100 Subject: [PATCH 2/8] Fixing clippy warnings. When building on nightly it is required to enable clippy --- Cargo.lock | 4 +++ Cargo.toml | 13 ++++++--- build.rs | 25 ++++++++++++++++ cargo.sh | 2 ++ ethcore/Cargo.toml | 6 +++- ethcore/build.rs | 25 ++++++++++++++++ ethcore/src/basic_types.rs | 2 +- ethcore/src/block.rs | 4 +-- ethcore/src/block_queue.rs | 4 +-- ethcore/src/blockchain/blockchain.rs | 4 +-- ethcore/src/ethereum/ethash.rs | 2 +- ethcore/src/evm/interpreter.rs | 6 ++-- ethcore/src/evm/tests.rs | 11 ++++--- ethcore/src/externalities.rs | 6 ++-- ethcore/src/lib.rs | 10 +++---- ethcore/src/service.rs | 9 +++--- ethcore/src/spec.rs | 10 +++---- ethcore/src/state.rs | 2 +- ethcore/src/transaction.rs | 2 +- ethcore/src/verification/mod.rs | 2 ++ hook.sh | 2 +- parity/main.rs | 6 ++-- rpc/Cargo.toml | 3 +- rpc/build.rs | 23 +++++++++++++++ sync/Cargo.toml | 6 +++- sync/build.rs | 25 ++++++++++++++++ sync/src/chain.rs | 7 +++-- sync/src/lib.rs | 6 ++-- sync/src/range_collection.rs | 2 +- util/Cargo.toml | 2 +- util/bigint/src/uint.rs | 6 ++-- util/build.rs | 21 ++++++++++++++ util/src/hash.rs | 4 +-- util/src/journaldb.rs | 43 +++++++++++++++------------- util/src/kvdb.rs | 3 +- util/src/lib.rs | 12 ++++---- util/src/network/discovery.rs | 22 +++++++------- util/src/network/host.rs | 4 +-- util/src/panics.rs | 2 +- util/src/trie/triedb.rs | 18 ++++++------ util/src/trie/triedbmut.rs | 36 +++++++++++------------ 41 files changed, 272 insertions(+), 130 deletions(-) create mode 100644 build.rs create mode 100755 cargo.sh create mode 100644 ethcore/build.rs create mode 100644 sync/build.rs diff --git a/Cargo.lock b/Cargo.lock index 55ed996ed..61f152f69 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16,6 +16,7 @@ dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 
(registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -209,6 +210,7 @@ dependencies = [ "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -232,6 +234,7 @@ dependencies = [ "jsonrpc-http-server 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -286,6 +289,7 @@ dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/Cargo.toml b/Cargo.toml index d1094a110..d8e05bb20 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,10 @@ name = "parity" version = "0.9.99" license = "GPL-3.0" authors = ["Ethcore "] +build = "build.rs" + +[build-dependencies] +rustc_version = "0.1" [dependencies] log = "0.3" @@ -24,17 +28,18 @@ ethcore-devtools = { path = "devtools" } ethcore-rpc = { path = "rpc", optional = true } [dev-dependencies] -ethcore = { path = "ethcore", features = ["dev"]} +ethcore = { path = "ethcore", features = ["dev"] } ethcore-util = { path = "util", features = ["dev"] } ethsync = { path = "sync", features = ["dev"] } -ethcore-rpc = { path = "rpc", features = ["dev"]} +ethcore-rpc = { path = "rpc", features = ["dev"] } [features] default = ["rpc"] rpc = ["ethcore-rpc"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"] +dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"] +dev-clippy = ["clippy", "ethcore/clippy", "ethcore-util/clippy", "ethsync/clippy", "ethcore-rpc/clippy"] travis-beta = ["ethcore/json-tests"] -travis-nightly = ["ethcore/json-tests", "dev"] +travis-nightly = ["ethcore/json-tests", "clippy", "dev"] [[bin]] path = "parity/main.rs" diff --git a/build.rs b/build.rs new file mode 100644 index 000000000..41b9a1b3e --- /dev/null +++ b/build.rs @@ -0,0 +1,25 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + +fn main() { + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } +} diff --git a/cargo.sh b/cargo.sh new file mode 100755 index 000000000..6870ab385 --- /dev/null +++ b/cargo.sh @@ -0,0 +1,2 @@ +#!/bin/sh +cargo "$@" --features dev-clippy diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index c3a3d32dc..fbfe175d7 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -5,6 +5,10 @@ license = "GPL-3.0" name = "ethcore" version = "0.9.99" authors = ["Ethcore "] +build = "build.rs" + +[build-dependencies] +rustc_version = "0.1" [dependencies] log = "0.3" @@ -27,5 +31,5 @@ jit = ["evmjit"] evm-debug = [] json-tests = [] test-heavy = [] -dev = ["clippy"] +dev = [] default = [] diff --git a/ethcore/build.rs b/ethcore/build.rs new file mode 100644 index 000000000..41b9a1b3e --- /dev/null +++ b/ethcore/build.rs @@ -0,0 +1,25 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + +fn main() { + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } +} diff --git a/ethcore/src/basic_types.rs b/ethcore/src/basic_types.rs index 5f6515c0d..9cba8b3a0 100644 --- a/ethcore/src/basic_types.rs +++ b/ethcore/src/basic_types.rs @@ -24,7 +24,7 @@ pub type LogBloom = H2048; /// Constant 2048-bit datum for 0. Often used as a default. pub static ZERO_LOGBLOOM: LogBloom = H2048([0x00; 256]); -#[cfg_attr(feature="dev", allow(enum_variant_names))] +#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))] /// Semantic boolean for when a seal/signature is included. pub enum Seal { /// The seal/signature is included. diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index 68f647e37..b3894db94 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -16,7 +16,7 @@ //! Blockchain block. 
-#![cfg_attr(feature="dev", allow(ptr_arg))] // Because of &LastHashes -> &Vec<_> +#![cfg_attr(all(nightly, feature="dev"), allow(ptr_arg))] // Because of &LastHashes -> &Vec<_> use common::*; use engine::*; @@ -274,7 +274,7 @@ impl<'x> OpenBlock<'x> { s.block.base.header.note_dirty(); ClosedBlock { - block: s.block, + block: s.block, uncle_bytes: uncle_bytes, } } diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index 490a17995..de6802a4f 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -121,7 +121,7 @@ struct QueueSignal { } impl QueueSignal { - #[cfg_attr(feature="dev", allow(bool_comparison))] + #[cfg_attr(all(nightly, feature="dev"), allow(bool_comparison))] fn set(&self) { if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false { self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message"); @@ -385,7 +385,7 @@ impl BlockQueue { } } - pub fn collect_garbage(&self) { + pub fn collect_garbage(&self) { { let mut verification = self.verification.lock().unwrap(); verification.unverified.shrink_to_fit(); diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index e529f50af..d7c9d7975 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -884,7 +884,7 @@ mod tests { } #[test] - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn test_find_uncles() { let mut canon_chain = ChainGenerator::default(); let mut finalizer = BlockFinalizer::default(); @@ -922,7 +922,7 @@ mod tests { } #[test] - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn test_small_fork() { let mut canon_chain = ChainGenerator::default(); let mut finalizer = BlockFinalizer::default(); diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index f9810b964..b0c0e4a9f 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -202,7 +202,7 @@ impl Engine for Ethash { } } -#[cfg_attr(feature="dev", allow(wrong_self_convention))] // to_ethash should take self +#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // to_ethash should take self impl Ethash { fn calculate_difficuty(&self, header: &Header, parent: &Header) -> U256 { const EXP_DIFF_PERIOD: u64 = 100000; diff --git a/ethcore/src/evm/interpreter.rs b/ethcore/src/evm/interpreter.rs index 7491321cb..fb8d19357 100644 --- a/ethcore/src/evm/interpreter.rs +++ b/ethcore/src/evm/interpreter.rs @@ -243,7 +243,7 @@ struct CodeReader<'a> { code: &'a Bytes } -#[cfg_attr(feature="dev", allow(len_without_is_empty))] +#[cfg_attr(all(nightly, feature="dev"), allow(len_without_is_empty))] impl<'a> CodeReader<'a> { /// Get `no_of_bytes` from code and convert to U256. 
Move PC fn read(&mut self, no_of_bytes: usize) -> U256 { @@ -258,7 +258,7 @@ impl<'a> CodeReader<'a> { } } -#[cfg_attr(feature="dev", allow(enum_variant_names))] +#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))] enum InstructionCost { Gas(U256), GasMem(U256, U256), @@ -347,7 +347,7 @@ impl evm::Evm for Interpreter { } impl Interpreter { - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn get_gas_cost_mem(&self, ext: &evm::Ext, instruction: Instruction, diff --git a/ethcore/src/evm/tests.rs b/ethcore/src/evm/tests.rs index 9d4dd3bc4..dc84a9a05 100644 --- a/ethcore/src/evm/tests.rs +++ b/ethcore/src/evm/tests.rs @@ -25,9 +25,8 @@ struct FakeLogEntry { } #[derive(PartialEq, Eq, Hash, Debug)] -#[cfg_attr(feature="dev", allow(enum_variant_names))] // Common prefix is C ;) enum FakeCallType { - CALL, CREATE + Call, Create } #[derive(PartialEq, Eq, Hash, Debug)] @@ -94,7 +93,7 @@ impl Ext for FakeExt { fn create(&mut self, gas: &U256, value: &U256, code: &[u8]) -> ContractCreateResult { self.calls.insert(FakeCall { - call_type: FakeCallType::CREATE, + call_type: FakeCallType::Create, gas: *gas, sender_address: None, receive_address: None, @@ -115,7 +114,7 @@ impl Ext for FakeExt { _output: &mut [u8]) -> MessageCallResult { self.calls.insert(FakeCall { - call_type: FakeCallType::CALL, + call_type: FakeCallType::Call, gas: *gas, sender_address: Some(sender_address.clone()), receive_address: Some(receive_address.clone()), @@ -909,7 +908,7 @@ fn test_calls(factory: super::Factory) { }; assert_set_contains(&ext.calls, &FakeCall { - call_type: FakeCallType::CALL, + call_type: FakeCallType::Call, gas: U256::from(2556), sender_address: Some(address.clone()), receive_address: Some(code_address.clone()), @@ -918,7 +917,7 @@ fn test_calls(factory: super::Factory) { code_address: Some(code_address.clone()) }); assert_set_contains(&ext.calls, &FakeCall { - call_type: FakeCallType::CALL, + call_type: FakeCallType::Call, gas: U256::from(2556), sender_address: Some(address.clone()), receive_address: Some(address.clone()), diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index beb8d62a1..a1f5763ea 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -188,7 +188,7 @@ impl<'a> Ext for Externalities<'a> { self.state.code(address).unwrap_or_else(|| vec![]) } - #[cfg_attr(feature="dev", allow(match_ref_pats))] + #[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))] fn ret(&mut self, gas: &U256, data: &[u8]) -> Result { match &mut self.output { &mut OutputPolicy::Return(BytesRef::Fixed(ref mut slice)) => unsafe { @@ -226,9 +226,9 @@ impl<'a> Ext for Externalities<'a> { fn log(&mut self, topics: Vec, data: &[u8]) { let address = self.origin_info.address.clone(); - self.substate.logs.push(LogEntry { + self.substate.logs.push(LogEntry { address: address, - topics: topics, + topics: topics, data: data.to_vec() }); } diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 938da02a0..469364eb3 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -15,16 +15,16 @@ // along with Parity. If not, see . 
#![warn(missing_docs)] -#![cfg_attr(feature="dev", feature(plugin))] -#![cfg_attr(feature="dev", plugin(clippy))] +#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] +#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] // Clippy config // TODO [todr] not really sure -#![cfg_attr(feature="dev", allow(needless_range_loop))] +#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))] // Shorter than if-else -#![cfg_attr(feature="dev", allow(match_bool))] +#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. -#![cfg_attr(feature="dev", allow(clone_on_copy))] +#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] //! Ethcore library //! diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index 756d02407..33dca8de7 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -115,12 +115,11 @@ impl IoHandler for ClientIoHandler { } } - #[cfg_attr(feature="dev", allow(match_ref_pats))] - #[cfg_attr(feature="dev", allow(single_match))] + #[cfg_attr(all(nightly, feature="dev"), allow(single_match))] fn message(&self, io: &IoContext, net_message: &NetSyncMessage) { - if let &UserMessage(ref message) = net_message { - match message { - &SyncMessage::BlockVerified => { + if let UserMessage(ref message) = *net_message { + match *message { + SyncMessage::BlockVerified => { self.client.import_verified_blocks(&io.channel()); }, _ => {}, // ignore other messages diff --git a/ethcore/src/spec.rs b/ethcore/src/spec.rs index 38a0dda53..774024351 100644 --- a/ethcore/src/spec.rs +++ b/ethcore/src/spec.rs @@ -99,7 +99,7 @@ pub struct Spec { genesis_state: PodState, } -#[cfg_attr(feature="dev", allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self) +#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self) impl Spec { /// Convert this object into a boxed Engine of the right underlying type. // TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead. @@ -136,7 +136,7 @@ impl Spec { uncles_hash: RlpStream::new_list(0).out().sha3(), extra_data: self.extra_data.clone(), state_root: self.state_root().clone(), - receipts_root: self.receipts_root.clone(), + receipts_root: self.receipts_root.clone(), log_bloom: H2048::new().clone(), gas_used: self.gas_used.clone(), gas_limit: self.gas_limit.clone(), @@ -182,7 +182,7 @@ impl Spec { ) } }; - + self.parent_hash = H256::from_json(&genesis["parentHash"]); self.transactions_root = genesis.find("transactionsTrie").and_then(|_| Some(H256::from_json(&genesis["transactionsTrie"]))).unwrap_or(SHA3_NULL_RLP.clone()); self.receipts_root = genesis.find("receiptTrie").and_then(|_| Some(H256::from_json(&genesis["receiptTrie"]))).unwrap_or(SHA3_NULL_RLP.clone()); @@ -249,7 +249,7 @@ impl FromJson for Spec { ) } }; - + Spec { name: json.find("name").map_or("unknown", |j| j.as_string().unwrap()).to_owned(), engine_name: json["engineName"].as_string().unwrap().to_owned(), @@ -278,7 +278,7 @@ impl Spec { /// Ensure that the given state DB has the trie nodes in for the genesis state. 
pub fn ensure_db_good(&self, db: &mut HashDB) -> bool { if !db.contains(&self.state_root()) { - let mut root = H256::new(); + let mut root = H256::new(); { let mut t = SecTrieDBMut::new(db, &mut root); for (address, account) in self.genesis_state.get().iter() { diff --git a/ethcore/src/state.rs b/ethcore/src/state.rs index c13678c38..7c1064abf 100644 --- a/ethcore/src/state.rs +++ b/ethcore/src/state.rs @@ -224,7 +224,7 @@ impl State { /// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit. /// `accounts` is mutable because we may need to commit the code or storage and record that. - #[cfg_attr(feature="dev", allow(match_ref_pats))] + #[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))] pub fn commit_into(db: &mut HashDB, root: &mut H256, accounts: &mut HashMap>) { // first, commit the sub trees. // TODO: is this necessary or can we dispense with the `ref mut a` for just `a`? diff --git a/ethcore/src/transaction.rs b/ethcore/src/transaction.rs index a51824494..733e5ac6b 100644 --- a/ethcore/src/transaction.rs +++ b/ethcore/src/transaction.rs @@ -80,7 +80,7 @@ impl Transaction { } impl FromJson for SignedTransaction { - #[cfg_attr(feature="dev", allow(single_char_pattern))] + #[cfg_attr(all(nightly, feature="dev"), allow(single_char_pattern))] fn from_json(json: &Json) -> SignedTransaction { let t = Transaction { nonce: xjson!(&json["nonce"]), diff --git a/ethcore/src/verification/mod.rs b/ethcore/src/verification/mod.rs index 260121989..fe1f406cc 100644 --- a/ethcore/src/verification/mod.rs +++ b/ethcore/src/verification/mod.rs @@ -17,9 +17,11 @@ pub mod verification; pub mod verifier; mod canon_verifier; +#[cfg(test)] mod noop_verifier; pub use self::verification::*; pub use self::verifier::Verifier; pub use self::canon_verifier::CanonVerifier; +#[cfg(test)] pub use self::noop_verifier::NoopVerifier; diff --git a/hook.sh b/hook.sh index 106ffe4f0..354fddd5d 100755 --- a/hook.sh +++ b/hook.sh @@ -1,3 +1,3 @@ #!/bin/sh -echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --features dev" > ./.git/hooks/pre-push +echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --features dev-clippy" > ./.git/hooks/pre-push chmod +x ./.git/hooks/pre-push diff --git a/parity/main.rs b/parity/main.rs index 605fb315d..4055fcf46 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -17,8 +17,8 @@ //! Ethcore client application. 
#![warn(missing_docs)] -#![cfg_attr(feature="dev", feature(plugin))] -#![cfg_attr(feature="dev", plugin(clippy))] +#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] +#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] extern crate docopt; extern crate rustc_serialize; extern crate ethcore_util as util; @@ -246,7 +246,7 @@ impl Configuration { } } - #[cfg_attr(feature="dev", allow(useless_format))] + #[cfg_attr(all(nightly, feature="dev"), allow(useless_format))] fn net_addresses(&self) -> (Option, Option) { let mut listen_address = None; let mut public_address = None; diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index bfdf8f2d3..07c0eb85d 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -26,8 +26,9 @@ serde_macros = { version = "0.7.0", optional = true } [build-dependencies] serde_codegen = { version = "0.7.0", optional = true } syntex = "0.29.0" +rustc_version = "0.1" [features] default = ["serde_codegen"] nightly = ["serde_macros"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"] +dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev"] diff --git a/rpc/build.rs b/rpc/build.rs index b5adeaba1..3806f6fe5 100644 --- a/rpc/build.rs +++ b/rpc/build.rs @@ -1,3 +1,23 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + #[cfg(not(feature = "serde_macros"))] mod inner { extern crate syntex; @@ -26,4 +46,7 @@ mod inner { fn main() { inner::main(); + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } } diff --git a/sync/Cargo.toml b/sync/Cargo.toml index f10a772e3..fd4b9c46f 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -4,9 +4,13 @@ name = "ethsync" version = "0.9.99" license = "GPL-3.0" authors = ["Ethcore . + +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + +fn main() { + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } +} diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 63640f87f..0feae01b0 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -268,7 +268,7 @@ impl ChainSync { } - #[cfg_attr(feature="dev", allow(for_kv_map))] // Because it's not possible to get `values_mut()` + #[cfg_attr(all(nightly, feature="dev"), allow(for_kv_map))] // Because it's not possible to get `values_mut()` /// Rest sync. 
Clear all downloaded data but keep the queue fn reset(&mut self) { self.downloading_headers.clear(); @@ -335,7 +335,7 @@ impl ChainSync { Ok(()) } - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] /// Called by peer once it has new block headers during sync fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { self.reset_peer_asking(peer_id, PeerAsking::BlockHeaders); @@ -462,6 +462,7 @@ impl ChainSync { } /// Called by peer once it has new block bodies + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { let block_rlp = try!(r.at(0)); let header_rlp = try!(block_rlp.at(0)); @@ -484,7 +485,7 @@ impl ChainSync { trace!(target: "sync", "New block already queued {:?}", h); }, Ok(_) => { - if self.current_base_block() < header.number { + if self.current_base_block() < header.number { self.last_imported_block = Some(header.number); self.remove_downloaded_blocks(header.number); } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 74541660d..3ce33e31f 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -15,11 +15,11 @@ // along with Parity. If not, see . #![warn(missing_docs)] -#![cfg_attr(feature="dev", feature(plugin))] -#![cfg_attr(feature="dev", plugin(clippy))] +#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] +#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. -#![cfg_attr(feature="dev", allow(clone_on_copy))] +#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] //! Blockchain sync module //! Implements ethereum protocol version 63 as specified here: diff --git a/sync/src/range_collection.rs b/sync/src/range_collection.rs index dc2f4e446..dad732fe8 100644 --- a/sync/src/range_collection.rs +++ b/sync/src/range_collection.rs @@ -207,7 +207,7 @@ impl RangeCollection for Vec<(K, Vec)> where K: Ord + PartialEq + } #[test] -#[cfg_attr(feature="dev", allow(cyclomatic_complexity))] +#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn test_range() { use std::cmp::{Ordering}; diff --git a/util/Cargo.toml b/util/Cargo.toml index 9c5cb3fe3..0ce27ec2b 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -40,7 +40,7 @@ chrono = "0.2" [features] default = [] -dev = ["clippy"] +dev = [] [build-dependencies] vergen = "*" diff --git a/util/bigint/src/uint.rs b/util/bigint/src/uint.rs index bd57e9d6d..959df0944 100644 --- a/util/bigint/src/uint.rs +++ b/util/bigint/src/uint.rs @@ -1103,7 +1103,7 @@ macro_rules! construct_uint { } } - #[cfg_attr(feature="dev", allow(derive_hash_xor_eq))] // We are pretty sure it's ok. + #[cfg_attr(all(nightly, feature="dev"), allow(derive_hash_xor_eq))] // We are pretty sure it's ok. 
impl Hash for $name { fn hash(&self, state: &mut H) where H: Hasher { unsafe { state.write(::std::slice::from_raw_parts(self.0.as_ptr() as *mut u8, self.0.len() * 8)); } @@ -1485,7 +1485,7 @@ mod tests { } #[test] - #[cfg_attr(feature="dev", allow(eq_op))] + #[cfg_attr(all(nightly, feature="dev"), allow(eq_op))] pub fn uint256_comp_test() { let small = U256([10u64, 0, 0, 0]); let big = U256([0x8C8C3EE70C644118u64, 0x0209E7378231E632, 0, 0]); @@ -2032,7 +2032,7 @@ mod tests { #[test] - #[cfg_attr(feature = "dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn u256_multi_full_mul() { let result = U256([0, 0, 0, 0]).full_mul(U256([0, 0, 0, 0])); assert_eq!(U512([0, 0, 0, 0, 0, 0, 0, 0]), result); diff --git a/util/build.rs b/util/build.rs index eed080e29..0b9b233e0 100644 --- a/util/build.rs +++ b/util/build.rs @@ -1,7 +1,28 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate rustc_version; extern crate vergen; use vergen::*; +use rustc_version::{version_meta, Channel}; fn main() { vergen(OutputFns::all()).unwrap(); + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } } diff --git a/util/src/hash.rs b/util/src/hash.rs index 73fa33b47..4eb96b53e 100644 --- a/util/src/hash.rs +++ b/util/src/hash.rs @@ -305,7 +305,7 @@ macro_rules! impl_hash { } impl Copy for $from {} - #[cfg_attr(feature="dev", allow(expl_impl_clone_on_copy))] + #[cfg_attr(all(nightly, feature="dev"), allow(expl_impl_clone_on_copy))] impl Clone for $from { fn clone(&self) -> $from { unsafe { @@ -637,7 +637,7 @@ mod tests { use std::str::FromStr; #[test] - #[cfg_attr(feature="dev", allow(eq_op))] + #[cfg_attr(all(nightly, feature="dev"), allow(eq_op))] fn hash() { let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]); assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h); diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 57af857a9..3228f2201 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -27,7 +27,7 @@ use std::env; /// Implementation of the HashDB trait for a disk-backed database with a memory overlay /// and latent-removal semantics. /// -/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to +/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. @@ -158,7 +158,7 @@ impl JournalDB { backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. 
Some issue with your hard disk?").is_some() } - fn insert_keys(inserts: &Vec<(H256, Bytes)>, backing: &Database, counters: &mut HashMap, batch: &DBTransaction) { + fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, counters: &mut HashMap, batch: &DBTransaction) { for &(ref h, ref d) in inserts { if let Some(c) = counters.get_mut(h) { // already counting. increment. @@ -181,7 +181,7 @@ impl JournalDB { } } - fn replay_keys(inserts: &Vec, backing: &Database, counters: &mut HashMap) { + fn replay_keys(inserts: &[H256], backing: &Database, counters: &mut HashMap) { println!("replay_keys: inserts={:?}, counters={:?}", inserts, counters); for h in inserts { if let Some(c) = counters.get_mut(h) { @@ -211,12 +211,12 @@ impl JournalDB { n = Some(*c); } } - match &n { - &Some(i) if i == 1 => { + match n { + Some(i) if i == 1 => { counters.remove(&h); Self::reset_already_in(batch, &h); } - &None => { + None => { // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. //assert!(!Self::is_already_in(db, &h)); batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?"); @@ -229,7 +229,7 @@ impl JournalDB { /// Commit all recent insert operations and historical removals from the old era /// to the backing database. fn commit_with_counters(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - // journal format: + // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, n] => [ ... ] @@ -242,12 +242,12 @@ impl JournalDB { // By the time comes to remove a tuple from the queue (i.e. then the era passes from recent history // into ancient history) then only one commit from the tuple is considered canonical. This commit // is kept in the main backing database, whereas any others from the same era are reverted. - // + // // It is possible that a key, properly available in the backing database be deleted and re-inserted // in the recent history queue, yet have both operations in commits that are eventually non-canonical. // To avoid the original, and still required, key from being deleted, we maintain a reference count // which includes an original key, if any. - // + // // The semantics of the `counter` are: // insert key k: // counter already contains k: count += 1 @@ -255,7 +255,7 @@ impl JournalDB { // backing db contains k: count = 1 // backing db doesn't contain k: insert into backing db, count = 0 // delete key k: - // counter contains k (count is asserted to be non-zero): + // counter contains k (count is asserted to be non-zero): // count > 1: counter -= 1 // count == 1: remove counter // count == 0: remove key from backing db @@ -274,7 +274,7 @@ impl JournalDB { // // record new commit's details. 
- trace!("commit: #{} ({}), end era: {:?}", now, id, end); + trace!("commit: #{} ({}), end era: {:?}", now, id, end); let mut counters = self.counters.as_ref().unwrap().write().unwrap(); let batch = DBTransaction::new(); { @@ -295,7 +295,7 @@ impl JournalDB { let drained = self.overlay.drain(); let removes: Vec = drained .iter() - .filter_map(|(ref k, &(_, ref c))| if *c < 0 {Some(k.clone())} else {None}).cloned() + .filter_map(|(k, &(_, c))| if c < 0 {Some(k.clone())} else {None}) .collect(); let inserts: Vec<(H256, Bytes)> = drained .into_iter() @@ -382,12 +382,15 @@ impl JournalDB { /// Returns heap memory size used pub fn mem_used(&self) -> usize { - self.overlay.mem_used() + match &self.counters { &Some(ref c) => c.read().unwrap().heap_size_of_children(), &None => 0 } + self.overlay.mem_used() + match self.counters { + Some(ref c) => c.read().unwrap().heap_size_of_children(), + None => 0 + } } } impl HashDB for JournalDB { - fn keys(&self) -> HashMap { + fn keys(&self) -> HashMap { let mut ret: HashMap = HashMap::new(); for (key, _) in self.backing.iter() { let h = H256::from_slice(key.deref()); @@ -401,7 +404,7 @@ impl HashDB for JournalDB { ret } - fn lookup(&self, key: &H256) -> Option<&[u8]> { + fn lookup(&self, key: &H256) -> Option<&[u8]> { let k = self.overlay.raw(key); match k { Some(&(ref d, rc)) if rc > 0 => Some(d), @@ -416,18 +419,18 @@ impl HashDB for JournalDB { } } - fn exists(&self, key: &H256) -> bool { + fn exists(&self, key: &H256) -> bool { self.lookup(key).is_some() } - fn insert(&mut self, value: &[u8]) -> H256 { + fn insert(&mut self, value: &[u8]) -> H256 { self.overlay.insert(value) } fn emplace(&mut self, key: H256, value: Bytes) { - self.overlay.emplace(key, value); + self.overlay.emplace(key, value); } - fn kill(&mut self, key: &H256) { - self.overlay.kill(key); + fn kill(&mut self, key: &H256) { + self.overlay.kill(key); } } diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 43a9fc532..a2fa2215a 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -55,8 +55,7 @@ pub struct DatabaseIterator<'a> { impl<'a> Iterator for DatabaseIterator<'a> { type Item = (Box<[u8]>, Box<[u8]>); - #[cfg_attr(feature="dev", allow(type_complexity))] - fn next(&mut self) -> Option<(Box<[u8]>, Box<[u8]>)> { + fn next(&mut self) -> Option { self.iter.next() } } diff --git a/util/src/lib.rs b/util/src/lib.rs index a50ba8da4..59d66a325 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -15,18 +15,18 @@ // along with Parity. If not, see . #![warn(missing_docs)] -#![cfg_attr(feature="dev", feature(plugin))] -#![cfg_attr(feature="dev", plugin(clippy))] +#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] +#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] // Clippy settings // TODO [todr] not really sure -#![cfg_attr(feature="dev", allow(needless_range_loop))] +#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))] // Shorter than if-else -#![cfg_attr(feature="dev", allow(match_bool))] +#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))] // We use that to be more explicit about handled cases -#![cfg_attr(feature="dev", allow(match_same_arms))] +#![cfg_attr(all(nightly, feature="dev"), allow(match_same_arms))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. -#![cfg_attr(feature="dev", allow(clone_on_copy))] +#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] //! Ethcore-util library //! 
diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs index e52d5d25f..644af22af 100644 --- a/util/src/network/discovery.rs +++ b/util/src/network/discovery.rs @@ -113,14 +113,14 @@ impl Discovery { } /// Add a new node to discovery table. Pings the node. - pub fn add_node(&mut self, e: NodeEntry) { + pub fn add_node(&mut self, e: NodeEntry) { let endpoint = e.endpoint.clone(); self.update_node(e); self.ping(&endpoint); } /// Add a list of known nodes to the table. - pub fn init_node_list(&mut self, mut nodes: Vec) { + pub fn init_node_list(&mut self, mut nodes: Vec) { for n in nodes.drain(..) { self.update_node(n); } @@ -243,7 +243,7 @@ impl Discovery { self.send_to(packet, address.clone()); } - #[cfg_attr(feature="dev", allow(map_clone))] + #[cfg_attr(all(nightly, feature="dev"), allow(map_clone))] fn nearest_node_entries(target: &NodeId, buckets: &[NodeBucket]) -> Vec { let mut found: BTreeMap> = BTreeMap::new(); let mut count = 0; @@ -251,7 +251,7 @@ impl Discovery { // Sort nodes by distance to target for bucket in buckets { for node in &bucket.nodes { - let distance = Discovery::distance(target, &node.address.id); + let distance = Discovery::distance(target, &node.address.id); found.entry(distance).or_insert_with(Vec::new).push(&node.address); if count == BUCKET_SIZE { // delete the most distant element @@ -310,7 +310,7 @@ impl Discovery { None }), Ok(_) => None, - Err(e) => { + Err(e) => { warn!("Error reading UPD socket: {:?}", e); None } @@ -339,7 +339,7 @@ impl Discovery { PACKET_PONG => self.on_pong(&rlp, &node_id, &from), PACKET_FIND_NODE => self.on_find_node(&rlp, &node_id, &from), PACKET_NEIGHBOURS => self.on_neighbours(&rlp, &node_id, &from), - _ => { + _ => { debug!("Unknown UDP packet: {}", packet_id); Ok(None) } @@ -367,14 +367,14 @@ impl Discovery { } else { self.update_node(entry.clone()); - added_map.insert(node.clone(), entry); + added_map.insert(node.clone(), entry); } let hash = rlp.as_raw().sha3(); let mut response = RlpStream::new_list(2); dest.to_rlp_list(&mut response); response.append(&hash); self.send_packet(PACKET_PONG, from, &response.drain()); - + Ok(Some(TableUpdates { added: added_map, removed: HashSet::new() })) } @@ -391,7 +391,7 @@ impl Discovery { } self.clear_ping(node); let mut added_map = HashMap::new(); - added_map.insert(node.clone(), entry); + added_map.insert(node.clone(), entry); Ok(None) } @@ -466,8 +466,8 @@ impl Discovery { pub fn round(&mut self) -> Option { let removed = self.check_expired(false); self.discover(); - if !removed.is_empty() { - Some(TableUpdates { added: HashMap::new(), removed: removed }) + if !removed.is_empty() { + Some(TableUpdates { added: HashMap::new(), removed: removed }) } else { None } } diff --git a/util/src/network/host.rs b/util/src/network/host.rs index ece24a1d1..2d1af55ba 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -507,7 +507,7 @@ impl Host where Message: Send + Sync + Clone { debug!(target: "network", "Connecting peers: {} sessions, {} pending", self.session_count(), self.handshake_count()); } - #[cfg_attr(feature="dev", allow(single_match))] + #[cfg_attr(all(nightly, feature="dev"), allow(single_match))] fn connect_peer(&self, id: &NodeId, io: &IoContext>) { if self.have_session(id) { @@ -542,7 +542,7 @@ impl Host where Message: Send + Sync + Clone { self.create_connection(socket, Some(id), io); } - #[cfg_attr(feature="dev", allow(block_in_if_condition_stmt))] + #[cfg_attr(all(nightly, feature="dev"), allow(block_in_if_condition_stmt))] fn 
create_connection(&self, socket: TcpStream, id: Option<&NodeId>, io: &IoContext>) { let nonce = self.info.write().unwrap().next_nonce(); let mut handshakes = self.handshakes.write().unwrap(); diff --git a/util/src/panics.rs b/util/src/panics.rs index 05d266b8b..70ce0bc33 100644 --- a/util/src/panics.rs +++ b/util/src/panics.rs @@ -71,7 +71,7 @@ impl PanicHandler { /// Invoke closure and catch any possible panics. /// In case of panic notifies all listeners about it. - #[cfg_attr(feature="dev", allow(deprecated))] + #[cfg_attr(all(nightly, feature="dev"), allow(deprecated))] pub fn catch_panic(&self, g: G) -> thread::Result where G: FnOnce() -> R + Send + 'static { let _guard = PanicGuard { handler: self }; let result = g(); diff --git a/util/src/trie/triedb.rs b/util/src/trie/triedb.rs index c4b5e120c..182b87063 100644 --- a/util/src/trie/triedb.rs +++ b/util/src/trie/triedb.rs @@ -22,7 +22,7 @@ use super::trietraits::*; use super::node::*; /// A `Trie` implementation using a generic `HashDB` backing database. -/// +/// /// Use it as a `Trie` trait object. You can use `db()` to get the backing database object, `keys` /// to get the keys belonging to the trie in the backing database, and `db_items_remaining()` to get /// which items in the backing database do not belong to this trie. If this is the only trie in the @@ -54,7 +54,7 @@ pub struct TrieDB<'db> { pub hash_count: usize, } -#[cfg_attr(feature="dev", allow(wrong_self_convention))] +#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] impl<'db> TrieDB<'db> { /// Create a new trie with the backing database `db` and `root` /// Panics, if `root` does not exist @@ -63,16 +63,16 @@ impl<'db> TrieDB<'db> { flushln!("TrieDB::new({}): Trie root not found!", root); panic!("Trie root not found!"); } - TrieDB { - db: db, + TrieDB { + db: db, root: root, - hash_count: 0 + hash_count: 0 } } /// Get the backing database. - pub fn db(&'db self) -> &'db HashDB { - self.db + pub fn db(&'db self) -> &'db HashDB { + self.db } /// Determine all the keys in the backing database that belong to the trie. @@ -142,7 +142,7 @@ impl<'db> TrieDB<'db> { /// Indentation helper for `formal_all`. fn fmt_indent(&self, f: &mut fmt::Formatter, size: usize) -> fmt::Result { - for _ in 0..size { + for _ in 0..size { try!(write!(f, " ")); } Ok(()) @@ -358,7 +358,7 @@ impl<'db> fmt::Debug for TrieDB<'db> { fn iterator() { use memorydb::*; use super::triedbmut::*; - + let d = vec![ &b"A"[..], &b"AA"[..], &b"AB"[..], &b"B"[..] ]; let mut memdb = MemoryDB::new(); diff --git a/util/src/trie/triedbmut.rs b/util/src/trie/triedbmut.rs index 829c1e518..3d5c366e5 100644 --- a/util/src/trie/triedbmut.rs +++ b/util/src/trie/triedbmut.rs @@ -23,7 +23,7 @@ use super::journal::*; use super::trietraits::*; /// A `Trie` implementation using a generic `HashDB` backing database. -/// +/// /// Use it as a `Trie` trait object. You can use `db()` to get the backing database object, `keys` /// to get the keys belonging to the trie in the backing database, and `db_items_remaining()` to get /// which items in the backing database do not belong to this trie. If this is the only trie in the @@ -66,21 +66,21 @@ enum MaybeChanged<'a> { Changed(Bytes), } -#[cfg_attr(feature="dev", allow(wrong_self_convention))] +#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] impl<'db> TrieDBMut<'db> { /// Create a new trie with the backing database `db` and empty `root` /// Initialise to the state entailed by the genesis block. /// This guarantees the trie is built correctly. 
- pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self { + pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self { let mut r = TrieDBMut{ - db: db, + db: db, root: root, - hash_count: 0 - }; + hash_count: 0 + }; // set root rlp - *r.root = SHA3_NULL_RLP.clone(); - r + *r.root = SHA3_NULL_RLP.clone(); + r } /// Create a new trie with the backing database `db` and `root`. @@ -91,21 +91,21 @@ impl<'db> TrieDBMut<'db> { flushln!("Trie root not found {}", root); panic!("Trie root not found!"); } - TrieDBMut { - db: db, + TrieDBMut { + db: db, root: root, - hash_count: 0 + hash_count: 0 } } /// Get the backing database. - pub fn db(&'db self) -> &'db HashDB { - self.db + pub fn db(&'db self) -> &'db HashDB { + self.db } /// Get the backing database. - pub fn db_mut(&'db mut self) -> &'db mut HashDB { - self.db + pub fn db_mut(&'db mut self) -> &'db mut HashDB { + self.db } /// Determine all the keys in the backing database that belong to the trie. @@ -184,7 +184,7 @@ impl<'db> TrieDBMut<'db> { /// Indentation helper for `formal_all`. fn fmt_indent(&self, f: &mut fmt::Formatter, size: usize) -> fmt::Result { - for _ in 0..size { + for _ in 0..size { try!(write!(f, " ")); } Ok(()) @@ -350,7 +350,7 @@ impl<'db> TrieDBMut<'db> { } } - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] /// Determine the RLP of the node, assuming we're inserting `partial` into the /// node currently of data `old`. This will *not* delete any hash of `old` from the database; /// it will just return the new RLP that includes the new node. @@ -378,7 +378,7 @@ impl<'db> TrieDBMut<'db> { // original had empty slot - place a leaf there. true if old_rlp.at(i).is_empty() => journal.new_node(Self::compose_leaf(&partial.mid(1), value), &mut s), // original has something there already; augment. - true => { + true => { let new = self.augmented(self.take_node(&old_rlp.at(i), journal), &partial.mid(1), value, journal); journal.new_node(new, &mut s); } From b61c0397bc476ca2253f43724646fc703eb0fdb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 7 Mar 2016 14:36:38 +0100 Subject: [PATCH 3/8] removing unused variable --- util/src/journaldb.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 378bc2de5..35ad83fa0 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -617,7 +617,6 @@ mod tests { fn reopen_remove() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let bar = H256::random(); let foo = { let mut jdb = JournalDB::new(dir.to_str().unwrap()); From ab42ec8c81bc91f82b3b81d404e5da2e81382aff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 7 Mar 2016 14:40:39 +0100 Subject: [PATCH 4/8] Removing unneeded lifetime --- ethcore/src/blockchain/generator/generator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ethcore/src/blockchain/generator/generator.rs b/ethcore/src/blockchain/generator/generator.rs index 51e6294fc..88c9577e2 100644 --- a/ethcore/src/blockchain/generator/generator.rs +++ b/ethcore/src/blockchain/generator/generator.rs @@ -29,7 +29,7 @@ pub trait ChainIterator: Iterator + Sized { /// Blocks generated by fork will have lower difficulty than current chain. fn fork(&self, fork_number: usize) -> Fork where Self: Clone; /// Should be called to make every consecutive block have given bloom. 
- fn with_bloom<'a>(&'a mut self, bloom: H2048) -> Bloom<'a, Self>; + fn with_bloom(&mut self, bloom: H2048) -> Bloom; /// Should be called to complete block. Without complete, block may have incorrect hash. fn complete<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Complete<'a, Self>; /// Completes and generates block. @@ -44,7 +44,7 @@ impl ChainIterator for I where I: Iterator + Sized { } } - fn with_bloom<'a>(&'a mut self, bloom: H2048) -> Bloom<'a, Self> { + fn with_bloom(&mut self, bloom: H2048) -> Bloom { Bloom { iter: self, bloom: bloom From f84d40734d372aa2338df55dc0ad44c4d97650b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 9 Mar 2016 10:26:51 +0100 Subject: [PATCH 5/8] Validating sender before importing to queuue --- sync/src/chain.rs | 4 +- sync/src/transaction_queue.rs | 140 +++++++++++++++++++++------------- 2 files changed, 87 insertions(+), 57 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index a41b06904..8cf1beea1 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -935,7 +935,7 @@ impl ChainSync { let mut transaction_queue = self.transaction_queue.lock().unwrap(); for i in 0..item_count { let tx: SignedTransaction = try!(r.val_at(i)); - transaction_queue.add(tx, &fetch_latest_nonce); + let _ = transaction_queue.add(tx, &fetch_latest_nonce); } Ok(()) } @@ -1291,7 +1291,7 @@ impl ChainSync { let _sender = tx.sender(); } let mut transaction_queue = self.transaction_queue.lock().unwrap(); - transaction_queue.add_all(txs, |a| chain.nonce(a)); + let _ = transaction_queue.add_all(txs, |a| chain.nonce(a)); }); } diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 3e0d931b5..39ad29894 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -24,6 +24,7 @@ use util::numbers::{Uint, U256}; use util::hash::{Address, H256}; use util::table::*; use ethcore::transaction::*; +use ethcore::error::Error; #[derive(Clone, Debug)] @@ -82,10 +83,11 @@ struct VerifiedTransaction { transaction: SignedTransaction } impl VerifiedTransaction { - fn new(transaction: SignedTransaction) -> Self { - VerifiedTransaction { + fn new(transaction: SignedTransaction) -> Result { + try!(transaction.sender()); + Ok(VerifiedTransaction { transaction: transaction - } + }) } fn hash(&self) -> H256 { @@ -148,6 +150,8 @@ impl TransactionSet { } } +// Will be used when rpc merged +#[allow(dead_code)] #[derive(Debug)] /// Current status of the queue pub struct TransactionQueueStatus { @@ -196,6 +200,8 @@ impl TransactionQueue { } } + // Will be used when rpc merged + #[allow(dead_code)] /// Returns current status for this queue pub fn status(&self) -> TransactionQueueStatus { TransactionQueueStatus { @@ -205,17 +211,19 @@ impl TransactionQueue { } /// Adds all signed transactions to queue to be verified and imported - pub fn add_all(&mut self, txs: Vec, fetch_nonce: T) + pub fn add_all(&mut self, txs: Vec, fetch_nonce: T) -> Result<(), Error> where T: Fn(&Address) -> U256 { for tx in txs.into_iter() { - self.add(tx, &fetch_nonce); + try!(self.add(tx, &fetch_nonce)); } + Ok(()) } /// Add signed transaction to queue to be verified and imported - pub fn add(&mut self, tx: SignedTransaction, fetch_nonce: &T) + pub fn add(&mut self, tx: SignedTransaction, fetch_nonce: &T) -> Result<(), Error> where T: Fn(&Address) -> U256 { - self.import_tx(VerifiedTransaction::new(tx), fetch_nonce); + self.import_tx(try!(VerifiedTransaction::new(tx)), fetch_nonce); + Ok(()) } /// Removes all transactions identified by 
hashes given in slice @@ -299,7 +307,8 @@ impl TransactionQueue { self.future.enforce_limit(&mut self.by_hash); } - + // Will be used when mining merged + #[allow(dead_code)] /// Returns top transactions from the queue pub fn top_transactions(&self, size: usize) -> Vec { self.current.by_priority @@ -407,13 +416,8 @@ impl TransactionQueue { #[cfg(test)] mod test { extern crate rustc_serialize; - use self::rustc_serialize::hex::FromHex; - use std::ops::Deref; - use std::collections::{HashMap, BTreeSet}; - use util::crypto::KeyPair; - use util::numbers::{U256, Uint}; - use util::hash::{Address}; use util::table::*; + use util::*; use ethcore::transaction::*; use super::*; use super::{TransactionSet, TransactionOrder, VerifiedTransaction}; @@ -457,12 +461,12 @@ mod test { limit: 1 }; let (tx1, tx2) = new_txs(U256::from(1)); - let tx1 = VerifiedTransaction::new(tx1); - let tx2 = VerifiedTransaction::new(tx2); + let tx1 = VerifiedTransaction::new(tx1).unwrap(); + let tx2 = VerifiedTransaction::new(tx2).unwrap(); let mut by_hash = { let mut x = HashMap::new(); - let tx1 = VerifiedTransaction::new(tx1.transaction.clone()); - let tx2 = VerifiedTransaction::new(tx2.transaction.clone()); + let tx1 = VerifiedTransaction::new(tx1.transaction.clone()).unwrap(); + let tx2 = VerifiedTransaction::new(tx2.transaction.clone()).unwrap(); x.insert(tx1.hash(), tx1); x.insert(tx2.hash(), tx2); x @@ -496,13 +500,39 @@ mod test { let tx = new_tx(); // when - txq.add(tx, &default_nonce); + let res = txq.add(tx, &default_nonce); // then + assert!(res.is_ok()); let stats = txq.status(); assert_eq!(stats.pending, 1); } + #[test] + fn should_reject_incorectly_signed_transaction() { + // given + let mut txq = TransactionQueue::new(); + let tx = new_unsigned_tx(U256::from(123)); + let stx = { + let mut s = RlpStream::new_list(9); + s.append(&tx.nonce); + s.append(&tx.gas_price); + s.append(&tx.gas); + s.append_empty_data(); // action=create + s.append(&tx.value); + s.append(&tx.data); + s.append(&0u64); // v + s.append(&U256::zero()); // r + s.append(&U256::zero()); // s + decode(s.as_raw()) + }; + // when + let res = txq.add(stx, &default_nonce); + + // then + assert!(res.is_err()); + } + #[test] fn should_import_txs_from_same_sender() { // given @@ -511,8 +541,8 @@ mod test { let (tx, tx2) = new_txs(U256::from(1)); // when - txq.add(tx.clone(), &default_nonce); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx.clone(), &default_nonce).unwrap(); + txq.add(tx2.clone(), &default_nonce).unwrap(); // then let top = txq.top_transactions(5); @@ -529,8 +559,8 @@ mod test { let (tx, tx2) = new_txs(U256::from(2)); // when - txq.add(tx.clone(), &default_nonce); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx.clone(), &default_nonce).unwrap(); + txq.add(tx2.clone(), &default_nonce).unwrap(); // then let stats = txq.status(); @@ -551,13 +581,13 @@ mod test { let tx1 = new_unsigned_tx(U256::from(124)).sign(&secret); let tx2 = new_unsigned_tx(U256::from(125)).sign(&secret); - txq.add(tx, &default_nonce); + txq.add(tx, &default_nonce).unwrap(); assert_eq!(txq.status().pending, 1); - txq.add(tx2, &default_nonce); + txq.add(tx2, &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); // when - txq.add(tx1, &default_nonce); + txq.add(tx1, &default_nonce).unwrap(); // then let stats = txq.status(); @@ -570,8 +600,8 @@ mod test { // given let mut txq2 = TransactionQueue::new(); let (tx, tx2) = new_txs(U256::from(3)); - txq2.add(tx.clone(), &default_nonce); - txq2.add(tx2.clone(), &default_nonce); + txq2.add(tx.clone(), 
&default_nonce).unwrap(); + txq2.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq2.status().pending, 1); assert_eq!(txq2.status().future, 1); @@ -592,10 +622,10 @@ mod test { let mut txq = TransactionQueue::new(); let (tx, tx2) = new_txs(U256::from(1)); let tx3 = new_tx(); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx3.clone(), &default_nonce); - txq.add(tx.clone(), &default_nonce); + txq.add(tx3.clone(), &default_nonce).unwrap(); + txq.add(tx.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().pending, 3); // when @@ -614,8 +644,8 @@ mod test { let (tx, tx2) = new_txs(U256::one()); // add - txq.add(tx2.clone(), &default_nonce); - txq.add(tx.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); + txq.add(tx.clone(), &default_nonce).unwrap(); let stats = txq.status(); assert_eq!(stats.pending, 2); @@ -632,11 +662,11 @@ mod test { // given let mut txq = TransactionQueue::with_limits(1, 1); let (tx, tx2) = new_txs(U256::one()); - txq.add(tx.clone(), &default_nonce); + txq.add(tx.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().pending, 1); // when - txq.add(tx2.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); // then let t = txq.top_transactions(2); @@ -650,14 +680,14 @@ mod test { let mut txq = TransactionQueue::with_limits(10, 1); let (tx1, tx2) = new_txs(U256::from(4)); let (tx3, tx4) = new_txs(U256::from(4)); - txq.add(tx1.clone(), &default_nonce); - txq.add(tx3.clone(), &default_nonce); + txq.add(tx1.clone(), &default_nonce).unwrap(); + txq.add(tx3.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().pending, 2); // when - txq.add(tx2.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx4.clone(), &default_nonce); + txq.add(tx4.clone(), &default_nonce).unwrap(); // then assert_eq!(txq.status().future, 1); @@ -671,7 +701,7 @@ mod test { let fetch_last_nonce = |_a: &Address| last_nonce; // when - txq.add(tx, &fetch_last_nonce); + txq.add(tx, &fetch_last_nonce).unwrap(); // then let stats = txq.status(); @@ -685,12 +715,12 @@ mod test { let nonce = |a: &Address| default_nonce(a) + U256::one(); let mut txq = TransactionQueue::new(); let (_tx1, tx2) = new_txs(U256::from(1)); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); assert_eq!(txq.status().pending, 0); // when - txq.add(tx2.clone(), &nonce); + txq.add(tx2.clone(), &nonce).unwrap(); // then let stats = txq.status(); @@ -703,15 +733,15 @@ mod test { // given let mut txq = TransactionQueue::new(); let (tx1, tx2) = new_txs(U256::from(1)); - txq.add(tx1.clone(), &default_nonce); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx1.clone(), &default_nonce).unwrap(); + txq.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().pending, 2); // when txq.remove(&tx1.hash(), &default_nonce); assert_eq!(txq.status().pending, 0); assert_eq!(txq.status().future, 1); - txq.add(tx1.clone(), &default_nonce); + txq.add(tx1.clone(), &default_nonce).unwrap(); // then let stats = txq.status(); @@ -726,10 +756,10 @@ mod test { let mut txq = TransactionQueue::new(); let (tx, tx2) = new_txs(U256::from(1)); let tx3 = new_tx(); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx3.clone(), &default_nonce); - txq.add(tx.clone(), &default_nonce); + 
txq.add(tx3.clone(), &default_nonce).unwrap(); + txq.add(tx.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().pending, 3); // when @@ -754,8 +784,8 @@ mod test { }; // when - txq.add(tx, &default_nonce); - txq.add(tx2, &default_nonce); + txq.add(tx, &default_nonce).unwrap(); + txq.add(tx2, &default_nonce).unwrap(); // then let stats = txq.status(); @@ -782,10 +812,10 @@ mod test { }; // when - txq.add(tx1, &default_nonce); - txq.add(tx2, &default_nonce); + txq.add(tx1, &default_nonce).unwrap(); + txq.add(tx2, &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx0, &default_nonce); + txq.add(tx0, &default_nonce).unwrap(); // then let stats = txq.status(); @@ -801,8 +831,8 @@ mod test { let next_nonce = |a: &Address| default_nonce(a) + U256::one(); let mut txq = TransactionQueue::new(); let (tx1, tx2) = new_txs(U256::one()); - txq.add(tx1.clone(), &previous_nonce); - txq.add(tx2, &previous_nonce); + txq.add(tx1.clone(), &previous_nonce).unwrap(); + txq.add(tx2, &previous_nonce).unwrap(); assert_eq!(txq.status().future, 2); // when From 3c1888c26abd94107f20a4a42cabaa3f01fbef53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 09:26:04 +0100 Subject: [PATCH 6/8] Fixing deps --- Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7df0c2541..1dbe54c8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,14 +25,14 @@ ethcore-util = { path = "util" } ethsync = { path = "sync" } ethcore-devtools = { path = "devtools" } ethcore-rpc = { path = "rpc", optional = true } +number_prefix = "0.2" +rpassword = "0.1" [dev-dependencies] ethcore = { path = "ethcore", features = ["dev"] } ethcore-util = { path = "util", features = ["dev"] } ethsync = { path = "sync", features = ["dev"] } ethcore-rpc = { path = "rpc", features = ["dev"] } -number_prefix = "0.2" -rpassword = "0.1" [features] default = ["rpc"] From 878e38c0cf922fbdd3b51b0f71892418181903ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 09:33:25 +0100 Subject: [PATCH 7/8] Fixing deps again --- Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 1dbe54c8f..e797a3eac 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,6 @@ ethcore-util = { path = "util" } ethsync = { path = "sync" } ethcore-devtools = { path = "devtools" } ethcore-rpc = { path = "rpc", optional = true } -number_prefix = "0.2" rpassword = "0.1" [dev-dependencies] From 9f77a85491b714ee03de491471c23b2714548cc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 09:35:46 +0100 Subject: [PATCH 8/8] Fixing compilation on nightly --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index e797a3eac..22d0f9288 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,7 +39,7 @@ rpc = ["ethcore-rpc"] dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"] dev-clippy = ["clippy", "ethcore/clippy", "ethcore-util/clippy", "ethsync/clippy", "ethcore-rpc/clippy"] travis-beta = ["ethcore/json-tests"] -travis-nightly = ["ethcore/json-tests", "clippy", "dev"] +travis-nightly = ["ethcore/json-tests", "dev-clippy", "dev"] [[bin]] path = "parity/main.rs"