Clippy bump (#2877)

* Bumping clippy

* Fixing warnings

* Fix the "fix"
Tomasz Drwięga 2016-10-27 08:28:12 +02:00 committed by Gav Wood
parent 9bfb8094cc
commit 88997801d0
39 changed files with 117 additions and 112 deletions

Cargo.lock (generated)

@@ -3,7 +3,7 @@ name = "parity"
version = "1.4.0"
dependencies = [
"ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
-"clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+"clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
"ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)",
"daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -145,15 +145,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "clippy"
-version = "0.0.90"
+version = "0.0.96"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
-"clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+"clippy_lints 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "clippy_lints"
-version = "0.0.90"
+version = "0.0.96"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -279,7 +279,7 @@ dependencies = [
"bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
-"clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+"clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"ethash 1.4.0",
@@ -330,7 +330,7 @@ dependencies = [
name = "ethcore-dapps"
version = "1.4.0"
dependencies = [
-"clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+"clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"ethabi 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore-devtools 1.4.0",
@@ -473,7 +473,7 @@ dependencies = [
name = "ethcore-rpc"
version = "1.4.0"
dependencies = [
-"clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+"clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
"ethash 1.4.0",
"ethcore 1.4.0",
"ethcore-devtools 1.4.0",
@@ -503,7 +503,7 @@ dependencies = [
name = "ethcore-signer"
version = "1.4.0"
dependencies = [
-"clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+"clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore-devtools 1.4.0",
"ethcore-io 1.4.0",
@@ -542,7 +542,7 @@ version = "1.4.0"
dependencies = [
"ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
"arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
-"clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+"clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
"elastic-array 0.6.0 (git+https://github.com/ethcore/elastic-array)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)",
@@ -631,7 +631,7 @@ dependencies = [
name = "ethsync"
version = "1.4.0"
dependencies = [
-"clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+"clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore 1.4.0",
"ethcore-io 1.4.0",
@@ -1948,8 +1948,8 @@ dependencies = [
"checksum bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c129aff112dcc562970abb69e2508b40850dd24c274761bb50fb8a0067ba6c27"
"checksum bytes 0.4.0-dev (git+https://github.com/carllerche/bytes)" = "<none>"
"checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c"
-"checksum clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "d19bda68c3db98e3a780342f6101b44312fef20a5f13ce756d1202a35922b01b"
-"checksum clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "3d4ed67c69b9bb35169be2538691d290a3aa0cbfd4b9f0bfb7c221fc1d399a96"
+"checksum clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)" = "6eacf01b0aad84a0817703498f72d252df7c0faf6a5b86d0be4265f1829e459f"
+"checksum clippy_lints 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)" = "a49960c9aab544ce86b004dcb61620e8b898fea5fc0f697a028f460f48221ed6"
"checksum cookie 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "90266f45846f14a1e986c77d1e9c2626b8c342ed806fe60241ec38cc8697b245"
"checksum crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "fb974f835e90390c5f9dfac00f05b06dc117299f5ea4e85fbc7bb443af4911cc"
"checksum ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)" = "<none>"


@@ -46,7 +46,7 @@ ethcore-logger = { path = "logger" }
rlp = { path = "util/rlp" }
ethcore-stratum = { path = "stratum" }
ethcore-dapps = { path = "dapps", optional = true }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
[target.'cfg(windows)'.dependencies]
winapi = "0.2"
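
Clippy in this period ran as a nightly-only compiler plugin, which is why each crate lists it as an optional dependency behind a feature. The usual crate-root wiring such a manifest implies is sketched below (these are the standard plugin attributes of the era, not lines taken from this commit):

// Enabled only for `--features dev` builds on a nightly toolchain;
// normal builds never reference the plugin.
#![cfg_attr(feature = "dev", feature(plugin))]
#![cfg_attr(feature = "dev", plugin(clippy))]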


@@ -33,7 +33,7 @@ fetch = { path = "../util/fetch" }
parity-ui = { path = "./ui" }
mime_guess = { version = "1.6.1" }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
[build-dependencies]
serde_codegen = { version = "0.8", optional = true }


@@ -11,7 +11,7 @@ build = "build.rs"
ethcore-ipc-codegen = { path = "../ipc/codegen" }
[dependencies]
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
ethcore-devtools = { path = "../devtools" }
ethcore-ipc = { path = "../ipc/rpc" }
rocksdb = { git = "https://github.com/ethcore/rust-rocksdb" }


@@ -25,7 +25,7 @@ semver = "0.2"
bit-set = "0.4"
time = "0.1"
evmjit = { path = "../evmjit", optional = true }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
ethash = { path = "../ethash" }
ethcore-util = { path = "../util" }
ethcore-io = { path = "../util/io" }


@@ -267,17 +267,17 @@ impl AccountProvider {
/// Returns `true` if the password for `account` is `password`. `false` if not.
pub fn test_password(&self, account: &Address, password: String) -> Result<bool, Error> {
-match self.sstore.sign(&account, &password, &Default::default()) {
+match self.sstore.sign(account, &password, &Default::default()) {
Ok(_) => Ok(true),
Err(SSError::InvalidPassword) => Ok(false),
Err(e) => Err(Error::SStore(e)),
}
}
}
/// Changes the password of `account` from `password` to `new_password`. Fails if incorrect `password` given.
pub fn change_password(&self, account: &Address, password: String, new_password: String) -> Result<(), Error> {
-self.sstore.change_password(&account, &password, &new_password).map_err(Error::SStore)
-}
+self.sstore.change_password(account, &password, &new_password).map_err(Error::SStore)
+}
/// Helper method used for unlocking accounts.
fn unlock_account(&self, account: Address, password: String, unlock: Unlock) -> Result<(), Error> {


@@ -542,7 +542,7 @@ pub fn enact(
Ok(b.close_and_lock())
}
-#[inline(always)]
+#[inline]
#[cfg(not(feature = "slow-blocks"))]
fn push_transactions(block: &mut OpenBlock, transactions: &[SignedTransaction]) -> Result<(), Error> {
for t in transactions {


@@ -414,6 +414,7 @@ impl<'a> Iterator for AncestryIter<'a> {
}
impl BlockChain {
+#[cfg_attr(feature="dev", allow(useless_let_if_seq))]
/// Create new instance of blockchain from given Genesis
pub fn new(config: Config, genesis: &[u8], db: Arc<Database>) -> BlockChain {
// 400 is the avarage size of the key
@@ -565,7 +566,7 @@ impl BlockChain {
let range = extras.number as bc::Number .. extras.number as bc::Number;
let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
let changes = chain.replace(&range, vec![]);
-for (k, v) in changes.into_iter() {
+for (k, v) in changes {
batch.write(db::COL_EXTRA, &LogGroupPosition::from(k), &BloomGroup::from(v));
}
batch.put(db::COL_EXTRA, b"best", &hash);


@@ -66,7 +66,7 @@ impl<T> CacheManager<T> where T: Eq + Hash {
}
fn rotate_cache_if_needed(&mut self) {
-if self.cache_usage.len() == 0 { return }
+if self.cache_usage.is_empty() { return }
if self.cache_usage[0].len() * self.bytes_per_cache_entry > self.pref_cache_size / COLLECTION_QUEUE_SIZE {
if let Some(cache) = self.cache_usage.pop_back() {
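
`len() == 0` to `is_empty()` is clippy's `len_zero` lint: `is_empty` states the intent directly and exists even for collections where computing `len` is more work. A sketch with hypothetical types:

use std::collections::VecDeque;

fn needs_rotation(cache_usage: &VecDeque<Vec<u8>>) -> bool {
    // len_zero: prefer `is_empty()` over comparing `len()` with zero.
    !cache_usage.is_empty()
}

fn main() {
    let cache: VecDeque<Vec<u8>> = VecDeque::new();
    assert!(!needs_rotation(&cache));
}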


@@ -314,7 +314,7 @@ impl Client {
if let Some(parent) = chain_has_parent {
// Enact Verified Block
let last_hashes = self.build_last_hashes(header.parent_hash().clone());
-let db = self.state_db.lock().boxed_clone_canon(&header.parent_hash());
+let db = self.state_db.lock().boxed_clone_canon(header.parent_hash());
let enact_result = enact_verified(block, engine, self.tracedb.read().tracing_enabled(), db, &parent, last_hashes, self.factories.clone());
let locked_block = try!(enact_result.map_err(|e| {


@@ -114,7 +114,7 @@ pub trait Writable {
R: Deref<Target = [u8]> {
match policy {
CacheUpdatePolicy::Overwrite => {
-for (key, value) in values.into_iter() {
+for (key, value) in values {
self.write(col, &key, &value);
cache.insert(key, value);
}
@@ -135,7 +135,7 @@ pub trait Writable {
R: Deref<Target = [u8]> {
match policy {
CacheUpdatePolicy::Overwrite => {
-for (key, value) in values.into_iter() {
+for (key, value) in values {
match value {
Some(ref v) => self.write(col, &key, v),
None => self.delete(col, &key),
@@ -144,7 +144,7 @@
}
},
CacheUpdatePolicy::Remove => {
-for (key, value) in values.into_iter() {
+for (key, value) in values {
match value {
Some(v) => self.write(col, &key, &v),
None => self.delete(col, &key),
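
The recurring `for x in values.into_iter()` to `for x in values` rewrite is clippy's `explicit_into_iter_loop` lint: a `for` loop already applies `IntoIterator::into_iter` to its argument, so spelling the call out adds nothing. Self-contained example:

fn main() {
    let values = vec![(1, "a"), (2, "b")];
    // The loop desugars to `IntoIterator::into_iter(values)` by itself,
    // so an explicit `.into_iter()` call would be redundant.
    for (key, value) in values {
        println!("{} => {}", key, value);
    }
}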


@@ -54,14 +54,14 @@ const TWO_POW_248: U256 = U256([0, 0, 0, 0x100000000000000]); //0x1 00000000 000
/// Abstraction over raw vector of Bytes. Easier state management of PC.
struct CodeReader<'a> {
position: ProgramCounter,
-code: &'a Bytes
+code: &'a [u8]
}
#[cfg_attr(feature="dev", allow(len_without_is_empty))]
impl<'a> CodeReader<'a> {
/// Create new code reader - starting at position 0.
-fn new(code: &'a Bytes) -> Self {
+fn new(code: &'a [u8]) -> Self {
CodeReader {
position: 0,
code: code,
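
`&'a Bytes` to `&'a [u8]` follows clippy's `ptr_arg` advice: `Bytes` is a `Vec<u8>` alias, and `&Vec<u8>` adds a pointless level of indirection while also rejecting plain slices. A compilable sketch of the reduced shape (details assumed, not copied from the file):

type ProgramCounter = usize;

// `&[u8]` accepts a vector, a slice, or a sub-slice alike.
struct CodeReader<'a> {
    position: ProgramCounter,
    code: &'a [u8],
}

impl<'a> CodeReader<'a> {
    fn new(code: &'a [u8]) -> Self {
        CodeReader { position: 0, code: code }
    }
}

fn main() {
    let bytecode = vec![0x60, 0x00];
    let reader = CodeReader::new(&bytecode);
    assert_eq!(reader.code.len() - reader.position, 2);
}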


@@ -61,7 +61,7 @@ pub fn generate_bloom(source: Arc<Database>, dest: &mut Database) -> Result<(),
let account_trie = try!(TrieDB::new(state_db.as_hashdb(), &state_root).map_err(|e| Error::Custom(format!("Cannot open trie: {:?}", e))));
for item in try!(account_trie.iter().map_err(|_| Error::MigrationImpossible)) {
let (ref account_key, _) = try!(item.map_err(|_| Error::MigrationImpossible));
-let account_key_hash = H256::from_slice(&account_key);
+let account_key_hash = H256::from_slice(account_key);
bloom.set(&*account_key_hash);
}


@@ -362,7 +362,7 @@ impl Miner {
{
let mut queue = self.transaction_queue.lock();
-for hash in invalid_transactions.into_iter() {
+for hash in invalid_transactions {
queue.remove_invalid(&hash, &fetch_account);
}
for hash in transactions_to_penalize {
@@ -522,6 +522,8 @@ impl Miner {
/// Are we allowed to do a non-mandatory reseal?
fn tx_reseal_allowed(&self) -> bool { Instant::now() > *self.next_allowed_reseal.lock() }
+#[cfg_attr(feature="dev", allow(wrong_self_convention))]
+#[cfg_attr(feature="dev", allow(redundant_closure))]
fn from_pending_block<H, F, G>(&self, latest_block_number: BlockNumber, from_chain: F, map_block: G) -> H
where F: Fn() -> H, G: Fn(&ClosedBlock) -> H {
let sealing_work = self.sealing_work.lock();
@@ -885,7 +887,7 @@ impl MinerService for Miner {
fn pending_receipts(&self, best_block: BlockNumber) -> BTreeMap<H256, Receipt> {
self.from_pending_block(
best_block,
-|| BTreeMap::new(),
+BTreeMap::new,
|pending| {
let hashes = pending.transactions()
.iter()
@@ -1019,7 +1021,7 @@
tx.sender().expect("Transaction is in block, so sender has to be defined.")
})
.collect::<HashSet<Address>>();
-for sender in to_remove.into_iter() {
+for sender in to_remove {
transaction_queue.remove_all(sender, chain.latest_nonce(&sender));
}
});
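
`|| BTreeMap::new()` to `BTreeMap::new` fixes clippy's `redundant_closure` lint: a closure whose body only calls another function can be replaced by that function item. Minimal demonstration, with a hypothetical helper:

use std::collections::BTreeMap;

fn or_default<F>(maybe: Option<BTreeMap<u64, u64>>, fallback: F) -> BTreeMap<u64, u64>
    where F: Fn() -> BTreeMap<u64, u64> {
    maybe.unwrap_or_else(fallback)
}

fn main() {
    // redundant_closure: `|| BTreeMap::new()` merely forwards to
    // `BTreeMap::new`, so the function item is passed directly.
    let m = or_default(None, BTreeMap::new);
    assert!(m.is_empty());
}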


@@ -446,6 +446,7 @@ pub struct AccountDetails {
const GAS_LIMIT_HYSTERESIS: usize = 10; // (100/GAS_LIMIT_HYSTERESIS) %
/// Describes the strategy used to prioritize transactions in the queue.
+#[cfg_attr(feature="dev", allow(enum_variant_names))]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum PrioritizationStrategy {
/// Use only gas price. Disregards the actual computation cost of the transaction.
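
The added `cfg_attr` line above is the codebase's standard opt-out for a lint that fires on intentional code: the `allow` only exists when the clippy feature is compiled in, so stable builds never see an unknown lint name. Shape of the pattern, with illustrative variant names:

// The `allow` materializes only under `--features dev`, where the
// clippy plugin defining `enum_variant_names` is actually loaded.
#[cfg_attr(feature = "dev", allow(enum_variant_names))]
pub enum PrioritizationStrategy {
    GasPriceOnly,
    GasAndGasPrice,
    GasFactorAndGasPrice,
}

fn main() {
    let _ = PrioritizationStrategy::GasPriceOnly;
}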


@@ -87,7 +87,7 @@ impl ClientService {
db_config.set_cache(::db::COL_STATE, size);
}
-db_config.compaction = config.db_compaction.compaction_profile(&client_path);
+db_config.compaction = config.db_compaction.compaction_profile(client_path);
db_config.wal = config.db_wal;
let pruning = config.pruning;


@@ -202,7 +202,7 @@ impl<'a> BlockChunker<'a> {
// cut off the chunk if too large.
-if new_loaded_size > PREFERRED_CHUNK_SIZE && self.rlps.len() > 0 {
+if new_loaded_size > PREFERRED_CHUNK_SIZE && !self.rlps.is_empty() {
try!(self.write_chunk(last));
loaded_size = pair.len();
} else {


@@ -413,7 +413,7 @@ impl Account {
self.code_size = other.code_size;
self.address_hash = other.address_hash;
let mut cache = self.storage_cache.borrow_mut();
-for (k, v) in other.storage_cache.into_inner().into_iter() {
+for (k, v) in other.storage_cache.into_inner() {
cache.insert(k.clone() , v.clone()); //TODO: cloning should not be required here
}
self.storage_changes = other.storage_changes;


@@ -127,11 +127,10 @@ impl AccountEntry {
fn overwrite_with(&mut self, other: AccountEntry) {
self.state = other.state;
match other.account {
-Some(acc) => match self.account {
-Some(ref mut ours) => {
+Some(acc) => {
+if let Some(ref mut ours) = self.account {
ours.overwrite_with(acc);
-},
-None => {},
+}
},
None => self.account = None,
}
@@ -281,13 +280,10 @@ impl State {
}
},
None => {
-match self.cache.get_mut().entry(k) {
-Entry::Occupied(e) => {
-if e.get().is_dirty() {
-e.remove();
-}
-},
-_ => {}
+if let Entry::Occupied(e) = self.cache.get_mut().entry(k) {
+if e.get().is_dirty() {
+e.remove();
+}
}
}
}
@@ -501,6 +497,7 @@ impl State {
/// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit.
/// `accounts` is mutable because we may need to commit the code or storage and record that.
#[cfg_attr(feature="dev", allow(match_ref_pats))]
+#[cfg_attr(feature="dev", allow(needless_borrow))]
fn commit_into(
factories: &Factories,
db: &mut StateDB,
@@ -509,17 +506,14 @@
) -> Result<(), Error> {
// first, commit the sub trees.
for (address, ref mut a) in accounts.iter_mut().filter(|&(_, ref a)| a.is_dirty()) {
-match a.account {
-Some(ref mut account) => {
-if !account.is_empty() {
-db.note_account_bloom(&address);
-}
-let addr_hash = account.address_hash(address);
-let mut account_db = factories.accountdb.create(db.as_hashdb_mut(), addr_hash);
-account.commit_storage(&factories.trie, account_db.as_hashdb_mut());
-account.commit_code(account_db.as_hashdb_mut());
+if let Some(ref mut account) = a.account {
+if !account.is_empty() {
+db.note_account_bloom(address);
}
-_ => {}
+let addr_hash = account.address_hash(address);
+let mut account_db = factories.accountdb.create(db.as_hashdb_mut(), addr_hash);
+account.commit_storage(&factories.trie, account_db.as_hashdb_mut());
+account.commit_code(account_db.as_hashdb_mut());
}
}
@@ -586,7 +580,7 @@ impl State {
fn query_pod(&mut self, query: &PodState) {
for (address, pod_account) in query.get().into_iter()
-.filter(|&(ref a, _)| self.ensure_cached(a, RequireCache::Code, true, |a| a.is_some()))
+.filter(|&(a, _)| self.ensure_cached(a, RequireCache::Code, true, |a| a.is_some()))
{
// needs to be split into two parts for the refcell code here
// to work.
@@ -679,14 +673,12 @@ impl State {
None => {
let maybe_acc = if self.db.check_account_bloom(a) {
let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
-let maybe_acc = match db.get(a) {
+match db.get(a) {
Ok(Some(acc)) => AccountEntry::new_clean(Some(Account::from_rlp(&acc))),
Ok(None) => AccountEntry::new_clean(None),
Err(e) => panic!("Potential DB corruption encountered: {}", e),
-};
-maybe_acc
-}
-else {
+}
+} else {
AccountEntry::new_clean(None)
};
self.insert_cache(a, maybe_acc);
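
Most of this hunk collapses two-arm `match` expressions whose second arm is `None => {}` or `_ => {}` into `if let`, which is exactly what clippy's `single_match` lint suggests. Reduced form:

fn main() {
    let account: Option<u32> = Some(7);
    // single_match: instead of
    // `match account { Some(x) => { ... }, None => {} }`
    // write the one interesting arm as an `if let`.
    if let Some(x) = account {
        println!("balance: {}", x);
    }
}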


@@ -170,7 +170,7 @@ impl StateDB {
pub fn commit_bloom(batch: &mut DBTransaction, journal: BloomJournal) -> Result<(), UtilError> {
assert!(journal.hash_functions <= 255);
-batch.put(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY, &vec![journal.hash_functions as u8]);
+batch.put(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY, &[journal.hash_functions as u8]);
let mut key = [0u8; 8];
let mut val = [0u8; 8];
@@ -216,7 +216,7 @@ impl StateDB {
let mut clear = false;
for block in enacted.iter().filter(|h| self.commit_hash.as_ref().map_or(true, |p| *h != p)) {
clear = clear || {
-if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) {
+if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) {
trace!("Reverting enacted block {:?}", block);
m.is_canon = true;
for a in &m.accounts {
@@ -232,7 +232,7 @@ impl StateDB {
for block in retracted {
clear = clear || {
-if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) {
+if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) {
trace!("Retracting block {:?}", block);
m.is_canon = false;
for a in &m.accounts {
@@ -286,7 +286,7 @@ impl StateDB {
is_canon: is_best,
parent: parent.clone(),
};
-let insert_at = cache.modifications.iter().enumerate().find(|&(_, ref m)| m.number < *number).map(|(i, _)| i);
+let insert_at = cache.modifications.iter().enumerate().find(|&(_, m)| m.number < *number).map(|(i, _)| i);
trace!("inserting modifications at {:?}", insert_at);
if let Some(insert_at) = insert_at {
cache.modifications.insert(insert_at, block_changes);
@@ -369,7 +369,7 @@ impl StateDB {
if !Self::is_allowed(addr, &self.parent_hash, &cache.modifications) {
return None;
}
-cache.accounts.get_mut(&addr).map(|a| a.as_ref().map(|a| a.clone_basic()))
+cache.accounts.get_mut(addr).map(|a| a.as_ref().map(|a| a.clone_basic()))
}
/// Get value from a cached account.
@@ -406,8 +406,7 @@ impl StateDB {
// We search for our parent in that list first and then for
// all its parent until we hit the canonical block,
// checking against all the intermediate modifications.
-let mut iter = modifications.iter();
-while let Some(ref m) = iter.next() {
+for m in modifications {
if &m.hash == parent {
if m.is_canon {
return true;
@@ -420,7 +419,7 @@
}
}
trace!("Cache lookup skipped for {:?}: parent hash is unknown", addr);
-return false;
+false
}
}
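
Two further lints appear here: driving a fresh iterator with `while let Some(m) = iter.next()` is just a `for` loop (clippy's `while_let_on_iterator`), and a trailing `return false;` becomes the tail expression `false` (`needless_return`). Sketch:

fn contains(haystack: &[u32], needle: u32) -> bool {
    // while_let_on_iterator: a `for` loop replaces
    // `let mut iter = haystack.iter(); while let Some(m) = iter.next() { .. }`.
    for m in haystack {
        if *m == needle {
            return true;
        }
    }
    // needless_return: end with the value, not `return false;`.
    false
}

fn main() {
    assert!(contains(&[1, 2, 3], 2));
}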


@@ -285,7 +285,7 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
let mut blooms = self.blooms.write();
batch.extend_with_cache(db::COL_TRACE, &mut *blooms, blooms_to_insert, CacheUpdatePolicy::Remove);
// note_used must be called after locking blooms to avoid cache/traces deadlock on garbage collection
-for key in blooms_keys.into_iter() {
+for key in blooms_keys {
self.note_used(CacheID::Bloom(key));
}
}


@@ -50,12 +50,12 @@ fn prefix_subtrace_addresses(mut traces: Vec<FlatTrace>) -> Vec<FlatTrace> {
// [1, 0]
let mut current_subtrace_index = 0;
let mut first = true;
-for trace in traces.iter_mut() {
+for trace in &mut traces {
match (first, trace.trace_address.is_empty()) {
(true, _) => first = false,
(_, true) => current_subtrace_index += 1,
_ => {}
-}
+}
trace.trace_address.push_front(current_subtrace_index);
}
traces
@@ -78,7 +78,7 @@ fn should_prefix_address_properly() {
let t = vec![vec![], vec![0], vec![0, 0], vec![0], vec![], vec![], vec![0], vec![]].into_iter().map(&f).collect();
let t = prefix_subtrace_addresses(t);
assert_eq!(t, vec![vec![0], vec![0, 0], vec![0, 0, 0], vec![0, 0], vec![1], vec![2], vec![2, 0], vec![3]].into_iter().map(&f).collect::<Vec<_>>());
-}
+}
impl Tracer for ExecutiveTracer {
fn prepare_trace_call(&self, params: &ActionParams) -> Option<Call> {


@@ -10,7 +10,7 @@ rustc-serialize = "0.3"
serde = "0.8"
serde_json = "0.8"
serde_macros = { version = "0.8", optional = true }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
[build-dependencies]
serde_codegen = { version = "0.8", optional = true }


@@ -29,7 +29,7 @@ fetch = { path = "../util/fetch" }
rustc-serialize = "0.3"
transient-hashmap = "0.1"
serde_macros = { version = "0.8.0", optional = true }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" }
ethcore-ipc = { path = "../ipc/rpc" }
time = "0.1"


@@ -23,7 +23,7 @@ ethcore-rpc = { path = "../rpc" }
ethcore-devtools = { path = "../devtools" }
parity-ui = { path = "../dapps/ui", version = "1.4", optional = true }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
[features]
dev = ["clippy"]


@@ -99,6 +99,7 @@ impl<T: TimeProvider> AuthCodes<T> {
}
/// Checks if given hash is correct identifier of `SignerUI`
+#[cfg_attr(feature="dev", allow(wrong_self_convention))]
pub fn is_valid(&mut self, hash: &H256, time: u64) -> bool {
let now = self.now.now();
// check time


@@ -17,7 +17,7 @@ ethcore-network = { path = "../util/network" }
ethcore-io = { path = "../util/io" }
ethcore = { path = "../ethcore" }
rlp = { path = "../util/rlp" }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
log = "0.3"
env_logger = "0.3"
time = "0.1.34"


@@ -114,7 +114,7 @@ impl BlockCollection {
/// Insert a set of headers into collection and advance subchain head pointers.
pub fn insert_headers(&mut self, headers: Vec<Bytes>) {
-for h in headers.into_iter() {
+for h in headers {
if let Err(e) = self.insert_header(h) {
trace!(target: "sync", "Ignored invalid header: {:?}", e);
}
@@ -125,7 +125,7 @@
/// Insert a collection of block bodies for previously downloaded headers.
pub fn insert_bodies(&mut self, bodies: Vec<Bytes>) -> usize {
let mut inserted = 0;
-for b in bodies.into_iter() {
+for b in bodies {
if let Err(e) = self.insert_body(b) {
trace!(target: "sync", "Ignored invalid body: {:?}", e);
} else {
@@ -141,7 +141,7 @@
return 0;
}
let mut inserted = 0;
-for r in receipts.into_iter() {
+for r in receipts {
if let Err(e) = self.insert_receipt(r) {
trace!(target: "sync", "Ignored invalid receipt: {:?}", e);
} else {


@@ -209,8 +209,8 @@ pub struct SyncStatus {
impl SyncStatus {
/// Indicates if snapshot download is in progress
pub fn is_snapshot_syncing(&self) -> bool {
-self.state == SyncState::SnapshotManifest
-|| self.state == SyncState::SnapshotData
+self.state == SyncState::SnapshotManifest
+|| self.state == SyncState::SnapshotData
|| self.state == SyncState::SnapshotWaiting
}
@@ -381,7 +381,7 @@ impl ChainSync {
/// Returns information on peers connections
pub fn peers(&self, io: &SyncIo) -> Vec<PeerInfoDigest> {
self.peers.iter()
-.filter_map(|(&peer_id, ref peer_data)|
+.filter_map(|(&peer_id, peer_data)|
io.peer_session_info(peer_id).map(|session_info|
PeerInfoDigest {
id: session_info.id.map(|id| id.hex()),
@@ -453,7 +453,7 @@
self.init_downloaders(io.chain());
self.reset_and_continue(io);
}
/// Restart sync after bad block has been detected. May end up re-downloading up to QUEUE_SIZE blocks
fn init_downloaders(&mut self, chain: &BlockChainClient) {
// Do not assume that the block queue/chain still has our last_imported_block
@@ -1017,7 +1017,7 @@
return;
}
let (peer_latest, peer_difficulty, peer_snapshot_number, peer_snapshot_hash) = {
-if let Some(ref peer) = self.peers.get_mut(&peer_id) {
+if let Some(peer) = self.peers.get_mut(&peer_id) {
if peer.asking != PeerAsking::Nothing || !peer.can_sync() {
return;
}
@@ -1142,6 +1142,7 @@
}
/// Checks if there are blocks fully downloaded that can be imported into the blockchain and does the import.
+#[cfg_attr(feature="dev", allow(block_in_if_condition_stmt))]
fn collect_blocks(&mut self, io: &mut SyncIo, block_set: BlockSet) {
match block_set {
BlockSet::NewBlocks => {
@@ -1150,9 +1151,9 @@
}
},
BlockSet::OldBlocks => {
-if self.old_blocks.as_mut().map_or(false, |downloader| { downloader.collect_blocks(io, false) == Err(DownloaderImportError::Invalid) }) {
-self.restart(io);
-} else if self.old_blocks.as_ref().map_or(false, |downloader| { downloader.is_complete() }) {
+if self.old_blocks.as_mut().map_or(false, |downloader| { downloader.collect_blocks(io, false) == Err(DownloaderImportError::Invalid) }) {
+self.restart(io);
+} else if self.old_blocks.as_ref().map_or(false, |downloader| { downloader.is_complete() }) {
trace!(target: "sync", "Background block download is complete");
self.old_blocks = None;
}
@@ -1242,7 +1243,7 @@
return true;
}
}
-return false;
+false
}
/// Generic request sender
@@ -1370,7 +1371,7 @@
while number <= last && count < max_count {
if let Some(hdr) = overlay.get(&number) {
trace!(target: "sync", "{}: Returning cached fork header", peer_id);
-data.extend(hdr);
+data.extend_from_slice(hdr);
count += 1;
} else if let Some(mut hdr) = io.chain().block_header(BlockID::Number(number)) {
data.append(&mut hdr);
@@ -1707,7 +1708,7 @@
self.send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp);
}
}
-if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
+if let Some(ref mut peer) = self.peers.get_mut(peer_id) {
peer.latest_hash = chain_info.best_block_hash.clone();
}
sent += 1;
@@ -1725,7 +1726,7 @@
sent += match ChainSync::create_new_hashes_rlp(io.chain(), &last_parent, &chain_info.best_block_hash) {
Some(rlp) => {
{
-if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
+if let Some(ref mut peer) = self.peers.get_mut(peer_id) {
peer.latest_hash = chain_info.best_block_hash.clone();
}
}
@@ -1793,7 +1794,7 @@
// Send RLPs
let sent = lucky_peers.len();
if sent > 0 {
-for (peer_id, rlp) in lucky_peers.into_iter() {
+for (peer_id, rlp) in lucky_peers {
self.send_packet(io, peer_id, TRANSACTIONS_PACKET, rlp);
}
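
`data.extend(hdr)` to `data.extend_from_slice(hdr)` follows the clippy suggestion of the time (its `extend_from_slice` lint): for a `Vec<u8>` and a byte slice, the specialized method states the memcpy-style append directly instead of going through the generic iterator machinery. Example:

fn main() {
    let hdr: &[u8] = &[0xf8, 0x6b, 0x01];
    let mut data: Vec<u8> = Vec::new();
    // Specialized slice append rather than iterator-based `extend`.
    data.extend_from_slice(hdr);
    assert_eq!(data.len(), 3);
}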


@@ -23,7 +23,7 @@ rlp = { path = "rlp" }
heapsize = { version = "0.3", features = ["unstable"] }
itertools = "0.4"
sha3 = { path = "sha3" }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
ethcore-devtools = { path = "../devtools" }
libc = "0.2.7"
vergen = "0.1"


@@ -14,7 +14,7 @@ time = "0.1.34"
tiny-keccak = "1.0"
rust-crypto = "0.2.34"
slab = "0.2"
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
igd = "0.5.0"
libc = "0.2.7"
parking_lot = "0.3"


@@ -78,7 +78,7 @@ impl HashDB for ArchiveDB {
ret.insert(h, 1);
}
-for (key, refs) in self.overlay.keys().into_iter() {
+for (key, refs) in self.overlay.keys() {
let refs = *ret.get(&key).unwrap_or(&0) + refs;
ret.insert(key, refs);
}
@@ -152,7 +152,7 @@ impl JournalDB for ArchiveDB {
let mut inserts = 0usize;
let mut deletes = 0usize;
-for i in self.overlay.drain().into_iter() {
+for i in self.overlay.drain() {
let (key, (value, rc)) = i;
if rc > 0 {
batch.put(self.column, &key, &value);
@@ -164,7 +164,7 @@
}
}
-for (mut key, value) in self.overlay.drain_aux().into_iter() {
+for (mut key, value) in self.overlay.drain_aux() {
key.push(AUX_FLAG);
batch.put(self.column, &key, &value);
}
@@ -185,7 +185,7 @@
let mut inserts = 0usize;
let mut deletes = 0usize;
-for i in self.overlay.drain().into_iter() {
+for i in self.overlay.drain() {
let (key, (value, rc)) = i;
if rc > 0 {
if try!(self.backing.get(self.column, &key)).is_some() {
@@ -204,7 +204,7 @@
}
}
-for (mut key, value) in self.overlay.drain_aux().into_iter() {
+for (mut key, value) in self.overlay.drain_aux() {
key.push(AUX_FLAG);
batch.put(self.column, &key, &value);
}


@@ -63,9 +63,11 @@ enum RemoveFrom {
/// the removals actually take effect.
///
/// journal format:
+/// ```
/// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
/// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
/// [era, n] => [ ... ]
+/// ```
///
/// When we make a new commit, we make a journal of all blocks in the recent history and record
/// all keys that were inserted and deleted. The journal is ordered by era; multiple commits can
@@ -80,6 +82,7 @@ enum RemoveFrom {
/// which includes an original key, if any.
///
/// The semantics of the `counter` are:
+/// ```
/// insert key k:
/// counter already contains k: count += 1
/// counter doesn't contain k:
@@ -91,9 +94,11 @@ enum RemoveFrom {
/// count == 1: remove counter
/// count == 0: remove key from backing db
/// counter doesn't contain k: remove key from backing db
+/// ```
///
/// Practically, this means that for each commit block turning from recent to ancient we do the
/// following:
+/// ```
/// is_canonical:
/// inserts: Ignored (left alone in the backing database).
/// deletes: Enacted; however, recent history queue is checked for ongoing references. This is
@@ -102,8 +107,9 @@ enum RemoveFrom {
/// inserts: Reverted; however, recent history queue is checked for ongoing references. This is
/// reduced as a preference to deletion from the backing database.
/// deletes: Ignored (they were never inserted).
+/// ```
///
-/// TODO: store_reclaim_period
+/// TODO: `store_reclaim_period`
pub struct EarlyMergeDB {
overlay: MemoryDB,
backing: Arc<Database>,
@@ -310,7 +316,7 @@ impl HashDB for EarlyMergeDB {
ret.insert(h, 1);
}
-for (key, refs) in self.overlay.keys().into_iter() {
+for (key, refs) in self.overlay.keys() {
let refs = *ret.get(&key).unwrap_or(&0) + refs;
ret.insert(key, refs);
}


@@ -379,7 +379,7 @@ impl HashDB for OverlayRecentDB {
ret.insert(h, 1);
}
-for (key, refs) in self.transaction_overlay.keys().into_iter() {
+for (key, refs) in self.transaction_overlay.keys() {
let refs = *ret.get(&key).unwrap_or(&0) + refs;
ret.insert(key, refs);
}


@@ -36,12 +36,14 @@ use std::env;
/// the removals actually take effect.
///
/// journal format:
+/// ```
/// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
/// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
/// [era, n] => [ ... ]
+/// ```
///
/// when we make a new commit, we journal the inserts and removes.
-/// for each end_era that we journaled that we are no passing by,
+/// for each `end_era` that we journaled that we are no passing by,
/// we remove all of its removes assuming it is canonical and all
/// of its inserts otherwise.
// TODO: store last_era, reclaim_period.


@@ -231,7 +231,7 @@ impl Manager {
trace!(target: "migration", "Total migrations to execute for version {}: {}", version, migrations.len());
if migrations.is_empty() { return Err(Error::MigrationImpossible) };
-let columns = migrations.iter().nth(0).and_then(|m| m.pre_columns());
+let columns = migrations.get(0).and_then(|m| m.pre_columns());
trace!(target: "migration", "Expecting database to contain {:?} columns", columns);
let mut db_config = DatabaseConfig {
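
`migrations.iter().nth(0)` to `migrations.get(0)` fixes clippy's `iter_nth` lint: `get(0)` reaches the first element directly, without constructing an iterator only to advance it zero times. Example:

fn main() {
    let migrations = vec!["to_v9", "to_v10"];
    // iter_nth: `get(0)` expresses "first element, if any" directly.
    let first = migrations.get(0);
    assert_eq!(first, Some(&"to_v9"));
}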


@@ -66,7 +66,7 @@ impl OverlayDB {
pub fn commit_to_batch(&mut self, batch: &mut DBTransaction) -> Result<u32, UtilError> {
let mut ret = 0u32;
let mut deletes = 0usize;
-for i in self.overlay.drain().into_iter() {
+for i in self.overlay.drain() {
let (key, (value, rc)) = i;
if rc != 0 {
match self.payload(&key) {
@@ -133,7 +133,7 @@ impl HashDB for OverlayDB {
ret.insert(h, r as i32);
}
-for (key, refs) in self.overlay.keys().into_iter() {
+for (key, refs) in self.overlay.keys() {
let refs = *ret.get(&key).unwrap_or(&0) + refs;
ret.insert(key, refs);
}


@@ -84,7 +84,7 @@ impl Journal {
pub fn apply(self, db: &mut HashDB) -> Score {
trace!("applying {:?} changes", self.0.len());
let mut ret = Score{inserts: 0, removes: 0};
-for d in self.0.into_iter() {
+for d in self.0 {
match d {
Operation::Delete(h) => {
trace!("TrieDBMut::apply --- {:?}", &h);


@@ -87,7 +87,7 @@ impl<'db> TrieDB<'db> {
/// Convert a vector of hashes to a hashmap of hash to occurrences.
pub fn to_map(hashes: Vec<H256>) -> HashMap<H256, u32> {
let mut r: HashMap<H256, u32> = HashMap::new();
-for h in hashes.into_iter() {
+for h in hashes {
*r.entry(h).or_insert(0) += 1;
}
r
@@ -97,7 +97,7 @@
/// trie.
pub fn db_items_remaining(&self) -> super::Result<HashMap<H256, i32>> {
let mut ret = self.db.keys();
-for (k, v) in Self::to_map(try!(self.keys())).into_iter() {
+for (k, v) in Self::to_map(try!(self.keys())) {
let keycount = *ret.get(&k).unwrap_or(&0);
match keycount <= v as i32 {
true => ret.remove(&k),