Clippy bump (#2877)

* Bumping clippy

* Fixing warnings

* Fix the "fix"
Tomasz Drwięga 2016-10-27 08:28:12 +02:00 committed by Gav Wood
parent 9bfb8094cc
commit 88997801d0
39 changed files with 117 additions and 112 deletions

Cargo.lock (generated)

@@ -3,7 +3,7 @@ name = "parity"
 version = "1.4.0"
 dependencies = [
  "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
  "ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)",
  "daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -145,15 +145,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 [[package]]
 name = "clippy"
-version = "0.0.90"
+version = "0.0.96"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy_lints 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
 name = "clippy_lints"
-version = "0.0.90"
+version = "0.0.96"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -279,7 +279,7 @@ dependencies = [
  "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethash 1.4.0",
@@ -330,7 +330,7 @@ dependencies = [
 name = "ethcore-dapps"
 version = "1.4.0"
 dependencies = [
- "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethabi 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore-devtools 1.4.0",
@@ -473,7 +473,7 @@ dependencies = [
 name = "ethcore-rpc"
 version = "1.4.0"
 dependencies = [
- "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethash 1.4.0",
  "ethcore 1.4.0",
  "ethcore-devtools 1.4.0",
@@ -503,7 +503,7 @@ dependencies = [
 name = "ethcore-signer"
 version = "1.4.0"
 dependencies = [
- "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore-devtools 1.4.0",
  "ethcore-io 1.4.0",
@@ -542,7 +542,7 @@ version = "1.4.0"
 dependencies = [
  "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
- "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
  "elastic-array 0.6.0 (git+https://github.com/ethcore/elastic-array)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)",
@@ -631,7 +631,7 @@ dependencies = [
 name = "ethsync"
 version = "1.4.0"
 dependencies = [
- "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore 1.4.0",
  "ethcore-io 1.4.0",
@@ -1948,8 +1948,8 @@ dependencies = [
 "checksum bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c129aff112dcc562970abb69e2508b40850dd24c274761bb50fb8a0067ba6c27"
 "checksum bytes 0.4.0-dev (git+https://github.com/carllerche/bytes)" = "<none>"
 "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c"
-"checksum clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "d19bda68c3db98e3a780342f6101b44312fef20a5f13ce756d1202a35922b01b"
-"checksum clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "3d4ed67c69b9bb35169be2538691d290a3aa0cbfd4b9f0bfb7c221fc1d399a96"
+"checksum clippy 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)" = "6eacf01b0aad84a0817703498f72d252df7c0faf6a5b86d0be4265f1829e459f"
+"checksum clippy_lints 0.0.96 (registry+https://github.com/rust-lang/crates.io-index)" = "a49960c9aab544ce86b004dcb61620e8b898fea5fc0f697a028f460f48221ed6"
 "checksum cookie 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "90266f45846f14a1e986c77d1e9c2626b8c342ed806fe60241ec38cc8697b245"
 "checksum crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "fb974f835e90390c5f9dfac00f05b06dc117299f5ea4e85fbc7bb443af4911cc"
 "checksum ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)" = "<none>"


@@ -46,7 +46,7 @@ ethcore-logger = { path = "logger" }
 rlp = { path = "util/rlp" }
 ethcore-stratum = { path = "stratum" }
 ethcore-dapps = { path = "dapps", optional = true }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}

 [target.'cfg(windows)'.dependencies]
 winapi = "0.2"
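
clippy here is an optional dependency gated behind each crate's dev feature (the signer manifest further down shows the `dev = ["clippy"]` wiring). In the 2016 plugin era the lints were then switched on from the crate root with conditional attributes; a minimal sketch of that pattern, offered as an assumption rather than a quote from this tree:

```
// lib.rs / main.rs — plugin-era clippy (circa 2016, nightly only).
// Both attributes are no-ops unless built with --features dev.
#![cfg_attr(feature="dev", feature(plugin))]
#![cfg_attr(feature="dev", plugin(clippy))]

fn main() {
    println!("clippy lints are active only under the dev feature");
}
```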


@@ -33,7 +33,7 @@ fetch = { path = "../util/fetch" }
 parity-ui = { path = "./ui" }
 mime_guess = { version = "1.6.1" }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}

 [build-dependencies]
 serde_codegen = { version = "0.8", optional = true }


@@ -11,7 +11,7 @@ build = "build.rs"
 ethcore-ipc-codegen = { path = "../ipc/codegen" }

 [dependencies]
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
 ethcore-devtools = { path = "../devtools" }
 ethcore-ipc = { path = "../ipc/rpc" }
 rocksdb = { git = "https://github.com/ethcore/rust-rocksdb" }


@@ -25,7 +25,7 @@ semver = "0.2"
 bit-set = "0.4"
 time = "0.1"
 evmjit = { path = "../evmjit", optional = true }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
 ethash = { path = "../ethash" }
 ethcore-util = { path = "../util" }
 ethcore-io = { path = "../util/io" }


@@ -267,7 +267,7 @@ impl AccountProvider {
 	/// Returns `true` if the password for `account` is `password`. `false` if not.
 	pub fn test_password(&self, account: &Address, password: String) -> Result<bool, Error> {
-		match self.sstore.sign(&account, &password, &Default::default()) {
+		match self.sstore.sign(account, &password, &Default::default()) {
 			Ok(_) => Ok(true),
 			Err(SSError::InvalidPassword) => Ok(false),
 			Err(e) => Err(Error::SStore(e)),
@@ -276,7 +276,7 @@ impl AccountProvider {
 	/// Changes the password of `account` from `password` to `new_password`. Fails if incorrect `password` given.
 	pub fn change_password(&self, account: &Address, password: String, new_password: String) -> Result<(), Error> {
-		self.sstore.change_password(&account, &password, &new_password).map_err(Error::SStore)
+		self.sstore.change_password(account, &password, &new_password).map_err(Error::SStore)
 	}

 	/// Helper method used for unlocking accounts.
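
Both hunks are the same clippy fix (needless_borrow): `account` already has type `&Address`, so writing `&account` creates a `&&Address` that deref coercion silently unwraps again. A minimal sketch with stand-in types, not the real `sstore` API:

```
struct Address([u8; 20]);

// Stand-in for sstore.sign(account, ...): takes a borrowed address.
fn sign(_account: &Address) -> Result<(), String> {
    Ok(())
}

// `account` is already a reference here, as in test_password().
fn test_password(account: &Address) -> bool {
    // Before: sign(&account) passed a &&Address; clippy's needless_borrow
    // flags the extra layer that deref coercion was hiding.
    sign(account).is_ok() // After: forward the reference unchanged.
}

fn main() {
    assert!(test_password(&Address([0u8; 20])));
}
```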


@@ -542,7 +542,7 @@ pub fn enact(
 	Ok(b.close_and_lock())
 }

-#[inline(always)]
+#[inline]
 #[cfg(not(feature = "slow-blocks"))]
 fn push_transactions(block: &mut OpenBlock, transactions: &[SignedTransaction]) -> Result<(), Error> {
 	for t in transactions {


@@ -414,6 +414,7 @@ impl<'a> Iterator for AncestryIter<'a> {
 }

 impl BlockChain {
+	#[cfg_attr(feature="dev", allow(useless_let_if_seq))]
 	/// Create new instance of blockchain from given Genesis
 	pub fn new(config: Config, genesis: &[u8], db: Arc<Database>) -> BlockChain {
 		// 400 is the avarage size of the key
@@ -565,7 +566,7 @@ impl BlockChain {
 		let range = extras.number as bc::Number .. extras.number as bc::Number;
 		let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
 		let changes = chain.replace(&range, vec![]);
-		for (k, v) in changes.into_iter() {
+		for (k, v) in changes {
 			batch.write(db::COL_EXTRA, &LogGroupPosition::from(k), &BloomGroup::from(v));
 		}
 		batch.put(db::COL_EXTRA, b"best", &hash);
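
Dropping `.into_iter()` is clippy's explicit_into_iter_loop fix, and the same rewrite recurs throughout this commit: a `for` loop already calls `IntoIterator::into_iter` on its argument, so spelling it out is noise. A minimal sketch:

```
fn main() {
    let changes = vec![("bloom-group-0", 1u32), ("bloom-group-1", 2)];

    // Before: `for (k, v) in changes.into_iter() { ... }` — the explicit
    // adapter call is redundant because the loop desugars to it anyway.
    for (k, v) in changes {
        println!("{} -> {}", k, v);
    }
}
```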


@@ -66,7 +66,7 @@ impl<T> CacheManager<T> where T: Eq + Hash {
 	}

 	fn rotate_cache_if_needed(&mut self) {
-		if self.cache_usage.len() == 0 { return }
+		if self.cache_usage.is_empty() { return }
 		if self.cache_usage[0].len() * self.bytes_per_cache_entry > self.pref_cache_size / COLLECTION_QUEUE_SIZE {
 			if let Some(cache) = self.cache_usage.pop_back() {
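
This one is clippy's len_zero lint: comparing `len()` with zero obscures the intent that `is_empty()` states directly. Sketch:

```
fn rotate_cache_if_needed(cache_usage: &[u64]) -> bool {
    // Before: `if cache_usage.len() == 0 { return }` — clippy's len_zero
    // lint prefers the intent-revealing is_empty().
    if cache_usage.is_empty() {
        return false;
    }
    true
}

fn main() {
    assert!(!rotate_cache_if_needed(&[]));
    assert!(rotate_cache_if_needed(&[400]));
}
```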


@@ -314,7 +314,7 @@ impl Client {
 		if let Some(parent) = chain_has_parent {
 			// Enact Verified Block
 			let last_hashes = self.build_last_hashes(header.parent_hash().clone());
-			let db = self.state_db.lock().boxed_clone_canon(&header.parent_hash());
+			let db = self.state_db.lock().boxed_clone_canon(header.parent_hash());

 			let enact_result = enact_verified(block, engine, self.tracedb.read().tracing_enabled(), db, &parent, last_hashes, self.factories.clone());
 			let locked_block = try!(enact_result.map_err(|e| {


@@ -114,7 +114,7 @@ pub trait Writable {
 		R: Deref<Target = [u8]> {
 		match policy {
 			CacheUpdatePolicy::Overwrite => {
-				for (key, value) in values.into_iter() {
+				for (key, value) in values {
 					self.write(col, &key, &value);
 					cache.insert(key, value);
 				}
@@ -135,7 +135,7 @@ pub trait Writable {
 		R: Deref<Target = [u8]> {
 		match policy {
 			CacheUpdatePolicy::Overwrite => {
-				for (key, value) in values.into_iter() {
+				for (key, value) in values {
 					match value {
 						Some(ref v) => self.write(col, &key, v),
 						None => self.delete(col, &key),
@@ -144,7 +144,7 @@ pub trait Writable {
 				}
 			},
 			CacheUpdatePolicy::Remove => {
-				for (key, value) in values.into_iter() {
+				for (key, value) in values {
 					match value {
 						Some(v) => self.write(col, &key, &v),
 						None => self.delete(col, &key),


@@ -54,14 +54,14 @@ const TWO_POW_248: U256 = U256([0, 0, 0, 0x100000000000000]); //0x1 00000000 000

 /// Abstraction over raw vector of Bytes. Easier state management of PC.
 struct CodeReader<'a> {
 	position: ProgramCounter,
-	code: &'a Bytes
+	code: &'a [u8]
 }

 #[cfg_attr(feature="dev", allow(len_without_is_empty))]
 impl<'a> CodeReader<'a> {
 	/// Create new code reader - starting at position 0.
-	fn new(code: &'a Bytes) -> Self {
+	fn new(code: &'a [u8]) -> Self {
 		CodeReader {
 			position: 0,
 			code: code,
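
`Bytes` in parity's util crate is an alias for `Vec<u8>`, so `&'a Bytes` means `&Vec<u8>`; clippy's ptr_arg lint prefers `&[u8]`, which callers can supply from either a vector or a plain slice. A minimal sketch (the alias and function are illustrative, not the EVM code):

```
type Bytes = Vec<u8>; // parity-util's alias: Bytes = Vec<u8>

// After the fix: &[u8] accepts vectors (via deref coercion) and bare
// slices alike, which is why ptr_arg steers away from &Bytes.
fn first_op(code: &[u8]) -> Option<u8> {
    code.first().cloned()
}

fn main() {
    let code: Bytes = vec![0x60, 0x00];
    assert_eq!(first_op(&code), Some(0x60)); // &Vec<u8> coerces to &[u8]
    assert_eq!(first_op(&[0x01]), Some(0x01)); // a bare slice works too
}
```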


@@ -61,7 +61,7 @@ pub fn generate_bloom(source: Arc<Database>, dest: &mut Database) -> Result<(),
 		let account_trie = try!(TrieDB::new(state_db.as_hashdb(), &state_root).map_err(|e| Error::Custom(format!("Cannot open trie: {:?}", e))));
 		for item in try!(account_trie.iter().map_err(|_| Error::MigrationImpossible)) {
 			let (ref account_key, _) = try!(item.map_err(|_| Error::MigrationImpossible));
-			let account_key_hash = H256::from_slice(&account_key);
+			let account_key_hash = H256::from_slice(account_key);
 			bloom.set(&*account_key_hash);
 		}


@@ -362,7 +362,7 @@ impl Miner {
 		{
 			let mut queue = self.transaction_queue.lock();
-			for hash in invalid_transactions.into_iter() {
+			for hash in invalid_transactions {
 				queue.remove_invalid(&hash, &fetch_account);
 			}
 			for hash in transactions_to_penalize {
@@ -522,6 +522,8 @@ impl Miner {
 	/// Are we allowed to do a non-mandatory reseal?
 	fn tx_reseal_allowed(&self) -> bool { Instant::now() > *self.next_allowed_reseal.lock() }

+	#[cfg_attr(feature="dev", allow(wrong_self_convention))]
+	#[cfg_attr(feature="dev", allow(redundant_closure))]
 	fn from_pending_block<H, F, G>(&self, latest_block_number: BlockNumber, from_chain: F, map_block: G) -> H
 		where F: Fn() -> H, G: Fn(&ClosedBlock) -> H {
 		let sealing_work = self.sealing_work.lock();
@@ -885,7 +887,7 @@ impl MinerService for Miner {
 	fn pending_receipts(&self, best_block: BlockNumber) -> BTreeMap<H256, Receipt> {
 		self.from_pending_block(
 			best_block,
-			|| BTreeMap::new(),
+			BTreeMap::new,
 			|pending| {
 				let hashes = pending.transactions()
 					.iter()
@@ -1019,7 +1021,7 @@ impl MinerService for Miner {
 				tx.sender().expect("Transaction is in block, so sender has to be defined.")
 			})
 			.collect::<HashSet<Address>>();
-		for sender in to_remove.into_iter() {
+		for sender in to_remove {
 			transaction_queue.remove_all(sender, chain.latest_nonce(&sender));
 		}
 	});
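
The `|| BTreeMap::new()` → `BTreeMap::new` change is clippy's redundant_closure lint: a closure that only forwards to another zero-argument function can be replaced by the function item itself. A runnable sketch with a hypothetical simplification of `from_pending_block`:

```
use std::collections::BTreeMap;

// Hypothetical simplification of Miner::from_pending_block: falls back
// to `from_chain` when there is no pending block.
fn from_pending_block<H, F: Fn() -> H>(from_chain: F) -> H {
    from_chain()
}

fn main() {
    // Before: from_pending_block(|| BTreeMap::new()) — the closure only
    // wraps BTreeMap::new, which already has the right Fn() -> H shape.
    let receipts: BTreeMap<u64, u64> = from_pending_block(BTreeMap::new);
    assert!(receipts.is_empty());
}
```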


@@ -446,6 +446,7 @@ pub struct AccountDetails {
 const GAS_LIMIT_HYSTERESIS: usize = 10; // (100/GAS_LIMIT_HYSTERESIS) %

 /// Describes the strategy used to prioritize transactions in the queue.
+#[cfg_attr(feature="dev", allow(enum_variant_names))]
 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
 pub enum PrioritizationStrategy {
 	/// Use only gas price. Disregards the actual computation cost of the transaction.


@@ -87,7 +87,7 @@ impl ClientService {
 			db_config.set_cache(::db::COL_STATE, size);
 		}

-		db_config.compaction = config.db_compaction.compaction_profile(&client_path);
+		db_config.compaction = config.db_compaction.compaction_profile(client_path);
 		db_config.wal = config.db_wal;

 		let pruning = config.pruning;


@@ -202,7 +202,7 @@ impl<'a> BlockChunker<'a> {
 			// cut off the chunk if too large.

-			if new_loaded_size > PREFERRED_CHUNK_SIZE && self.rlps.len() > 0 {
+			if new_loaded_size > PREFERRED_CHUNK_SIZE && !self.rlps.is_empty() {
 				try!(self.write_chunk(last));
 				loaded_size = pair.len();
 			} else {


@@ -413,7 +413,7 @@ impl Account {
 		self.code_size = other.code_size;
 		self.address_hash = other.address_hash;
 		let mut cache = self.storage_cache.borrow_mut();
-		for (k, v) in other.storage_cache.into_inner().into_iter() {
+		for (k, v) in other.storage_cache.into_inner() {
 			cache.insert(k.clone() , v.clone()); //TODO: cloning should not be required here
 		}
 		self.storage_changes = other.storage_changes;


@@ -127,11 +127,10 @@ impl AccountEntry {
 	fn overwrite_with(&mut self, other: AccountEntry) {
 		self.state = other.state;
 		match other.account {
-			Some(acc) => match self.account {
-				Some(ref mut ours) => {
+			Some(acc) => {
+				if let Some(ref mut ours) = self.account {
 					ours.overwrite_with(acc);
-				},
-				None => {},
+				}
 			},
 			None => self.account = None,
 		}
@@ -281,13 +280,10 @@ impl State {
 					}
 				},
 				None => {
-					match self.cache.get_mut().entry(k) {
-						Entry::Occupied(e) => {
-							if e.get().is_dirty() {
-								e.remove();
-							}
-						},
-						_ => {}
+					if let Entry::Occupied(e) = self.cache.get_mut().entry(k) {
+						if e.get().is_dirty() {
+							e.remove();
+						}
 					}
 				}
 			}
@@ -501,6 +497,7 @@ impl State {
 	/// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit.
 	/// `accounts` is mutable because we may need to commit the code or storage and record that.
 	#[cfg_attr(feature="dev", allow(match_ref_pats))]
+	#[cfg_attr(feature="dev", allow(needless_borrow))]
 	fn commit_into(
 		factories: &Factories,
 		db: &mut StateDB,
@@ -509,17 +506,14 @@ impl State {
 	) -> Result<(), Error> {
 		// first, commit the sub trees.
 		for (address, ref mut a) in accounts.iter_mut().filter(|&(_, ref a)| a.is_dirty()) {
-			match a.account {
-				Some(ref mut account) => {
-					if !account.is_empty() {
-						db.note_account_bloom(&address);
-					}
-					let addr_hash = account.address_hash(address);
-					let mut account_db = factories.accountdb.create(db.as_hashdb_mut(), addr_hash);
-					account.commit_storage(&factories.trie, account_db.as_hashdb_mut());
-					account.commit_code(account_db.as_hashdb_mut());
-				}
-				_ => {}
-			}
+			if let Some(ref mut account) = a.account {
+				if !account.is_empty() {
+					db.note_account_bloom(address);
+				}
+				let addr_hash = account.address_hash(address);
+				let mut account_db = factories.accountdb.create(db.as_hashdb_mut(), addr_hash);
+				account.commit_storage(&factories.trie, account_db.as_hashdb_mut());
+				account.commit_code(account_db.as_hashdb_mut());
+			}
 		}
@@ -586,7 +580,7 @@ impl State {
 	fn query_pod(&mut self, query: &PodState) {
 		for (address, pod_account) in query.get().into_iter()
-			.filter(|&(ref a, _)| self.ensure_cached(a, RequireCache::Code, true, |a| a.is_some()))
+			.filter(|&(a, _)| self.ensure_cached(a, RequireCache::Code, true, |a| a.is_some()))
 		{
 			// needs to be split into two parts for the refcell code here
 			// to work.
@@ -679,14 +673,12 @@ impl State {
 			None => {
 				let maybe_acc = if self.db.check_account_bloom(a) {
 					let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
-					let maybe_acc = match db.get(a) {
+					match db.get(a) {
 						Ok(Some(acc)) => AccountEntry::new_clean(Some(Account::from_rlp(&acc))),
 						Ok(None) => AccountEntry::new_clean(None),
 						Err(e) => panic!("Potential DB corruption encountered: {}", e),
-					};
-					maybe_acc
-				}
-				else {
+					}
+				} else {
 					AccountEntry::new_clean(None)
 				};
 				self.insert_cache(a, maybe_acc);
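
Several of these hunks collapse a two-arm `match` whose catch-all arm does nothing into `if let`, which is what clippy's single_match lint suggests; the `let maybe_acc = match ...; maybe_acc` rewrite also drops a useless intermediate binding. A minimal sketch of the match-to-if-let pattern:

```
fn main() {
    let account: Option<&str> = Some("dirty-account");

    // Before: a two-arm match whose `_ => {}` arm does nothing —
    // exactly the shape clippy's single_match rewrites.
    match account {
        Some(acc) => println!("committing {}", acc),
        _ => {}
    }

    // After: if let says "act only on Some" directly.
    if let Some(acc) = account {
        println!("committing {}", acc);
    }
}
```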


@@ -170,7 +170,7 @@ impl StateDB {
 	pub fn commit_bloom(batch: &mut DBTransaction, journal: BloomJournal) -> Result<(), UtilError> {
 		assert!(journal.hash_functions <= 255);
-		batch.put(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY, &vec![journal.hash_functions as u8]);
+		batch.put(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY, &[journal.hash_functions as u8]);
 		let mut key = [0u8; 8];
 		let mut val = [0u8; 8];
@@ -216,7 +216,7 @@ impl StateDB {
 		let mut clear = false;
 		for block in enacted.iter().filter(|h| self.commit_hash.as_ref().map_or(true, |p| *h != p)) {
 			clear = clear || {
-				if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) {
+				if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) {
 					trace!("Reverting enacted block {:?}", block);
 					m.is_canon = true;
 					for a in &m.accounts {
@@ -232,7 +232,7 @@ impl StateDB {
 		for block in retracted {
 			clear = clear || {
-				if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) {
+				if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) {
 					trace!("Retracting block {:?}", block);
 					m.is_canon = false;
 					for a in &m.accounts {
@@ -286,7 +286,7 @@ impl StateDB {
 				is_canon: is_best,
 				parent: parent.clone(),
 			};
-			let insert_at = cache.modifications.iter().enumerate().find(|&(_, ref m)| m.number < *number).map(|(i, _)| i);
+			let insert_at = cache.modifications.iter().enumerate().find(|&(_, m)| m.number < *number).map(|(i, _)| i);
 			trace!("inserting modifications at {:?}", insert_at);
 			if let Some(insert_at) = insert_at {
 				cache.modifications.insert(insert_at, block_changes);
@@ -369,7 +369,7 @@ impl StateDB {
 			if !Self::is_allowed(addr, &self.parent_hash, &cache.modifications) {
 				return None;
 			}
-			cache.accounts.get_mut(&addr).map(|a| a.as_ref().map(|a| a.clone_basic()))
+			cache.accounts.get_mut(addr).map(|a| a.as_ref().map(|a| a.clone_basic()))
 	}

 	/// Get value from a cached account.
@@ -406,8 +406,7 @@ impl StateDB {
 		// We search for our parent in that list first and then for
 		// all its parent until we hit the canonical block,
 		// checking against all the intermediate modifications.
-		let mut iter = modifications.iter();
-		while let Some(ref m) = iter.next() {
+		for m in modifications {
 			if &m.hash == parent {
 				if m.is_canon {
 					return true;
@@ -420,7 +419,7 @@ impl StateDB {
 			}
 		}
 		trace!("Cache lookup skipped for {:?}: parent hash is unknown", addr);
-		return false;
+		false
 	}
 }
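
Two lints meet in the last hunk: while_let_on_iterator (driving an iterator by hand with `while let Some(..) = iter.next()`) and needless_return (a trailing `return expr;` where a tail expression will do). A runnable sketch with hypothetical stand-in types:

```
struct BlockModification { hash: u64, is_canon: bool }

fn is_allowed(modifications: &[BlockModification], parent: u64) -> bool {
    // Before:
    //   let mut iter = modifications.iter();
    //   while let Some(ref m) = iter.next() { ... }
    // After: a plain for loop, with the final `return false;`
    // rewritten as the tail expression `false`.
    for m in modifications {
        if m.hash == parent {
            return m.is_canon;
        }
    }
    false
}

fn main() {
    let mods = [BlockModification { hash: 1, is_canon: true }];
    assert!(is_allowed(&mods, 1));
    assert!(!is_allowed(&mods, 2));
}
```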


@@ -285,7 +285,7 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
 		let mut blooms = self.blooms.write();
 		batch.extend_with_cache(db::COL_TRACE, &mut *blooms, blooms_to_insert, CacheUpdatePolicy::Remove);
 		// note_used must be called after locking blooms to avoid cache/traces deadlock on garbage collection
-		for key in blooms_keys.into_iter() {
+		for key in blooms_keys {
 			self.note_used(CacheID::Bloom(key));
 		}
 	}


@@ -50,7 +50,7 @@ fn prefix_subtrace_addresses(mut traces: Vec<FlatTrace>) -> Vec<FlatTrace> {
 	// [1, 0]
 	let mut current_subtrace_index = 0;
 	let mut first = true;
-	for trace in traces.iter_mut() {
+	for trace in &mut traces {
 		match (first, trace.trace_address.is_empty()) {
 			(true, _) => first = false,
 			(_, true) => current_subtrace_index += 1,
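
`traces.iter_mut()` → `&mut traces` is clippy's explicit_iter_loop preference: borrow in the loop head instead of spelling out the iterator adapter. Sketch:

```
fn main() {
    let mut traces = vec![0u32, 0, 1];

    // Before: `for trace in traces.iter_mut() { ... }`.
    // After: borrow mutably in the loop head; same iteration, less noise.
    for trace in &mut traces {
        *trace += 1; // mutate in place through the &mut borrow
    }

    assert_eq!(traces, vec![1, 1, 2]);
}
```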


@@ -10,7 +10,7 @@ rustc-serialize = "0.3"
 serde = "0.8"
 serde_json = "0.8"
 serde_macros = { version = "0.8", optional = true }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}

 [build-dependencies]
 serde_codegen = { version = "0.8", optional = true }


@@ -29,7 +29,7 @@ fetch = { path = "../util/fetch" }
 rustc-serialize = "0.3"
 transient-hashmap = "0.1"
 serde_macros = { version = "0.8.0", optional = true }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
 json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" }
 ethcore-ipc = { path = "../ipc/rpc" }
 time = "0.1"


@@ -23,7 +23,7 @@ ethcore-rpc = { path = "../rpc" }
 ethcore-devtools = { path = "../devtools" }
 parity-ui = { path = "../dapps/ui", version = "1.4", optional = true }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}

 [features]
 dev = ["clippy"]


@@ -99,6 +99,7 @@ impl<T: TimeProvider> AuthCodes<T> {
 	}

 	/// Checks if given hash is correct identifier of `SignerUI`
+	#[cfg_attr(feature="dev", allow(wrong_self_convention))]
 	pub fn is_valid(&mut self, hash: &H256, time: u64) -> bool {
 		let now = self.now.now();
 		// check time


@@ -17,7 +17,7 @@ ethcore-network = { path = "../util/network" }
 ethcore-io = { path = "../util/io" }
 ethcore = { path = "../ethcore" }
 rlp = { path = "../util/rlp" }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
 log = "0.3"
 env_logger = "0.3"
 time = "0.1.34"


@@ -114,7 +114,7 @@ impl BlockCollection {
 	/// Insert a set of headers into collection and advance subchain head pointers.
 	pub fn insert_headers(&mut self, headers: Vec<Bytes>) {
-		for h in headers.into_iter() {
+		for h in headers {
 			if let Err(e) = self.insert_header(h) {
 				trace!(target: "sync", "Ignored invalid header: {:?}", e);
 			}
@@ -125,7 +125,7 @@ impl BlockCollection {
 	/// Insert a collection of block bodies for previously downloaded headers.
 	pub fn insert_bodies(&mut self, bodies: Vec<Bytes>) -> usize {
 		let mut inserted = 0;
-		for b in bodies.into_iter() {
+		for b in bodies {
 			if let Err(e) = self.insert_body(b) {
 				trace!(target: "sync", "Ignored invalid body: {:?}", e);
 			} else {
@@ -141,7 +141,7 @@ impl BlockCollection {
 			return 0;
 		}
 		let mut inserted = 0;
-		for r in receipts.into_iter() {
+		for r in receipts {
 			if let Err(e) = self.insert_receipt(r) {
 				trace!(target: "sync", "Ignored invalid receipt: {:?}", e);
 			} else {


@@ -381,7 +381,7 @@ impl ChainSync {
 	/// Returns information on peers connections
 	pub fn peers(&self, io: &SyncIo) -> Vec<PeerInfoDigest> {
 		self.peers.iter()
-			.filter_map(|(&peer_id, ref peer_data)|
+			.filter_map(|(&peer_id, peer_data)|
 				io.peer_session_info(peer_id).map(|session_info|
 					PeerInfoDigest {
 						id: session_info.id.map(|id| id.hex()),
@@ -1017,7 +1017,7 @@ impl ChainSync {
 			return;
 		}
 		let (peer_latest, peer_difficulty, peer_snapshot_number, peer_snapshot_hash) = {
-			if let Some(ref peer) = self.peers.get_mut(&peer_id) {
+			if let Some(peer) = self.peers.get_mut(&peer_id) {
 				if peer.asking != PeerAsking::Nothing || !peer.can_sync() {
 					return;
 				}
@@ -1142,6 +1142,7 @@ impl ChainSync {
 	}

 	/// Checks if there are blocks fully downloaded that can be imported into the blockchain and does the import.
+	#[cfg_attr(feature="dev", allow(block_in_if_condition_stmt))]
 	fn collect_blocks(&mut self, io: &mut SyncIo, block_set: BlockSet) {
 		match block_set {
 			BlockSet::NewBlocks => {
@@ -1150,9 +1151,9 @@ impl ChainSync {
 				}
 			},
 			BlockSet::OldBlocks => {
 				if self.old_blocks.as_mut().map_or(false, |downloader| { downloader.collect_blocks(io, false) == Err(DownloaderImportError::Invalid) }) {
 					self.restart(io);
 				} else if self.old_blocks.as_ref().map_or(false, |downloader| { downloader.is_complete() }) {
 					trace!(target: "sync", "Background block download is complete");
 					self.old_blocks = None;
 				}
@@ -1242,7 +1243,7 @@ impl ChainSync {
 				return true;
 			}
 		}
-		return false;
+		false
 	}

 	/// Generic request sender
@@ -1370,7 +1371,7 @@ impl ChainSync {
 		while number <= last && count < max_count {
 			if let Some(hdr) = overlay.get(&number) {
 				trace!(target: "sync", "{}: Returning cached fork header", peer_id);
-				data.extend(hdr);
+				data.extend_from_slice(hdr);
 				count += 1;
 			} else if let Some(mut hdr) = io.chain().block_header(BlockID::Number(number)) {
 				data.append(&mut hdr);
@@ -1707,7 +1708,7 @@ impl ChainSync {
 				self.send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp);
 			}
 		}
-		if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
+		if let Some(ref mut peer) = self.peers.get_mut(peer_id) {
 			peer.latest_hash = chain_info.best_block_hash.clone();
 		}
 		sent += 1;
@@ -1725,7 +1726,7 @@ impl ChainSync {
 		sent += match ChainSync::create_new_hashes_rlp(io.chain(), &last_parent, &chain_info.best_block_hash) {
 			Some(rlp) => {
 				{
-					if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
+					if let Some(ref mut peer) = self.peers.get_mut(peer_id) {
 						peer.latest_hash = chain_info.best_block_hash.clone();
 					}
 				}
@@ -1793,7 +1794,7 @@ impl ChainSync {
 		// Send RLPs
 		let sent = lucky_peers.len();
 		if sent > 0 {
-			for (peer_id, rlp) in lucky_peers.into_iter() {
+			for (peer_id, rlp) in lucky_peers {
 				self.send_packet(io, peer_id, TRANSACTIONS_PACKET, rlp);
 			}
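
`data.extend(hdr)` → `data.extend_from_slice(hdr)` is clippy's extend_from_slice lint: for byte slices the dedicated method states the intent and lets the standard library do a bulk copy instead of feeding the generic `Extend` machinery one element at a time. Sketch:

```
fn main() {
    let hdr: &[u8] = &[0xf9, 0x02, 0x1c]; // stand-in for a cached header
    let mut data: Vec<u8> = Vec::new();

    // Before: data.extend(hdr) goes through the Extend trait, element
    // by element. After: the slice-specific method copies in bulk.
    data.extend_from_slice(hdr);

    assert_eq!(data, hdr);
}
```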


@@ -23,7 +23,7 @@ rlp = { path = "rlp" }
 heapsize = { version = "0.3", features = ["unstable"] }
 itertools = "0.4"
 sha3 = { path = "sha3" }
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
 ethcore-devtools = { path = "../devtools" }
 libc = "0.2.7"
 vergen = "0.1"


@@ -14,7 +14,7 @@ time = "0.1.34"
 tiny-keccak = "1.0"
 rust-crypto = "0.2.34"
 slab = "0.2"
-clippy = { version = "0.0.90", optional = true}
+clippy = { version = "0.0.96", optional = true}
 igd = "0.5.0"
 libc = "0.2.7"
 parking_lot = "0.3"


@@ -78,7 +78,7 @@ impl HashDB for ArchiveDB {
 			ret.insert(h, 1);
 		}

-		for (key, refs) in self.overlay.keys().into_iter() {
+		for (key, refs) in self.overlay.keys() {
 			let refs = *ret.get(&key).unwrap_or(&0) + refs;
 			ret.insert(key, refs);
 		}
@@ -152,7 +152,7 @@ impl JournalDB for ArchiveDB {
 		let mut inserts = 0usize;
 		let mut deletes = 0usize;

-		for i in self.overlay.drain().into_iter() {
+		for i in self.overlay.drain() {
 			let (key, (value, rc)) = i;
 			if rc > 0 {
 				batch.put(self.column, &key, &value);
@@ -164,7 +164,7 @@ impl JournalDB for ArchiveDB {
 			}
 		}

-		for (mut key, value) in self.overlay.drain_aux().into_iter() {
+		for (mut key, value) in self.overlay.drain_aux() {
 			key.push(AUX_FLAG);
 			batch.put(self.column, &key, &value);
 		}
@@ -185,7 +185,7 @@ impl JournalDB for ArchiveDB {
 		let mut inserts = 0usize;
 		let mut deletes = 0usize;

-		for i in self.overlay.drain().into_iter() {
+		for i in self.overlay.drain() {
 			let (key, (value, rc)) = i;
 			if rc > 0 {
 				if try!(self.backing.get(self.column, &key)).is_some() {
@@ -204,7 +204,7 @@ impl JournalDB for ArchiveDB {
 			}
 		}

-		for (mut key, value) in self.overlay.drain_aux().into_iter() {
+		for (mut key, value) in self.overlay.drain_aux() {
 			key.push(AUX_FLAG);
 			batch.put(self.column, &key, &value);
 		}


@@ -63,9 +63,11 @@ enum RemoveFrom {
 /// the removals actually take effect.
 ///
 /// journal format:
+/// ```
 /// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
 /// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
 /// [era, n] => [ ... ]
+/// ```
 ///
 /// When we make a new commit, we make a journal of all blocks in the recent history and record
 /// all keys that were inserted and deleted. The journal is ordered by era; multiple commits can
@@ -80,6 +82,7 @@ enum RemoveFrom {
 /// which includes an original key, if any.
 ///
 /// The semantics of the `counter` are:
+/// ```
 /// insert key k:
 /// counter already contains k: count += 1
 /// counter doesn't contain k:
@@ -91,9 +94,11 @@ enum RemoveFrom {
 /// count == 1: remove counter
 /// count == 0: remove key from backing db
 /// counter doesn't contain k: remove key from backing db
+/// ```
 ///
 /// Practically, this means that for each commit block turning from recent to ancient we do the
 /// following:
+/// ```
 /// is_canonical:
 /// inserts: Ignored (left alone in the backing database).
 /// deletes: Enacted; however, recent history queue is checked for ongoing references. This is
@@ -102,8 +107,9 @@ enum RemoveFrom {
 /// inserts: Reverted; however, recent history queue is checked for ongoing references. This is
 /// reduced as a preference to deletion from the backing database.
 /// deletes: Ignored (they were never inserted).
+/// ```
 ///
-/// TODO: store_reclaim_period
+/// TODO: `store_reclaim_period`
 pub struct EarlyMergeDB {
 	overlay: MemoryDB,
 	backing: Arc<Database>,
@@ -310,7 +316,7 @@ impl HashDB for EarlyMergeDB {
 			ret.insert(h, 1);
 		}

-		for (key, refs) in self.overlay.keys().into_iter() {
+		for (key, refs) in self.overlay.keys() {
 			let refs = *ret.get(&key).unwrap_or(&0) + refs;
 			ret.insert(key, refs);
 		}
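
The doc-comment edits wrap the ASCII journal-format and counter pseudocode in fences and put `store_reclaim_period` in backticks; presumably this quiets clippy's doc_markdown lint, which wants identifiers rendered as code and non-prose text kept out of the markdown flow. A tiny sketch with a hypothetical item:

```
/// Journal database sketch (hypothetical stand-in for EarlyMergeDB).
///
/// clippy's doc_markdown lint drives both edits above: identifiers such
/// as `store_reclaim_period` belong in backticks, and the ASCII journal
/// diagrams belong inside fenced code blocks so the lint's markdown
/// parser does not try to read them as prose.
pub struct JournalSketch;

fn main() {
    let _db = JournalSketch;
}
```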


@@ -379,7 +379,7 @@ impl HashDB for OverlayRecentDB {
 			ret.insert(h, 1);
 		}

-		for (key, refs) in self.transaction_overlay.keys().into_iter() {
+		for (key, refs) in self.transaction_overlay.keys() {
 			let refs = *ret.get(&key).unwrap_or(&0) + refs;
 			ret.insert(key, refs);
 		}


@@ -36,12 +36,14 @@ use std::env;
 /// the removals actually take effect.
 ///
 /// journal format:
+/// ```
 /// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
 /// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
 /// [era, n] => [ ... ]
+/// ```
 ///
 /// when we make a new commit, we journal the inserts and removes.
-/// for each end_era that we journaled that we are no passing by,
+/// for each `end_era` that we journaled that we are no passing by,
 /// we remove all of its removes assuming it is canonical and all
 /// of its inserts otherwise.
 // TODO: store last_era, reclaim_period.


@@ -231,7 +231,7 @@ impl Manager {
 		trace!(target: "migration", "Total migrations to execute for version {}: {}", version, migrations.len());
 		if migrations.is_empty() { return Err(Error::MigrationImpossible) };

-		let columns = migrations.iter().nth(0).and_then(|m| m.pre_columns());
+		let columns = migrations.get(0).and_then(|m| m.pre_columns());
 		trace!(target: "migration", "Expecting database to contain {:?} columns", columns);

 		let mut db_config = DatabaseConfig {
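
`iter().nth(0)` → `get(0)` is clippy's iter_nth suggestion: building an iterator just to take the first element is indirection the slice API already covers. Sketch:

```
fn main() {
    let migrations = vec!["v9", "v10"];

    // Before: iter().nth(0) constructs an iterator only to take its head.
    let first_via_iter = migrations.iter().nth(0);

    // After: slice::get(0) is direct and returns the same Option<&T>.
    let first = migrations.get(0);

    assert_eq!(first_via_iter, first);
}
```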


@@ -66,7 +66,7 @@ impl OverlayDB {
 	pub fn commit_to_batch(&mut self, batch: &mut DBTransaction) -> Result<u32, UtilError> {
 		let mut ret = 0u32;
 		let mut deletes = 0usize;
-		for i in self.overlay.drain().into_iter() {
+		for i in self.overlay.drain() {
 			let (key, (value, rc)) = i;
 			if rc != 0 {
 				match self.payload(&key) {
@@ -133,7 +133,7 @@ impl HashDB for OverlayDB {
 			ret.insert(h, r as i32);
 		}

-		for (key, refs) in self.overlay.keys().into_iter() {
+		for (key, refs) in self.overlay.keys() {
 			let refs = *ret.get(&key).unwrap_or(&0) + refs;
 			ret.insert(key, refs);
 		}


@@ -84,7 +84,7 @@ impl Journal {
 	pub fn apply(self, db: &mut HashDB) -> Score {
 		trace!("applying {:?} changes", self.0.len());
 		let mut ret = Score{inserts: 0, removes: 0};
-		for d in self.0.into_iter() {
+		for d in self.0 {
 			match d {
 				Operation::Delete(h) => {
 					trace!("TrieDBMut::apply --- {:?}", &h);


@@ -87,7 +87,7 @@ impl<'db> TrieDB<'db> {
 	/// Convert a vector of hashes to a hashmap of hash to occurrences.
 	pub fn to_map(hashes: Vec<H256>) -> HashMap<H256, u32> {
 		let mut r: HashMap<H256, u32> = HashMap::new();
-		for h in hashes.into_iter() {
+		for h in hashes {
 			*r.entry(h).or_insert(0) += 1;
 		}
 		r
@@ -97,7 +97,7 @@ impl<'db> TrieDB<'db> {
 	/// trie.
 	pub fn db_items_remaining(&self) -> super::Result<HashMap<H256, i32>> {
 		let mut ret = self.db.keys();
-		for (k, v) in Self::to_map(try!(self.keys())).into_iter() {
+		for (k, v) in Self::to_map(try!(self.keys())) {
 			let keycount = *ret.get(&k).unwrap_or(&0);
 			match keycount <= v as i32 {
 				true => ret.remove(&k),