Fixing clippy warnings. Building on nightly is now required to enable clippy.

Tomasz Drwięga
2016-03-07 14:33:00 +01:00
parent 3153d12bd9
commit cbc2c0cf0c
41 changed files with 272 additions and 130 deletions
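The attribute changes in this commit gate clippy behind a `nightly` cfg flag in addition to the `dev` feature. Such a flag is not built into rustc; one plausible way to emit it is from a build script using the `rustc_version` crate. This is an assumption for illustration only, since the diff below does not show how the flag is actually defined:

// build.rs (sketch, assuming `rustc_version` as a build-dependency): emit a
// `nightly` cfg so that attributes gated on `all(nightly, feature="dev")`
// only take effect when compiling with a nightly toolchain.
extern crate rustc_version;
use rustc_version::{version_meta, Channel};

fn main() {
    if version_meta().unwrap().channel == Channel::Nightly {
        println!("cargo:rustc-cfg=nightly");
    }
}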

View File

@@ -305,7 +305,7 @@ macro_rules! impl_hash {
}
impl Copy for $from {}
#[cfg_attr(feature="dev", allow(expl_impl_clone_on_copy))]
#[cfg_attr(all(nightly, feature="dev"), allow(expl_impl_clone_on_copy))]
impl Clone for $from {
fn clone(&self) -> $from {
unsafe {
@@ -637,7 +637,7 @@ mod tests {
use std::str::FromStr;
#[test]
#[cfg_attr(feature="dev", allow(eq_op))]
#[cfg_attr(all(nightly, feature="dev"), allow(eq_op))]
fn hash() {
let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]);
assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h);

View File

@@ -27,7 +27,7 @@ use std::env;
/// Implementation of the HashDB trait for a disk-backed database with a memory overlay
/// and latent-removal semantics.
///
/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
/// the removals actually take effect.
@@ -158,7 +158,7 @@ impl JournalDB {
backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some()
}
fn insert_keys(inserts: &Vec<(H256, Bytes)>, backing: &Database, counters: &mut HashMap<H256, i32>, batch: &DBTransaction) {
fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, counters: &mut HashMap<H256, i32>, batch: &DBTransaction) {
for &(ref h, ref d) in inserts {
if let Some(c) = counters.get_mut(h) {
// already counting. increment.
@@ -181,7 +181,7 @@ impl JournalDB {
}
}
fn replay_keys(inserts: &Vec<H256>, backing: &Database, counters: &mut HashMap<H256, i32>) {
fn replay_keys(inserts: &[H256], backing: &Database, counters: &mut HashMap<H256, i32>) {
println!("replay_keys: inserts={:?}, counters={:?}", inserts, counters);
for h in inserts {
if let Some(c) = counters.get_mut(h) {
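The `insert_keys` and `replay_keys` signature changes above swap `&Vec<T>` for `&[T]`, which is what clippy's `ptr_arg` lint asks for: a slice avoids a needless double indirection and accepts anything that coerces to it. A minimal sketch of the pattern, with hypothetical names:

// A `&Vec<i32>` parameter forces callers to have a Vec; a slice does not,
// and `&vec` still coerces to `&[i32]` at the call site.
fn sum(values: &[i32]) -> i32 {
    values.iter().sum()
}

fn main() {
    let v = vec![1, 2, 3];
    assert_eq!(sum(&v), 6);      // &Vec<i32> coerces to &[i32]
    assert_eq!(sum(&[4, 5]), 9); // a fixed-size array works too
}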
@@ -211,12 +211,12 @@ impl JournalDB {
n = Some(*c);
}
}
match &n {
&Some(i) if i == 1 => {
match n {
Some(i) if i == 1 => {
counters.remove(&h);
Self::reset_already_in(batch, &h);
}
&None => {
None => {
// Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs.
//assert!(!Self::is_already_in(db, &h));
batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?");
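The rewritten match above drops the reference patterns: matching on `n` by value lets the arms read `Some(i)` / `None` instead of `&Some(i)` / `&None`, the style clippy's `match_ref_pats` lint suggests when the contents are `Copy`. A standalone sketch of the same idiom, with a hypothetical function:

fn describe(n: Option<i32>) -> &'static str {
    // Match the Option by value; i32 is Copy, so nothing is moved out of place.
    match n {
        Some(i) if i == 1 => "exactly one",
        Some(_) => "some other value",
        None => "no value",
    }
}

fn main() {
    assert_eq!(describe(Some(1)), "exactly one");
    assert_eq!(describe(None), "no value");
}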
@@ -229,7 +229,7 @@ impl JournalDB {
/// Commit all recent insert operations and historical removals from the old era
/// to the backing database.
fn commit_with_counters(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
// journal format:
// journal format:
// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
// [era, n] => [ ... ]
@@ -242,12 +242,12 @@ impl JournalDB {
// By the time comes to remove a tuple from the queue (i.e. then the era passes from recent history
// into ancient history) then only one commit from the tuple is considered canonical. This commit
// is kept in the main backing database, whereas any others from the same era are reverted.
//
//
// It is possible that a key, properly available in the backing database be deleted and re-inserted
// in the recent history queue, yet have both operations in commits that are eventually non-canonical.
// To avoid the original, and still required, key from being deleted, we maintain a reference count
// which includes an original key, if any.
//
//
// The semantics of the `counter` are:
// insert key k:
// counter already contains k: count += 1
@@ -255,7 +255,7 @@ impl JournalDB {
// backing db contains k: count = 1
// backing db doesn't contain k: insert into backing db, count = 0
// delete key k:
// counter contains k (count is asserted to be non-zero):
// counter contains k (count is asserted to be non-zero):
// count > 1: counter -= 1
// count == 1: remove counter
// count == 0: remove key from backing db
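The counter rules spelled out in the comment above amount to a small state machine per key. The following is a simplified, self-contained sketch of those rules, with a `HashSet` standing in for the backing database and hypothetical helper names; the real `JournalDB` code operates on journal records and a `DBTransaction` instead:

use std::collections::{HashMap, HashSet};

fn insert_key(k: u64, counters: &mut HashMap<u64, i32>, backing: &mut HashSet<u64>) {
    if let Some(c) = counters.get_mut(&k) {
        *c += 1;                        // counter already contains k: count += 1
    } else if backing.contains(&k) {
        counters.insert(k, 1);          // backing db contains k: count = 1
    } else {
        backing.insert(k);              // backing db doesn't contain k: insert, count = 0
        counters.insert(k, 0);
    }
}

fn delete_key(k: u64, counters: &mut HashMap<u64, i32>, backing: &mut HashSet<u64>) {
    match counters.get(&k).cloned() {
        Some(c) if c > 1 => { counters.insert(k, c - 1); }  // count > 1: counter -= 1
        Some(c) if c == 1 => { counters.remove(&k); }       // count == 1: remove counter
        _ => { counters.remove(&k); backing.remove(&k); }   // count == 0: remove from backing db
    }
}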
@@ -274,7 +274,7 @@ impl JournalDB {
//
// record new commit's details.
trace!("commit: #{} ({}), end era: {:?}", now, id, end);
trace!("commit: #{} ({}), end era: {:?}", now, id, end);
let mut counters = self.counters.as_ref().unwrap().write().unwrap();
let batch = DBTransaction::new();
{
@@ -295,7 +295,7 @@ impl JournalDB {
let drained = self.overlay.drain();
let removes: Vec<H256> = drained
.iter()
.filter_map(|(ref k, &(_, ref c))| if *c < 0 {Some(k.clone())} else {None}).cloned()
.filter_map(|(k, &(_, c))| if c < 0 {Some(k.clone())} else {None})
.collect();
let inserts: Vec<(H256, Bytes)> = drained
.into_iter()
@@ -382,12 +382,15 @@ impl JournalDB {
/// Returns heap memory size used
pub fn mem_used(&self) -> usize {
self.overlay.mem_used() + match &self.counters { &Some(ref c) => c.read().unwrap().heap_size_of_children(), &None => 0 }
self.overlay.mem_used() + match self.counters {
Some(ref c) => c.read().unwrap().heap_size_of_children(),
None => 0
}
}
}
impl HashDB for JournalDB {
fn keys(&self) -> HashMap<H256, i32> {
fn keys(&self) -> HashMap<H256, i32> {
let mut ret: HashMap<H256, i32> = HashMap::new();
for (key, _) in self.backing.iter() {
let h = H256::from_slice(key.deref());
@@ -401,7 +404,7 @@ impl HashDB for JournalDB {
ret
}
fn lookup(&self, key: &H256) -> Option<&[u8]> {
fn lookup(&self, key: &H256) -> Option<&[u8]> {
let k = self.overlay.raw(key);
match k {
Some(&(ref d, rc)) if rc > 0 => Some(d),
@@ -416,18 +419,18 @@ impl HashDB for JournalDB {
}
}
fn exists(&self, key: &H256) -> bool {
fn exists(&self, key: &H256) -> bool {
self.lookup(key).is_some()
}
fn insert(&mut self, value: &[u8]) -> H256 {
fn insert(&mut self, value: &[u8]) -> H256 {
self.overlay.insert(value)
}
fn emplace(&mut self, key: H256, value: Bytes) {
self.overlay.emplace(key, value);
self.overlay.emplace(key, value);
}
fn kill(&mut self, key: &H256) {
self.overlay.kill(key);
fn kill(&mut self, key: &H256) {
self.overlay.kill(key);
}
}

View File

@@ -55,8 +55,7 @@ pub struct DatabaseIterator<'a> {
impl<'a> Iterator for DatabaseIterator<'a> {
type Item = (Box<[u8]>, Box<[u8]>);
#[cfg_attr(feature="dev", allow(type_complexity))]
fn next(&mut self) -> Option<(Box<[u8]>, Box<[u8]>)> {
fn next(&mut self) -> Option<Self::Item> {
self.iter.next()
}
}
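The `next` signature above now returns `Option<Self::Item>` instead of restating the boxed tuple type, which also lets the `type_complexity` allow be dropped. A minimal standalone sketch of the same idiom, with a hypothetical iterator type:

struct Pairs {
    inner: std::vec::IntoIter<(u32, u32)>,
}

impl Iterator for Pairs {
    type Item = (u32, u32);

    // Naming `Self::Item` keeps the signature tied to the associated type
    // rather than repeating a potentially complex concrete type.
    fn next(&mut self) -> Option<Self::Item> {
        self.inner.next()
    }
}

fn main() {
    let p = Pairs { inner: vec![(1, 2), (3, 4)].into_iter() };
    assert_eq!(p.count(), 2);
}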

View File

@@ -15,18 +15,18 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![warn(missing_docs)]
#![cfg_attr(feature="dev", feature(plugin))]
#![cfg_attr(feature="dev", plugin(clippy))]
#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]
// Clippy settings
// TODO [todr] not really sure
#![cfg_attr(feature="dev", allow(needless_range_loop))]
#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))]
// Shorter than if-else
#![cfg_attr(feature="dev", allow(match_bool))]
#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))]
// We use that to be more explicit about handled cases
#![cfg_attr(feature="dev", allow(match_same_arms))]
#![cfg_attr(all(nightly, feature="dev"), allow(match_same_arms))]
// Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
#![cfg_attr(feature="dev", allow(clone_on_copy))]
#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))]
//! Ethcore-util library
//!

View File

@@ -113,14 +113,14 @@ impl Discovery {
}
/// Add a new node to discovery table. Pings the node.
pub fn add_node(&mut self, e: NodeEntry) {
pub fn add_node(&mut self, e: NodeEntry) {
let endpoint = e.endpoint.clone();
self.update_node(e);
self.ping(&endpoint);
}
/// Add a list of known nodes to the table.
pub fn init_node_list(&mut self, mut nodes: Vec<NodeEntry>) {
pub fn init_node_list(&mut self, mut nodes: Vec<NodeEntry>) {
for n in nodes.drain(..) {
self.update_node(n);
}
@@ -243,7 +243,7 @@ impl Discovery {
self.send_to(packet, address.clone());
}
#[cfg_attr(feature="dev", allow(map_clone))]
#[cfg_attr(all(nightly, feature="dev"), allow(map_clone))]
fn nearest_node_entries(target: &NodeId, buckets: &[NodeBucket]) -> Vec<NodeEntry> {
let mut found: BTreeMap<u32, Vec<&NodeEntry>> = BTreeMap::new();
let mut count = 0;
@@ -251,7 +251,7 @@ impl Discovery {
// Sort nodes by distance to target
for bucket in buckets {
for node in &bucket.nodes {
let distance = Discovery::distance(target, &node.address.id);
let distance = Discovery::distance(target, &node.address.id);
found.entry(distance).or_insert_with(Vec::new).push(&node.address);
if count == BUCKET_SIZE {
// delete the most distant element
@@ -310,7 +310,7 @@ impl Discovery {
None
}),
Ok(_) => None,
Err(e) => {
Err(e) => {
warn!("Error reading UPD socket: {:?}", e);
None
}
@@ -339,7 +339,7 @@ impl Discovery {
PACKET_PONG => self.on_pong(&rlp, &node_id, &from),
PACKET_FIND_NODE => self.on_find_node(&rlp, &node_id, &from),
PACKET_NEIGHBOURS => self.on_neighbours(&rlp, &node_id, &from),
_ => {
_ => {
debug!("Unknown UDP packet: {}", packet_id);
Ok(None)
}
@@ -367,14 +367,14 @@ impl Discovery {
}
else {
self.update_node(entry.clone());
added_map.insert(node.clone(), entry);
added_map.insert(node.clone(), entry);
}
let hash = rlp.as_raw().sha3();
let mut response = RlpStream::new_list(2);
dest.to_rlp_list(&mut response);
response.append(&hash);
self.send_packet(PACKET_PONG, from, &response.drain());
Ok(Some(TableUpdates { added: added_map, removed: HashSet::new() }))
}
@@ -391,7 +391,7 @@ impl Discovery {
}
self.clear_ping(node);
let mut added_map = HashMap::new();
added_map.insert(node.clone(), entry);
added_map.insert(node.clone(), entry);
Ok(None)
}
@@ -466,8 +466,8 @@ impl Discovery {
pub fn round(&mut self) -> Option<TableUpdates> {
let removed = self.check_expired(false);
self.discover();
if !removed.is_empty() {
Some(TableUpdates { added: HashMap::new(), removed: removed })
if !removed.is_empty() {
Some(TableUpdates { added: HashMap::new(), removed: removed })
} else { None }
}

View File

@@ -507,7 +507,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
debug!(target: "network", "Connecting peers: {} sessions, {} pending", self.session_count(), self.handshake_count());
}
#[cfg_attr(feature="dev", allow(single_match))]
#[cfg_attr(all(nightly, feature="dev"), allow(single_match))]
fn connect_peer(&self, id: &NodeId, io: &IoContext<NetworkIoMessage<Message>>) {
if self.have_session(id)
{
@@ -542,7 +542,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
self.create_connection(socket, Some(id), io);
}
#[cfg_attr(feature="dev", allow(block_in_if_condition_stmt))]
#[cfg_attr(all(nightly, feature="dev"), allow(block_in_if_condition_stmt))]
fn create_connection(&self, socket: TcpStream, id: Option<&NodeId>, io: &IoContext<NetworkIoMessage<Message>>) {
let nonce = self.info.write().unwrap().next_nonce();
let mut handshakes = self.handshakes.write().unwrap();

View File

@@ -71,7 +71,7 @@ impl PanicHandler {
/// Invoke closure and catch any possible panics.
/// In case of panic notifies all listeners about it.
#[cfg_attr(feature="dev", allow(deprecated))]
#[cfg_attr(all(nightly, feature="dev"), allow(deprecated))]
pub fn catch_panic<G, R>(&self, g: G) -> thread::Result<R> where G: FnOnce() -> R + Send + 'static {
let _guard = PanicGuard { handler: self };
let result = g();

View File

@@ -22,7 +22,7 @@ use super::trietraits::*;
use super::node::*;
/// A `Trie` implementation using a generic `HashDB` backing database.
///
///
/// Use it as a `Trie` trait object. You can use `db()` to get the backing database object, `keys`
/// to get the keys belonging to the trie in the backing database, and `db_items_remaining()` to get
/// which items in the backing database do not belong to this trie. If this is the only trie in the
@@ -54,7 +54,7 @@ pub struct TrieDB<'db> {
pub hash_count: usize,
}
#[cfg_attr(feature="dev", allow(wrong_self_convention))]
#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))]
impl<'db> TrieDB<'db> {
/// Create a new trie with the backing database `db` and `root`
/// Panics, if `root` does not exist
@@ -63,16 +63,16 @@ impl<'db> TrieDB<'db> {
flushln!("TrieDB::new({}): Trie root not found!", root);
panic!("Trie root not found!");
}
TrieDB {
db: db,
TrieDB {
db: db,
root: root,
hash_count: 0
hash_count: 0
}
}
/// Get the backing database.
pub fn db(&'db self) -> &'db HashDB {
self.db
pub fn db(&'db self) -> &'db HashDB {
self.db
}
/// Determine all the keys in the backing database that belong to the trie.
@@ -142,7 +142,7 @@ impl<'db> TrieDB<'db> {
/// Indentation helper for `formal_all`.
fn fmt_indent(&self, f: &mut fmt::Formatter, size: usize) -> fmt::Result {
for _ in 0..size {
for _ in 0..size {
try!(write!(f, " "));
}
Ok(())
@@ -358,7 +358,7 @@ impl<'db> fmt::Debug for TrieDB<'db> {
fn iterator() {
use memorydb::*;
use super::triedbmut::*;
let d = vec![ &b"A"[..], &b"AA"[..], &b"AB"[..], &b"B"[..] ];
let mut memdb = MemoryDB::new();

View File

@@ -23,7 +23,7 @@ use super::journal::*;
use super::trietraits::*;
/// A `Trie` implementation using a generic `HashDB` backing database.
///
///
/// Use it as a `Trie` trait object. You can use `db()` to get the backing database object, `keys`
/// to get the keys belonging to the trie in the backing database, and `db_items_remaining()` to get
/// which items in the backing database do not belong to this trie. If this is the only trie in the
@@ -66,21 +66,21 @@ enum MaybeChanged<'a> {
Changed(Bytes),
}
#[cfg_attr(feature="dev", allow(wrong_self_convention))]
#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))]
impl<'db> TrieDBMut<'db> {
/// Create a new trie with the backing database `db` and empty `root`
/// Initialise to the state entailed by the genesis block.
/// This guarantees the trie is built correctly.
pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self {
pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self {
let mut r = TrieDBMut{
db: db,
db: db,
root: root,
hash_count: 0
};
hash_count: 0
};
// set root rlp
*r.root = SHA3_NULL_RLP.clone();
r
*r.root = SHA3_NULL_RLP.clone();
r
}
/// Create a new trie with the backing database `db` and `root`.
@@ -91,21 +91,21 @@ impl<'db> TrieDBMut<'db> {
flushln!("Trie root not found {}", root);
panic!("Trie root not found!");
}
TrieDBMut {
db: db,
TrieDBMut {
db: db,
root: root,
hash_count: 0
hash_count: 0
}
}
/// Get the backing database.
pub fn db(&'db self) -> &'db HashDB {
self.db
pub fn db(&'db self) -> &'db HashDB {
self.db
}
/// Get the backing database.
pub fn db_mut(&'db mut self) -> &'db mut HashDB {
self.db
pub fn db_mut(&'db mut self) -> &'db mut HashDB {
self.db
}
/// Determine all the keys in the backing database that belong to the trie.
@@ -184,7 +184,7 @@ impl<'db> TrieDBMut<'db> {
/// Indentation helper for `formal_all`.
fn fmt_indent(&self, f: &mut fmt::Formatter, size: usize) -> fmt::Result {
for _ in 0..size {
for _ in 0..size {
try!(write!(f, " "));
}
Ok(())
@@ -350,7 +350,7 @@ impl<'db> TrieDBMut<'db> {
}
}
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
/// Determine the RLP of the node, assuming we're inserting `partial` into the
/// node currently of data `old`. This will *not* delete any hash of `old` from the database;
/// it will just return the new RLP that includes the new node.
@@ -378,7 +378,7 @@ impl<'db> TrieDBMut<'db> {
// original had empty slot - place a leaf there.
true if old_rlp.at(i).is_empty() => journal.new_node(Self::compose_leaf(&partial.mid(1), value), &mut s),
// original has something there already; augment.
true => {
true => {
let new = self.augmented(self.take_node(&old_rlp.at(i), journal), &partial.mid(1), value, journal);
journal.new_node(new, &mut s);
}