Fixing some clippy warnings (#1728)

* Fixing warnings

* Fixing unnecessary ref

* Removing unnecessary operation
Tomasz Drwięga, 2016-07-26 20:31:25 +02:00, committed by Gav Wood
parent 01e33ffb61
commit 3f41186b2e
36 changed files with 81 additions and 86 deletions


@@ -192,7 +192,7 @@ impl AccountProvider {
 	pub fn accounts_info(&self) -> Result<HashMap<H160, AccountMeta>, Error> {
 		let r: HashMap<H160, AccountMeta> = self.sstore.accounts()
 			.into_iter()
-			.map(|a| (H160(a.clone().into()), self.account_meta(a).unwrap_or(Default::default())))
+			.map(|a| (H160(a.clone().into()), self.account_meta(a).unwrap_or_else(|_| Default::default())))
 			.collect();
 		Ok(r)
 	}
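
The hunk above is the usual fix for clippy's `or_fun_call` lint: the argument of `unwrap_or` is built eagerly even when the result is `Ok`, whereas `unwrap_or_else` takes a closure that only runs on the error path. A minimal standalone sketch of the difference (not Parity code):

	fn expensive_default() -> u32 {
		// stands in for an allocation or lookup that should be avoided on the happy path
		42
	}

	fn main() {
		let value: Result<u32, ()> = Ok(7);
		// eager: expensive_default() is evaluated even though `value` is Ok
		let _a = value.unwrap_or(expensive_default());
		// lazy: the closure is only invoked if `value` is Err
		let _b = value.unwrap_or_else(|_| expensive_default());
	}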


@@ -339,7 +339,7 @@ impl<'x> OpenBlock<'x> {
 				let t = outcome.trace;
 				self.block.traces.as_mut().map(|traces| traces.push(t.expect("self.block.traces.is_some(): so we must be tracing: qed")));
 				self.block.receipts.push(outcome.receipt);
-				Ok(&self.block.receipts.last().unwrap())
+				Ok(self.block.receipts.last().unwrap())
 			}
 			Err(x) => Err(From::from(x))
 		}


@@ -284,10 +284,10 @@ impl BlockQueue {
 	/// Check if the block is currently in the queue
 	pub fn block_status(&self, hash: &H256) -> BlockStatus {
-		if self.processing.read().contains(&hash) {
+		if self.processing.read().contains(hash) {
 			return BlockStatus::Queued;
 		}
-		if self.verification.bad.lock().contains(&hash) {
+		if self.verification.bad.lock().contains(hash) {
 			return BlockStatus::Bad;
 		}
 		BlockStatus::Unknown
@@ -340,7 +340,7 @@ impl BlockQueue {
 		bad.reserve(block_hashes.len());
 		for hash in block_hashes {
 			bad.insert(hash.clone());
-			processing.remove(&hash);
+			processing.remove(hash);
 		}
 		let mut new_verified = VecDeque::new();
@@ -362,7 +362,7 @@ impl BlockQueue {
 		}
 		let mut processing = self.processing.write();
 		for hash in block_hashes {
-			processing.remove(&hash);
+			processing.remove(hash);
 		}
 	}
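
These `contains(&hash)` → `contains(hash)` changes address clippy's `needless_borrow` lint: `hash` is already a reference here, so the extra `&` only creates a double reference that the compiler immediately dereferences again. A small illustration with plain standard-library types (hypothetical names, not the Parity ones):

	use std::collections::HashSet;

	// `hash` arrives as a reference, just like the `&H256` parameter above.
	fn is_queued(processing: &HashSet<u64>, hash: &u64) -> bool {
		// `contains(&hash)` would pass a `&&u64`; passing `hash` directly is enough
		processing.contains(hash)
	}

	fn main() {
		let mut processing = HashSet::new();
		processing.insert(1u64);
		assert!(is_queued(&processing, &1));
	}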


@@ -512,7 +512,7 @@ impl BlockChain {
 		let _lock = self.insert_lock.lock();
 		// store block in db
-		self.blocks_db.put(&hash, &bytes).unwrap();
+		self.blocks_db.put(&hash, bytes).unwrap();
 		let info = self.block_info(bytes);
@@ -633,7 +633,7 @@ impl BlockChain {
 		if self.is_known(&first) {
 			Some(AncestryIter {
 				current: first,
-				chain: &self,
+				chain: self,
 			})
 		} else {
 			None


@@ -281,7 +281,7 @@ impl Client {
 		}
 		// Verify Block Family
-		let verify_family_result = self.verifier.verify_block_family(&header, &block.bytes, engine, self.chain.deref());
+		let verify_family_result = self.verifier.verify_block_family(header, &block.bytes, engine, self.chain.deref());
 		if let Err(e) = verify_family_result {
 			warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
 			return Err(());
@@ -299,7 +299,7 @@ impl Client {
 		let last_hashes = self.build_last_hashes(header.parent_hash.clone());
 		let db = self.state_db.lock().boxed_clone();
-		let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, &self.vm_factory, self.trie_factory.clone());
+		let enact_result = enact_verified(block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, &self.vm_factory, self.trie_factory.clone());
 		if let Err(e) = enact_result {
 			warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
 			return Err(());
@@ -307,7 +307,7 @@ impl Client {
 		// Final Verification
 		let locked_block = enact_result.unwrap();
-		if let Err(e) = self.verifier.verify_block_final(&header, locked_block.block().header()) {
+		if let Err(e) = self.verifier.verify_block_final(header, locked_block.block().header()) {
 			warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
 			return Err(());
 		}
@@ -468,7 +468,7 @@ impl Client {
 	pub fn import_queued_transactions(&self, transactions: &[Bytes]) -> usize {
 		let _timer = PerfTimer::new("import_queued_transactions");
 		self.queue_transactions.fetch_sub(transactions.len(), AtomicOrdering::SeqCst);
-		let txs = transactions.iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect();
+		let txs = transactions.iter().filter_map(|bytes| UntrustedRlp::new(bytes).as_val().ok()).collect();
 		let results = self.miner.import_external_transactions(self, txs);
 		results.len()
 	}
@@ -684,7 +684,7 @@ impl BlockChainClient for Client {
 	}
 	fn block(&self, id: BlockID) -> Option<Bytes> {
-		if let &BlockID::Pending = &id {
+		if let BlockID::Pending = id {
 			if let Some(block) = self.miner.pending_block() {
 				return Some(block.rlp_bytes(Seal::Without));
 			}
@@ -703,7 +703,7 @@ impl BlockChainClient for Client {
 	}
 	fn block_total_difficulty(&self, id: BlockID) -> Option<U256> {
-		if let &BlockID::Pending = &id {
+		if let BlockID::Pending = id {
 			if let Some(block) = self.miner.pending_block() {
 				return Some(*block.header.difficulty() + self.block_total_difficulty(BlockID::Latest).expect("blocks in chain have details; qed"));
 			}


@@ -46,7 +46,7 @@ impl FromStr for DatabaseCompactionProfile {
 		match s {
 			"ssd" | "default" => Ok(DatabaseCompactionProfile::Default),
 			"hdd" => Ok(DatabaseCompactionProfile::HDD),
-			_ => Err(format!("Invalid compaction profile given. Expected hdd/ssd (default).")),
+			_ => Err("Invalid compaction profile given. Expected hdd/ssd (default).".into()),
 		}
 	}
 }
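
`format!` with a literal and no arguments goes through the whole formatting machinery just to build a `String`; clippy's `useless_format` lint suggests `.into()` or `.to_owned()` instead, which is what the hunk does. Reduced to a standalone sketch:

	fn parse_profile(s: &str) -> Result<u8, String> {
		match s {
			"ssd" | "default" => Ok(0),
			"hdd" => Ok(1),
			// was: Err(format!("Invalid compaction profile given.")),
			_ => Err("Invalid compaction profile given.".into()),
		}
	}

	fn main() {
		assert!(parse_profile("floppy").is_err());
	}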


@@ -190,7 +190,7 @@ impl TestBlockChainClient {
 					gas_price: U256::one(),
 					nonce: U256::zero()
 				};
-				let signed_tx = tx.sign(&keypair.secret());
+				let signed_tx = tx.sign(keypair.secret());
 				txs.append(&signed_tx);
 				txs.out()
 			},
@@ -366,8 +366,8 @@ impl BlockChainClient for TestBlockChainClient {
 	fn block_body(&self, id: BlockID) -> Option<Bytes> {
 		self.block_hash(id).and_then(|hash| self.blocks.read().get(&hash).map(|r| {
 			let mut stream = RlpStream::new_list(2);
-			stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1);
-			stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1);
+			stream.append_raw(Rlp::new(r).at(1).as_raw(), 1);
+			stream.append_raw(Rlp::new(r).at(2).as_raw(), 1);
 			stream.out()
 		}))
 	}


@@ -96,13 +96,13 @@ impl<Cost: CostType> evm::Evm for Interpreter<Cost> {
 		self.mem.clear();
 		let code = &params.code.as_ref().unwrap();
-		let valid_jump_destinations = self.find_jump_destinations(&code);
+		let valid_jump_destinations = self.find_jump_destinations(code);
 		let mut gasometer = Gasometer::<Cost>::new(try!(Cost::from_u256(params.gas)));
 		let mut stack = VecStack::with_capacity(ext.schedule().stack_limit, U256::zero());
 		let mut reader = CodeReader {
 			position: 0,
-			code: &code
+			code: code
 		};
 		let infos = &*instructions::INSTRUCTIONS;
@@ -274,7 +274,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 			return Ok(InstructionResult::Ok);
 		}
-		let create_result = ext.create(&gas.as_u256(), &endowment, &contract_code);
+		let create_result = ext.create(&gas.as_u256(), &endowment, contract_code);
 		return match create_result {
 			ContractCreateResult::Created(address, gas_left) => {
 				stack.push(address_to_u256(address));


@@ -370,7 +370,7 @@ impl<'a> Executive<'a> {
 		let gas = params.gas;
 		let created = params.address.clone();
-		let mut subvmtracer = vm_tracer.prepare_subtrace(&params.code.as_ref().expect("two ways into create (Externalities::create and Executive::transact_with_tracer); both place `Some(...)` `code` in `params`; qed"));
+		let mut subvmtracer = vm_tracer.prepare_subtrace(params.code.as_ref().expect("two ways into create (Externalities::create and Executive::transact_with_tracer); both place `Some(...)` `code` in `params`; qed"));
 		let res = {
 			self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::InitContract(trace_output.as_mut()), &mut subtracer, &mut subvmtracer)
@@ -1009,7 +1009,7 @@ mod tests {
 			gas: U256::from(100_000),
 			gas_price: U256::zero(),
 			nonce: U256::zero()
-		}.sign(&keypair.secret());
+		}.sign(keypair.secret());
 		let sender = t.sender().unwrap();
 		let contract = contract_address(&sender, &U256::zero());
@@ -1076,7 +1076,7 @@ mod tests {
 			gas: U256::from(100_000),
 			gas_price: U256::zero(),
 			nonce: U256::one()
-		}.sign(&keypair.secret());
+		}.sign(keypair.secret());
 		let sender = t.sender().unwrap();
 		let mut state_result = get_temp_state();
@@ -1109,7 +1109,7 @@ mod tests {
 			gas: U256::from(80_001),
 			gas_price: U256::zero(),
 			nonce: U256::zero()
-		}.sign(&keypair.secret());
+		}.sign(keypair.secret());
 		let sender = t.sender().unwrap();
 		let mut state_result = get_temp_state();
@@ -1144,7 +1144,7 @@ mod tests {
 			gas: U256::from(100_000),
 			gas_price: U256::one(),
 			nonce: U256::zero()
-		}.sign(&keypair.secret());
+		}.sign(keypair.secret());
 		let sender = t.sender().unwrap();
 		let mut state_result = get_temp_state();


@@ -272,7 +272,7 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT
 	}
 	fn env_info(&self) -> &EnvInfo {
-		&self.env_info
+		self.env_info
 	}
 	fn depth(&self) -> usize {
@@ -455,7 +455,7 @@ mod tests {
 	{
 		let vm_factory = Default::default();
 		let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer);
-		ext.suicide(&refund_account);
+		ext.suicide(refund_account);
 	}
 	assert_eq!(setup.sub_state.suicides.len(), 1);


@@ -234,7 +234,7 @@ impl Header {
 		s.append(&self.extra_data);
 		if let Seal::With = with_seal {
 			for b in &self.seal {
-				s.append_raw(&b, 1);
+				s.append_raw(b, 1);
 			}
 		}
 	}


@@ -15,23 +15,20 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 #![warn(missing_docs)]
+#![cfg_attr(feature="benches", feature(test))]
 #![cfg_attr(feature="dev", feature(plugin))]
 #![cfg_attr(feature="dev", plugin(clippy))]
-// Clippy config
-// TODO [todr] not really sure
+// Clippy settings
+// Most of the time much more readable
 #![cfg_attr(feature="dev", allow(needless_range_loop))]
 // Shorter than if-else
 #![cfg_attr(feature="dev", allow(match_bool))]
-// Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
+// Keeps consistency (all lines with `.clone()`).
 #![cfg_attr(feature="dev", allow(clone_on_copy))]
-// In most cases it expresses function flow better
-#![cfg_attr(feature="dev", allow(if_not_else))]
 // TODO [todr] a lot of warnings to be fixed
-#![cfg_attr(feature="dev", allow(needless_borrow))]
 #![cfg_attr(feature="dev", allow(assign_op_pattern))]
-#![cfg_attr(feature="benches", feature(test))]
 //! Ethcore library
 //!


@@ -35,7 +35,7 @@ pub struct WorkPoster {
 impl WorkPoster {
 	pub fn new(urls: &[String]) -> Self {
 		let urls = urls.into_iter().filter_map(|u| {
-			match Url::parse(&u) {
+			match Url::parse(u) {
 				Ok(url) => Some(url),
 				Err(e) => {
 					warn!("Error parsing URL {} : {}", u, e);


@@ -143,8 +143,8 @@ pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option<A
 		},
 		storage: storage.into_iter().map(|k|
 			(k.clone(), Diff::new(
-				pre.storage.get(&k).cloned().unwrap_or_else(H256::new),
-				post.storage.get(&k).cloned().unwrap_or_else(H256::new)
+				pre.storage.get(k).cloned().unwrap_or_else(H256::new),
+				post.storage.get(k).cloned().unwrap_or_else(H256::new)
 			))).collect(),
 	};
 	if r.balance.is_same() && r.nonce.is_same() && r.code.is_same() && r.storage.is_empty() {


@@ -138,7 +138,7 @@ impl IoHandler<ClientIoMessage> for ClientIoHandler {
 	fn message(&self, _io: &IoContext<ClientIoMessage>, net_message: &ClientIoMessage) {
 		match *net_message {
 			ClientIoMessage::BlockVerified => { self.client.import_verified_blocks(); }
-			ClientIoMessage::NewTransactions(ref transactions) => { self.client.import_queued_transactions(&transactions); }
+			ClientIoMessage::NewTransactions(ref transactions) => { self.client.import_queued_transactions(transactions); }
 			_ => {} // ignore other messages
 		}
 	}
@@ -175,7 +175,7 @@ mod tests {
 		let service = ClientService::start(
 			ClientConfig::default(),
 			get_test_spec(),
-			&temp_path.as_path(),
+			temp_path.as_path(),
 			Arc::new(Miner::with_spec(get_test_spec())),
 		);
 		assert!(service.is_ok());


@@ -122,7 +122,7 @@ impl State {
 	fn insert_cache(&self, address: &Address, account: Option<Account>) {
 		if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() {
-			if !snapshot.contains_key(&address) {
+			if !snapshot.contains_key(address) {
 				snapshot.insert(address.clone(), self.cache.borrow_mut().insert(address.clone(), account));
 				return;
 			}
@@ -132,7 +132,7 @@ impl State {
 	fn note_cache(&self, address: &Address) {
 		if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() {
-			if !snapshot.contains_key(&address) {
+			if !snapshot.contains_key(address) {
 				snapshot.insert(address.clone(), self.cache.borrow().get(address).cloned());
 			}
 		}
@@ -151,7 +151,7 @@ impl State {
 	/// Create a new contract at address `contract`. If there is already an account at the address
 	/// it will have its code reset, ready for `init_code()`.
 	pub fn new_contract(&mut self, contract: &Address, balance: U256) {
-		self.insert_cache(&contract, Some(Account::new_contract(balance, self.account_start_nonce)));
+		self.insert_cache(contract, Some(Account::new_contract(balance, self.account_start_nonce)));
 	}
 	/// Remove an existing account.
@@ -162,7 +162,7 @@ impl State {
 	/// Determine whether an account exists.
 	pub fn exists(&self, a: &Address) -> bool {
 		let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
-		self.cache.borrow().get(&a).unwrap_or(&None).is_some() || db.contains(&a)
+		self.cache.borrow().get(a).unwrap_or(&None).is_some() || db.contains(a)
 	}
 	/// Get the balance of account `a`.
@@ -329,7 +329,7 @@ impl State {
 		let have_key = self.cache.borrow().contains_key(a);
 		if !have_key {
 			let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
-			self.insert_cache(a, db.get(&a).map(Account::from_rlp))
+			self.insert_cache(a, db.get(a).map(Account::from_rlp))
 		}
 		if require_code {
 			if let Some(ref mut account) = self.cache.borrow_mut().get_mut(a).unwrap().as_mut() {
@@ -350,7 +350,7 @@ impl State {
 		let have_key = self.cache.borrow().contains_key(a);
 		if !have_key {
 			let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
-			self.insert_cache(a, db.get(&a).map(Account::from_rlp))
+			self.insert_cache(a, db.get(a).map(Account::from_rlp))
 		} else {
 			self.note_cache(a);
 		}


@@ -350,7 +350,7 @@ mod tests {
 			gas: U256::from(30_000),
 			gas_price: U256::from(40_000),
 			nonce: U256::one()
-		}.sign(&keypair.secret());
+		}.sign(keypair.secret());
 		let tr2 = Transaction {
 			action: Action::Create,
@@ -359,7 +359,7 @@ mod tests {
 			gas: U256::from(30_000),
 			gas_price: U256::from(40_000),
 			nonce: U256::from(2)
-		}.sign(&keypair.secret());
+		}.sign(keypair.secret());
 		let good_transactions = [ tr1.clone(), tr2.clone() ];


@@ -522,7 +522,7 @@ impl Configuration {
 			self.args.flag_geth ||
 			self.args.flag_no_signer;
-		return !signer_disabled;
+		!signer_disabled
 	}
 }
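
Dropping the trailing `return` is the usual `needless_return` fix: the last expression of a function body is already its value, so the explicit `return` and semicolon add nothing. Roughly (the variable name is borrowed from the hunk, the rest is illustrative):

	fn signer_enabled(signer_disabled: bool) -> bool {
		// was: return !signer_disabled;
		!signer_disabled
	}

	fn main() {
		assert!(signer_enabled(false));
	}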


@@ -184,6 +184,7 @@ pub fn default_network_config() -> ::util::NetworkConfiguration {
 	}
 }
+#[cfg_attr(feature = "dev", allow(too_many_arguments))]
 pub fn to_client_config(
 	cache_config: &CacheConfig,
 	dirs: &Directories,
@@ -356,6 +357,7 @@ mod tests {
 	}
 	#[test]
+	#[cfg_attr(feature = "dev", allow(float_cmp))]
 	fn test_to_price() {
 		assert_eq!(to_price("1").unwrap(), 1.0);
 		assert_eq!(to_price("2.3").unwrap(), 2.3);


@@ -160,7 +160,7 @@ impl ChainNotify for Informant {
 		let importing = queue_info.unverified_queue_size + queue_info.verified_queue_size > 3
 			|| self.sync.as_ref().map_or(false, |s| s.status().is_major_syncing());
 		if Instant::now() > *last_import + Duration::from_secs(1) && !importing {
-			if let Some(block) = imported.last().and_then(|h| self.client.block(BlockID::Hash(h.clone()))) {
+			if let Some(block) = imported.last().and_then(|h| self.client.block(BlockID::Hash(*h))) {
 				let view = BlockView::new(&block);
 				let header = view.header();
 				let tx_count = view.transactions_count();
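
`h.clone()` becomes `*h` because the hash type here is `Copy` (otherwise `*h` could not move the value out of the borrow), so a plain deref already copies it; calling `.clone()` on a `Copy` value is what clippy's `clone_on_copy` lint points at. Standalone sketch with a stand-in type:

	#[derive(Clone, Copy, PartialEq, Debug)]
	struct Hash([u8; 4]); // stand-in for the 32-byte hash type in the diff

	fn main() {
		let imported = vec![Hash([1, 2, 3, 4])];
		// `last()` yields `Option<&Hash>`; a plain deref copies the value out
		let last = imported.last().map(|h| *h);
		assert_eq!(last, Some(Hash([1, 2, 3, 4])));
	}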


@@ -113,7 +113,7 @@ fn start() -> Result<String, String> {
 fn main() {
 	// just redirect to the sync::main()
-	if std::env::args().nth(1).map(|arg| arg == "sync").unwrap_or(false) {
+	if std::env::args().nth(1).map_or(false, |arg| arg == "sync") {
 		sync::main();
 		return;
 	}
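
`.map(...).unwrap_or(false)` on an `Option` collapses into a single `.map_or(false, ...)`, the form clippy's map-then-unwrap lints suggest. Minimal form of the same check:

	fn main() {
		// one combinator instead of map + unwrap_or
		let is_sync = std::env::args().nth(1).map_or(false, |arg| arg == "sync");
		println!("sync mode: {}", is_sync);
	}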


@@ -195,7 +195,7 @@ pub trait Populatable {
 	/// If `d` is smaller, will leave some bytes untouched.
 	fn copy_raw(&mut self, d: &[u8]) {
 		use std::io::Write;
-		self.as_slice_mut().write(&d).unwrap();
+		self.as_slice_mut().write(d).unwrap();
 	}
 	/// Copies the raw representation of an object `d` to `self`, overwriting as necessary.


@@ -273,7 +273,7 @@ pub mod ecdh {
 		let publ = try!(key::PublicKey::from_slice(context, &pdata));
 		// no way to create SecretKey from raw byte array.
 		let sec: &key::SecretKey = unsafe { ::std::mem::transmute(secret) };
-		let shared = ecdh::SharedSecret::new_raw(context, &publ, &sec);
+		let shared = ecdh::SharedSecret::new_raw(context, &publ, sec);
 		let mut s = crypto::Secret::new();
 		s.copy_from_slice(&shared[0..32]);


@@ -82,7 +82,7 @@ impl fmt::Display for UtilError {
 			UtilError::BaseData(ref err) => f.write_fmt(format_args!("{}", err)),
 			UtilError::Network(ref err) => f.write_fmt(format_args!("{}", err)),
 			UtilError::Decoder(ref err) => f.write_fmt(format_args!("{}", err)),
-			UtilError::SimpleString(ref msg) => f.write_str(&msg),
+			UtilError::SimpleString(ref msg) => f.write_str(msg),
 			UtilError::BadSize => f.write_str("Bad input size."),
 			UtilError::Snappy(ref err) => f.write_fmt(format_args!("{}", err)),
 		}


@@ -131,7 +131,7 @@ impl EarlyMergeDB {
 			// this is the first entry for this node in the journal.
 			if backing.get(h).expect("Low-level database error. Some issue with your hard disk?").is_some() {
 				// already in the backing DB. start counting, and remember it was already in.
-				Self::set_already_in(batch, &h);
+				Self::set_already_in(batch, h);
 				refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: true});
 				if trace {
 					trace!(target: "jdb.fine", " insert({}): New to queue, in DB: Recording and inserting into queue", h);


@@ -193,7 +193,7 @@ impl OverlayRecentDB {
 	#[inline]
 	fn to_short_key(key: &H256) -> H256 {
 		let mut k = H256::new();
-		&mut k[0..DB_PREFIX_LEN].copy_from_slice(&key[0..DB_PREFIX_LEN]);
+		k[0..DB_PREFIX_LEN].copy_from_slice(&key[0..DB_PREFIX_LEN]);
 		k
 	}
 }
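
This is the "Removing unnecessary operation" part of the commit message: `copy_from_slice` mutates the slice in place and returns `()`, so the leading `&mut` only borrowed that unit value, an expression whose result is never used, which clippy reports as `unnecessary_operation`. Sketch of the fixed shape (the constant value and array size are illustrative, not the real ones):

	const DB_PREFIX_LEN: usize = 4;

	fn to_short_key(key: &[u8; 8]) -> [u8; 8] {
		let mut k = [0u8; 8];
		// was: &mut k[0..DB_PREFIX_LEN].copy_from_slice(&key[0..DB_PREFIX_LEN]);
		// the call alone does the copy; borrowing its `()` result did nothing
		k[0..DB_PREFIX_LEN].copy_from_slice(&key[0..DB_PREFIX_LEN]);
		k
	}

	fn main() {
		let short = to_short_key(&[1, 2, 3, 4, 5, 6, 7, 8]);
		assert_eq!(short, [1, 2, 3, 4, 0, 0, 0, 0]);
	}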


@@ -19,20 +19,16 @@
 #![cfg_attr(feature="dev", plugin(clippy))]
 // Clippy settings
-// TODO [todr] not really sure
+// Most of the time much more readable
 #![cfg_attr(feature="dev", allow(needless_range_loop))]
 // Shorter than if-else
 #![cfg_attr(feature="dev", allow(match_bool))]
 // We use that to be more explicit about handled cases
 #![cfg_attr(feature="dev", allow(match_same_arms))]
-// Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
+// Keeps consistency (all lines with `.clone()`).
 #![cfg_attr(feature="dev", allow(clone_on_copy))]
-// In most cases it expresses function flow better
-#![cfg_attr(feature="dev", allow(if_not_else))]
 // TODO [todr] a lot of warnings to be fixed
-#![cfg_attr(feature="dev", allow(needless_borrow))]
 #![cfg_attr(feature="dev", allow(assign_op_pattern))]
-#![cfg_attr(feature="dev", allow(unnecessary_operation))]
 //! Ethcore-util library


@@ -355,7 +355,7 @@ impl EncryptedConnection {
 		self.encoder.encrypt(&mut RefReadBuffer::new(&header), &mut RefWriteBuffer::new(&mut packet), false).expect("Invalid length or padding");
 		EncryptedConnection::update_mac(&mut self.egress_mac, &mut self.mac_encoder, &packet[0..16]);
 		self.egress_mac.clone().finalize(&mut packet[16..32]);
-		self.encoder.encrypt(&mut RefReadBuffer::new(&payload), &mut RefWriteBuffer::new(&mut packet[32..(32 + len)]), padding == 0).expect("Invalid length or padding");
+		self.encoder.encrypt(&mut RefReadBuffer::new(payload), &mut RefWriteBuffer::new(&mut packet[32..(32 + len)]), padding == 0).expect("Invalid length or padding");
 		if padding != 0 {
 			let pad = [0u8; 16];
 			self.encoder.encrypt(&mut RefReadBuffer::new(&pad[0..padding]), &mut RefWriteBuffer::new(&mut packet[(32 + len)..(32 + len + padding)]), true).expect("Invalid length or padding");


@@ -167,7 +167,7 @@ impl Discovery {
 	}
 	fn clear_ping(&mut self, id: &NodeId) {
-		let mut bucket = self.node_buckets.get_mut(Discovery::distance(&self.id, &id) as usize).unwrap();
+		let mut bucket = self.node_buckets.get_mut(Discovery::distance(&self.id, id) as usize).unwrap();
 		if let Some(node) = bucket.nodes.iter_mut().find(|n| &n.address.id == id) {
 			node.timeout = None;
 		}
@@ -438,7 +438,7 @@ impl Discovery {
 		}
 		let mut packets = Discovery::prepare_neighbours_packets(&nearest);
 		for p in packets.drain(..) {
-			self.send_packet(PACKET_NEIGHBOURS, &from, &p);
+			self.send_packet(PACKET_NEIGHBOURS, from, &p);
 		}
 		trace!(target: "discovery", "Sent {} Neighbours to {:?}", nearest.len(), &from);
 		Ok(None)


@@ -355,11 +355,11 @@ impl Host {
 		let keys = if let Some(ref secret) = config.use_secret {
 			KeyPair::from_secret(secret.clone()).unwrap()
 		} else {
-			config.config_path.clone().and_then(|ref p| load_key(&Path::new(&p)))
+			config.config_path.clone().and_then(|ref p| load_key(Path::new(&p)))
 				.map_or_else(|| {
 					let key = KeyPair::create().unwrap();
 					if let Some(path) = config.config_path.clone() {
-						save_key(&Path::new(&path), &key.secret());
+						save_key(Path::new(&path), key.secret());
 					}
 					key
 				},
@@ -1099,7 +1099,7 @@ fn save_key(path: &Path, key: &Secret) {
 			return;
 		}
 	};
-	if let Err(e) = restrict_permissions_owner(&path) {
+	if let Err(e) = restrict_permissions_owner(path) {
 		warn!(target: "network", "Failed to modify permissions of the file (chmod: {})", e);
 	}
 	if let Err(e) = file.write(&key.hex().into_bytes()) {


@@ -128,7 +128,7 @@ impl Session {
 		nonce: &H256, stats: Arc<NetworkStats>, host: &HostInfo) -> Result<Session, UtilError>
 		where Message: Send + Clone {
 		let originated = id.is_some();
-		let mut handshake = Handshake::new(token, id, socket, &nonce, stats).expect("Can't create handshake");
+		let mut handshake = Handshake::new(token, id, socket, nonce, stats).expect("Can't create handshake");
 		try!(handshake.start(io, host, originated));
 		Ok(Session {
 			state: State::Handshake(handshake),


@@ -168,7 +168,7 @@ impl OverlayDB {
 	pub fn revert(&mut self) { self.overlay.clear(); }
 	/// Get the number of references that would be committed.
-	pub fn commit_refs(&self, key: &H256) -> i32 { self.overlay.raw(&key).map_or(0, |&(_, refs)| refs) }
+	pub fn commit_refs(&self, key: &H256) -> i32 { self.overlay.raw(key).map_or(0, |&(_, refs)| refs) }
 	/// Get the refs and value of the given key.
 	fn payload(&self, key: &H256) -> Option<(Bytes, u32)> {


@@ -106,7 +106,7 @@ impl StandardMap {
 			Alphabet::All => Self::random_bytes(self.min_key, self.journal_key, seed),
 			Alphabet::Low => Self::random_word(low, self.min_key, self.journal_key, seed),
 			Alphabet::Mid => Self::random_word(mid, self.min_key, self.journal_key, seed),
-			Alphabet::Custom(ref a) => Self::random_word(&a, self.min_key, self.journal_key, seed),
+			Alphabet::Custom(ref a) => Self::random_word(a, self.min_key, self.journal_key, seed),
 		};
 		let v = match self.value_mode {
 			ValueMode::Mirror => k.clone(),


@@ -132,7 +132,7 @@ impl<'db> TrieDB<'db> {
 	/// Get the data of the root node.
 	fn root_data(&self) -> &[u8] {
-		self.db.get(&self.root).expect("Trie root not found!")
+		self.db.get(self.root).expect("Trie root not found!")
 	}
 	/// Get the root node as a `Node`.
@@ -184,7 +184,7 @@ impl<'db> TrieDB<'db> {
 	/// Return optional data for a key given as a `NibbleSlice`. Returns `None` if no data exists.
 	fn do_lookup<'a, 'key>(&'a self, key: &NibbleSlice<'key>) -> Option<&'a [u8]> where 'a: 'key {
 		let root_rlp = self.root_data();
-		self.get_from_node(&root_rlp, key)
+		self.get_from_node(root_rlp, key)
 	}
 	/// Recursible function to retrieve the value given a `node` and a partial `key`. `None` if no
@@ -340,7 +340,7 @@ impl<'db> Trie for TrieDB<'db> {
 		Box::new(TrieDB::iter(self))
 	}
-	fn root(&self) -> &H256 { &self.root }
+	fn root(&self) -> &H256 { self.root }
 	fn contains(&self, key: &[u8]) -> bool {
 		self.get(key).is_some()
@@ -354,7 +354,7 @@ impl<'db> Trie for TrieDB<'db> {
 impl<'db> fmt::Debug for TrieDB<'db> {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		try!(writeln!(f, "c={:?} [", self.hash_count));
-		let root_rlp = self.db.get(&self.root).expect("Trie root not found!");
+		let root_rlp = self.db.get(self.root).expect("Trie root not found!");
 		try!(self.fmt_all(Node::decoded(root_rlp), f, 0));
 		writeln!(f, "]")
 	}
@@ -373,7 +373,7 @@ fn iterator() {
 	{
 		let mut t = TrieDBMut::new(&mut memdb, &mut root);
 		for x in &d {
-			t.insert(&x, &x);
+			t.insert(x, x);
 		}
 	}
 	assert_eq!(d.iter().map(|i|i.to_vec()).collect::<Vec<_>>(), TrieDB::new(&memdb, &root).unwrap().iter().map(|x|x.0).collect::<Vec<_>>());


@@ -401,7 +401,7 @@ impl<'a> TrieDBMut<'a> {
 	/// Return optional data for a key given as a `NibbleSlice`. Returns `None` if no data exists.
 	fn do_db_lookup<'x, 'key>(&'x self, hash: &H256, key: NibbleSlice<'key>) -> Option<&'x [u8]> where 'x: 'key {
-		self.db.get(hash).and_then(|node_rlp| self.get_from_db_node(&node_rlp, key))
+		self.db.get(hash).and_then(|node_rlp| self.get_from_db_node(node_rlp, key))
 	}
 	/// Recursible function to retrieve the value given a `node` and a partial `key`. `None` if no
@@ -868,7 +868,7 @@ impl<'a> TrieDBMut<'a> {
 impl<'a> TrieMut for TrieDBMut<'a> {
 	fn root(&mut self) -> &H256 {
 		self.commit();
-		&self.root
+		self.root
 	}
 	fn is_empty(&self) -> bool {
@@ -938,7 +938,7 @@ mod tests {
 		for i in 0..v.len() {
 			let key: &[u8]= &v[i].0;
 			let val: &[u8] = &v[i].1;
-			t.insert(&key, &val);
+			t.insert(key, val);
 		}
 		t
 	}
@@ -946,7 +946,7 @@ mod tests {
 	fn unpopulate_trie<'db>(t: &mut TrieDBMut<'db>, v: &[(Vec<u8>, Vec<u8>)]) {
 		for i in v {
 			let key: &[u8]= &i.0;
-			t.remove(&key);
+			t.remove(key);
 		}
 	}


@@ -213,7 +213,7 @@ fn hash256rlp(input: &[(Vec<u8>, Vec<u8>)], pre_len: usize, stream: &mut RlpStre
 		.skip(1)
 		// get minimum number of shared nibbles between first and each successive
 		.fold(key.len(), | acc, &(ref k, _) | {
-			cmp::min(key.shared_prefix_len(&k), acc)
+			cmp::min(key.shared_prefix_len(k), acc)
 		});
 	// if shared prefix is higher than current prefix append its