Fixing some clippy warnings

Tomasz Drwięga 2016-11-28 13:20:49 +01:00
parent e1e7886918
commit 2b3d100de2
33 changed files with 72 additions and 81 deletions
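Most of the changes below follow a handful of recurring clippy suggestions: replacing `if let Ok(_)/Err(_)/None = x` with `x.is_ok()/is_err()/is_none()`, replacing `let ref x = y` with `let x = &y`, matching on a dereferenced value instead of `&`-patterns, using `..` in struct patterns instead of listing ignored fields, and indexing instead of `.get(..).unwrap()`. A minimal hypothetical sketch of two of these patterns (illustrative code only, not taken from these files):

// Hypothetical sketch of recurring clippy fixes in this commit.
enum Mode { Dark(u64), Off, Active }

fn network_enabled(mode: &Mode, status: Result<(), ()>) -> bool {
    // redundant pattern matching: `if let Err(_) = x` becomes `x.is_err()`.
    if status.is_err() {
        return false;
    }
    // match on the dereferenced value instead of `&`-patterns
    // (previously written as `match mode { &Mode::Dark(_) | &Mode::Off => false, _ => true }`).
    match *mode {
        Mode::Dark(_) | Mode::Off => false,
        _ => true,
    }
}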

View File

@@ -869,8 +869,8 @@ impl BlockChainClient for Client {
 }
 fn keep_alive(&self) {
-    let should_wake = match &*self.mode.lock() {
-        &Mode::Dark(..) | &Mode::Passive(..) => true,
+    let should_wake = match *self.mode.lock() {
+        Mode::Dark(..) | Mode::Passive(..) => true,
         _ => false,
     };
     if should_wake {

View File

@@ -124,7 +124,7 @@ impl AuthorityRound {
 }
 fn step_proposer(&self, step: usize) -> &Address {
-    let ref p = self.our_params;
+    let p = &self.our_params;
     p.authorities.get(step % p.authority_n).expect("There are authority_n authorities; taking number modulo authority_n gives number in authority_n range; qed")
 }
@@ -211,7 +211,7 @@ impl Engine for AuthorityRound {
 fn on_close_block(&self, _block: &mut ExecutedBlock) {}
 fn is_sealer(&self, author: &Address) -> Option<bool> {
-    let ref p = self.our_params;
+    let p = &self.our_params;
     Some(p.authorities.contains(author))
 }

View File

@@ -197,19 +197,17 @@ impl<Gas: CostType> Gasometer<Gas> {
 let address = u256_to_address(stack.peek(1));
 let is_value_transfer = !stack.peek(2).is_zero();
-if instruction == instructions::CALL {
-    if (
-        !schedule.no_empty && !ext.exists(&address)
-    ) || (
-        schedule.no_empty && is_value_transfer && !ext.exists_and_not_null(&address)
+if instruction == instructions::CALL && (
+    (!schedule.no_empty && !ext.exists(&address))
+    ||
+    (schedule.no_empty && is_value_transfer && !ext.exists_and_not_null(&address))
 ) {
     gas = overflowing!(gas.overflow_add(schedule.call_new_account_gas.into()));
 }
-};
 if is_value_transfer {
     gas = overflowing!(gas.overflow_add(schedule.call_value_transfer_gas.into()));
-};
+}
 let requested = *stack.peek(0);
@@ -347,7 +345,7 @@ fn test_mem_gas_cost() {
 let result = gasometer.mem_gas_cost(&schedule, current_mem_size, &mem_size);
 // then
-if let Ok(_) = result {
+if result.is_ok() {
     assert!(false, "Should fail with OutOfGas");
 }
 }
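The consolidated CALL condition above can be read as "charge the new-account gas when the call would have to create the target account". A paraphrased sketch of just that predicate (hypothetical helper, not part of the crate; the surrounding `instruction == instructions::CALL` check is omitted):

// Paraphrase of the combined condition, under the assumption that the four
// booleans are computed exactly as in the diff above.
fn charges_new_account_gas(
    no_empty: bool,            // schedule.no_empty
    is_value_transfer: bool,   // the CALL carries a non-zero value
    exists: bool,              // ext.exists(&address)
    exists_and_not_null: bool, // ext.exists_and_not_null(&address)
) -> bool {
    (!no_empty && !exists) || (no_empty && is_value_transfer && !exists_and_not_null)
}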

View File

@@ -103,7 +103,7 @@ impl Ext for FakeExt {
 }
 fn balance(&self, address: &Address) -> U256 {
-    *self.balances.get(address).unwrap()
+    self.balances[address]
 }
 fn blockhash(&self, number: &U256) -> H256 {

View File

@@ -516,7 +516,6 @@ impl<'a> Executive<'a> {
 mod tests {
 use ethkey::{Generator, Random};
 use super::*;
-use util::*;
 use action_params::{ActionParams, ActionValue};
 use env_info::EnvInfo;
 use evm::{Factory, VMType};

View File

@@ -151,7 +151,7 @@ impl GasPriceCalibrator {
 if Instant::now() >= self.next_calibration {
     let usd_per_tx = self.options.usd_per_tx;
     trace!(target: "miner", "Getting price info");
-    if let Ok(_) = PriceInfo::get(move |price: PriceInfo| {
+    let price_info = PriceInfo::get(move |price: PriceInfo| {
         trace!(target: "miner", "Price info arrived: {:?}", price);
         let usd_per_eth = price.ethusd;
         let wei_per_usd: f32 = 1.0e18 / usd_per_eth;
@@ -159,7 +159,9 @@ impl GasPriceCalibrator {
         let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx;
         info!(target: "miner", "Updated conversion rate to Ξ1 = {} ({} wei/gas)", Colour::White.bold().paint(format!("US${}", usd_per_eth)), Colour::Yellow.bold().paint(format!("{}", wei_per_gas)));
         set_price(U256::from(wei_per_gas as u64));
-    }) {
+    });
+    if price_info.is_ok() {
         self.next_calibration = Instant::now() + self.options.recalibration_period;
     } else {
         warn!(target: "miner", "Unable to update Ether price.");
@@ -1142,12 +1144,10 @@ mod tests {
 use std::time::Duration;
 use super::super::{MinerService, PrioritizationStrategy};
 use super::*;
-use util::*;
 use ethkey::{Generator, Random};
 use client::{BlockChainClient, TestBlockChainClient, EachBlockWith, TransactionImportResult};
 use header::BlockNumber;
 use types::transaction::{Transaction, SignedTransaction, Action};
-use block::*;
 use spec::Spec;
 use tests::helpers::{generate_dummy_client};

View File

@@ -990,7 +990,7 @@ impl TransactionQueue {
 let mut update_last_nonce_to = None;
 {
     let by_nonce = self.future.by_address.row_mut(&address);
-    if let None = by_nonce {
+    if by_nonce.is_none() {
         return;
     }
     let mut by_nonce = by_nonce.expect("None is tested in early-exit condition above; qed");
@@ -1212,7 +1212,6 @@ mod test {
 use util::table::*;
 use util::*;
 use ethkey::{Random, Generator};
-use transaction::*;
 use error::{Error, TransactionError};
 use super::*;
 use super::{TransactionSet, TransactionOrder, VerifiedTransaction};

View File

@@ -552,11 +552,11 @@ const POW_VERIFY_RATE: f32 = 0.02;
 pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &Engine, chain: &BlockChain, body: Option<&[u8]>, always: bool) -> Result<(), ::error::Error> {
     if always || rng.gen::<f32>() <= POW_VERIFY_RATE {
         match chain.block_header(header.parent_hash()) {
-            Some(parent) => engine.verify_block_family(&header, &parent, body),
-            None => engine.verify_block_seal(&header),
+            Some(parent) => engine.verify_block_family(header, &parent, body),
+            None => engine.verify_block_seal(header),
         }
     } else {
-        engine.verify_block_basic(&header, body)
+        engine.verify_block_basic(header, body)
     }
 }

View File

@@ -449,7 +449,6 @@ mod tests {
 use util::*;
 use super::*;
 use account_db::*;
-use rlp::*;
 #[test]
 fn account_compress() {

View File

@@ -445,6 +445,7 @@ impl State {
 }
 /// Add `incr` to the balance of account `a`.
+#[cfg_attr(feature="dev", allow(single_match))]
 pub fn add_balance(&mut self, a: &Address, incr: &U256, cleanup_mode: CleanupMode) {
     trace!(target: "state", "add_balance({}, {}): {}", a, incr, self.balance(a));
     let is_value_transfer = !incr.is_zero();
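The `#[cfg_attr(feature="dev", allow(...))]` attributes added here and in the following files silence one specific clippy lint, and only when the `dev` feature (which appears to be what enables clippy in this codebase) is turned on; ordinary builds compile the attribute away. A hypothetical illustration of the `single_match` case:

// Illustrative only: clippy's `single_match` would normally suggest `if let`
// for a match with one meaningful arm; the attribute opts this item out.
#[cfg_attr(feature = "dev", allow(single_match))]
fn print_if_some(v: Option<u32>) {
    match v {
        Some(n) => println!("{}", n),
        _ => {}
    }
}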

View File

@@ -57,6 +57,7 @@ impl Substate {
 }
 /// Get the cleanup mode object from this.
+#[cfg_attr(feature="dev", allow(wrong_self_convention))]
 pub fn to_cleanup_mode(&mut self, schedule: &Schedule) -> CleanupMode {
     match (schedule.no_empty, schedule.kill_empty) {
         (false, _) => CleanupMode::ForceCreate,

View File

@@ -397,6 +397,7 @@ impl StateDB {
 }
 /// Get cached code based on hash.
+#[cfg_attr(feature="dev", allow(map_clone))]
 pub fn get_cached_code(&self, hash: &H256) -> Option<Arc<Vec<u8>>> {
     let mut cache = self.code_cache.lock();

View File

@@ -62,7 +62,7 @@ fn should_return_registrar() {
     &db_config
 ).unwrap();
 let params = client.additional_params();
-let address = params.get("registrar").unwrap();
+let address = &params["registrar"];
 assert_eq!(address.len(), 40);
 assert!(U256::from_str(address).is_ok());
@@ -93,7 +93,7 @@ fn imports_good_block() {
     &db_config
 ).unwrap();
 let good_block = get_good_dummy_block();
-if let Err(_) = client.import_block(good_block) {
+if client.import_block(good_block).is_err() {
     panic!("error importing block being good by definition");
 }
 client.flush_queue();
@@ -203,18 +203,18 @@ fn can_collect_garbage() {
 #[test]
 fn can_generate_gas_price_median() {
-    let client_result = generate_dummy_client_with_data(3, 1, &vec_into![1, 2, 3]);
+    let client_result = generate_dummy_client_with_data(3, 1, slice_into![1, 2, 3]);
     let client = client_result.reference();
     assert_eq!(Some(U256::from(2)), client.gas_price_median(3));
-    let client_result = generate_dummy_client_with_data(4, 1, &vec_into![1, 4, 3, 2]);
+    let client_result = generate_dummy_client_with_data(4, 1, slice_into![1, 4, 3, 2]);
     let client = client_result.reference();
     assert_eq!(Some(U256::from(3)), client.gas_price_median(4));
 }
 #[test]
 fn can_generate_gas_price_histogram() {
-    let client_result = generate_dummy_client_with_data(20, 1, &vec_into![6354,8593,6065,4842,7845,7002,689,4958,4250,6098,5804,4320,643,8895,2296,8589,7145,2000,2512,1408]);
+    let client_result = generate_dummy_client_with_data(20, 1, slice_into![6354,8593,6065,4842,7845,7002,689,4958,4250,6098,5804,4320,643,8895,2296,8589,7145,2000,2512,1408]);
     let client = client_result.reference();
     let hist = client.gas_price_histogram(20, 5).unwrap();
@@ -224,7 +224,7 @@ fn can_generate_gas_price_histogram() {
 #[test]
 fn empty_gas_price_histogram() {
-    let client_result = generate_dummy_client_with_data(20, 0, &vec_into![]);
+    let client_result = generate_dummy_client_with_data(20, 0, slice_into![]);
     let client = client_result.reference();
     assert!(client.gas_price_histogram(20, 5).is_none());

View File

@@ -262,7 +262,7 @@ pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> GuardedTempResult<Arc<
 ).unwrap();
 for block in &blocks {
-    if let Err(_) = client.import_block(block.clone()) {
+    if client.import_block(block.clone()).is_err() {
         panic!("panic importing block which is well-formed");
     }
 }

View File

@@ -19,16 +19,16 @@
 pub use std::time::Duration;
 use client::Mode as ClientMode;
-/// IPC-capable shadow-type for client::config::Mode
+/// IPC-capable shadow-type for `client::config::Mode`
 #[derive(Clone, Binary, Debug)]
 pub enum Mode {
-    /// Same as ClientMode::Off.
+    /// Same as `ClientMode::Off`.
     Off,
-    /// Same as ClientMode::Dark; values in seconds.
+    /// Same as `ClientMode::Dark`; values in seconds.
     Dark(u64),
-    /// Same as ClientMode::Passive; values in seconds.
+    /// Same as `ClientMode::Passive`; values in seconds.
     Passive(u64, u64),
-    /// Same as ClientMode::Active.
+    /// Same as `ClientMode::Active`.
     Active,
 }

View File

@@ -73,7 +73,7 @@ pub struct Transaction {
 impl Transaction {
 /// Append object with a without signature into RLP stream
 pub fn rlp_append_unsigned_transaction(&self, s: &mut RlpStream, network_id: Option<u8>) {
-    s.begin_list(if let None = network_id { 6 } else { 9 });
+    s.begin_list(if network_id.is_none() { 6 } else { 9 });
     s.append(&self.nonce);
     s.append(&self.gas_price);
     s.append(&self.gas);

View File

@@ -156,7 +156,7 @@ pub fn execute(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<(), String> {
 // get the mode
 let mode = try!(mode_switch_to_bool(cmd.mode, &user_defaults));
 trace!(target: "mode", "mode is {:?}", mode);
-let network_enabled = match &mode { &Mode::Dark(_) | &Mode::Off => false, _ => true, };
+let network_enabled = match mode { Mode::Dark(_) | Mode::Off => false, _ => true, };
 // prepare client and snapshot paths.
 let client_path = db_dirs.client_path(algorithm);

View File

@@ -125,7 +125,7 @@ mod tests {
 use serde_json;
 use std::str::FromStr;
 use util::hash::*;
-use super::*;
+use super::{VariadicValue, Topic, Filter};
 use v1::types::BlockNumber;
 use ethcore::filter::Filter as EthFilter;
 use ethcore::client::BlockID;

View File

@@ -80,6 +80,7 @@ pub struct AuthCodes<T: TimeProvider = DefaultTimeProvider> {
 impl AuthCodes<DefaultTimeProvider> {
 /// Reads `AuthCodes` from file and creates new instance using `DefaultTimeProvider`.
+#[cfg_attr(feature="dev", allow(single_char_pattern))]
 pub fn from_file(file: &Path) -> io::Result<AuthCodes> {
     let content = {
         if let Ok(mut file) = fs::File::open(file) {
@@ -128,7 +129,7 @@ impl<T: TimeProvider> AuthCodes<T> {
 let mut file = try!(fs::File::create(file));
 let content = self.codes.iter().map(|code| {
     let mut data = vec![code.code.clone(), encode_time(code.created_at.clone())];
-    if let Some(used_at) = code.last_used_at.clone() {
+    if let Some(used_at) = code.last_used_at {
         data.push(encode_time(used_at));
     }
     data.join(SEPARATOR)

View File

@@ -99,7 +99,7 @@ fn auth_is_valid(codes_path: &Path, protocols: ws::Result<Vec<&str>>) -> bool {
 let res = codes.is_valid(&auth, time);
 // make sure to save back authcodes - it might have been modified
-if let Err(_) = codes.to_file(codes_path) {
+if codes.to_file(codes_path).is_err() {
     warn!(target: "signer", "Couldn't save authorization codes to file.");
 }
 res

View File

@@ -624,7 +624,7 @@ impl ChainSync {
     Ok(())
 }
-#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
+#[cfg_attr(feature="dev", allow(cyclomatic_complexity, needless_borrow))]
 /// Called by peer once it has new block headers during sync
 fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
     let confirmed = match self.peers.get_mut(&peer_id) {
@@ -1173,7 +1173,7 @@ impl ChainSync {
     }
 },
 SyncState::SnapshotData => {
-    if let RestorationStatus::Ongoing { state_chunks: _, block_chunks: _, state_chunks_done, block_chunks_done, } = io.snapshot_service().status() {
+    if let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, .. } = io.snapshot_service().status() {
         if self.snapshot.done_chunks() - (state_chunks_done + block_chunks_done) as usize > MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD {
             trace!(target: "sync", "Snapshot queue full, pausing sync");
             self.state = SyncState::SnapshotWaiting;
@@ -1744,7 +1744,7 @@ impl ChainSync {
     self.restart(io);
     self.continue_sync(io);
 },
-RestorationStatus::Ongoing { state_chunks: _, block_chunks: _, state_chunks_done, block_chunks_done, } => {
+RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, .. } => {
     if !self.snapshot.is_complete() && self.snapshot.done_chunks() - (state_chunks_done + block_chunks_done) as usize <= MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD {
         trace!(target:"sync", "Resuming snapshot sync");
         self.state = SyncState::SnapshotData;
@@ -2002,8 +2002,6 @@ mod tests {
 use tests::snapshot::TestSnapshotService;
 use super::*;
 use ::SyncConfig;
-use util::*;
-use rlp::*;
 use super::{PeerInfo, PeerAsking};
 use ethcore::views::BlockView;
 use ethcore::header::*;

View File

@@ -156,19 +156,19 @@ impl TestNet {
 }
 pub fn peer(&self, i: usize) -> &TestPeer {
-    self.peers.get(i).unwrap()
+    &self.peers[i]
 }
 pub fn peer_mut(&mut self, i: usize) -> &mut TestPeer {
-    self.peers.get_mut(i).unwrap()
+    &mut self.peers[i]
 }
 pub fn start(&mut self) {
     for peer in 0..self.peers.len() {
         for client in 0..self.peers.len() {
             if peer != client {
-                let mut p = self.peers.get_mut(peer).unwrap();
-                p.sync.write().update_targets(&mut p.chain);
+                let mut p = &mut self.peers[peer];
+                p.sync.write().update_targets(&p.chain);
                 p.sync.write().on_peer_connected(&mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(client as PeerId)), client as PeerId);
             }
         }
@@ -179,7 +179,7 @@ impl TestNet {
 for peer in 0..self.peers.len() {
     if let Some(packet) = self.peers[peer].queue.pop_front() {
         let disconnecting = {
-            let mut p = self.peers.get_mut(packet.recipient).unwrap();
+            let mut p = &mut self.peers[packet.recipient];
             trace!("--- {} -> {} ---", peer, packet.recipient);
             let to_disconnect = {
                 let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId));
@@ -195,7 +195,7 @@ impl TestNet {
 };
 for d in &disconnecting {
     // notify other peers that this peer is disconnecting
-    let mut p = self.peers.get_mut(*d).unwrap();
+    let mut p = &mut self.peers[*d];
     let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId));
     p.sync.write().on_peer_aborting(&mut io, peer as PeerId);
 }

View File

@@ -507,7 +507,6 @@ mod tests {
 use std::io::{Read, Write, Error, Cursor, ErrorKind};
 use mio::{Ready};
 use std::collections::VecDeque;
-use util::bytes::*;
 use devtools::*;
 use io::*;

View File

@@ -555,10 +555,6 @@ impl Discovery {
 #[cfg(test)]
 mod tests {
 use super::*;
-use util::hash::*;
-use util::sha3::*;
-use std::net::*;
-use node_table::*;
 use std::str::FromStr;
 use rustc_serialize::hex::FromHex;
 use ethkey::{Random, Generator};

View File

@@ -333,7 +333,6 @@ mod test {
 use std::sync::Arc;
 use rustc_serialize::hex::FromHex;
 use super::*;
-use util::hash::*;
 use io::*;
 use mio::tcp::TcpStream;
 use stats::NetworkStats;

View File

@@ -358,8 +358,6 @@ pub fn is_valid_node_url(url: &str) -> bool {
 mod tests {
 use super::*;
 use std::str::FromStr;
-use std::net::*;
-use util::hash::*;
 use devtools::*;
 use AllowIP;

View File

@@ -33,6 +33,13 @@ macro_rules! vec_into {
     }
 }
+#[macro_export]
+macro_rules! slice_into {
+    ( $( $x:expr ),* ) => {
+        &[ $( $x.into() ),* ]
+    }
+}
 #[macro_export]
 macro_rules! hash_map {
     () => { HashMap::new() };
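The new `slice_into!` macro produces a borrowed slice with each element converted via `.into()`, which is why call sites that previously allocated a `Vec` just to borrow it (`&vec_into![...]`) now pass the slice directly, as in the histogram and client tests elsewhere in this commit. A hypothetical usage sketch, assuming the element type is fixed by context as it is in those tests:

// Illustrative only: both bindings rely on the annotation to drive `.into()`.
let bounds: &[U256] = slice_into![1, 2, 2];   // borrows a stack array, no Vec
let empty: &[U256] = slice_into![];           // zero-element case also works
assert_eq!(bounds.len(), 3);
assert!(empty.is_empty());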

View File

@@ -236,7 +236,6 @@ mod tests {
 use common::*;
 use super::*;
-use hashdb::*;
 use journaldb::traits::JournalDB;
 use kvdb::Database;

View File

@@ -556,7 +556,6 @@ mod tests {
 use common::*;
 use super::*;
 use super::super::traits::JournalDB;
-use hashdb::*;
 use log::init_log;
 use kvdb::{Database, DatabaseConfig};

View File

@@ -422,7 +422,6 @@ mod tests {
 use common::*;
 use super::*;
-use hashdb::*;
 use log::init_log;
 use journaldb::JournalDB;
 use kvdb::Database;

View File

@@ -217,7 +217,6 @@ mod tests {
 use common::*;
 use super::*;
 use super::super::traits::JournalDB;
-use hashdb::*;
 #[test]
 fn long_history() {

View File

@@ -628,7 +628,6 @@ impl Drop for Database {
 #[cfg(test)]
 mod tests {
-use hash::*;
 use super::*;
 use devtools::*;
 use std::str::FromStr;

View File

@@ -67,7 +67,7 @@ mod tests {
 #[test]
 fn check_histogram() {
-    let hist = Histogram::new(&vec_into![643,689,1408,2000,2296,2512,4250,4320,4842,4958,5804,6065,6098,6354,7002,7145,7845,8589,8593,8895], 5).unwrap();
+    let hist = Histogram::new(slice_into![643,689,1408,2000,2296,2512,4250,4320,4842,4958,5804,6065,6098,6354,7002,7145,7845,8589,8593,8895], 5).unwrap();
     let correct_bounds: Vec<U256> = vec_into![643, 2294, 3945, 5596, 7247, 8898];
     assert_eq!(Histogram { bucket_bounds: correct_bounds, counts: vec![4,2,4,6,4] }, hist);
 }
@@ -75,7 +75,7 @@ mod tests {
 #[test]
 fn smaller_data_range_than_bucket_range() {
     assert_eq!(
-        Histogram::new(&vec_into![1, 2, 2], 3),
+        Histogram::new(slice_into![1, 2, 2], 3),
         Some(Histogram { bucket_bounds: vec_into![1, 2, 3, 4], counts: vec![1, 2, 0] })
     );
 }
@@ -83,7 +83,7 @@ mod tests {
 #[test]
 fn data_range_is_not_multiple_of_bucket_range() {
     assert_eq!(
-        Histogram::new(&vec_into![1, 2, 5], 2),
+        Histogram::new(slice_into![1, 2, 5], 2),
         Some(Histogram { bucket_bounds: vec_into![1, 4, 7], counts: vec![2, 1] })
     );
 }
@@ -91,13 +91,13 @@ mod tests {
 #[test]
 fn data_range_is_multiple_of_bucket_range() {
     assert_eq!(
-        Histogram::new(&vec_into![1, 2, 6], 2),
+        Histogram::new(slice_into![1, 2, 6], 2),
         Some(Histogram { bucket_bounds: vec_into![1, 4, 7], counts: vec![2, 1] })
     );
 }
 #[test]
 fn none_when_too_few_data() {
-    assert!(Histogram::new(&vec_into![], 1).is_none());
+    assert!(Histogram::new(slice_into![], 1).is_none());
 }
 }