Merge pull request #688 from ethcore/updating_clippy

Updating clippy
Marek Kotewicz 2016-03-13 10:07:44 +01:00
commit 1f8e0f86ac
17 changed files with 103 additions and 51 deletions

Cargo.lock (generated, 12 lines changed)

@@ -2,7 +2,7 @@
name = "parity"
version = "0.9.99"
dependencies = [
"clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
"ctrlc 1.1.1 (git+https://github.com/tomusdrw/rust-ctrlc.git)",
"daemonize 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"docopt 0.6.78 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -94,7 +94,7 @@ dependencies = [
[[package]]
name = "clippy"
version = "0.0.49"
version = "0.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"regex-syntax 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -207,7 +207,7 @@ dependencies = [
name = "ethcore"
version = "0.9.99"
dependencies = [
"clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"ethash 0.9.99",
@@ -233,7 +233,7 @@ dependencies = [
name = "ethcore-rpc"
version = "0.9.99"
dependencies = [
"clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
"ethash 0.9.99",
"ethcore 0.9.99",
"ethcore-util 0.9.99",
@@ -256,7 +256,7 @@ dependencies = [
"arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
"bigint 0.1.0",
"chrono 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -288,7 +288,7 @@ dependencies = [
name = "ethsync"
version = "0.9.99"
dependencies = [
"clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore 0.9.99",
"ethcore-util 0.9.99",


@@ -19,7 +19,7 @@ ctrlc = { git = "https://github.com/tomusdrw/rust-ctrlc.git" }
fdlimit = { path = "util/fdlimit" }
daemonize = "0.2"
number_prefix = "0.2"
-clippy = { version = "0.0.49", optional = true }
+clippy = { version = "0.0.50", optional = true }
ethcore = { path = "ethcore" }
ethcore-util = { path = "util" }
ethsync = { path = "sync" }


@@ -17,7 +17,7 @@ ethcore-util = { path = "../util" }
evmjit = { path = "../evmjit", optional = true }
ethash = { path = "../ethash" }
num_cpus = "0.2"
-clippy = { version = "0.0.49", optional = true }
+clippy = { version = "0.0.50", optional = true }
crossbeam = "0.1.5"
lazy_static = "0.1"
ethcore-devtools = { path = "../devtools" }


@@ -523,7 +523,7 @@ mod tests {
let engine = spec.to_engine().unwrap();
let mut config = BlockQueueConfig::default();
config.max_mem_use = super::MIN_MEM_LIMIT; // empty queue uses about 15000
-let mut queue = BlockQueue::new(config, Arc::new(engine), IoChannel::disconnected());
+let queue = BlockQueue::new(config, Arc::new(engine), IoChannel::disconnected());
assert!(!queue.queue_info().is_full());
let mut blocks = get_good_dummy_block_seq(50);
for b in blocks.drain(..) {
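The `let mut queue` to `let queue` change is the kind of fix rustc's `unused_mut` warning drives: the test only calls methods that take `&self` on the queue, so the binding never needs to be mutable. A trivial illustration of the warning's shape:

```rust
fn demo() {
    // `let mut v` would warn here: v is only read, never mutated.
    let v = vec![1u32, 2, 3];
    assert_eq!(v.iter().sum::<u32>(), 6);
}
```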


@@ -28,9 +28,15 @@ pub struct MemoryCache {
blooms: HashMap<BloomIndex, H2048>,
}
+impl Default for MemoryCache {
+fn default() -> Self {
+MemoryCache::new()
+}
+}
impl MemoryCache {
/// Default constructor for MemoryCache
-pub fn new() -> MemoryCache {
+pub fn new() -> Self {
MemoryCache { blooms: HashMap::new() }
}
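This hunk, and the near-identical ones below for `TestBlockChainClient`, `TestSetup`, `TestBlockChain`, `AccountService`, `TestSocket`, `TestConnection`, and `TestBrokenConnection`, all apply one recipe, most likely prompted by clippy's `new_without_default` lint: a public, argument-free `new()` should come with a `Default` impl, and delegating keeps a single construction path. A minimal sketch of the pattern, using an illustrative `Cache` type rather than any real type from the codebase:

```rust
use std::collections::HashMap;

/// Illustrative stand-in for types like MemoryCache.
pub struct Cache {
    entries: HashMap<u64, Vec<u8>>,
}

// The added impls delegate to `new()` so both entry points stay in sync.
impl Default for Cache {
    fn default() -> Self {
        Cache::new()
    }
}

impl Cache {
    /// Argument-free constructor; without the `Default` impl above,
    /// clippy's `new_without_default` lint fires on this.
    pub fn new() -> Self {
        Cache { entries: HashMap::new() }
    }
}
```

Note that each of these hunks also changes the constructor's return type from the concrete type name to `Self`, which reads identically in every impl block.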


@@ -57,9 +57,15 @@ pub enum EachBlockWith {
UncleAndTransaction
}
+impl Default for TestBlockChainClient {
+fn default() -> Self {
+TestBlockChainClient::new()
+}
+}
impl TestBlockChainClient {
/// Creates new test client.
-pub fn new() -> TestBlockChainClient {
+pub fn new() -> Self {
let mut client = TestBlockChainClient {
blocks: RwLock::new(HashMap::new()),


@@ -301,8 +301,14 @@ mod tests {
env_info: EnvInfo
}
+impl Default for TestSetup {
+fn default() -> Self {
+TestSetup::new()
+}
+}
impl TestSetup {
-fn new() -> TestSetup {
+fn new() -> Self {
TestSetup {
state: get_temp_state(),
engine: get_test_spec().to_engine().unwrap(),


@@ -20,6 +20,7 @@ use error::Error;
use header::Header;
use super::Verifier;
+#[allow(dead_code)]
pub struct NoopVerifier;
impl Verifier for NoopVerifier {
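The added `#[allow(dead_code)]` silences the dead-code warning for `NoopVerifier`, presumably because some build configurations never construct it. The attribute covers only the item that directly follows it:

```rust
// Keeps an intentionally unreferenced item from tripping dead_code,
// mirroring what the hunk above does for NoopVerifier.
#[allow(dead_code)]
struct PlaceholderVerifier;
```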


@@ -255,8 +255,14 @@ mod tests {
numbers: HashMap<BlockNumber, H256>,
}
+impl Default for TestBlockChain {
+fn default() -> Self {
+TestBlockChain::new()
+}
+}
impl TestBlockChain {
-pub fn new() -> TestBlockChain {
+pub fn new() -> Self {
TestBlockChain {
blocks: HashMap::new(),
numbers: HashMap::new(),


@@ -314,7 +314,7 @@ impl Configuration {
fn init_nodes(&self, spec: &Spec) -> Vec<String> {
let mut r = if self.args.flag_no_bootstrap { Vec::new() } else { spec.nodes().clone() };
if let Some(ref x) = self.args.flag_bootnodes {
r.extend(x.split(",").map(|s| Self::normalize_enode(s).unwrap_or_else(|| die!("{}: Invalid node address format given for a boot node.", s))));
r.extend(x.split(',').map(|s| Self::normalize_enode(s).unwrap_or_else(|| die!("{}: Invalid node address format given for a boot node.", s))));
}
r
}
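Swapping `split(",")` for `split(',')` here, and again in the RPC-server hunk below, matches clippy's `single_char_pattern` lint: a `char` pattern avoids the string-pattern machinery for a one-character separator. A small sketch with a hypothetical helper:

```rust
// Hypothetical helper, not from the codebase.
fn parse_list(raw: &str) -> Vec<String> {
    // A char pattern is what `single_char_pattern` suggests over ",".
    raw.split(',').map(|s| s.trim().to_owned()).collect()
}

fn main() {
    assert_eq!(parse_list("a, b"), vec!["a".to_owned(), "b".to_owned()]);
}
```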
@@ -327,7 +327,7 @@ impl Configuration {
let host = IpAddr::from_str(host).unwrap_or_else(|_| die!("Invalid host given with `--nat extip:{}`", host));
Some(SocketAddr::new(host, self.args.flag_port))
} else {
-listen_address.clone()
+listen_address
};
(listen_address, public_address)
}
@@ -388,12 +388,13 @@ impl Configuration {
}
if self.args.cmd_list {
println!("Known addresses:");
-for &(addr, _) in secret_store.accounts().unwrap().iter() {
+for &(addr, _) in &secret_store.accounts().unwrap() {
println!("{:?}", addr);
}
}
}
+#[cfg_attr(feature="dev", allow(useless_format))]
fn execute_client(&self) {
// Setup panic handler
let panic_handler = PanicHandler::new_in_arc();
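Two idioms appear in the hunk above: iterating over `&collection` instead of calling `.iter()` explicitly (clippy's `explicit_iter_loop`), and the `cfg_attr` line, which applies the `useless_format` allowance only when the `dev` feature, and thus clippy, is compiled in. A hedged sketch; the function name and tuple type are illustrative:

```rust
fn list_addresses(accounts: &[(u64, String)]) {
    // `for x in &collection` is the shape `explicit_iter_loop` prefers
    // over an explicit `collection.iter()` call.
    for &(addr, _) in accounts {
        println!("{:?}", addr);
    }
}

// Gating an allow behind `cfg_attr` keeps it out of normal builds;
// `dead_code` stands in here for the clippy-only `useless_format`.
#[cfg_attr(feature = "dev", allow(dead_code))]
fn helper() {}
```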
@@ -406,7 +407,11 @@
let spec = self.spec();
let net_settings = self.net_settings(&spec);
let mut sync_config = SyncConfig::default();
-sync_config.network_id = self.args.flag_networkid.as_ref().map(|id| U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --networkid", id))).unwrap_or(spec.network_id());
+sync_config.network_id = self.args.flag_networkid.as_ref().map_or(spec.network_id(), |id| {
+U256::from_str(id).unwrap_or_else(|_| {
+die!("{}: Invalid index given with --networkid", id)
+})
+});
// Build client
let mut client_config = ClientConfig::default();
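The `--networkid` handling is rewritten from `map(...).unwrap_or(...)` into a single `map_or(...)`, the shape clippy's `option_map_unwrap_or` lint asks for, with a multi-line closure keeping the die-on-parse-failure behavior readable. A reduced sketch, assuming a plain `u64` id in place of `U256`:

```rust
fn network_id(flag: Option<&str>, default: u64) -> u64 {
    // One combinator instead of the map(...).unwrap_or(...) chain.
    flag.map_or(default, |id| {
        id.parse::<u64>()
            .unwrap_or_else(|_| panic!("{}: invalid --networkid", id))
    })
}

fn main() {
    assert_eq!(network_id(None, 1), 1);
    assert_eq!(network_id(Some("42"), 1), 42);
}
```

The same rewrite shows up in the journal-DB hunk further down, where `map(|j| j.len()).unwrap_or(0)` becomes `map_or(0, |j| j.len())`.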
@@ -421,8 +426,7 @@
}
}
client_config.pruning = match self.args.flag_pruning.as_str() {
"" => journaldb::Algorithm::Archive,
"archive" => journaldb::Algorithm::Archive,
"" | "archive" => journaldb::Algorithm::Archive,
"pruned" => journaldb::Algorithm::EarlyMerge,
"fast" => journaldb::Algorithm::OverlayRecent,
// "slow" => journaldb::Algorithm::RefCounted, // TODO: @gavofyork uncomment this once ref-count algo is merged.
@@ -452,7 +456,7 @@
let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors);
// TODO: use this as the API list.
let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis);
-let server_handler = setup_rpc_server(service.client(), sync.clone(), account_service.clone(), &url, cors, apis.split(",").collect());
+let server_handler = setup_rpc_server(service.client(), sync.clone(), account_service.clone(), &url, cors, apis.split(',').collect());
if let Some(handler) = server_handler {
panic_handler.forward_from(handler.deref());
}


@@ -18,7 +18,7 @@ ethcore-util = { path = "../util" }
ethcore = { path = "../ethcore" }
ethash = { path = "../ethash" }
ethsync = { path = "../sync" }
-clippy = { version = "0.0.49", optional = true }
+clippy = { version = "0.0.50", optional = true }
rustc-serialize = "0.3"
transient-hashmap = "0.1"
serde_macros = { version = "0.7.0", optional = true }


@@ -17,7 +17,6 @@
use util::numbers::*;
use ethcore::transaction::{LocalizedTransaction, Action};
use v1::types::{Bytes, OptionalValue};
-use serde::Error;
#[derive(Debug, Default, Serialize)]
pub struct Transaction {


@@ -10,7 +10,7 @@ authors = ["Ethcore <admin@ethcore.io>"]
[dependencies]
ethcore-util = { path = "../util" }
ethcore = { path = "../ethcore" }
-clippy = { version = "0.0.49", optional = true }
+clippy = { version = "0.0.50", optional = true }
log = "0.3"
env_logger = "0.3"
time = "0.1.34"


@@ -27,7 +27,7 @@ crossbeam = "0.2"
slab = "0.1"
sha3 = { path = "sha3" }
serde = "0.7.0"
-clippy = { version = "0.0.49", optional = true }
+clippy = { version = "0.0.50", optional = true }
json-tests = { path = "json-tests" }
rustc_version = "0.1.0"
igd = "0.4.2"


@@ -33,14 +33,14 @@ use super::JournalDB;
/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
/// the removals actually take effect.
///
-/// There are two memory overlays:
-/// - Transaction overlay contains current transaction data. It is merged with with history
+/// There are two memory overlays:
+/// - Transaction overlay contains current transaction data. It is merged with with history
/// overlay on each `commit()`
-/// - History overlay contains all data inserted during the history period. When the node
+/// - History overlay contains all data inserted during the history period. When the node
/// in the overlay becomes ancient it is written to disk on `commit()`
///
-/// There is also a journal maintained in memory and on the disk as well which lists insertions
-/// and removals for each commit during the history period. This is used to track
+/// There is also a journal maintained in memory and on the disk as well which lists insertions
+/// and removals for each commit during the history period. This is used to track
/// data nodes that go out of history scope and must be written to disk.
///
/// Commit workflow:
@@ -50,12 +50,12 @@
/// 3. Clear the transaction overlay.
/// 4. For a canonical journal record that becomes ancient inserts its insertions into the disk DB
/// 5. For each journal record that goes out of the history scope (becomes ancient) remove its
-/// insertions from the history overlay, decreasing the reference counter and removing entry if
+/// insertions from the history overlay, decreasing the reference counter and removing entry if
/// if reaches zero.
-/// 6. For a canonical journal record that becomes ancient delete its removals from the disk only if
+/// 6. For a canonical journal record that becomes ancient delete its removals from the disk only if
/// the removed key is not present in the history overlay.
/// 7. Delete ancient record from memory and disk.
///
pub struct OverlayRecentDB {
transaction_overlay: MemoryDB,
backing: Arc<Database>,
@@ -223,7 +223,7 @@ impl JournalDB for OverlayRecentDB {
let mut tx = self.transaction_overlay.drain();
let inserted_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c > 0 { Some(k.clone()) } else { None }).collect();
let removed_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c < 0 { Some(k.clone()) } else { None }).collect();
-// Increase counter for each inserted key no matter if the block is canonical or not.
+// Increase counter for each inserted key no matter if the block is canonical or not.
let insertions = tx.drain().filter_map(|(k, (v, c))| if c > 0 { Some((k, v)) } else { None });
r.append(id);
r.begin_list(inserted_keys.len());
@@ -236,7 +236,7 @@
r.append(&removed_keys);
let mut k = RlpStream::new_list(3);
-let index = journal_overlay.journal.get(&now).map(|j| j.len()).unwrap_or(0);
+let index = journal_overlay.journal.get(&now).map_or(0, |j| j.len());
k.append(&now);
k.append(&index);
k.append(&&PADDING[..]);
@@ -345,14 +345,14 @@ impl HashDB for OverlayRecentDB {
self.lookup(key).is_some()
}
-fn insert(&mut self, value: &[u8]) -> H256 {
+fn insert(&mut self, value: &[u8]) -> H256 {
self.transaction_overlay.insert(value)
}
fn emplace(&mut self, key: H256, value: Bytes) {
-self.transaction_overlay.emplace(key, value);
+self.transaction_overlay.emplace(key, value);
}
-fn kill(&mut self, key: &H256) {
-self.transaction_overlay.kill(key);
+fn kill(&mut self, key: &H256) {
+self.transaction_overlay.kill(key);
}
}
@@ -749,7 +749,7 @@ mod tests {
assert!(jdb.can_reconstruct_refs());
assert!(!jdb.exists(&foo));
}
#[test]
fn reopen_test() {
let mut dir = ::std::env::temp_dir();
@@ -784,7 +784,7 @@
jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
}
#[test]
fn reopen_remove_three() {
init_log();
@@ -838,7 +838,7 @@
assert!(!jdb.exists(&foo));
}
}
#[test]
fn reopen_fork() {
let mut dir = ::std::env::temp_dir();


@@ -120,9 +120,15 @@ impl AccountProvider for AccountService {
}
}
+impl Default for AccountService {
+fn default() -> Self {
+AccountService::new()
+}
+}
impl AccountService {
/// New account service with the default location
-pub fn new() -> AccountService {
+pub fn new() -> Self {
let secret_store = RwLock::new(SecretStore::new());
secret_store.write().unwrap().try_import_existing();
AccountService {
@@ -568,7 +574,7 @@ mod tests {
let temp = RandomTempPath::create_dir();
let mut sstore = SecretStore::new_test(&temp);
let addr = sstore.new_account("test").unwrap();
-let _ok = sstore.unlock_account(&addr, "test").unwrap();
+sstore.unlock_account(&addr, "test").unwrap();
let secret = sstore.account_secret(&addr).unwrap();
let kp = KeyPair::from_secret(secret).unwrap();
assert_eq!(Address::from(kp.public().sha3()), addr);
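Dropping the `let _ok =` binding looks like a response to clippy's `let_unit_value` lint, assuming `unlock_account` returns `Result<(), _>` so that `unwrap()` yields `()`. A sketch:

```rust
fn unlock() -> Result<(), String> {
    Ok(())
}

fn main() {
    // was: let _ok = unlock().unwrap(); which binds the unit value ()
    unlock().unwrap(); // still panics on Err, which is the point in a test
}
```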


@@ -160,12 +160,12 @@ impl Connection {
}
}
-/// Get socket token
+/// Get socket token
pub fn token(&self) -> StreamToken {
self.token
}
-/// Replace socket token
+/// Replace socket token
pub fn set_token(&mut self, token: StreamToken) {
self.token = token;
}
@@ -261,13 +261,13 @@ pub struct EncryptedConnection {
}
impl EncryptedConnection {
-/// Get socket token
+/// Get socket token
pub fn token(&self) -> StreamToken {
self.connection.token
}
-/// Replace socket token
+/// Replace socket token
pub fn set_token(&mut self, token: StreamToken) {
self.connection.set_token(token);
}
@@ -513,8 +513,14 @@ mod tests {
buf_size: usize,
}
+impl Default for TestSocket {
+fn default() -> Self {
+TestSocket::new()
+}
+}
impl TestSocket {
-fn new() -> TestSocket {
+fn new() -> Self {
TestSocket {
read_buffer: vec![],
write_buffer: vec![],
@@ -593,8 +599,14 @@
type TestConnection = GenericConnection<TestSocket>;
+impl Default for TestConnection {
+fn default() -> Self {
+TestConnection::new()
+}
+}
impl TestConnection {
-pub fn new() -> TestConnection {
+pub fn new() -> Self {
TestConnection {
token: 999998888usize,
socket: TestSocket::new(),
@@ -609,8 +621,14 @@
type TestBrokenConnection = GenericConnection<TestBrokenSocket>;
+impl Default for TestBrokenConnection {
+fn default() -> Self {
+TestBrokenConnection::new()
+}
+}
impl TestBrokenConnection {
-pub fn new() -> TestBrokenConnection {
+pub fn new() -> Self {
TestBrokenConnection {
token: 999998888usize,
socket: TestBrokenSocket { error: "test broken socket".to_owned() },