diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs
index fcea93b29..ee92e5628 100644
--- a/ethcore/src/blockchain/blockchain.rs
+++ b/ethcore/src/blockchain/blockchain.rs
@@ -390,7 +390,8 @@ impl BlockChain {
 				return Some(hash);
 			}
 		}
-		return None;
+
+		None
 	}
 
 	/// Set the cache configuration.
diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs
index 22d0b8a02..802b68e82 100644
--- a/ethcore/src/ethereum/ethash.rs
+++ b/ethcore/src/ethereum/ethash.rs
@@ -43,7 +43,7 @@ pub struct EthashParams {
 	pub dao_hardfork_transition: u64,
 	/// DAO hard-fork refund contract address (C).
 	pub dao_hardfork_beneficiary: Address,
-	/// DAO hard-fork DAO accounts list (L) 
+	/// DAO hard-fork DAO accounts list (L)
 	pub dao_hardfork_accounts: Vec<Address>,
 }
 
@@ -55,11 +55,11 @@ impl From<ethjson::spec::EthashParams> for EthashParams {
 			difficulty_bound_divisor: p.difficulty_bound_divisor.into(),
 			duration_limit: p.duration_limit.into(),
 			block_reward: p.block_reward.into(),
-			registrar: p.registrar.map(Into::into).unwrap_or(Address::new()),
-			frontier_compatibility_mode_limit: p.frontier_compatibility_mode_limit.map(Into::into).unwrap_or(0),
-			dao_hardfork_transition: p.dao_hardfork_transition.map(Into::into).unwrap_or(0x7fffffffffffffff),
-			dao_hardfork_beneficiary: p.dao_hardfork_beneficiary.map(Into::into).unwrap_or(Address::new()),
-			dao_hardfork_accounts: p.dao_hardfork_accounts.unwrap_or(vec![]).into_iter().map(Into::into).collect(),
+			registrar: p.registrar.map_or_else(Address::new, Into::into),
+			frontier_compatibility_mode_limit: p.frontier_compatibility_mode_limit.map_or(0, Into::into),
+			dao_hardfork_transition: p.dao_hardfork_transition.map_or(0x7fffffffffffffff, Into::into),
+			dao_hardfork_beneficiary: p.dao_hardfork_beneficiary.map_or_else(Address::new, Into::into),
+			dao_hardfork_accounts: p.dao_hardfork_accounts.unwrap_or_else(Vec::new).into_iter().map(Into::into).collect(),
 		}
 	}
 }
@@ -131,7 +131,7 @@ impl Engine for Ethash {
 		if header.number >= self.ethash_params.dao_hardfork_transition &&
 			header.number <= self.ethash_params.dao_hardfork_transition + 9 {
 			header.extra_data = b"dao-hard-fork"[..].to_owned();
-		} 
+		}
 		header.note_dirty();
 //		info!("ethash: populate_from_parent #{}: difficulty={} and gas_limit={}", header.number, header.difficulty, header.gas_limit);
 	}
@@ -141,7 +141,7 @@
 			// TODO: enable trigger function maybe?
 //			if block.fields().header.gas_limit <= 4_000_000.into() {
 				let mut state = block.fields_mut().state;
-				for child in self.ethash_params.dao_hardfork_accounts.iter() {
+				for child in &self.ethash_params.dao_hardfork_accounts {
 					let b = state.balance(child);
 					state.transfer_balance(child, &self.ethash_params.dao_hardfork_beneficiary, &b);
 				}
@@ -199,8 +199,8 @@
 
 		if header.gas_limit > 0x7fffffffffffffffu64.into() {
 			return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: None, max: Some(0x7fffffffffffffffu64.into()), found: header.gas_limit })));
-		} 
-		
+		}
+
 		Ok(())
 	}
 
diff --git a/ethcore/src/migrations/state/v7.rs b/ethcore/src/migrations/state/v7.rs
index 15b457d32..faa289bd7 100644
--- a/ethcore/src/migrations/state/v7.rs
+++ b/ethcore/src/migrations/state/v7.rs
@@ -61,7 +61,7 @@ fn attempt_migrate(mut key_h: H256, val: &[u8]) -> Option<H256> {
 	}
 }
 
-/// Version for ArchiveDB.
+/// Version for `ArchiveDB`.
 #[derive(Default)]
 pub struct ArchiveV7(usize);
 
diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs
index aff16b86e..6a9d4ba76 100644
--- a/ethcore/src/snapshot/mod.rs
+++ b/ethcore/src/snapshot/mod.rs
@@ -72,7 +72,7 @@ pub fn take_snapshot(client: &BlockChainClient, mut path: PathBuf, state_db: &Ha
 
 	let mut manifest_file = try!(File::create(&path));
 
-	try!(manifest_file.write_all(&manifest_data.to_rlp()));
+	try!(manifest_file.write_all(&manifest_data.into_rlp()));
 
 	Ok(())
 }
@@ -287,7 +287,7 @@ pub struct ManifestData {
 
 impl ManifestData {
 	/// Encode the manifest data to rlp.
-	pub fn to_rlp(self) -> Bytes {
+	pub fn into_rlp(self) -> Bytes {
 		let mut stream = RlpStream::new_list(5);
 		stream.append(&self.state_hashes);
 		stream.append(&self.block_hashes);
@@ -414,4 +414,4 @@ fn rebuild_account_trie(db: &mut HashDB, account_chunk: &[&[u8]], out_chunk: &mu
 		*out = (hash, thin_rlp);
 	}
 	Ok(())
-}
\ No newline at end of file
+}
diff --git a/parity/main.rs b/parity/main.rs
index ba1535689..a8fd63d2a 100644
--- a/parity/main.rs
+++ b/parity/main.rs
@@ -581,7 +581,7 @@ fn wait_for_exit(
 
 	// Wait for signal
 	let mutex = Mutex::new(());
-	let _ = exit.wait(&mut mutex.lock());
+	exit.wait(&mut mutex.lock());
 	info!("Finishing work, please wait...");
 }
 
diff --git a/parity/modules.rs b/parity/modules.rs
index e0960dc6e..75a15e913 100644
--- a/parity/modules.rs
+++ b/parity/modules.rs
@@ -19,12 +19,14 @@
 use std::sync::Arc;
 use ethcore::client::{ChainNotify, BlockChainClient};
 use ethcore;
+pub type Modules = (Arc<SyncProvider>, Arc<ManageNetwork>, Arc<ChainNotify>);
+
 #[cfg(feature="ipc")]
 pub fn sync(
 	sync_cfg: SyncConfig,
 	net_cfg: NetworkConfiguration,
 	client: Arc<BlockChainClient>)
-	-> Result<(Arc<SyncProvider>, Arc<ManageNetwork>, Arc<ChainNotify>), ethcore::error::Error>
+	-> Result<Modules, ethcore::error::Error>
 {
 }
 
@@ -33,8 +35,8 @@ pub fn sync(
 	sync_cfg: SyncConfig,
 	net_cfg: NetworkConfiguration,
 	client: Arc<BlockChainClient>)
-	-> Result<(Arc<SyncProvider>, Arc<ManageNetwork>, Arc<ChainNotify>), ethcore::error::Error>
+	-> Result<Modules, ethcore::error::Error>
 {
-	let eth_sync = try!(EthSync::new(sync_cfg, client, net_cfg).map_err(|e| ethcore::error::Error::Util(e)));
+	let eth_sync = try!(EthSync::new(sync_cfg, client, net_cfg).map_err(ethcore::error::Error::Util));
 	Ok((eth_sync.clone() as Arc<SyncProvider>, eth_sync.clone() as Arc<ManageNetwork>, eth_sync.clone() as Arc<ChainNotify>))
 }
diff --git a/util/src/common.rs b/util/src/common.rs
index 941b5b0a6..0d3466dff 100644
--- a/util/src/common.rs
+++ b/util/src/common.rs
@@ -81,6 +81,7 @@ macro_rules! map_into {
 
 #[macro_export]
 macro_rules! flush {
+	($arg:expr) => ($crate::flush($arg.into()));
 	($($arg:tt)*) => ($crate::flush(format!("{}", format_args!($($arg)*))));
 }
 
diff --git a/util/src/network/service.rs b/util/src/network/service.rs
index df9c50ef4..10f8d3e90 100644
--- a/util/src/network/service.rs
+++ b/util/src/network/service.rs
@@ -33,7 +33,7 @@ impl IoHandler for HostHandler {
 	fn message(&self, _io: &IoContext, message: &NetworkIoMessage) {
 		if let NetworkIoMessage::NetworkStarted(ref public_url) = *message {
 			let mut url = self.public_url.write();
-			if url.as_ref().map(|uref| uref != public_url).unwrap_or(true) {
+			if url.as_ref().map_or(true, |uref| uref != public_url) {
 				info!(target: "network", "Public node URL: {}", Colour::White.bold().paint(public_url.as_ref()));
 			}
 			*url = Some(public_url.to_owned());
diff --git a/util/src/trie/triedbmut.rs b/util/src/trie/triedbmut.rs
index 8a3d1b9ba..c50a66cc8 100644
--- a/util/src/trie/triedbmut.rs
+++ b/util/src/trie/triedbmut.rs
@@ -122,7 +122,7 @@ impl Node {
 
 	// encode a node to RLP
 	// TODO: parallelize
-	fn to_rlp<F>(self, mut child_cb: F) -> ElasticArray1024<u8>
+	fn into_rlp<F>(self, mut child_cb: F) -> ElasticArray1024<u8>
 		where F: FnMut(NodeHandle, &mut RlpStream)
 	{
 		match self {
@@ -183,7 +183,7 @@ enum InsertAction {
 }
 
 impl InsertAction {
-	fn as_action(self) -> Action {
+	fn into_action(self) -> Action {
 		match self {
 			InsertAction::Replace(n) => Action::Replace(n),
 			InsertAction::Restore(n) => Action::Restore(n),
@@ -442,13 +442,14 @@ impl<'a> TrieDBMut<'a> {
 		};
 		let stored = self.storage.destroy(h);
 		let (new_stored, changed) = self.inspect(stored, move |trie, stored| {
-			trie.insert_inspector(stored, partial, value).as_action()
+			trie.insert_inspector(stored, partial, value).into_action()
 		}).expect("Insertion never deletes.");
 
 		(self.storage.alloc(new_stored), changed)
 	}
 
 	/// the insertion inspector.
+	#[cfg_attr(feature = "dev", allow(cyclomatic_complexity))]
 	fn insert_inspector(&mut self, node: Node, partial: NibbleSlice, value: Bytes) -> InsertAction {
 		trace!(target: "trie", "augmented (partial: {:?}, value: {:?})", partial, value.pretty());
 
@@ -819,7 +820,7 @@ impl<'a> TrieDBMut<'a> {
 
 		match self.storage.destroy(handle) {
 			Stored::New(node) => {
-				let root_rlp = node.to_rlp(|child, stream| self.commit_node(child, stream));
+				let root_rlp = node.into_rlp(|child, stream| self.commit_node(child, stream));
 				*self.root = self.db.insert(&root_rlp[..]);
 				self.hash_count += 1;
 
@@ -842,7 +843,7 @@ impl<'a> TrieDBMut<'a> {
 			NodeHandle::InMemory(h) => match self.storage.destroy(h) {
 				Stored::Cached(_, h) => stream.append(&h),
 				Stored::New(node) => {
-					let node_rlp = node.to_rlp(|child, stream| self.commit_node(child, stream));
+					let node_rlp = node.into_rlp(|child, stream| self.commit_node(child, stream));
 					if node_rlp.len() >= 32 {
 						let hash = self.db.insert(&node_rlp[..]);
 						self.hash_count += 1;
@@ -1257,4 +1258,4 @@ mod tests {
 		assert!(t.is_empty());
 		assert_eq!(*t.root(), SHA3_NULL_RLP);
 	}
-}
\ No newline at end of file
+}
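
Note on the recurring pattern in these hunks: `map(..).unwrap_or(..)` chains become `map_or` / `map_or_else`, and `unwrap_or(vec![])` becomes `unwrap_or_else(Vec::new)`, so the default value is only built when it is actually needed; the `to_rlp` -> `into_rlp` and `as_action` -> `into_action` renames follow the Rust convention that `into_`-prefixed conversions consume `self`. A minimal standalone sketch of the `Option` combinator change (illustrative only, not code from this repository; the variable names are made up):

    fn main() {
        let registrar: Option<&str> = None;

        // Eager default: the empty String is allocated even when `registrar` is Some.
        let old_style = registrar.map(|s| s.to_string()).unwrap_or(String::new());

        // Lazy default: `String::new` only runs when `registrar` is None.
        let new_style = registrar.map_or_else(String::new, |s| s.to_string());
        assert_eq!(old_style, new_style);

        // For a cheap default a plain `map_or` is enough.
        let limit: Option<u64> = Some(42);
        assert_eq!(limit.map_or(0, |n| n + 1), 43);

        // `unwrap_or_else(Vec::new)` mirrors the dao_hardfork_accounts change.
        let accounts: Option<Vec<u8>> = None;
        assert!(accounts.unwrap_or_else(Vec::new).is_empty());
    }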