Fixing clippy warnings (#1660)

parent 340f0b6f58
commit 038862fa9d
@@ -390,7 +390,8 @@ impl BlockChain {
 				return Some(hash);
 			}
 		}
-		return None;
+
+		None
 	}

 	/// Set the cache configuration.
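
Note: the change above drops a trailing `return` in favour of the function's tail expression, which is what clippy's `needless_return` lint asks for. A minimal sketch of the same pattern on a hypothetical lookup function (not the actual BlockChain method):

fn find_even(values: &[u64]) -> Option<u64> {
	for &v in values {
		if v % 2 == 0 {
			// Early returns inside control flow are fine.
			return Some(v);
		}
	}
	// needless_return: the last expression is written without `return`,
	// so `None` replaces `return None;`.
	None
}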
@@ -55,11 +55,11 @@ impl From<ethjson::spec::EthashParams> for EthashParams {
 			difficulty_bound_divisor: p.difficulty_bound_divisor.into(),
 			duration_limit: p.duration_limit.into(),
 			block_reward: p.block_reward.into(),
-			registrar: p.registrar.map(Into::into).unwrap_or(Address::new()),
-			frontier_compatibility_mode_limit: p.frontier_compatibility_mode_limit.map(Into::into).unwrap_or(0),
-			dao_hardfork_transition: p.dao_hardfork_transition.map(Into::into).unwrap_or(0x7fffffffffffffff),
-			dao_hardfork_beneficiary: p.dao_hardfork_beneficiary.map(Into::into).unwrap_or(Address::new()),
-			dao_hardfork_accounts: p.dao_hardfork_accounts.unwrap_or(vec![]).into_iter().map(Into::into).collect(),
+			registrar: p.registrar.map_or_else(Address::new, Into::into),
+			frontier_compatibility_mode_limit: p.frontier_compatibility_mode_limit.map_or(0, Into::into),
+			dao_hardfork_transition: p.dao_hardfork_transition.map_or(0x7fffffffffffffff, Into::into),
+			dao_hardfork_beneficiary: p.dao_hardfork_beneficiary.map_or_else(Address::new, Into::into),
+			dao_hardfork_accounts: p.dao_hardfork_accounts.unwrap_or_else(Vec::new).into_iter().map(Into::into).collect(),
 		}
 	}
 }
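
Note: these rewrites fold `map(..).unwrap_or(..)` into `map_or`/`map_or_else` and make allocating defaults lazy (`unwrap_or_else(Vec::new)` instead of `unwrap_or(vec![])`), the shape clippy's option and `or_fun_call` style lints suggest. A small sketch with hypothetical types, not the spec params above:

fn label(opt: Option<u32>) -> String {
	// Before: opt.map(|n| n.to_string()).unwrap_or(String::new())
	// map_or_else takes the default lazily, so String::new() only runs
	// when `opt` is None.
	opt.map_or_else(String::new, |n| n.to_string())
}

fn double_or_zero(opt: Option<u64>) -> u64 {
	// A cheap Copy default can stay eager: plain map_or is enough.
	opt.map_or(0, |n| n * 2)
}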
@@ -141,7 +141,7 @@ impl Engine for Ethash {
 		// TODO: enable trigger function maybe?
 		// if block.fields().header.gas_limit <= 4_000_000.into() {
 			let mut state = block.fields_mut().state;
-			for child in self.ethash_params.dao_hardfork_accounts.iter() {
+			for child in &self.ethash_params.dao_hardfork_accounts {
 				let b = state.balance(child);
 				state.transfer_balance(child, &self.ethash_params.dao_hardfork_beneficiary, &b);
 			}
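
Note: iterating `&collection` instead of calling `.iter()` is the form clippy's `explicit_iter_loop` lint prefers; both yield the same shared-reference iterator. A tiny sketch over a hypothetical balance list:

fn total(balances: &[u64]) -> u64 {
	let mut sum = 0;
	// Before: for b in balances.iter() { .. }
	for b in balances {
		sum += *b;
	}
	sum
}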
@@ -61,7 +61,7 @@ fn attempt_migrate(mut key_h: H256, val: &[u8]) -> Option<H256> {
 	}
 }

-/// Version for ArchiveDB.
+/// Version for `ArchiveDB`.
 #[derive(Default)]
 pub struct ArchiveV7(usize);

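Note: the backticks let rustdoc render the type name as inline code; clippy's `doc_markdown` lint flags bare identifiers in doc comments. A tiny sketch on a hypothetical sibling struct:

// doc_markdown would flag a bare identifier:
//   /// Version for ArchiveDB.
// The backticked form renders as code and passes the lint:
/// Version for `ArchiveDB`.
#[derive(Default)]
pub struct ArchiveV7Sketch(usize);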
@@ -72,7 +72,7 @@ pub fn take_snapshot(client: &BlockChainClient, mut path: PathBuf, state_db: &Ha

 	let mut manifest_file = try!(File::create(&path));

-	try!(manifest_file.write_all(&manifest_data.to_rlp()));
+	try!(manifest_file.write_all(&manifest_data.into_rlp()));

 	Ok(())
 }
@@ -287,7 +287,7 @@ pub struct ManifestData {

 impl ManifestData {
 	/// Encode the manifest data to rlp.
-	pub fn to_rlp(self) -> Bytes {
+	pub fn into_rlp(self) -> Bytes {
 		let mut stream = RlpStream::new_list(5);
 		stream.append(&self.state_hashes);
 		stream.append(&self.block_hashes);
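
Note: `to_rlp` takes `self` by value and consumes it, so under the naming conventions clippy's `wrong_self_convention` lint checks it should be `into_rlp`; `to_*` is reserved for borrowing conversions. The same rename appears in several hunks below. A sketch of the distinction on a hypothetical wrapper type:

struct Payload(Vec<u8>);

impl Payload {
	// Borrows self and returns a copy: `to_*` naming.
	fn to_bytes(&self) -> Vec<u8> {
		self.0.clone()
	}

	// Consumes self: `into_*` naming, matching the rename above.
	fn into_bytes(self) -> Vec<u8> {
		self.0
	}
}

fn demo() {
	let p = Payload(vec![1, 2, 3]);
	let copied = p.to_bytes();  // p is still usable afterwards
	let moved = p.into_bytes(); // p is moved here
	assert_eq!(copied, moved);
}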
@@ -581,7 +581,7 @@ fn wait_for_exit(

 	// Wait for signal
 	let mutex = Mutex::new(());
-	let _ = exit.wait(&mut mutex.lock());
+	exit.wait(&mut mutex.lock());
 	info!("Finishing work, please wait...");
 }

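
Note: the `wait` call here evidently returns `()` (the binding is removed entirely), so `let _ =` was pure noise of the kind clippy's unit-binding lints flag; the call becomes a plain statement. A sketch with a hypothetical unit-returning call:

fn record(done: &mut Vec<String>) {
	// Before: let _ = done.push("done".to_owned());
	// push returns (), so binding the result adds nothing.
	done.push("done".to_owned());
}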
@@ -19,12 +19,14 @@ use std::sync::Arc;
 use ethcore::client::{ChainNotify, BlockChainClient};
 use ethcore;

+pub type Modules = (Arc<SyncProvider>, Arc<ManageNetwork>, Arc<ChainNotify>);
+
 #[cfg(feature="ipc")]
 pub fn sync(
 	sync_cfg: SyncConfig,
 	net_cfg: NetworkConfiguration,
 	client: Arc<BlockChainClient>)
-	-> Result<(Arc<SyncProvider>, Arc<ManageNetwork>, Arc<ChainNotify>), ethcore::error::Error>
+	-> Result<Modules, ethcore::error::Error>
 {
 }

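
Note: pulling the three-`Arc` tuple into a `Modules` type alias is the usual way to quiet clippy's `type_complexity` warning, and it keeps the two `sync` signatures in step. A self-contained sketch, written against current Rust (hence `dyn`), with empty stand-in traits for the real `SyncProvider`, `ManageNetwork` and `ChainNotify`:

use std::sync::Arc;

trait SyncProvider {}
trait ManageNetwork {}
trait ChainNotify {}

// One alias instead of repeating the tuple in every signature.
type Modules = (Arc<dyn SyncProvider>, Arc<dyn ManageNetwork>, Arc<dyn ChainNotify>);

fn describe(_modules: &Modules) -> &'static str {
	"sync provider + network manager + chain notifier"
}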
@@ -33,8 +35,8 @@ pub fn sync(
 	sync_cfg: SyncConfig,
 	net_cfg: NetworkConfiguration,
 	client: Arc<BlockChainClient>)
-	-> Result<(Arc<SyncProvider>, Arc<ManageNetwork>, Arc<ChainNotify>), ethcore::error::Error>
+	-> Result<Modules, ethcore::error::Error>
 {
-	let eth_sync = try!(EthSync::new(sync_cfg, client, net_cfg).map_err(|e| ethcore::error::Error::Util(e)));
+	let eth_sync = try!(EthSync::new(sync_cfg, client, net_cfg).map_err(ethcore::error::Error::Util));
 	Ok((eth_sync.clone() as Arc<SyncProvider>, eth_sync.clone() as Arc<ManageNetwork>, eth_sync.clone() as Arc<ChainNotify>))
 }
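
Note: `|e| ethcore::error::Error::Util(e)` only forwards its argument, so the tuple-variant constructor can be passed to `map_err` directly; that is clippy's `redundant_closure` lint. A sketch with hypothetical local types standing in for ethcore's:

#[derive(Debug)]
enum Error {
	Util(String),
}

fn connect(ok: bool) -> Result<(), String> {
	if ok { Ok(()) } else { Err("no peers".to_owned()) }
}

fn start(ok: bool) -> Result<(), Error> {
	// Before: connect(ok).map_err(|e| Error::Util(e))
	// A tuple-variant constructor is itself a function, so it can be passed as-is.
	connect(ok).map_err(Error::Util)
}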
@@ -81,6 +81,7 @@ macro_rules! map_into {

 #[macro_export]
 macro_rules! flush {
+	($arg:expr) => ($crate::flush($arg.into()));
 	($($arg:tt)*) => ($crate::flush(format!("{}", format_args!($($arg)*))));
 }

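Note: the extra `($arg:expr)` arm appears to let a single value that already converts `Into` the flushed type skip the `format!` round-trip; calls with a format string and arguments still fall through to the second arm. A sketch of the same two-arm pattern with a local stand-in for `$crate::flush` (its real signature is not shown in this diff, so the `String` parameter here is an assumption):

fn flush(s: String) {
	// Stand-in for $crate::flush; assumed to take a String.
	print!("{}", s);
}

macro_rules! flush {
	// Single expression convertible into the target type: no format! needed.
	($arg:expr) => (flush($arg.into()));
	// Everything else keeps the original format_args! path.
	($($arg:tt)*) => (flush(format!("{}", format_args!($($arg)*))));
}

fn main() {
	flush!("plain text");     // first arm, &str -> String via Into
	flush!("{} of {}", 1, 2); // second arm, formatted
}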
@@ -33,7 +33,7 @@ impl IoHandler<NetworkIoMessage> for HostHandler {
 	fn message(&self, _io: &IoContext<NetworkIoMessage>, message: &NetworkIoMessage) {
 		if let NetworkIoMessage::NetworkStarted(ref public_url) = *message {
 			let mut url = self.public_url.write();
-			if url.as_ref().map(|uref| uref != public_url).unwrap_or(true) {
+			if url.as_ref().map_or(true, |uref| uref != public_url) {
 				info!(target: "network", "Public node URL: {}", Colour::White.bold().paint(public_url.as_ref()));
 			}
 			*url = Some(public_url.to_owned());
@@ -122,7 +122,7 @@ impl Node {

 	// encode a node to RLP
 	// TODO: parallelize
-	fn to_rlp<F>(self, mut child_cb: F) -> ElasticArray1024<u8>
+	fn into_rlp<F>(self, mut child_cb: F) -> ElasticArray1024<u8>
 		where F: FnMut(NodeHandle, &mut RlpStream)
 	{
 		match self {
@@ -183,7 +183,7 @@ enum InsertAction {
 }

 impl InsertAction {
-	fn as_action(self) -> Action {
+	fn into_action(self) -> Action {
 		match self {
 			InsertAction::Replace(n) => Action::Replace(n),
 			InsertAction::Restore(n) => Action::Restore(n),
@@ -442,13 +442,14 @@ impl<'a> TrieDBMut<'a> {
 		};
 		let stored = self.storage.destroy(h);
 		let (new_stored, changed) = self.inspect(stored, move |trie, stored| {
-			trie.insert_inspector(stored, partial, value).as_action()
+			trie.insert_inspector(stored, partial, value).into_action()
 		}).expect("Insertion never deletes.");

 		(self.storage.alloc(new_stored), changed)
 	}

 	/// the insertion inspector.
+	#[cfg_attr(feature = "dev", allow(cyclomatic_complexity))]
 	fn insert_inspector(&mut self, node: Node, partial: NibbleSlice, value: Bytes) -> InsertAction {
 		trace!(target: "trie", "augmented (partial: {:?}, value: {:?})", partial, value.pretty());

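
Note: the `cfg_attr` wrapper applies `allow(cyclomatic_complexity)` only when the `dev` feature is enabled, which appears to be the feature this codebase uses for its clippy builds; ordinary compilers never see the clippy-specific lint name. A sketch of the same gating on a hypothetical function:

// Only "dev" (clippy) builds see the allow; normal builds skip the attribute,
// so the clippy-only lint name never reaches a stock compiler.
#[cfg_attr(feature = "dev", allow(cyclomatic_complexity))]
fn classify(x: u32) -> u32 {
	// Imagine many nested branches here pushing the complexity score up.
	match x {
		0 => 1,
		n if n < 10 => n * 2,
		n => n,
	}
}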
@@ -819,7 +820,7 @@ impl<'a> TrieDBMut<'a> {

 		match self.storage.destroy(handle) {
 			Stored::New(node) => {
-				let root_rlp = node.to_rlp(|child, stream| self.commit_node(child, stream));
+				let root_rlp = node.into_rlp(|child, stream| self.commit_node(child, stream));
 				*self.root = self.db.insert(&root_rlp[..]);
 				self.hash_count += 1;

@@ -842,7 +843,7 @@ impl<'a> TrieDBMut<'a> {
 			NodeHandle::InMemory(h) => match self.storage.destroy(h) {
 				Stored::Cached(_, h) => stream.append(&h),
 				Stored::New(node) => {
-					let node_rlp = node.to_rlp(|child, stream| self.commit_node(child, stream));
+					let node_rlp = node.into_rlp(|child, stream| self.commit_node(child, stream));
 					if node_rlp.len() >= 32 {
						let hash = self.db.insert(&node_rlp[..]);
 						self.hash_count += 1;