diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs
index 6d5b9f25f..c5e1ca003 100644
--- a/ethcore/src/block.rs
+++ b/ethcore/src/block.rs
@@ -154,7 +154,7 @@ impl ExecutedBlock {
     }
 }
-/// Trait for a object that is_a `ExecutedBlock`.
+/// Trait for an object that is an `ExecutedBlock`.
 pub trait IsBlock {
     /// Get the block associated with this object.
     fn block(&self) -> &ExecutedBlock;
@@ -192,7 +192,7 @@ pub struct OpenBlock<'x> {
     last_hashes: LastHashes,
 }
-/// Just like OpenBlock, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields,
+/// Just like `OpenBlock`, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields,
 /// and collected the uncles.
 ///
 /// There is no function available to push a transaction.
@@ -204,7 +204,7 @@ pub struct ClosedBlock {
     unclosed_state: State,
 }
-/// Just like ClosedBlock except that we can't reopen it and it's faster.
+/// Just like `ClosedBlock` except that we can't reopen it and it's faster.
 ///
 /// We actually store the post-`Engine::on_close_block` state, unlike in `ClosedBlock` where it's the pre.
 #[derive(Clone)]
@@ -216,14 +216,15 @@ pub struct LockedBlock {
 /// A block that has a valid seal.
 ///
-/// The block's header has valid seal arguments. The block cannot be reversed into a ClosedBlock or OpenBlock.
+/// The block's header has valid seal arguments. The block cannot be reversed into a `ClosedBlock` or `OpenBlock`.
 pub struct SealedBlock {
     block: ExecutedBlock,
     uncle_bytes: Bytes,
 }
 impl<'x> OpenBlock<'x> {
-    /// Create a new OpenBlock ready for transaction pushing.
+    #[cfg_attr(feature="dev", allow(too_many_arguments))]
+    /// Create a new `OpenBlock` ready for transaction pushing.
     pub fn new(engine: &'x Engine, tracing: bool, db: Box, parent: &Header, last_hashes: LastHashes, author: Address, gas_floor_target: U256, extra_data: Bytes) -> Self {
         let mut r = OpenBlock {
             block: ExecutedBlock::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce()), tracing),
@@ -319,7 +320,7 @@ impl<'x> OpenBlock<'x> {
         }
     }
-    /// Turn this into a `ClosedBlock`. A BlockChain must be provided in order to figure out the uncles.
+    /// Turn this into a `ClosedBlock`. A `BlockChain` must be provided in order to figure out the uncles.
     pub fn close(self) -> ClosedBlock {
         let mut s = self;
@@ -454,6 +455,7 @@ impl IsBlock for SealedBlock {
 }
 /// Enact the block given by block header, transactions and uncles
+#[cfg_attr(feature="dev", allow(too_many_arguments))]
 pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, tracing: bool, db: Box, parent: &Header, last_hashes: LastHashes) -> Result {
     {
         if ::log::max_log_level() >= ::log::LogLevel::Trace {
diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs
index 042df1dc1..4a52d6a6b 100644
--- a/ethcore/src/block_queue.rs
+++ b/ethcore/src/block_queue.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see .
-//! A queue of blocks. Sits between network or other I/O and the BlockChain.
+//! A queue of blocks. Sits between network or other I/O and the `BlockChain`.
 //! Sorts them ready for blockchain insertion.
 use std::thread::{JoinHandle, self};
 use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};
@@ -89,7 +89,7 @@ impl BlockQueueInfo {
     }
 }
-/// A queue of blocks. Sits between network or other I/O and the BlockChain.
+/// A queue of blocks. Sits between network or other I/O and the `BlockChain`.
 /// Sorts them ready for blockchain insertion.
 pub struct BlockQueue {
     panic_handler: Arc,
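The two `#[cfg_attr(feature="dev", allow(too_many_arguments))]` additions above silence clippy's argument-count lint only when the crate is built with the `dev` feature, which is what enables the clippy plugin in this code base; in a normal build the attribute expands to nothing. A minimal sketch of the pattern (the function name and its arguments are made up for illustration, not taken from the PR):

// Only when built with `--features dev` (and clippy enabled) does this
// expand to #[allow(too_many_arguments)]; otherwise it is a no-op.
#[cfg_attr(feature = "dev", allow(too_many_arguments))]
fn open_block(engine: u32, tracing: bool, author: u64, gas_floor_target: u64,
              extra_data: Vec<u8>, parent: u64, last_hashes: Vec<u64>, timestamp: u64) -> u64 {
    // Body elided; the point is the attribute above, not the arguments.
    let _ = (tracing, gas_floor_target, extra_data, parent, last_hashes);
    engine as u64 + author + timestamp
}

fn main() {
    assert_eq!(open_block(1, true, 2, 3, vec![], 4, vec![], 5), 8);
}

Note that in the diff the attribute sits above a `///` doc comment; doc comments are themselves attributes, so the relative order does not matter to rustc.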
diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs
index 43920708b..ebbae306e 100644
--- a/ethcore/src/blockchain/blockchain.rs
+++ b/ethcore/src/blockchain/blockchain.rs
@@ -427,6 +427,7 @@ impl BlockChain {
         }
     }
+    #[cfg_attr(feature="dev", allow(similar_names))]
     /// Inserts the block into backing cache database.
     /// Expects the block to be valid and already verified.
     /// If the block is already known, does nothing.
@@ -855,6 +856,7 @@ impl BlockChain {
 #[cfg(test)]
 mod tests {
+    #![cfg_attr(feature="dev", allow(similar_names))]
     use std::str::FromStr;
     use rustc_serialize::hex::FromHex;
     use util::hash::*;
diff --git a/ethcore/src/chainfilter/indexer.rs b/ethcore/src/chainfilter/indexer.rs
index 524fab1a9..a10bb69d2 100644
--- a/ethcore/src/chainfilter/indexer.rs
+++ b/ethcore/src/chainfilter/indexer.rs
@@ -60,7 +60,7 @@ impl Indexer {
     }
     /// Return bloom which are dependencies for given index.
-    /// 
+    ///
     /// Bloom indexes are ordered from lowest to highest.
     pub fn lower_level_bloom_indexes(&self, index: &BloomIndex) -> Vec {
         // this is the lowest level
@@ -87,6 +87,7 @@ impl Indexer {
 #[cfg(test)]
 mod tests {
+    #![cfg_attr(feature="dev", allow(similar_names))]
     use chainfilter::BloomIndex;
     use chainfilter::indexer::Indexer;
diff --git a/ethcore/src/chainfilter/tests.rs b/ethcore/src/chainfilter/tests.rs
index 7dac29f11..560662829 100644
--- a/ethcore/src/chainfilter/tests.rs
+++ b/ethcore/src/chainfilter/tests.rs
@@ -23,7 +23,7 @@ use chainfilter::{BloomIndex, FilterDataSource, ChainFilter};
 /// In memory cache for blooms.
 ///
-/// Stores all blooms in HashMap, which indexes them by `BloomIndex`.
+/// Stores all blooms in `HashMap`, which indexes them by `BloomIndex`.
 pub struct MemoryCache {
     blooms: HashMap,
 }
diff --git a/ethcore/src/evm/evm.rs b/ethcore/src/evm/evm.rs
index c1107f003..b6c2debc5 100644
--- a/ethcore/src/evm/evm.rs
+++ b/ethcore/src/evm/evm.rs
@@ -44,7 +44,7 @@ pub enum Error {
         /// Invoked instruction
         instruction: &'static str,
         /// How many stack elements was requested by instruction
-        wanted: usize, 
+        wanted: usize,
         /// How many elements were on stack
         on_stack: usize
     },
@@ -64,8 +64,8 @@ pub enum Error {
 }
 /// Evm result.
-/// 
-/// Returns gas_left if execution is successful, otherwise error.
+///
+/// Returns `gas_left` if execution is successful, otherwise error.
 pub type Result = result::Result;
 /// Evm interface.
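Several of the hunks above add `#![cfg_attr(feature="dev", allow(similar_names))]` as an inner attribute at the top of a `mod tests`, scoping the allowance to the whole test module rather than a single item. A small sketch of the two placements, again assuming a crate with a `dev` feature that turns clippy on (toy names, not the real modules):

// Outer form: applies only to the item that follows.
#[cfg_attr(feature = "dev", allow(similar_names))]
pub mod ec {
    pub fn sign() -> u8 { 1 }
}

#[cfg(test)]
mod tests {
    // Inner form: must come first inside the module and covers every test in it.
    #![cfg_attr(feature = "dev", allow(similar_names))]

    #[test]
    fn bloom_indexes() {
        // Locals named this similarly are what `similar_names` complains about.
        let bloom_index_0 = 0u32;
        let bloom_index_1 = 1u32;
        assert!(bloom_index_0 < bloom_index_1);
    }
}

fn main() {
    assert_eq!(ec::sign(), 1);
}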
diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs
index 6e79d737e..bd0ce426f 100644
--- a/ethcore/src/verification/verification.rs
+++ b/ethcore/src/verification/verification.rs
@@ -55,7 +55,7 @@ pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Res
 /// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash.
 /// Still operates on a individual block
-/// Returns a PreverifiedBlock structure populated with transactions
+/// Returns a `PreverifiedBlock` structure populated with transactions
 pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) -> Result {
     try!(engine.verify_block_unordered(&header, Some(&bytes)));
     for u in Rlp::new(&bytes).at(2).iter().map(|rlp| rlp.as_val::()) {
@@ -279,7 +279,7 @@ mod tests {
     impl BlockProvider for TestBlockChain {
         fn have_tracing(&self) -> bool { false }
-        
+
         fn is_known(&self, hash: &H256) -> bool {
             self.blocks.contains_key(hash)
         }
@@ -331,6 +331,7 @@ mod tests {
     }
     #[test]
+    #[cfg_attr(feature="dev", allow(similar_names))]
     fn test_verify_block() {
         // Test against morden
         let mut good = Header::new();
diff --git a/miner/src/miner.rs b/miner/src/miner.rs
index 70bf1711a..ec821bf25 100644
--- a/miner/src/miner.rs
+++ b/miner/src/miner.rs
@@ -94,6 +94,7 @@ impl Miner {
     }
     /// Prepares new block for sealing including top transactions from queue.
+    #[cfg_attr(feature="dev", allow(match_same_arms))]
     fn prepare_sealing(&self, chain: &BlockChainClient) {
         trace!(target: "miner", "prepare_sealing: entering");
         let transactions = self.transaction_queue.lock().unwrap().top_transactions();
@@ -164,7 +165,7 @@ impl Miner {
             }
         );
         if let Some(block) = b {
-            if sealing_work.peek_last_ref().map(|pb| pb.block().fields().header.hash() != block.block().fields().header.hash()).unwrap_or(true) {
+            if sealing_work.peek_last_ref().map_or(true, |pb| pb.block().fields().header.hash() != block.block().fields().header.hash()) {
                 trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash());
                 sealing_work.push(block);
             }
@@ -200,7 +201,7 @@ impl MinerService for Miner {
     fn sensible_gas_price(&self) -> U256 {
         // 10% above our minimum.
-        self.transaction_queue.lock().unwrap().minimal_gas_price().clone() * x!(110) / x!(100)
+        *self.transaction_queue.lock().unwrap().minimal_gas_price() * x!(110) / x!(100)
     }
     fn author(&self) -> Address {
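The `miner.rs` hunk above replaces `map(..).unwrap_or(true)` with `map_or(true, ..)`, which clippy prefers; note that `map_or` takes the default first and the closure second. The `sensible_gas_price` change is related housekeeping: dereferencing the borrowed `U256` instead of calling `.clone()` on it. A standalone sketch of the `map_or` rewrite with made-up types:

fn differs_from_last(last_hash: Option<u64>, new_hash: u64) -> bool {
    // Before: last_hash.map(|h| h != new_hash).unwrap_or(true)
    // After: the default comes first, the mapping closure second.
    last_hash.map_or(true, |h| h != new_hash)
}

fn main() {
    assert!(differs_from_last(None, 7));     // nothing pending yet: treat as new work
    assert!(differs_from_last(Some(3), 7));  // pending block differs
    assert!(!differs_from_last(Some(7), 7)); // same block, nothing to push
}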
diff --git a/miner/src/transaction_queue.rs b/miner/src/transaction_queue.rs
index 659e1a663..46188e1d1 100644
--- a/miner/src/transaction_queue.rs
+++ b/miner/src/transaction_queue.rs
@@ -18,7 +18,7 @@
 //! Transaction Queue
 //!
-//! TransactionQueue keeps track of all transactions seen by the node (received from other peers) and own transactions
+//! `TransactionQueue` keeps track of all transactions seen by the node (received from other peers) and own transactions
 //! and orders them by priority. Top priority transactions are those with low nonce height (difference between
 //! transaction's nonce and next nonce expected from this sender). If nonces are equal transaction's gas price is used
 //! for comparison (higher gas price = higher priority).
@@ -179,7 +179,7 @@ impl VerifiedTransaction {
 /// Holds transactions accessible by (address, nonce) and by priority
 ///
-/// TransactionSet keeps number of entries below limit, but it doesn't
+/// `TransactionSet` keeps number of entries below limit, but it doesn't
 /// automatically happen during `insert/remove` operations.
 /// You have to call `enforce_limit` to remove lowest priority transactions from set.
 struct TransactionSet {
@@ -262,7 +262,7 @@ pub struct AccountDetails {
 /// Transactions with `gas > (gas_limit + gas_limit * Factor(in percents))` are not imported to the queue.
 const GAS_LIMIT_HYSTERESIS: usize = 10; // %
-/// TransactionQueue implementation
+/// `TransactionQueue` implementation
 pub struct TransactionQueue {
     /// Gas Price threshold for transactions that can be imported to this queue (defaults to 0)
     minimal_gas_price: U256,
diff --git a/parity/main.rs b/parity/main.rs
index c5e0dce54..f701ff97b 100644
--- a/parity/main.rs
+++ b/parity/main.rs
@@ -19,6 +19,7 @@
 #![warn(missing_docs)]
 #![cfg_attr(feature="dev", feature(plugin))]
 #![cfg_attr(feature="dev", plugin(clippy))]
+#![cfg_attr(feature="dev", allow(useless_format))]
 extern crate docopt;
 extern crate num_cpus;
 extern crate rustc_serialize;
@@ -361,9 +362,9 @@ impl Configuration {
             die!("{}: Invalid basic transaction price given in USD. Must be a decimal number.", self.args.flag_usd_per_tx)
         });
         let usd_per_eth = match self.args.flag_usd_per_eth.as_str() {
-            "etherscan" => price_info::PriceInfo::get().map(|x| x.ethusd).unwrap_or_else(|| {
+            "etherscan" => price_info::PriceInfo::get().map_or_else(|| {
                 die!("Unable to retrieve USD value of ETH from etherscan. Rerun with a different value for --usd-per-eth.")
-            }),
+            }, |x| x.ethusd),
             x => FromStr::from_str(x).unwrap_or_else(|_| die!("{}: Invalid ether price given in USD. Must be a decimal number.", x))
         };
         let wei_per_usd: f32 = 1.0e18 / usd_per_eth;
@@ -421,7 +422,6 @@ impl Configuration {
         }
     }
-    #[cfg_attr(feature="dev", allow(useless_format))]
     fn net_addresses(&self) -> (Option, Option) {
         let listen_address = Some(SocketAddr::new(IpAddr::from_str("0.0.0.0").unwrap(), self.args.flag_port));
         let public_address = if self.args.flag_nat.starts_with("extip:") {
@@ -450,7 +450,6 @@ impl Configuration {
         ret
     }
-    #[cfg_attr(feature="dev", allow(useless_format))]
     fn client_config(&self) -> ClientConfig {
         let mut client_config = ClientConfig::default();
         match self.args.flag_cache {
@@ -551,7 +550,6 @@ impl Configuration {
         account_service
     }
-    #[cfg_attr(feature="dev", allow(useless_format))]
     fn execute_client(&self) {
         // Setup panic handler
         let panic_handler = PanicHandler::new_in_arc();
diff --git a/parity/price_info.rs b/parity/price_info.rs
index 29e7505ee..405424b3d 100644
--- a/parity/price_info.rs
+++ b/parity/price_info.rs
@@ -19,8 +19,8 @@ impl PriceInfo {
         .and_then(|mut s| s.read_to_string(&mut body).ok())
         .and_then(|_| Json::from_str(&body).ok())
         .and_then(|json| json.find_path(&["result", "ethusd"])
-            .and_then(|obj| match obj {
-                &Json::String(ref s) => Some(PriceInfo {
+            .and_then(|obj| match *obj {
+                Json::String(ref s) => Some(PriceInfo {
                     ethusd: FromStr::from_str(&s).unwrap()
                 }),
                 _ => None
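In `parity/main.rs` the chained `map(..).unwrap_or_else(..)` becomes a single `map_or_else(..)`, and here too the argument order is the part that is easy to get wrong: the fallback closure comes first, the success closure second. A minimal sketch with a hypothetical price lookup standing in for `price_info::PriceInfo::get()`:

fn lookup_eth_price() -> Option<f32> {
    None // pretend the etherscan request failed
}

fn usd_per_eth(flag: &str) -> f32 {
    match flag {
        // Fallback closure first, then the closure applied to the Some value.
        "etherscan" => lookup_eth_price().map_or_else(|| 10.0, |price| price),
        other => other.parse().unwrap_or_else(|_| panic!("{}: invalid price", other)),
    }
}

fn main() {
    assert_eq!(usd_per_eth("etherscan"), 10.0);
    assert_eq!(usd_per_eth("12.5"), 12.5);
}

The `price_info.rs` hunk in the same group is another common clippy rewrite: matching on the dereferenced value with `match *obj { Json::String(ref s) => .. }` instead of matching a reference against `&Json::String(ref s)`.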
diff --git a/sync/src/chain.rs b/sync/src/chain.rs
index 16bc41b70..1a7d11f51 100644
--- a/sync/src/chain.rs
+++ b/sync/src/chain.rs
@@ -15,7 +15,7 @@
 // along with Parity. If not, see .
 ///
-/// BlockChain synchronization strategy.
+/// `BlockChain` synchronization strategy.
 /// Syncs to peers and keeps up to date.
 /// This implementation uses ethereum protocol v63
 ///
@@ -127,7 +127,7 @@ pub struct SyncStatus {
     pub protocol_version: u8,
     /// The underlying p2p network version.
     pub network_id: U256,
-    /// BlockChain height for the moment the sync started.
+    /// `BlockChain` height for the moment the sync started.
     pub start_block_number: BlockNumber,
     /// Last fully downloaded and imported block number (if any).
     pub last_imported_block_number: Option,
@@ -1292,12 +1292,12 @@ impl ChainSync {
     fn propagate_new_transactions(&mut self, io: &mut SyncIo) -> usize {
         // Early out of nobody to send to.
-        if self.peers.len() == 0 {
+        if self.peers.is_empty() {
             return 0;
         }
         let mut packet = RlpStream::new_list(self.transactions_to_send.len());
-        for tx in self.transactions_to_send.iter() {
+        for tx in &self.transactions_to_send {
             packet.append_raw(tx, 1);
         }
         self.transactions_to_send.clear();
@@ -1312,7 +1312,7 @@ impl ChainSync {
                 .collect::>();
             // taking at max of MAX_PEERS_PROPAGATION
-            lucky_peers.iter().map(|&id| id.clone()).take(min(lucky_peers.len(), MAX_PEERS_PROPAGATION)).collect::>()
+            lucky_peers.iter().cloned().take(min(lucky_peers.len(), MAX_PEERS_PROPAGATION)).collect::>()
         };
         let sent = lucky_peers.len();
@@ -1701,8 +1701,8 @@ mod tests {
         let retracted_blocks = vec![client.block_hash_delta_minus(1)];
         // Add some balance to clients
-        for h in vec![good_blocks[0], retracted_blocks[0]] {
-            let block = client.block(BlockId::Hash(h)).unwrap();
+        for h in &[good_blocks[0], retracted_blocks[0]] {
+            let block = client.block(BlockId::Hash(*h)).unwrap();
             let view = BlockView::new(&block);
             client.set_balance(view.transactions()[0].sender().unwrap(), U256::from(1_000_000_000));
         }
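The `sync/chain.rs` hunks are three more mechanical rewrites: `is_empty()` instead of comparing `len()` with zero, iterating `&collection` instead of calling `.iter()`, and `.cloned()` instead of `.map(|x| x.clone())`. A self-contained sketch of all three, with toy peer and transaction types rather than the sync code:

fn propagate(peers: &[u64], transactions: &[Vec<u8>]) -> Vec<u64> {
    // `is_empty()` states the intent more directly than `len() == 0`.
    if peers.is_empty() {
        return Vec::new();
    }
    // Iterating `&slice` is the idiomatic spelling of `slice.iter()`.
    for tx in transactions {
        assert!(!tx.is_empty());
    }
    // `.cloned()` replaces `.map(|id| id.clone())` on an iterator of references.
    peers.iter().cloned().take(4).collect()
}

fn main() {
    let peers = vec![1, 2, 3, 4, 5];
    let txs = vec![vec![0u8, 1], vec![2, 3]];
    assert_eq!(propagate(&peers, &txs), vec![1, 2, 3, 4]);
}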
diff --git a/util/src/crypto.rs b/util/src/crypto.rs
index e9b3116fd..040db3bca 100644
--- a/util/src/crypto.rs
+++ b/util/src/crypto.rs
@@ -157,6 +157,7 @@ impl KeyPair {
 }
 /// EC functions
+#[cfg_attr(feature="dev", allow(similar_names))]
 pub mod ec {
     use numbers::*;
     use standard::*;
@@ -193,6 +194,7 @@ pub mod ec {
         }
         Ok(signature)
     }
+    /// Verify signature.
     pub fn verify(public: &Public, signature: &Signature, message: &H256) -> Result {
         use secp256k1::*;
@@ -233,6 +235,7 @@ pub mod ec {
 }
 /// ECDH functions
+#[cfg_attr(feature="dev", allow(similar_names))]
 pub mod ecdh {
     use crypto::*;
     use crypto::{self};
@@ -254,6 +257,7 @@ pub mod ecdh {
 }
 /// ECIES function
+#[cfg_attr(feature="dev", allow(similar_names))]
 pub mod ecies {
     use hash::*;
     use bytes::*;
diff --git a/util/src/hash.rs b/util/src/hash.rs
index b7fddbe8b..1b894d82f 100644
--- a/util/src/hash.rs
+++ b/util/src/hash.rs
@@ -392,7 +392,7 @@ macro_rules! impl_hash {
         }
     }
-    /// BitOr on references
+    /// `BitOr` on references
     impl<'a> BitOr for &'a $from {
         type Output = $from;
@@ -408,7 +408,7 @@ macro_rules! impl_hash {
         }
     }
-    /// Moving BitOr
+    /// Moving `BitOr`
     impl BitOr for $from {
         type Output = $from;
@@ -417,7 +417,7 @@ macro_rules! impl_hash {
         }
     }
-    /// BitAnd on references
+    /// `BitAnd` on references
    impl <'a> BitAnd for &'a $from {
         type Output = $from;
@@ -433,7 +433,7 @@ macro_rules! impl_hash {
         }
     }
-    /// Moving BitAnd
+    /// Moving `BitAnd`
     impl BitAnd for $from {
         type Output = $from;
@@ -442,7 +442,7 @@ macro_rules! impl_hash {
         }
     }
-    /// BitXor on references
+    /// `BitXor` on references
    impl <'a> BitXor for &'a $from {
         type Output = $from;
@@ -458,7 +458,7 @@ macro_rules! impl_hash {
         }
     }
-    /// Moving BitXor
+    /// Moving `BitXor`
     impl BitXor for $from {
         type Output = $from;
diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs
index 9a0ac5e43..380e8e423 100644
--- a/util/src/journaldb/archivedb.rs
+++ b/util/src/journaldb/archivedb.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see .
-//! Disk-backed HashDB implementation.
+//! Disk-backed `HashDB` implementation.
 use common::*;
 use rlp::*;
@@ -25,11 +25,11 @@ use kvdb::{Database, DBTransaction, DatabaseConfig};
 #[cfg(test)]
 use std::env;
-/// Implementation of the HashDB trait for a disk-backed database with a memory overlay
+/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay
 /// and latent-removal semantics.
 ///
-/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
-/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
+/// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to
+/// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect
 /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
 /// the removals actually take effect.
 pub struct ArchiveDB {
@@ -176,6 +176,7 @@ impl JournalDB for ArchiveDB {
 #[cfg(test)]
 mod tests {
     #![cfg_attr(feature="dev", allow(blacklisted_name))]
+    #![cfg_attr(feature="dev", allow(similar_names))]
     use common::*;
     use super::*;
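Most of the remaining hunks, here and in the groups below, are the same `doc_markdown` cleanup: bare type names such as HashDB or OverlayDB inside doc comments get backticks so rustdoc renders them as code and clippy stops flagging them. A trivial sketch (the type is hypothetical):

/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay.
///
/// Like `OverlayDB`, there is a memory overlay; `commit()` must be called to flush it to disk.
pub struct ArchiveDbSketch;

fn main() {
    let _db = ArchiveDbSketch;
}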
diff --git a/util/src/journaldb/earlymergedb.rs b/util/src/journaldb/earlymergedb.rs
index 6279d6f40..eada4bbaa 100644
--- a/util/src/journaldb/earlymergedb.rs
+++ b/util/src/journaldb/earlymergedb.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see .
-//! Disk-backed HashDB implementation.
+//! Disk-backed `HashDB` implementation.
 use common::*;
 use rlp::*;
@@ -53,11 +53,11 @@ enum RemoveFrom {
     Archive,
 }
-/// Implementation of the HashDB trait for a disk-backed database with a memory overlay
+/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay
 /// and latent-removal semantics.
 ///
-/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
-/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
+/// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to
+/// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect
 /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
 /// the removals actually take effect.
 pub struct EarlyMergeDB {
@@ -528,6 +528,7 @@ impl JournalDB for EarlyMergeDB {
 #[cfg(test)]
 mod tests {
     #![cfg_attr(feature="dev", allow(blacklisted_name))]
+    #![cfg_attr(feature="dev", allow(similar_names))]
     use common::*;
     use super::*;
diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs
index e73c12969..f65aebde1 100644
--- a/util/src/journaldb/mod.rs
+++ b/util/src/journaldb/mod.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see .
-//! JournalDB interface and implementation.
+//! `JournalDB` interface and implementation.
 use common::*;
@@ -25,7 +25,7 @@ mod earlymergedb;
 mod overlayrecentdb;
 mod refcounteddb;
-/// Export the JournalDB trait.
+/// Export the `JournalDB` trait.
 pub use self::traits::JournalDB;
 /// A journal database algorithm.
@@ -70,7 +70,7 @@ impl fmt::Display for Algorithm {
     }
 }
-/// Create a new JournalDB trait object.
+/// Create a new `JournalDB` trait object.
 pub fn new(path: &str, algorithm: Algorithm) -> Box {
     match algorithm {
         Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)),
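The `journaldb/mod.rs` hunk above documents a small factory: an `Algorithm` enum is matched to decide which concrete database to box up behind the `JournalDB` trait. A compressed sketch of that shape with stand-in types (the real constructors take a database path plus more configuration):

trait JournalDb {
    fn name(&self) -> &'static str;
}

struct ArchiveDb;
struct OverlayRecentDb;

impl JournalDb for ArchiveDb {
    fn name(&self) -> &'static str { "archive" }
}

impl JournalDb for OverlayRecentDb {
    fn name(&self) -> &'static str { "fast" }
}

enum Algorithm {
    Archive,
    OverlayRecent,
}

/// Create a new `JournalDb` trait object for the chosen algorithm.
fn new_journal_db(_path: &str, algorithm: Algorithm) -> Box<dyn JournalDb> {
    match algorithm {
        Algorithm::Archive => Box::new(ArchiveDb),
        Algorithm::OverlayRecent => Box::new(OverlayRecentDb),
    }
}

fn main() {
    assert_eq!(new_journal_db("/tmp/db", Algorithm::Archive).name(), "archive");
    assert_eq!(new_journal_db("/tmp/db", Algorithm::OverlayRecent).name(), "fast");
}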
diff --git a/util/src/journaldb/overlayrecentdb.rs b/util/src/journaldb/overlayrecentdb.rs
index 31b68f802..0b9ad4fda 100644
--- a/util/src/journaldb/overlayrecentdb.rs
+++ b/util/src/journaldb/overlayrecentdb.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see .
-//! JournalDB over in-memory overlay
+//! `JournalDB` over in-memory overlay
 use common::*;
 use rlp::*;
@@ -25,11 +25,11 @@ use kvdb::{Database, DBTransaction, DatabaseConfig};
 use std::env;
 use super::JournalDB;
-/// Implementation of the JournalDB trait for a disk-backed database with a memory overlay
+/// Implementation of the `JournalDB` trait for a disk-backed database with a memory overlay
 /// and, possibly, latent-removal semantics.
 ///
-/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
-/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
+/// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to
+/// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect
 /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
 /// the removals actually take effect.
 ///
@@ -359,6 +359,7 @@ impl HashDB for OverlayRecentDB {
 #[cfg(test)]
 mod tests {
     #![cfg_attr(feature="dev", allow(blacklisted_name))]
+    #![cfg_attr(feature="dev", allow(similar_names))]
     use common::*;
     use super::*;
diff --git a/util/src/journaldb/refcounteddb.rs b/util/src/journaldb/refcounteddb.rs
index 20e1efb3f..e69eccab7 100644
--- a/util/src/journaldb/refcounteddb.rs
+++ b/util/src/journaldb/refcounteddb.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see .
-//! Disk-backed, ref-counted JournalDB implementation.
+//! Disk-backed, ref-counted `JournalDB` implementation.
 use common::*;
 use rlp::*;
@@ -25,11 +25,11 @@ use kvdb::{Database, DBTransaction, DatabaseConfig};
 #[cfg(test)]
 use std::env;
-/// Implementation of the HashDB trait for a disk-backed database with a memory overlay
+/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay
 /// and latent-removal semantics.
 ///
-/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
-/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
+/// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to
+/// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect
 /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
 /// the removals actually take effect.
 pub struct RefCountedDB {
@@ -195,6 +195,7 @@ impl JournalDB for RefCountedDB {
 #[cfg(test)]
 mod tests {
     #![cfg_attr(feature="dev", allow(blacklisted_name))]
+    #![cfg_attr(feature="dev", allow(similar_names))]
     use common::*;
     use super::*;
diff --git a/util/src/journaldb/traits.rs b/util/src/journaldb/traits.rs
index afc6ab89a..b1ba27957 100644
--- a/util/src/journaldb/traits.rs
+++ b/util/src/journaldb/traits.rs
@@ -14,12 +14,12 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see .
-//! Disk-backed HashDB implementation.
+//! Disk-backed `HashDB` implementation.
 use common::*;
 use hashdb::*;
-/// A HashDB which can manage a short-term journal potentially containing many forks of mutually
+/// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually
 /// exclusive actions.
 pub trait JournalDB : HashDB + Send + Sync {
     /// Return a copy of ourself, in a box.
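`journaldb/traits.rs` above declares `pub trait JournalDB : HashDB + Send + Sync`, i.e. every journal database must also behave as a `HashDB` and be safe to hand across threads. A minimal sketch of how such supertrait bounds play out for a shared trait object (toy traits, not the real API):

use std::sync::Arc;
use std::thread;

trait HashDb {
    fn contains(&self, key: u64) -> bool;
}

// Supertraits: anything implementing JournalDb must also implement HashDb
// and be Send + Sync, so a boxed or Arc'd instance can cross threads.
trait JournalDb: HashDb + Send + Sync {
    fn commit(&self) -> usize;
}

struct MemDb;

impl HashDb for MemDb {
    fn contains(&self, key: u64) -> bool { key % 2 == 0 }
}

impl JournalDb for MemDb {
    fn commit(&self) -> usize { 0 }
}

fn main() {
    let db: Arc<dyn JournalDb> = Arc::new(MemDb);
    let db2 = db.clone();
    // The Send + Sync supertraits are what make this spawn legal.
    let handle = thread::spawn(move || db2.contains(4) && db2.commit() == 0);
    assert!(handle.join().unwrap());
    assert!(db.contains(2));
}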
diff --git a/util/src/keys/directory.rs b/util/src/keys/directory.rs
index a92bf4593..082a7f427 100644
--- a/util/src/keys/directory.rs
+++ b/util/src/keys/directory.rs
@@ -326,7 +326,7 @@ fn uuid_from_string(s: &str) -> Result {
 #[derive(Clone)]
-/// Stored key file struct with encrypted message (cipher_text)
+/// Stored key file struct with encrypted message (`cipher_text`)
 /// also contains password derivation function settings (PBKDF2/Scrypt)
 pub struct KeyFileContent {
     version: KeyFileVersion,
@@ -369,9 +369,9 @@ enum KeyFileParseError {
 }
 impl KeyFileContent {
-    /// New stored key file struct with encrypted message (cipher_text)
+    /// New stored key file struct with encrypted message (`cipher_text`)
     /// also contains password derivation function settings (PBKDF2/Scrypt)
-    /// to decrypt cipher_text given the password is provided.
+    /// to decrypt `cipher_text` given the password is provided.
     pub fn new(crypto: KeyFileCrypto) -> KeyFileContent {
         KeyFileContent {
             id: new_uuid(),
diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs
index df5c2c448..9de71bd35 100644
--- a/util/src/kvdb.rs
+++ b/util/src/kvdb.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see .
-//! Key-Value store abstraction with RocksDB backend.
+//! Key-Value store abstraction with `RocksDB` backend.
 use std::default::Default;
 use rocksdb::{DB, Writable, WriteBatch, IteratorMode, DBVector, DBIterator,
diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs
index 0d4f8b2c9..cfd7237e6 100644
--- a/util/src/memorydb.rs
+++ b/util/src/memorydb.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see .
-//! Reference-counted memory-based HashDB implementation.
+//! Reference-counted memory-based `HashDB` implementation.
 use hash::*;
 use bytes::*;
@@ -27,7 +27,7 @@ use std::collections::HashMap;
 use std::default::Default;
 #[derive(Debug,Clone)]
-/// Reference-counted memory-based HashDB implementation.
+/// Reference-counted memory-based `HashDB` implementation.
 ///
 /// Use `new()` to create a new database. Insert items with `insert()`, remove items
 /// with `remove()`, check for existence with `containce()` and lookup a hash to derive
diff --git a/util/src/network/connection.rs b/util/src/network/connection.rs
index 02c0e2cde..a3a42b44e 100644
--- a/util/src/network/connection.rs
+++ b/util/src/network/connection.rs
@@ -223,7 +223,7 @@ pub enum WriteStatus {
     Complete
 }
-/// RLPx packet
+/// `RLPx` packet
 pub struct Packet {
     pub protocol: u16,
     pub data: Bytes,
@@ -237,7 +237,7 @@ enum EncryptedConnectionState {
     Payload,
 }
-/// Connection implementing RLPx framing
+/// Connection implementing `RLPx` framing
 /// https://github.com/ethereum/devp2p/blob/master/rlpx.md#framing
 pub struct EncryptedConnection {
     /// Underlying tcp connection
diff --git a/util/src/network/handshake.rs b/util/src/network/handshake.rs
index a72cc28ad..123531d8d 100644
--- a/util/src/network/handshake.rs
+++ b/util/src/network/handshake.rs
@@ -48,7 +48,7 @@ enum HandshakeState {
     StartSession,
 }
-/// RLPx protocol handhake. See https://github.com/ethereum/devp2p/blob/master/rlpx.md#encrypted-handshake
+/// `RLPx` protocol handshake. See https://github.com/ethereum/devp2p/blob/master/rlpx.md#encrypted-handshake
 pub struct Handshake {
     /// Remote node public key
     pub id: NodeId,
@@ -66,11 +66,11 @@ pub struct Handshake {
     pub remote_ephemeral: Public,
     /// Remote connection nonce.
     pub remote_nonce: H256,
-    /// Remote RLPx protocol version.
+    /// Remote `RLPx` protocol version.
     pub remote_version: u64,
-    /// A copy of received encryped auth packet 
+    /// A copy of received encrypted auth packet
    pub auth_cipher: Bytes,
-    /// A copy of received encryped ack packet 
+    /// A copy of received encrypted ack packet
    pub ack_cipher: Bytes,
     /// This Handshake is marked for deleteion flag
     pub expired: bool,
@@ -413,7 +413,7 @@ mod test {
     fn test_handshake_auth_plain() {
         let mut h = create_handshake(None);
         let secret = Secret::from_str("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291").unwrap();
-        let auth = 
+        let auth =
             "\
             048ca79ad18e4b0659fab4853fe5bc58eb83992980f4c9cc147d2aa31532efd29a3d3dc6a3d89eaf\
             913150cfc777ce0ce4af2758bf4810235f6e6ceccfee1acc6b22c005e9e3a49d6448610a58e98744\
@@ -434,7 +434,7 @@
     fn test_handshake_auth_eip8() {
         let mut h = create_handshake(None);
         let secret = Secret::from_str("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291").unwrap();
-        let auth = 
+        let auth =
             "\
             01b304ab7578555167be8154d5cc456f567d5ba302662433674222360f08d5f1534499d3678b513b\
             0fca474f3a514b18e75683032eb63fccb16c156dc6eb2c0b1593f0d84ac74f6e475f1b8d56116b84\
@@ -460,7 +460,7 @@
     fn test_handshake_auth_eip8_2() {
         let mut h = create_handshake(None);
         let secret = Secret::from_str("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291").unwrap();
-        let auth = 
+        let auth =
             "\
             01b8044c6c312173685d1edd268aa95e1d495474c6959bcdd10067ba4c9013df9e40ff45f5bfd6f7\
             2471f93a91b493f8e00abc4b80f682973de715d77ba3a005a242eb859f9a211d93a347fa64b597bf\
@@ -481,7 +481,7 @@
         h.read_auth_eip8(&secret, &auth[super::V4_AUTH_PACKET_SIZE..]).unwrap();
         assert_eq!(h.state, super::HandshakeState::StartSession);
         check_auth(&h, 56);
-        let ack = h.ack_cipher.clone(); 
+        let ack = h.ack_cipher.clone();
         let total = (((ack[0] as u16) << 8 | (ack[1] as u16)) as usize) + 2;
         assert_eq!(ack.len(), total);
     }
@@ -491,7 +491,7 @@
         let remote = Public::from_str("fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877").unwrap();
         let mut h = create_handshake(Some(&remote));
         let secret = Secret::from_str("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee").unwrap();
-        let ack = 
+        let ack =
             "\
             049f8abcfa9c0dc65b982e98af921bc0ba6e4243169348a236abe9df5f93aa69d99cadddaa387662\
             b0ff2c08e9006d5a11a278b1b3331e5aaabf0a32f01281b6f4ede0e09a2d5f585b26513cb794d963\
@@ -511,7 +511,7 @@
         let remote = Public::from_str("fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877").unwrap();
         let mut h = create_handshake(Some(&remote));
         let secret = Secret::from_str("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee").unwrap();
-        let ack = 
+        let ack =
             "\
             01ea0451958701280a56482929d3b0757da8f7fbe5286784beead59d95089c217c9b917788989470\
             b0e330cc6e4fb383c0340ed85fab836ec9fb8a49672712aeabbdfd1e837c1ff4cace34311cd7f4de\
@@ -540,7 +540,7 @@
         let remote = Public::from_str("fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877").unwrap();
         let mut h = create_handshake(Some(&remote));
         let secret = Secret::from_str("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee").unwrap();
-        let ack = 
+        let ack =
             "\
             01f004076e58aae772bb101ab1a8e64e01ee96e64857ce82b1113817c6cdd52c09d26f7b90981cd7\
             ae835aeac72e1573b8a0225dd56d157a010846d888dac7464baf53f2ad4e3d584531fa203658fab0\
diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs
index 63a935ca9..ce4e894c8 100644
--- a/util/src/overlaydb.rs
+++ b/util/src/overlaydb.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see .
-//! Disk-backed HashDB implementation.
+//! Disk-backed `HashDB` implementation.
 use error::*;
 use hash::*;
@@ -28,7 +28,7 @@ use std::env;
 use std::collections::HashMap;
 use kvdb::{Database, DBTransaction};
-/// Implementation of the HashDB trait for a disk-backed database with a memory overlay.
+/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay.
 ///
 /// The operations `insert()` and `remove()` take place on the memory overlay; batches of
 /// such operations may be flushed to the disk-backed DB with `commit()` or discarded with
diff --git a/util/src/rlp/bytes.rs b/util/src/rlp/bytes.rs
index e8bfa57b0..1145ba27e 100644
--- a/util/src/rlp/bytes.rs
+++ b/util/src/rlp/bytes.rs
@@ -153,7 +153,7 @@ impl ToBytes for T where T: FixedHash {
     fn to_bytes_len(&self) -> usize { self.bytes().len() }
 }
-/// Error returned when FromBytes conversation goes wrong
+/// Error returned when `FromBytes` conversion goes wrong
 #[derive(Debug, PartialEq, Eq)]
 pub enum FromBytesError {
     /// Expected more RLP data
@@ -174,7 +174,7 @@ impl fmt::Display for FromBytesError {
     }
 }
-/// Alias for the result of FromBytes trait
+/// Alias for the result of `FromBytes` trait
 pub type FromBytesResult = Result;
 /// Converts to given type from its bytes representation
diff --git a/util/src/trie/sectriedb.rs b/util/src/trie/sectriedb.rs
index 9f74e9917..3e74f8655 100644
--- a/util/src/trie/sectriedb.rs
+++ b/util/src/trie/sectriedb.rs
@@ -22,8 +22,8 @@ use super::triedb::*;
 use super::trietraits::*;
 /// A `Trie` implementation which hashes keys and uses a generic `HashDB` backing database.
-/// 
-/// Use it as a `Trie` trait object. You can use `raw()` to get the backing TrieDB object.
+///
+/// Use it as a `Trie` trait object. You can use `raw()` to get the backing `TrieDB` object.
 pub struct SecTrieDB<'db> {
     raw: TrieDB<'db>
 }
@@ -32,16 +32,16 @@ impl<'db> SecTrieDB<'db> {
     /// Create a new trie with the backing database `db` and empty `root`
     /// Initialise to the state entailed by the genesis block.
     /// This guarantees the trie is built correctly.
-    pub fn new(db: &'db HashDB, root: &'db H256) -> Self { 
+    pub fn new(db: &'db HashDB, root: &'db H256) -> Self {
         SecTrieDB { raw: TrieDB::new(db, root) }
     }
-    /// Get a reference to the underlying raw TrieDB struct.
+    /// Get a reference to the underlying raw `TrieDB` struct.
     pub fn raw(&self) -> &TrieDB {
         &self.raw
     }
-    /// Get a mutable reference to the underlying raw TrieDB struct.
+    /// Get a mutable reference to the underlying raw `TrieDB` struct.
     pub fn raw_mut(&mut self) -> &TrieDB {
         &mut self.raw
     }
diff --git a/util/src/trie/sectriedbmut.rs b/util/src/trie/sectriedbmut.rs
index 662f6852a..7e17610f8 100644
--- a/util/src/trie/sectriedbmut.rs
+++ b/util/src/trie/sectriedbmut.rs
@@ -22,8 +22,8 @@ use super::triedbmut::*;
 use super::trietraits::*;
 /// A mutable `Trie` implementation which hashes keys and uses a generic `HashDB` backing database.
-/// 
-/// Use it as a `Trie` or `TrieMut` trait object. You can use `raw()` to get the backing TrieDBMut object.
+///
+/// Use it as a `Trie` or `TrieMut` trait object. You can use `raw()` to get the backing `TrieDBMut` object.
 pub struct SecTrieDBMut<'db> {
     raw: TrieDBMut<'db>
 }
@@ -32,7 +32,7 @@ impl<'db> SecTrieDBMut<'db> {
     /// Create a new trie with the backing database `db` and empty `root`
     /// Initialise to the state entailed by the genesis block.
     /// This guarantees the trie is built correctly.
-    pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self { 
+    pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self {
         SecTrieDBMut { raw: TrieDBMut::new(db, root) }
     }