Fixing some clippy warnings
parent e1e7886918
commit 2b3d100de2
@@ -869,8 +869,8 @@ impl BlockChainClient for Client {
 	}

 	fn keep_alive(&self) {
-		let should_wake = match &*self.mode.lock() {
-			&Mode::Dark(..) | &Mode::Passive(..) => true,
+		let should_wake = match *self.mode.lock() {
+			Mode::Dark(..) | Mode::Passive(..) => true,
 			_ => false,
 		};
 		if should_wake {
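The hunk above dereferences once at the match site instead of prefixing every arm with `&`, the rewrite clippy's match-on-reference lint suggests. A minimal standalone sketch of the idiom, using an illustrative Mode enum rather than the real client type:

// Match on the dereferenced value; arms stay free of `&` patterns.
enum Mode {
	Dark(u64),
	Passive(u64, u64),
	Active,
}

fn should_wake(mode: &Mode) -> bool {
	// Before: `match mode { &Mode::Dark(..) | &Mode::Passive(..) => true, _ => false }`
	match *mode {
		Mode::Dark(..) | Mode::Passive(..) => true,
		_ => false,
	}
}

fn main() {
	assert!(should_wake(&Mode::Dark(5)));
	assert!(should_wake(&Mode::Passive(1, 2)));
	assert!(!should_wake(&Mode::Active));
}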
@@ -124,7 +124,7 @@ impl AuthorityRound {
 	}

 	fn step_proposer(&self, step: usize) -> &Address {
-		let ref p = self.our_params;
+		let p = &self.our_params;
 		p.authorities.get(step % p.authority_n).expect("There are authority_n authorities; taking number modulo authority_n gives number in authority_n range; qed")
 	}

@@ -211,7 +211,7 @@ impl Engine for AuthorityRound {
 	fn on_close_block(&self, _block: &mut ExecutedBlock) {}

 	fn is_sealer(&self, author: &Address) -> Option<bool> {
-		let ref p = self.our_params;
+		let p = &self.our_params;
 		Some(p.authorities.contains(author))
 	}

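Both AuthorityRound hunks swap `let ref` bindings for explicit borrows, which is what clippy's `toplevel_ref_arg` lint asks for. A self-contained sketch with a hypothetical Params struct, not the engine's real parameter type:

struct Params {
	authorities: Vec<String>,
}

fn first_authority(params: &Params) -> &str {
	// Before (flagged by `toplevel_ref_arg`): `let ref p = params.authorities;`
	// After: take the reference explicitly.
	let p = &params.authorities;
	p[0].as_str()
}

fn main() {
	let params = Params { authorities: vec!["one".to_string()] };
	assert_eq!(first_authority(&params), "one");
}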
@@ -197,19 +197,17 @@ impl<Gas: CostType> Gasometer<Gas> {
 				let address = u256_to_address(stack.peek(1));
 				let is_value_transfer = !stack.peek(2).is_zero();

-				if instruction == instructions::CALL {
-					if (
-						!schedule.no_empty && !ext.exists(&address)
-					) || (
-						schedule.no_empty && is_value_transfer && !ext.exists_and_not_null(&address)
-					) {
-						gas = overflowing!(gas.overflow_add(schedule.call_new_account_gas.into()));
-					}
-				};
+				if instruction == instructions::CALL && (
+					(!schedule.no_empty && !ext.exists(&address))
+					||
+					(schedule.no_empty && is_value_transfer && !ext.exists_and_not_null(&address))
+				) {
+					gas = overflowing!(gas.overflow_add(schedule.call_new_account_gas.into()));
+				}

 				if is_value_transfer {
 					gas = overflowing!(gas.overflow_add(schedule.call_value_transfer_gas.into()));
 				};
 			}

 			let requested = *stack.peek(0);
@@ -347,7 +345,7 @@ fn test_mem_gas_cost() {
 	let result = gasometer.mem_gas_cost(&schedule, current_mem_size, &mem_size);

 	// then
-	if let Ok(_) = result {
+	if result.is_ok() {
 		assert!(false, "Should fail with OutOfGas");
 	}
 }
@@ -103,7 +103,7 @@ impl Ext for FakeExt {
 	}

 	fn balance(&self, address: &Address) -> U256 {
-		*self.balances.get(address).unwrap()
+		self.balances[address]
 	}

 	fn blockhash(&self, number: &U256) -> H256 {
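The FakeExt change prefers indexing over `.get(..).unwrap()` when a missing key should panic anyway (clippy's `get_unwrap` lint). A sketch with an ordinary HashMap standing in for the balances field:

use std::collections::HashMap;

fn balance(balances: &HashMap<String, u64>, address: &str) -> u64 {
	// Before: `*balances.get(address).unwrap()`
	// Indexing panics with a key error if `address` is absent, same end result.
	balances[address]
}

fn main() {
	let mut balances = HashMap::new();
	balances.insert("0xabc".to_string(), 7u64);
	assert_eq!(balance(&balances, "0xabc"), 7);
}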
@@ -516,7 +516,6 @@ impl<'a> Executive<'a> {
 mod tests {
 	use ethkey::{Generator, Random};
 	use super::*;
 	use util::*;
 	use action_params::{ActionParams, ActionValue};
 	use env_info::EnvInfo;
 	use evm::{Factory, VMType};
@@ -151,7 +151,7 @@ impl GasPriceCalibrator {
 		if Instant::now() >= self.next_calibration {
 			let usd_per_tx = self.options.usd_per_tx;
 			trace!(target: "miner", "Getting price info");
-			if let Ok(_) = PriceInfo::get(move |price: PriceInfo| {
+			let price_info = PriceInfo::get(move |price: PriceInfo| {
 				trace!(target: "miner", "Price info arrived: {:?}", price);
 				let usd_per_eth = price.ethusd;
 				let wei_per_usd: f32 = 1.0e18 / usd_per_eth;
@@ -159,7 +159,9 @@ impl GasPriceCalibrator {
 				let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx;
 				info!(target: "miner", "Updated conversion rate to Ξ1 = {} ({} wei/gas)", Colour::White.bold().paint(format!("US${}", usd_per_eth)), Colour::Yellow.bold().paint(format!("{}", wei_per_gas)));
 				set_price(U256::from(wei_per_gas as u64));
-			}) {
+			});
+
+			if price_info.is_ok() {
 				self.next_calibration = Instant::now() + self.options.recalibration_period;
 			} else {
 				warn!(target: "miner", "Unable to update Ether price.");
@@ -1142,12 +1144,10 @@ mod tests {
 	use std::time::Duration;
 	use super::super::{MinerService, PrioritizationStrategy};
 	use super::*;
 	use util::*;
 	use ethkey::{Generator, Random};
 	use client::{BlockChainClient, TestBlockChainClient, EachBlockWith, TransactionImportResult};
 	use header::BlockNumber;
 	use types::transaction::{Transaction, SignedTransaction, Action};
 	use block::*;
 	use spec::Spec;
 	use tests::helpers::{generate_dummy_client};

@@ -990,7 +990,7 @@ impl TransactionQueue {
 		let mut update_last_nonce_to = None;
 		{
 			let by_nonce = self.future.by_address.row_mut(&address);
-			if let None = by_nonce {
+			if by_nonce.is_none() {
 				return;
 			}
 			let mut by_nonce = by_nonce.expect("None is tested in early-exit condition above; qed");
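This hunk, and several later ones (the block-import tests, the authcodes save, the transaction RLP encoding), replace `if let` patterns that ignore the bound value with `is_none()` / `is_ok()` / `is_err()`, the rewrite clippy's redundant-pattern-matching lint suggests. A tiny sketch with made-up values:

fn main() {
	let by_nonce: Option<u32> = None;
	// Before: `if let None = by_nonce { ... }`
	if by_nonce.is_none() {
		println!("nothing queued for this sender");
	}

	let import: Result<(), String> = Err("bad block".into());
	// Before: `if let Err(_) = import { ... }`
	if import.is_err() {
		println!("import failed");
	}
}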
@@ -1212,7 +1212,6 @@ mod test {
 	use util::table::*;
 	use util::*;
 	use ethkey::{Random, Generator};
 	use transaction::*;
 	use error::{Error, TransactionError};
 	use super::*;
 	use super::{TransactionSet, TransactionOrder, VerifiedTransaction};
@@ -552,11 +552,11 @@ const POW_VERIFY_RATE: f32 = 0.02;
 pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &Engine, chain: &BlockChain, body: Option<&[u8]>, always: bool) -> Result<(), ::error::Error> {
 	if always || rng.gen::<f32>() <= POW_VERIFY_RATE {
 		match chain.block_header(header.parent_hash()) {
-			Some(parent) => engine.verify_block_family(&header, &parent, body),
-			None => engine.verify_block_seal(&header),
+			Some(parent) => engine.verify_block_family(header, &parent, body),
+			None => engine.verify_block_seal(header),
 		}
 	} else {
-		engine.verify_block_basic(&header, body)
+		engine.verify_block_basic(header, body)
 	}
 }

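The verification hunk drops borrows of a binding that is already a reference; clippy's `needless_borrow` (which a later hunk also adds to an allow list) flags the `&header` arguments. A sketch using a plain free function rather than the Engine trait:

fn digest_len(data: &str) -> usize {
	data.len()
}

fn check(header: &str) -> usize {
	// Before: `digest_len(&header)` — `&header` is `&&str`, immediately auto-dereferenced.
	digest_len(header)
}

fn main() {
	assert_eq!(check("deadbeef"), 8);
}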
@@ -449,7 +449,6 @@ mod tests {
 	use util::*;
 	use super::*;
 	use account_db::*;
 	use rlp::*;

 	#[test]
 	fn account_compress() {
@@ -445,6 +445,7 @@ impl State {
 	}

 	/// Add `incr` to the balance of account `a`.
+	#[cfg_attr(feature="dev", allow(single_match))]
 	pub fn add_balance(&mut self, a: &Address, incr: &U256, cleanup_mode: CleanupMode) {
 		trace!(target: "state", "add_balance({}, {}): {}", a, incr, self.balance(a));
 		let is_value_transfer = !incr.is_zero();
@@ -57,6 +57,7 @@ impl Substate {
 	}

 	/// Get the cleanup mode object from this.
+	#[cfg_attr(feature="dev", allow(wrong_self_convention))]
 	pub fn to_cleanup_mode(&mut self, schedule: &Schedule) -> CleanupMode {
 		match (schedule.no_empty, schedule.kill_empty) {
 			(false, _) => CleanupMode::ForceCreate,
@@ -397,6 +397,7 @@ impl StateDB {
 	}

 	/// Get cached code based on hash.
+	#[cfg_attr(feature="dev", allow(map_clone))]
 	pub fn get_cached_code(&self, hash: &H256) -> Option<Arc<Vec<u8>>> {
 		let mut cache = self.code_cache.lock();

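The three preceding hunks silence lints with dev-gated allows rather than rewriting the code. A sketch of how that `cfg_attr` pattern behaves; the `dev` feature mirrors the one in the diff and is presumably the configuration under which clippy runs, so plain builds see no attribute at all:

// With `--features dev` this expands to `#[allow(single_match)]`;
// without the feature the attribute disappears entirely.
#[cfg_attr(feature = "dev", allow(single_match))]
fn report(value: Option<u32>) {
	// A lone `match` on an Option is what `single_match` would normally flag.
	match value {
		Some(v) => println!("got {}", v),
		_ => {}
	}
}

fn main() {
	report(Some(3));
	report(None);
}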
@@ -62,7 +62,7 @@ fn should_return_registrar() {
 		&db_config
 	).unwrap();
 	let params = client.additional_params();
-	let address = params.get("registrar").unwrap();
+	let address = &params["registrar"];

 	assert_eq!(address.len(), 40);
 	assert!(U256::from_str(address).is_ok());
@@ -93,7 +93,7 @@ fn imports_good_block() {
 		&db_config
 	).unwrap();
 	let good_block = get_good_dummy_block();
-	if let Err(_) = client.import_block(good_block) {
+	if client.import_block(good_block).is_err() {
 		panic!("error importing block being good by definition");
 	}
 	client.flush_queue();
@@ -203,18 +203,18 @@ fn can_collect_garbage() {

 #[test]
 fn can_generate_gas_price_median() {
-	let client_result = generate_dummy_client_with_data(3, 1, &vec_into![1, 2, 3]);
+	let client_result = generate_dummy_client_with_data(3, 1, slice_into![1, 2, 3]);
 	let client = client_result.reference();
 	assert_eq!(Some(U256::from(2)), client.gas_price_median(3));

-	let client_result = generate_dummy_client_with_data(4, 1, &vec_into![1, 4, 3, 2]);
+	let client_result = generate_dummy_client_with_data(4, 1, slice_into![1, 4, 3, 2]);
 	let client = client_result.reference();
 	assert_eq!(Some(U256::from(3)), client.gas_price_median(4));
 }

 #[test]
 fn can_generate_gas_price_histogram() {
-	let client_result = generate_dummy_client_with_data(20, 1, &vec_into![6354,8593,6065,4842,7845,7002,689,4958,4250,6098,5804,4320,643,8895,2296,8589,7145,2000,2512,1408]);
+	let client_result = generate_dummy_client_with_data(20, 1, slice_into![6354,8593,6065,4842,7845,7002,689,4958,4250,6098,5804,4320,643,8895,2296,8589,7145,2000,2512,1408]);
 	let client = client_result.reference();

 	let hist = client.gas_price_histogram(20, 5).unwrap();
@@ -224,7 +224,7 @@ fn can_generate_gas_price_histogram() {

 #[test]
 fn empty_gas_price_histogram() {
-	let client_result = generate_dummy_client_with_data(20, 0, &vec_into![]);
+	let client_result = generate_dummy_client_with_data(20, 0, slice_into![]);
 	let client = client_result.reference();

 	assert!(client.gas_price_histogram(20, 5).is_none());
@@ -262,7 +262,7 @@ pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> GuardedTempResult<Arc<
 	).unwrap();

 	for block in &blocks {
-		if let Err(_) = client.import_block(block.clone()) {
+		if client.import_block(block.clone()).is_err() {
 			panic!("panic importing block which is well-formed");
 		}
 	}
@@ -19,16 +19,16 @@
 pub use std::time::Duration;
 use client::Mode as ClientMode;

-/// IPC-capable shadow-type for client::config::Mode
+/// IPC-capable shadow-type for `client::config::Mode`
 #[derive(Clone, Binary, Debug)]
 pub enum Mode {
-	/// Same as ClientMode::Off.
+	/// Same as `ClientMode::Off`.
 	Off,
-	/// Same as ClientMode::Dark; values in seconds.
+	/// Same as `ClientMode::Dark`; values in seconds.
 	Dark(u64),
-	/// Same as ClientMode::Passive; values in seconds.
+	/// Same as `ClientMode::Passive`; values in seconds.
 	Passive(u64, u64),
-	/// Same as ClientMode::Active.
+	/// Same as `ClientMode::Active`.
 	Active,
 }

@@ -73,7 +73,7 @@ pub struct Transaction {
 impl Transaction {
 	/// Append object with a without signature into RLP stream
 	pub fn rlp_append_unsigned_transaction(&self, s: &mut RlpStream, network_id: Option<u8>) {
-		s.begin_list(if let None = network_id { 6 } else { 9 });
+		s.begin_list(if network_id.is_none() { 6 } else { 9 });
 		s.append(&self.nonce);
 		s.append(&self.gas_price);
 		s.append(&self.gas);
@@ -156,7 +156,7 @@ pub fn execute(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<(), String> {
 	// get the mode
 	let mode = try!(mode_switch_to_bool(cmd.mode, &user_defaults));
 	trace!(target: "mode", "mode is {:?}", mode);
-	let network_enabled = match &mode { &Mode::Dark(_) | &Mode::Off => false, _ => true, };
+	let network_enabled = match mode { Mode::Dark(_) | Mode::Off => false, _ => true, };

 	// prepare client and snapshot paths.
 	let client_path = db_dirs.client_path(algorithm);
@@ -125,7 +125,7 @@ mod tests {
 	use serde_json;
 	use std::str::FromStr;
 	use util::hash::*;
-	use super::*;
+	use super::{VariadicValue, Topic, Filter};
 	use v1::types::BlockNumber;
 	use ethcore::filter::Filter as EthFilter;
 	use ethcore::client::BlockID;
@@ -80,6 +80,7 @@ pub struct AuthCodes<T: TimeProvider = DefaultTimeProvider> {
 impl AuthCodes<DefaultTimeProvider> {

 	/// Reads `AuthCodes` from file and creates new instance using `DefaultTimeProvider`.
+	#[cfg_attr(feature="dev", allow(single_char_pattern))]
 	pub fn from_file(file: &Path) -> io::Result<AuthCodes> {
 		let content = {
 			if let Ok(mut file) = fs::File::open(file) {
@@ -128,7 +129,7 @@ impl<T: TimeProvider> AuthCodes<T> {
 		let mut file = try!(fs::File::create(file));
 		let content = self.codes.iter().map(|code| {
 			let mut data = vec![code.code.clone(), encode_time(code.created_at.clone())];
-			if let Some(used_at) = code.last_used_at.clone() {
+			if let Some(used_at) = code.last_used_at {
 				data.push(encode_time(used_at));
 			}
 			data.join(SEPARATOR)
@@ -99,7 +99,7 @@ fn auth_is_valid(codes_path: &Path, protocols: ws::Result<Vec<&str>>) -> bool {

 				let res = codes.is_valid(&auth, time);
 				// make sure to save back authcodes - it might have been modified
-				if let Err(_) = codes.to_file(codes_path) {
+				if codes.to_file(codes_path).is_err() {
 					warn!(target: "signer", "Couldn't save authorization codes to file.");
 				}
 				res
@@ -624,7 +624,7 @@ impl ChainSync {
 		Ok(())
 	}

-	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
+	#[cfg_attr(feature="dev", allow(cyclomatic_complexity, needless_borrow))]
 	/// Called by peer once it has new block headers during sync
 	fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
 		let confirmed = match self.peers.get_mut(&peer_id) {
@@ -1173,7 +1173,7 @@ impl ChainSync {
 				}
 			},
 			SyncState::SnapshotData => {
-				if let RestorationStatus::Ongoing { state_chunks: _, block_chunks: _, state_chunks_done, block_chunks_done, } = io.snapshot_service().status() {
+				if let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, .. } = io.snapshot_service().status() {
 					if self.snapshot.done_chunks() - (state_chunks_done + block_chunks_done) as usize > MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD {
 						trace!(target: "sync", "Snapshot queue full, pausing sync");
 						self.state = SyncState::SnapshotWaiting;
@@ -1744,7 +1744,7 @@ impl ChainSync {
 				self.restart(io);
 				self.continue_sync(io);
 			},
-			RestorationStatus::Ongoing { state_chunks: _, block_chunks: _, state_chunks_done, block_chunks_done, } => {
+			RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, .. } => {
 				if !self.snapshot.is_complete() && self.snapshot.done_chunks() - (state_chunks_done + block_chunks_done) as usize <= MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD {
 					trace!(target:"sync", "Resuming snapshot sync");
 					self.state = SyncState::SnapshotData;
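Both ChainSync hunks shorten struct patterns with `..` instead of spelling out every ignored field as `_`, which is what clippy's `unneeded_field_pattern` lint points at. A sketch with a stand-in status enum, not the real RestorationStatus:

enum RestorationStatus {
	Inactive,
	Ongoing {
		state_chunks: u32,
		block_chunks: u32,
		state_chunks_done: u32,
		block_chunks_done: u32,
	},
}

fn done_chunks(status: &RestorationStatus) -> u32 {
	match *status {
		// Before: `Ongoing { state_chunks: _, block_chunks: _, state_chunks_done, block_chunks_done }`
		RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, .. } =>
			state_chunks_done + block_chunks_done,
		RestorationStatus::Inactive => 0,
	}
}

fn main() {
	let status = RestorationStatus::Ongoing {
		state_chunks: 10,
		block_chunks: 10,
		state_chunks_done: 3,
		block_chunks_done: 4,
	};
	assert_eq!(done_chunks(&status), 7);
	assert_eq!(done_chunks(&RestorationStatus::Inactive), 0);
}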
@@ -2002,8 +2002,6 @@ mod tests {
 	use tests::snapshot::TestSnapshotService;
 	use super::*;
 	use ::SyncConfig;
 	use util::*;
 	use rlp::*;
 	use super::{PeerInfo, PeerAsking};
 	use ethcore::views::BlockView;
 	use ethcore::header::*;
@@ -156,19 +156,19 @@ impl TestNet {
 	}

 	pub fn peer(&self, i: usize) -> &TestPeer {
-		self.peers.get(i).unwrap()
+		&self.peers[i]
 	}

 	pub fn peer_mut(&mut self, i: usize) -> &mut TestPeer {
-		self.peers.get_mut(i).unwrap()
+		&mut self.peers[i]
 	}

 	pub fn start(&mut self) {
 		for peer in 0..self.peers.len() {
 			for client in 0..self.peers.len() {
 				if peer != client {
-					let mut p = self.peers.get_mut(peer).unwrap();
-					p.sync.write().update_targets(&mut p.chain);
+					let mut p = &mut self.peers[peer];
+					p.sync.write().update_targets(&p.chain);
 					p.sync.write().on_peer_connected(&mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(client as PeerId)), client as PeerId);
 				}
 			}
@@ -179,7 +179,7 @@ impl TestNet {
 		for peer in 0..self.peers.len() {
 			if let Some(packet) = self.peers[peer].queue.pop_front() {
 				let disconnecting = {
-					let mut p = self.peers.get_mut(packet.recipient).unwrap();
+					let mut p = &mut self.peers[packet.recipient];
 					trace!("--- {} -> {} ---", peer, packet.recipient);
 					let to_disconnect = {
 						let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId));
@@ -195,7 +195,7 @@ impl TestNet {
 				};
 				for d in &disconnecting {
 					// notify other peers that this peer is disconnecting
-					let mut p = self.peers.get_mut(*d).unwrap();
+					let mut p = &mut self.peers[*d];
 					let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId));
 					p.sync.write().on_peer_aborting(&mut io, peer as PeerId);
 				}
@@ -507,7 +507,6 @@ mod tests {
 	use std::io::{Read, Write, Error, Cursor, ErrorKind};
 	use mio::{Ready};
 	use std::collections::VecDeque;
 	use util::bytes::*;
 	use devtools::*;
 	use io::*;

@@ -555,10 +555,6 @@ impl Discovery {
 #[cfg(test)]
 mod tests {
 	use super::*;
 	use util::hash::*;
 	use util::sha3::*;
 	use std::net::*;
 	use node_table::*;
 	use std::str::FromStr;
 	use rustc_serialize::hex::FromHex;
 	use ethkey::{Random, Generator};
@@ -333,7 +333,6 @@ mod test {
 	use std::sync::Arc;
 	use rustc_serialize::hex::FromHex;
 	use super::*;
 	use util::hash::*;
 	use io::*;
 	use mio::tcp::TcpStream;
 	use stats::NetworkStats;
@@ -358,8 +358,6 @@ pub fn is_valid_node_url(url: &str) -> bool {
 mod tests {
 	use super::*;
 	use std::str::FromStr;
 	use std::net::*;
 	use util::hash::*;
 	use devtools::*;
 	use AllowIP;

@@ -33,6 +33,13 @@ macro_rules! vec_into {
 	}
 }

+#[macro_export]
+macro_rules! slice_into {
+	( $( $x:expr ),* ) => {
+		&[ $( $x.into() ),* ]
+	}
+}
+
 #[macro_export]
 macro_rules! hash_map {
 	() => { HashMap::new() };
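The new `slice_into!` macro is what the earlier test hunks switch to: it yields a borrowed slice of converted elements instead of the temporary Vec that `&vec_into![..]` built. A standalone usage sketch with plain integer types standing in for U256:

macro_rules! slice_into {
	( $( $x:expr ),* ) => {
		&[ $( $x.into() ),* ]
	}
}

fn sum(values: &[u64]) -> u64 {
	values.iter().sum()
}

fn main() {
	// Each element is converted with `Into`; the target type is inferred
	// from the expected `&[u64]` parameter.
	let total = sum(slice_into![1u8, 2u8, 3u8]);
	assert_eq!(total, 6);
}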
@@ -236,7 +236,6 @@ mod tests {

 	use common::*;
 	use super::*;
 	use hashdb::*;
 	use journaldb::traits::JournalDB;
 	use kvdb::Database;

@@ -556,7 +556,6 @@ mod tests {
 	use common::*;
 	use super::*;
 	use super::super::traits::JournalDB;
 	use hashdb::*;
 	use log::init_log;
 	use kvdb::{Database, DatabaseConfig};

@@ -422,7 +422,6 @@ mod tests {

 	use common::*;
 	use super::*;
 	use hashdb::*;
 	use log::init_log;
 	use journaldb::JournalDB;
 	use kvdb::Database;
@@ -217,7 +217,6 @@ mod tests {
 	use common::*;
 	use super::*;
 	use super::super::traits::JournalDB;
 	use hashdb::*;

 	#[test]
 	fn long_history() {
@@ -628,7 +628,6 @@ impl Drop for Database {

 #[cfg(test)]
 mod tests {
 	use hash::*;
 	use super::*;
 	use devtools::*;
 	use std::str::FromStr;
@@ -67,7 +67,7 @@ mod tests {

 	#[test]
 	fn check_histogram() {
-		let hist = Histogram::new(&vec_into![643,689,1408,2000,2296,2512,4250,4320,4842,4958,5804,6065,6098,6354,7002,7145,7845,8589,8593,8895], 5).unwrap();
+		let hist = Histogram::new(slice_into![643,689,1408,2000,2296,2512,4250,4320,4842,4958,5804,6065,6098,6354,7002,7145,7845,8589,8593,8895], 5).unwrap();
 		let correct_bounds: Vec<U256> = vec_into![643, 2294, 3945, 5596, 7247, 8898];
 		assert_eq!(Histogram { bucket_bounds: correct_bounds, counts: vec![4,2,4,6,4] }, hist);
 	}
@@ -75,7 +75,7 @@ mod tests {
 	#[test]
 	fn smaller_data_range_than_bucket_range() {
 		assert_eq!(
-			Histogram::new(&vec_into![1, 2, 2], 3),
+			Histogram::new(slice_into![1, 2, 2], 3),
 			Some(Histogram { bucket_bounds: vec_into![1, 2, 3, 4], counts: vec![1, 2, 0] })
 		);
 	}
@@ -83,7 +83,7 @@ mod tests {
 	#[test]
 	fn data_range_is_not_multiple_of_bucket_range() {
 		assert_eq!(
-			Histogram::new(&vec_into![1, 2, 5], 2),
+			Histogram::new(slice_into![1, 2, 5], 2),
 			Some(Histogram { bucket_bounds: vec_into![1, 4, 7], counts: vec![2, 1] })
 		);
 	}
@@ -91,13 +91,13 @@ mod tests {
 	#[test]
 	fn data_range_is_multiple_of_bucket_range() {
 		assert_eq!(
-			Histogram::new(&vec_into![1, 2, 6], 2),
+			Histogram::new(slice_into![1, 2, 6], 2),
 			Some(Histogram { bucket_bounds: vec_into![1, 4, 7], counts: vec![2, 1] })
 		);
 	}

 	#[test]
 	fn none_when_too_few_data() {
-		assert!(Histogram::new(&vec_into![], 1).is_none());
+		assert!(Histogram::new(slice_into![], 1).is_none());
 	}
 }