2017-01-25 18:51:41 +01:00
|
|
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
2016-04-21 13:57:27 +02:00
|
|
|
// This file is part of Parity.
|
|
|
|
|
|
|
|
// Parity is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
|
|
|
|
// Parity is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU General Public License for more details.
|
|
|
|
|
|
|
|
// You should have received a copy of the GNU General Public License
|
|
|
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
2016-05-24 20:30:21 +02:00
|
|
|
extern crate ansi_term;
|
2016-07-17 23:00:57 +02:00
|
|
|
use self::ansi_term::Colour::{White, Yellow, Green, Cyan, Blue};
|
2017-09-01 16:57:57 +02:00
|
|
|
use self::ansi_term::{Colour, Style};
|
2016-05-24 20:30:21 +02:00
|
|
|
|
2016-07-20 12:36:20 +02:00
|
|
|
use std::sync::{Arc};
|
2016-12-11 02:02:40 +01:00
|
|
|
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
|
2016-05-25 09:57:31 +02:00
|
|
|
use std::time::{Instant, Duration};
|
2017-07-10 13:21:11 +02:00
|
|
|
|
2018-04-11 12:56:37 +02:00
|
|
|
use atty;
|
2018-03-13 11:49:57 +01:00
|
|
|
use ethcore::client::{
|
|
|
|
BlockId, BlockChainClient, ChainInfo, BlockInfo, BlockChainInfo,
|
|
|
|
BlockQueueInfo, ChainNotify, ClientReport, Client, ClientIoMessage
|
|
|
|
};
|
2017-07-10 13:21:11 +02:00
|
|
|
use ethcore::header::BlockNumber;
|
2016-10-18 18:16:00 +02:00
|
|
|
use ethcore::snapshot::{RestorationStatus, SnapshotService as SS};
|
2017-07-10 13:21:11 +02:00
|
|
|
use ethcore::snapshot::service::Service as SnapshotService;
|
2018-04-10 12:13:49 +02:00
|
|
|
use sync::{LightSyncProvider, LightSync, SyncProvider, ManageNetwork};
|
2017-07-10 13:21:11 +02:00
|
|
|
use io::{TimerToken, IoContext, IoHandler};
|
|
|
|
use light::Cache as LightDataCache;
|
|
|
|
use light::client::LightChainClient;
|
2016-04-21 13:57:27 +02:00
|
|
|
use number_prefix::{binary_prefix, Standalone, Prefixed};
|
2017-04-13 16:32:07 +02:00
|
|
|
use parity_rpc::{is_major_importing};
|
|
|
|
use parity_rpc::informant::RpcStats;
|
2018-01-10 13:35:18 +01:00
|
|
|
use ethereum_types::H256;
|
2017-09-06 20:47:45 +02:00
|
|
|
use bytes::Bytes;
|
2017-09-02 20:09:13 +02:00
|
|
|
use parking_lot::{RwLock, Mutex};
|
2016-04-21 13:57:27 +02:00
|
|
|
|
2016-10-24 18:27:23 +02:00
|
|
|
/// Format byte counts to standard denominations.
|
|
|
|
pub fn format_bytes(b: usize) -> String {
|
|
|
|
match binary_prefix(b as f64) {
|
|
|
|
Standalone(bytes) => format!("{} bytes", bytes),
|
|
|
|
Prefixed(prefix, n) => format!("{:.0} {}B", n, prefix),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-09-16 12:18:27 +02:00
|
|
|
/// Something that can be converted to milliseconds.
pub trait MillisecondDuration {
	/// Get the value in milliseconds.
	fn as_milliseconds(&self) -> u64;
}

impl MillisecondDuration for Duration {
	fn as_milliseconds(&self) -> u64 {
		// Whole milliseconds contributed by the sub-second nanosecond part,
		// added to the seconds converted to milliseconds.
		let subsec_millis = u64::from(self.subsec_nanos()) / 1_000_000;
		self.as_secs() * 1000 + subsec_millis
	}
}
|
|
|
|
|
2017-07-10 13:21:11 +02:00
|
|
|
/// Per-subsystem cache sizes (in bytes), keyed by a short static label
/// such as "db", "queue" or "chain".
#[derive(Default)]
struct CacheSizes {
	// BTreeMap so labels iterate in a stable, sorted order when displayed.
	sizes: ::std::collections::BTreeMap<&'static str, usize>,
}
|
|
|
|
|
|
|
|
impl CacheSizes {
|
|
|
|
fn insert(&mut self, key: &'static str, bytes: usize) {
|
|
|
|
self.sizes.insert(key, bytes);
|
|
|
|
}
|
|
|
|
|
|
|
|
fn display<F>(&self, style: Style, paint: F) -> String
|
|
|
|
where F: Fn(Style, String) -> String
|
|
|
|
{
|
|
|
|
use std::fmt::Write;
|
|
|
|
|
|
|
|
let mut buf = String::new();
|
|
|
|
for (name, &size) in &self.sizes {
|
|
|
|
|
|
|
|
write!(buf, " {:>8} {}", paint(style, format_bytes(size)), name)
|
|
|
|
.expect("writing to string won't fail unless OOM; qed")
|
|
|
|
}
|
|
|
|
|
|
|
|
buf
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Sync-related state sampled for one informant report.
pub struct SyncInfo {
	// Most recently imported block number (falls back to the chain's best
	// block when the sync status has none — see `FullNodeInformantData::report`).
	last_imported_block_number: BlockNumber,
	// Most recently imported ancient/old block, if any such import happened.
	last_imported_old_block_number: Option<BlockNumber>,
	// Currently connected peer count.
	num_peers: usize,
	// Maximum peer count currently allowed.
	max_peers: u32,
	// Whether a snapshot (warp) sync is in progress.
	snapshot_sync: bool,
}
|
|
|
|
|
|
|
|
/// One-shot snapshot of node state gathered by an `InformantData` provider.
pub struct Report {
	// Whether the node is currently busy with a major import.
	importing: bool,
	// Best block, hash and related chain metadata.
	chain_info: BlockChainInfo,
	// Cumulative client counters (blocks imported, gas processed, ...).
	client_report: ClientReport,
	// Verification-queue lengths and memory use.
	queue_info: BlockQueueInfo,
	// Per-subsystem cache memory usage for display.
	cache_sizes: CacheSizes,
	// Sync/peer details; `None` when no sync provider is attached.
	sync_info: Option<SyncInfo>,
}
|
|
|
|
|
|
|
|
/// Something which can provide data to the informant.
pub trait InformantData: Send + Sync {
	/// Whether it executes transactions (true for a full node, false for a
	/// light node, which only imports headers).
	fn executes_transactions(&self) -> bool;

	/// Whether it is currently importing (also included in `Report`).
	fn is_major_importing(&self) -> bool;

	/// Generate a report of blockchain status, memory usage, and sync info.
	fn report(&self) -> Report;
}
|
|
|
|
|
|
|
|
/// Informant data for a full node.
pub struct FullNodeInformantData {
	// Full client used for chain, queue and cache queries.
	pub client: Arc<Client>,
	// Optional sync provider; without it no peer/sync info is reported.
	pub sync: Option<Arc<SyncProvider>>,
	// Optional network manager, used to read peer-count configuration.
	pub net: Option<Arc<ManageNetwork>>,
}
|
|
|
|
|
|
|
|
impl InformantData for FullNodeInformantData {
	// Full nodes execute transactions, so tx/gas throughput is reported.
	fn executes_transactions(&self) -> bool { true }

	fn is_major_importing(&self) -> bool {
		// No sync provider attached -> no sync state; the helper then decides
		// based on the queue alone.
		let state = self.sync.as_ref().map(|sync| sync.status().state);
		is_major_importing(state, self.client.queue_info())
	}

	fn report(&self) -> Report {
		// Sample client counters, verification queue and blockchain cache
		// stats together, then the current chain head.
		let (client_report, queue_info, blockchain_cache_info) =
			(self.client.report(), self.client.queue_info(), self.client.blockchain_cache_info());

		let chain_info = self.client.chain_info();

		// Memory usage of the main subsystems, for the status line.
		let mut cache_sizes = CacheSizes::default();
		cache_sizes.insert("db", client_report.state_db_mem);
		cache_sizes.insert("queue", queue_info.mem_used);
		cache_sizes.insert("chain", blockchain_cache_info.total());

		// Sync details are only available when both a sync provider and a
		// network manager are present.
		let (importing, sync_info) = match (self.sync.as_ref(), self.net.as_ref()) {
			(Some(sync), Some(net)) => {
				let status = sync.status();
				let net_config = net.network_config();

				// Sync keeps its own caches; include them in the report.
				cache_sizes.insert("sync", status.mem_used);

				let importing = is_major_importing(Some(status.state), queue_info.clone());
				(importing, Some(SyncInfo {
					// Fall back to the chain head if sync hasn't imported yet.
					last_imported_block_number: status.last_imported_block_number.unwrap_or(chain_info.best_block_number),
					last_imported_old_block_number: status.last_imported_old_block_number,
					num_peers: status.num_peers,
					max_peers: status.current_max_peers(net_config.min_peers, net_config.max_peers),
					snapshot_sync: status.is_snapshot_syncing(),
				}))
			}
			// Missing sync or net: still compute `importing`, but report no
			// sync info.
			_ => (is_major_importing(self.sync.as_ref().map(|s| s.status().state), queue_info.clone()), None),
		};

		Report {
			importing,
			chain_info,
			client_report,
			queue_info,
			cache_sizes,
			sync_info,
		}
	}
}
|
|
|
|
|
|
|
|
/// Informant data for a light node -- note that the network is required.
pub struct LightNodeInformantData {
	// Light client used for chain and queue queries.
	pub client: Arc<LightChainClient>,
	// Light sync handle; provides importing state and peer numbers.
	pub sync: Arc<LightSync>,
	// Shared light-client data cache; its memory use is reported.
	pub cache: Arc<Mutex<LightDataCache>>,
}
|
|
|
|
|
|
|
|
impl InformantData for LightNodeInformantData {
	// Light nodes import headers only; no transaction execution stats.
	fn executes_transactions(&self) -> bool { false }

	fn is_major_importing(&self) -> bool {
		self.sync.is_major_importing()
	}

	fn report(&self) -> Report {
		// Sample client counters, header queue and chain head together.
		let (client_report, queue_info, chain_info) =
			(self.client.report(), self.client.queue_info(), self.client.chain_info());

		// Memory usage: header queue plus the shared light-data cache.
		let mut cache_sizes = CacheSizes::default();
		cache_sizes.insert("queue", queue_info.mem_used);
		cache_sizes.insert("cache", self.cache.lock().mem_used());

		let peer_numbers = self.sync.peer_numbers();
		// The light node always has a sync handle, so sync info is always
		// present; snapshot sync does not apply to light clients.
		let sync_info = Some(SyncInfo {
			last_imported_block_number: chain_info.best_block_number,
			// Light clients don't import ancient blocks.
			last_imported_old_block_number: None,
			num_peers: peer_numbers.connected,
			max_peers: peer_numbers.max as u32,
			snapshot_sync: false,
		});

		Report {
			importing: self.sync.is_major_importing(),
			chain_info,
			client_report,
			queue_info,
			cache_sizes,
			sync_info,
		}
	}
}
|
|
|
|
|
|
|
|
/// Periodically logs a one-line node status summary, plus per-block import
/// messages, driven by data from `T: InformantData`.
pub struct Informant<T> {
	// When the status line was last printed; used to throttle `tick`.
	last_tick: RwLock<Instant>,
	// Whether coloured output was requested (still gated on a TTY at print time).
	with_color: bool,
	// Source of chain/sync/cache data for reports.
	target: T,
	// Optional snapshot service, used to report restoration progress.
	snapshot: Option<Arc<SnapshotService>>,
	// Optional RPC statistics (connections, request rate, latency).
	rpc_stats: Option<Arc<RpcStats>>,
	// When an import message was last logged; throttles `new_blocks` output.
	last_import: Mutex<Instant>,
	// Blocks imported but not yet logged (batched into the next message).
	skipped: AtomicUsize,
	// Transactions in those skipped blocks.
	skipped_txs: AtomicUsize,
	// Set on shutdown to suppress further output.
	in_shutdown: AtomicBool,
	// Snapshot of the client report at the previous tick, for computing
	// per-interval deltas (blk/s, tx/s, ...).
	last_report: Mutex<ClientReport>,
}
|
|
|
|
|
|
|
|
impl<T: InformantData> Informant<T> {
	/// Make a new instance potentially `with_color` output.
	pub fn new(
		target: T,
		snapshot: Option<Arc<SnapshotService>>,
		rpc_stats: Option<Arc<RpcStats>>,
		with_color: bool,
	) -> Self {
		Informant {
			last_tick: RwLock::new(Instant::now()),
			with_color: with_color,
			target: target,
			snapshot: snapshot,
			rpc_stats: rpc_stats,
			last_import: Mutex::new(Instant::now()),
			skipped: AtomicUsize::new(0),
			skipped_txs: AtomicUsize::new(0),
			in_shutdown: AtomicBool::new(false),
			last_report: Mutex::new(Default::default()),
		}
	}

	/// Signal that we're shutting down; no more output necessary.
	pub fn shutdown(&self) {
		self.in_shutdown.store(true, ::std::sync::atomic::Ordering::SeqCst);
	}

	/// Print the status line if due: never more often than every 5 seconds,
	/// and when idle (not importing, no snapshot sync) no more often than
	/// every 30 seconds.
	pub fn tick(&self) {
		let elapsed = self.last_tick.read().elapsed();
		// Hard throttle: at most one line per 5 seconds.
		if elapsed < Duration::from_secs(5) {
			return;
		}

		// Diff the cumulative client counters against the previous tick so
		// the printed figures are per-interval (used for blk/s, tx/s, Mgas/s).
		// Lock scope kept to this block.
		let (client_report, full_report) = {
			let mut last_report = self.last_report.lock();
			let full_report = self.target.report();
			let diffed = full_report.client_report.clone() - &*last_report;
			*last_report = full_report.client_report.clone();
			(diffed, full_report)
		};

		let Report {
			importing,
			chain_info,
			queue_info,
			cache_sizes,
			sync_info,
			..
		} = full_report;

		let rpc_stats = self.rpc_stats.as_ref();

		// Snapshot restoration progress, if a restoration is ongoing.
		let (snapshot_sync, snapshot_current, snapshot_total) = self.snapshot.as_ref().map_or((false, 0, 0), |s|
			match s.status() {
				RestorationStatus::Ongoing { state_chunks, block_chunks, state_chunks_done, block_chunks_done } =>
					(true, state_chunks_done + block_chunks_done, state_chunks + block_chunks),
				_ => (false, 0, 0),
			}
		);
		// Only treat it as snapshot sync if the sync layer agrees.
		let snapshot_sync = snapshot_sync && sync_info.as_ref().map_or(false, |s| s.snapshot_sync);
		// Idle throttle: when nothing is happening, only print every 30s.
		if !importing && !snapshot_sync && elapsed < Duration::from_secs(30) {
			return;
		}

		*self.last_tick.write() = Instant::now();

		// Apply colour only when requested AND stdout is a terminal.
		let paint = |c: Style, t: String| match self.with_color && atty::is(atty::Stream::Stdout) {
			true => format!("{}", c.paint(t)),
			false => t,
		};

		// Four segments: sync progress, peers, cache sizes, RPC stats.
		info!(target: "import", "{} {} {} {}",
			match importing {
				true => match snapshot_sync {
					false => format!("Syncing {} {} {} {}+{} Qed",
						paint(White.bold(), format!("{:>8}", format!("#{}", chain_info.best_block_number))),
						paint(White.bold(), format!("{}", chain_info.best_block_hash)),
						if self.target.executes_transactions() {
							// Full node: throughput from the per-interval diff.
							// gas / (ms * 1000) == millions of gas per second.
							format!("{} blk/s {} tx/s {} Mgas/s",
								paint(Yellow.bold(), format!("{:4}", (client_report.blocks_imported * 1000) as u64 / elapsed.as_milliseconds())),
								paint(Yellow.bold(), format!("{:4}", (client_report.transactions_applied * 1000) as u64 / elapsed.as_milliseconds())),
								paint(Yellow.bold(), format!("{:3}", (client_report.gas_processed / From::from(elapsed.as_milliseconds() * 1000)).low_u64()))
							)
						} else {
							// Light node: headers only.
							format!("{} hdr/s",
								paint(Yellow.bold(), format!("{:4}", (client_report.blocks_imported * 1000) as u64 / elapsed.as_milliseconds()))
							)
						},
						paint(Green.bold(), format!("{:5}", queue_info.unverified_queue_size)),
						paint(Green.bold(), format!("{:5}", queue_info.verified_queue_size))
					),
					true => format!("Syncing snapshot {}/{}", snapshot_current, snapshot_total),
				},
				false => String::new(),
			},
			match sync_info.as_ref() {
				Some(ref sync_info) => format!("{}{}/{} peers",
					match importing {
						true => format!("{} ", paint(Green.bold(), format!("{:>8}", format!("#{}", sync_info.last_imported_block_number)))),
						// Not importing: show ancient-block progress if any.
						false => match sync_info.last_imported_old_block_number {
							Some(number) => format!("{} ", paint(Yellow.bold(), format!("{:>8}", format!("#{}", number)))),
							None => String::new(),
						}
					},
					paint(Cyan.bold(), format!("{:2}", sync_info.num_peers)),
					paint(Cyan.bold(), format!("{:2}", sync_info.max_peers)),
				),
				_ => String::new(),
			},
			cache_sizes.display(Blue.bold(), &paint),
			match rpc_stats {
				Some(ref rpc_stats) => format!(
					"RPC: {} conn, {} req/s, {} µs",
					paint(Blue.bold(), format!("{:2}", rpc_stats.sessions())),
					paint(Blue.bold(), format!("{:2}", rpc_stats.requests_rate())),
					paint(Blue.bold(), format!("{:3}", rpc_stats.approximated_roundtrip())),
				),
				_ => String::new(),
			},
		);
	}
}
|
|
|
|
|
2017-07-10 13:21:11 +02:00
|
|
|
impl ChainNotify for Informant<FullNodeInformantData> {
	// Logs a detailed message for the last block of each imported batch,
	// at most once per second; other blocks are counted as "skipped" and
	// summarized in the next message.
	fn new_blocks(&self, imported: Vec<H256>, _invalid: Vec<H256>, _enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, duration: u64) {
		let mut last_import = self.last_import.lock();
		let client = &self.target.client;

		let importing = self.target.is_major_importing();
		// "Ripe" = at least 1s since the last import message and not in the
		// middle of a major sync (which would flood the log).
		let ripe = Instant::now() > *last_import + Duration::from_secs(1) && !importing;
		// Count transactions in the blocks we will NOT log individually:
		// when ripe, the final block is logged on its own, so exclude it.
		let txs_imported = imported.iter()
			.take(imported.len().saturating_sub(if ripe { 1 } else { 0 }))
			.filter_map(|h| client.block(BlockId::Hash(*h)))
			.map(|b| b.transactions_count())
			.sum();

		if ripe {
			if let Some(block) = imported.last().and_then(|h| client.block(BlockId::Hash(*h))) {
				let header_view = block.header_view();
				let size = block.rlp().as_raw().len();
				// Carry over previously skipped blocks/txs plus the rest of
				// this batch into the summary suffix.
				let (skipped, skipped_txs) = (self.skipped.load(AtomicOrdering::Relaxed) + imported.len() - 1, self.skipped_txs.load(AtomicOrdering::Relaxed) + txs_imported);
				// NOTE(review): `duration` is divided by 1e6 and labelled
				// "ms", implying it is in nanoseconds — confirm with callers.
				info!(target: "import", "Imported {} {} ({} txs, {} Mgas, {} ms, {} KiB){}",
					Colour::White.bold().paint(format!("#{}", header_view.number())),
					Colour::White.bold().paint(format!("{}", header_view.hash())),
					Colour::Yellow.bold().paint(format!("{}", block.transactions_count())),
					Colour::Yellow.bold().paint(format!("{:.2}", header_view.gas_used().low_u64() as f32 / 1000000f32)),
					Colour::Purple.bold().paint(format!("{:.2}", duration as f32 / 1000000f32)),
					Colour::Blue.bold().paint(format!("{:.2}", size as f32 / 1024f32)),
					if skipped > 0 {
						format!(" + another {} block(s) containing {} tx(s)",
							Colour::Red.bold().paint(format!("{}", skipped)),
							Colour::Red.bold().paint(format!("{}", skipped_txs))
						)
					} else {
						String::new()
					}
				);
				// Counters consumed by this message; reset them.
				self.skipped.store(0, AtomicOrdering::Relaxed);
				self.skipped_txs.store(0, AtomicOrdering::Relaxed);
				*last_import = Instant::now();
			}
		} else {
			// Too soon to log: accumulate this batch for a later summary.
			self.skipped.fetch_add(imported.len(), AtomicOrdering::Relaxed);
			self.skipped_txs.fetch_add(txs_imported, AtomicOrdering::Relaxed);
		}
	}
}
|
|
|
|
|
2016-12-11 02:02:40 +01:00
|
|
|
/// Timer token identifying the periodic informant tick in the IO handler.
const INFO_TIMER: TimerToken = 0;
|
|
|
|
|
2017-07-10 13:21:11 +02:00
|
|
|
impl<T: InformantData> IoHandler<ClientIoMessage> for Informant<T> {
|
2016-12-10 23:58:39 +01:00
|
|
|
fn initialize(&self, io: &IoContext<ClientIoMessage>) {
|
2018-04-14 21:35:58 +02:00
|
|
|
io.register_timer(INFO_TIMER, Duration::from_secs(5)).expect("Error registering timer");
|
2016-12-10 23:58:39 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
fn timeout(&self, _io: &IoContext<ClientIoMessage>, timer: TimerToken) {
|
2016-12-11 02:02:40 +01:00
|
|
|
if timer == INFO_TIMER && !self.in_shutdown.load(AtomicOrdering::SeqCst) {
|
|
|
|
self.tick();
|
2016-12-10 23:58:39 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|