Remove the time dependency where possible (#8100)
parent 5c47116889, commit 113c35af0a

Cargo.lock (generated): 8 lines changed
Cargo.lock
@@ -509,7 +509,6 @@ dependencies = [
  "stop-guard 0.1.0",
  "table 0.1.0",
  "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
  "trace-time 0.1.0",
  "trie-standardmap 0.1.0",
  "triehash 0.1.0",
@@ -578,7 +577,6 @@ dependencies = [
  "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "stats 0.1.0",
  "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
  "triehash 0.1.0",
  "vm 0.1.0",
 ]
@@ -675,7 +673,6 @@ dependencies = [
  "slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "snappy 0.1.0 (git+https://github.com/paritytech/rust-snappy)",
  "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
  "tiny-keccak 1.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -906,7 +903,6 @@ dependencies = [
  "rlp 0.2.1",
  "semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
  "triehash 0.1.0",
 ]
@@ -2015,7 +2011,6 @@ dependencies = [
  "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "term_size 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
  "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2210,7 +2205,6 @@ dependencies = [
  "serde_json 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "stats 0.1.0",
  "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
  "tiny-keccak 1.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-timer 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "transient-hashmap 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2361,7 +2355,6 @@ dependencies = [
  "serde_json 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
  "tiny-keccak 1.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -3312,7 +3305,6 @@ name = "trace-time"
 version = "0.1.0"
 dependencies = [
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -14,7 +14,6 @@ docopt = "0.8"
 clap = "2"
 term_size = "0.3"
 textwrap = "0.9"
-time = "0.1"
 num_cpus = "1.2"
 number_prefix = "0.2"
 rpassword = "1.0"

@@ -60,7 +60,6 @@ macros = { path = "../util/macros" }
 rust-crypto = "0.2.34"
 rustc-hex = "1.0"
 stats = { path = "../util/stats" }
-time = "0.1"
 trace-time = { path = "../util/trace-time" }
 using_queue = { path = "../util/using_queue" }
 table = { path = "../util/table" }

@@ -22,7 +22,6 @@ vm = { path = "../vm" }
 plain_hasher = { path = "../../util/plain_hasher" }
 rlp = { path = "../../util/rlp" }
 rlp_derive = { path = "../../util/rlp_derive" }
-time = "0.1"
 smallvec = "0.4"
 futures = "0.1"
 rand = "0.4"
@@ -25,7 +25,7 @@ use ethcore::header::BlockNumber;
 use ethcore::receipt::Receipt;

 use stats::Corpus;
-use time::{SteadyTime, Duration};
+use std::time::{Instant, Duration};
 use heapsize::HeapSizeOf;
 use ethereum_types::{H256, U256};
 use memory_cache::MemoryLruCache;
@@ -69,7 +69,7 @@ pub struct Cache {
 bodies: MemoryLruCache<H256, encoded::Body>,
 receipts: MemoryLruCache<H256, Vec<Receipt>>,
 chain_score: MemoryLruCache<H256, U256>,
-corpus: Option<(Corpus<U256>, SteadyTime)>,
+corpus: Option<(Corpus<U256>, Instant)>,
 corpus_expiration: Duration,
 }
@@ -139,7 +139,7 @@ impl Cache {
 /// Get gas price corpus, if recent enough.
 pub fn gas_price_corpus(&self) -> Option<Corpus<U256>> {
-let now = SteadyTime::now();
+let now = Instant::now();

 self.corpus.as_ref().and_then(|&(ref corpus, ref tm)| {
 if *tm + self.corpus_expiration >= now {
@@ -152,7 +152,7 @@ impl Cache {
 /// Set the cached gas price corpus.
 pub fn set_gas_price_corpus(&mut self, corpus: Corpus<U256>) {
-self.corpus = Some((corpus, SteadyTime::now()))
+self.corpus = Some((corpus, Instant::now()))
 }

 /// Get the memory used.
@@ -175,18 +175,18 @@ impl HeapSizeOf for Cache {
 #[cfg(test)]
 mod tests {
 use super::Cache;
-use time::Duration;
+use std::time::Duration;

 #[test]
 fn corpus_inaccessible() {
-let mut cache = Cache::new(Default::default(), Duration::hours(5));
+let mut cache = Cache::new(Default::default(), Duration::from_secs(5 * 3600));

 cache.set_gas_price_corpus(vec![].into());
 assert_eq!(cache.gas_price_corpus(), Some(vec![].into()));

 {
 let corpus_time = &mut cache.corpus.as_mut().unwrap().1;
-*corpus_time = *corpus_time - Duration::hours(6);
+*corpus_time = *corpus_time - Duration::from_secs(6 * 3600);
 }
 assert!(cache.gas_price_corpus().is_none());
 }
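The hunks above are the core of the migration: `time::SteadyTime` becomes `std::time::Instant`, and `time::Duration::hours(n)` becomes `std::time::Duration::from_secs(n * 3600)` because the std type has no hours constructor. A standalone sketch of the same expiry pattern follows; it is not part of the diff, and the `Expiring` type and names are invented for illustration.

use std::time::{Duration, Instant};

// Minimal stand-in for the cache-entry pattern above: a value tagged with
// the Instant at which it was stored (illustrative names only).
struct Expiring<T> {
    value: T,
    stored_at: Instant,
}

impl<T: Clone> Expiring<T> {
    fn new(value: T) -> Self {
        Expiring { value, stored_at: Instant::now() }
    }

    // Return the value only if it is younger than `ttl`.
    fn get(&self, ttl: Duration) -> Option<T> {
        // `Instant + Duration >= Instant` mirrors the `*tm + self.corpus_expiration >= now` check.
        if self.stored_at + ttl >= Instant::now() {
            Some(self.value.clone())
        } else {
            None
        }
    }
}

fn main() {
    // std::time::Duration has no `hours` constructor, hence `6 * 3600` seconds in the diff.
    let six_hours = Duration::from_secs(6 * 3600);
    let entry = Expiring::new(42u32);
    assert_eq!(entry.get(six_hours), Some(42));
}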
@@ -732,7 +732,7 @@ mod tests {
 use kvdb::KeyValueDB;
 use kvdb_memorydb;

-use time::Duration;
+use std::time::Duration;
 use parking_lot::Mutex;

 fn make_db() -> Arc<KeyValueDB> {
@@ -745,7 +745,7 @@ mod tests {
 let genesis_header = spec.genesis_header();
 let db = make_db();

-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
@@ -778,7 +778,7 @@ mod tests {
 let spec = Spec::new_test();
 let genesis_header = spec.genesis_header();
 let db = make_db();
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
@@ -860,7 +860,7 @@ mod tests {
 fn earliest_is_latest() {
 let spec = Spec::new_test();
 let db = make_db();
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
@@ -873,7 +873,7 @@ mod tests {
 let spec = Spec::new_test();
 let genesis_header = spec.genesis_header();
 let db = make_db();
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 {
 let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
@@ -909,7 +909,7 @@ mod tests {
 let spec = Spec::new_test();
 let genesis_header = spec.genesis_header();
 let db = make_db();
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 {
 let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
@@ -964,7 +964,7 @@ mod tests {
 let spec = Spec::new_test();
 let genesis_header = spec.genesis_header();
 let db = make_db();
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
@@ -978,7 +978,7 @@ mod tests {
 let spec = Spec::new_test();
 let genesis_header = spec.genesis_header();
 let db = make_db();
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
@@ -107,7 +107,7 @@ mod tests {
 use std::sync::Arc;
 use cache::Cache;
 use client::fetch;
-use time::Duration;
+use std::time::Duration;
 use parking_lot::Mutex;
 use kvdb_memorydb;
 use ethcore::db::NUM_COLUMNS;
@@ -116,7 +116,7 @@ mod tests {
 fn it_works() {
 let db = Arc::new(kvdb_memorydb::create(NUM_COLUMNS.unwrap_or(0)));
 let spec = Spec::new_test();
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 Service::start(Default::default(), &spec, fetch::unavailable(), db, cache).unwrap();
 }
@@ -75,7 +75,6 @@ extern crate rlp_derive;
 extern crate serde;
 extern crate smallvec;
 extern crate stats;
-extern crate time;
 extern crate vm;
 extern crate keccak_hash as hash;
 extern crate triehash;
@@ -27,11 +27,11 @@
 use std::collections::{HashMap, VecDeque};
 use std::fs::File;
 use std::path::PathBuf;
+use std::time::{Duration, Instant};

 use request::{CompleteRequest, Kind};

 use bincode;
-use time;
 use parking_lot::{RwLock, Mutex};

 /// Number of time periods samples should be kept for.
@@ -107,7 +107,7 @@ impl LoadDistribution {
 };

 LoadTimer {
-start: time::precise_time_ns(),
+start: Instant::now(),
 n: n,
 dist: self,
 kind: kind,
@@ -151,10 +151,10 @@ impl LoadDistribution {
 store.store(&*samples);
 }

-fn update(&self, kind: Kind, elapsed: u64, n: u64) {
+fn update(&self, kind: Kind, elapsed: Duration, n: u64) {
 macro_rules! update_counters {
 ($counters: expr) => {
-$counters.0 = $counters.0.saturating_add(elapsed);
+$counters.0 = $counters.0.saturating_add({ elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64 });
 $counters.1 = $counters.1.saturating_add(n);
 }
 };
@@ -180,7 +180,7 @@ impl LoadDistribution {
 /// A timer for a single request.
 /// On drop, this will update the distribution.
 pub struct LoadTimer<'a> {
-start: u64,
+start: Instant,
 n: u64,
 dist: &'a LoadDistribution,
 kind: Kind,
@@ -188,7 +188,7 @@ pub struct LoadTimer<'a> {
 impl<'a> Drop for LoadTimer<'a> {
 fn drop(&mut self) {
-let elapsed = time::precise_time_ns() - self.start;
+let elapsed = self.start.elapsed();
 self.dist.update(self.kind, elapsed, self.n);
 }
 }
@@ -225,7 +225,7 @@ mod tests {
 let dist = LoadDistribution::load(&NullStore);
 assert_eq!(dist.expected_time_ns(Kind::Headers), hardcoded_serve_time(Kind::Headers));

-dist.update(Kind::Headers, 100_000, 100);
+dist.update(Kind::Headers, Duration::new(0, 100_000), 100);
 dist.end_period(&NullStore);

 assert_eq!(dist.expected_time_ns(Kind::Headers), 1000);
@@ -238,7 +238,7 @@ mod tests {
 let mut sum = 0;

 for (i, x) in (0..10).map(|x| x * 10_000).enumerate() {
-dist.update(Kind::Headers, x, 1);
+dist.update(Kind::Headers, Duration::new(0, x), 1);
 dist.end_period(&NullStore);

 sum += x;
@@ -248,7 +248,7 @@
 // should be weighted below the maximum entry.
 let arith_average = (sum as f64 / (i + 1) as f64) as u64;
-assert!(moving_average < x);
+assert!(moving_average < x as u64);

 // when there are only 2 entries, they should be equal due to choice of
 // ALPHA = 1/N.
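LoadTimer now records an Instant at construction and converts the elapsed Duration to nanoseconds by hand, since `Duration::as_nanos` was not available on the compilers targeted here. A minimal sketch of that RAII shape follows; the names are illustrative and not the crate's API.

use std::time::Instant;

// Sketch of the timer pattern used above: record an Instant on construction
// and report the elapsed time in nanoseconds when the value is dropped.
struct ScopedTimer {
    label: &'static str,
    start: Instant,
}

impl ScopedTimer {
    fn new(label: &'static str) -> Self {
        ScopedTimer { label, start: Instant::now() }
    }
}

impl Drop for ScopedTimer {
    fn drop(&mut self) {
        let elapsed = self.start.elapsed();
        // Nanoseconds computed manually, matching the update_counters! change above.
        let ns = elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64;
        println!("{}: {} ns", self.label, ns);
    }
}

fn main() {
    let _t = ScopedTimer::new("request");
    // ... serve the request; the elapsed time is printed when _t goes out of scope ...
}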
@@ -26,7 +26,7 @@ use rlp::{RlpStream, UntrustedRlp};
 use ethereum_types::{H256, U256};
 use kvdb::DBValue;
 use parking_lot::{Mutex, RwLock};
-use time::{Duration, SteadyTime};
+use std::time::{Duration, Instant};

 use std::collections::{HashMap, HashSet};
 use std::fmt;
@@ -73,7 +73,7 @@ const RECALCULATE_COSTS_TIMEOUT: TimerToken = 3;
 const RECALCULATE_COSTS_INTERVAL_MS: u64 = 60 * 60 * 1000;

 // minimum interval between updates.
-const UPDATE_INTERVAL_MS: i64 = 5000;
+const UPDATE_INTERVAL_MS: u64 = 5000;

 /// Supported protocol versions.
 pub const PROTOCOL_VERSIONS: &'static [u8] = &[1];
@@ -109,20 +109,20 @@ mod packet {
 // timeouts for different kinds of requests. all values are in milliseconds.
 mod timeout {
-pub const HANDSHAKE: i64 = 2500;
-pub const ACKNOWLEDGE_UPDATE: i64 = 5000;
-pub const BASE: i64 = 1500; // base timeout for packet.
+pub const HANDSHAKE: u64 = 2500;
+pub const ACKNOWLEDGE_UPDATE: u64 = 5000;
+pub const BASE: u64 = 1500; // base timeout for packet.

 // timeouts per request within packet.
-pub const HEADERS: i64 = 250; // per header?
-pub const TRANSACTION_INDEX: i64 = 100;
-pub const BODY: i64 = 50;
-pub const RECEIPT: i64 = 50;
-pub const PROOF: i64 = 100; // state proof
-pub const CONTRACT_CODE: i64 = 100;
-pub const HEADER_PROOF: i64 = 100;
-pub const TRANSACTION_PROOF: i64 = 1000; // per gas?
-pub const EPOCH_SIGNAL: i64 = 200;
+pub const HEADERS: u64 = 250; // per header?
+pub const TRANSACTION_INDEX: u64 = 100;
+pub const BODY: u64 = 50;
+pub const RECEIPT: u64 = 50;
+pub const PROOF: u64 = 100; // state proof
+pub const CONTRACT_CODE: u64 = 100;
+pub const HEADER_PROOF: u64 = 100;
+pub const TRANSACTION_PROOF: u64 = 1000; // per gas?
+pub const EPOCH_SIGNAL: u64 = 200;
 }

 /// A request id.
@@ -144,7 +144,7 @@ impl fmt::Display for ReqId {
 // may not have received one for.
 struct PendingPeer {
 sent_head: H256,
-last_update: SteadyTime,
+last_update: Instant,
 }

 /// Relevant data to each peer. Not accessible publicly, only `pub` due to
@@ -155,13 +155,13 @@ pub struct Peer {
 capabilities: Capabilities,
 remote_flow: Option<(Credits, FlowParams)>,
 sent_head: H256, // last chain head we've given them.
-last_update: SteadyTime,
+last_update: Instant,
 pending_requests: RequestSet,
 failed_requests: Vec<ReqId>,
 propagated_transactions: HashSet<H256>,
 skip_update: bool,
 local_flow: Arc<FlowParams>,
-awaiting_acknowledge: Option<(SteadyTime, Arc<FlowParams>)>,
+awaiting_acknowledge: Option<(Instant, Arc<FlowParams>)>,
 }

 /// Whether or not a peer was kept by a handler
@@ -447,7 +447,7 @@ impl LightProtocol {
 });

 // begin timeout.
-peer.pending_requests.insert(req_id, requests, cost, SteadyTime::now());
+peer.pending_requests.insert(req_id, requests, cost, Instant::now());
 Ok(req_id)
 }
 }
@@ -457,7 +457,7 @@ impl LightProtocol {
 /// The announcement is expected to be valid.
 pub fn make_announcement(&self, io: &IoContext, mut announcement: Announcement) {
 let mut reorgs_map = HashMap::new();
-let now = SteadyTime::now();
+let now = Instant::now();

 // update stored capabilities
 self.capabilities.write().update_from(&announcement);
@@ -470,7 +470,7 @@ impl LightProtocol {
 // the timer approach will skip 1 (possibly 2) in rare occasions.
 if peer_info.sent_head == announcement.head_hash ||
 peer_info.status.head_num >= announcement.head_num ||
-now - peer_info.last_update < Duration::milliseconds(UPDATE_INTERVAL_MS) {
+now - peer_info.last_update < Duration::from_millis(UPDATE_INTERVAL_MS) {
 continue
 }
@@ -537,7 +537,7 @@ impl LightProtocol {
 Some(peer_info) => {
 let mut peer_info = peer_info.lock();
 let peer_info: &mut Peer = &mut *peer_info;
-let req_info = peer_info.pending_requests.remove(&req_id, SteadyTime::now());
+let req_info = peer_info.pending_requests.remove(&req_id, Instant::now());
 let last_batched = peer_info.pending_requests.is_empty();
 let flow_info = peer_info.remote_flow.as_mut();
@@ -599,14 +599,14 @@ impl LightProtocol {
 // check timeouts and punish peers.
 fn timeout_check(&self, io: &IoContext) {
-let now = SteadyTime::now();
+let now = Instant::now();

 // handshake timeout
 {
 let mut pending = self.pending_peers.write();
 let slowpokes: Vec<_> = pending.iter()
 .filter(|&(_, ref peer)| {
-peer.last_update + Duration::milliseconds(timeout::HANDSHAKE) <= now
+peer.last_update + Duration::from_millis(timeout::HANDSHAKE) <= now
 })
 .map(|(&p, _)| p)
 .collect();
@@ -619,7 +619,7 @@ impl LightProtocol {
 }

 // request and update ack timeouts
-let ack_duration = Duration::milliseconds(timeout::ACKNOWLEDGE_UPDATE);
+let ack_duration = Duration::from_millis(timeout::ACKNOWLEDGE_UPDATE);
 {
 for (peer_id, peer) in self.peers.read().iter() {
 let peer = peer.lock();
@@ -709,7 +709,7 @@ impl LightProtocol {
 self.pending_peers.write().insert(*peer, PendingPeer {
 sent_head: chain_info.best_block_hash,
-last_update: SteadyTime::now(),
+last_update: Instant::now(),
 });

 trace!(target: "pip", "Sending status to peer {}", peer);
@@ -771,7 +771,7 @@ impl LightProtocol {
 *self.flow_params.write() = new_params.clone();

 let peers = self.peers.read();
-let now = SteadyTime::now();
+let now = Instant::now();

 let packet_body = {
 let mut stream = RlpStream::new_list(3);
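All the millisecond constants above switch from i64 to u64 because `std::time::Duration::from_millis` takes a u64, where `time::Duration::milliseconds` took an i64; the timeout checks then become plain Instant arithmetic. A small sketch of that check, with a hypothetical constant standing in for `timeout::HANDSHAKE`:

use std::time::{Duration, Instant};

// Illustrative constant in the style of the timeout module above (ms as u64).
const HANDSHAKE_TIMEOUT_MS: u64 = 2_500;

// Same shape as `peer.last_update + Duration::from_millis(timeout::HANDSHAKE) <= now`.
fn handshake_timed_out(last_update: Instant, now: Instant) -> bool {
    last_update + Duration::from_millis(HANDSHAKE_TIMEOUT_MS) <= now
}

fn main() {
    let started = Instant::now();
    assert!(!handshake_timed_out(started, Instant::now()));
}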
@@ -31,7 +31,7 @@ use super::error::Error;
 use rlp::*;
 use ethereum_types::U256;
-use time::{Duration, SteadyTime};
+use std::time::{Duration, Instant};

 /// Credits value.
 ///
@@ -41,7 +41,7 @@ use time::{Duration, SteadyTime};
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct Credits {
 estimate: U256,
-recharge_point: SteadyTime,
+recharge_point: Instant,
 }

 impl Credits {
@@ -53,7 +53,7 @@ impl Credits {
 /// a response to a request.
 pub fn update_to(&mut self, value: U256) {
 self.estimate = value;
-self.recharge_point = SteadyTime::now();
+self.recharge_point = Instant::now();
 }

 /// Maintain ratio to current limit against an old limit.
@@ -351,19 +351,19 @@ impl FlowParams {
 pub fn create_credits(&self) -> Credits {
 Credits {
 estimate: self.limit,
-recharge_point: SteadyTime::now(),
+recharge_point: Instant::now(),
 }
 }

 /// Recharge the given credits based on time passed since last
 /// update.
 pub fn recharge(&self, credits: &mut Credits) {
-let now = SteadyTime::now();
+let now = Instant::now();

 // recompute and update only in terms of full seconds elapsed
 // in order to keep the estimate as an underestimate.
-let elapsed = (now - credits.recharge_point).num_seconds();
-credits.recharge_point = credits.recharge_point + Duration::seconds(elapsed);
+let elapsed = (now - credits.recharge_point).as_secs();
+credits.recharge_point = credits.recharge_point + Duration::from_secs(elapsed);

 let elapsed: U256 = elapsed.into();
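The recharge hunk keeps the old behaviour of only crediting whole elapsed seconds: it truncates the elapsed Duration with `as_secs` and advances the recharge point by exactly that many seconds, so the sub-second remainder stays pending for the next call. A sketch of that idea in isolation (the function name is invented):

use std::time::{Duration, Instant};

// Advance `recharge_point` by the whole seconds that have elapsed and return
// that count; the fractional remainder is deliberately left uncredited.
fn whole_seconds_since(recharge_point: &mut Instant) -> u64 {
    let now = Instant::now();
    let elapsed = (now - *recharge_point).as_secs(); // truncates toward zero
    *recharge_point += Duration::from_secs(elapsed);  // keep the sub-second remainder
    elapsed
}

fn main() {
    let mut point = Instant::now();
    let secs = whole_seconds_since(&mut point);
    assert_eq!(secs, 0); // nothing has elapsed yet in this toy run
}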
@@ -23,14 +23,13 @@
 use std::collections::{BTreeMap, HashMap};
 use std::iter::FromIterator;
+use std::time::{Duration, Instant};

 use request::Request;
 use request::NetworkRequests as Requests;
 use net::{timeout, ReqId};
 use ethereum_types::U256;

-use time::{Duration, SteadyTime};
-
 // Request set entry: requests + cost.
 #[derive(Debug)]
 struct Entry(Requests, U256);
@@ -40,7 +39,7 @@ struct Entry(Requests, U256);
 pub struct RequestSet {
 counter: u64,
 cumulative_cost: U256,
-base: Option<SteadyTime>,
+base: Option<Instant>,
 ids: HashMap<ReqId, u64>,
 reqs: BTreeMap<u64, Entry>,
 }
@@ -59,7 +58,7 @@ impl Default for RequestSet {
 impl RequestSet {
 /// Push requests onto the stack.
-pub fn insert(&mut self, req_id: ReqId, req: Requests, cost: U256, now: SteadyTime) {
+pub fn insert(&mut self, req_id: ReqId, req: Requests, cost: U256, now: Instant) {
 let counter = self.counter;
 self.cumulative_cost = self.cumulative_cost + cost;
@@ -74,7 +73,7 @@ impl RequestSet {
 }

 /// Remove a set of requests from the stack.
-pub fn remove(&mut self, req_id: &ReqId, now: SteadyTime) -> Option<Requests> {
+pub fn remove(&mut self, req_id: &ReqId, now: Instant) -> Option<Requests> {
 let id = match self.ids.remove(&req_id) {
 Some(id) => id,
 None => return None,
@@ -94,7 +93,7 @@ impl RequestSet {
 /// Check for timeout against the given time. Returns true if
 /// has timed out, false otherwise.
-pub fn check_timeout(&self, now: SteadyTime) -> bool {
+pub fn check_timeout(&self, now: Instant) -> bool {
 let base = match self.base.as_ref().cloned() {
 Some(base) => base,
 None => return false,
@@ -128,7 +127,7 @@ impl RequestSet {
 // helper to calculate timeout for a specific set of requests.
 // it's a base amount + some amount per request.
 fn compute_timeout(reqs: &Requests) -> Duration {
-Duration::milliseconds(reqs.requests().iter().fold(timeout::BASE, |tm, req| {
+Duration::from_millis(reqs.requests().iter().fold(timeout::BASE, |tm, req| {
 tm + match *req {
 Request::Headers(_) => timeout::HEADERS,
 Request::HeaderProof(_) => timeout::HEADER_PROOF,
@@ -148,34 +147,34 @@ fn compute_timeout(reqs: &Requests) -> Duration {
 mod tests {
 use net::ReqId;
 use request::Builder;
-use time::{SteadyTime, Duration};
+use std::time::{Instant, Duration};
 use super::{RequestSet, compute_timeout};

 #[test]
 fn multi_timeout() {
-let test_begin = SteadyTime::now();
+let test_begin = Instant::now();
 let mut req_set = RequestSet::default();

 let the_req = Builder::default().build();
 let req_time = compute_timeout(&the_req);
 req_set.insert(ReqId(0), the_req.clone(), 0.into(), test_begin);
-req_set.insert(ReqId(1), the_req, 0.into(), test_begin + Duration::seconds(1));
+req_set.insert(ReqId(1), the_req, 0.into(), test_begin + Duration::from_secs(1));

 assert_eq!(req_set.base, Some(test_begin));

 let test_end = test_begin + req_time;
 assert!(req_set.check_timeout(test_end));

-req_set.remove(&ReqId(0), test_begin + Duration::seconds(1)).unwrap();
+req_set.remove(&ReqId(0), test_begin + Duration::from_secs(1)).unwrap();
 assert!(!req_set.check_timeout(test_end));
-assert!(req_set.check_timeout(test_end + Duration::seconds(1)));
+assert!(req_set.check_timeout(test_end + Duration::from_secs(1)));
 }

 #[test]
 fn cumulative_cost() {
 let the_req = Builder::default().build();
-let test_begin = SteadyTime::now();
-let test_end = test_begin + Duration::seconds(1);
+let test_begin = Instant::now();
+let test_end = test_begin + Duration::from_secs(1);
 let mut req_set = RequestSet::default();

 for i in 0..5 {
@@ -35,6 +35,7 @@ use rlp::*;
 use ethereum_types::{H256, U256, Address};

 use std::sync::Arc;
+use std::time::Instant;

 // helper for encoding a single request into a packet.
 // panics on bad backreference.
@@ -661,8 +662,8 @@ fn id_guard() {
 let mut pending_requests = RequestSet::default();

-pending_requests.insert(req_id_1, req.clone(), 0.into(), ::time::SteadyTime::now());
-pending_requests.insert(req_id_2, req, 1.into(), ::time::SteadyTime::now());
+pending_requests.insert(req_id_1, req.clone(), 0.into(), Instant::now());
+pending_requests.insert(req_id_2, req, 1.into(), Instant::now());

 proto.peers.write().insert(peer_id, ::parking_lot::Mutex::new(Peer {
 local_credits: flow_params.create_credits(),
@@ -670,7 +671,7 @@ fn id_guard() {
 capabilities: capabilities.clone(),
 remote_flow: Some((flow_params.create_credits(), (&*flow_params).clone())),
 sent_head: provider.client.chain_info().best_block_hash,
-last_update: ::time::SteadyTime::now(),
+last_update: Instant::now(),
 pending_requests: pending_requests,
 failed_requests: Vec::new(),
 propagated_transactions: Default::default(),
@@ -941,6 +941,7 @@ impl Signal {
 #[cfg(test)]
 mod tests {
 use super::*;
+use std::time::Duration;
 use ethereum_types::{H256, Address};
 use memorydb::MemoryDB;
 use parking_lot::Mutex;
@@ -954,7 +955,7 @@ mod tests {
 use ethcore::receipt::{Receipt, TransactionOutcome};

 fn make_cache() -> ::cache::Cache {
-::cache::Cache::new(Default::default(), ::time::Duration::seconds(1))
+::cache::Cache::new(Default::default(), Duration::from_secs(1))
 }

 #[test]

@@ -24,7 +24,7 @@ use network::{PeerId, NodeId};
 use net::*;
 use ethereum_types::H256;
 use parking_lot::Mutex;
-use time::Duration;
+use std::time::Duration;
 use ::request::{self as basic_request, Response};

 use std::sync::Arc;
@@ -88,7 +88,7 @@ struct Harness {
 impl Harness {
 fn create() -> Self {
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::minutes(1))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(60))));
 Harness {
 service: OnDemand::new_test(cache),
 }
@@ -19,7 +19,6 @@ use std::str::FromStr;
 use std::sync::{Arc, Weak};
 use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
 use std::time::{Instant};
-use time::precise_time_ns;
 use itertools::Itertools;

 // util
@@ -293,7 +292,7 @@ impl Importer {
 return 0;
 }
 trace_time!("import_verified_blocks");
-let start = precise_time_ns();
+let start = Instant::now();

 for block in blocks {
 let header = &block.header;
@@ -326,7 +325,10 @@ impl Importer {
 self.block_queue.mark_as_bad(&invalid_blocks);
 }
 let is_empty = self.block_queue.mark_as_good(&imported_blocks);
-let duration_ns = precise_time_ns() - start;
+let duration_ns = {
+let elapsed = start.elapsed();
+elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64
+};
 (imported_blocks, import_results, invalid_blocks, imported, proposed_blocks, duration_ns, is_empty)
 };
@@ -2036,7 +2038,7 @@ impl ScheduleInfo for Client {
 impl ImportSealedBlock for Client {
 fn import_sealed_block(&self, block: SealedBlock) -> ImportResult {
 let h = block.header().hash();
-let start = precise_time_ns();
+let start = Instant::now();
 let route = {
 // scope for self.import_lock
 let _import_lock = self.importer.import_lock.lock();
@@ -2061,7 +2063,10 @@ impl ImportSealedBlock for Client {
 retracted.clone(),
 vec![h.clone()],
 vec![],
-precise_time_ns() - start,
+{
+let elapsed = start.elapsed();
+elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64
+},
 );
 });
 self.db.read().flush().expect("DB flush failed.");
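The import path above still reports durations to its callers in nanoseconds, so the diff measures with Instant and converts back at the edge. A standalone sketch of that block-expression conversion, assuming only std:

use std::time::Instant;

fn main() {
    let start = Instant::now();
    // ... import work would happen here ...

    // Same expression-block shape the diff uses so surrounding code that
    // expects nanoseconds as u64 does not have to change.
    let duration_ns = {
        let elapsed = start.elapsed();
        elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64
    };
    println!("import took {} ns", duration_ns);
}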
@@ -17,7 +17,7 @@
 //! Tendermint specific parameters.

 use ethjson;
-use time::Duration;
+use std::time::Duration;
 use ethereum_types::U256;
 use super::super::validator_set::{ValidatorSet, new_validator_set};
 use super::super::transition::Timeouts;
@@ -45,10 +45,10 @@ pub struct TendermintTimeouts {
 impl Default for TendermintTimeouts {
 fn default() -> Self {
 TendermintTimeouts {
-propose: Duration::milliseconds(1000),
-prevote: Duration::milliseconds(1000),
-precommit: Duration::milliseconds(1000),
-commit: Duration::milliseconds(1000),
+propose: Duration::from_millis(1000),
+prevote: Duration::from_millis(1000),
+precommit: Duration::from_millis(1000),
+commit: Duration::from_millis(1000),
 }
 }
 }
@@ -70,7 +70,7 @@ impl Timeouts<Step> for TendermintTimeouts {
 fn to_duration(ms: ethjson::uint::Uint) -> Duration {
 let ms: usize = ms.into();
-Duration::milliseconds(ms as i64)
+Duration::from_millis(ms as u64)
 }

 impl From<ethjson::spec::TendermintParams> for TendermintParams {

@@ -17,7 +17,7 @@
 //! Engine timeout transitioning calls `Engine.step()` on timeout.

 use std::sync::Weak;
-use time::Duration;
+use std::time::Duration;
 use io::{IoContext, IoHandler, TimerToken};
 use engines::Engine;
 use parity_machine::Machine;
@@ -51,7 +51,8 @@ impl<S, M: Machine> TransitionHandler<S, M> where S: Sync + Send + Clone {
 pub const ENGINE_TIMEOUT_TOKEN: TimerToken = 23;

 fn set_timeout<S: Sync + Send + Clone>(io: &IoContext<S>, timeout: Duration) {
-io.register_timer_once(ENGINE_TIMEOUT_TOKEN, timeout.num_milliseconds() as u64)
+let ms = timeout.as_secs() * 1_000 + timeout.subsec_nanos() as u64 / 1_000_000;
+io.register_timer_once(ENGINE_TIMEOUT_TOKEN, ms)
 .unwrap_or_else(|e| warn!(target: "engine", "Failed to set consensus step timeout: {}.", e))
 }
@@ -60,7 +61,7 @@ impl<S, M> IoHandler<S> for TransitionHandler<S, M>
 {
 fn initialize(&self, io: &IoContext<S>) {
 let initial = self.timeouts.initial();
-trace!(target: "engine", "Setting the initial timeout to {}.", initial);
+trace!(target: "engine", "Setting the initial timeout to {:?}.", initial);
 set_timeout(io, initial);
 }
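The timer registration above wants milliseconds as u64, and std's Duration offered no direct milliseconds accessor on the toolchain in use, hence the manual `as_secs`/`subsec_nanos` arithmetic in `set_timeout`. The same conversion as a free function (a sketch, not code from the repository):

use std::time::Duration;

// Whole milliseconds contained in a Duration, matching the conversion above.
fn duration_to_ms(timeout: Duration) -> u64 {
    timeout.as_secs() * 1_000 + timeout.subsec_nanos() as u64 / 1_000_000
}

fn main() {
    assert_eq!(duration_to_ms(Duration::from_millis(1_000)), 1_000);
    assert_eq!(duration_to_ms(Duration::new(2, 250_000_000)), 2_250);
}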
@@ -18,11 +18,11 @@
 use std::cmp;
 use std::cell::RefCell;
+use std::time::{SystemTime, UNIX_EPOCH};
 use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP, keccak};
 use heapsize::HeapSizeOf;
 use ethereum_types::{H256, U256, Address, Bloom};
 use bytes::Bytes;
-use time::get_time;
 use rlp::*;

 pub use types::BlockNumber;
@@ -189,7 +189,7 @@ impl Header {
 /// Set the timestamp field of the header.
 pub fn set_timestamp(&mut self, a: u64) { self.timestamp = a; self.note_dirty(); }
 /// Set the timestamp field of the header to the current time.
-pub fn set_timestamp_now(&mut self, but_later_than: u64) { self.timestamp = cmp::max(get_time().sec as u64, but_later_than + 1); self.note_dirty(); }
+pub fn set_timestamp_now(&mut self, but_later_than: u64) { let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default(); self.timestamp = cmp::max(now.as_secs() as u64, but_later_than + 1); self.note_dirty(); }
 /// Set the number field of the header.
 pub fn set_number(&mut self, a: BlockNumber) { self.number = a; self.note_dirty(); }
 /// Set the author field of the header.
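Here `time::get_time().sec` is replaced by `SystemTime::now().duration_since(UNIX_EPOCH)`; the only new failure mode is a system clock set before 1970, which the diff maps to zero with `unwrap_or_default`. A sketch of the helper shape this implies (the function is hypothetical, the diff inlines the expression):

use std::time::{SystemTime, UNIX_EPOCH};

// Seconds since the Unix epoch, the replacement for get_time().sec above.
fn unix_now_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
}

fn main() {
    let now = unix_now_secs();
    // e.g. clamp a block timestamp to be strictly later than some earlier one:
    let but_later_than = now; // hypothetical earlier timestamp
    let timestamp = std::cmp::max(now, but_later_than + 1);
    assert!(timestamp > but_later_than);
}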
@@ -109,7 +109,6 @@ extern crate rlp_derive;
 extern crate rustc_hex;
 extern crate stats;
 extern crate stop_guard;
-extern crate time;
 extern crate using_queue;
 extern crate table;
 extern crate vm;
@@ -22,13 +22,13 @@
 //! 3. Final verification against the blockchain done before enactment.

 use std::collections::HashSet;
+use std::time::{SystemTime, UNIX_EPOCH};

 use bytes::Bytes;
 use ethereum_types::{H256, U256};
 use hash::keccak;
 use heapsize::HeapSizeOf;
 use rlp::UntrustedRlp;
-use time::get_time;
 use triehash::ordered_trie_root;
 use unexpected::{Mismatch, OutOfBounds};
@@ -284,7 +284,8 @@ pub fn verify_header_params(header: &Header, engine: &EthEngine, is_full: bool)
 if is_full {
 const ACCEPTABLE_DRIFT_SECS: u64 = 15;
-let max_time = get_time().sec as u64 + ACCEPTABLE_DRIFT_SECS;
+let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default();
+let max_time = now.as_secs() + ACCEPTABLE_DRIFT_SECS;
 let invalid_threshold = max_time + ACCEPTABLE_DRIFT_SECS * 9;
 let timestamp = header.timestamp();
@@ -346,6 +347,7 @@ mod tests {
 use super::*;

 use std::collections::{BTreeMap, HashMap};
+use std::time::{SystemTime, UNIX_EPOCH};
 use ethereum_types::{H256, Bloom, U256};
 use blockchain::{BlockDetails, TransactionAddress, BlockReceipts};
 use encoded;
@@ -355,7 +357,6 @@ mod tests {
 use ethkey::{Random, Generator};
 use spec::{CommonParams, Spec};
 use tests::helpers::{create_test_block_with_data, create_test_block};
-use time::get_time;
 use transaction::{SignedTransaction, Transaction, UnverifiedTransaction, Action};
 use types::log_entry::{LogEntry, LocalizedLogEntry};
 use rlp;
@@ -682,11 +683,11 @@ mod tests {
 check_fail_timestamp(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine), false);

 header = good.clone();
-header.set_timestamp(get_time().sec as u64 + 20);
+header.set_timestamp(SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() + 20);
 check_fail_timestamp(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine), true);

 header = good.clone();
-header.set_timestamp(get_time().sec as u64 + 10);
+header.set_timestamp(SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() + 10);
 header.set_uncles_hash(good_uncles_hash.clone());
 header.set_transactions_root(good_transactions_root.clone());
 check_ok(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine));
@@ -193,7 +193,7 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> {
 cmd.dirs.create_dirs(false, false, false)?;

 let cache = Arc::new(Mutex::new(
-LightDataCache::new(Default::default(), ::time::Duration::seconds(0))
+LightDataCache::new(Default::default(), Duration::new(0, 0))
 ));

 let mut config = LightClientConfig {

@@ -43,7 +43,6 @@ extern crate serde;
 extern crate serde_json;
 #[macro_use]
 extern crate serde_derive;
-extern crate time;
 extern crate toml;

 extern crate ethcore;

@@ -76,7 +76,7 @@ const SNAPSHOT_HISTORY: u64 = 100;
 // Number of minutes before a given gas price corpus should expire.
 // Light client only.
-const GAS_CORPUS_EXPIRATION_MINUTES: i64 = 60 * 6;
+const GAS_CORPUS_EXPIRATION_MINUTES: u64 = 60 * 6;

 // Pops along with error messages when a password is missing or invalid.
 const VERIFY_PASSWORD_HINT: &'static str = "Make sure valid password is present in files passed using `--password` or in the configuration file.";
@@ -217,7 +217,7 @@ fn execute_light_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger
 info!("Running in experimental {} mode.", Colour::Blue.bold().paint("Light Client"));

 // TODO: configurable cache size.
-let cache = LightDataCache::new(Default::default(), ::time::Duration::minutes(GAS_CORPUS_EXPIRATION_MINUTES));
+let cache = LightDataCache::new(Default::default(), Duration::from_secs(60 * GAS_CORPUS_EXPIRATION_MINUTES));
 let cache = Arc::new(Mutex::new(cache));

 // start client and create transaction queue.
@@ -24,7 +24,6 @@ serde = "1.0"
 serde_derive = "1.0"
 serde_json = "1.0"
 tempdir = "0.3"
-time = "0.1"
 tiny-keccak = "1.3"
 tokio-timer = "0.1"
 transient-hashmap = "0.4"

@@ -34,7 +34,6 @@ extern crate rustc_hex;
 extern crate semver;
 extern crate serde;
 extern crate serde_json;
-extern crate time;
 extern crate tiny_keccak;
 extern crate tokio_timer;
 extern crate transient_hashmap;
@@ -17,11 +17,10 @@
 //! Eth rpc implementation.

 use std::thread;
-use std::time::{Instant, Duration};
+use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH};
 use std::sync::Arc;

 use rlp::{self, UntrustedRlp};
-use time::get_time;
 use ethereum_types::{U256, H64, H160, H256, Address};
 use parking_lot::Mutex;
@@ -769,7 +768,8 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM, T: StateInfo + 'static> Eth for EthClient<
 let target = Ethash::difficulty_to_boundary(b.block().header().difficulty());
 let seed_hash = self.seed_compute.lock().hash_block_number(b.block().header().number());

-if no_new_work_timeout > 0 && b.block().header().timestamp() + no_new_work_timeout < get_time().sec as u64 {
+let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
+if no_new_work_timeout > 0 && b.block().header().timestamp() + no_new_work_timeout < now {
 Err(errors::no_new_work())
 } else if self.options.send_block_number_in_get_work {
 let block_number = b.block().header().number();
@@ -17,7 +17,7 @@
 use std::str::FromStr;
 use std::collections::HashMap;
 use std::sync::Arc;
-use std::time::{Instant, Duration};
+use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH};

 use ethereum_types::{H256, U256, Address};
 use parking_lot::Mutex;
@@ -31,7 +31,6 @@ use ethsync::SyncState;
 use miner::external::ExternalMiner;
 use rlp;
 use rustc_hex::{FromHex, ToHex};
-use time::get_time;
 use transaction::{Transaction, Action};

 use jsonrpc_core::IoHandler;
@@ -1144,7 +1143,8 @@ fn rpc_get_work_should_not_return_block_number() {
 fn rpc_get_work_should_timeout() {
 let eth_tester = EthTester::default();
 eth_tester.miner.set_author(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap());
-eth_tester.client.set_latest_block_timestamp(get_time().sec as u64 - 1000); // Set latest block to 1000 seconds ago
+let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() - 1000; // Set latest block to 1000 seconds ago
+eth_tester.client.set_latest_block_timestamp(timestamp);
 let hash = eth_tester.miner.map_sealing_work(&*eth_tester.client, |b| b.hash()).unwrap();

 // Request without providing timeout. This should work since we're disabling timeout.
@@ -24,7 +24,6 @@ kvdb = { path = "../util/kvdb" }
 macros = { path = "../util/macros" }
 log = "0.3"
 env_logger = "0.4"
-time = "0.1.34"
 rand = "0.4"
 heapsize = "0.4"
 semver = "0.6"

@@ -90,6 +90,7 @@
 use std::collections::{HashSet, HashMap};
 use std::cmp;
+use std::time::Instant;
 use hash::keccak;
 use heapsize::HeapSizeOf;
 use ethereum_types::{H256, U256};
@@ -104,7 +105,6 @@ use ethcore::error::*;
 use ethcore::snapshot::{ManifestData, RestorationStatus};
 use transaction::PendingTransaction;
 use sync_io::SyncIo;
-use time;
 use super::SyncConfig;
 use block_sync::{BlockDownloader, BlockRequest, BlockDownloaderImportError as DownloaderImportError, DownloadAction};
 use rand::Rng;
@@ -305,7 +305,7 @@ struct PeerInfo {
 /// Holds requested snapshot chunk hash if any.
 asking_snapshot_data: Option<H256>,
 /// Request timestamp
-ask_time: u64,
+ask_time: Instant,
 /// Holds a set of transactions recently sent to this peer to avoid spamming.
 last_sent_transactions: HashSet<H256>,
 /// Pending request is expired and result should be ignored
@@ -377,9 +377,9 @@ pub struct ChainSync {
 snapshot: Snapshot,
 /// Connected peers pending Status message.
 /// Value is request timestamp.
-handshaking_peers: HashMap<PeerId, u64>,
+handshaking_peers: HashMap<PeerId, Instant>,
 /// Sync start timestamp. Measured when first peer is connected
-sync_start_time: Option<u64>,
+sync_start_time: Option<Instant>,
 /// Transactions propagation statistics
 transactions_stats: TransactionsStats,
 /// Enable ancient block downloading
@@ -544,7 +544,7 @@ impl ChainSync {
 (best_hash, max_peers, snapshot_peers)
 };

-let timeout = (self.state == SyncState::WaitingPeers) && self.sync_start_time.map_or(false, |t| ((time::precise_time_ns() - t) / 1_000_000_000) > WAIT_PEERS_TIMEOUT_SEC);
+let timeout = (self.state == SyncState::WaitingPeers) && self.sync_start_time.map_or(false, |t| t.elapsed().as_secs() > WAIT_PEERS_TIMEOUT_SEC);

 if let (Some(hash), Some(peers)) = (best_hash, best_hash.map_or(None, |h| snapshot_peers.get(&h))) {
 if max_peers >= SNAPSHOT_MIN_PEERS {
@@ -616,7 +616,7 @@ impl ChainSync {
 asking: PeerAsking::Nothing,
 asking_blocks: Vec::new(),
 asking_hash: None,
-ask_time: 0,
+ask_time: Instant::now(),
 last_sent_transactions: HashSet::new(),
 expired: false,
 confirmation: if self.fork_block.is_none() { ForkConfirmation::Confirmed } else { ForkConfirmation::Unconfirmed },
@@ -627,7 +627,7 @@ impl ChainSync {
 };

 if self.sync_start_time.is_none() {
-self.sync_start_time = Some(time::precise_time_ns());
+self.sync_start_time = Some(Instant::now());
 }

 trace!(target: "sync", "New peer {} (protocol: {}, network: {:?}, difficulty: {:?}, latest:{}, genesis:{}, snapshot:{:?})",
@@ -1150,7 +1150,7 @@ impl ChainSync {
 debug!(target:"sync", "Error sending status request: {:?}", e);
 io.disconnect_peer(peer);
 } else {
-self.handshaking_peers.insert(peer, time::precise_time_ns());
+self.handshaking_peers.insert(peer, Instant::now());
 }
 }
@@ -1438,7 +1438,7 @@ impl ChainSync {
 warn!(target:"sync", "Asking {:?} while requesting {:?}", peer.asking, asking);
 }
 peer.asking = asking;
-peer.ask_time = time::precise_time_ns();
+peer.ask_time = Instant::now();
 let result = if packet_id >= ETH_PACKET_COUNT {
 sync.send_protocol(WARP_SYNC_PROTOCOL_ID, peer_id, packet_id, packet)
 } else {
@@ -1778,10 +1778,10 @@ impl ChainSync {
 }

 pub fn maintain_peers(&mut self, io: &mut SyncIo) {
-let tick = time::precise_time_ns();
+let tick = Instant::now();
 let mut aborting = Vec::new();
 for (peer_id, peer) in &self.peers {
-let elapsed = (tick - peer.ask_time) / 1_000_000_000;
+let elapsed = (tick - peer.ask_time).as_secs();
 let timeout = match peer.asking {
 PeerAsking::BlockHeaders => elapsed > HEADERS_TIMEOUT_SEC,
 PeerAsking::BlockBodies => elapsed > BODIES_TIMEOUT_SEC,
@@ -1802,9 +1802,9 @@ impl ChainSync {
 }

 // Check for handshake timeouts
-for (peer, ask_time) in &self.handshaking_peers {
+for (peer, &ask_time) in &self.handshaking_peers {
 let elapsed = (tick - ask_time) / 1_000_000_000;
-if elapsed > STATUS_TIMEOUT_SEC {
+if elapsed.as_secs() > STATUS_TIMEOUT_SEC {
 trace!(target:"sync", "Status timeout {}", peer);
 io.disconnect_peer(*peer);
 }
@@ -2474,7 +2474,7 @@ mod tests {
 asking: PeerAsking::Nothing,
 asking_blocks: Vec::new(),
 asking_hash: None,
-ask_time: 0,
+ask_time: Instant::now(),
 last_sent_transactions: HashSet::new(),
 expired: false,
 confirmation: super::ForkConfirmation::Confirmed,
@@ -2595,7 +2595,7 @@ mod tests {
 asking: PeerAsking::Nothing,
 asking_blocks: Vec::new(),
 asking_hash: None,
-ask_time: 0,
+ask_time: Instant::now(),
 last_sent_transactions: HashSet::new(),
 expired: false,
 confirmation: super::ForkConfirmation::Confirmed,
@@ -29,7 +29,6 @@ extern crate ethcore_transaction as transaction;
 extern crate ethcore;
 extern crate ethereum_types;
 extern crate env_logger;
-extern crate time;
 extern crate plain_hasher;
 extern crate rand;
 extern crate semver;

@@ -32,7 +32,7 @@ use light::provider::LightProvider;
 use network::{NodeId, PeerId};
 use parking_lot::RwLock;

-use time::Duration;
+use std::time::Duration;
 use light::cache::Cache;

 const NETWORK_ID: u64 = 0xcafebabe;
@@ -218,7 +218,7 @@ impl TestNet<Peer> {
 // skip full verification because the blocks are bad.
 config.verify_full = false;
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));
 let db = kvdb_memorydb::create(0);
 let client = LightClient::new(
 config,

@@ -11,7 +11,6 @@ log = "0.3"
 mio = "0.6.8"
 bytes = "0.4"
 rand = "0.4"
-time = "0.1.34"
 tiny-keccak = "1.3"
 rust-crypto = "0.2.34"
 slab = "0.2"
@@ -19,11 +19,11 @@ use std::net::SocketAddr;
 use std::collections::{HashSet, HashMap, BTreeMap, VecDeque};
 use std::mem;
 use std::default::Default;
+use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
 use mio::*;
 use mio::deprecated::{Handler, EventLoop};
 use mio::udp::*;
 use hash::keccak;
-use time;
 use ethereum_types::{H256, H520};
 use rlp::*;
 use node_table::*;
@@ -59,7 +59,7 @@ pub struct NodeEntry {
 pub struct BucketEntry {
 pub address: NodeEntry,
 pub id_hash: H256,
-pub timeout: Option<u64>,
+pub timeout: Option<Instant>,
 }

 pub struct NodeBucket {
@@ -170,7 +170,7 @@ impl Discovery {
 if bucket.nodes.len() > BUCKET_SIZE {
 //ping least active node
 let last = bucket.nodes.back_mut().expect("Last item is always present when len() > 0");
-last.timeout = Some(time::precise_time_ns());
+last.timeout = Some(Instant::now());
 Some(last.address.endpoint.clone())
 } else { None }
 };
@@ -262,7 +262,7 @@ impl Discovery {
 for i in 0 .. source.item_count() {
 rlp.append_raw(source.at(i).as_raw(), 1);
 }
-let timestamp = time::get_time().sec as u32 + 60;
+let timestamp = 60 + SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs() as u32;
 rlp.append(&timestamp);

 let bytes = rlp.drain();
@@ -394,7 +394,8 @@ impl Discovery {
 /// Validate that given timestamp is in within one second of now or in the future
 fn check_timestamp(&self, timestamp: u64) -> Result<(), Error> {
-if self.check_timestamps && timestamp < time::get_time().sec as u64{
+let secs_since_epoch = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
+if self.check_timestamps && timestamp < secs_since_epoch {
 debug!(target: "discovery", "Expired packet");
 return Err(ErrorKind::Expired.into());
 }
@@ -504,12 +505,12 @@ impl Discovery {
 }

 fn check_expired(&mut self, force: bool) -> HashSet<NodeId> {
-let now = time::precise_time_ns();
+let now = Instant::now();
 let mut removed: HashSet<NodeId> = HashSet::new();
 for bucket in &mut self.node_buckets {
 bucket.nodes.retain(|node| {
 if let Some(timeout) = node.timeout {
-if !force && now - timeout < PING_TIMEOUT_MS * 1000_0000 {
+if !force && now.duration_since(timeout) < Duration::from_millis(PING_TIMEOUT_MS) {
 true
 }
 else {
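Bucket entries now store an Option<Instant> for the outstanding ping, and expiry is decided with `Instant::duration_since` against a Duration rather than raw nanosecond arithmetic. A reduced sketch of that retain-style check; the constant value and names are illustrative, not taken from the crate:

use std::time::{Duration, Instant};

// Illustrative stand-in for PING_TIMEOUT_MS in the hunk above.
const PING_TIMEOUT_MS: u64 = 300;

// A pinged node is kept while the ping is still in flight; `force` expires it regardless.
fn still_waiting(pinged_at: Option<Instant>, now: Instant, force: bool) -> bool {
    match pinged_at {
        Some(t) => !force && now.duration_since(t) < Duration::from_millis(PING_TIMEOUT_MS),
        None => true, // never pinged: nothing to expire
    }
}

fn main() {
    let now = Instant::now();
    assert!(still_waiting(Some(now), now, false));
    assert!(!still_waiting(Some(now), now, true)); // forced expiry
}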
@@ -67,7 +67,6 @@ extern crate mio;
 extern crate tiny_keccak;
 extern crate crypto as rcrypto;
 extern crate rand;
-extern crate time;
 extern crate ansi_term; //TODO: remove this
 extern crate rustc_hex;
 extern crate igd;

@@ -18,6 +18,7 @@ use std::{str, io};
 use std::net::SocketAddr;
 use std::sync::*;
 use std::collections::HashMap;
+use std::time::{Duration, Instant};

 use mio::*;
 use mio::deprecated::{Handler, EventLoop};
@@ -32,7 +33,6 @@ use network::{SessionCapabilityInfo, HostInfo as HostInfoTrait};
 use host::*;
 use node_table::NodeId;
 use stats::NetworkStats;
-use time;
 use snappy;

 // Timeout must be less than (interval - 1).
@@ -59,8 +59,8 @@ pub struct Session {
 had_hello: bool,
 /// Session is no longer active flag.
 expired: bool,
-ping_time_ns: u64,
-pong_time_ns: Option<u64>,
+ping_time: Instant,
+pong_time: Option<Instant>,
 state: State,
 // Protocol states -- accumulates pending packets until signaled as ready.
 protocol_states: HashMap<ProtocolId, ProtocolState>,
@@ -123,8 +123,8 @@ impl Session {
 remote_address: "Handshake".to_owned(),
 local_address: local_addr,
 },
-ping_time_ns: 0,
-pong_time_ns: None,
+ping_time: Instant::now(),
+pong_time: None,
 expired: false,
 protocol_states: HashMap::new(),
 compression: false,
@@ -299,13 +299,13 @@ impl Session {
 if let State::Handshake(_) = self.state {
 return true;
 }
-let timed_out = if let Some(pong) = self.pong_time_ns {
-pong - self.ping_time_ns > PING_TIMEOUT_SEC * 1000_000_000
+let timed_out = if let Some(pong) = self.pong_time {
+pong.duration_since(self.ping_time) > Duration::from_secs(PING_TIMEOUT_SEC)
 } else {
-time::precise_time_ns() - self.ping_time_ns > PING_TIMEOUT_SEC * 1000_000_000
+self.ping_time.elapsed() > Duration::from_secs(PING_TIMEOUT_SEC)
 };

-if !timed_out && time::precise_time_ns() - self.ping_time_ns > PING_INTERVAL_SEC * 1000_000_000 {
+if !timed_out && self.ping_time.elapsed() > Duration::from_secs(PING_INTERVAL_SEC) {
 if let Err(e) = self.send_ping(io) {
 debug!("Error sending ping message: {:?}", e);
 }
@@ -368,9 +368,11 @@ impl Session {
 Ok(SessionData::Continue)
 },
 PACKET_PONG => {
-let time = time::precise_time_ns();
-self.pong_time_ns = Some(time);
-self.info.ping_ms = Some((time - self.ping_time_ns) / 1000_000);
+let time = Instant::now();
+self.pong_time = Some(time);
+let ping_elapsed = time.duration_since(self.ping_time);
+self.info.ping_ms = Some(ping_elapsed.as_secs() * 1_000 +
+ping_elapsed.subsec_nanos() as u64 / 1_000_000);
 Ok(SessionData::Continue)
 },
 PACKET_GET_PEERS => Ok(SessionData::None), //TODO;
@@ -482,11 +484,11 @@ impl Session {
 Ok(())
 }

-/// Senf ping packet
+/// Send ping packet
 pub fn send_ping<Message>(&mut self, io: &IoContext<Message>) -> Result<(), Error> where Message: Send + Sync + Clone {
 self.send_packet(io, None, PACKET_PING, &EMPTY_LIST_RLP)?;
-self.ping_time_ns = time::precise_time_ns();
-self.pong_time_ns = None;
+self.ping_time = Instant::now();
+self.pong_time = None;
 Ok(())
 }

@@ -4,5 +4,4 @@ version = "0.1.0"
 authors = ["Parity Technologies <admin@parity.io>"]

 [dependencies]
-time = "0.1.34"
 log = "0.3"

@@ -16,11 +16,10 @@
 //! Performance timer with logging

-extern crate time;
 #[macro_use]
 extern crate log;

-use time::precise_time_ns;
+use std::time::Instant;

 #[macro_export]
 macro_rules! trace_time {
@@ -33,7 +32,7 @@ macro_rules! trace_time {
 /// elapsed time in the destructor or when `stop` is called.
 pub struct PerfTimer {
 name: &'static str,
-start: u64,
+start: Instant,
 }

 impl PerfTimer {
@@ -41,13 +40,16 @@ impl PerfTimer {
 pub fn new(name: &'static str) -> PerfTimer {
 PerfTimer {
 name,
-start: precise_time_ns(),
+start: Instant::now(),
 }
 }
 }

 impl Drop for PerfTimer {
 fn drop(&mut self) {
-trace!(target: "perf", "{}: {:.2}ms", self.name, (precise_time_ns() - self.start) as f32 / 1000_000.0);
+let elapsed = self.start.elapsed();
+let ms = elapsed.subsec_nanos() as f32 / 1_000_000.0 +
+elapsed.as_secs() as f32 * 1_000.0;
+trace!(target: "perf", "{}: {:.2}ms", self.name, ms);
 }
 }
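Both the session `ping_ms` calculation and the new `PerfTimer::drop` above convert an elapsed Duration to milliseconds by hand, since no direct milliseconds accessor was usable on this toolchain. A sketch of the fractional-millisecond variant used for logging (helper name is invented):

use std::time::Instant;

// Fractional milliseconds for log output, mirroring the new PerfTimer::drop.
fn elapsed_ms_f32(start: Instant) -> f32 {
    let elapsed = start.elapsed();
    elapsed.as_secs() as f32 * 1_000.0 + elapsed.subsec_nanos() as f32 / 1_000_000.0
}

fn main() {
    let start = Instant::now();
    let work: u64 = (0..1_000).sum(); // something cheap to time
    println!("summed to {} in {:.2}ms", work, elapsed_ms_f32(start));
}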
@@ -23,7 +23,6 @@ serde_derive = "1.0"
 serde_json = "1.0"
 slab = "0.3"
 smallvec = "0.4"
-time = "0.1"
 tiny-keccak = "1.3"

 jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.10" }

@@ -31,7 +31,6 @@ extern crate ring;
 extern crate serde;
 extern crate slab;
 extern crate smallvec;
-extern crate time;
 extern crate tiny_keccak;

 extern crate jsonrpc_core;

@@ -17,7 +17,7 @@
 //! Whisper message parsing, handlers, and construction.

 use std::fmt;
-use std::time::{self, SystemTime, Duration};
+use std::time::{self, SystemTime, Duration, Instant};

 use ethereum_types::{H256, H512};
 use rlp::{self, DecoderError, RlpStream, UntrustedRlp};
@@ -299,9 +299,9 @@ impl Message {
 let mut nonce: [u8; 8] = rng.gen();
 let mut best_found = try_nonce(&nonce);

-let start = ::time::precise_time_ns();
+let start = Instant::now();

-while ::time::precise_time_ns() <= start + params.work * 1_000_000 {
+while start.elapsed() <= Duration::from_millis(params.work) {
 let temp_nonce = rng.gen();
 let hash = try_nonce(&temp_nonce);