Remove the time dependency where possible (#8100)
parent 5c47116889
commit 113c35af0a

Cargo.lock (generated, 8 changes)
@@ -509,7 +509,6 @@ dependencies = [
 "stop-guard 0.1.0",
 "table 0.1.0",
 "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
-"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
 "trace-time 0.1.0",
 "trie-standardmap 0.1.0",
 "triehash 0.1.0",
@@ -578,7 +577,6 @@ dependencies = [
 "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "stats 0.1.0",
 "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
-"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
 "triehash 0.1.0",
 "vm 0.1.0",
 ]
@@ -675,7 +673,6 @@ dependencies = [
 "slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "snappy 0.1.0 (git+https://github.com/paritytech/rust-snappy)",
 "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
-"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
 "tiny-keccak 1.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -906,7 +903,6 @@ dependencies = [
 "rlp 0.2.1",
 "semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
-"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
 "triehash 0.1.0",
 ]

@@ -2015,7 +2011,6 @@ dependencies = [
 "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "term_size 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
 "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
 "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2210,7 +2205,6 @@ dependencies = [
 "serde_json 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
 "stats 0.1.0",
 "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
-"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
 "tiny-keccak 1.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "tokio-timer 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "transient-hashmap 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2361,7 +2355,6 @@ dependencies = [
 "serde_json 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
 "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
-"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
 "tiny-keccak 1.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -3312,7 +3305,6 @@ name = "trace-time"
 version = "0.1.0"
 dependencies = [
 "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
-"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -14,7 +14,6 @@ docopt = "0.8"
 clap = "2"
 term_size = "0.3"
 textwrap = "0.9"
-time = "0.1"
 num_cpus = "1.2"
 number_prefix = "0.2"
 rpassword = "1.0"
@@ -60,7 +60,6 @@ macros = { path = "../util/macros" }
 rust-crypto = "0.2.34"
 rustc-hex = "1.0"
 stats = { path = "../util/stats" }
-time = "0.1"
 trace-time = { path = "../util/trace-time" }
 using_queue = { path = "../util/using_queue" }
 table = { path = "../util/table" }
@@ -22,7 +22,6 @@ vm = { path = "../vm" }
 plain_hasher = { path = "../../util/plain_hasher" }
 rlp = { path = "../../util/rlp" }
 rlp_derive = { path = "../../util/rlp_derive" }
-time = "0.1"
 smallvec = "0.4"
 futures = "0.1"
 rand = "0.4"
@@ -25,7 +25,7 @@ use ethcore::header::BlockNumber;
 use ethcore::receipt::Receipt;

 use stats::Corpus;
-use time::{SteadyTime, Duration};
+use std::time::{Instant, Duration};
 use heapsize::HeapSizeOf;
 use ethereum_types::{H256, U256};
 use memory_cache::MemoryLruCache;
@@ -69,7 +69,7 @@ pub struct Cache {
 bodies: MemoryLruCache<H256, encoded::Body>,
 receipts: MemoryLruCache<H256, Vec<Receipt>>,
 chain_score: MemoryLruCache<H256, U256>,
-corpus: Option<(Corpus<U256>, SteadyTime)>,
+corpus: Option<(Corpus<U256>, Instant)>,
 corpus_expiration: Duration,
 }

@@ -139,7 +139,7 @@ impl Cache {

 /// Get gas price corpus, if recent enough.
 pub fn gas_price_corpus(&self) -> Option<Corpus<U256>> {
-let now = SteadyTime::now();
+let now = Instant::now();

 self.corpus.as_ref().and_then(|&(ref corpus, ref tm)| {
 if *tm + self.corpus_expiration >= now {
@@ -152,7 +152,7 @@ impl Cache {

 /// Set the cached gas price corpus.
 pub fn set_gas_price_corpus(&mut self, corpus: Corpus<U256>) {
-self.corpus = Some((corpus, SteadyTime::now()))
+self.corpus = Some((corpus, Instant::now()))
 }

 /// Get the memory used.
@@ -175,18 +175,18 @@ impl HeapSizeOf for Cache {
 #[cfg(test)]
 mod tests {
 use super::Cache;
-use time::Duration;
+use std::time::Duration;

 #[test]
 fn corpus_inaccessible() {
-let mut cache = Cache::new(Default::default(), Duration::hours(5));
+let mut cache = Cache::new(Default::default(), Duration::from_secs(5 * 3600));

 cache.set_gas_price_corpus(vec![].into());
 assert_eq!(cache.gas_price_corpus(), Some(vec![].into()));

 {
 let corpus_time = &mut cache.corpus.as_mut().unwrap().1;
-*corpus_time = *corpus_time - Duration::hours(6);
+*corpus_time = *corpus_time - Duration::from_secs(6 * 3600);
 }
 assert!(cache.gas_price_corpus().is_none());
 }
@@ -732,7 +732,7 @@ mod tests {
 use kvdb::KeyValueDB;
 use kvdb_memorydb;

-use time::Duration;
+use std::time::Duration;
 use parking_lot::Mutex;

 fn make_db() -> Arc<KeyValueDB> {
@@ -745,7 +745,7 @@ mod tests {
 let genesis_header = spec.genesis_header();
 let db = make_db();

-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();

@@ -778,7 +778,7 @@ mod tests {
 let spec = Spec::new_test();
 let genesis_header = spec.genesis_header();
 let db = make_db();
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();

@@ -860,7 +860,7 @@ mod tests {
 fn earliest_is_latest() {
 let spec = Spec::new_test();
 let db = make_db();
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();

@@ -873,7 +873,7 @@ mod tests {
 let spec = Spec::new_test();
 let genesis_header = spec.genesis_header();
 let db = make_db();
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 {
 let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
@@ -909,7 +909,7 @@ mod tests {
 let spec = Spec::new_test();
 let genesis_header = spec.genesis_header();
 let db = make_db();
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 {
 let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
@@ -964,7 +964,7 @@ mod tests {
 let spec = Spec::new_test();
 let genesis_header = spec.genesis_header();
 let db = make_db();
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();

@@ -978,7 +978,7 @@ mod tests {
 let spec = Spec::new_test();
 let genesis_header = spec.genesis_header();
 let db = make_db();
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();

@@ -107,7 +107,7 @@ mod tests {
 use std::sync::Arc;
 use cache::Cache;
 use client::fetch;
-use time::Duration;
+use std::time::Duration;
 use parking_lot::Mutex;
 use kvdb_memorydb;
 use ethcore::db::NUM_COLUMNS;
@@ -116,7 +116,7 @@ mod tests {
 fn it_works() {
 let db = Arc::new(kvdb_memorydb::create(NUM_COLUMNS.unwrap_or(0)));
 let spec = Spec::new_test();
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 Service::start(Default::default(), &spec, fetch::unavailable(), db, cache).unwrap();
 }
@@ -75,7 +75,6 @@ extern crate rlp_derive;
 extern crate serde;
 extern crate smallvec;
 extern crate stats;
-extern crate time;
 extern crate vm;
 extern crate keccak_hash as hash;
 extern crate triehash;
@@ -27,11 +27,11 @@
 use std::collections::{HashMap, VecDeque};
 use std::fs::File;
 use std::path::PathBuf;
+use std::time::{Duration, Instant};

 use request::{CompleteRequest, Kind};

 use bincode;
-use time;
 use parking_lot::{RwLock, Mutex};

 /// Number of time periods samples should be kept for.
@@ -107,7 +107,7 @@ impl LoadDistribution {
 };

 LoadTimer {
-start: time::precise_time_ns(),
+start: Instant::now(),
 n: n,
 dist: self,
 kind: kind,
@@ -151,10 +151,10 @@ impl LoadDistribution {
 store.store(&*samples);
 }

-fn update(&self, kind: Kind, elapsed: u64, n: u64) {
+fn update(&self, kind: Kind, elapsed: Duration, n: u64) {
 macro_rules! update_counters {
 ($counters: expr) => {
-$counters.0 = $counters.0.saturating_add(elapsed);
+$counters.0 = $counters.0.saturating_add({ elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64 });
 $counters.1 = $counters.1.saturating_add(n);
 }
 };
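The new `update` signature above takes a `std::time::Duration` and converts it to whole nanoseconds inline, since `Duration::as_nanos` was not yet available on the toolchain targeted here. A minimal, self-contained sketch of that conversion as a standalone helper (the helper name is illustrative, not part of the patch):

use std::time::Duration;

/// Whole nanoseconds in `d`, as a u64 -- the same expression the patch inlines.
/// Only overflows for durations longer than roughly 584 years.
fn duration_to_ns(d: Duration) -> u64 {
    d.as_secs() * 1_000_000_000 + d.subsec_nanos() as u64
}

fn main() {
    let d = Duration::new(1, 500); // 1 s + 500 ns
    assert_eq!(duration_to_ns(d), 1_000_000_500);
}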
@@ -180,7 +180,7 @@ impl LoadDistribution {
 /// A timer for a single request.
 /// On drop, this will update the distribution.
 pub struct LoadTimer<'a> {
-start: u64,
+start: Instant,
 n: u64,
 dist: &'a LoadDistribution,
 kind: Kind,
@@ -188,7 +188,7 @@ pub struct LoadTimer<'a> {

 impl<'a> Drop for LoadTimer<'a> {
 fn drop(&mut self) {
-let elapsed = time::precise_time_ns() - self.start;
+let elapsed = self.start.elapsed();
 self.dist.update(self.kind, elapsed, self.n);
 }
 }
@@ -225,7 +225,7 @@ mod tests {
 let dist = LoadDistribution::load(&NullStore);
 assert_eq!(dist.expected_time_ns(Kind::Headers), hardcoded_serve_time(Kind::Headers));

-dist.update(Kind::Headers, 100_000, 100);
+dist.update(Kind::Headers, Duration::new(0, 100_000), 100);
 dist.end_period(&NullStore);

 assert_eq!(dist.expected_time_ns(Kind::Headers), 1000);
@@ -238,7 +238,7 @@ mod tests {
 let mut sum = 0;

 for (i, x) in (0..10).map(|x| x * 10_000).enumerate() {
-dist.update(Kind::Headers, x, 1);
+dist.update(Kind::Headers, Duration::new(0, x), 1);
 dist.end_period(&NullStore);

 sum += x;
@@ -248,7 +248,7 @@ mod tests {

 // should be weighted below the maximum entry.
 let arith_average = (sum as f64 / (i + 1) as f64) as u64;
-assert!(moving_average < x);
+assert!(moving_average < x as u64);

 // when there are only 2 entries, they should be equal due to choice of
 // ALPHA = 1/N.
@@ -26,7 +26,7 @@ use rlp::{RlpStream, UntrustedRlp};
 use ethereum_types::{H256, U256};
 use kvdb::DBValue;
 use parking_lot::{Mutex, RwLock};
-use time::{Duration, SteadyTime};
+use std::time::{Duration, Instant};

 use std::collections::{HashMap, HashSet};
 use std::fmt;
@@ -73,7 +73,7 @@ const RECALCULATE_COSTS_TIMEOUT: TimerToken = 3;
 const RECALCULATE_COSTS_INTERVAL_MS: u64 = 60 * 60 * 1000;

 // minimum interval between updates.
-const UPDATE_INTERVAL_MS: i64 = 5000;
+const UPDATE_INTERVAL_MS: u64 = 5000;

 /// Supported protocol versions.
 pub const PROTOCOL_VERSIONS: &'static [u8] = &[1];
@@ -109,20 +109,20 @@ mod packet {

 // timeouts for different kinds of requests. all values are in milliseconds.
 mod timeout {
-pub const HANDSHAKE: i64 = 2500;
-pub const ACKNOWLEDGE_UPDATE: i64 = 5000;
-pub const BASE: i64 = 1500; // base timeout for packet.
+pub const HANDSHAKE: u64 = 2500;
+pub const ACKNOWLEDGE_UPDATE: u64 = 5000;
+pub const BASE: u64 = 1500; // base timeout for packet.

 // timeouts per request within packet.
-pub const HEADERS: i64 = 250; // per header?
-pub const TRANSACTION_INDEX: i64 = 100;
-pub const BODY: i64 = 50;
-pub const RECEIPT: i64 = 50;
-pub const PROOF: i64 = 100; // state proof
-pub const CONTRACT_CODE: i64 = 100;
-pub const HEADER_PROOF: i64 = 100;
-pub const TRANSACTION_PROOF: i64 = 1000; // per gas?
-pub const EPOCH_SIGNAL: i64 = 200;
+pub const HEADERS: u64 = 250; // per header?
+pub const TRANSACTION_INDEX: u64 = 100;
+pub const BODY: u64 = 50;
+pub const RECEIPT: u64 = 50;
+pub const PROOF: u64 = 100; // state proof
+pub const CONTRACT_CODE: u64 = 100;
+pub const HEADER_PROOF: u64 = 100;
+pub const TRANSACTION_PROOF: u64 = 1000; // per gas?
+pub const EPOCH_SIGNAL: u64 = 200;
 }

 /// A request id.
@@ -144,7 +144,7 @@ impl fmt::Display for ReqId {
 // may not have received one for.
 struct PendingPeer {
 sent_head: H256,
-last_update: SteadyTime,
+last_update: Instant,
 }

 /// Relevant data to each peer. Not accessible publicly, only `pub` due to
@@ -155,13 +155,13 @@ pub struct Peer {
 capabilities: Capabilities,
 remote_flow: Option<(Credits, FlowParams)>,
 sent_head: H256, // last chain head we've given them.
-last_update: SteadyTime,
+last_update: Instant,
 pending_requests: RequestSet,
 failed_requests: Vec<ReqId>,
 propagated_transactions: HashSet<H256>,
 skip_update: bool,
 local_flow: Arc<FlowParams>,
-awaiting_acknowledge: Option<(SteadyTime, Arc<FlowParams>)>,
+awaiting_acknowledge: Option<(Instant, Arc<FlowParams>)>,
 }

 /// Whether or not a peer was kept by a handler
@@ -447,7 +447,7 @@ impl LightProtocol {
 });

 // begin timeout.
-peer.pending_requests.insert(req_id, requests, cost, SteadyTime::now());
+peer.pending_requests.insert(req_id, requests, cost, Instant::now());
 Ok(req_id)
 }
 }
@@ -457,7 +457,7 @@ impl LightProtocol {
 /// The announcement is expected to be valid.
 pub fn make_announcement(&self, io: &IoContext, mut announcement: Announcement) {
 let mut reorgs_map = HashMap::new();
-let now = SteadyTime::now();
+let now = Instant::now();

 // update stored capabilities
 self.capabilities.write().update_from(&announcement);
@@ -470,7 +470,7 @@ impl LightProtocol {
 // the timer approach will skip 1 (possibly 2) in rare occasions.
 if peer_info.sent_head == announcement.head_hash ||
 peer_info.status.head_num >= announcement.head_num ||
-now - peer_info.last_update < Duration::milliseconds(UPDATE_INTERVAL_MS) {
+now - peer_info.last_update < Duration::from_millis(UPDATE_INTERVAL_MS) {
 continue
 }

@@ -537,7 +537,7 @@ impl LightProtocol {
 Some(peer_info) => {
 let mut peer_info = peer_info.lock();
 let peer_info: &mut Peer = &mut *peer_info;
-let req_info = peer_info.pending_requests.remove(&req_id, SteadyTime::now());
+let req_info = peer_info.pending_requests.remove(&req_id, Instant::now());
 let last_batched = peer_info.pending_requests.is_empty();
 let flow_info = peer_info.remote_flow.as_mut();

@@ -599,14 +599,14 @@ impl LightProtocol {

 // check timeouts and punish peers.
 fn timeout_check(&self, io: &IoContext) {
-let now = SteadyTime::now();
+let now = Instant::now();

 // handshake timeout
 {
 let mut pending = self.pending_peers.write();
 let slowpokes: Vec<_> = pending.iter()
 .filter(|&(_, ref peer)| {
-peer.last_update + Duration::milliseconds(timeout::HANDSHAKE) <= now
+peer.last_update + Duration::from_millis(timeout::HANDSHAKE) <= now
 })
 .map(|(&p, _)| p)
 .collect();
@@ -619,7 +619,7 @@ impl LightProtocol {
 }

 // request and update ack timeouts
-let ack_duration = Duration::milliseconds(timeout::ACKNOWLEDGE_UPDATE);
+let ack_duration = Duration::from_millis(timeout::ACKNOWLEDGE_UPDATE);
 {
 for (peer_id, peer) in self.peers.read().iter() {
 let peer = peer.lock();
@@ -709,7 +709,7 @@ impl LightProtocol {

 self.pending_peers.write().insert(*peer, PendingPeer {
 sent_head: chain_info.best_block_hash,
-last_update: SteadyTime::now(),
+last_update: Instant::now(),
 });

 trace!(target: "pip", "Sending status to peer {}", peer);
@@ -771,7 +771,7 @@ impl LightProtocol {
 *self.flow_params.write() = new_params.clone();

 let peers = self.peers.read();
-let now = SteadyTime::now();
+let now = Instant::now();

 let packet_body = {
 let mut stream = RlpStream::new_list(3);
|
|||||||
|
|
||||||
use rlp::*;
|
use rlp::*;
|
||||||
use ethereum_types::U256;
|
use ethereum_types::U256;
|
||||||
use time::{Duration, SteadyTime};
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
/// Credits value.
|
/// Credits value.
|
||||||
///
|
///
|
||||||
@ -41,7 +41,7 @@ use time::{Duration, SteadyTime};
|
|||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
pub struct Credits {
|
pub struct Credits {
|
||||||
estimate: U256,
|
estimate: U256,
|
||||||
recharge_point: SteadyTime,
|
recharge_point: Instant,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Credits {
|
impl Credits {
|
||||||
@ -53,7 +53,7 @@ impl Credits {
|
|||||||
/// a response to a request.
|
/// a response to a request.
|
||||||
pub fn update_to(&mut self, value: U256) {
|
pub fn update_to(&mut self, value: U256) {
|
||||||
self.estimate = value;
|
self.estimate = value;
|
||||||
self.recharge_point = SteadyTime::now();
|
self.recharge_point = Instant::now();
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Maintain ratio to current limit against an old limit.
|
/// Maintain ratio to current limit against an old limit.
|
||||||
@ -351,19 +351,19 @@ impl FlowParams {
|
|||||||
pub fn create_credits(&self) -> Credits {
|
pub fn create_credits(&self) -> Credits {
|
||||||
Credits {
|
Credits {
|
||||||
estimate: self.limit,
|
estimate: self.limit,
|
||||||
recharge_point: SteadyTime::now(),
|
recharge_point: Instant::now(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Recharge the given credits based on time passed since last
|
/// Recharge the given credits based on time passed since last
|
||||||
/// update.
|
/// update.
|
||||||
pub fn recharge(&self, credits: &mut Credits) {
|
pub fn recharge(&self, credits: &mut Credits) {
|
||||||
let now = SteadyTime::now();
|
let now = Instant::now();
|
||||||
|
|
||||||
// recompute and update only in terms of full seconds elapsed
|
// recompute and update only in terms of full seconds elapsed
|
||||||
// in order to keep the estimate as an underestimate.
|
// in order to keep the estimate as an underestimate.
|
||||||
let elapsed = (now - credits.recharge_point).num_seconds();
|
let elapsed = (now - credits.recharge_point).as_secs();
|
||||||
credits.recharge_point = credits.recharge_point + Duration::seconds(elapsed);
|
credits.recharge_point = credits.recharge_point + Duration::from_secs(elapsed);
|
||||||
|
|
||||||
let elapsed: U256 = elapsed.into();
|
let elapsed: U256 = elapsed.into();
|
||||||
|
|
||||||
|
@ -23,14 +23,13 @@
|
|||||||
|
|
||||||
use std::collections::{BTreeMap, HashMap};
|
use std::collections::{BTreeMap, HashMap};
|
||||||
use std::iter::FromIterator;
|
use std::iter::FromIterator;
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
use request::Request;
|
use request::Request;
|
||||||
use request::NetworkRequests as Requests;
|
use request::NetworkRequests as Requests;
|
||||||
use net::{timeout, ReqId};
|
use net::{timeout, ReqId};
|
||||||
use ethereum_types::U256;
|
use ethereum_types::U256;
|
||||||
|
|
||||||
use time::{Duration, SteadyTime};
|
|
||||||
|
|
||||||
// Request set entry: requests + cost.
|
// Request set entry: requests + cost.
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
struct Entry(Requests, U256);
|
struct Entry(Requests, U256);
|
||||||
@ -40,7 +39,7 @@ struct Entry(Requests, U256);
|
|||||||
pub struct RequestSet {
|
pub struct RequestSet {
|
||||||
counter: u64,
|
counter: u64,
|
||||||
cumulative_cost: U256,
|
cumulative_cost: U256,
|
||||||
base: Option<SteadyTime>,
|
base: Option<Instant>,
|
||||||
ids: HashMap<ReqId, u64>,
|
ids: HashMap<ReqId, u64>,
|
||||||
reqs: BTreeMap<u64, Entry>,
|
reqs: BTreeMap<u64, Entry>,
|
||||||
}
|
}
|
||||||
@ -59,7 +58,7 @@ impl Default for RequestSet {
|
|||||||
|
|
||||||
impl RequestSet {
|
impl RequestSet {
|
||||||
/// Push requests onto the stack.
|
/// Push requests onto the stack.
|
||||||
pub fn insert(&mut self, req_id: ReqId, req: Requests, cost: U256, now: SteadyTime) {
|
pub fn insert(&mut self, req_id: ReqId, req: Requests, cost: U256, now: Instant) {
|
||||||
let counter = self.counter;
|
let counter = self.counter;
|
||||||
self.cumulative_cost = self.cumulative_cost + cost;
|
self.cumulative_cost = self.cumulative_cost + cost;
|
||||||
|
|
||||||
@ -74,7 +73,7 @@ impl RequestSet {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Remove a set of requests from the stack.
|
/// Remove a set of requests from the stack.
|
||||||
pub fn remove(&mut self, req_id: &ReqId, now: SteadyTime) -> Option<Requests> {
|
pub fn remove(&mut self, req_id: &ReqId, now: Instant) -> Option<Requests> {
|
||||||
let id = match self.ids.remove(&req_id) {
|
let id = match self.ids.remove(&req_id) {
|
||||||
Some(id) => id,
|
Some(id) => id,
|
||||||
None => return None,
|
None => return None,
|
||||||
@ -94,7 +93,7 @@ impl RequestSet {
|
|||||||
|
|
||||||
/// Check for timeout against the given time. Returns true if
|
/// Check for timeout against the given time. Returns true if
|
||||||
/// has timed out, false otherwise.
|
/// has timed out, false otherwise.
|
||||||
pub fn check_timeout(&self, now: SteadyTime) -> bool {
|
pub fn check_timeout(&self, now: Instant) -> bool {
|
||||||
let base = match self.base.as_ref().cloned() {
|
let base = match self.base.as_ref().cloned() {
|
||||||
Some(base) => base,
|
Some(base) => base,
|
||||||
None => return false,
|
None => return false,
|
||||||
@ -128,7 +127,7 @@ impl RequestSet {
|
|||||||
// helper to calculate timeout for a specific set of requests.
|
// helper to calculate timeout for a specific set of requests.
|
||||||
// it's a base amount + some amount per request.
|
// it's a base amount + some amount per request.
|
||||||
fn compute_timeout(reqs: &Requests) -> Duration {
|
fn compute_timeout(reqs: &Requests) -> Duration {
|
||||||
Duration::milliseconds(reqs.requests().iter().fold(timeout::BASE, |tm, req| {
|
Duration::from_millis(reqs.requests().iter().fold(timeout::BASE, |tm, req| {
|
||||||
tm + match *req {
|
tm + match *req {
|
||||||
Request::Headers(_) => timeout::HEADERS,
|
Request::Headers(_) => timeout::HEADERS,
|
||||||
Request::HeaderProof(_) => timeout::HEADER_PROOF,
|
Request::HeaderProof(_) => timeout::HEADER_PROOF,
|
||||||
@ -148,34 +147,34 @@ fn compute_timeout(reqs: &Requests) -> Duration {
|
|||||||
mod tests {
|
mod tests {
|
||||||
use net::ReqId;
|
use net::ReqId;
|
||||||
use request::Builder;
|
use request::Builder;
|
||||||
use time::{SteadyTime, Duration};
|
use std::time::{Instant, Duration};
|
||||||
use super::{RequestSet, compute_timeout};
|
use super::{RequestSet, compute_timeout};
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn multi_timeout() {
|
fn multi_timeout() {
|
||||||
let test_begin = SteadyTime::now();
|
let test_begin = Instant::now();
|
||||||
let mut req_set = RequestSet::default();
|
let mut req_set = RequestSet::default();
|
||||||
|
|
||||||
let the_req = Builder::default().build();
|
let the_req = Builder::default().build();
|
||||||
let req_time = compute_timeout(&the_req);
|
let req_time = compute_timeout(&the_req);
|
||||||
req_set.insert(ReqId(0), the_req.clone(), 0.into(), test_begin);
|
req_set.insert(ReqId(0), the_req.clone(), 0.into(), test_begin);
|
||||||
req_set.insert(ReqId(1), the_req, 0.into(), test_begin + Duration::seconds(1));
|
req_set.insert(ReqId(1), the_req, 0.into(), test_begin + Duration::from_secs(1));
|
||||||
|
|
||||||
assert_eq!(req_set.base, Some(test_begin));
|
assert_eq!(req_set.base, Some(test_begin));
|
||||||
|
|
||||||
let test_end = test_begin + req_time;
|
let test_end = test_begin + req_time;
|
||||||
assert!(req_set.check_timeout(test_end));
|
assert!(req_set.check_timeout(test_end));
|
||||||
|
|
||||||
req_set.remove(&ReqId(0), test_begin + Duration::seconds(1)).unwrap();
|
req_set.remove(&ReqId(0), test_begin + Duration::from_secs(1)).unwrap();
|
||||||
assert!(!req_set.check_timeout(test_end));
|
assert!(!req_set.check_timeout(test_end));
|
||||||
assert!(req_set.check_timeout(test_end + Duration::seconds(1)));
|
assert!(req_set.check_timeout(test_end + Duration::from_secs(1)));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn cumulative_cost() {
|
fn cumulative_cost() {
|
||||||
let the_req = Builder::default().build();
|
let the_req = Builder::default().build();
|
||||||
let test_begin = SteadyTime::now();
|
let test_begin = Instant::now();
|
||||||
let test_end = test_begin + Duration::seconds(1);
|
let test_end = test_begin + Duration::from_secs(1);
|
||||||
let mut req_set = RequestSet::default();
|
let mut req_set = RequestSet::default();
|
||||||
|
|
||||||
for i in 0..5 {
|
for i in 0..5 {
|
||||||
|
@ -35,6 +35,7 @@ use rlp::*;
|
|||||||
use ethereum_types::{H256, U256, Address};
|
use ethereum_types::{H256, U256, Address};
|
||||||
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use std::time::Instant;
|
||||||
|
|
||||||
// helper for encoding a single request into a packet.
|
// helper for encoding a single request into a packet.
|
||||||
// panics on bad backreference.
|
// panics on bad backreference.
|
||||||
@ -661,8 +662,8 @@ fn id_guard() {
|
|||||||
|
|
||||||
let mut pending_requests = RequestSet::default();
|
let mut pending_requests = RequestSet::default();
|
||||||
|
|
||||||
pending_requests.insert(req_id_1, req.clone(), 0.into(), ::time::SteadyTime::now());
|
pending_requests.insert(req_id_1, req.clone(), 0.into(), Instant::now());
|
||||||
pending_requests.insert(req_id_2, req, 1.into(), ::time::SteadyTime::now());
|
pending_requests.insert(req_id_2, req, 1.into(), Instant::now());
|
||||||
|
|
||||||
proto.peers.write().insert(peer_id, ::parking_lot::Mutex::new(Peer {
|
proto.peers.write().insert(peer_id, ::parking_lot::Mutex::new(Peer {
|
||||||
local_credits: flow_params.create_credits(),
|
local_credits: flow_params.create_credits(),
|
||||||
@ -670,7 +671,7 @@ fn id_guard() {
|
|||||||
capabilities: capabilities.clone(),
|
capabilities: capabilities.clone(),
|
||||||
remote_flow: Some((flow_params.create_credits(), (&*flow_params).clone())),
|
remote_flow: Some((flow_params.create_credits(), (&*flow_params).clone())),
|
||||||
sent_head: provider.client.chain_info().best_block_hash,
|
sent_head: provider.client.chain_info().best_block_hash,
|
||||||
last_update: ::time::SteadyTime::now(),
|
last_update: Instant::now(),
|
||||||
pending_requests: pending_requests,
|
pending_requests: pending_requests,
|
||||||
failed_requests: Vec::new(),
|
failed_requests: Vec::new(),
|
||||||
propagated_transactions: Default::default(),
|
propagated_transactions: Default::default(),
|
||||||
|
@ -941,6 +941,7 @@ impl Signal {
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
|
use std::time::Duration;
|
||||||
use ethereum_types::{H256, Address};
|
use ethereum_types::{H256, Address};
|
||||||
use memorydb::MemoryDB;
|
use memorydb::MemoryDB;
|
||||||
use parking_lot::Mutex;
|
use parking_lot::Mutex;
|
||||||
@ -954,7 +955,7 @@ mod tests {
|
|||||||
use ethcore::receipt::{Receipt, TransactionOutcome};
|
use ethcore::receipt::{Receipt, TransactionOutcome};
|
||||||
|
|
||||||
fn make_cache() -> ::cache::Cache {
|
fn make_cache() -> ::cache::Cache {
|
||||||
::cache::Cache::new(Default::default(), ::time::Duration::seconds(1))
|
::cache::Cache::new(Default::default(), Duration::from_secs(1))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
@ -24,7 +24,7 @@ use network::{PeerId, NodeId};
|
|||||||
use net::*;
|
use net::*;
|
||||||
use ethereum_types::H256;
|
use ethereum_types::H256;
|
||||||
use parking_lot::Mutex;
|
use parking_lot::Mutex;
|
||||||
use time::Duration;
|
use std::time::Duration;
|
||||||
use ::request::{self as basic_request, Response};
|
use ::request::{self as basic_request, Response};
|
||||||
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
@ -88,7 +88,7 @@ struct Harness {
|
|||||||
|
|
||||||
impl Harness {
|
impl Harness {
|
||||||
fn create() -> Self {
|
fn create() -> Self {
|
||||||
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::minutes(1))));
|
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(60))));
|
||||||
Harness {
|
Harness {
|
||||||
service: OnDemand::new_test(cache),
|
service: OnDemand::new_test(cache),
|
||||||
}
|
}
|
||||||
|
@ -19,7 +19,6 @@ use std::str::FromStr;
|
|||||||
use std::sync::{Arc, Weak};
|
use std::sync::{Arc, Weak};
|
||||||
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
|
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
|
||||||
use std::time::{Instant};
|
use std::time::{Instant};
|
||||||
use time::precise_time_ns;
|
|
||||||
use itertools::Itertools;
|
use itertools::Itertools;
|
||||||
|
|
||||||
// util
|
// util
|
||||||
@ -293,7 +292,7 @@ impl Importer {
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
trace_time!("import_verified_blocks");
|
trace_time!("import_verified_blocks");
|
||||||
let start = precise_time_ns();
|
let start = Instant::now();
|
||||||
|
|
||||||
for block in blocks {
|
for block in blocks {
|
||||||
let header = &block.header;
|
let header = &block.header;
|
||||||
@ -326,7 +325,10 @@ impl Importer {
|
|||||||
self.block_queue.mark_as_bad(&invalid_blocks);
|
self.block_queue.mark_as_bad(&invalid_blocks);
|
||||||
}
|
}
|
||||||
let is_empty = self.block_queue.mark_as_good(&imported_blocks);
|
let is_empty = self.block_queue.mark_as_good(&imported_blocks);
|
||||||
let duration_ns = precise_time_ns() - start;
|
let duration_ns = {
|
||||||
|
let elapsed = start.elapsed();
|
||||||
|
elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64
|
||||||
|
};
|
||||||
(imported_blocks, import_results, invalid_blocks, imported, proposed_blocks, duration_ns, is_empty)
|
(imported_blocks, import_results, invalid_blocks, imported, proposed_blocks, duration_ns, is_empty)
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -2036,7 +2038,7 @@ impl ScheduleInfo for Client {
|
|||||||
impl ImportSealedBlock for Client {
|
impl ImportSealedBlock for Client {
|
||||||
fn import_sealed_block(&self, block: SealedBlock) -> ImportResult {
|
fn import_sealed_block(&self, block: SealedBlock) -> ImportResult {
|
||||||
let h = block.header().hash();
|
let h = block.header().hash();
|
||||||
let start = precise_time_ns();
|
let start = Instant::now();
|
||||||
let route = {
|
let route = {
|
||||||
// scope for self.import_lock
|
// scope for self.import_lock
|
||||||
let _import_lock = self.importer.import_lock.lock();
|
let _import_lock = self.importer.import_lock.lock();
|
||||||
@ -2061,7 +2063,10 @@ impl ImportSealedBlock for Client {
|
|||||||
retracted.clone(),
|
retracted.clone(),
|
||||||
vec![h.clone()],
|
vec![h.clone()],
|
||||||
vec![],
|
vec![],
|
||||||
precise_time_ns() - start,
|
{
|
||||||
|
let elapsed = start.elapsed();
|
||||||
|
elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64
|
||||||
|
},
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
self.db.read().flush().expect("DB flush failed.");
|
self.db.read().flush().expect("DB flush failed.");
|
||||||
|
@@ -17,7 +17,7 @@
 //! Tendermint specific parameters.

 use ethjson;
-use time::Duration;
+use std::time::Duration;
 use ethereum_types::U256;
 use super::super::validator_set::{ValidatorSet, new_validator_set};
 use super::super::transition::Timeouts;
@@ -45,10 +45,10 @@ pub struct TendermintTimeouts {
 impl Default for TendermintTimeouts {
 fn default() -> Self {
 TendermintTimeouts {
-propose: Duration::milliseconds(1000),
-prevote: Duration::milliseconds(1000),
-precommit: Duration::milliseconds(1000),
-commit: Duration::milliseconds(1000),
+propose: Duration::from_millis(1000),
+prevote: Duration::from_millis(1000),
+precommit: Duration::from_millis(1000),
+commit: Duration::from_millis(1000),
 }
 }
 }
@@ -70,7 +70,7 @@ impl Timeouts<Step> for TendermintTimeouts {

 fn to_duration(ms: ethjson::uint::Uint) -> Duration {
 let ms: usize = ms.into();
-Duration::milliseconds(ms as i64)
+Duration::from_millis(ms as u64)
 }

 impl From<ethjson::spec::TendermintParams> for TendermintParams {
|
|||||||
//! Engine timeout transitioning calls `Engine.step()` on timeout.
|
//! Engine timeout transitioning calls `Engine.step()` on timeout.
|
||||||
|
|
||||||
use std::sync::Weak;
|
use std::sync::Weak;
|
||||||
use time::Duration;
|
use std::time::Duration;
|
||||||
use io::{IoContext, IoHandler, TimerToken};
|
use io::{IoContext, IoHandler, TimerToken};
|
||||||
use engines::Engine;
|
use engines::Engine;
|
||||||
use parity_machine::Machine;
|
use parity_machine::Machine;
|
||||||
@@ -51,7 +51,8 @@ impl<S, M: Machine> TransitionHandler<S, M> where S: Sync + Send + Clone {
 pub const ENGINE_TIMEOUT_TOKEN: TimerToken = 23;

 fn set_timeout<S: Sync + Send + Clone>(io: &IoContext<S>, timeout: Duration) {
-io.register_timer_once(ENGINE_TIMEOUT_TOKEN, timeout.num_milliseconds() as u64)
+let ms = timeout.as_secs() * 1_000 + timeout.subsec_nanos() as u64 / 1_000_000;
+io.register_timer_once(ENGINE_TIMEOUT_TOKEN, ms)
 .unwrap_or_else(|e| warn!(target: "engine", "Failed to set consensus step timeout: {}.", e))
 }

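Similarly, `Duration::as_millis` was not yet available, so the new `set_timeout` derives the millisecond count by hand. A sketch of the same computation in isolation (the helper name is illustrative only):

use std::time::Duration;

/// Whole milliseconds in `d`, mirroring the expression used in `set_timeout` above.
fn duration_to_ms(d: Duration) -> u64 {
    d.as_secs() * 1_000 + d.subsec_nanos() as u64 / 1_000_000
}

fn main() {
    assert_eq!(duration_to_ms(Duration::from_millis(2500)), 2500);
    assert_eq!(duration_to_ms(Duration::new(1, 250_000_000)), 1250);
}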
@@ -60,7 +61,7 @@ impl<S, M> IoHandler<S> for TransitionHandler<S, M>
 {
 fn initialize(&self, io: &IoContext<S>) {
 let initial = self.timeouts.initial();
-trace!(target: "engine", "Setting the initial timeout to {}.", initial);
+trace!(target: "engine", "Setting the initial timeout to {:?}.", initial);
 set_timeout(io, initial);
 }

|
|||||||
|
|
||||||
use std::cmp;
|
use std::cmp;
|
||||||
use std::cell::RefCell;
|
use std::cell::RefCell;
|
||||||
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP, keccak};
|
use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP, keccak};
|
||||||
use heapsize::HeapSizeOf;
|
use heapsize::HeapSizeOf;
|
||||||
use ethereum_types::{H256, U256, Address, Bloom};
|
use ethereum_types::{H256, U256, Address, Bloom};
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
use time::get_time;
|
|
||||||
use rlp::*;
|
use rlp::*;
|
||||||
|
|
||||||
pub use types::BlockNumber;
|
pub use types::BlockNumber;
|
||||||
@@ -189,7 +189,7 @@ impl Header {
 /// Set the timestamp field of the header.
 pub fn set_timestamp(&mut self, a: u64) { self.timestamp = a; self.note_dirty(); }
 /// Set the timestamp field of the header to the current time.
-pub fn set_timestamp_now(&mut self, but_later_than: u64) { self.timestamp = cmp::max(get_time().sec as u64, but_later_than + 1); self.note_dirty(); }
+pub fn set_timestamp_now(&mut self, but_later_than: u64) { let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default(); self.timestamp = cmp::max(now.as_secs() as u64, but_later_than + 1); self.note_dirty(); }
 /// Set the number field of the header.
 pub fn set_number(&mut self, a: BlockNumber) { self.number = a; self.note_dirty(); }
 /// Set the author field of the header.
|
|||||||
extern crate rustc_hex;
|
extern crate rustc_hex;
|
||||||
extern crate stats;
|
extern crate stats;
|
||||||
extern crate stop_guard;
|
extern crate stop_guard;
|
||||||
extern crate time;
|
|
||||||
extern crate using_queue;
|
extern crate using_queue;
|
||||||
extern crate table;
|
extern crate table;
|
||||||
extern crate vm;
|
extern crate vm;
|
||||||
|
@ -22,13 +22,13 @@
|
|||||||
//! 3. Final verification against the blockchain done before enactment.
|
//! 3. Final verification against the blockchain done before enactment.
|
||||||
|
|
||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
use ethereum_types::{H256, U256};
|
use ethereum_types::{H256, U256};
|
||||||
use hash::keccak;
|
use hash::keccak;
|
||||||
use heapsize::HeapSizeOf;
|
use heapsize::HeapSizeOf;
|
||||||
use rlp::UntrustedRlp;
|
use rlp::UntrustedRlp;
|
||||||
use time::get_time;
|
|
||||||
use triehash::ordered_trie_root;
|
use triehash::ordered_trie_root;
|
||||||
use unexpected::{Mismatch, OutOfBounds};
|
use unexpected::{Mismatch, OutOfBounds};
|
||||||
|
|
||||||
@ -284,7 +284,8 @@ pub fn verify_header_params(header: &Header, engine: &EthEngine, is_full: bool)
|
|||||||
|
|
||||||
if is_full {
|
if is_full {
|
||||||
const ACCEPTABLE_DRIFT_SECS: u64 = 15;
|
const ACCEPTABLE_DRIFT_SECS: u64 = 15;
|
||||||
let max_time = get_time().sec as u64 + ACCEPTABLE_DRIFT_SECS;
|
let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default();
|
||||||
|
let max_time = now.as_secs() + ACCEPTABLE_DRIFT_SECS;
|
||||||
let invalid_threshold = max_time + ACCEPTABLE_DRIFT_SECS * 9;
|
let invalid_threshold = max_time + ACCEPTABLE_DRIFT_SECS * 9;
|
||||||
let timestamp = header.timestamp();
|
let timestamp = header.timestamp();
|
||||||
|
|
||||||
@ -346,6 +347,7 @@ mod tests {
|
|||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
use std::collections::{BTreeMap, HashMap};
|
use std::collections::{BTreeMap, HashMap};
|
||||||
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
use ethereum_types::{H256, Bloom, U256};
|
use ethereum_types::{H256, Bloom, U256};
|
||||||
use blockchain::{BlockDetails, TransactionAddress, BlockReceipts};
|
use blockchain::{BlockDetails, TransactionAddress, BlockReceipts};
|
||||||
use encoded;
|
use encoded;
|
||||||
@ -355,7 +357,6 @@ mod tests {
|
|||||||
use ethkey::{Random, Generator};
|
use ethkey::{Random, Generator};
|
||||||
use spec::{CommonParams, Spec};
|
use spec::{CommonParams, Spec};
|
||||||
use tests::helpers::{create_test_block_with_data, create_test_block};
|
use tests::helpers::{create_test_block_with_data, create_test_block};
|
||||||
use time::get_time;
|
|
||||||
use transaction::{SignedTransaction, Transaction, UnverifiedTransaction, Action};
|
use transaction::{SignedTransaction, Transaction, UnverifiedTransaction, Action};
|
||||||
use types::log_entry::{LogEntry, LocalizedLogEntry};
|
use types::log_entry::{LogEntry, LocalizedLogEntry};
|
||||||
use rlp;
|
use rlp;
|
||||||
@ -682,11 +683,11 @@ mod tests {
|
|||||||
check_fail_timestamp(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine), false);
|
check_fail_timestamp(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine), false);
|
||||||
|
|
||||||
header = good.clone();
|
header = good.clone();
|
||||||
header.set_timestamp(get_time().sec as u64 + 20);
|
header.set_timestamp(SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() + 20);
|
||||||
check_fail_timestamp(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine), true);
|
check_fail_timestamp(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine), true);
|
||||||
|
|
||||||
header = good.clone();
|
header = good.clone();
|
||||||
header.set_timestamp(get_time().sec as u64 + 10);
|
header.set_timestamp(SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() + 10);
|
||||||
header.set_uncles_hash(good_uncles_hash.clone());
|
header.set_uncles_hash(good_uncles_hash.clone());
|
||||||
header.set_transactions_root(good_transactions_root.clone());
|
header.set_transactions_root(good_transactions_root.clone());
|
||||||
check_ok(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine));
|
check_ok(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine));
|
||||||
|
@ -193,7 +193,7 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> {
|
|||||||
cmd.dirs.create_dirs(false, false, false)?;
|
cmd.dirs.create_dirs(false, false, false)?;
|
||||||
|
|
||||||
let cache = Arc::new(Mutex::new(
|
let cache = Arc::new(Mutex::new(
|
||||||
LightDataCache::new(Default::default(), ::time::Duration::seconds(0))
|
LightDataCache::new(Default::default(), Duration::new(0, 0))
|
||||||
));
|
));
|
||||||
|
|
||||||
let mut config = LightClientConfig {
|
let mut config = LightClientConfig {
|
||||||
|
@ -43,7 +43,6 @@ extern crate serde;
|
|||||||
extern crate serde_json;
|
extern crate serde_json;
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate serde_derive;
|
extern crate serde_derive;
|
||||||
extern crate time;
|
|
||||||
extern crate toml;
|
extern crate toml;
|
||||||
|
|
||||||
extern crate ethcore;
|
extern crate ethcore;
|
||||||
|
@ -76,7 +76,7 @@ const SNAPSHOT_HISTORY: u64 = 100;
|
|||||||
|
|
||||||
// Number of minutes before a given gas price corpus should expire.
|
// Number of minutes before a given gas price corpus should expire.
|
||||||
// Light client only.
|
// Light client only.
|
||||||
const GAS_CORPUS_EXPIRATION_MINUTES: i64 = 60 * 6;
|
const GAS_CORPUS_EXPIRATION_MINUTES: u64 = 60 * 6;
|
||||||
|
|
||||||
// Pops along with error messages when a password is missing or invalid.
|
// Pops along with error messages when a password is missing or invalid.
|
||||||
const VERIFY_PASSWORD_HINT: &'static str = "Make sure valid password is present in files passed using `--password` or in the configuration file.";
|
const VERIFY_PASSWORD_HINT: &'static str = "Make sure valid password is present in files passed using `--password` or in the configuration file.";
|
||||||
@ -217,7 +217,7 @@ fn execute_light_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger
info!("Running in experimental {} mode.", Colour::Blue.bold().paint("Light Client"));

// TODO: configurable cache size.
-let cache = LightDataCache::new(Default::default(), ::time::Duration::minutes(GAS_CORPUS_EXPIRATION_MINUTES));
+let cache = LightDataCache::new(Default::default(), Duration::from_secs(60 * GAS_CORPUS_EXPIRATION_MINUTES));
let cache = Arc::new(Mutex::new(cache));

// start client and create transaction queue.
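std `Duration` constructors take `u64` and there is no `Duration::minutes`, which is why the constant above switches from `i64` to `u64` and the expiry is spelled out as `Duration::from_secs(60 * GAS_CORPUS_EXPIRATION_MINUTES)`. A hedged sketch of the same conversion:

use std::time::Duration;

const GAS_CORPUS_EXPIRATION_MINUTES: u64 = 60 * 6;

// std has no Duration::minutes(), so minutes are converted to seconds by hand.
fn corpus_expiry() -> Duration {
    Duration::from_secs(60 * GAS_CORPUS_EXPIRATION_MINUTES)
}

fn main() {
    assert_eq!(corpus_expiry().as_secs(), 6 * 60 * 60);
}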
@ -24,7 +24,6 @@ serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
tempdir = "0.3"
-time = "0.1"
tiny-keccak = "1.3"
tokio-timer = "0.1"
transient-hashmap = "0.4"
@ -34,7 +34,6 @@ extern crate rustc_hex;
extern crate semver;
extern crate serde;
extern crate serde_json;
-extern crate time;
extern crate tiny_keccak;
extern crate tokio_timer;
extern crate transient_hashmap;
@ -17,11 +17,10 @@
//! Eth rpc implementation.

use std::thread;
-use std::time::{Instant, Duration};
+use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH};
use std::sync::Arc;

use rlp::{self, UntrustedRlp};
-use time::get_time;
use ethereum_types::{U256, H64, H160, H256, Address};
use parking_lot::Mutex;

@ -769,7 +768,8 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM, T: StateInfo + 'static> Eth for EthClient<
let target = Ethash::difficulty_to_boundary(b.block().header().difficulty());
let seed_hash = self.seed_compute.lock().hash_block_number(b.block().header().number());

-if no_new_work_timeout > 0 && b.block().header().timestamp() + no_new_work_timeout < get_time().sec as u64 {
+let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
+if no_new_work_timeout > 0 && b.block().header().timestamp() + no_new_work_timeout < now {
Err(errors::no_new_work())
} else if self.options.send_block_number_in_get_work {
let block_number = b.block().header().number();
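In the getWork handler the wall-clock read is hoisted into a local `now`, and `unwrap_or_default()` makes a clock set before the epoch degrade to 0 instead of panicking inside an RPC call. A minimal sketch of the check with hypothetical stand-in parameters:

use std::time::{SystemTime, UNIX_EPOCH};

// `block_timestamp` and `no_new_work_timeout` stand in for the values read
// from the pending block and the miner options in the real code.
fn no_new_work(block_timestamp: u64, no_new_work_timeout: u64) -> bool {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()   // 0 secs rather than a panic on a pre-1970 clock
        .as_secs();
    no_new_work_timeout > 0 && block_timestamp + no_new_work_timeout < now
}

fn main() {
    assert!(!no_new_work(0, 0)); // a timeout of 0 disables the check
}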
@ -17,7 +17,7 @@
use std::str::FromStr;
use std::collections::HashMap;
use std::sync::Arc;
-use std::time::{Instant, Duration};
+use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH};

use ethereum_types::{H256, U256, Address};
use parking_lot::Mutex;
@ -31,7 +31,6 @@ use ethsync::SyncState;
use miner::external::ExternalMiner;
use rlp;
use rustc_hex::{FromHex, ToHex};
-use time::get_time;
use transaction::{Transaction, Action};

use jsonrpc_core::IoHandler;
@ -1144,7 +1143,8 @@ fn rpc_get_work_should_not_return_block_number() {
fn rpc_get_work_should_timeout() {
let eth_tester = EthTester::default();
eth_tester.miner.set_author(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap());
-eth_tester.client.set_latest_block_timestamp(get_time().sec as u64 - 1000); // Set latest block to 1000 seconds ago
+let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() - 1000; // Set latest block to 1000 seconds ago
+eth_tester.client.set_latest_block_timestamp(timestamp);
let hash = eth_tester.miner.map_sealing_work(&*eth_tester.client, |b| b.hash()).unwrap();

// Request without providing timeout. This should work since we're disabling timeout.
@ -24,7 +24,6 @@ kvdb = { path = "../util/kvdb" }
macros = { path = "../util/macros" }
log = "0.3"
env_logger = "0.4"
-time = "0.1.34"
rand = "0.4"
heapsize = "0.4"
semver = "0.6"
@ -90,6 +90,7 @@

use std::collections::{HashSet, HashMap};
use std::cmp;
+use std::time::Instant;
use hash::keccak;
use heapsize::HeapSizeOf;
use ethereum_types::{H256, U256};
@ -104,7 +105,6 @@ use ethcore::error::*;
use ethcore::snapshot::{ManifestData, RestorationStatus};
use transaction::PendingTransaction;
use sync_io::SyncIo;
-use time;
use super::SyncConfig;
use block_sync::{BlockDownloader, BlockRequest, BlockDownloaderImportError as DownloaderImportError, DownloadAction};
use rand::Rng;
@ -305,7 +305,7 @@ struct PeerInfo {
/// Holds requested snapshot chunk hash if any.
asking_snapshot_data: Option<H256>,
/// Request timestamp
-ask_time: u64,
+ask_time: Instant,
/// Holds a set of transactions recently sent to this peer to avoid spamming.
last_sent_transactions: HashSet<H256>,
/// Pending request is expired and result should be ignored
@ -377,9 +377,9 @@ pub struct ChainSync {
snapshot: Snapshot,
/// Connected peers pending Status message.
/// Value is request timestamp.
-handshaking_peers: HashMap<PeerId, u64>,
+handshaking_peers: HashMap<PeerId, Instant>,
/// Sync start timestamp. Measured when first peer is connected
-sync_start_time: Option<u64>,
+sync_start_time: Option<Instant>,
/// Transactions propagation statistics
transactions_stats: TransactionsStats,
/// Enable ancient block downloading
@ -544,7 +544,7 @@ impl ChainSync {
(best_hash, max_peers, snapshot_peers)
};

-let timeout = (self.state == SyncState::WaitingPeers) && self.sync_start_time.map_or(false, |t| ((time::precise_time_ns() - t) / 1_000_000_000) > WAIT_PEERS_TIMEOUT_SEC);
+let timeout = (self.state == SyncState::WaitingPeers) && self.sync_start_time.map_or(false, |t| t.elapsed().as_secs() > WAIT_PEERS_TIMEOUT_SEC);

if let (Some(hash), Some(peers)) = (best_hash, best_hash.map_or(None, |h| snapshot_peers.get(&h))) {
if max_peers >= SNAPSHOT_MIN_PEERS {
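With `sync_start_time` stored as an `Option<Instant>`, the wait-for-peers timeout becomes a plain `elapsed()` comparison instead of nanosecond arithmetic. A simplified sketch of the same idea; the struct, field and constant here are illustrative, not the real ChainSync:

use std::time::Instant;

const WAIT_PEERS_TIMEOUT_SEC: u64 = 5; // illustrative value only

struct SyncTimers {
    sync_start_time: Option<Instant>,
}

impl SyncTimers {
    // Replaces ((precise_time_ns() - t) / 1_000_000_000) > WAIT_PEERS_TIMEOUT_SEC
    fn waited_too_long(&self) -> bool {
        self.sync_start_time
            .map_or(false, |t| t.elapsed().as_secs() > WAIT_PEERS_TIMEOUT_SEC)
    }
}

fn main() {
    let timers = SyncTimers { sync_start_time: Some(Instant::now()) };
    assert!(!timers.waited_too_long());
}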
@ -616,7 +616,7 @@ impl ChainSync {
asking: PeerAsking::Nothing,
asking_blocks: Vec::new(),
asking_hash: None,
-ask_time: 0,
+ask_time: Instant::now(),
last_sent_transactions: HashSet::new(),
expired: false,
confirmation: if self.fork_block.is_none() { ForkConfirmation::Confirmed } else { ForkConfirmation::Unconfirmed },
@ -627,7 +627,7 @@ impl ChainSync {
};

if self.sync_start_time.is_none() {
-self.sync_start_time = Some(time::precise_time_ns());
+self.sync_start_time = Some(Instant::now());
}

trace!(target: "sync", "New peer {} (protocol: {}, network: {:?}, difficulty: {:?}, latest:{}, genesis:{}, snapshot:{:?})",
@ -1150,7 +1150,7 @@ impl ChainSync {
debug!(target:"sync", "Error sending status request: {:?}", e);
io.disconnect_peer(peer);
} else {
-self.handshaking_peers.insert(peer, time::precise_time_ns());
+self.handshaking_peers.insert(peer, Instant::now());
}
}

@ -1438,7 +1438,7 @@ impl ChainSync {
warn!(target:"sync", "Asking {:?} while requesting {:?}", peer.asking, asking);
}
peer.asking = asking;
-peer.ask_time = time::precise_time_ns();
+peer.ask_time = Instant::now();
let result = if packet_id >= ETH_PACKET_COUNT {
sync.send_protocol(WARP_SYNC_PROTOCOL_ID, peer_id, packet_id, packet)
} else {
@ -1778,10 +1778,10 @@ impl ChainSync {
}

pub fn maintain_peers(&mut self, io: &mut SyncIo) {
-let tick = time::precise_time_ns();
+let tick = Instant::now();
let mut aborting = Vec::new();
for (peer_id, peer) in &self.peers {
-let elapsed = (tick - peer.ask_time) / 1_000_000_000;
+let elapsed = (tick - peer.ask_time).as_secs();
let timeout = match peer.asking {
PeerAsking::BlockHeaders => elapsed > HEADERS_TIMEOUT_SEC,
PeerAsking::BlockBodies => elapsed > BODIES_TIMEOUT_SEC,
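Subtracting one `Instant` from another yields a `Duration`, so the per-peer timeout check becomes `(tick - peer.ask_time).as_secs()`. On the toolchains of this era that subtraction panics if the right-hand side is later than the left; here `ask_time` is always in the past. A reduced sketch with an illustrative constant:

use std::time::Instant;

const HEADERS_TIMEOUT_SEC: u64 = 15; // illustrative value only

fn request_timed_out(ask_time: Instant) -> bool {
    let tick = Instant::now();
    let elapsed = (tick - ask_time).as_secs(); // Instant - Instant -> Duration
    elapsed > HEADERS_TIMEOUT_SEC
}

fn main() {
    assert!(!request_timed_out(Instant::now()));
}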
@ -1802,9 +1802,9 @@ impl ChainSync {
}

// Check for handshake timeouts
-for (peer, ask_time) in &self.handshaking_peers {
+for (peer, &ask_time) in &self.handshaking_peers {
let elapsed = (tick - ask_time) / 1_000_000_000;
-if elapsed > STATUS_TIMEOUT_SEC {
+if elapsed.as_secs() > STATUS_TIMEOUT_SEC {
trace!(target:"sync", "Status timeout {}", peer);
io.disconnect_peer(*peer);
}
@ -2474,7 +2474,7 @@ mod tests {
asking: PeerAsking::Nothing,
asking_blocks: Vec::new(),
asking_hash: None,
-ask_time: 0,
+ask_time: Instant::now(),
last_sent_transactions: HashSet::new(),
expired: false,
confirmation: super::ForkConfirmation::Confirmed,
@ -2595,7 +2595,7 @@ mod tests {
asking: PeerAsking::Nothing,
asking_blocks: Vec::new(),
asking_hash: None,
-ask_time: 0,
+ask_time: Instant::now(),
last_sent_transactions: HashSet::new(),
expired: false,
confirmation: super::ForkConfirmation::Confirmed,
@ -29,7 +29,6 @@ extern crate ethcore_transaction as transaction;
extern crate ethcore;
extern crate ethereum_types;
extern crate env_logger;
-extern crate time;
extern crate plain_hasher;
extern crate rand;
extern crate semver;
@ -32,7 +32,7 @@ use light::provider::LightProvider;
use network::{NodeId, PeerId};
use parking_lot::RwLock;

-use time::Duration;
+use std::time::Duration;
use light::cache::Cache;

const NETWORK_ID: u64 = 0xcafebabe;
@ -218,7 +218,7 @@ impl TestNet<Peer> {

// skip full verification because the blocks are bad.
config.verify_full = false;
-let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));
let db = kvdb_memorydb::create(0);
let client = LightClient::new(
config,
@ -11,7 +11,6 @@ log = "0.3"
mio = "0.6.8"
bytes = "0.4"
rand = "0.4"
-time = "0.1.34"
tiny-keccak = "1.3"
rust-crypto = "0.2.34"
slab = "0.2"
@ -19,11 +19,11 @@ use std::net::SocketAddr;
use std::collections::{HashSet, HashMap, BTreeMap, VecDeque};
use std::mem;
use std::default::Default;
+use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use mio::*;
use mio::deprecated::{Handler, EventLoop};
use mio::udp::*;
use hash::keccak;
-use time;
use ethereum_types::{H256, H520};
use rlp::*;
use node_table::*;
@ -59,7 +59,7 @@ pub struct NodeEntry {
pub struct BucketEntry {
pub address: NodeEntry,
pub id_hash: H256,
-pub timeout: Option<u64>,
+pub timeout: Option<Instant>,
}

pub struct NodeBucket {
@ -170,7 +170,7 @@ impl Discovery {
if bucket.nodes.len() > BUCKET_SIZE {
//ping least active node
let last = bucket.nodes.back_mut().expect("Last item is always present when len() > 0");
-last.timeout = Some(time::precise_time_ns());
+last.timeout = Some(Instant::now());
Some(last.address.endpoint.clone())
} else { None }
};
@ -262,7 +262,7 @@ impl Discovery {
for i in 0 .. source.item_count() {
rlp.append_raw(source.at(i).as_raw(), 1);
}
-let timestamp = time::get_time().sec as u32 + 60;
+let timestamp = 60 + SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs() as u32;
rlp.append(&timestamp);

let bytes = rlp.drain();
@ -394,7 +394,8 @@ impl Discovery {

/// Validate that given timestamp is in within one second of now or in the future
fn check_timestamp(&self, timestamp: u64) -> Result<(), Error> {
-if self.check_timestamps && timestamp < time::get_time().sec as u64{
+let secs_since_epoch = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
+if self.check_timestamps && timestamp < secs_since_epoch {
debug!(target: "discovery", "Expired packet");
return Err(ErrorKind::Expired.into());
}
@ -504,12 +505,12 @@ impl Discovery {
}

fn check_expired(&mut self, force: bool) -> HashSet<NodeId> {
-let now = time::precise_time_ns();
+let now = Instant::now();
let mut removed: HashSet<NodeId> = HashSet::new();
for bucket in &mut self.node_buckets {
bucket.nodes.retain(|node| {
if let Some(timeout) = node.timeout {
-if !force && now - timeout < PING_TIMEOUT_MS * 1000_0000 {
+if !force && now.duration_since(timeout) < Duration::from_millis(PING_TIMEOUT_MS) {
true
}
else {
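The bucket-expiry check now compares `Duration`s directly: `now.duration_since(timeout) < Duration::from_millis(PING_TIMEOUT_MS)` replaces the hand-rolled nanosecond arithmetic. A minimal sketch of that retain predicate; the constant value is illustrative:

use std::time::{Duration, Instant};

const PING_TIMEOUT_MS: u64 = 300; // illustrative value only

// Keep a node while its outstanding ping is still inside the timeout window.
fn still_waiting(ping_sent_at: Instant) -> bool {
    Instant::now().duration_since(ping_sent_at) < Duration::from_millis(PING_TIMEOUT_MS)
}

fn main() {
    assert!(still_waiting(Instant::now()));
}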
@ -67,7 +67,6 @@ extern crate mio;
extern crate tiny_keccak;
extern crate crypto as rcrypto;
extern crate rand;
-extern crate time;
extern crate ansi_term; //TODO: remove this
extern crate rustc_hex;
extern crate igd;
@ -18,6 +18,7 @@ use std::{str, io};
use std::net::SocketAddr;
use std::sync::*;
use std::collections::HashMap;
+use std::time::{Duration, Instant};

use mio::*;
use mio::deprecated::{Handler, EventLoop};
@ -32,7 +33,6 @@ use network::{SessionCapabilityInfo, HostInfo as HostInfoTrait};
use host::*;
use node_table::NodeId;
use stats::NetworkStats;
-use time;
use snappy;

// Timeout must be less than (interval - 1).
@ -59,8 +59,8 @@ pub struct Session {
had_hello: bool,
/// Session is no longer active flag.
expired: bool,
-ping_time_ns: u64,
+ping_time: Instant,
-pong_time_ns: Option<u64>,
+pong_time: Option<Instant>,
state: State,
// Protocol states -- accumulates pending packets until signaled as ready.
protocol_states: HashMap<ProtocolId, ProtocolState>,
@ -123,8 +123,8 @@ impl Session {
remote_address: "Handshake".to_owned(),
local_address: local_addr,
},
-ping_time_ns: 0,
+ping_time: Instant::now(),
-pong_time_ns: None,
+pong_time: None,
expired: false,
protocol_states: HashMap::new(),
compression: false,
@ -299,13 +299,13 @@ impl Session {
if let State::Handshake(_) = self.state {
return true;
}
-let timed_out = if let Some(pong) = self.pong_time_ns {
+let timed_out = if let Some(pong) = self.pong_time {
-pong - self.ping_time_ns > PING_TIMEOUT_SEC * 1000_000_000
+pong.duration_since(self.ping_time) > Duration::from_secs(PING_TIMEOUT_SEC)
} else {
-time::precise_time_ns() - self.ping_time_ns > PING_TIMEOUT_SEC * 1000_000_000
+self.ping_time.elapsed() > Duration::from_secs(PING_TIMEOUT_SEC)
};

-if !timed_out && time::precise_time_ns() - self.ping_time_ns > PING_INTERVAL_SEC * 1000_000_000 {
+if !timed_out && self.ping_time.elapsed() > Duration::from_secs(PING_INTERVAL_SEC) {
if let Err(e) = self.send_ping(io) {
debug!("Error sending ping message: {:?}", e);
}
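With `ping_time` and `pong_time` held as `Instant`s, the keep-alive logic reads as `elapsed()` and `duration_since()` comparisons against `Duration` constants. A condensed sketch of that shape, using illustrative names and values rather than the real Session type:

use std::time::{Duration, Instant};

const PING_TIMEOUT_SEC: u64 = 65;  // illustrative values only
const PING_INTERVAL_SEC: u64 = 30;

struct Ping {
    ping_time: Instant,
    pong_time: Option<Instant>,
}

impl Ping {
    fn timed_out(&self) -> bool {
        match self.pong_time {
            // A pong arrived: judge the round trip it completed.
            Some(pong) => pong.duration_since(self.ping_time) > Duration::from_secs(PING_TIMEOUT_SEC),
            // Still waiting: judge how long the ping has been outstanding.
            None => self.ping_time.elapsed() > Duration::from_secs(PING_TIMEOUT_SEC),
        }
    }

    fn should_ping_again(&self) -> bool {
        !self.timed_out() && self.ping_time.elapsed() > Duration::from_secs(PING_INTERVAL_SEC)
    }
}

fn main() {
    let session = Ping { ping_time: Instant::now(), pong_time: None };
    assert!(!session.timed_out());
    assert!(!session.should_ping_again());
}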
@ -368,9 +368,11 @@ impl Session {
Ok(SessionData::Continue)
},
PACKET_PONG => {
-let time = time::precise_time_ns();
+let time = Instant::now();
-self.pong_time_ns = Some(time);
+self.pong_time = Some(time);
-self.info.ping_ms = Some((time - self.ping_time_ns) / 1000_000);
+let ping_elapsed = time.duration_since(self.ping_time);
+self.info.ping_ms = Some(ping_elapsed.as_secs() * 1_000 +
+ping_elapsed.subsec_nanos() as u64 / 1_000_000);
Ok(SessionData::Continue)
},
PACKET_GET_PEERS => Ok(SessionData::None), //TODO;
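`Duration` did not yet have a stable `as_millis()` here, so the round-trip time is rebuilt from whole seconds plus sub-second nanoseconds. A small sketch of that conversion (later toolchains can use `as_millis()`, which returns u128):

use std::time::Duration;

// Whole milliseconds from a Duration, as computed for `info.ping_ms` above.
fn to_millis(d: Duration) -> u64 {
    d.as_secs() * 1_000 + d.subsec_nanos() as u64 / 1_000_000
}

fn main() {
    assert_eq!(to_millis(Duration::new(2, 500_000_000)), 2_500);
}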
@ -482,11 +484,11 @@ impl Session {
Ok(())
}

-/// Senf ping packet
+/// Send ping packet
pub fn send_ping<Message>(&mut self, io: &IoContext<Message>) -> Result<(), Error> where Message: Send + Sync + Clone {
self.send_packet(io, None, PACKET_PING, &EMPTY_LIST_RLP)?;
-self.ping_time_ns = time::precise_time_ns();
+self.ping_time = Instant::now();
-self.pong_time_ns = None;
+self.pong_time = None;
Ok(())
}

@ -4,5 +4,4 @@ version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]

[dependencies]
-time = "0.1.34"
log = "0.3"
@ -16,11 +16,10 @@

//! Performance timer with logging

-extern crate time;
#[macro_use]
extern crate log;

-use time::precise_time_ns;
+use std::time::Instant;

#[macro_export]
macro_rules! trace_time {
@ -33,7 +32,7 @@ macro_rules! trace_time {
/// elapsed time in the destructor or when `stop` is called.
pub struct PerfTimer {
name: &'static str,
-start: u64,
+start: Instant,
}

impl PerfTimer {
@ -41,13 +40,16 @@ impl PerfTimer {
pub fn new(name: &'static str) -> PerfTimer {
PerfTimer {
name,
-start: precise_time_ns(),
+start: Instant::now(),
}
}
}

impl Drop for PerfTimer {
fn drop(&mut self) {
-trace!(target: "perf", "{}: {:.2}ms", self.name, (precise_time_ns() - self.start) as f32 / 1000_000.0);
+let elapsed = self.start.elapsed();
+let ms = elapsed.subsec_nanos() as f32 / 1_000_000.0 +
+elapsed.as_secs() as f32 * 1_000.0;
+trace!(target: "perf", "{}: {:.2}ms", self.name, ms);
}
}
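The perf trace wants fractional milliseconds, so the elapsed `Duration` is folded into an `f32` by hand. A sketch of the conversion used above; later std versions also offer `as_secs_f32()`:

use std::time::Instant;

fn main() {
    let start = Instant::now();
    // ... the work being measured ...
    let elapsed = start.elapsed();
    // Fractional milliseconds, as logged in PerfTimer::drop above.
    let ms = elapsed.subsec_nanos() as f32 / 1_000_000.0 + elapsed.as_secs() as f32 * 1_000.0;
    println!("work took {:.2}ms", ms);
}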
@ -23,7 +23,6 @@ serde_derive = "1.0"
serde_json = "1.0"
slab = "0.3"
smallvec = "0.4"
-time = "0.1"
tiny-keccak = "1.3"

jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.10" }
@ -31,7 +31,6 @@ extern crate ring;
extern crate serde;
extern crate slab;
extern crate smallvec;
-extern crate time;
extern crate tiny_keccak;

extern crate jsonrpc_core;
@ -17,7 +17,7 @@
//! Whisper message parsing, handlers, and construction.

use std::fmt;
-use std::time::{self, SystemTime, Duration};
+use std::time::{self, SystemTime, Duration, Instant};

use ethereum_types::{H256, H512};
use rlp::{self, DecoderError, RlpStream, UntrustedRlp};
@ -299,9 +299,9 @@ impl Message {
let mut nonce: [u8; 8] = rng.gen();
let mut best_found = try_nonce(&nonce);

-let start = ::time::precise_time_ns();
+let start = Instant::now();

-while ::time::precise_time_ns() <= start + params.work * 1_000_000 {
+while start.elapsed() <= Duration::from_millis(params.work) {
let temp_nonce = rng.gen();
let hash = try_nonce(&temp_nonce);

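The proof-of-work search is now bounded by monotonic time: loop while `start.elapsed()` stays within a `Duration` built from the configured millisecond budget. A stripped-down sketch of that loop shape; the per-iteration work is a placeholder, not the real nonce hashing:

use std::time::{Duration, Instant};

// Run repeated work for at most `work_ms` milliseconds of monotonic time.
fn bounded_search(work_ms: u64) -> u64 {
    let start = Instant::now();
    let mut iterations = 0u64;
    while start.elapsed() <= Duration::from_millis(work_ms) {
        iterations += 1; // placeholder for hashing a candidate nonce
    }
    iterations
}

fn main() {
    assert!(bounded_search(5) > 0);
}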