use constant durations (#8278)

* use constant durations

* fix CI

* address comments
Ryan Leung 2018-04-02 16:47:56 +08:00 committed by Tomasz Drwięga
parent 68a08df9c3
commit 9c9ddaccec
12 changed files with 88 additions and 84 deletions
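
What follows is the same mechanical pattern in all 12 files: a raw u64 timeout constant plus a Duration::from_secs/Duration::from_millis conversion at every call site is collapsed into a single typed Duration constant. A minimal before/after sketch (the TIMEOUT name here is illustrative, not from any one file):

use std::time::{Duration, Instant};

// Before: a bare integer whose unit lives only in its name, converted
// at every call site:
//     const TIMEOUT_SECS: u64 = 5;
//     let deadline = Instant::now() + Duration::from_secs(TIMEOUT_SECS);

// After: the unit is carried by the type and the conversion happens once,
// at the definition (Duration::from_secs is a const fn on modern Rust).
const TIMEOUT: Duration = Duration::from_secs(5);

fn main() {
    let deadline = Instant::now() + TIMEOUT;
    println!("deadline in {:?}", deadline - Instant::now());
}

Besides removing the repeated conversions, the typed constant turns a seconds-vs-milliseconds mix-up at a call site into a compile error rather than a silent timing bug.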

View File

@@ -17,7 +17,7 @@
//! Reporting node's health.
use std::sync::Arc;
use std::time;
use std::time::Duration;
use futures::Future;
use futures::sync::oneshot;
use types::{HealthInfo, HealthStatus, Health};
@@ -26,7 +26,7 @@ use parity_reactor::Remote;
use parking_lot::Mutex;
use {SyncStatus};
const TIMEOUT_SECS: u64 = 5;
const TIMEOUT: Duration = Duration::from_secs(5);
const PROOF: &str = "Only one closure is invoked.";
/// A struct enabling you to query for node's health.
@@ -57,7 +57,7 @@ impl NodeHealth {
let _ = tx.lock().take().expect(PROOF).send(Ok(result));
Ok(())
}),
time::Duration::from_secs(TIMEOUT_SECS),
TIMEOUT,
move || {
let _ = tx2.lock().take().expect(PROOF).send(Err(()));
},

View File

@@ -32,7 +32,7 @@ use handlers::{ContentHandler, StreamingHandler};
use page::local;
use {Embeddable};
const FETCH_TIMEOUT: u64 = 300;
const FETCH_TIMEOUT: Duration = Duration::from_secs(300);
pub enum ValidatorResponse {
Local(local::Dapp),
@@ -57,7 +57,7 @@ impl Default for FetchControl {
FetchControl {
abort: Arc::new(AtomicBool::new(false)),
listeners: Arc::new(Mutex::new(Vec::new())),
deadline: Instant::now() + Duration::from_secs(FETCH_TIMEOUT),
deadline: Instant::now() + FETCH_TIMEOUT,
}
}
}
@@ -193,7 +193,7 @@ impl Errors {
ContentHandler::error(
StatusCode::GatewayTimeout,
"Download Timeout",
&format!("Could not fetch content within {} seconds.", FETCH_TIMEOUT),
&format!("Could not fetch content within {} seconds.", FETCH_TIMEOUT.as_secs()),
None,
self.embeddable_on.clone(),
)
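
One side effect of the typed constant shows up in the hunk above: Duration implements Debug but not Display, so it no longer drops straight into a format string, and the error message switches to FETCH_TIMEOUT.as_secs(). A small sketch of both formatting options (FETCH_TIMEOUT as in the diff, the surrounding code illustrative):

use std::time::Duration;

const FETCH_TIMEOUT: Duration = Duration::from_secs(300);

fn main() {
    // Extract a number for user-facing text, as the error handler above does...
    println!("Could not fetch content within {} seconds.", FETCH_TIMEOUT.as_secs());
    // ...or fall back to Debug formatting, which prints "300s".
    println!("fetch timeout: {:?}", FETCH_TIMEOUT);
}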

View File

@@ -73,7 +73,7 @@ const RECALCULATE_COSTS_TIMEOUT: TimerToken = 3;
const RECALCULATE_COSTS_INTERVAL_MS: u64 = 60 * 60 * 1000;
// minimum interval between updates.
const UPDATE_INTERVAL_MS: u64 = 5000;
const UPDATE_INTERVAL: Duration = Duration::from_millis(5000);
/// Supported protocol versions.
pub const PROTOCOL_VERSIONS: &'static [u8] = &[1];
@@ -109,8 +109,10 @@ mod packet {
// timeouts for different kinds of requests. remaining u64 values are in milliseconds.
mod timeout {
pub const HANDSHAKE: u64 = 2500;
pub const ACKNOWLEDGE_UPDATE: u64 = 5000;
use std::time::Duration;
pub const HANDSHAKE: Duration = Duration::from_millis(2500);
pub const ACKNOWLEDGE_UPDATE: Duration = Duration::from_millis(5000);
pub const BASE: u64 = 1500; // base timeout for packet.
// timeouts per request within packet.
@@ -470,7 +472,7 @@ impl LightProtocol {
// the timer approach will skip 1 (possibly 2) in rare occasions.
if peer_info.sent_head == announcement.head_hash ||
peer_info.status.head_num >= announcement.head_num ||
now - peer_info.last_update < Duration::from_millis(UPDATE_INTERVAL_MS) {
now - peer_info.last_update < UPDATE_INTERVAL {
continue
}
@@ -606,7 +608,7 @@ impl LightProtocol {
let mut pending = self.pending_peers.write();
let slowpokes: Vec<_> = pending.iter()
.filter(|&(_, ref peer)| {
peer.last_update + Duration::from_millis(timeout::HANDSHAKE) <= now
peer.last_update + timeout::HANDSHAKE <= now
})
.map(|(&p, _)| p)
.collect();
@@ -619,7 +621,7 @@
}
// request and update ack timeouts
let ack_duration = Duration::from_millis(timeout::ACKNOWLEDGE_UPDATE);
let ack_duration = timeout::ACKNOWLEDGE_UPDATE;
{
for (peer_id, peer) in self.peers.read().iter() {
let peer = peer.lock();
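
A small Rust detail in the mod timeout hunk above: use declarations are scoped to the module that contains them, so the nested module cannot see a file-level import of Duration and has to add its own. A minimal sketch (constant values from the diff, structure illustrative):

// Imports are per-module: whatever the enclosing file brings into scope,
// a nested module must import (or path-qualify) for itself.
mod timeout {
    use std::time::Duration;

    pub const HANDSHAKE: Duration = Duration::from_millis(2500);
    pub const ACKNOWLEDGE_UPDATE: Duration = Duration::from_millis(5000);
}

fn main() {
    // Typed durations compare directly, with no unit bookkeeping.
    assert!(timeout::HANDSHAKE < timeout::ACKNOWLEDGE_UPDATE);
    println!("handshake timeout: {:?}", timeout::HANDSHAKE);
}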

View File

@@ -53,11 +53,11 @@ impl ExternalMiner {
}
}
const ENTRY_TIMEOUT: u64 = 2;
const ENTRY_TIMEOUT: Duration = Duration::from_secs(2);
impl ExternalMinerService for ExternalMiner {
fn submit_hashrate(&self, hashrate: U256, id: H256) {
self.hashrates.lock().insert(id, (Instant::now() + Duration::from_secs(ENTRY_TIMEOUT), hashrate));
self.hashrates.lock().insert(id, (Instant::now() + ENTRY_TIMEOUT, hashrate));
}
fn hashrate(&self) -> U256 {

View File

@@ -38,7 +38,7 @@ const TOKEN: TimerToken = 1;
const TIMEOUT_MS: u64 = 1000 * 60 * 10;
// But make each attempt last only 9 minutes
const PURGE_TIMEOUT_MS: u64 = 1000 * 60 * 9;
const PURGE_TIMEOUT: Duration = Duration::from_millis(1000 * 60 * 9);
/// Periodically culls the transaction queue of mined transactions.
pub struct QueueCull<T> {
@@ -100,6 +100,6 @@ impl<T: LightChainClient + 'static> IoHandler<ClientIoMessage> for QueueCull<T>
future::Either::B(future::ok(()))
},
}
}, Duration::from_millis(PURGE_TIMEOUT_MS), || {})
}, PURGE_TIMEOUT, || {})
}
}

View File

@@ -16,7 +16,7 @@
use std::collections::{BTreeSet, BTreeMap, VecDeque};
use std::fmt::{Debug, Formatter, Error as FmtError};
use std::time;
use std::time::Duration;
use std::sync::Arc;
use parking_lot::{Condvar, Mutex};
use ethereum_types::Address;
@@ -252,7 +252,7 @@ impl SessionImpl {
}
/// Wait for session completion.
pub fn wait(&self, timeout: Option<time::Duration>) -> Result<Public, Error> {
pub fn wait(&self, timeout: Option<Duration>) -> Result<Public, Error> {
Self::wait_session(&self.completed, &self.data, timeout, |data| data.joint_public_and_secret.clone()
.map(|r| r.map(|r| r.0.clone())))
}
@@ -932,9 +932,9 @@ pub fn check_threshold(threshold: usize, nodes: &BTreeSet<NodeId>) -> Result<(),
#[cfg(test)]
pub mod tests {
use std::time;
use std::sync::Arc;
use std::collections::{BTreeSet, BTreeMap, VecDeque};
use std::time::Duration;
use tokio_core::reactor::Core;
use ethereum_types::Address;
use ethkey::{Random, Generator, Public, KeyPair};
@@ -1386,12 +1386,12 @@ pub mod tests {
run_clusters(&clusters);
// establish connections
loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established));
loop_until(&mut core, Duration::from_millis(300), || clusters.iter().all(all_connections_established));
// run session to completion
let session_id = SessionId::default();
let session = clusters[0].client().new_generation_session(session_id, Public::default(), threshold).unwrap();
loop_until(&mut core, time::Duration::from_millis(1000), || session.joint_public_and_secret().is_some());
loop_until(&mut core, Duration::from_millis(1000), || session.joint_public_and_secret().is_some());
}
}

View File

@@ -15,7 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use std::time;
use std::time::{Duration, Instant};
use std::sync::Arc;
use std::collections::{BTreeMap, BTreeSet};
use std::collections::btree_map::Entry;
@@ -53,10 +53,10 @@ const MAINTAIN_INTERVAL: u64 = 10;
/// When no messages have been received from a node within KEEP_ALIVE_SEND_INTERVAL,
/// we must send a KeepAlive message to the node to check if it still responds to messages.
const KEEP_ALIVE_SEND_INTERVAL: u64 = 30;
const KEEP_ALIVE_SEND_INTERVAL: Duration = Duration::from_secs(30);
/// When no messages have been received from a node within KEEP_ALIVE_DISCONNECT_INTERVAL,
/// we must treat this node as non-responding && disconnect from it.
const KEEP_ALIVE_DISCONNECT_INTERVAL: u64 = 60;
const KEEP_ALIVE_DISCONNECT_INTERVAL: Duration = Duration::from_secs(60);
/// Empty future.
pub type BoxedEmptyFuture = Box<Future<Item = (), Error = ()> + Send>;
@@ -220,7 +220,7 @@ pub struct Connection {
/// Connection key.
key: KeyPair,
/// Last message time.
last_message_time: Mutex<time::Instant>,
last_message_time: Mutex<Instant>,
}
impl ClusterCore {
@@ -324,7 +324,7 @@ impl ClusterCore {
/// Schedule maintenance procedures.
fn schedule_maintain(handle: &Handle, data: Arc<ClusterData>) {
let d = data.clone();
let interval: BoxedEmptyFuture = Box::new(Interval::new(time::Duration::new(MAINTAIN_INTERVAL, 0), handle)
let interval: BoxedEmptyFuture = Box::new(Interval::new(Duration::new(MAINTAIN_INTERVAL, 0), handle)
.expect("failed to create interval")
.and_then(move |_| Ok(ClusterCore::maintain(data.clone())))
.for_each(|_| Ok(()))
@@ -374,12 +374,12 @@
fn keep_alive(data: Arc<ClusterData>) {
data.sessions.sessions_keep_alive();
for connection in data.connections.active_connections() {
let last_message_diff = time::Instant::now() - connection.last_message_time();
if last_message_diff > time::Duration::from_secs(KEEP_ALIVE_DISCONNECT_INTERVAL) {
let last_message_diff = Instant::now() - connection.last_message_time();
if last_message_diff > KEEP_ALIVE_DISCONNECT_INTERVAL {
data.connections.remove(data.clone(), connection.node_id(), connection.is_inbound());
data.sessions.on_connection_timeout(connection.node_id());
}
else if last_message_diff > time::Duration::from_secs(KEEP_ALIVE_SEND_INTERVAL) {
else if last_message_diff > KEEP_ALIVE_SEND_INTERVAL {
data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {}))));
}
}
@@ -434,7 +434,7 @@ impl ClusterCore {
/// Process single message from the connection.
fn process_connection_message(data: Arc<ClusterData>, connection: Arc<Connection>, message: Message) {
connection.set_last_message_time(time::Instant::now());
connection.set_last_message_time(Instant::now());
trace!(target: "secretstore_net", "{}: received message {} from {}", data.self_key_pair.public(), message, connection.node_id());
// error is ignored as we only process errors on session level
match message {
@@ -799,7 +799,7 @@ impl Connection {
is_inbound: is_inbound,
stream: connection.stream,
key: connection.key,
last_message_time: Mutex::new(time::Instant::now()),
last_message_time: Mutex::new(Instant::now()),
})
}
@@ -811,11 +811,11 @@
&self.node_id
}
pub fn last_message_time(&self) -> time::Instant {
pub fn last_message_time(&self) -> Instant {
*self.last_message_time.lock()
}
pub fn set_last_message_time(&self, last_message_time: time::Instant) {
pub fn set_last_message_time(&self, last_message_time: Instant) {
*self.last_message_time.lock() = last_message_time;
}
@@ -1085,7 +1085,7 @@ fn make_socket_address(address: &str, port: u16) -> Result<SocketAddr, Error> {
#[cfg(test)]
pub mod tests {
use std::sync::Arc;
use std::time;
use std::time::{Duration, Instant};
use std::collections::{BTreeSet, VecDeque};
use parking_lot::Mutex;
use tokio_core::reactor::Core;
@@ -1104,6 +1104,8 @@ pub mod tests {
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession,
IsolatedSessionTransport as KeyVersionNegotiationSessionTransport};
const TIMEOUT: Duration = Duration::from_millis(300);
#[derive(Default)]
pub struct DummyClusterClient;
@@ -1192,15 +1194,15 @@
}
}
pub fn loop_until<F>(core: &mut Core, timeout: time::Duration, predicate: F) where F: Fn() -> bool {
let start = time::Instant::now();
pub fn loop_until<F>(core: &mut Core, timeout: Duration, predicate: F) where F: Fn() -> bool {
let start = Instant::now();
loop {
core.turn(Some(time::Duration::from_millis(1)));
core.turn(Some(Duration::from_millis(1)));
if predicate() {
break;
}
if time::Instant::now() - start > timeout {
if Instant::now() - start > timeout {
panic!("no result in {:?}", timeout);
}
}
@@ -1248,7 +1250,7 @@ pub mod tests {
let mut core = Core::new().unwrap();
let clusters = make_clusters(&core, 6010, 3);
run_clusters(&clusters);
loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established));
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
}
#[test]
@@ -1269,14 +1271,14 @@
let mut core = Core::new().unwrap();
let clusters = make_clusters(&core, 6016, 3);
run_clusters(&clusters);
loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established));
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
// ask one of nodes to produce faulty generation sessions
clusters[1].client().make_faulty_generation_sessions();
// start && wait for generation session to fail
let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap();
loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some()
loop_until(&mut core, TIMEOUT, || session.joint_public_and_secret().is_some()
&& clusters[0].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_err());
@@ -1285,7 +1287,7 @@
if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) {
// wait for both session completion && session removal (session completion event is fired
// before session is removed from its own container by cluster)
loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some()
loop_until(&mut core, TIMEOUT, || session.joint_public_and_secret().is_some()
&& clusters[i].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_err());
}
@@ -1298,14 +1300,14 @@
let mut core = Core::new().unwrap();
let clusters = make_clusters(&core, 6025, 3);
run_clusters(&clusters);
loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established));
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
// ask one of nodes to produce faulty generation sessions
clusters[0].client().make_faulty_generation_sessions();
// start && wait for generation session to fail
let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap();
loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some()
loop_until(&mut core, TIMEOUT, || session.joint_public_and_secret().is_some()
&& clusters[0].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_err());
@@ -1314,7 +1316,7 @@
if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) {
// wait for both session completion && session removal (session completion event is fired
// before session is removed from its own container by cluster)
loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some()
loop_until(&mut core, TIMEOUT, || session.joint_public_and_secret().is_some()
&& clusters[i].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_err());
}
@@ -1327,11 +1329,11 @@
let mut core = Core::new().unwrap();
let clusters = make_clusters(&core, 6019, 3);
run_clusters(&clusters);
loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established));
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
// start && wait for generation session to complete
let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap();
loop_until(&mut core, time::Duration::from_millis(300), || (session.state() == GenerationSessionState::Finished
loop_until(&mut core, TIMEOUT, || (session.state() == GenerationSessionState::Finished
|| session.state() == GenerationSessionState::Failed)
&& clusters[0].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_ok());
@@ -1339,7 +1341,7 @@
// check that session is either removed from all nodes, or nonexistent (already removed)
for i in 1..3 {
if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) {
loop_until(&mut core, time::Duration::from_millis(300), || (session.state() == GenerationSessionState::Finished
loop_until(&mut core, TIMEOUT, || (session.state() == GenerationSessionState::Finished
|| session.state() == GenerationSessionState::Failed)
&& clusters[i].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_err());
@@ -1352,7 +1354,7 @@
let mut core = Core::new().unwrap();
let clusters = make_clusters(&core, 6022, 3);
run_clusters(&clusters);
loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established));
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
// generation session
{
@@ -1388,11 +1390,11 @@
let mut core = Core::new().unwrap();
let clusters = make_clusters(&core, 6028, 3);
run_clusters(&clusters);
loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established));
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
// start && wait for generation session to complete
let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap();
loop_until(&mut core, time::Duration::from_millis(300), || (session.state() == GenerationSessionState::Finished
loop_until(&mut core, TIMEOUT, || (session.state() == GenerationSessionState::Finished
|| session.state() == GenerationSessionState::Failed)
&& clusters[0].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_ok());
@@ -1406,7 +1408,7 @@
let session0 = clusters[0].client().new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap();
let session = clusters[0].data.sessions.schnorr_signing_sessions.first().unwrap();
loop_until(&mut core, time::Duration::from_millis(300), || session.is_finished() && (0..3).all(|i|
loop_until(&mut core, TIMEOUT, || session.is_finished() && (0..3).all(|i|
clusters[i].data.sessions.schnorr_signing_sessions.is_empty()));
session0.wait().unwrap();
@@ -1415,7 +1417,7 @@
let session2 = clusters[2].client().new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap();
let session = clusters[2].data.sessions.schnorr_signing_sessions.first().unwrap();
loop_until(&mut core, time::Duration::from_millis(300), || session.is_finished() && (0..3).all(|i|
loop_until(&mut core, TIMEOUT, || session.is_finished() && (0..3).all(|i|
clusters[i].data.sessions.schnorr_signing_sessions.is_empty()));
session2.wait().unwrap();
@@ -1427,7 +1429,7 @@
let session1 = clusters[0].client().new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap();
let session = clusters[0].data.sessions.schnorr_signing_sessions.first().unwrap();
loop_until(&mut core, time::Duration::from_millis(300), || session.is_finished());
loop_until(&mut core, TIMEOUT, || session.is_finished());
session1.wait().unwrap_err();
}
@@ -1437,11 +1439,11 @@
let mut core = Core::new().unwrap();
let clusters = make_clusters(&core, 6041, 4);
run_clusters(&clusters);
loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established));
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
// start && wait for generation session to complete
let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap();
loop_until(&mut core, time::Duration::from_millis(300), || (session.state() == GenerationSessionState::Finished
loop_until(&mut core, TIMEOUT, || (session.state() == GenerationSessionState::Finished
|| session.state() == GenerationSessionState::Failed)
&& clusters[0].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_ok());
@@ -1455,7 +1457,7 @@
let session0 = clusters[0].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap();
let session = clusters[0].data.sessions.ecdsa_signing_sessions.first().unwrap();
loop_until(&mut core, time::Duration::from_millis(1000), || session.is_finished() && (0..3).all(|i|
loop_until(&mut core, Duration::from_millis(1000), || session.is_finished() && (0..3).all(|i|
clusters[i].data.sessions.ecdsa_signing_sessions.is_empty()));
session0.wait().unwrap();
@@ -1463,7 +1465,7 @@
let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap();
let session2 = clusters[2].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap();
let session = clusters[2].data.sessions.ecdsa_signing_sessions.first().unwrap();
loop_until(&mut core, time::Duration::from_millis(1000), || session.is_finished() && (0..3).all(|i|
loop_until(&mut core, Duration::from_millis(1000), || session.is_finished() && (0..3).all(|i|
clusters[i].data.sessions.ecdsa_signing_sessions.is_empty()));
session2.wait().unwrap();
@@ -1474,7 +1476,7 @@
let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap();
let session1 = clusters[0].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap();
let session = clusters[0].data.sessions.ecdsa_signing_sessions.first().unwrap();
loop_until(&mut core, time::Duration::from_millis(1000), || session.is_finished());
loop_until(&mut core, Duration::from_millis(1000), || session.is_finished());
session1.wait().unwrap_err();
}
}

View File

@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::time;
use std::time::{Duration, Instant};
use std::sync::{Arc, Weak};
use std::sync::atomic::AtomicBool;
use std::collections::{VecDeque, BTreeMap, BTreeSet};
@@ -43,9 +43,9 @@ use key_server_cluster::cluster_sessions_creator::{GenerationSessionCreator, Enc
/// we must treat this session as stalled && finish it with an error.
/// This timeout is for cases when node is responding to KeepAlive messages, but intentionally ignores
/// session messages.
const SESSION_TIMEOUT_INTERVAL: u64 = 60;
const SESSION_TIMEOUT_INTERVAL: Duration = Duration::from_secs(60);
/// Interval to send session-level KeepAlive-messages.
const SESSION_KEEP_ALIVE_INTERVAL: u64 = 30;
const SESSION_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(30);
lazy_static! {
/// Servers set change session id (there could be at most 1 session => hardcoded id).
@@ -84,7 +84,7 @@ pub trait ClusterSession {
fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error>;
/// 'Wait for session completion' helper.
fn wait_session<T, U, F: Fn(&U) -> Option<Result<T, Error>>>(completion_event: &Condvar, session_data: &Mutex<U>, timeout: Option<time::Duration>, result_reader: F) -> Result<T, Error> {
fn wait_session<T, U, F: Fn(&U) -> Option<Result<T, Error>>>(completion_event: &Condvar, session_data: &Mutex<U>, timeout: Option<Duration>, result_reader: F) -> Result<T, Error> {
let mut locked_data = session_data.lock();
match result_reader(&locked_data) {
Some(result) => result,
@@ -170,9 +170,9 @@ pub struct QueuedSession<S> {
/// Cluster view.
pub cluster_view: Arc<Cluster>,
/// Last keep alive time.
pub last_keep_alive_time: time::Instant,
pub last_keep_alive_time: Instant,
/// Last received message time.
pub last_message_time: time::Instant,
pub last_message_time: Instant,
/// Generation session.
pub session: Arc<S>,
/// Messages queue.
@@ -291,7 +291,7 @@ impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: C
sessions.get_mut(session_id)
.map(|s| {
if update_last_message_time {
s.last_message_time = time::Instant::now();
s.last_message_time = Instant::now();
}
s.session.clone()
})
@@ -319,8 +319,8 @@ impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: C
let queued_session = QueuedSession {
master: master,
cluster_view: cluster,
last_keep_alive_time: time::Instant::now(),
last_message_time: time::Instant::now(),
last_keep_alive_time: Instant::now(),
last_message_time: Instant::now(),
session: session.clone(),
queue: VecDeque::new(),
};
@@ -353,7 +353,7 @@ impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: C
for sid in sessions.keys().cloned().collect::<Vec<_>>() {
let remove_session = {
let session = sessions.get(&sid).expect("enumerating only existing sessions; qed");
if time::Instant::now() - session.last_message_time > time::Duration::from_secs(SESSION_TIMEOUT_INTERVAL) {
if Instant::now() - session.last_message_time > SESSION_TIMEOUT_INTERVAL {
session.session.on_session_timeout();
session.session.is_finished()
} else {
@@ -401,8 +401,8 @@ impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: C
impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: ClusterSessionCreator<S, D>, SessionId: From<S::Id> {
pub fn send_keep_alive(&self, session_id: &S::Id, self_node_id: &NodeId) {
if let Some(session) = self.sessions.write().get_mut(session_id) {
let now = time::Instant::now();
if self_node_id == &session.master && now - session.last_keep_alive_time > time::Duration::from_secs(SESSION_KEEP_ALIVE_INTERVAL) {
let now = Instant::now();
if self_node_id == &session.master && now - session.last_keep_alive_time > SESSION_KEEP_ALIVE_INTERVAL {
session.last_keep_alive_time = now;
// since we send KeepAlive message to prevent nodes from disconnecting
// && worst thing that can happen if node is disconnected is that session is failed
@@ -416,7 +416,7 @@ impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: C
pub fn on_keep_alive(&self, session_id: &S::Id, sender: &NodeId) {
if let Some(session) = self.sessions.write().get_mut(session_id) {
let now = time::Instant::now();
let now = Instant::now();
// we only accept keep alive from master node of ServersSetChange session
if sender == &session.master {
session.last_keep_alive_time = now;

View File

@@ -34,7 +34,7 @@ use tokio_core::reactor;
use url::{self, Url};
const MAX_SIZE: usize = 64 * 1024 * 1024;
const MAX_SECS: u64 = 5;
const MAX_SECS: Duration = Duration::from_secs(5);
const MAX_REDR: usize = 5;
/// A handle to abort requests.
@@ -55,7 +55,7 @@ impl Default for Abort {
Abort {
abort: Arc::new(AtomicBool::new(false)),
size: MAX_SIZE,
time: Duration::from_secs(MAX_SECS),
time: MAX_SECS,
redir: MAX_REDR,
}
}
@@ -66,7 +66,7 @@ impl From<Arc<AtomicBool>> for Abort {
Abort {
abort: a,
size: MAX_SIZE,
time: Duration::from_secs(MAX_SECS),
time: MAX_SECS,
redir: MAX_REDR,
}
}

View File

@@ -47,7 +47,7 @@ const PACKET_PONG: u8 = 2;
const PACKET_FIND_NODE: u8 = 3;
const PACKET_NEIGHBOURS: u8 = 4;
const PING_TIMEOUT_MS: u64 = 300;
const PING_TIMEOUT: Duration = Duration::from_millis(300);
const MAX_NODES_PING: usize = 32; // Max nodes to add/ping at once
#[derive(Clone, Debug)]
@@ -513,7 +513,7 @@ impl Discovery {
for bucket in &mut self.node_buckets {
bucket.nodes.retain(|node| {
if let Some(timeout) = node.timeout {
if !force && now.duration_since(timeout) < Duration::from_millis(PING_TIMEOUT_MS) {
if !force && now.duration_since(timeout) < PING_TIMEOUT {
true
}
else {

View File

@@ -34,8 +34,8 @@ use node_table::NodeId;
use snappy;
// Timeout must be less than (interval - 1).
const PING_TIMEOUT_SEC: u64 = 60;
const PING_INTERVAL_SEC: u64 = 120;
const PING_TIMEOUT_SEC: Duration = Duration::from_secs(60);
const PING_INTERVAL_SEC: Duration = Duration::from_secs(120);
const MIN_PROTOCOL_VERSION: u32 = 4;
const MIN_COMPRESSION_PROTOCOL_VERSION: u32 = 5;
@@ -298,12 +298,12 @@ impl Session {
return true;
}
let timed_out = if let Some(pong) = self.pong_time {
pong.duration_since(self.ping_time) > Duration::from_secs(PING_TIMEOUT_SEC)
pong.duration_since(self.ping_time) > PING_TIMEOUT_SEC
} else {
self.ping_time.elapsed() > Duration::from_secs(PING_TIMEOUT_SEC)
self.ping_time.elapsed() > PING_TIMEOUT_SEC
};
if !timed_out && self.ping_time.elapsed() > Duration::from_secs(PING_INTERVAL_SEC) {
if !timed_out && self.ping_time.elapsed() > PING_INTERVAL_SEC {
if let Err(e) = self.send_ping(io) {
debug!("Error sending ping message: {:?}", e);
}
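
The comment at the top of this file's hunk ("Timeout must be less than (interval - 1).") states an invariant that typed constants make directly checkable, since Duration supports comparison and subtraction. A hedged sketch of such a check; the test is an illustration, not part of this commit:

use std::time::Duration;

const PING_TIMEOUT_SEC: Duration = Duration::from_secs(60);
const PING_INTERVAL_SEC: Duration = Duration::from_secs(120);

#[test]
fn ping_timeout_stays_below_interval() {
    // Mirrors the comment: timeout < interval - 1s.
    assert!(PING_TIMEOUT_SEC < PING_INTERVAL_SEC - Duration::from_secs(1));
}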

View File

@@ -45,7 +45,7 @@ pub const PROTOCOL_VERSION: usize = 6;
pub const SUPPORTED_VERSIONS: &'static [u8] = &[PROTOCOL_VERSION as u8];
// maximum tolerated delay between message packets.
const MAX_TOLERATED_DELAY_MS: u64 = 5000;
const MAX_TOLERATED_DELAY: Duration = Duration::from_millis(5000);
/// Number of packets. A bunch are reserved.
pub const PACKET_COUNT: u8 = 128;
@@ -469,7 +469,7 @@ impl<T: MessageHandler> Network<T> {
peer_data.note_evicted(&pruned_hashes);
let punish_timeout = |last_activity: &SystemTime| {
if *last_activity + Duration::from_millis(MAX_TOLERATED_DELAY_MS) <= now {
if *last_activity + MAX_TOLERATED_DELAY <= now {
debug!(target: "whisper", "Disconnecting peer {} due to excessive timeout.", peer_id);
io.disconnect_peer(*peer_id);
}