Using multiple NTP servers (#6173)

* Small improvements to time estimation.

* Allow multiple NTP servers to be used.

* Removing boxing.

* Be nice.

* Be nicer.

* Update list of servers and add reference.
Authored by Tomasz Drwięga on 2017-08-09 08:45:07 +02:00; committed by Marek Kotewicz
parent 72fa6a79a2
commit e93466c897
11 changed files with 188 additions and 78 deletions

View File

@@ -21,7 +21,7 @@ use hyper::method::Method;
use hyper::status::StatusCode;
use api::{response, types};
-use api::time::TimeChecker;
+use api::time::{TimeChecker, MAX_DRIFT};
use apps::fetcher::Fetcher;
use handlers::{self, extract_url};
use endpoint::{Endpoint, Handler, EndpointPath};
@@ -122,7 +122,6 @@ impl RestApiRouter {
// Check time
let time = {
-const MAX_DRIFT: i64 = 500;
let (status, message, details) = match time {
Ok(Ok(diff)) if diff < MAX_DRIFT && diff > -MAX_DRIFT => {
(HealthStatus::Ok, "".into(), diff)

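The hunk above drops the handler-local MAX_DRIFT constant in favour of the one now exported by api::time (imported in the first hunk), so the REST API and the time checker share a single definition of acceptable drift. A minimal sketch of the comparison being performed, assuming a simplified stand-in for the dapps HealthStatus type:

    const MAX_DRIFT: i64 = 500; // milliseconds, as exported by api::time

    enum HealthStatus { Ok, Bad } // stand-in; the real enum is richer

    // A drift reading is healthy only if it lies strictly inside (-MAX_DRIFT, MAX_DRIFT).
    fn classify(diff_ms: i64) -> HealthStatus {
        if diff_ms < MAX_DRIFT && diff_ms > -MAX_DRIFT { HealthStatus::Ok } else { HealthStatus::Bad }
    }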
View File

@@ -33,11 +33,13 @@
use std::io;
use std::{fmt, mem, time};
-use std::sync::Arc;
+use std::collections::VecDeque;
+use std::sync::atomic::{self, AtomicUsize};
+use std::sync::Arc;
-use futures::{self, Future, BoxFuture};
-use futures_cpupool::CpuPool;
+use futures::future::{self, IntoFuture};
+use futures_cpupool::{CpuPool, CpuFuture};
use ntp;
use time::{Duration, Timespec};
use util::RwLock;
@@ -45,6 +47,8 @@ use util::RwLock;
/// Time checker error.
#[derive(Debug, Clone, PartialEq)]
pub enum Error {
+/// No servers are currently available for a query.
+NoServersAvailable,
/// There was an error when trying to reach the NTP server.
Ntp(String),
/// IO error when reading NTP response.
@@ -56,6 +60,7 @@ impl fmt::Display for Error {
use self::Error::*;
match *self {
+NoServersAvailable => write!(fmt, "No NTP servers available"),
Ntp(ref err) => write!(fmt, "NTP error: {}", err),
Io(ref err) => write!(fmt, "Connection Error: {}", err),
}
@@ -72,58 +77,123 @@ impl From<ntp::errors::Error> for Error {
/// NTP time drift checker.
pub trait Ntp {
+/// Returned Future.
+type Future: IntoFuture<Item=Duration, Error=Error>;
/// Returns the current time drift.
-fn drift(&self) -> BoxFuture<Duration, Error>;
+fn drift(&self) -> Self::Future;
}
+const SERVER_MAX_POLL_INTERVAL_SECS: u64 = 60;
+#[derive(Debug)]
+struct Server {
+pub address: String,
+next_call: RwLock<time::Instant>,
+failures: AtomicUsize,
+}
+impl Server {
+pub fn is_available(&self) -> bool {
+*self.next_call.read() < time::Instant::now()
+}
+pub fn report_success(&self) {
+self.failures.store(0, atomic::Ordering::SeqCst);
+self.update_next_call(1)
+}
+pub fn report_failure(&self) {
+let errors = self.failures.fetch_add(1, atomic::Ordering::SeqCst);
+self.update_next_call(1 << errors)
+}
+fn update_next_call(&self, delay: usize) {
+*self.next_call.write() = time::Instant::now() + time::Duration::from_secs(delay as u64 * SERVER_MAX_POLL_INTERVAL_SECS);
+}
+}
+impl<T: AsRef<str>> From<T> for Server {
+fn from(t: T) -> Self {
+Server {
+address: t.as_ref().to_owned(),
+next_call: RwLock::new(time::Instant::now()),
+failures: Default::default(),
+}
+}
+}
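The new Server type tracks per-host availability. report_success resets the failure counter and allows the next query one base interval later, while report_failure backs off exponentially: fetch_add returns the previous failure count, so each consecutive failure doubles the wait. A small sketch of the resulting schedule (not the crate's code), assuming the 60-second base from SERVER_MAX_POLL_INTERVAL_SECS:

    // Seconds to wait after a failure, where `n` is the failure count before the increment.
    fn backoff_secs(n: u32) -> u64 {
        (1u64 << n) * 60 // n = 0 -> 60s, 1 -> 120s, 2 -> 240s, 3 -> 480s, ...
    }

    fn main() {
        assert_eq!(backoff_secs(0), 60);  // first failure: retry after one interval
        assert_eq!(backoff_secs(3), 480); // fourth failure: eight minutes
    }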
/// NTP client using the SNTP algorithm for calculating drift.
#[derive(Clone)]
pub struct SimpleNtp {
-address: Arc<String>,
+addresses: Vec<Arc<Server>>,
pool: CpuPool,
}
impl fmt::Debug for SimpleNtp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-write!(f, "Ntp {{ address: {} }}", self.address)
+f
+.debug_struct("SimpleNtp")
+.field("addresses", &self.addresses)
+.finish()
}
}
impl SimpleNtp {
-fn new(address: &str, pool: CpuPool) -> SimpleNtp {
+fn new<T: AsRef<str>>(addresses: &[T], pool: CpuPool) -> SimpleNtp {
SimpleNtp {
-address: Arc::new(address.to_owned()),
+addresses: addresses.iter().map(Server::from).map(Arc::new).collect(),
pool: pool,
}
}
}
impl Ntp for SimpleNtp {
-fn drift(&self) -> BoxFuture<Duration, Error> {
-let address = self.address.clone();
-if &*address == "none" {
-return futures::future::err(Error::Ntp("NTP server is not provided.".into())).boxed();
-}
+type Future = future::Either<
+CpuFuture<Duration, Error>,
+future::FutureResult<Duration, Error>,
+>;
-self.pool.spawn_fn(move || {
-let packet = ntp::request(&*address)?;
-let dest_time = ::time::now_utc().to_timespec();
-let orig_time = Timespec::from(packet.orig_time);
-let recv_time = Timespec::from(packet.recv_time);
-let transmit_time = Timespec::from(packet.transmit_time);
+fn drift(&self) -> Self::Future {
+use self::future::Either::{A, B};
-let drift = ((recv_time - orig_time) + (transmit_time - dest_time)) / 2;
+let server = self.addresses.iter().find(|server| server.is_available());
+server.map(|server| {
+let server = server.clone();
+A(self.pool.spawn_fn(move || {
+debug!(target: "dapps", "Fetching time from {}.", server.address);
-Ok(drift)
-}).boxed()
+match ntp::request(&server.address) {
+Ok(packet) => {
+let dest_time = ::time::now_utc().to_timespec();
+let orig_time = Timespec::from(packet.orig_time);
+let recv_time = Timespec::from(packet.recv_time);
+let transmit_time = Timespec::from(packet.transmit_time);
+let drift = ((recv_time - orig_time) + (transmit_time - dest_time)) / 2;
+server.report_success();
+Ok(drift)
+},
+Err(err) => {
+server.report_failure();
+Err(err.into())
+},
+}
+}))
+}).unwrap_or_else(|| B(future::err(Error::NoServersAvailable)))
}
}
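The drift computed inside spawn_fn is the standard SNTP clock-offset estimate: with T1 the client send time (orig_time), T2 the server receive time (recv_time), T3 the server transmit time (transmit_time), and T4 the client receive time (dest_time), the offset is ((T2 - T1) + (T3 - T4)) / 2, which cancels the network delay provided it is symmetric. A worked example with hypothetical millisecond timestamps:

    // Client clock 50 ms behind the server, 10 ms one-way network delay:
    //   T1 (orig)     = 100  client sends
    //   T2 (recv)     = 160  server receives
    //   T3 (transmit) = 161  server replies
    //   T4 (dest)     = 121  client receives
    fn sntp_offset_ms(t1: i64, t2: i64, t3: i64, t4: i64) -> i64 {
        ((t2 - t1) + (t3 - t4)) / 2
    }

    fn main() {
        assert_eq!(sntp_offset_ms(100, 160, 161, 121), 50); // client is ~50 ms behind
    }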
// NOTE In a positive scenario first results will be seen after:
-// MAX_RESULTS * UPDATE_TIMEOUT_OK_SECS seconds.
-const MAX_RESULTS: usize = 7;
-const UPDATE_TIMEOUT_OK_SECS: u64 = 30;
-const UPDATE_TIMEOUT_ERR_SECS: u64 = 2;
+// MAX_RESULTS * UPDATE_TIMEOUT_INCOMPLETE_SECS seconds.
+const MAX_RESULTS: usize = 4;
+const UPDATE_TIMEOUT_OK_SECS: u64 = 6 * 60 * 60;
+const UPDATE_TIMEOUT_WARN_SECS: u64 = 15 * 60;
+const UPDATE_TIMEOUT_ERR_SECS: u64 = 60;
+const UPDATE_TIMEOUT_INCOMPLETE_SECS: u64 = 10;
+/// Maximal valid time drift.
+pub const MAX_DRIFT: i64 = 500;
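Taken together, the retuned constants give the checker an adaptive schedule: while the window is still filling (fewer than MAX_RESULTS readings) it polls every UPDATE_TIMEOUT_INCOMPLETE_SECS, so a complete window is first available after roughly 4 * 10 = 40 seconds; once full, a healthy reading (drift below MAX_DRIFT) is refreshed every 6 hours, an excessive drift every 15 minutes, and a failed query every minute.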
#[derive(Debug, Clone)]
/// A time checker.
@@ -134,13 +204,13 @@ pub struct TimeChecker<N: Ntp = SimpleNtp> {
impl TimeChecker<SimpleNtp> {
/// Creates new time checker given the NTP server address.
-pub fn new(ntp_address: String, pool: CpuPool) -> Self {
+pub fn new<T: AsRef<str>>(ntp_addresses: &[T], pool: CpuPool) -> Self {
let last_result = Arc::new(RwLock::new(
// Assume everything is ok at the very beginning.
(time::Instant::now(), vec![Ok(0)].into())
));
-let ntp = SimpleNtp::new(&ntp_address, pool);
+let ntp = SimpleNtp::new(ntp_addresses, pool);
TimeChecker {
ntp,
@@ -149,22 +219,34 @@ impl TimeChecker<SimpleNtp> {
}
}
-impl<N: Ntp> TimeChecker<N> {
+impl<N: Ntp> TimeChecker<N> where <N::Future as IntoFuture>::Future: Send + 'static {
/// Updates the time
pub fn update(&self) -> BoxFuture<i64, Error> {
trace!(target: "dapps", "Updating time from NTP.");
let last_result = self.last_result.clone();
-self.ntp.drift().then(move |res| {
+self.ntp.drift().into_future().then(move |res| {
+let res = res.map(|d| d.num_milliseconds());
+if let Err(Error::NoServersAvailable) = res {
+debug!(target: "dapps", "No NTP servers available. Selecting an older result.");
+return select_result(last_result.read().1.iter());
+}
// Update the results.
let mut results = mem::replace(&mut last_result.write().1, VecDeque::new());
+let has_all_results = results.len() >= MAX_RESULTS;
let valid_till = time::Instant::now() + time::Duration::from_secs(
-if res.is_ok() && results.len() == MAX_RESULTS {
-UPDATE_TIMEOUT_OK_SECS
-} else {
-UPDATE_TIMEOUT_ERR_SECS
+match res {
+Ok(time) if has_all_results && time < MAX_DRIFT => UPDATE_TIMEOUT_OK_SECS,
+Ok(_) if has_all_results => UPDATE_TIMEOUT_WARN_SECS,
+Err(_) if has_all_results => UPDATE_TIMEOUT_ERR_SECS,
+_ => UPDATE_TIMEOUT_INCOMPLETE_SECS,
+}
}
);
trace!(target: "dapps", "New time drift received: {:?}", res);
// Push the result.
-results.push_back(res.map(|d| d.num_milliseconds()));
+results.push_back(res);
while results.len() > MAX_RESULTS {
results.pop_front();
}
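update keeps a bounded sliding window: the newest reading is pushed to the back and anything beyond MAX_RESULTS is popped from the front, so select_result always works from the most recent measurements. A minimal standalone sketch of the pattern (names assumed):

    use std::collections::VecDeque;

    fn push_bounded<T>(window: &mut VecDeque<T>, item: T, max: usize) {
        window.push_back(item);
        while window.len() > max {
            window.pop_front(); // discard the oldest reading
        }
    }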
@@ -209,7 +291,7 @@ mod tests {
use std::cell::{Cell, RefCell};
use std::time::Instant;
use time::Duration;
-use futures::{self, BoxFuture, Future};
+use futures::{future, Future};
use super::{Ntp, TimeChecker, Error};
use util::RwLock;
@@ -224,9 +306,11 @@
}
impl Ntp for FakeNtp {
-fn drift(&self) -> BoxFuture<Duration, Error> {
+type Future = future::FutureResult<Duration, Error>;
+fn drift(&self) -> Self::Future {
self.1.set(self.1.get() + 1);
-futures::future::ok(self.0.borrow_mut().pop().expect("Unexpected call to drift().")).boxed()
+future::ok(self.0.borrow_mut().pop().expect("Unexpected call to drift()."))
}
}
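This is where the "Removing boxing" bullet from the commit message lands: with the associated Future type, a test double can return a concrete future::FutureResult instead of allocating a BoxFuture. A sketch of the same pattern with a hypothetical stub (relying on the Ntp trait, Duration, and Error type above):

    struct ZeroDrift; // hypothetical stub that always reports zero drift

    impl Ntp for ZeroDrift {
        type Future = future::FutureResult<Duration, Error>;

        fn drift(&self) -> Self::Future {
            future::ok(Duration::milliseconds(0))
        }
    }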

View File

@@ -130,7 +130,7 @@ impl Middleware {
/// Creates new middleware for UI server.
pub fn ui<F: Fetch>(
-ntp_server: &str,
+ntp_servers: &[String],
pool: CpuPool,
remote: Remote,
dapps_domain: &str,
@@ -146,7 +146,7 @@ impl Middleware {
).embeddable_on(None).allow_dapps(false));
let special = {
let mut special = special_endpoints(
-ntp_server,
+ntp_servers,
pool,
content_fetcher.clone(),
remote.clone(),
@@ -171,7 +171,7 @@ impl Middleware {
/// Creates new Dapps server middleware.
pub fn dapps<F: Fetch>(
-ntp_server: &str,
+ntp_servers: &[String],
pool: CpuPool,
remote: Remote,
ui_address: Option<(String, u16)>,
@@ -203,7 +203,7 @@ impl Middleware {
let special = {
let mut special = special_endpoints(
-ntp_server,
+ntp_servers,
pool,
content_fetcher.clone(),
remote.clone(),
@@ -237,8 +237,8 @@ impl http::RequestMiddleware for Middleware {
}
}
-fn special_endpoints(
-ntp_server: &str,
+fn special_endpoints<T: AsRef<str>>(
+ntp_servers: &[T],
pool: CpuPool,
content_fetcher: Arc<apps::fetcher::Fetcher>,
remote: Remote,
@@ -250,7 +250,7 @@ fn special_endpoints(
special.insert(router::SpecialEndpoint::Api, Some(api::RestApi::new(
content_fetcher,
sync_status,
-api::TimeChecker::new(ntp_server.into(), pool),
+api::TimeChecker::new(ntp_servers, pool),
remote,
)));
special

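Because special_endpoints and TimeChecker::new are generic over T: AsRef<str>, callers can pass slices of string literals or of owned Strings without conversion. A sketch of the call shape (assuming a CpuPool named pool is in scope):

    let from_literals = api::TimeChecker::new(&["0.parity.pool.ntp.org:123"], pool.clone());
    let owned: Vec<String> = vec!["1.parity.pool.ntp.org:123".to_owned()];
    let from_owned = api::TimeChecker::new(&owned, pool);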
View File

@@ -255,7 +255,7 @@ impl Server {
fetch: F,
) -> Result<Server, http::Error> {
let middleware = Middleware::dapps(
-"pool.ntp.org:123",
+&["0.pool.ntp.org:123".into(), "1.pool.ntp.org:123".into()],
CpuPool::new(4),
remote,
signer_address,

View File

@@ -78,7 +78,7 @@ disable_periodic = true
jit = false
[misc]
-ntp_server = "pool.ntp.org:123"
+ntp_servers = ["0.parity.pool.ntp.org:123"]
logging = "own_tx=trace"
log_file = "/var/log/parity.log"
color = true

View File

@@ -359,8 +359,8 @@ usage! {
or |c: &Config| otry!(c.vm).jit.clone(),
// -- Miscellaneous Options
-flag_ntp_server: String = "none",
-or |c: &Config| otry!(c.misc).ntp_server.clone(),
+flag_ntp_servers: String = "0.parity.pool.ntp.org:123,1.parity.pool.ntp.org:123,2.parity.pool.ntp.org:123,3.parity.pool.ntp.org:123",
+or |c: &Config| otry!(c.misc).ntp_servers.clone().map(|vec| vec.join(",")),
flag_logging: Option<String> = None,
or |c: &Config| otry!(c.misc).logging.clone().map(Some),
flag_log_file: Option<String> = None,
@@ -606,7 +606,7 @@ struct VM {
#[derive(Default, Debug, PartialEq, Deserialize)]
struct Misc {
-ntp_server: Option<String>,
+ntp_servers: Option<Vec<String>>,
logging: Option<String>,
log_file: Option<String>,
color: Option<bool>,
@@ -919,7 +919,7 @@ mod tests {
flag_dapps_apis_all: None,
// -- Miscellaneous Options
-flag_ntp_server: "none".into(),
+flag_ntp_servers: "0.parity.pool.ntp.org:123,1.parity.pool.ntp.org:123,2.parity.pool.ntp.org:123,3.parity.pool.ntp.org:123".into(),
flag_version: false,
flag_logging: Some("own_tx=trace".into()),
flag_log_file: Some("/var/log/parity.log".into()),
@@ -1098,7 +1098,7 @@ mod tests {
jit: Some(false),
}),
misc: Some(Misc {
-ntp_server: Some("pool.ntp.org:123".into()),
+ntp_servers: Some(vec!["0.parity.pool.ntp.org:123".into()]),
logging: Some("own_tx=trace".into()),
log_file: Some("/var/log/parity.log".into()),
color: Some(true),

View File

@@ -483,8 +483,10 @@ Internal Options:
--can-restart Executable will auto-restart if exiting with 69.
Miscellaneous Options:
---ntp-server HOST NTP server to provide current time (host:port). Used to verify node health.
-(default: {flag_ntp_server})
+--ntp-servers HOSTS Comma separated list of NTP servers to provide current time (host:port).
+Used to verify node health. Parity uses pool.ntp.org NTP servers,
+consider joining the pool: http://www.pool.ntp.org/join.html
+(default: {flag_ntp_servers})
-l --logging LOGGING Specify the logging level. Must conform to the same
format as RUST_LOG. (default: {flag_logging:?})
--log-file FILENAME Specify a filename into which logging should be

View File

@@ -551,10 +551,14 @@ impl Configuration {
Ok(options)
}
+fn ntp_servers(&self) -> Vec<String> {
+self.args.flag_ntp_servers.split(",").map(str::to_owned).collect()
+}
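The new helper splits the raw --ntp-servers flag on commas. One caveat worth knowing: str::split always yields at least one item, so an empty flag value would produce a vector containing a single empty string rather than an empty vector:

    fn main() {
        let flag = "0.parity.pool.ntp.org:123,1.parity.pool.ntp.org:123";
        let servers: Vec<String> = flag.split(",").map(str::to_owned).collect();
        assert_eq!(servers.len(), 2);
        assert_eq!("".split(",").count(), 1); // empty input still yields one "" entry
    }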
fn ui_config(&self) -> UiConfiguration {
UiConfiguration {
enabled: self.ui_enabled(),
-ntp_server: self.args.flag_ntp_server.clone(),
+ntp_servers: self.ntp_servers(),
interface: self.ui_interface(),
port: self.args.flag_ports_shift + self.args.flag_ui_port,
hosts: self.ui_hosts(),
@@ -564,7 +568,7 @@
fn dapps_config(&self) -> DappsConfiguration {
DappsConfiguration {
enabled: self.dapps_enabled(),
-ntp_server: self.args.flag_ntp_server.clone(),
+ntp_servers: self.ntp_servers(),
dapps_path: PathBuf::from(self.directories().dapps),
extra_dapps: if self.args.cmd_dapp {
self.args.arg_path.iter().map(|path| PathBuf::from(path)).collect()
@@ -1278,7 +1282,12 @@ mod tests {
support_token_api: true
}, UiConfiguration {
enabled: true,
-ntp_server: "none".into(),
+ntp_servers: vec![
+"0.parity.pool.ntp.org:123".into(),
+"1.parity.pool.ntp.org:123".into(),
+"2.parity.pool.ntp.org:123".into(),
+"3.parity.pool.ntp.org:123".into(),
+],
interface: "127.0.0.1".into(),
port: 8180,
hosts: Some(vec![]),
@@ -1521,10 +1530,16 @@ mod tests {
let conf3 = parse(&["parity", "--ui-path", "signer", "--ui-interface", "test"]);
// then
+let ntp_servers = vec![
+"0.parity.pool.ntp.org:123".into(),
+"1.parity.pool.ntp.org:123".into(),
+"2.parity.pool.ntp.org:123".into(),
+"3.parity.pool.ntp.org:123".into(),
+];
assert_eq!(conf0.directories().signer, "signer".to_owned());
assert_eq!(conf0.ui_config(), UiConfiguration {
enabled: true,
-ntp_server: "none".into(),
+ntp_servers: ntp_servers.clone(),
interface: "127.0.0.1".into(),
port: 8180,
hosts: Some(vec![]),
@@ -1533,7 +1548,7 @@
assert_eq!(conf1.directories().signer, "signer".to_owned());
assert_eq!(conf1.ui_config(), UiConfiguration {
enabled: true,
-ntp_server: "none".into(),
+ntp_servers: ntp_servers.clone(),
interface: "127.0.0.1".into(),
port: 8180,
hosts: Some(vec![]),
@@ -1543,7 +1558,7 @@
assert_eq!(conf2.directories().signer, "signer".to_owned());
assert_eq!(conf2.ui_config(), UiConfiguration {
enabled: true,
-ntp_server: "none".into(),
+ntp_servers: ntp_servers.clone(),
interface: "127.0.0.1".into(),
port: 3123,
hosts: Some(vec![]),
@@ -1552,7 +1567,7 @@
assert_eq!(conf3.directories().signer, "signer".to_owned());
assert_eq!(conf3.ui_config(), UiConfiguration {
enabled: true,
-ntp_server: "none".into(),
+ntp_servers: ntp_servers.clone(),
interface: "test".into(),
port: 8180,
hosts: Some(vec![]),

View File

@@ -36,7 +36,7 @@ use util::{Bytes, Address};
#[derive(Debug, PartialEq, Clone)]
pub struct Configuration {
pub enabled: bool,
-pub ntp_server: String,
+pub ntp_servers: Vec<String>,
pub dapps_path: PathBuf,
pub extra_dapps: Vec<PathBuf>,
pub extra_embed_on: Vec<(String, u16)>,
@@ -47,7 +47,12 @@ impl Default for Configuration {
let data_dir = default_data_path();
Configuration {
enabled: true,
-ntp_server: "none".into(),
+ntp_servers: vec![
+"0.parity.pool.ntp.org:123".into(),
+"1.parity.pool.ntp.org:123".into(),
+"2.parity.pool.ntp.org:123".into(),
+"3.parity.pool.ntp.org:123".into(),
+],
dapps_path: replace_home(&data_dir, "$BASE/dapps").into(),
extra_dapps: vec![],
extra_embed_on: vec![],
@@ -158,7 +163,7 @@ pub fn new(configuration: Configuration, deps: Dependencies) -> Result<Option<Mi
server::dapps_middleware(
deps,
-&configuration.ntp_server,
+&configuration.ntp_servers,
configuration.dapps_path,
configuration.extra_dapps,
rpc::DAPPS_DOMAIN,
@@ -166,14 +171,14 @@
).map(Some)
}
-pub fn new_ui(enabled: bool, ntp_server: &str, deps: Dependencies) -> Result<Option<Middleware>, String> {
+pub fn new_ui(enabled: bool, ntp_servers: &[String], deps: Dependencies) -> Result<Option<Middleware>, String> {
if !enabled {
return Ok(None);
}
server::ui_middleware(
deps,
-ntp_server,
+ntp_servers,
rpc::DAPPS_DOMAIN,
).map(Some)
}
@@ -204,7 +209,7 @@ mod server {
pub fn dapps_middleware(
_deps: Dependencies,
-_ntp_server: &str,
+_ntp_servers: &[String],
_dapps_path: PathBuf,
_extra_dapps: Vec<PathBuf>,
_dapps_domain: &str,
@@ -215,7 +220,7 @@
pub fn ui_middleware(
_deps: Dependencies,
-_ntp_server: &str,
+_ntp_servers: &[String],
_dapps_domain: &str,
) -> Result<Middleware, String> {
Err("Your Parity version has been compiled without UI support.".into())
@@ -241,7 +246,7 @@ mod server {
pub fn dapps_middleware(
deps: Dependencies,
-ntp_server: &str,
+ntp_servers: &[String],
dapps_path: PathBuf,
extra_dapps: Vec<PathBuf>,
dapps_domain: &str,
@@ -252,7 +257,7 @@
let web_proxy_tokens = Arc::new(move |token| signer.web_proxy_access_token_domain(&token));
Ok(parity_dapps::Middleware::dapps(
-ntp_server,
+ntp_servers,
deps.pool,
parity_remote,
deps.ui_address,
@@ -269,12 +274,12 @@
pub fn ui_middleware(
deps: Dependencies,
-ntp_server: &str,
+ntp_servers: &[String],
dapps_domain: &str,
) -> Result<Middleware, String> {
let parity_remote = parity_reactor::Remote::new(deps.remote.clone());
Ok(parity_dapps::Middleware::ui(
-ntp_server,
+ntp_servers,
deps.pool,
parity_remote,
dapps_domain,

View File

@@ -73,7 +73,7 @@ impl Default for HttpConfiguration {
#[derive(Debug, PartialEq, Clone)]
pub struct UiConfiguration {
pub enabled: bool,
-pub ntp_server: String,
+pub ntp_servers: Vec<String>,
pub interface: String,
pub port: u16,
pub hosts: Option<Vec<String>>,
@@ -107,7 +107,12 @@
fn default() -> Self {
UiConfiguration {
enabled: true && cfg!(feature = "ui-enabled"),
-ntp_server: "none".into(),
+ntp_servers: vec![
+"0.parity.pool.ntp.org:123".into(),
+"1.parity.pool.ntp.org:123".into(),
+"2.parity.pool.ntp.org:123".into(),
+"3.parity.pool.ntp.org:123".into(),
+],
port: 8180,
interface: "127.0.0.1".into(),
hosts: Some(vec![]),

View File

@@ -311,7 +311,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
};
let dapps_middleware = dapps::new(cmd.dapps_conf.clone(), dapps_deps.clone())?;
-let ui_middleware = dapps::new_ui(cmd.ui_conf.enabled, &cmd.ui_conf.ntp_server, dapps_deps)?;
+let ui_middleware = dapps::new_ui(cmd.ui_conf.enabled, &cmd.ui_conf.ntp_servers, dapps_deps)?;
// start RPCs
let dapps_service = dapps::service(&dapps_middleware);
@@ -687,7 +687,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
}
};
let dapps_middleware = dapps::new(cmd.dapps_conf.clone(), dapps_deps.clone())?;
-let ui_middleware = dapps::new_ui(cmd.ui_conf.enabled, &cmd.ui_conf.ntp_server, dapps_deps)?;
+let ui_middleware = dapps::new_ui(cmd.ui_conf.enabled, &cmd.ui_conf.ntp_servers, dapps_deps)?;
let dapps_service = dapps::service(&dapps_middleware);
let deps_for_rpc_apis = Arc::new(rpc_apis::FullDependencies {