2019-01-07 11:33:07 +01:00
|
|
|
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
|
|
|
// This file is part of Parity Ethereum.
|
2016-12-27 16:43:28 +01:00
|
|
|
|
2019-01-07 11:33:07 +01:00
|
|
|
// Parity Ethereum is free software: you can redistribute it and/or modify
|
2016-12-27 16:43:28 +01:00
|
|
|
// it under the terms of the GNU General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
|
2019-01-07 11:33:07 +01:00
|
|
|
// Parity Ethereum is distributed in the hope that it will be useful,
|
2016-12-27 16:43:28 +01:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU General Public License for more details.
|
|
|
|
|
|
|
|
// You should have received a copy of the GNU General Public License
|
2019-01-07 11:33:07 +01:00
|
|
|
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
2016-12-27 16:43:28 +01:00
|
|
|
|
|
|
|
//! On-demand chain requests over LES. This is a major building block for RPCs.
|
|
|
|
//! The request service is implemented using Futures. Higher level request handlers
|
|
|
|
//! will take the raw data received here and extract meaningful results from it.
|
|
|
|
|
2018-02-17 21:54:39 +01:00
|
|
|
use std::cmp;
|
2018-12-05 10:36:53 +01:00
|
|
|
use std::collections::HashMap;
|
2017-04-06 20:01:09 +02:00
|
|
|
use std::marker::PhantomData;
|
2017-02-16 20:46:59 +01:00
|
|
|
use std::sync::Arc;
|
2018-12-05 10:36:53 +01:00
|
|
|
use std::time::Duration;
|
2016-12-27 16:43:28 +01:00
|
|
|
|
2018-09-12 11:47:01 +02:00
|
|
|
use futures::{Poll, Future, Async};
|
|
|
|
use futures::sync::oneshot::{self, Receiver};
|
2016-12-27 16:43:28 +01:00
|
|
|
use network::PeerId;
|
2017-09-02 20:09:13 +02:00
|
|
|
use parking_lot::{RwLock, Mutex};
|
2018-02-08 21:36:46 +01:00
|
|
|
use rand;
|
2018-12-05 10:36:53 +01:00
|
|
|
use rand::Rng;
|
2016-12-27 16:43:28 +01:00
|
|
|
|
2017-06-30 10:58:48 +02:00
|
|
|
use net::{
|
2018-12-05 10:36:53 +01:00
|
|
|
Handler, PeerStatus, Status, Capabilities,
|
2017-06-30 12:10:12 +02:00
|
|
|
Announcement, EventContext, BasicContext, ReqId,
|
2017-06-30 10:58:48 +02:00
|
|
|
};
|
2018-12-05 10:36:53 +01:00
|
|
|
|
2017-02-16 20:46:59 +01:00
|
|
|
use cache::Cache;
|
2017-04-06 17:22:05 +02:00
|
|
|
use request::{self as basic_request, Request as NetworkRequest};
|
2017-04-07 19:35:39 +02:00
|
|
|
use self::request::CheckedRequest;
|
2017-01-03 16:18:46 +01:00
|
|
|
|
2019-03-06 15:30:35 +01:00
|
|
|
pub use ethcore::executed::ExecutionResult;
|
2018-12-05 10:36:53 +01:00
|
|
|
pub use self::request::{Request, Response, HeaderRef, Error as ValidityError};
|
|
|
|
pub use self::request_guard::{RequestGuard, Error as RequestError};
|
|
|
|
pub use self::response_guard::{ResponseGuard, Error as ResponseGuardError, Inner as ResponseGuardInner};
|
|
|
|
pub use types::request::ResponseError;
|
2017-04-07 19:35:39 +02:00
|
|
|
|
|
|
|
#[cfg(test)]
|
|
|
|
mod tests;
|
2016-12-27 16:43:28 +01:00
|
|
|
|
2017-04-07 19:35:39 +02:00
|
|
|
pub mod request;
|
2018-12-05 10:36:53 +01:00
|
|
|
mod request_guard;
|
|
|
|
mod response_guard;
|
2017-04-06 17:22:05 +02:00
|
|
|
|
2018-12-05 10:36:53 +01:00
|
|
|
/// The initial backoff interval for OnDemand queries.
pub const DEFAULT_REQUEST_MIN_BACKOFF_DURATION: Duration = Duration::from_secs(10);
/// The maximum backoff interval for OnDemand queries.
pub const DEFAULT_REQUEST_MAX_BACKOFF_DURATION: Duration = Duration::from_secs(100);
/// The default window length within which a response is evaluated.
pub const DEFAULT_RESPONSE_TIME_TO_LIVE: Duration = Duration::from_secs(10);
/// The default maximum number of backoff iterations.
pub const DEFAULT_MAX_REQUEST_BACKOFF_ROUNDS: usize = 10;
/// The default number of consecutive failed requests to be regarded as a failure.
pub const DEFAULT_NUM_CONSECUTIVE_FAILED_REQUESTS: usize = 1;
|
2018-09-12 11:47:01 +02:00
|
|
|
|
|
|
|
/// OnDemand related errors.
pub mod error {
	use futures::sync::oneshot::Canceled;

	/// OnDemand Error.
	#[derive(Debug, derive_more::Display, derive_more::From)]
	pub enum Error {
		/// The oneshot channel was canceled before a response was delivered
		/// (e.g. the requester hung up).
		ChannelCanceled(Canceled),
		/// Too many bad responses were received within the evaluation window;
		/// carries a human-readable description of the failing request(s).
		BadResponse(String),
		/// OnDemand requests limit exceeded: the maximum number of backoff
		/// rounds elapsed without a successful dispatch.
		#[display(fmt = "OnDemand request maximum backoff iterations exceeded")]
		RequestLimit,
	}

	impl std::error::Error for Error {
		fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
			// only the channel-cancellation variant wraps an underlying error.
			match self {
				Error::ChannelCanceled(err) => Some(err),
				_ => None,
			}
		}
	}

	/// OnDemand Result.
	pub type Result<T> = std::result::Result<T, Error>;
}
|
|
|
|
|
2019-03-27 14:46:20 +01:00
|
|
|
/// Public interface for performing network requests `OnDemand`.
pub trait OnDemandRequester: Send + Sync {
	/// Submit a strongly-typed batch of requests.
	///
	/// Fails if back-references are not coherent.
	fn request<T>(&self, ctx: &dyn BasicContext, requests: T) -> Result<OnResponses<T>, basic_request::NoSuchOutput>
	where
		T: request::RequestAdapter;

	/// Submit a vector of requests to be processed together.
	///
	/// Fails if back-references are not coherent.
	/// The returned vector of responses will correspond to the requests exactly.
	fn request_raw(&self, ctx: &dyn BasicContext, requests: Vec<Request>)
		-> Result<Receiver<PendingResponse>, basic_request::NoSuchOutput>;
}
|
|
|
|
|
|
|
|
|
2016-12-27 16:43:28 +01:00
|
|
|
// relevant peer info: the most recently reported status and the
// capabilities the peer advertises.
#[derive(Debug, Clone, PartialEq, Eq)]
struct Peer {
	// latest reported status of the peer (updated on announcements).
	status: Status,
	// which kinds of data the peer can serve (updated on announcements).
	capabilities: Capabilities,
}
|
|
|
|
|
2017-03-16 20:23:59 +01:00
|
|
|
impl Peer {
|
2017-07-27 13:50:12 +02:00
|
|
|
// whether this peer can fulfill the necessary capabilities for the given
|
|
|
|
// request.
|
|
|
|
fn can_fulfill(&self, request: &Capabilities) -> bool {
|
|
|
|
let local_caps = &self.capabilities;
|
|
|
|
let can_serve_since = |req, local| {
|
|
|
|
match (req, local) {
|
|
|
|
(Some(request_block), Some(serve_since)) => request_block >= serve_since,
|
|
|
|
(Some(_), None) => false,
|
|
|
|
(None, _) => true,
|
|
|
|
}
|
|
|
|
};
|
2017-04-06 15:34:48 +02:00
|
|
|
|
2017-07-27 13:50:12 +02:00
|
|
|
local_caps.serve_headers >= request.serve_headers &&
|
2018-09-06 15:44:40 +02:00
|
|
|
can_serve_since(request.serve_chain_since, local_caps.serve_chain_since) &&
|
|
|
|
can_serve_since(request.serve_state_since, local_caps.serve_state_since)
|
2017-03-16 20:23:59 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-12 11:47:01 +02:00
|
|
|
/// Either an array of responses or a single error.
type PendingResponse = self::error::Result<Vec<Response>>;
|
|
|
|
|
2017-01-03 16:18:46 +01:00
|
|
|
// Attempted request info and sender to put received value.
struct Pending {
	// the checked requests; tracks which have already been answered.
	requests: basic_request::Batch<CheckedRequest>,
	// network-ready form of the still-unanswered requests.
	net_requests: basic_request::Batch<NetworkRequest>,
	// capabilities a peer must have in order to serve the remaining requests.
	required_capabilities: Capabilities,
	// responses accumulated so far, in request order.
	responses: Vec<Response>,
	// channel used to deliver the final result (or error) to the requester.
	sender: oneshot::Sender<PendingResponse>,
	// backoff state for repeated dispatch attempts.
	request_guard: RequestGuard,
	// tracks bad/empty responses to decide when to give up on the request.
	response_guard: ResponseGuard,
}
|
|
|
|
|
2017-05-23 12:39:25 +02:00
|
|
|
impl Pending {
	// answer as many of the given requests from the supplied cache as possible.
	// TODO: support re-shuffling.
	fn answer_from_cache(&mut self, cache: &Mutex<Cache>) {
		// requests are answered strictly in order: always attempt the first
		// unanswered one, and stop at the first cache miss.
		while !self.requests.is_complete() {
			let idx = self.requests.num_answered();
			match self.requests[idx].respond_local(cache) {
				Some(response) => {
					self.requests.supply_response_unchecked(&response);

					// update header and back-references after each from-cache
					// response to ensure that the requests are left in a consistent
					// state and increase the likelihood of being able to answer
					// the next request from cache.
					self.update_header_refs(idx, &response);
					self.fill_unanswered();

					self.responses.push(response);
				}
				None => break,
			}
		}
	}

	// update header refs if the given response contains a header future requests require for
	// verification.
	// `idx` is the index of the request the response corresponds to.
	fn update_header_refs(&mut self, idx: usize, response: &Response) {
		if let Response::HeaderByHash(ref hdr) = *response {
			// fill the header for all requests waiting on this one.
			// TODO: could be faster if we stored a map usize => Vec<usize>
			// but typical use just has one header request that others
			// depend on.
			for r in self.requests.iter_mut().skip(idx + 1) {
				if r.needs_header().map_or(false, |(i, _)| i == idx) {
					r.provide_header(hdr.clone())
				}
			}
		}
	}

	// supply a response received from the network, verifying it and recording
	// it on success. errors are propagated to the caller for peer scoring.
	fn supply_response(&mut self, cache: &Mutex<Cache>, response: &basic_request::Response)
		-> Result<(), basic_request::ResponseError<self::request::Error>>
	{
		match self.requests.supply_response(&cache, response) {
			Ok(response) => {
				// responses are appended in order, so the new response's index
				// equals the current length of the accumulated responses.
				let idx = self.responses.len();
				self.update_header_refs(idx, &response);
				self.responses.push(response);
				Ok(())
			}
			Err(e) => Err(e),
		}
	}

	// if the requests are complete, send the result and consume self.
	// otherwise, return self for further dispatch.
	fn try_complete(self) -> Option<Self> {
		if self.requests.is_complete() {
			if self.sender.send(Ok(self.responses)).is_err() {
				debug!(target: "on_demand", "Dropped oneshot channel receiver on request");
			}
			None
		} else {
			Some(self)
		}
	}

	// propagate already-known outputs into the back-references of the
	// remaining unanswered requests.
	fn fill_unanswered(&mut self) {
		self.requests.fill_unanswered();
	}

	// update the cached network requests.
	fn update_net_requests(&mut self) {
		use request::IncompleteRequest;

		let mut builder = basic_request::Builder::default();
		let num_answered = self.requests.num_answered();
		// re-index back-references relative to the first unanswered request.
		let mut mapping = move |idx| idx - num_answered;

		for request in self.requests.iter().skip(num_answered) {
			let mut net_req = request.clone().into_net_request();

			// all back-references with request index less than `num_answered` have
			// been filled by now. all remaining requests point to nothing earlier
			// than the next unanswered request.
			net_req.adjust_refs(&mut mapping);
			builder.push(net_req)
				.expect("all back-references to answered requests have been filled; qed");
		}

		// update pending fields.
		let capabilities = guess_capabilities(&self.requests[num_answered..]);
		self.net_requests = builder.build();
		self.required_capabilities = capabilities;
	}

	// received too many empty responses, may be a way to indicate a faulty request.
	// reports the failure to the requester and consumes self.
	fn bad_response(self, response_err: ResponseGuardError) {
		// collect a human-readable name for each request in the batch, for the
		// error message.
		let reqs: Vec<&str> = self.requests.requests().iter().map(|req| {
			match req {
				CheckedRequest::HeaderProof(_, _) => "HeaderProof",
				CheckedRequest::HeaderByHash(_, _) => "HeaderByHash",
				CheckedRequest::HeaderWithAncestors(_, _) => "HeaderWithAncestors",
				CheckedRequest::TransactionIndex(_, _) => "TransactionIndex",
				CheckedRequest::Receipts(_, _) => "Receipts",
				CheckedRequest::Body(_, _) => "Body",
				CheckedRequest::Account(_, _) => "Account",
				CheckedRequest::Code(_, _) => "Code",
				CheckedRequest::Execution(_, _) => "Execution",
				CheckedRequest::Signal(_, _) => "Signal",
			}
		}).collect();

		let err = format!("Bad response on {}: [ {} ]. {}",
			if reqs.len() > 1 { "requests" } else { "request" },
			reqs.join(", "),
			response_err
		);

		let err = self::error::Error::BadResponse(err);
		if self.sender.send(Err(err.into())).is_err() {
			debug!(target: "on_demand", "Dropped oneshot channel receiver on no response");
		}
	}

	// the maximum number of dispatch attempts was reached; report the failure
	// to the requester and consume self.
	fn request_limit_reached(self) {
		let err = self::error::Error::RequestLimit;
		if self.sender.send(Err(err.into())).is_err() {
			debug!(target: "on_demand", "Dropped oneshot channel receiver on time out");
		}
	}
}
|
|
|
|
|
2017-04-06 15:34:48 +02:00
|
|
|
// helper to guess capabilities required for a given batch of network requests.
|
|
|
|
fn guess_capabilities(requests: &[CheckedRequest]) -> Capabilities {
|
|
|
|
let mut caps = Capabilities {
|
|
|
|
serve_headers: false,
|
|
|
|
serve_chain_since: None,
|
|
|
|
serve_state_since: None,
|
|
|
|
tx_relay: false,
|
|
|
|
};
|
|
|
|
|
|
|
|
let update_since = |current: &mut Option<u64>, new|
|
|
|
|
*current = match *current {
|
|
|
|
Some(x) => Some(::std::cmp::min(x, new)),
|
|
|
|
None => Some(new),
|
|
|
|
};
|
|
|
|
|
|
|
|
for request in requests {
|
|
|
|
match *request {
|
|
|
|
// TODO: might be worth returning a required block number for this also.
|
|
|
|
CheckedRequest::HeaderProof(_, _) =>
|
|
|
|
caps.serve_headers = true,
|
|
|
|
CheckedRequest::HeaderByHash(_, _) =>
|
|
|
|
caps.serve_headers = true,
|
2018-08-25 23:06:01 +02:00
|
|
|
CheckedRequest::HeaderWithAncestors(_, _) =>
|
|
|
|
caps.serve_headers = true,
|
2017-10-08 18:19:27 +02:00
|
|
|
CheckedRequest::TransactionIndex(_, _) => {} // hashes yield no info.
|
2017-09-05 17:54:05 +02:00
|
|
|
CheckedRequest::Signal(_, _) =>
|
|
|
|
caps.serve_headers = true,
|
2017-05-23 12:39:25 +02:00
|
|
|
CheckedRequest::Body(ref req, _) => if let Ok(ref hdr) = req.0.as_ref() {
|
|
|
|
update_since(&mut caps.serve_chain_since, hdr.number());
|
|
|
|
},
|
|
|
|
CheckedRequest::Receipts(ref req, _) => if let Ok(ref hdr) = req.0.as_ref() {
|
|
|
|
update_since(&mut caps.serve_chain_since, hdr.number());
|
|
|
|
},
|
|
|
|
CheckedRequest::Account(ref req, _) => if let Ok(ref hdr) = req.header.as_ref() {
|
|
|
|
update_since(&mut caps.serve_state_since, hdr.number());
|
|
|
|
},
|
|
|
|
CheckedRequest::Code(ref req, _) => if let Ok(ref hdr) = req.header.as_ref() {
|
|
|
|
update_since(&mut caps.serve_state_since, hdr.number());
|
|
|
|
},
|
|
|
|
CheckedRequest::Execution(ref req, _) => if let Ok(ref hdr) = req.header.as_ref() {
|
|
|
|
update_since(&mut caps.serve_state_since, hdr.number());
|
|
|
|
},
|
2017-03-16 20:23:59 +01:00
|
|
|
}
|
|
|
|
}
|
2017-04-06 15:34:48 +02:00
|
|
|
|
|
|
|
caps
|
2017-03-16 20:23:59 +01:00
|
|
|
}
|
|
|
|
|
2017-04-06 20:01:09 +02:00
|
|
|
/// A future extracting the concrete output type of the generic adapter
/// from a vector of responses.
pub struct OnResponses<T: request::RequestAdapter> {
	// receives the raw batch result once the request set completes or fails.
	receiver: Receiver<PendingResponse>,
	// ties the future to the adapter's output type; no `T` value is stored.
	_marker: PhantomData<T>,
}
|
|
|
|
|
|
|
|
impl<T: request::RequestAdapter> Future for OnResponses<T> {
|
|
|
|
type Item = T::Out;
|
2018-09-12 11:47:01 +02:00
|
|
|
type Error = self::error::Error;
|
2017-04-06 20:01:09 +02:00
|
|
|
|
|
|
|
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
2018-09-12 11:47:01 +02:00
|
|
|
match self.receiver.poll() {
|
|
|
|
Ok(Async::Ready(Ok(v))) => Ok(Async::Ready(T::extract_from(v))),
|
|
|
|
Ok(Async::Ready(Err(e))) => Err(e),
|
|
|
|
Ok(Async::NotReady) => Ok(Async::NotReady),
|
|
|
|
Err(e) => Err(e.into()),
|
|
|
|
}
|
2017-04-06 20:01:09 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-27 16:43:28 +01:00
|
|
|
/// On demand request service. See module docs for more details.
/// Accumulates info about all peers' capabilities and dispatches
/// requests to them accordingly.
// lock in declaration order.
pub struct OnDemand {
	// request sets not yet dispatched to any peer.
	pending: RwLock<Vec<Pending>>,
	// status and capabilities of each connected peer.
	peers: RwLock<HashMap<PeerId, Peer>>,
	// dispatched requests currently awaiting a response, keyed by request id.
	in_transit: RwLock<HashMap<ReqId, Pending>>,
	// shared cache used to answer requests locally where possible.
	cache: Arc<Mutex<Cache>>,
	// when true (test mode), submitted requests are not dispatched
	// until triggered manually.
	no_immediate_dispatch: bool,
	// window within which responses are evaluated (see `ResponseGuard`).
	response_time_window: Duration,
	// initial backoff interval between failed dispatch rounds.
	request_backoff_start: Duration,
	// upper bound on the backoff interval.
	request_backoff_max: Duration,
	// maximum number of backoff rounds before the request is abandoned.
	request_backoff_rounds_max: usize,
	// number of consecutive errors regarded as one failed round.
	request_number_of_consecutive_errors: usize
}
|
|
|
|
|
2019-03-27 14:46:20 +01:00
|
|
|
impl OnDemandRequester for OnDemand {
	fn request_raw(&self, ctx: &dyn BasicContext, requests: Vec<Request>)
		-> Result<Receiver<PendingResponse>, basic_request::NoSuchOutput>
	{
		let (sender, receiver) = oneshot::channel();
		// an empty batch completes immediately with an empty response set.
		if requests.is_empty() {
			assert!(sender.send(Ok(Vec::new())).is_ok(), "receiver still in scope; qed");
			return Ok(receiver);
		}

		let mut builder = basic_request::Builder::default();

		let responses = Vec::with_capacity(requests.len());

		// maps request index => header field produced at that index,
		// used to validate header back-references below.
		let mut header_producers = HashMap::new();
		for (i, request) in requests.into_iter().enumerate() {
			let request = CheckedRequest::from(request);

			// ensure that all requests needing headers will get them.
			if let Some((idx, field)) = request.needs_header() {
				// a request chain with a header back-reference is valid only if it both
				// points to a request that returns a header and has the same back-reference
				// for the block hash.
				match header_producers.get(&idx) {
					Some(ref f) if &field == *f => {}
					_ => return Err(basic_request::NoSuchOutput),
				}
			}
			if let CheckedRequest::HeaderByHash(ref req, _) = request {
				header_producers.insert(i, req.0);
			}

			builder.push(request)?;
		}

		let requests = builder.build();
		let net_requests = requests.clone().map_requests(|req| req.into_net_request());
		let capabilities = guess_capabilities(requests.requests());

		// answer from cache where possible; the remainder is scheduled for
		// network dispatch with fresh guard state.
		self.submit_pending(ctx, Pending {
			requests,
			net_requests,
			required_capabilities: capabilities,
			responses,
			sender,
			request_guard: RequestGuard::new(
				self.request_number_of_consecutive_errors as u32,
				self.request_backoff_rounds_max,
				self.request_backoff_start,
				self.request_backoff_max,
			),
			response_guard: ResponseGuard::new(self.response_time_window),
		});

		Ok(receiver)
	}

	fn request<T>(&self, ctx: &dyn BasicContext, requests: T) -> Result<OnResponses<T>, basic_request::NoSuchOutput>
		where T: request::RequestAdapter
	{
		// delegate to `request_raw` and wrap the receiver in a typed future.
		self.request_raw(ctx, requests.make_requests()).map(|recv| OnResponses {
			receiver: recv,
			_marker: PhantomData,
		})
	}
}
|
|
|
|
|
|
|
|
impl OnDemand {
	/// Create a new `OnDemand` service with the given cache.
	///
	/// Durations shorter than one second are clamped up to one second
	/// (see `sanitize_circuit_breaker_input`).
	pub fn new(
		cache: Arc<Mutex<Cache>>,
		response_time_window: Duration,
		request_backoff_start: Duration,
		request_backoff_max: Duration,
		request_backoff_rounds_max: usize,
		request_number_of_consecutive_errors: usize,
	) -> Self {

		Self {
			pending: RwLock::new(Vec::new()),
			peers: RwLock::new(HashMap::new()),
			in_transit: RwLock::new(HashMap::new()),
			cache,
			no_immediate_dispatch: false,
			response_time_window: Self::sanitize_circuit_breaker_input(response_time_window, "Response time window"),
			request_backoff_start: Self::sanitize_circuit_breaker_input(request_backoff_start, "Request initial backoff time window"),
			request_backoff_max: Self::sanitize_circuit_breaker_input(request_backoff_max, "Request maximum backoff time window"),
			request_backoff_rounds_max,
			request_number_of_consecutive_errors,
		}
	}

	// clamp a configured duration to at least one second, warning if it had
	// to be adjusted. `name` identifies the setting in the log message.
	fn sanitize_circuit_breaker_input(dur: Duration, name: &'static str) -> Duration {
		if dur.as_secs() < 1 {
			warn!(target: "on_demand",
				"{} is too short must be at least 1 second, configuring it to 1 second", name);
			Duration::from_secs(1)
		} else {
			dur
		}
	}

	// make a test version: this doesn't dispatch pending requests
	// until you trigger it manually.
	#[cfg(test)]
	fn new_test(
		cache: Arc<Mutex<Cache>>,
		request_ttl: Duration,
		request_backoff_start: Duration,
		request_backoff_max: Duration,
		request_backoff_rounds_max: usize,
		request_number_of_consecutive_errors: usize,
	) -> Self {
		let mut me = OnDemand::new(
			cache,
			request_ttl,
			request_backoff_start,
			request_backoff_max,
			request_backoff_rounds_max,
			request_number_of_consecutive_errors,
		);
		me.no_immediate_dispatch = true;

		me
	}

	// maybe dispatch pending requests.
	// no-op in test mode (`no_immediate_dispatch`), where dispatch is
	// triggered manually instead.
	fn attempt_dispatch(&self, ctx: &dyn BasicContext) {
		if !self.no_immediate_dispatch {
			self.dispatch_pending(ctx)
		}
	}

	// dispatch pending requests, and discard those for which the corresponding
	// receiver has been dropped.
	fn dispatch_pending(&self, ctx: &dyn BasicContext) {
		// fast path: nothing to dispatch.
		if self.pending.read().is_empty() {
			return
		}

		let mut pending = self.pending.write();

		// iterate over all pending requests, and check them for hang-up.
		// then, try and find a peer who can serve it.
		let peers = self.peers.read();

		*pending = ::std::mem::replace(&mut *pending, Vec::new())
			.into_iter()
			// drop requests whose receiver has hung up.
			.filter(|pending| !pending.sender.is_canceled())
			.filter_map(|mut pending| {

				let num_peers = peers.len();
				// The first peer to dispatch the request is chosen at random
				let rand = rand::thread_rng().gen_range(0, cmp::max(1, num_peers));

				// walk all peers once, starting from the random offset.
				for (peer_id, peer) in peers
					.iter()
					.cycle()
					.skip(rand)
					.take(num_peers)
				{

					if !peer.can_fulfill(&pending.required_capabilities) {
						trace!(target: "on_demand", "Peer {} without required capabilities, skipping", peer_id);
						continue
					}

					if pending.request_guard.is_call_permitted() {
						if let Ok(req_id) = ctx.request_from(*peer_id, pending.net_requests.clone()) {
							// dispatched: move the request into the in-transit map.
							self.in_transit.write().insert(req_id, pending);
							return None;
						}
					}
				}

				// Register that the request round failed
				if let RequestError::ReachedLimit = pending.request_guard.register_error() {
					// no more retries allowed: report failure and drop the request.
					pending.request_limit_reached();
					None
				} else {
					Some(pending)
				}
			})
			.collect(); // `pending` now contains all requests we couldn't dispatch

		trace!(target: "on_demand", "Was unable to dispatch {} requests.", pending.len());
	}

	// submit a pending request set. attempts to answer from cache before
	// going to the network. if complete, sends response and consumes the struct.
	fn submit_pending(&self, ctx: &dyn BasicContext, mut pending: Pending) {
		// answer as many requests from cache as we can, and schedule for dispatch
		// if incomplete.

		pending.answer_from_cache(&*self.cache);
		if let Some(mut pending) = pending.try_complete() {
			// update cached requests
			pending.update_net_requests();
			// push into `pending` buffer
			self.pending.write().push(pending);
			// try to dispatch
			self.attempt_dispatch(ctx);
		}
	}
}
|
2017-01-03 16:18:46 +01:00
|
|
|
|
|
|
|
impl Handler for OnDemand {
	// a new peer connected: record it and try to dispatch any waiting requests.
	fn on_connect(
		&self,
		ctx: &dyn EventContext,
		status: &Status,
		capabilities: &Capabilities
	) -> PeerStatus {
		self.peers.write().insert(
			ctx.peer(),
			Peer { status: status.clone(), capabilities: *capabilities }
		);
		self.attempt_dispatch(ctx.as_basic());
		PeerStatus::Kept
	}

	// a peer disconnected: forget it and re-queue any requests that were
	// in flight to it so they can be reassigned.
	fn on_disconnect(&self, ctx: &dyn EventContext, unfulfilled: &[ReqId]) {
		self.peers.write().remove(&ctx.peer());
		let ctx = ctx.as_basic();

		{
			let mut pending = self.pending.write();
			for unfulfilled in unfulfilled {
				if let Some(unfulfilled) = self.in_transit.write().remove(unfulfilled) {
					trace!(target: "on_demand", "Attempting to reassign dropped request");
					pending.push(unfulfilled);
				}
			}
		}

		self.attempt_dispatch(ctx);
	}

	// a peer announced new state: refresh its recorded status/capabilities,
	// then try to dispatch (it may now be able to serve waiting requests).
	fn on_announcement(&self, ctx: &dyn EventContext, announcement: &Announcement) {
		{
			let mut peers = self.peers.write();
			if let Some(ref mut peer) = peers.get_mut(&ctx.peer()) {
				peer.status.update_from(&announcement);
				peer.capabilities.update_from(&announcement);
			}
		}

		self.attempt_dispatch(ctx.as_basic());
	}

	// responses arrived for an in-transit request set.
	fn on_responses(&self, ctx: &dyn EventContext, req_id: ReqId, responses: &[basic_request::Response]) {
		// unknown request id (e.g. already completed or reassigned): ignore.
		let mut pending = match self.in_transit.write().remove(&req_id) {
			Some(req) => req,
			None => return,
		};

		if responses.is_empty() {
			// Max number of `bad` responses reached, drop the request
			if let Err(e) = pending.response_guard.register_error(&ResponseError::Validity(ValidityError::Empty)) {
				pending.bad_response(e);
				return;
			}
		}

		// for each incoming response
		// 1. ensure verification data filled.
		// 2. pending.requests.supply_response
		// 3. if extracted on-demand response, keep it for later.
		for response in responses {
			if let Err(e) = pending.supply_response(&*self.cache, response) {
				let peer = ctx.peer();
				debug!(target: "on_demand", "Peer {} gave bad response: {:?}", peer, e);
				ctx.disable_peer(peer);

				// Max number of `bad` responses reached, drop the request
				if let Err(err) = pending.response_guard.register_error(&e) {
					pending.bad_response(err);
					return;
				}
			}
		}

		pending.fill_unanswered();
		// re-submit: completes if everything is answered, otherwise re-queues.
		self.submit_pending(ctx.as_basic(), pending);
	}

	// periodic tick: retry dispatching anything still pending.
	fn tick(&self, ctx: &dyn BasicContext) {
		self.attempt_dispatch(ctx)
	}
}
|