Allow dropping light client RPC query with no results (#9318)

* OnDemand no longer loops until there is a query.
All peers known at the time will be queried, and the query fails if all
return no reply.
Returning the failure is done through an empty Vec of replies (the type
of the oneshot channel remains unchanged).
Before this commit the query was sent randomly to any peer until there
was a reply (for a query that got no result this was an issue; for other
queries it meant querying the same peers multiple times).
After this commit the first query is random, but subsequent queries
follow the hashmap iterator order.

Test no_capability was broken by this commit (the pending query was
removed).

* OnDemand no longer loops until there is a query.
All peers known at the time will be queried, and the query fails if all
return no reply.
Returning the failure is done through an empty Vec of replies (the type
of the oneshot channel remains unchanged).
Before this commit the query was sent randomly to any peer until there
was a reply (for a query that got no result this was an issue; for other
queries it meant querying the same peers multiple times).
After this commit the first query is random, but subsequent queries
follow the hashmap iterator order.

Test no_capability was broken by this commit (the pending query was
removed). If some kind of timeout mechanism is added, it could be restored.

* Comment plus better field names.

* No panic on dropped oneshot channel.

* Use a Set to avoid the counter heuristic

* CLI option `on_demand_nb_retry` for the maximum number of retries when doing
an on-demand query in the light client.

* Missing test update for previous commit

* Add a timeout (only when there is no peer to query); that way we do not
cap the number of queries at the minimum of the current peer count and the
configured number of queries. This restored the capability test.

* Add an error type for on_demand; it helps report error variants at the
RPC level (the choice of RPC error codes might not be right).

* Duration as constant is nice

* Switch to duration in main too

* Fix indentation (sorry for that).

* Fix error management (bad merge in previous commit)

* Lots of English corrections; major changes to the new command parameters:
 - use the standard '-' instead of '_'
 - rename the nb_retry param to 'on-demand-retry-count'
This commit is contained in:
cheme 2018-09-12 11:47:01 +02:00 committed by Afri Schoedon
parent 69667317c1
commit 61f4534e2a
10 changed files with 299 additions and 65 deletions

View File

@ -19,17 +19,18 @@
//! will take the raw data received here and extract meaningful results from it.
use std::cmp;
use std::collections::HashMap;
use std::collections::{HashMap, BTreeSet};
use std::marker::PhantomData;
use std::sync::Arc;
use ethcore::executed::{Executed, ExecutionError};
use futures::{Poll, Future};
use futures::sync::oneshot::{self, Receiver, Canceled};
use futures::{Poll, Future, Async};
use futures::sync::oneshot::{self, Receiver};
use network::PeerId;
use parking_lot::{RwLock, Mutex};
use rand;
use std::time::{Duration, SystemTime};
use net::{
self, Handler, PeerStatus, Status, Capabilities,
@ -49,7 +50,45 @@ pub mod request;
/// The result of execution
pub type ExecutionResult = Result<Executed, ExecutionError>;
/// The default number of retries for OnDemand queries to send to the other nodes
pub const DEFAULT_RETRY_COUNT: usize = 10;
/// The default time limit in milliseconds for inactive (no new peer to connect to) OnDemand queries (0 for unlimited)
pub const DEFAULT_QUERY_TIME_LIMIT: Duration = Duration::from_millis(10000);
const NULL_DURATION: Duration = Duration::from_secs(0);
/// OnDemand related errors
pub mod error {
use futures::sync::oneshot::Canceled;
error_chain! {
foreign_links {
ChannelCanceled(Canceled) #[doc = "Canceled oneshot channel"];
}
errors {
#[doc = "Max number of on-demand query attempts reached without result."]
MaxAttemptReach(query_index: usize) {
description("On-demand query limit reached")
display("On-demand query limit reached on query #{}", query_index)
}
#[doc = "No reply with current peer set, time out occured while waiting for new peers for additional query attempt."]
TimeoutOnNewPeers(query_index: usize, remaining_attempts: usize) {
description("Timeout for On-demand query")
display("Timeout for On-demand query; {} query attempts remain for query #{}", remaining_attempts, query_index)
}
}
}
}
// relevant peer info.
#[derive(Debug, Clone, PartialEq, Eq)]
struct Peer {
status: Status,
capabilities: Capabilities,
@ -74,13 +113,21 @@ impl Peer {
}
}
/// Either an array of responses or a single error.
type PendingResponse = self::error::Result<Vec<Response>>;
// Attempted request info and sender to put received value.
struct Pending {
requests: basic_request::Batch<CheckedRequest>,
net_requests: basic_request::Batch<NetworkRequest>,
required_capabilities: Capabilities,
responses: Vec<Response>,
sender: oneshot::Sender<Vec<Response>>,
sender: oneshot::Sender<PendingResponse>,
base_query_index: usize,
remaining_query_count: usize,
query_id_history: BTreeSet<PeerId>,
inactive_time_limit: Option<SystemTime>,
}
impl Pending {
@ -142,7 +189,9 @@ impl Pending {
// if the requests are complete, send the result and consume self.
fn try_complete(self) -> Option<Self> {
if self.requests.is_complete() {
let _ = self.sender.send(self.responses);
if self.sender.send(Ok(self.responses)).is_err() {
debug!(target: "on_demand", "Dropped oneshot channel receiver on complete request at query #{}", self.query_id_history.len());
}
None
} else {
Some(self)
@ -177,6 +226,25 @@ impl Pending {
self.net_requests = builder.build();
self.required_capabilities = capabilities;
}
// returning no reponse, it will result in an error.
// self is consumed on purpose.
fn no_response(self) {
trace!(target: "on_demand", "Dropping a pending query (no reply) at query #{}", self.query_id_history.len());
let err = self::error::ErrorKind::MaxAttemptReach(self.requests.num_answered());
if self.sender.send(Err(err.into())).is_err() {
debug!(target: "on_demand", "Dropped oneshot channel receiver on no response");
}
}
// returning a peer discovery timeout during query attempts
fn time_out(self) {
trace!(target: "on_demand", "Dropping a pending query (no new peer time out) at query #{}", self.query_id_history.len());
let err = self::error::ErrorKind::TimeoutOnNewPeers(self.requests.num_answered(), self.query_id_history.len());
if self.sender.send(Err(err.into())).is_err() {
debug!(target: "on_demand", "Dropped oneshot channel receiver on time out");
}
}
}
// helper to guess capabilities required for a given batch of network requests.
@ -230,16 +298,21 @@ fn guess_capabilities(requests: &[CheckedRequest]) -> Capabilities {
/// A future extracting the concrete output type of the generic adapter
/// from a vector of responses.
pub struct OnResponses<T: request::RequestAdapter> {
receiver: Receiver<Vec<Response>>,
receiver: Receiver<PendingResponse>,
_marker: PhantomData<T>,
}
impl<T: request::RequestAdapter> Future for OnResponses<T> {
type Item = T::Out;
type Error = Canceled;
type Error = self::error::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.receiver.poll().map(|async| async.map(T::extract_from))
match self.receiver.poll() {
Ok(Async::Ready(Ok(v))) => Ok(Async::Ready(T::extract_from(v))),
Ok(Async::Ready(Err(e))) => Err(e),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(e) => Err(e.into()),
}
}
}
@ -253,9 +326,12 @@ pub struct OnDemand {
in_transit: RwLock<HashMap<ReqId, Pending>>,
cache: Arc<Mutex<Cache>>,
no_immediate_dispatch: bool,
base_retry_count: usize,
query_inactive_time_limit: Option<Duration>,
}
impl OnDemand {
/// Create a new `OnDemand` service with the given cache.
pub fn new(cache: Arc<Mutex<Cache>>) -> Self {
OnDemand {
@ -264,6 +340,8 @@ impl OnDemand {
in_transit: RwLock::new(HashMap::new()),
cache,
no_immediate_dispatch: false,
base_retry_count: DEFAULT_RETRY_COUNT,
query_inactive_time_limit: Some(DEFAULT_QUERY_TIME_LIMIT),
}
}
@ -282,11 +360,11 @@ impl OnDemand {
/// Fails if back-references are not coherent.
/// The returned vector of responses will correspond to the requests exactly.
pub fn request_raw(&self, ctx: &BasicContext, requests: Vec<Request>)
-> Result<Receiver<Vec<Response>>, basic_request::NoSuchOutput>
-> Result<Receiver<PendingResponse>, basic_request::NoSuchOutput>
{
let (sender, receiver) = oneshot::channel();
if requests.is_empty() {
assert!(sender.send(Vec::new()).is_ok(), "receiver still in scope; qed");
assert!(sender.send(Ok(Vec::new())).is_ok(), "receiver still in scope; qed");
return Ok(receiver);
}
@ -325,6 +403,10 @@ impl OnDemand {
required_capabilities: capabilities,
responses,
sender,
base_query_index: 0,
remaining_query_count: 0,
query_id_history: BTreeSet::new(),
inactive_time_limit: None,
});
Ok(receiver)
@ -363,30 +445,68 @@ impl OnDemand {
let peers = self.peers.read();
*pending = ::std::mem::replace(&mut *pending, Vec::new()).into_iter()
.filter(|pending| !pending.sender.is_canceled())
.filter_map(|pending| {
.filter_map(|mut pending| {
// the peer we dispatch to is chosen randomly
let num_peers = peers.len();
let rng = rand::random::<usize>() % cmp::max(num_peers, 1);
for (peer_id, peer) in peers.iter().chain(peers.iter()).skip(rng).take(num_peers) {
let history_len = pending.query_id_history.len();
let offset = if history_len == 0 {
pending.remaining_query_count = self.base_retry_count;
let rand = rand::random::<usize>();
pending.base_query_index = rand;
rand
} else {
pending.base_query_index + history_len
} % cmp::max(num_peers, 1);
let init_remaining_query_count = pending.remaining_query_count; // to fail in case of big reduction of nb of peers
for (peer_id, peer) in peers.iter().chain(peers.iter())
.skip(offset).take(num_peers) {
// TODO: see which requests can be answered by the cache?
if !peer.can_fulfill(&pending.required_capabilities) {
continue
if pending.remaining_query_count == 0 {
break
}
match ctx.request_from(*peer_id, pending.net_requests.clone()) {
Ok(req_id) => {
trace!(target: "on_demand", "Dispatched request {} to peer {}", req_id, peer_id);
self.in_transit.write().insert(req_id, pending);
return None
if pending.query_id_history.insert(peer_id.clone()) {
if !peer.can_fulfill(&pending.required_capabilities) {
trace!(target: "on_demand", "Peer {} without required capabilities, skipping, {} remaining attempts", peer_id, pending.remaining_query_count);
continue
}
pending.remaining_query_count -= 1;
pending.inactive_time_limit = None;
match ctx.request_from(*peer_id, pending.net_requests.clone()) {
Ok(req_id) => {
trace!(target: "on_demand", "Dispatched request {} to peer {}, {} remaining attempts", req_id, peer_id, pending.remaining_query_count);
self.in_transit.write().insert(req_id, pending);
return None
}
Err(net::Error::NoCredits) | Err(net::Error::NotServer) => {}
Err(e) => debug!(target: "on_demand", "Error dispatching request to peer: {}", e),
}
Err(net::Error::NoCredits) | Err(net::Error::NotServer) => {}
Err(e) => debug!(target: "on_demand", "Error dispatching request to peer: {}", e),
}
}
// TODO: maximum number of failures _when we have peers_.
Some(pending)
if pending.remaining_query_count == 0 {
pending.no_response();
None
} else if init_remaining_query_count == pending.remaining_query_count {
if let Some(query_inactive_time_limit) = self.query_inactive_time_limit {
let now = SystemTime::now();
if let Some(inactive_time_limit) = pending.inactive_time_limit {
if now > inactive_time_limit {
pending.time_out();
return None
}
} else {
debug!(target: "on_demand", "No more peers to query, waiting for {} seconds until dropping query", query_inactive_time_limit.as_secs());
pending.inactive_time_limit = Some(now + query_inactive_time_limit);
}
}
Some(pending)
} else {
Some(pending)
}
})
.collect(); // `pending` now contains all requests we couldn't dispatch.
@ -406,6 +526,21 @@ impl OnDemand {
self.attempt_dispatch(ctx);
}
}
/// Set the retry count for a query.
pub fn default_retry_number(&mut self, nb_retry: usize) {
self.base_retry_count = nb_retry;
}
/// Set the time limit for a query.
pub fn query_inactive_time_limit(&mut self, inactive_time_limit: Duration) {
self.query_inactive_time_limit = if inactive_time_limit == NULL_DURATION {
None
} else {
Some(inactive_time_limit)
};
}
}
impl Handler for OnDemand {
@ -458,6 +593,16 @@ impl Handler for OnDemand {
None => return,
};
if responses.is_empty() {
if pending.remaining_query_count == 0 {
pending.no_response();
return;
}
} else {
// do not keep query counter for others elements of this batch
pending.query_id_history.clear();
}
// for each incoming response
// 1. ensure verification data filled.
// 2. pending.requests.supply_response

@ -1 +1 @@
Subproject commit d17bfb6962041c4ac7f82eb79f72eef8d42f9447
Subproject commit 0edbf860ff7ed4b6b6336097ba44836e8c6482dd

View File

@ -407,7 +407,7 @@ usage! {
"--port=[PORT]",
"Override the port on which the node should listen.",
ARG arg_interface: (String) = "all", or |c: &Config| c.network.as_ref()?.interface.clone(),
ARG arg_interface: (String) = "all", or |c: &Config| c.network.as_ref()?.interface.clone(),
"--interface=[IP]",
"Network interfaces. Valid values are 'all', 'local' or the ip of the interface you want parity to listen to.",
@ -471,7 +471,7 @@ usage! {
"--jsonrpc-port=[PORT]",
"Specify the port portion of the HTTP JSON-RPC API server.",
ARG arg_jsonrpc_interface: (String) = "local", or |c: &Config| c.rpc.as_ref()?.interface.clone(),
ARG arg_jsonrpc_interface: (String) = "local", or |c: &Config| c.rpc.as_ref()?.interface.clone(),
"--jsonrpc-interface=[IP]",
"Specify the hostname portion of the HTTP JSON-RPC API server, IP should be an interface's IP address, or all (all interfaces) or local.",
@ -508,7 +508,7 @@ usage! {
"--ws-port=[PORT]",
"Specify the port portion of the WebSockets JSON-RPC server.",
ARG arg_ws_interface: (String) = "local", or |c: &Config| c.websockets.as_ref()?.interface.clone(),
ARG arg_ws_interface: (String) = "local", or |c: &Config| c.websockets.as_ref()?.interface.clone(),
"--ws-interface=[IP]",
"Specify the hostname portion of the WebSockets JSON-RPC server, IP should be an interface's IP address, or all (all interfaces) or local.",
@ -562,6 +562,15 @@ usage! {
"--ipfs-api-cors=[URL]",
"Specify CORS header for IPFS API responses. Special options: \"all\", \"none\".",
["Light Client Options"]
ARG arg_on_demand_retry_count: (Option<usize>) = None, or |c: &Config| c.light.as_ref()?.on_demand_retry_count,
"--on-demand-retry-count=[RETRIES]",
"Specify the query retry count.",
ARG arg_on_demand_inactive_time_limit: (Option<u64>) = None, or |c: &Config| c.light.as_ref()?.on_demand_inactive_time_limit,
"--on-demand-inactive-time-limit=[MS]",
"Specify light client query inactive time limit. O for no limit.",
["Secret Store Options"]
FLAG flag_no_secretstore: (bool) = false, or |c: &Config| c.secretstore.as_ref()?.disable.clone(),
"--no-secretstore",
@ -875,7 +884,7 @@ usage! {
"Target size of the whisper message pool in megabytes.",
["Legacy Options"]
// Options that are hidden from config, but are still unique for its functionality.
// Options that are hidden from config, but are still unique for its functionality.
FLAG flag_geth: (bool) = false, or |_| None,
"--geth",
@ -1100,6 +1109,7 @@ struct Config {
misc: Option<Misc>,
stratum: Option<Stratum>,
whisper: Option<Whisper>,
light: Option<Light>,
}
#[derive(Default, Debug, PartialEq, Deserialize)]
@ -1364,12 +1374,19 @@ struct Whisper {
pool_size: Option<usize>,
}
#[derive(Default, Debug, PartialEq, Deserialize)]
#[serde(deny_unknown_fields)]
struct Light {
on_demand_retry_count: Option<usize>,
on_demand_inactive_time_limit: Option<u64>,
}
#[cfg(test)]
mod tests {
use super::{
Args, ArgsError,
Config, Operating, Account, Ui, Network, Ws, Rpc, Ipc, Dapps, Ipfs, Mining, Footprint,
Snapshots, Misc, Whisper, SecretStore,
Snapshots, Misc, Whisper, SecretStore, Light,
};
use toml;
use clap::{ErrorKind as ClapErrorKind};
@ -1772,6 +1789,10 @@ mod tests {
arg_snapshot_at: "latest".into(),
flag_no_periodic_snapshot: false,
// -- Light options.
arg_on_demand_retry_count: Some(15),
arg_on_demand_inactive_time_limit: Some(15000),
// -- Whisper options.
flag_whisper: false,
arg_whisper_pool_size: 20,
@ -2019,6 +2040,10 @@ mod tests {
scale_verifiers: Some(false),
num_verifiers: None,
}),
light: Some(Light {
on_demand_retry_count: Some(12),
on_demand_inactive_time_limit: Some(20000),
}),
snapshots: Some(Snapshots {
disable_periodic: Some(true),
}),

View File

@ -155,6 +155,10 @@ fat_db = "auto"
scale_verifiers = true
num_verifiers = 6
[light]
on_demand_retry_count = 15
on_demand_inactive_time_limit = 15000
[snapshots]
disable_periodic = false

View File

@ -70,6 +70,10 @@ db_compaction = "ssd"
fat_db = "off"
scale_verifiers = false
[light]
on_demand_retry_count = 12
on_demand_inactive_time_limit = 20000
[snapshots]
disable_periodic = true

View File

@ -156,13 +156,13 @@ impl Configuration {
port: ws_conf.port,
authfile: authfile,
}
} else if self.args.cmd_signer_reject {
} else if self.args.cmd_signer_reject {
Cmd::SignerReject {
id: self.args.arg_signer_reject_id,
port: ws_conf.port,
authfile: authfile,
}
} else if self.args.cmd_signer_list {
} else if self.args.cmd_signer_list {
Cmd::SignerList {
port: ws_conf.port,
authfile: authfile,
@ -205,7 +205,7 @@ impl Configuration {
};
Cmd::Account(account_cmd)
} else if self.args.flag_import_geth_keys {
let account_cmd = AccountCmd::ImportFromGeth(
let account_cmd = AccountCmd::ImportFromGeth(
ImportFromGethAccounts {
spec: spec,
to: dirs.keys,
@ -383,6 +383,8 @@ impl Configuration {
no_persistent_txqueue: self.args.flag_no_persistent_txqueue,
whisper: whisper_config,
no_hardcoded_sync: self.args.flag_no_hardcoded_sync,
on_demand_retry_count: self.args.arg_on_demand_retry_count,
on_demand_inactive_time_limit: self.args.arg_on_demand_inactive_time_limit,
};
Cmd::Run(run_cmd)
};
@ -1331,10 +1333,10 @@ mod tests {
support_token_api: true,
max_connections: 100,
}, LogConfig {
color: true,
mode: None,
file: None,
} ));
color: true,
mode: None,
file: None,
} ));
}
#[test]
@ -1408,6 +1410,8 @@ mod tests {
no_hardcoded_sync: false,
no_persistent_txqueue: false,
whisper: Default::default(),
on_demand_retry_count: None,
on_demand_inactive_time_limit: None,
};
expected.secretstore_conf.enabled = cfg!(feature = "secretstore");
expected.secretstore_conf.http_enabled = cfg!(feature = "secretstore");
@ -1516,11 +1520,11 @@ mod tests {
"--jsonrpc-apis", "web3,eth"
]);
let conf2 = parse(&["parity", "--rpc",
"--rpcport", "8000",
"--rpcaddr", "all",
"--rpccorsdomain", "*",
"--rpcapi", "web3,eth"
]);
"--rpcport", "8000",
"--rpcaddr", "all",
"--rpccorsdomain", "*",
"--rpcapi", "web3,eth"
]);
// then
assert(conf1);

View File

@ -128,6 +128,8 @@ pub struct RunCmd {
pub no_persistent_txqueue: bool,
pub whisper: ::whisper::Config,
pub no_hardcoded_sync: bool,
pub on_demand_retry_count: Option<usize>,
pub on_demand_inactive_time_limit: Option<u64>,
}
// node info fetcher for the local store.
@ -206,7 +208,13 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
config.queue.verifier_settings = cmd.verifier_settings;
// start on_demand service.
let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone()));
let on_demand = Arc::new({
let mut on_demand = ::light::on_demand::OnDemand::new(cache.clone());
on_demand.default_retry_number(cmd.on_demand_retry_count.unwrap_or(::light::on_demand::DEFAULT_RETRY_COUNT));
on_demand.query_inactive_time_limit(cmd.on_demand_inactive_time_limit.map(Duration::from_millis)
.unwrap_or(::light::on_demand::DEFAULT_QUERY_TIME_LIMIT));
on_demand
});
let sync_handle = Arc::new(RwLock::new(Weak::new()));
let fetch = ::light_helpers::EpochFetch {
@ -352,7 +360,7 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq: Cr,
on_updater_rq: Rr) -> Result<RunningClient, String>
where Cr: Fn(String) + 'static + Send,
Rr: Fn() + 'static + Send
Rr: Fn() + 'static + Send
{
// load spec
let spec = cmd.spec.spec(&cmd.dirs.cache)?;
@ -900,7 +908,7 @@ impl RunningClient {
pub fn execute<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>,
on_client_rq: Cr, on_updater_rq: Rr) -> Result<RunningClient, String>
where Cr: Fn(String) + 'static + Send,
Rr: Fn() + 'static + Send
Rr: Fn() + 'static + Send
{
if cmd.light {
execute_light_impl(cmd, logger)

View File

@ -26,6 +26,7 @@ use rlp::DecoderError;
use transaction::Error as TransactionError;
use ethcore_private_tx::Error as PrivateTransactionError;
use vm::Error as VMError;
use light::on_demand::error::{Error as OnDemandError, ErrorKind as OnDemandErrorKind};
mod codes {
// NOTE [ToDr] Codes from [-32099, -32000]
@ -444,7 +445,41 @@ pub fn filter_block_not_found(id: BlockId) -> Error {
}
}
pub fn on_demand_error(err: OnDemandError) -> Error {
match err {
OnDemandError(OnDemandErrorKind::ChannelCanceled(e), _) => on_demand_cancel(e),
OnDemandError(OnDemandErrorKind::MaxAttemptReach(_), _) => max_attempts_reached(&err),
OnDemandError(OnDemandErrorKind::TimeoutOnNewPeers(_,_), _) => timeout_new_peer(&err),
_ => on_demand_others(&err),
}
}
// on-demand sender cancelled.
pub fn on_demand_cancel(_cancel: futures::sync::oneshot::Canceled) -> Error {
internal("on-demand sender cancelled", "")
}
pub fn max_attempts_reached(err: &OnDemandError) -> Error {
Error {
code: ErrorCode::ServerError(codes::REQUEST_NOT_FOUND),
message: err.to_string(),
data: None,
}
}
pub fn timeout_new_peer(err: &OnDemandError) -> Error {
Error {
code: ErrorCode::ServerError(codes::NO_LIGHT_PEERS),
message: err.to_string(),
data: None,
}
}
pub fn on_demand_others(err: &OnDemandError) -> Error {
Error {
code: ErrorCode::ServerError(codes::UNKNOWN_ERROR),
message: err.to_string(),
data: None,
}
}

View File

@ -19,6 +19,7 @@
use std::cmp;
use std::sync::Arc;
use light::on_demand::error::Error as OnDemandError;
use ethcore::basic_account::BasicAccount;
use ethcore::encoded;
use ethcore::filter::Filter as EthcoreFilter;
@ -49,7 +50,9 @@ use transaction::{Action, Transaction as EthTransaction, SignedTransaction, Loca
use v1::helpers::{CallRequest as CallRequestHelper, errors, dispatch};
use v1::types::{BlockNumber, CallRequest, Log, Transaction};
const NO_INVALID_BACK_REFS: &str = "Fails only on invalid back-references; back-references here known to be valid; qed";
const NO_INVALID_BACK_REFS_PROOF: &str = "Fails only on invalid back-references; back-references here known to be valid; qed";
const WRONG_RESPONSE_AMOUNT_TYPE_PROOF: &str = "responses correspond directly with requests in amount and type; qed";
/// Helper for fetching blockchain data either from the light client or the network
/// as necessary.
@ -148,7 +151,7 @@ impl LightFetch {
Either::B(self.send_requests(reqs, |res|
extract_header(&res, header_ref)
.expect("these responses correspond to requests that header_ref belongs to \
therefore it will not fail; qed")
therefore it will not fail; qed")
))
}
@ -166,7 +169,7 @@ impl LightFetch {
Either::B(self.send_requests(reqs, |mut res| match res.pop() {
Some(OnDemandResponse::Code(code)) => code,
_ => panic!("responses correspond directly with requests in amount and type; qed"),
_ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF),
}))
}
@ -183,7 +186,7 @@ impl LightFetch {
Either::B(self.send_requests(reqs, |mut res|match res.pop() {
Some(OnDemandResponse::Account(acc)) => acc,
_ => panic!("responses correspond directly with requests in amount and type; qed"),
_ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF),
}))
}
@ -276,7 +279,7 @@ impl LightFetch {
Either::B(self.send_requests(reqs, |mut res| match res.pop() {
Some(OnDemandResponse::Body(b)) => b,
_ => panic!("responses correspond directly with requests in amount and type; qed"),
_ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF),
}))
}
@ -292,7 +295,7 @@ impl LightFetch {
Either::B(self.send_requests(reqs, |mut res| match res.pop() {
Some(OnDemandResponse::Receipts(b)) => b,
_ => panic!("responses correspond directly with requests in amount and type; qed"),
_ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF),
}))
}
@ -322,7 +325,7 @@ impl LightFetch {
bit_combos.iter().any(|bloom| hdr_bloom.contains_bloom(bloom))
})
.map(|hdr| (hdr.number(), hdr.hash(), request::BlockReceipts(hdr.into())))
.map(|(num, hash, req)| on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS).map(move |x| (num, hash, x)))
.map(|(num, hash, req)| on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS_PROOF).map(move |x| (num, hash, x)))
.collect();
// as the receipts come in, find logs within them which match the filter.
@ -351,10 +354,10 @@ impl LightFetch {
block_index += 1;
}
}
future::ok(matches)
future::ok::<_,OnDemandError>(matches)
}) // and then collect them into a vector.
.map(|matches| matches.into_iter().map(|(_, v)| v).collect())
.map_err(errors::on_demand_cancel)
.map_err(errors::on_demand_error)
});
match maybe_future {
@ -379,7 +382,7 @@ impl LightFetch {
});
let eventual_index = match maybe_future {
Some(e) => e.expect(NO_INVALID_BACK_REFS).map_err(errors::on_demand_cancel),
Some(e) => e.expect(NO_INVALID_BACK_REFS_PROOF).map_err(errors::on_demand_error),
None => return Either::A(future::err(errors::network_disabled())),
};
@ -429,9 +432,15 @@ impl LightFetch {
{
let maybe_future = self.sync.with_context(move |ctx| {
Box::new(self.on_demand.request_raw(ctx, reqs)
.expect(NO_INVALID_BACK_REFS)
.map(parse_response)
.map_err(errors::on_demand_cancel))
.expect(NO_INVALID_BACK_REFS_PROOF)
.map_err(errors::on_demand_cancel)
.and_then(|responses| {
match responses {
Ok(responses) => Ok(parse_response(responses)),
Err(e) => Err(errors::on_demand_error(e)),
}
})
)
});
match maybe_future {
@ -638,7 +647,7 @@ fn execute_tx(gas_known: bool, params: ExecuteParams) -> impl Future<Item = Exec
on_demand
.request(ctx, request)
.expect("no back-references; therefore all back-refs valid; qed")
.map_err(errors::on_demand_cancel)
.map_err(errors::on_demand_error)
});
match proved_future {

View File

@ -216,7 +216,7 @@ impl<T: LightChainClient + 'static> EthClient<T> {
};
fill_rich(block, score)
}).map_err(errors::on_demand_cancel)),
}).map_err(errors::on_demand_error)),
None => Either::A(future::err(errors::network_disabled())),
}
}
@ -316,7 +316,7 @@ impl<T: LightChainClient + 'static> Eth for EthClient<T> {
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
.map(|x| x.expect(NO_INVALID_BACK_REFS))
.map(|x| x.map(|b| Some(U256::from(b.transactions_count()).into())))
.map(|x| Either::B(x.map_err(errors::on_demand_cancel)))
.map(|x| Either::B(x.map_err(errors::on_demand_error)))
.unwrap_or_else(|| Either::A(future::err(errors::network_disabled())))
}
}))
@ -332,7 +332,7 @@ impl<T: LightChainClient + 'static> Eth for EthClient<T> {
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
.map(|x| x.expect(NO_INVALID_BACK_REFS))
.map(|x| x.map(|b| Some(U256::from(b.transactions_count()).into())))
.map(|x| Either::B(x.map_err(errors::on_demand_cancel)))
.map(|x| Either::B(x.map_err(errors::on_demand_error)))
.unwrap_or_else(|| Either::A(future::err(errors::network_disabled())))
}
}))
@ -348,7 +348,7 @@ impl<T: LightChainClient + 'static> Eth for EthClient<T> {
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
.map(|x| x.expect(NO_INVALID_BACK_REFS))
.map(|x| x.map(|b| Some(U256::from(b.uncles_count()).into())))
.map(|x| Either::B(x.map_err(errors::on_demand_cancel)))
.map(|x| Either::B(x.map_err(errors::on_demand_error)))
.unwrap_or_else(|| Either::A(future::err(errors::network_disabled())))
}
}))
@ -364,7 +364,7 @@ impl<T: LightChainClient + 'static> Eth for EthClient<T> {
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
.map(|x| x.expect(NO_INVALID_BACK_REFS))
.map(|x| x.map(|b| Some(U256::from(b.uncles_count()).into())))
.map(|x| Either::A(x.map_err(errors::on_demand_cancel)))
.map(|x| Either::A(x.map_err(errors::on_demand_error)))
.unwrap_or_else(|| Either::B(future::err(errors::network_disabled())))
}
}))