openethereum/rpc/src/v1/impls/light/eth.rs

601 lines
20 KiB
Rust
Raw Normal View History

// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
//! Eth RPC interface for the light client.
use std::collections::BTreeSet;
2017-02-03 16:20:43 +01:00
use std::sync::Arc;
2017-11-14 11:38:17 +01:00
use jsonrpc_core::{Result, BoxFuture};
use jsonrpc_core::futures::{future, Future};
use jsonrpc_core::futures::future::Either;
2017-02-03 16:20:43 +01:00
2017-02-26 15:05:33 +01:00
use light::cache::Cache as LightDataCache;
use light::client::LightChainClient;
2017-02-09 19:17:37 +01:00
use light::{cht, TransactionQueue};
2017-02-07 23:25:17 +01:00
use light::on_demand::{request, OnDemand};
2017-02-03 16:20:43 +01:00
use ethereum_types::{Address, H64, H160, H256, U64, U256};
use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP};
use parking_lot::{RwLock, Mutex};
use rlp::Rlp;
use types::transaction::SignedTransaction;
use types::encoded;
use types::filter::Filter as EthcoreFilter;
use types::ids::BlockId;
2017-02-03 16:20:43 +01:00
2017-03-28 15:42:23 +02:00
use v1::impls::eth_filter::Filterable;
use v1::helpers::{errors, limit_logs};
use v1::helpers::{SyncPollFilter, PollManager};
use v1::helpers::deprecated::{self, DeprecationNotice};
use v1::helpers::light_fetch::{self, LightFetch};
2017-02-03 16:20:43 +01:00
use v1::traits::Eth;
use v1::types::{
RichBlock, Block, BlockTransactions, BlockNumber, LightBlockNumber, Bytes, SyncStatus as RpcSyncStatus,
SyncInfo as RpcSyncInfo, Transaction, CallRequest, Index, Filter, Log, Receipt, Work, EthAccount
2017-02-03 16:20:43 +01:00
};
use v1::metadata::Metadata;
use sync::{LightSyncInfo, LightSyncProvider, LightNetworkDispatcher, ManageNetwork};
const NO_INVALID_BACK_REFS: &str = "Fails only on invalid back-references; back-references here known to be valid; qed";
2017-03-28 15:42:23 +02:00
/// Light client `ETH` (and filter) RPC.
/// Light client `ETH` (and filter) RPC.
pub struct EthClient<C, S: LightSyncProvider + LightNetworkDispatcher + 'static> {
	// Handle to the light sync layer; supplies the network context for on-demand requests.
	sync: Arc<S>,
	// Light chain client: headers, chain info, consensus engine access.
	client: Arc<C>,
	// On-demand request service for fetching bodies/receipts/proofs from peers.
	on_demand: Arc<OnDemand>,
	// Queue of locally-submitted pending transactions.
	transaction_queue: Arc<RwLock<TransactionQueue>>,
	// Closure yielding the accounts visible to this RPC instance.
	accounts: Arc<Fn() -> Vec<Address> + Send + Sync>,
	// Shared light-data cache (e.g. the gas-price corpus used by `gas_price`).
	cache: Arc<Mutex<LightDataCache>>,
	// Per-instance poll manager; deliberately NOT shared between clones (see the `Clone` impl).
	polls: Mutex<PollManager<SyncPollFilter>>,
	// Lifetime passed to newly created poll managers (units defined by `PollManager` — confirm there).
	poll_lifetime: u32,
	// Percentile of the gas-price corpus reported by `eth_gasPrice`.
	gas_price_percentile: usize,
	// Tracker used to print deprecation warnings (e.g. for `eth_accounts`).
	deprecation_notice: DeprecationNotice,
}
impl<C, S> Clone for EthClient<C, S>
where
S: LightSyncProvider + LightNetworkDispatcher + 'static
{
2017-03-28 15:42:23 +02:00
fn clone(&self) -> Self {
// each instance should have its own poll manager.
EthClient {
sync: self.sync.clone(),
client: self.client.clone(),
on_demand: self.on_demand.clone(),
transaction_queue: self.transaction_queue.clone(),
accounts: self.accounts.clone(),
cache: self.cache.clone(),
polls: Mutex::new(PollManager::new(self.poll_lifetime)),
poll_lifetime: self.poll_lifetime,
gas_price_percentile: self.gas_price_percentile,
deprecation_notice: Default::default(),
2017-03-28 15:42:23 +02:00
}
}
2017-02-03 16:20:43 +01:00
}
impl<C, S> EthClient<C, S>
where
	C: LightChainClient + 'static,
	S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static
{
	/// Create a new `EthClient` with a handle to the light sync instance, client,
	/// and on-demand request service, which is assumed to be attached as a handler.
	pub fn new(
		sync: Arc<S>,
		client: Arc<C>,
		on_demand: Arc<OnDemand>,
		transaction_queue: Arc<RwLock<TransactionQueue>>,
		accounts: Arc<Fn() -> Vec<Address> + Send + Sync>,
		cache: Arc<Mutex<LightDataCache>>,
		gas_price_percentile: usize,
		poll_lifetime: u32
	) -> Self {
		EthClient {
			sync,
			client,
			on_demand,
			transaction_queue,
			accounts,
			cache,
			polls: Mutex::new(PollManager::new(poll_lifetime)),
			poll_lifetime,
			gas_price_percentile,
			deprecation_notice: Default::default(),
		}
	}

	/// Create a light data fetcher instance.
	fn fetcher(&self) -> LightFetch<S>
	{
		LightFetch {
			client: self.client.clone(),
			on_demand: self.on_demand.clone(),
			sync: self.sync.clone(),
			cache: self.cache.clone(),
			gas_price_percentile: self.gas_price_percentile,
		}
	}

	// get a "rich" block structure. Fails on unknown block.
	// The total difficulty ("score") is served from the local client when
	// available; otherwise it is fetched from the network via a CHT header
	// proof, and left as `None` if the proof resolves to a different
	// (non-canonical) hash.
	fn rich_block(&self, id: BlockId, include_txs: bool) -> BoxFuture<RichBlock> {
		let (on_demand, sync) = (self.on_demand.clone(), self.sync.clone());
		let (client, engine) = (self.client.clone(), self.client.engine().clone());

		// helper for filling out a rich block once we've got a block and a score.
		let fill_rich = move |block: encoded::Block, score: Option<U256>| {
			let header = block.decode_header();
			let extra_info = engine.extra_info(&header);
			RichBlock {
				inner: Block {
					hash: Some(header.hash()),
					size: Some(block.rlp().as_raw().len().into()),
					parent_hash: *header.parent_hash(),
					uncles_hash: *header.uncles_hash(),
					author: *header.author(),
					miner: *header.author(),
					state_root: *header.state_root(),
					transactions_root: *header.transactions_root(),
					receipts_root: *header.receipts_root(),
					number: Some(header.number().into()),
					gas_used: *header.gas_used(),
					gas_limit: *header.gas_limit(),
					logs_bloom: Some(*header.log_bloom()),
					timestamp: header.timestamp().into(),
					difficulty: *header.difficulty(),
					total_difficulty: score.map(Into::into),
					seal_fields: header.seal().iter().cloned().map(Into::into).collect(),
					uncles: block.uncle_hashes().into_iter().map(Into::into).collect(),
					// full transaction objects or just hashes, per the RPC flag.
					transactions: match include_txs {
						true => BlockTransactions::Full(block.view().localized_transactions().into_iter().map(Transaction::from_localized).collect()),
						_ => BlockTransactions::Hashes(block.transaction_hashes().into_iter().map(Into::into).collect()),
					},
					extra_data: Bytes::new(header.extra_data().clone()),
				},
				extra_info,
			}
		};

		// get the block itself.
		Box::new(self.fetcher().block(id).and_then(move |block| {
			// then fetch the total difficulty (this is much easier after getting the block).
			match client.score(id) {
				Some(score) => Either::A(future::ok(fill_rich(block, Some(score)))),
				None => {
					// make a CHT request to fetch the chain score.
					let req = cht::block_to_cht_number(block.number())
						.and_then(|num| client.cht_root(num as usize))
						.and_then(|root| request::HeaderProof::new(block.number(), root));

					let req = match req {
						Some(req) => req,
						None => {
							// somehow the genesis block slipped past other checks.
							// return it now.
							let score = client.block_header(BlockId::Number(0))
								.expect("genesis always stored; qed")
								.difficulty();

							return Either::A(future::ok(fill_rich(block, Some(score))))
						}
					};

					// three possible outcomes:
					// - network is down.
					// - we get a score, but our hash is non-canonical.
					// - we get a score, and our hash is canonical.
					let maybe_fut = sync.with_context(move |ctx| on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS));
					match maybe_fut {
						Some(fut) => Either::B(fut
							.map(move |(hash, score)| {
								// only attach the score if the proof is for our hash.
								let score = if hash == block.hash() {
									Some(score)
								} else {
									None
								};

								fill_rich(block, score)
							}).map_err(errors::on_demand_error)),
						None => Either::A(future::err(errors::network_disabled())),
					}
				}
			}
		}))
	}
}
impl<C, S> Eth for EthClient<C, S>
where
C: LightChainClient + 'static,
S: LightSyncInfo + LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static
{
2017-02-03 16:20:43 +01:00
type Metadata = Metadata;
2017-11-14 11:38:17 +01:00
fn protocol_version(&self) -> Result<String> {
2017-02-03 16:20:43 +01:00
Ok(format!("{}", ::light::net::MAX_PROTOCOL_VERSION))
}
fn syncing(&self) -> Result<RpcSyncStatus> {
2017-03-23 03:23:53 +01:00
if self.sync.is_major_importing() {
let chain_info = self.client.chain_info();
let current_block = U256::from(chain_info.best_block_number);
let highest_block = self.sync.highest_block().map(U256::from)
.unwrap_or_else(|| current_block);
2017-03-23 03:23:53 +01:00
Ok(RpcSyncStatus::Info(RpcSyncInfo {
starting_block: U256::from(self.sync.start_block()),
current_block,
highest_block,
2017-03-23 03:23:53 +01:00
warp_chunks_amount: None,
warp_chunks_processed: None,
}))
} else {
Ok(RpcSyncStatus::None)
2017-03-23 03:23:53 +01:00
}
2017-02-03 16:20:43 +01:00
}
fn author(&self) -> Result<H160> {
(self.accounts)()
.first()
.cloned()
.map(From::from)
.ok_or_else(|| errors::account("No accounts were found", ""))
2017-02-03 16:20:43 +01:00
}
2017-11-14 11:38:17 +01:00
fn is_mining(&self) -> Result<bool> {
2017-02-03 16:20:43 +01:00
Ok(false)
}
fn chain_id(&self) -> Result<Option<U64>> {
Ok(self.client.signing_chain_id().map(U64::from))
}
fn hashrate(&self) -> Result<U256> {
2017-02-03 16:20:43 +01:00
Ok(Default::default())
}
fn gas_price(&self) -> Result<U256> {
2017-03-28 17:15:36 +02:00
Ok(self.cache.lock().gas_price_corpus()
.and_then(|c| c.percentile(self.gas_price_percentile).cloned())
.map(U256::from)
2017-03-28 17:15:36 +02:00
.unwrap_or_else(Default::default))
2017-02-03 16:20:43 +01:00
}
/// `eth_accounts` (deprecated): list the known accounts.
fn accounts(&self) -> Result<Vec<H160>> {
	self.deprecation_notice.print("eth_accounts", deprecated::msgs::ACCOUNTS);

	let mut out = Vec::new();
	for addr in (self.accounts)() {
		out.push(addr.into());
	}
	Ok(out)
}
/// `eth_blockNumber`: the local best block number.
fn block_number(&self) -> Result<U256> {
	let info = self.client.chain_info();
	Ok(info.best_block_number.into())
}
fn balance(&self, address: H160, num: Option<BlockNumber>) -> BoxFuture<U256> {
Box::new(self.fetcher().account(address, num.unwrap_or_default().to_block_id())
.map(|acc| acc.map_or(0.into(), |a| a.balance)))
2017-02-03 17:41:25 +01:00
}
fn storage_at(&self, _address: H160, _key: U256, _num: Option<BlockNumber>) -> BoxFuture<H256> {
Box::new(future::err(errors::unimplemented(None)))
2017-02-03 17:41:25 +01:00
}
fn block_by_hash(&self, hash: H256, include_txs: bool) -> BoxFuture<Option<RichBlock>> {
Box::new(self.rich_block(BlockId::Hash(hash), include_txs).map(Some))
2017-02-03 17:41:25 +01:00
}
2017-11-14 11:38:17 +01:00
fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture<Option<RichBlock>> {
Box::new(self.rich_block(num.to_block_id(), include_txs).map(Some))
2017-02-03 17:41:25 +01:00
}
/// `eth_getTransactionCount`: proven account nonce at the given block
/// (zero for a non-existent account).
fn transaction_count(&self, address: H160, num: Option<BlockNumber>) -> BoxFuture<U256> {
	let id = num.unwrap_or_default().to_block_id();
	let fut = self.fetcher().account(address, id)
		.map(|acc| acc.map(|a| a.nonce).unwrap_or_else(|| 0.into()));
	Box::new(fut)
}
fn block_transaction_count_by_hash(&self, hash: H256) -> BoxFuture<Option<U256>> {
2017-02-07 14:45:48 +01:00
let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());
Box::new(self.fetcher().header(BlockId::Hash(hash)).and_then(move |hdr| {
2017-08-31 11:35:41 +02:00
if hdr.transactions_root() == KECCAK_NULL_RLP {
Either::A(future::ok(Some(U256::from(0))))
2017-02-07 14:45:48 +01:00
} else {
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
.map(|x| x.expect(NO_INVALID_BACK_REFS))
.map(|x| x.map(|b| Some(U256::from(b.transactions_count()))))
Allow dropping light client RPC query with no results (#9318) * OnDemand no longer loop until there is a query. All peer known at the time will be queried, and the query fail if all return no reply. Returning the failure is done through an empty Vec of reply (the type of the oneshot channel remains unchanged). Before this commit the query were send randomly to any peer until there is a reply (for a query that got no result it was an issue, for other queries it was quering multiple times the same peers). After this commit the first query is random but next queries follows hashmap iterator order. Test no_capability was broken by this commit (the pending query was removed). * OnDemand no longer loop until there is a query. All peer known at the time will be queried, and the query fail if all return no reply. Returning the failure is done through an empty Vec of reply (the type of the oneshot channel remains unchanged). Before this commit the query were send randomly to any peer until there is a reply (for a query that got no result it was an issue, for other queries it was quering multiple times the same peers). After this commit the first query is random but next queries follows hashmap iterator order. Test no_capability was broken by this commit (the pending query was removed). If adding some kind of timeout mechanism it could be restored. * Comment plus better field names. * No panick on dropped oneshot channel. * Use Set to avoid counter heuristic * Cli option `on_demand_nb_retry` for maximum number of retry when doing on demand query in light client. * Missing test update for previous commit * Add a timeout (only when there is no peer to query), that way we do not set number of query to minimum current number peer or configured number of query : that way capability test was restored. * Adding an error type for on_demand, it helps having variant of error reported at rpc level : choice of rpc error code error might not be right. 
* Duration as constant is nice * Switch to duration in main too * Fix indentation (sorry for that). * Fix error management (bad merge in previous commit) * Lots of english corrections, major change on the new command parameters : - use standard '-' instead of '_' - renaming nb_retry params to 'on-demand-retry-count'
2018-09-12 11:47:01 +02:00
.map(|x| Either::B(x.map_err(errors::on_demand_error)))
.unwrap_or_else(|| Either::A(future::err(errors::network_disabled())))
2017-02-07 14:45:48 +01:00
}
}))
2017-02-03 17:41:25 +01:00
}
fn block_transaction_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<U256>> {
2017-02-07 14:45:48 +01:00
let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());
Box::new(self.fetcher().header(num.to_block_id()).and_then(move |hdr| {
2017-08-31 11:35:41 +02:00
if hdr.transactions_root() == KECCAK_NULL_RLP {
Either::A(future::ok(Some(U256::from(0))))
2017-02-07 14:45:48 +01:00
} else {
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
.map(|x| x.expect(NO_INVALID_BACK_REFS))
.map(|x| x.map(|b| Some(U256::from(b.transactions_count()))))
Allow dropping light client RPC query with no results (#9318) * OnDemand no longer loop until there is a query. All peer known at the time will be queried, and the query fail if all return no reply. Returning the failure is done through an empty Vec of reply (the type of the oneshot channel remains unchanged). Before this commit the query were send randomly to any peer until there is a reply (for a query that got no result it was an issue, for other queries it was quering multiple times the same peers). After this commit the first query is random but next queries follows hashmap iterator order. Test no_capability was broken by this commit (the pending query was removed). * OnDemand no longer loop until there is a query. All peer known at the time will be queried, and the query fail if all return no reply. Returning the failure is done through an empty Vec of reply (the type of the oneshot channel remains unchanged). Before this commit the query were send randomly to any peer until there is a reply (for a query that got no result it was an issue, for other queries it was quering multiple times the same peers). After this commit the first query is random but next queries follows hashmap iterator order. Test no_capability was broken by this commit (the pending query was removed). If adding some kind of timeout mechanism it could be restored. * Comment plus better field names. * No panick on dropped oneshot channel. * Use Set to avoid counter heuristic * Cli option `on_demand_nb_retry` for maximum number of retry when doing on demand query in light client. * Missing test update for previous commit * Add a timeout (only when there is no peer to query), that way we do not set number of query to minimum current number peer or configured number of query : that way capability test was restored. * Adding an error type for on_demand, it helps having variant of error reported at rpc level : choice of rpc error code error might not be right. 
* Duration as constant is nice * Switch to duration in main too * Fix indentation (sorry for that). * Fix error management (bad merge in previous commit) * Lots of english corrections, major change on the new command parameters : - use standard '-' instead of '_' - renaming nb_retry params to 'on-demand-retry-count'
2018-09-12 11:47:01 +02:00
.map(|x| Either::B(x.map_err(errors::on_demand_error)))
.unwrap_or_else(|| Either::A(future::err(errors::network_disabled())))
2017-02-07 14:45:48 +01:00
}
}))
2017-02-03 17:41:25 +01:00
}
fn block_uncles_count_by_hash(&self, hash: H256) -> BoxFuture<Option<U256>> {
2017-02-07 14:45:48 +01:00
let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());
Box::new(self.fetcher().header(BlockId::Hash(hash)).and_then(move |hdr| {
2017-08-31 11:35:41 +02:00
if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP {
Either::A(future::ok(Some(U256::from(0))))
2017-02-07 14:45:48 +01:00
} else {
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
.map(|x| x.expect(NO_INVALID_BACK_REFS))
.map(|x| x.map(|b| Some(U256::from(b.uncles_count()))))
Allow dropping light client RPC query with no results (#9318) * OnDemand no longer loop until there is a query. All peer known at the time will be queried, and the query fail if all return no reply. Returning the failure is done through an empty Vec of reply (the type of the oneshot channel remains unchanged). Before this commit the query were send randomly to any peer until there is a reply (for a query that got no result it was an issue, for other queries it was quering multiple times the same peers). After this commit the first query is random but next queries follows hashmap iterator order. Test no_capability was broken by this commit (the pending query was removed). * OnDemand no longer loop until there is a query. All peer known at the time will be queried, and the query fail if all return no reply. Returning the failure is done through an empty Vec of reply (the type of the oneshot channel remains unchanged). Before this commit the query were send randomly to any peer until there is a reply (for a query that got no result it was an issue, for other queries it was quering multiple times the same peers). After this commit the first query is random but next queries follows hashmap iterator order. Test no_capability was broken by this commit (the pending query was removed). If adding some kind of timeout mechanism it could be restored. * Comment plus better field names. * No panick on dropped oneshot channel. * Use Set to avoid counter heuristic * Cli option `on_demand_nb_retry` for maximum number of retry when doing on demand query in light client. * Missing test update for previous commit * Add a timeout (only when there is no peer to query), that way we do not set number of query to minimum current number peer or configured number of query : that way capability test was restored. * Adding an error type for on_demand, it helps having variant of error reported at rpc level : choice of rpc error code error might not be right. 
* Duration as constant is nice * Switch to duration in main too * Fix indentation (sorry for that). * Fix error management (bad merge in previous commit) * Lots of english corrections, major change on the new command parameters : - use standard '-' instead of '_' - renaming nb_retry params to 'on-demand-retry-count'
2018-09-12 11:47:01 +02:00
.map(|x| Either::B(x.map_err(errors::on_demand_error)))
.unwrap_or_else(|| Either::A(future::err(errors::network_disabled())))
2017-02-07 14:45:48 +01:00
}
}))
2017-02-03 17:41:25 +01:00
}
fn block_uncles_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<U256>> {
2017-02-07 14:45:48 +01:00
let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());
Box::new(self.fetcher().header(num.to_block_id()).and_then(move |hdr| {
2017-08-31 11:35:41 +02:00
if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP {
Either::B(future::ok(Some(U256::from(0))))
2017-02-07 14:45:48 +01:00
} else {
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
.map(|x| x.expect(NO_INVALID_BACK_REFS))
.map(|x| x.map(|b| Some(U256::from(b.uncles_count()))))
Allow dropping light client RPC query with no results (#9318) * OnDemand no longer loop until there is a query. All peer known at the time will be queried, and the query fail if all return no reply. Returning the failure is done through an empty Vec of reply (the type of the oneshot channel remains unchanged). Before this commit the query were send randomly to any peer until there is a reply (for a query that got no result it was an issue, for other queries it was quering multiple times the same peers). After this commit the first query is random but next queries follows hashmap iterator order. Test no_capability was broken by this commit (the pending query was removed). * OnDemand no longer loop until there is a query. All peer known at the time will be queried, and the query fail if all return no reply. Returning the failure is done through an empty Vec of reply (the type of the oneshot channel remains unchanged). Before this commit the query were send randomly to any peer until there is a reply (for a query that got no result it was an issue, for other queries it was quering multiple times the same peers). After this commit the first query is random but next queries follows hashmap iterator order. Test no_capability was broken by this commit (the pending query was removed). If adding some kind of timeout mechanism it could be restored. * Comment plus better field names. * No panick on dropped oneshot channel. * Use Set to avoid counter heuristic * Cli option `on_demand_nb_retry` for maximum number of retry when doing on demand query in light client. * Missing test update for previous commit * Add a timeout (only when there is no peer to query), that way we do not set number of query to minimum current number peer or configured number of query : that way capability test was restored. * Adding an error type for on_demand, it helps having variant of error reported at rpc level : choice of rpc error code error might not be right. 
* Duration as constant is nice * Switch to duration in main too * Fix indentation (sorry for that). * Fix error management (bad merge in previous commit) * Lots of english corrections, major change on the new command parameters : - use standard '-' instead of '_' - renaming nb_retry params to 'on-demand-retry-count'
2018-09-12 11:47:01 +02:00
.map(|x| Either::A(x.map_err(errors::on_demand_error)))
.unwrap_or_else(|| Either::B(future::err(errors::network_disabled())))
2017-02-07 14:45:48 +01:00
}
}))
2017-02-03 17:41:25 +01:00
}
/// `eth_getCode`: proven contract code at the given block.
fn code_at(&self, address: H160, num: Option<BlockNumber>) -> BoxFuture<Bytes> {
	let id = num.unwrap_or_default().to_block_id();
	let fut = self.fetcher().code(address, id).map(Into::into);
	Box::new(fut)
}
fn send_raw_transaction(&self, raw: Bytes) -> Result<H256> {
let best_header = self.client.best_block_header().decode().map_err(errors::decode)?;
2017-02-09 19:58:29 +01:00
Rlp::new(&raw.into_vec()).as_val()
.map_err(errors::rlp)
2017-02-09 19:58:29 +01:00
.and_then(|tx| {
self.client.engine().verify_transaction_basic(&tx, &best_header)
.map_err(errors::transaction)?;
2017-02-09 19:58:29 +01:00
let signed = SignedTransaction::new(tx).map_err(errors::transaction)?;
2017-02-09 19:17:37 +01:00
let hash = signed.hash();
2017-02-09 19:58:29 +01:00
2017-02-09 19:17:37 +01:00
self.transaction_queue.write().import(signed.into())
.map(|_| hash)
.map_err(errors::transaction)
2017-02-09 19:17:37 +01:00
})
.map(Into::into)
2017-02-03 17:41:25 +01:00
}
/// `eth_submitTransaction`: identical semantics to
/// `eth_sendRawTransaction` on the light client; delegates directly.
fn submit_transaction(&self, raw: Bytes) -> Result<H256> {
	self.send_raw_transaction(raw)
}
fn call(&self, req: CallRequest, num: Option<BlockNumber>) -> BoxFuture<Bytes> {
Box::new(self.fetcher().proved_read_only_execution(req, num).and_then(|res| {
2017-02-26 13:48:56 +01:00
match res {
2017-02-26 15:05:33 +01:00
Ok(exec) => Ok(exec.output.into()),
2017-02-26 13:48:56 +01:00
Err(e) => Err(errors::execution(e)),
}
}))
2017-02-03 17:41:25 +01:00
}
fn estimate_gas(&self, req: CallRequest, num: Option<BlockNumber>) -> BoxFuture<U256> {
2017-02-26 13:48:56 +01:00
// TODO: binary chop for more accurate estimates.
Box::new(self.fetcher().proved_read_only_execution(req, num).and_then(|res| {
2017-02-26 13:48:56 +01:00
match res {
Ok(exec) => Ok(exec.refunded + exec.gas_used),
2017-02-26 13:48:56 +01:00
Err(e) => Err(errors::execution(e)),
}
}))
2017-02-03 17:41:25 +01:00
}
fn transaction_by_hash(&self, hash: H256) -> BoxFuture<Option<Transaction>> {
2017-10-24 07:09:48 +02:00
{
let tx_queue = self.transaction_queue.read();
if let Some(tx) = tx_queue.get(&hash) {
return Box::new(future::ok(Some(Transaction::from_pending(
tx.clone(),
))));
}
}
Box::new(self.fetcher().transaction_by_hash(hash).map(|x| x.map(|(tx, _)| tx)))
2017-02-03 17:41:25 +01:00
}
fn transaction_by_block_hash_and_index(&self, hash: H256, idx: Index) -> BoxFuture<Option<Transaction>> {
Box::new(self.fetcher().block(BlockId::Hash(hash)).map(move |block| {
light_fetch::extract_transaction_at_index(block, idx.value())
}))
2017-02-03 17:41:25 +01:00
}
2017-11-14 11:38:17 +01:00
fn transaction_by_block_number_and_index(&self, num: BlockNumber, idx: Index) -> BoxFuture<Option<Transaction>> {
Box::new(self.fetcher().block(num.to_block_id()).map(move |block| {
light_fetch::extract_transaction_at_index(block, idx.value())
}))
2017-02-03 17:41:25 +01:00
}
/// `eth_getTransactionReceipt`: locate the transaction, then fetch the
/// containing block's receipts and return the one at the transaction's
/// index, stamped with the identifying fields of the RPC response.
fn transaction_receipt(&self, hash: H256) -> BoxFuture<Option<Receipt>> {
	let fetcher = self.fetcher();
	Box::new(fetcher.transaction_by_hash(hash).and_then(move |tx| {
		// the block hash included in the transaction object here has
		// already been checked for canonicality and whether it contains
		// the transaction.
		match tx {
			Some((tx, index)) => match tx.block_hash {
				Some(block_hash) => {
					// fetch all receipts for the block, take the one at
					// `index`, and fill in the hash/index/block fields.
					let extract_receipt = fetcher.receipts(BlockId::Hash(block_hash))
						.and_then(move |mut receipts| future::ok(receipts.swap_remove(index)))
						.map(Receipt::from)
						.map(move |mut receipt| {
							receipt.transaction_hash = Some(hash);
							receipt.transaction_index = Some(index.into());
							receipt.block_hash = Some(block_hash);
							receipt.block_number = tx.block_number;
							receipt
						})
						.map(Some);

					Either::B(extract_receipt)
				}
				// a located transaction without a block hash is unexpected here.
				None => Either::A(future::err(errors::unknown_block())),
			},
			None => Either::A(future::ok(None)),
		}
	}))
}
fn uncle_by_block_hash_and_index(&self, hash: H256, idx: Index) -> BoxFuture<Option<RichBlock>> {
let client = self.client.clone();
Box::new(self.fetcher().block(BlockId::Hash(hash)).map(move |block| {
extract_uncle_at_index(block, idx, client)
}))
2017-02-03 17:41:25 +01:00
}
2017-11-14 11:38:17 +01:00
fn uncle_by_block_number_and_index(&self, num: BlockNumber, idx: Index) -> BoxFuture<Option<RichBlock>> {
let client = self.client.clone();
Box::new(self.fetcher().block(num.to_block_id()).map(move |block| {
extract_uncle_at_index(block, idx, client)
}))
2017-02-03 17:41:25 +01:00
}
fn proof(&self, _address: H160, _values:Vec<H256>, _num: Option<BlockNumber>) -> BoxFuture<EthAccount> {
EIP-1186: add `eth_getProof` RPC-Method (#9001) * added eth_getAccount * changed to getProof * implemented storage_proof * better formatting of storage proof * fixed imports;2C * removed spaces * fixed whitespace * fixed docker * added doc * fixed Compile-error * expose more ports * added eth_getAccount * changed to getProof * implemented storage_proof * better formatting of storage proof * fixed docker * removed slockit-changes * fixed Dockerfile * intend * spaces * removed spaces * fixed whitespace * fixed docker * tabs * fixed Compile-error * added eth_getAccount * changed to getProof * implemented storage_proof * fixed docker * removed slockit-changes * fixed Dockerfile * intend * spaces * removed spaces * fixed whitespace * fixed docker * tabs * merged changes * fixed warnings * added eth_getAccount * changed to getProof * implemented storage_proof * better formatting of storage proof * Update Dockerfile * fixed docker * removed slockit-changes * fixed Dockerfile * intend * spaces * removed spaces * fixed whitespace * fixed docker * tabs * added eth_getAccount * changed to getProof * implemented storage_proof * removed spaces * fixed whitespace * fixed docker * added eth_getAccount * changed to getProof * implemented storage_proof * better formatting of storage proof * fixed docker * removed slockit-changes * fixed Dockerfile * intend * spaces * removed spaces * fixed whitespace * fixed docker * tabs * merged changes * fixed merge error * fixed formatting * fixed rename_all = "camelCase" * fixed tabs * fixed spaces * removed port exposer * formatting * fixed comment * use filter_map * formatting * use better variable names * changed casting * fixed tabs * remote into() from address * remove space Co-Authored-By: simon-jentzsch <simon@slock.it> * fixed storage_index Co-Authored-By: simon-jentzsch <simon@slock.it> * fixed clone * fixed format Co-Authored-By: simon-jentzsch <simon@slock.it> * fixed empty lines * removed Option from EthAccount * fixed 
storage_index * implemented test and fixed the struct-spaces * fixed tests * added experimental RPCs flag for getProof * optmized code
2018-11-21 20:09:33 +01:00
Box::new(future::err(errors::unimplemented(None)))
}
2017-11-14 11:38:17 +01:00
fn compilers(&self) -> Result<Vec<String>> {
2017-02-26 13:48:56 +01:00
Err(errors::deprecated("Compilation functionality is deprecated.".to_string()))
2017-02-03 17:41:25 +01:00
}
2017-11-14 11:38:17 +01:00
fn compile_lll(&self, _: String) -> Result<Bytes> {
2017-02-26 13:48:56 +01:00
Err(errors::deprecated("Compilation of LLL via RPC is deprecated".to_string()))
2017-02-03 17:41:25 +01:00
}
2017-11-14 11:38:17 +01:00
fn compile_serpent(&self, _: String) -> Result<Bytes> {
2017-02-26 13:48:56 +01:00
Err(errors::deprecated("Compilation of Serpent via RPC is deprecated".to_string()))
2017-02-03 17:41:25 +01:00
}
2017-11-14 11:38:17 +01:00
fn compile_solidity(&self, _: String) -> Result<Bytes> {
2017-02-26 13:48:56 +01:00
Err(errors::deprecated("Compilation of Solidity via RPC is deprecated".to_string()))
2017-02-03 17:41:25 +01:00
}
2017-11-14 11:38:17 +01:00
fn logs(&self, filter: Filter) -> BoxFuture<Vec<Log>> {
2017-03-28 15:42:23 +02:00
let limit = filter.limit;
Box::new(
Filterable::logs(self, match filter.try_into() {
Ok(value) => value,
Err(err) => return Box::new(future::err(err)),
}).map(move |logs| limit_logs(logs, limit)))
2017-02-03 17:41:25 +01:00
}
	fn work(&self, _timeout: Option<u64>) -> Result<Work> {
		// Proof-of-work mining is not supported on the light client.
		Err(errors::light_unimplemented(None))
	}
	fn submit_work(&self, _nonce: H64, _pow_hash: H256, _mix_hash: H256) -> Result<bool> {
		// Proof-of-work submission is not supported on the light client.
		Err(errors::light_unimplemented(None))
	}
	fn submit_hashrate(&self, _rate: U256, _id: H256) -> Result<bool> {
		// Hashrate reporting is meaningless without mining; unsupported here.
		Err(errors::light_unimplemented(None))
	}
}
// This trait implementation triggers a blanket impl of `EthFilter`.
impl<C, S> Filterable for EthClient<C, S>
where
C: LightChainClient + 'static,
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static
{
2017-03-28 15:42:23 +02:00
fn best_block_number(&self) -> u64 { self.client.chain_info().best_block_number }
fn block_hash(&self, id: BlockId) -> Option<H256> {
self.client.block_hash(id)
2017-03-28 15:42:23 +02:00
}
fn pending_transaction_hashes(&self) -> BTreeSet<H256> {
BTreeSet::new()
2017-03-28 15:42:23 +02:00
}
2017-11-14 11:38:17 +01:00
fn logs(&self, filter: EthcoreFilter) -> BoxFuture<Vec<Log>> {
Box::new(self.fetcher().logs(filter)) as BoxFuture<_>
2017-03-28 15:42:23 +02:00
}
fn pending_logs(&self, _block_number: u64, _filter: &EthcoreFilter) -> Vec<Log> {
Vec::new() // light clients don't mine.
}
fn polls(&self) -> &Mutex<PollManager<SyncPollFilter>> {
2017-03-28 15:42:23 +02:00
&self.polls
2017-02-03 16:20:43 +01:00
}
fn removed_logs(&self, _block_hash: ::ethereum_types::H256, _filter: &EthcoreFilter) -> (Vec<Log>, u64) {
(Default::default(), 0)
}
2017-02-03 16:20:43 +01:00
}
/// Build a `RichBlock` view of the uncle header at `index` within `block`,
/// or `None` when the block has fewer uncles than `index + 1`.
fn extract_uncle_at_index<T: LightChainClient>(block: encoded::Block, index: Index, client: Arc<T>) -> Option<RichBlock> {
	// Bail out early if there is no uncle at the requested position.
	let uncle = block.uncles().into_iter().nth(index.value())?;
	let extra_info = client.engine().extra_info(&uncle);

	let inner = Block {
		hash: Some(uncle.hash()),
		// Size and total difficulty are not derivable from the header alone.
		size: None,
		parent_hash: *uncle.parent_hash(),
		uncles_hash: *uncle.uncles_hash(),
		author: *uncle.author(),
		miner: *uncle.author(),
		state_root: *uncle.state_root(),
		transactions_root: *uncle.transactions_root(),
		number: Some(uncle.number().into()),
		gas_used: *uncle.gas_used(),
		gas_limit: *uncle.gas_limit(),
		logs_bloom: Some(*uncle.log_bloom()),
		timestamp: uncle.timestamp().into(),
		difficulty: *uncle.difficulty(),
		total_difficulty: None,
		receipts_root: *uncle.receipts_root(),
		extra_data: uncle.extra_data().clone().into(),
		seal_fields: uncle.seal().iter().cloned().map(Into::into).collect(),
		// An uncle header carries no nested uncles or transactions.
		uncles: vec![],
		transactions: BlockTransactions::Hashes(vec![]),
	};

	Some(RichBlock { inner, extra_info })
}