// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Eth RPC interface for the light client.

// TODO: remove when complete.
#![allow(unused_imports, unused_variables)]
2017-02-03 16:20:43 +01:00
|
|
|
use std::sync::Arc;
|
|
|
|
|
|
|
|
use jsonrpc_core::Error;
|
|
|
|
use jsonrpc_macros::Trailing;
|
|
|
|
|
2017-02-26 15:05:33 +01:00
|
|
|
use light::cache::Cache as LightDataCache;
|
2017-02-03 16:20:43 +01:00
|
|
|
use light::client::Client as LightClient;
|
2017-02-09 19:17:37 +01:00
|
|
|
use light::{cht, TransactionQueue};
|
2017-02-07 23:25:17 +01:00
|
|
|
use light::on_demand::{request, OnDemand};
|
2017-02-03 16:20:43 +01:00
|
|
|
|
2017-02-03 17:41:25 +01:00
|
|
|
use ethcore::account_provider::{AccountProvider, DappId};
|
2017-02-03 17:53:48 +01:00
|
|
|
use ethcore::basic_account::BasicAccount;
|
2017-02-03 16:20:43 +01:00
|
|
|
use ethcore::encoded;
|
2017-02-26 15:05:33 +01:00
|
|
|
use ethcore::executed::{Executed, ExecutionError};
|
2017-02-03 16:20:43 +01:00
|
|
|
use ethcore::ids::BlockId;
|
2017-03-28 15:42:23 +02:00
|
|
|
use ethcore::filter::Filter as EthcoreFilter;
|
2017-02-26 15:05:33 +01:00
|
|
|
use ethcore::transaction::{Action, SignedTransaction, Transaction as EthTransaction};
|
2017-02-03 16:20:43 +01:00
|
|
|
use ethsync::LightSync;
|
2017-03-22 14:41:46 +01:00
|
|
|
use rlp::UntrustedRlp;
|
2017-02-07 14:45:48 +01:00
|
|
|
use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP};
|
2017-03-11 19:58:15 +01:00
|
|
|
use util::{RwLock, Mutex, Uint, U256};
|
2017-02-03 16:20:43 +01:00
|
|
|
|
2017-02-26 13:48:56 +01:00
|
|
|
use futures::{future, Future, BoxFuture, IntoFuture};
|
2017-02-07 23:25:17 +01:00
|
|
|
use futures::sync::oneshot;
|
2017-02-03 16:20:43 +01:00
|
|
|
|
2017-03-28 15:42:23 +02:00
|
|
|
use v1::impls::eth_filter::Filterable;
|
2017-02-26 15:05:33 +01:00
|
|
|
use v1::helpers::{CallRequest as CRequest, errors, limit_logs, dispatch};
|
2017-03-28 15:42:23 +02:00
|
|
|
use v1::helpers::{PollFilter, PollManager};
|
2017-02-03 16:20:43 +01:00
|
|
|
use v1::helpers::block_import::is_major_importing;
|
|
|
|
use v1::traits::Eth;
|
|
|
|
use v1::types::{
|
|
|
|
RichBlock, Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo,
|
|
|
|
Transaction, CallRequest, Index, Filter, Log, Receipt, Work,
|
|
|
|
H64 as RpcH64, H256 as RpcH256, H160 as RpcH160, U256 as RpcU256,
|
|
|
|
};
|
|
|
|
use v1::metadata::Metadata;
|
|
|
|
|
2017-02-03 17:53:48 +01:00
|
|
|
use util::Address;
|
|
|
|
|
2017-03-28 15:42:23 +02:00
|
|
|
/// Light client `ETH` (and filter) RPC.
pub struct EthClient {
	// handle to the sync service; provides network context for on-demand requests.
	sync: Arc<LightSync>,
	// the light client: local header chain and CHT roots.
	client: Arc<LightClient>,
	// service for fetching data (headers, bodies, receipts, proofs) from the network.
	on_demand: Arc<OnDemand>,
	// local queue of pending transactions.
	transaction_queue: Arc<RwLock<TransactionQueue>>,
	// account management (used by `accounts`).
	accounts: Arc<AccountProvider>,
	// shared cache of light-client data (e.g. the gas price corpus).
	cache: Arc<Mutex<LightDataCache>>,
	// per-instance filter poll state; deliberately NOT shared between clones.
	polls: Mutex<PollManager<PollFilter>>,
}
|
|
|
|
|
|
|
|
// Manual `Clone` impl: everything is a shared handle except `polls`,
// which must be fresh per instance (filter poll IDs are per-client state).
impl Clone for EthClient {
	fn clone(&self) -> Self {
		// each instance should have its own poll manager.
		EthClient {
			sync: self.sync.clone(),
			client: self.client.clone(),
			on_demand: self.on_demand.clone(),
			transaction_queue: self.transaction_queue.clone(),
			accounts: self.accounts.clone(),
			cache: self.cache.clone(),
			polls: Mutex::new(PollManager::new()),
		}
	}
}
|
|
|
|
|
2017-02-07 23:25:17 +01:00
|
|
|
// helper for internal error: on demand sender cancelled.
// Maps a dropped oneshot (the on-demand service gave up on the request)
// to an RPC-level internal error.
fn err_premature_cancel(_cancel: oneshot::Canceled) -> Error {
	errors::internal("on-demand sender prematurely cancelled", "")
}
|
|
|
|
|
2017-02-26 15:05:33 +01:00
|
|
|
/// Outcome of a proved transaction execution: either the execution record
/// or an EVM-level execution error (distinct from the RPC `Error` channel).
type ExecutionResult = Result<Executed, ExecutionError>;
|
|
|
|
|
2017-02-03 16:20:43 +01:00
|
|
|
impl EthClient {
	/// Create a new `EthClient` with a handle to the light sync instance, client,
	/// and on-demand request service, which is assumed to be attached as a handler.
	pub fn new(
		sync: Arc<LightSync>,
		client: Arc<LightClient>,
		on_demand: Arc<OnDemand>,
		transaction_queue: Arc<RwLock<TransactionQueue>>,
		accounts: Arc<AccountProvider>,
		cache: Arc<Mutex<LightDataCache>>,
	) -> Self {
		EthClient {
			sync: sync,
			client: client,
			on_demand: on_demand,
			transaction_queue: transaction_queue,
			accounts: accounts,
			cache: cache,
			// fresh poll state per instance; see the manual `Clone` impl.
			polls: Mutex::new(PollManager::new()),
		}
	}

	/// Get a block header from the on demand service or client, or error.
	fn header(&self, id: BlockId) -> BoxFuture<encoded::Header, Error> {
		// fast path: header already stored locally (also covers latest/earliest/pending).
		if let Some(h) = self.client.block_header(id) {
			return future::ok(h).boxed()
		}

		let maybe_future = match id {
			BlockId::Number(n) => {
				// not stored locally: prove number -> hash via the covering CHT,
				// then fetch the header by hash. Two sequential on-demand requests.
				let cht_root = cht::block_to_cht_number(n).and_then(|cn| self.client.cht_root(cn as usize));
				match cht_root {
					// no CHT covers this number: the block is unknown to us.
					None => return future::err(errors::unknown_block()).boxed(),
					Some(root) => {
						let req = request::HeaderProof::new(n, root)
							.expect("only fails for 0; client always stores genesis; client already queried; qed");

						let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());
						self.sync.with_context(|ctx| {
							let fut = self.on_demand.hash_by_number(ctx, req)
								.map(request::HeaderByHash)
								.map_err(err_premature_cancel);

							fut.and_then(move |req| {
								// second round-trip; the network may have gone away in between.
								match sync.with_context(|ctx| on_demand.header_by_hash(ctx, req)) {
									Some(fut) => fut.map_err(err_premature_cancel).boxed(),
									None => future::err(errors::network_disabled()).boxed(),
								}
							}).boxed()
						})
					}
				}
			}
			BlockId::Hash(h) => {
				// by hash: single on-demand request.
				self.sync.with_context(|ctx|
					self.on_demand.header_by_hash(ctx, request::HeaderByHash(h))
						.then(|res| future::done(match res {
							Ok(h) => Ok(h),
							Err(e) => Err(err_premature_cancel(e)),
						}))
						.boxed()
				)
			}
			_ => None, // latest, earliest, and pending will have all already returned.
		};

		// `with_context` returns `None` when the network is unavailable.
		match maybe_future {
			Some(recv) => recv,
			None => future::err(errors::network_disabled()).boxed()
		}
	}

	// helper for getting account info at a given block.
	// `None` indicates the account doesn't exist at the given block.
	fn account(&self, address: Address, id: BlockId) -> BoxFuture<Option<BasicAccount>, Error> {
		let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());

		// fetch (or prove) the header first, then prove the account against its state root.
		self.header(id).and_then(move |header| {
			let maybe_fut = sync.with_context(|ctx| on_demand.account(ctx, request::Account {
				header: header,
				address: address,
			}));

			match maybe_fut {
				Some(fut) => fut.map_err(err_premature_cancel).boxed(),
				None => future::err(errors::network_disabled()).boxed(),
			}
		}).boxed()
	}

	// helper for getting proved execution.
	fn proved_execution(&self, req: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<ExecutionResult, Error> {
		const DEFAULT_GAS_PRICE: U256 = U256([0, 0, 0, 21_000_000]);

		let (sync, on_demand, client) = (self.sync.clone(), self.on_demand.clone(), self.client.clone());
		let req: CRequest = req.into();
		let id = num.0.into();

		let from = req.from.unwrap_or(Address::zero());
		// nonce: use the caller's if given, otherwise look it up on-chain
		// (`None` from `account` means the sender doesn't exist at that block).
		let nonce_fut = match req.nonce {
			Some(nonce) => future::ok(Some(nonce)).boxed(),
			None => self.account(from, id).map(|acc| acc.map(|a| a.nonce)).boxed(),
		};

		// gas price: use the caller's if given, otherwise the median of the
		// sampled corpus, falling back to DEFAULT_GAS_PRICE on an empty corpus.
		let gas_price_fut = match req.gas_price {
			Some(price) => future::ok(price).boxed(),
			None => dispatch::fetch_gas_price_corpus(
				self.sync.clone(),
				self.client.clone(),
				self.on_demand.clone(),
				self.cache.clone(),
			).map(|corp| match corp.median() {
				Some(median) => *median,
				None => DEFAULT_GAS_PRICE,
			}).boxed()
		};

		// if nonce resolves, this should too since it'll be in the LRU-cache.
		let header_fut = self.header(id);

		// fetch missing transaction fields from the network.
		nonce_fut.join(gas_price_fut).and_then(move |(nonce, gas_price)| {
			let action = req.to.map_or(Action::Create, Action::Call);
			let gas = req.gas.unwrap_or(U256::from(10_000_000)); // better gas amount?
			let value = req.value.unwrap_or_else(U256::zero);
			let data = req.data.map_or_else(Vec::new, |d| d.to_vec());

			future::done(match nonce {
				Some(n) => Ok(EthTransaction {
					nonce: n,
					action: action,
					gas: gas,
					gas_price: gas_price,
					value: value,
					data: data,
				}.fake_sign(from)),
				None => Err(errors::unknown_block()),
			})
		}).join(header_fut).and_then(move |(tx, hdr)| {
			// then request proved execution.
			// TODO: get last-hashes from network.
			let env_info = match client.env_info(id) {
				Some(env_info) => env_info,
				_ => return future::err(errors::unknown_block()).boxed(),
			};

			let request = request::TransactionProof {
				tx: tx,
				header: hdr,
				env_info: env_info,
				engine: client.engine().clone(),
			};

			let proved_future = sync.with_context(move |ctx| {
				on_demand.transaction_proof(ctx, request).map_err(err_premature_cancel).boxed()
			});

			match proved_future {
				Some(fut) => fut.boxed(),
				None => future::err(errors::network_disabled()).boxed(),
			}
		}).boxed()
	}

	// get a block itself. fails on unknown block ID.
	fn block(&self, id: BlockId) -> BoxFuture<encoded::Block, Error> {
		let (on_demand, sync) = (self.on_demand.clone(), self.sync.clone());

		// resolve the header first, then request the matching body from the network.
		self.header(id).map(request::Body::new).and_then(move |req| {
			match sync.with_context(move |ctx| on_demand.block(ctx, req)) {
				Some(fut) => fut.map_err(err_premature_cancel).boxed(),
				None => future::err(errors::network_disabled()).boxed(),
			}
		}).boxed()
	}

	// get a "rich" block structure. Fails on unknown block.
	fn rich_block(&self, id: BlockId, include_txs: bool) -> BoxFuture<RichBlock, Error> {
		let (on_demand, sync) = (self.on_demand.clone(), self.sync.clone());
		let (client, engine) = (self.client.clone(), self.client.engine().clone());

		// helper for filling out a rich block once we've got a block and a score.
		let fill_rich = move |block: encoded::Block, score: Option<U256>| {
			let header = block.decode_header();
			let extra_info = engine.extra_info(&header);
			RichBlock {
				block: Block {
					hash: Some(header.hash().into()),
					size: Some(block.rlp().as_raw().len().into()),
					parent_hash: header.parent_hash().clone().into(),
					uncles_hash: header.uncles_hash().clone().into(),
					author: header.author().clone().into(),
					miner: header.author().clone().into(),
					state_root: header.state_root().clone().into(),
					transactions_root: header.transactions_root().clone().into(),
					receipts_root: header.receipts_root().clone().into(),
					number: Some(header.number().into()),
					gas_used: header.gas_used().clone().into(),
					gas_limit: header.gas_limit().clone().into(),
					logs_bloom: header.log_bloom().clone().into(),
					timestamp: header.timestamp().into(),
					difficulty: header.difficulty().clone().into(),
					total_difficulty: score.map(Into::into),
					seal_fields: header.seal().into_iter().cloned().map(Into::into).collect(),
					uncles: block.uncle_hashes().into_iter().map(Into::into).collect(),
					transactions: match include_txs {
						true => BlockTransactions::Full(block.view().localized_transactions().into_iter().map(Into::into).collect()),
						false => BlockTransactions::Hashes(block.transaction_hashes().into_iter().map(Into::into).collect()),
					},
					extra_data: Bytes::new(header.extra_data().to_vec()),
				},
				extra_info: extra_info
			}
		};

		// get the block itself.
		self.block(id).and_then(move |block| {
			// then fetch the total difficulty (this is much easier after getting the block).
			match client.score(id) {
				Some(score) => future::ok(fill_rich(block, Some(score))).boxed(),
				None => {
					// make a CHT request to fetch the chain score.
					let req = cht::block_to_cht_number(block.number())
						.and_then(|num| client.cht_root(num as usize))
						.and_then(|root| request::HeaderProof::new(block.number(), root));

					let req = match req {
						Some(req) => req,
						None => {
							// somehow the genesis block slipped past other checks.
							// return it now.
							let score = client.block_header(BlockId::Number(0))
								.expect("genesis always stored; qed")
								.difficulty();

							return future::ok(fill_rich(block, Some(score))).boxed()
						}
					};

					// three possible outcomes:
					// - network is down.
					// - we get a score, but our hash is non-canonical.
					// - we get a score, and our hash is canonical.
					let maybe_fut = sync.with_context(move |ctx| on_demand.hash_and_score_by_number(ctx, req));
					match maybe_fut {
						Some(fut) => fut.map(move |(hash, score)| {
							// only attach the score if the proved hash matches ours.
							let score = if hash == block.hash() {
								Some(score)
							} else {
								None
							};

							fill_rich(block, score)
						}).map_err(err_premature_cancel).boxed(),
						None => return future::err(errors::network_disabled()).boxed(),
					}
				}
			}
		}).boxed()
	}
}
|
|
|
|
|
|
|
|
impl Eth for EthClient {
	type Metadata = Metadata;

	fn protocol_version(&self) -> Result<String, Error> {
		Ok(format!("{}", ::light::net::MAX_PROTOCOL_VERSION))
	}

	fn syncing(&self) -> Result<SyncStatus, Error> {
		if self.sync.is_major_importing() {
			let chain_info = self.client.chain_info();
			let current_block = U256::from(chain_info.best_block_number);
			// if no peer has reported a highest block yet, report our own best.
			let highest_block = self.sync.highest_block().map(U256::from)
				.unwrap_or_else(|| current_block.clone());

			Ok(SyncStatus::Info(SyncInfo {
				starting_block: U256::from(self.sync.start_block()).into(),
				current_block: current_block.into(),
				highest_block: highest_block.into(),
				// warp sync does not apply to light clients.
				warp_chunks_amount: None,
				warp_chunks_processed: None,
			}))
		} else {
			Ok(SyncStatus::None)
		}
	}

	// light clients don't mine, so the author is the zero address.
	fn author(&self, _meta: Self::Metadata) -> BoxFuture<RpcH160, Error> {
		future::ok(Default::default()).boxed()
	}

	fn is_mining(&self) -> Result<bool, Error> {
		Ok(false)
	}

	fn hashrate(&self) -> Result<RpcU256, Error> {
		Ok(Default::default())
	}

	// median of the cached gas price corpus; zero when no corpus is available.
	fn gas_price(&self) -> Result<RpcU256, Error> {
		Ok(self.cache.lock().gas_price_corpus()
			.and_then(|c| c.median().cloned())
			.map(RpcU256::from)
			.unwrap_or_else(Default::default))
	}

	fn accounts(&self, meta: Metadata) -> BoxFuture<Vec<RpcH160>, Error> {
		let dapp: DappId = meta.dapp_id().into();

		let accounts = self.accounts
			.note_dapp_used(dapp.clone())
			.and_then(|_| self.accounts.dapp_addresses(dapp))
			.map_err(|e| errors::account("Could not fetch accounts.", e))
			.map(|accs| accs.into_iter().map(Into::<RpcH160>::into).collect());

		future::done(accounts).boxed()
	}

	fn block_number(&self) -> Result<RpcU256, Error> {
		Ok(self.client.chain_info().best_block_number.into())
	}

	// balance of 0 is reported for non-existent accounts.
	fn balance(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> {
		self.account(address.into(), num.0.into())
			.map(|acc| acc.map_or(0.into(), |a| a.balance).into()).boxed()
	}

	fn storage_at(&self, _address: RpcH160, _key: RpcU256, _num: Trailing<BlockNumber>) -> BoxFuture<RpcH256, Error> {
		future::err(errors::unimplemented(None)).boxed()
	}

	fn block_by_hash(&self, hash: RpcH256, include_txs: bool) -> BoxFuture<Option<RichBlock>, Error> {
		self.rich_block(BlockId::Hash(hash.into()), include_txs).map(Some).boxed()
	}

	fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture<Option<RichBlock>, Error> {
		self.rich_block(num.into(), include_txs).map(Some).boxed()
	}

	// nonce of 0 is reported for non-existent accounts.
	fn transaction_count(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> {
		self.account(address.into(), num.0.into())
			.map(|acc| acc.map_or(0.into(), |a| a.nonce).into()).boxed()
	}

	// NOTE(review): the four `block_*_count_*` methods below are near-identical;
	// they could be factored into a shared helper parameterized on the root check.
	fn block_transaction_count_by_hash(&self, hash: RpcH256) -> BoxFuture<Option<RpcU256>, Error> {
		let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());

		self.header(BlockId::Hash(hash.into())).and_then(move |hdr| {
			// an empty transactions trie means zero transactions — no body fetch needed.
			if hdr.transactions_root() == SHA3_NULL_RLP {
				future::ok(Some(U256::from(0).into())).boxed()
			} else {
				sync.with_context(|ctx| on_demand.block(ctx, request::Body::new(hdr)))
					.map(|x| x.map(|b| Some(U256::from(b.transactions_count()).into())))
					.map(|x| x.map_err(err_premature_cancel).boxed())
					.unwrap_or_else(|| future::err(errors::network_disabled()).boxed())
			}
		}).boxed()
	}

	fn block_transaction_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<RpcU256>, Error> {
		let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());

		self.header(num.into()).and_then(move |hdr| {
			// an empty transactions trie means zero transactions — no body fetch needed.
			if hdr.transactions_root() == SHA3_NULL_RLP {
				future::ok(Some(U256::from(0).into())).boxed()
			} else {
				sync.with_context(|ctx| on_demand.block(ctx, request::Body::new(hdr)))
					.map(|x| x.map(|b| Some(U256::from(b.transactions_count()).into())))
					.map(|x| x.map_err(err_premature_cancel).boxed())
					.unwrap_or_else(|| future::err(errors::network_disabled()).boxed())
			}
		}).boxed()
	}

	fn block_uncles_count_by_hash(&self, hash: RpcH256) -> BoxFuture<Option<RpcU256>, Error> {
		let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());

		self.header(BlockId::Hash(hash.into())).and_then(move |hdr| {
			// the empty-list hash means no uncles — no body fetch needed.
			if hdr.uncles_hash() == SHA3_EMPTY_LIST_RLP {
				future::ok(Some(U256::from(0).into())).boxed()
			} else {
				sync.with_context(|ctx| on_demand.block(ctx, request::Body::new(hdr)))
					.map(|x| x.map(|b| Some(U256::from(b.uncles_count()).into())))
					.map(|x| x.map_err(err_premature_cancel).boxed())
					.unwrap_or_else(|| future::err(errors::network_disabled()).boxed())
			}
		}).boxed()
	}

	fn block_uncles_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<RpcU256>, Error> {
		let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());

		self.header(num.into()).and_then(move |hdr| {
			// the empty-list hash means no uncles — no body fetch needed.
			if hdr.uncles_hash() == SHA3_EMPTY_LIST_RLP {
				future::ok(Some(U256::from(0).into())).boxed()
			} else {
				sync.with_context(|ctx| on_demand.block(ctx, request::Body::new(hdr)))
					.map(|x| x.map(|b| Some(U256::from(b.uncles_count()).into())))
					.map(|x| x.map_err(err_premature_cancel).boxed())
					.unwrap_or_else(|| future::err(errors::network_disabled()).boxed())
			}
		}).boxed()
	}

	fn code_at(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<Bytes, Error> {
		future::err(errors::unimplemented(None)).boxed()
	}

	fn send_raw_transaction(&self, raw: Bytes) -> Result<RpcH256, Error> {
		let best_header = self.client.best_block_header().decode();

		// decode, verify against the current best header, then queue locally.
		UntrustedRlp::new(&raw.into_vec()).as_val()
			.map_err(errors::from_rlp_error)
			.and_then(|tx| {
				self.client.engine().verify_transaction_basic(&tx, &best_header)
					.map_err(errors::from_transaction_error)?;

				let signed = SignedTransaction::new(tx).map_err(errors::from_transaction_error)?;
				let hash = signed.hash();

				self.transaction_queue.write().import(signed.into())
					.map(|_| hash)
					.map_err(Into::into)
					.map_err(errors::from_transaction_error)
			})
			.map(Into::into)
	}

	fn submit_transaction(&self, raw: Bytes) -> Result<RpcH256, Error> {
		self.send_raw_transaction(raw)
	}

	fn call(&self, req: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<Bytes, Error> {
		self.proved_execution(req, num).and_then(|res| {
			match res {
				Ok(exec) => Ok(exec.output.into()),
				Err(e) => Err(errors::execution(e)),
			}
		}).boxed()
	}

	fn estimate_gas(&self, req: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> {
		// TODO: binary chop for more accurate estimates.
		self.proved_execution(req, num).and_then(|res| {
			match res {
				Ok(exec) => Ok((exec.refunded + exec.gas_used).into()),
				Err(e) => Err(errors::execution(e)),
			}
		}).boxed()
	}

	fn transaction_by_hash(&self, hash: RpcH256) -> Result<Option<Transaction>, Error> {
		Err(errors::unimplemented(None))
	}

	fn transaction_by_block_hash_and_index(&self, hash: RpcH256, idx: Index) -> Result<Option<Transaction>, Error> {
		Err(errors::unimplemented(None))
	}

	fn transaction_by_block_number_and_index(&self, num: BlockNumber, idx: Index) -> Result<Option<Transaction>, Error> {
		Err(errors::unimplemented(None))
	}

	fn transaction_receipt(&self, hash: RpcH256) -> Result<Option<Receipt>, Error> {
		Err(errors::unimplemented(None))
	}

	fn uncle_by_block_hash_and_index(&self, hash: RpcH256, idx: Index) -> Result<Option<RichBlock>, Error> {
		Err(errors::unimplemented(None))
	}

	fn uncle_by_block_number_and_index(&self, num: BlockNumber, idx: Index) -> Result<Option<RichBlock>, Error> {
		Err(errors::unimplemented(None))
	}

	fn compilers(&self) -> Result<Vec<String>, Error> {
		Err(errors::deprecated("Compilation functionality is deprecated.".to_string()))
	}

	fn compile_lll(&self, _: String) -> Result<Bytes, Error> {
		Err(errors::deprecated("Compilation of LLL via RPC is deprecated".to_string()))
	}

	fn compile_serpent(&self, _: String) -> Result<Bytes, Error> {
		Err(errors::deprecated("Compilation of Serpent via RPC is deprecated".to_string()))
	}

	fn compile_solidity(&self, _: String) -> Result<Bytes, Error> {
		Err(errors::deprecated("Compilation of Solidity via RPC is deprecated".to_string()))
	}

	// delegates to the `Filterable` impl, then applies the requested limit.
	fn logs(&self, filter: Filter) -> BoxFuture<Vec<Log>, Error> {
		let limit = filter.limit;

		Filterable::logs(self, filter.into())
			.map(move|logs| limit_logs(logs, limit))
			.boxed()
	}

	// light clients don't mine: the PoW work endpoints are unimplemented.
	fn work(&self, _timeout: Trailing<u64>) -> Result<Work, Error> {
		Err(errors::light_unimplemented(None))
	}

	fn submit_work(&self, _nonce: RpcH64, _pow_hash: RpcH256, _mix_hash: RpcH256) -> Result<bool, Error> {
		Err(errors::light_unimplemented(None))
	}

	fn submit_hashrate(&self, _rate: RpcU256, _id: RpcH256) -> Result<bool, Error> {
		Err(errors::light_unimplemented(None))
	}
}
|
|
|
|
|
|
|
|
// This trait implementation triggers a blanket impl of `EthFilter`.
impl Filterable for EthClient {
	fn best_block_number(&self) -> u64 { self.client.chain_info().best_block_number }

	fn block_hash(&self, id: BlockId) -> Option<RpcH256> {
		self.client.block_hash(id).map(Into::into)
	}

	// light clients have no local pending block, so no pending tx hashes.
	fn pending_transactions_hashes(&self, _block_number: u64) -> Vec<::util::H256> {
		Vec::new()
	}

	fn logs(&self, filter: EthcoreFilter) -> BoxFuture<Vec<Log>, Error> {
		use std::collections::BTreeMap;

		use futures::stream::{self, Stream};
		use util::H2048;

		// early exit for "to" block before "from" block.
		match filter.from_block {
			BlockId::Latest | BlockId::Pending => {
				let best = self.client.best_block_header();
				let chain_info = self.client.chain_info();
				// chain moved between reads: bail out with no logs rather than
				// filter over an inconsistent view.
				if best.number() != chain_info.best_block_number || best.hash() != chain_info.best_block_hash {
					return future::ok(Vec::new()).boxed()
				}
			}
			_ => {}
		}

		let maybe_future = self.sync.with_context(move |ctx| {
			// find all headers which match the filter, and fetch the receipts for each one.
			// match them with their numbers for easy sorting later.
			let bit_combos = filter.bloom_possibilities();
			let receipts_futures: Vec<_> = self.client.ancestry_iter(filter.to_block)
				.take_while(|ref hdr| BlockId::Number(hdr.number()) != filter.from_block)
				.take_while(|ref hdr| BlockId::Hash(hdr.hash()) != filter.from_block)
				// pre-filter by header bloom: keep only headers whose bloom
				// contains at least one of the possible filter blooms.
				.filter(|ref hdr| {
					let hdr_bloom = hdr.log_bloom();
					bit_combos.iter().find(|&bloom| hdr_bloom & *bloom == *bloom).is_some()
				})
				.map(|hdr| (hdr.number(), request::BlockReceipts(hdr)))
				.map(|(num, req)| self.on_demand.block_receipts(ctx, req).map(move |x| (num, x)))
				.collect();

			// as the receipts come in, find logs within them which match the filter.
			// insert them into a BTreeMap to maintain order by number and block index.
			stream::futures_unordered(receipts_futures)
				.fold(BTreeMap::new(), move |mut matches, (num, receipts)| {
					for (block_index, log) in receipts.into_iter().flat_map(|r| r.logs).enumerate() {
						if filter.matches(&log) {
							matches.insert((num, block_index), log.into());
						}
					}
					future::ok(matches)
				}) // and then collect them into a vector.
				.map(|matches| matches.into_iter().map(|(_, v)| v).collect())
				.map_err(err_premature_cancel)
		});

		match maybe_future {
			Some(fut) => fut.boxed(),
			None => future::err(errors::network_disabled()).boxed(),
		}
	}

	fn pending_logs(&self, _block_number: u64, _filter: &EthcoreFilter) -> Vec<Log> {
		Vec::new() // light clients don't mine.
	}

	fn polls(&self) -> &Mutex<PollManager<PollFilter>> {
		&self.polls
	}
}
|