From da3dd6572676d0212413f3bff5a7dfae1eef3e5c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 28 Mar 2017 11:23:43 +0200 Subject: [PATCH 01/24] block_hash method for LightChainClient --- ethcore/light/src/client/header_chain.rs | 16 ++++++++++++++++ ethcore/light/src/client/mod.rs | 12 ++++++++++++ 2 files changed, 28 insertions(+) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 1c218204b..3edbf7171 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -355,6 +355,22 @@ impl HeaderChain { } } + /// Get a block's hash by ID. In the case of query by number, only canonical results + /// will be returned. + pub fn block_hash(&self, id: BlockId) -> Option { + match id { + BlockId::Earliest => Some(self.genesis_hash()), + BlockId::Hash(hash) => Some(hash), + BlockId::Number(num) => { + if self.best_block.read().number < num { return None } + self.candidates.read().get(&num).map(|entry| entry.canonical_hash) + } + BlockId::Latest | BlockId::Pending => { + Some(self.best_block.read().hash) + } + } + } + /// Get a block header. In the case of query by number, only canonical blocks /// will be returned. pub fn block_header(&self, id: BlockId) -> Option { diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index d294053e1..eb1c5cdcd 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -65,6 +65,9 @@ pub trait LightChainClient: Send + Sync { /// parent queued prior. fn queue_header(&self, header: Header) -> Result; + /// Attempt to get a block hash by block id. + fn block_hash(&self, id: BlockId) -> Option; + /// Attempt to get block header by block id. fn block_header(&self, id: BlockId) -> Option; @@ -181,6 +184,11 @@ impl Client { self.queue.queue_info() } + /// Attempt to get a block hash by block id. 
+ pub fn block_hash(&self, id: BlockId) -> Option { + self.chain.block_hash(id) + } + /// Get a block header by Id. pub fn block_header(&self, id: BlockId) -> Option { self.chain.block_header(id) @@ -308,6 +316,10 @@ impl LightChainClient for Client { self.import_header(header) } + fn block_hash(&self, id: BlockId) -> Option { + Client::block_hash(self, id) + } + fn block_header(&self, id: BlockId) -> Option { Client::block_header(self, id) } From a6b6c312b8da9ee3332a41408b18034d32c8c650 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 28 Mar 2017 14:19:21 +0200 Subject: [PATCH 02/24] abstraction and futures-based eth_filter --- parity/rpc_apis.rs | 2 +- rpc/src/v1/impls/eth_filter.rs | 170 ++++++++++++++++++++------------- rpc/src/v1/tests/mocked/eth.rs | 2 +- rpc/src/v1/traits/eth.rs | 8 +- 4 files changed, 112 insertions(+), 70 deletions(-) diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index 5cfb28474..01a649469 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -216,7 +216,7 @@ impl Dependencies for FullDependencies { ); handler.extend_with(client.to_delegate()); - let filter_client = EthFilterClient::new(&self.client, &self.miner); + let filter_client = EthFilterClient::new(self.client.clone(), self.miner.clone()); handler.extend_with(filter_client.to_delegate()); add_signing_methods!(EthSigning, handler, self); diff --git a/rpc/src/v1/impls/eth_filter.rs b/rpc/src/v1/impls/eth_filter.rs index cf3398498..9a7281243 100644 --- a/rpc/src/v1/impls/eth_filter.rs +++ b/rpc/src/v1/impls/eth_filter.rs @@ -16,89 +16,131 @@ //! 
Eth Filter RPC implementation -use std::sync::{Arc, Weak}; +use std::sync::Arc; use std::collections::HashSet; + use jsonrpc_core::*; use ethcore::miner::MinerService; use ethcore::filter::Filter as EthcoreFilter; use ethcore::client::{BlockChainClient, BlockId}; -use util::Mutex; +use util::{H256, Mutex}; + +use futures::{future, Future, BoxFuture}; + use v1::traits::EthFilter; use v1::types::{BlockNumber, Index, Filter, FilterChanges, Log, H256 as RpcH256, U256 as RpcU256}; use v1::helpers::{PollFilter, PollManager, limit_logs}; use v1::impls::eth::pending_logs; +/// Something which provides data that can be filtered over. +pub trait Filterable { + /// Current best block number. + fn best_block_number(&self) -> u64; + + /// Get a block hash by block id. + fn block_hash(&self, id: BlockId) -> Option; + + /// pending transaction hashes at the given block. + fn pending_transactions_hashes(&self, block_number: u64) -> Vec; + + /// Get logs that match the given filter. + fn logs(&self, filter: EthcoreFilter) -> BoxFuture, Error>; + + /// Get logs from the pending block. + fn pending_logs(&self, block_number: u64, filter: &EthcoreFilter) -> Vec; + + /// Get a reference to the poll manager. + fn polls(&self) -> &Mutex>; +} + /// Eth filter rpc implementation. pub struct EthFilterClient where C: BlockChainClient, M: MinerService { - client: Weak, - miner: Weak, + client: Arc, + miner: Arc, polls: Mutex>, } -impl EthFilterClient where - C: BlockChainClient, - M: MinerService { - +impl EthFilterClient where C: BlockChainClient, M: MinerService { /// Creates new Eth filter client. 
- pub fn new(client: &Arc, miner: &Arc) -> Self { + pub fn new(client: Arc, miner: Arc) -> Self { EthFilterClient { - client: Arc::downgrade(client), - miner: Arc::downgrade(miner), + client: client, + miner: miner, polls: Mutex::new(PollManager::new()), } } } -impl EthFilter for EthFilterClient - where C: BlockChainClient + 'static, M: MinerService + 'static -{ +impl Filterable for EthFilterClient where C: BlockChainClient, M: MinerService { + fn best_block_number(&self) -> u64 { + self.client.chain_info().best_block_number + } + + fn block_hash(&self, id: BlockId) -> Option { + self.client.block_hash(id).map(Into::into) + } + + fn pending_transactions_hashes(&self, best: u64) -> Vec { + self.miner.pending_transactions_hashes(best) + } + + fn logs(&self, filter: EthcoreFilter) -> BoxFuture, Error> { + future::ok(self.client.logs(filter).into_iter().map(Into::into).collect()).boxed() + } + + fn pending_logs(&self, block_number: u64, filter: &EthcoreFilter) -> Vec { + pending_logs(&*self.miner, block_number, filter) + } + + fn polls(&self) -> &Mutex> { &self.polls } +} + +impl EthFilter for T { fn new_filter(&self, filter: Filter) -> Result { - let mut polls = self.polls.lock(); - let block_number = take_weak!(self.client).chain_info().best_block_number; + let mut polls = self.polls().lock(); + let block_number = self.best_block_number(); let id = polls.create_poll(PollFilter::Logs(block_number, Default::default(), filter)); Ok(id.into()) } fn new_block_filter(&self) -> Result { - let mut polls = self.polls.lock(); - let id = polls.create_poll(PollFilter::Block(take_weak!(self.client).chain_info().best_block_number)); + let mut polls = self.polls().lock(); + let id = polls.create_poll(PollFilter::Block(self.best_block_number())); Ok(id.into()) } fn new_pending_transaction_filter(&self) -> Result { - let mut polls = self.polls.lock(); - let best_block = take_weak!(self.client).chain_info().best_block_number; - let pending_transactions = 
take_weak!(self.miner).pending_transactions_hashes(best_block); + let mut polls = self.polls().lock(); + let best_block = self.best_block_number(); + let pending_transactions = self.pending_transactions_hashes(best_block); let id = polls.create_poll(PollFilter::PendingTransaction(pending_transactions)); Ok(id.into()) } - fn filter_changes(&self, index: Index) -> Result { - let client = take_weak!(self.client); - let mut polls = self.polls.lock(); + fn filter_changes(&self, index: Index) -> BoxFuture { + let mut polls = self.polls().lock(); match polls.poll_mut(&index.value()) { - None => Ok(FilterChanges::Empty), + None => future::ok(FilterChanges::Empty).boxed(), Some(filter) => match *filter { PollFilter::Block(ref mut block_number) => { // + 1, cause we want to return hashes including current block hash. - let current_number = client.chain_info().best_block_number + 1; + let current_number = self.best_block_number() + 1; let hashes = (*block_number..current_number).into_iter() .map(BlockId::Number) - .filter_map(|id| client.block_hash(id)) - .map(Into::into) + .filter_map(|id| self.block_hash(id)) .collect::>(); *block_number = current_number; - Ok(FilterChanges::Hashes(hashes)) + future::ok(FilterChanges::Hashes(hashes)).boxed() }, PollFilter::PendingTransaction(ref mut previous_hashes) => { // get hashes of pending transactions - let best_block = take_weak!(self.client).chain_info().best_block_number; - let current_hashes = take_weak!(self.miner).pending_transactions_hashes(best_block); + let best_block = self.best_block_number(); + let current_hashes = self.pending_transactions_hashes(best_block); let new_hashes = { @@ -117,11 +159,11 @@ impl EthFilter for EthFilterClient *previous_hashes = current_hashes; // return new hashes - Ok(FilterChanges::Hashes(new_hashes)) + future::ok(FilterChanges::Hashes(new_hashes)).boxed() }, PollFilter::Logs(ref mut block_number, ref mut previous_logs, ref filter) => { // retrive the current block number - let current_number = 
client.chain_info().best_block_number; + let current_number = self.best_block_number(); // check if we need to check pending hashes let include_pending = filter.to_block == Some(BlockNumber::Pending); @@ -131,16 +173,9 @@ impl EthFilter for EthFilterClient filter.from_block = BlockId::Number(*block_number); filter.to_block = BlockId::Latest; - // retrieve logs in range from_block..min(BlockId::Latest..to_block) - let mut logs = client.logs(filter.clone()) - .into_iter() - .map(From::from) - .collect::>(); - - // additionally retrieve pending logs - if include_pending { - let best_block = take_weak!(self.client).chain_info().best_block_number; - let pending_logs = pending_logs(&*take_weak!(self.miner), best_block, &filter); + // retrieve pending logs + let pending = if include_pending { + let pending_logs = self.pending_logs(current_number, &filter); // remove logs about which client was already notified about let new_pending_logs: Vec<_> = pending_logs.iter() @@ -151,49 +186,56 @@ impl EthFilter for EthFilterClient // save all logs retrieved by client *previous_logs = pending_logs.into_iter().collect(); - // append logs array with new pending logs - logs.extend(new_pending_logs); - } - - let logs = limit_logs(logs, filter.limit); + new_pending_logs + } else { + Vec::new() + }; // save the number of the next block as a first block from which // we want to get logs *block_number = current_number + 1; - Ok(FilterChanges::Logs(logs)) + // retrieve logs in range from_block..min(BlockId::Latest..to_block) + let limit = filter.limit; + self.logs(filter) + .map(move |mut logs| { logs.extend(pending); logs }) // append fetched pending logs + .map(move |logs| limit_logs(logs, limit)) // limit the logs + .map(FilterChanges::Logs) + .boxed() } } } } - fn filter_logs(&self, index: Index) -> Result, Error> { - let mut polls = self.polls.lock(); + fn filter_logs(&self, index: Index) -> BoxFuture, Error> { + let mut polls = self.polls().lock(); match polls.poll(&index.value()) { 
Some(&PollFilter::Logs(ref _block_number, ref _previous_log, ref filter)) => { let include_pending = filter.to_block == Some(BlockNumber::Pending); let filter: EthcoreFilter = filter.clone().into(); - let mut logs = take_weak!(self.client).logs(filter.clone()) - .into_iter() - .map(From::from) - .collect::>(); - if include_pending { - let best_block = take_weak!(self.client).chain_info().best_block_number; - logs.extend(pending_logs(&*take_weak!(self.miner), best_block, &filter)); - } + // fetch pending logs. + let pending = if include_pending { + let best_block = self.best_block_number(); + self.pending_logs(best_block, &filter) + } else { + Vec::new() + }; - let logs = limit_logs(logs, filter.limit); - - Ok(logs) + // retrieve logs asynchronously, appending pending logs. + let limit = filter.limit; + self.logs(filter) + .map(move |mut logs| { logs.extend(pending); logs }) + .map(move |logs| limit_logs(logs, limit)) + .boxed() }, // just empty array - _ => Ok(Vec::new()), + _ => future::ok(Vec::new()).boxed() } } fn uninstall_filter(&self, index: Index) -> Result { - self.polls.lock().remove_poll(&index.value()); + self.polls().lock().remove_poll(&index.value()); Ok(true) } } diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index dfd64d38d..8f05ed6d4 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -89,7 +89,7 @@ impl EthTester { let hashrates = Arc::new(Mutex::new(HashMap::new())); let external_miner = Arc::new(ExternalMiner::new(hashrates.clone())); let eth = EthClient::new(&client, &snapshot, &sync, &ap, &miner, &external_miner, options).to_delegate(); - let filter = EthFilterClient::new(&client, &miner).to_delegate(); + let filter = EthFilterClient::new(client.clone(), miner.clone()).to_delegate(); let dispatcher = FullDispatcher::new(Arc::downgrade(&client), Arc::downgrade(&miner)); let sign = SigningUnsafeClient::new(&ap, dispatcher).to_delegate(); diff --git a/rpc/src/v1/traits/eth.rs 
b/rpc/src/v1/traits/eth.rs index 365ad9320..7f21829c7 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -196,12 +196,12 @@ build_rpc_trait! { fn new_pending_transaction_filter(&self) -> Result; /// Returns filter changes since last poll. - #[rpc(name = "eth_getFilterChanges")] - fn filter_changes(&self, Index) -> Result; + #[rpc(async, name = "eth_getFilterChanges")] + fn filter_changes(&self, Index) -> BoxFuture; /// Returns all logs matching given filter (in a range 'from' - 'to'). - #[rpc(name = "eth_getFilterLogs")] - fn filter_logs(&self, Index) -> Result, Error>; + #[rpc(async, name = "eth_getFilterLogs")] + fn filter_logs(&self, Index) -> BoxFuture, Error>; /// Uninstalls filter. #[rpc(name = "eth_uninstallFilter")] From f4091681267b707a9a59861436422edf992be2ce Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 28 Mar 2017 15:42:23 +0200 Subject: [PATCH 03/24] log fetching for light client --- rpc/src/v1/impls/eth.rs | 10 +-- rpc/src/v1/impls/eth_filter.rs | 4 +- rpc/src/v1/impls/light/eth.rs | 111 +++++++++++++++++++++++++++++++-- rpc/src/v1/traits/eth.rs | 4 +- 4 files changed, 115 insertions(+), 14 deletions(-) diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 811d5aa90..677a50535 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -537,23 +537,23 @@ impl Eth for EthClient where Err(errors::deprecated("Compilation functionality is deprecated.".to_string())) } - fn logs(&self, filter: Filter) -> Result, Error> { + fn logs(&self, filter: Filter) -> BoxFuture, Error> { let include_pending = filter.to_block == Some(BlockNumber::Pending); let filter: EthcoreFilter = filter.into(); - let mut logs = take_weak!(self.client).logs(filter.clone()) + let mut logs = take_weakf!(self.client).logs(filter.clone()) .into_iter() .map(From::from) .collect::>(); if include_pending { - let best_block = take_weak!(self.client).chain_info().best_block_number; - let pending = 
pending_logs(&*take_weak!(self.miner), best_block, &filter); + let best_block = take_weakf!(self.client).chain_info().best_block_number; + let pending = pending_logs(&*take_weakf!(self.miner), best_block, &filter); logs.extend(pending); } let logs = limit_logs(logs, filter.limit); - Ok(logs) + future::ok(logs).boxed() } fn work(&self, no_new_work_timeout: Trailing) -> Result { diff --git a/rpc/src/v1/impls/eth_filter.rs b/rpc/src/v1/impls/eth_filter.rs index 9a7281243..8f448feb5 100644 --- a/rpc/src/v1/impls/eth_filter.rs +++ b/rpc/src/v1/impls/eth_filter.rs @@ -53,7 +53,7 @@ pub trait Filterable { fn polls(&self) -> &Mutex>; } -/// Eth filter rpc implementation. +/// Eth filter rpc implementation for a full node. pub struct EthFilterClient where C: BlockChainClient, M: MinerService { @@ -98,6 +98,8 @@ impl Filterable for EthFilterClient where C: BlockChainClient, M: Mi fn polls(&self) -> &Mutex> { &self.polls } } + + impl EthFilter for T { fn new_filter(&self, filter: Filter) -> Result { let mut polls = self.polls().lock(); diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index 1851f479e..11747873e 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -34,6 +34,7 @@ use ethcore::basic_account::BasicAccount; use ethcore::encoded; use ethcore::executed::{Executed, ExecutionError}; use ethcore::ids::BlockId; +use ethcore::filter::Filter as EthcoreFilter; use ethcore::transaction::{Action, SignedTransaction, Transaction as EthTransaction}; use ethsync::LightSync; use rlp::UntrustedRlp; @@ -43,7 +44,9 @@ use util::{RwLock, Mutex, Uint, U256}; use futures::{future, Future, BoxFuture, IntoFuture}; use futures::sync::oneshot; +use v1::impls::eth_filter::Filterable; use v1::helpers::{CallRequest as CRequest, errors, limit_logs, dispatch}; +use v1::helpers::{PollFilter, PollManager}; use v1::helpers::block_import::is_major_importing; use v1::traits::Eth; use v1::types::{ @@ -55,7 +58,7 @@ use v1::metadata::Metadata; use 
util::Address; -/// Light client `ETH` RPC. +/// Light client `ETH` (and filter) RPC. pub struct EthClient { sync: Arc, client: Arc, @@ -63,6 +66,22 @@ pub struct EthClient { transaction_queue: Arc>, accounts: Arc, cache: Arc>, + polls: Mutex>, +} + +impl Clone for EthClient { + fn clone(&self) -> Self { + // each instance should have its own poll manager. + EthClient { + sync: self.sync.clone(), + client: self.client.clone(), + on_demand: self.on_demand.clone(), + transaction_queue: self.transaction_queue.clone(), + accounts: self.accounts.clone(), + cache: self.cache.clone(), + polls: Mutex::new(PollManager::new()), + } + } } // helper for internal error: on demand sender cancelled. @@ -90,6 +109,7 @@ impl EthClient { transaction_queue: transaction_queue, accounts: accounts, cache: cache, + polls: Mutex::new(PollManager::new()), } } @@ -484,19 +504,98 @@ impl Eth for EthClient { Err(errors::deprecated("Compilation of Solidity via RPC is deprecated".to_string())) } - fn logs(&self, _filter: Filter) -> Result, Error> { - Err(errors::unimplemented(None)) + fn logs(&self, filter: Filter) -> BoxFuture, Error> { + let limit = filter.limit; + + Filterable::logs(self, filter.into()) + .map(move|logs| limit_logs(logs, limit)) + .boxed() } fn work(&self, _timeout: Trailing) -> Result { - Err(errors::unimplemented(None)) + Err(errors::light_unimplemented(None)) } fn submit_work(&self, _nonce: RpcH64, _pow_hash: RpcH256, _mix_hash: RpcH256) -> Result { - Err(errors::unimplemented(None)) + Err(errors::light_unimplemented(None)) } fn submit_hashrate(&self, _rate: RpcU256, _id: RpcH256) -> Result { - Err(errors::unimplemented(None)) + Err(errors::light_unimplemented(None)) + } +} + +// This trait implementation triggers a blanked impl of `EthFilter`. 
+impl Filterable for EthClient { + fn best_block_number(&self) -> u64 { self.client.chain_info().best_block_number } + + fn block_hash(&self, id: BlockId) -> Option { + self.client.block_hash(id).map(Into::into) + } + + fn pending_transactions_hashes(&self, _block_number: u64) -> Vec<::util::H256> { + Vec::new() + } + + fn logs(&self, filter: EthcoreFilter) -> BoxFuture, Error> { + use std::collections::BTreeMap; + + use futures::stream::{self, Stream}; + use util::H2048; + + // early exit for "to" block before "from" block. + match filter.from_block { + BlockId::Latest | BlockId::Pending => { + let best = self.client.best_block_header(); + let chain_info = self.client.chain_info(); + if best.number() != chain_info.best_block_number || best.hash() != chain_info.best_block_hash { + return future::ok(Vec::new()).boxed() + } + } + _ => {} + } + + let maybe_future = self.sync.with_context(move |ctx| { + // find all headers which match the filter, and fetch the receipts for each one. + // match them with their numbers for easy sorting later. + let bit_combos = filter.bloom_possibilities(); + let receipts_futures: Vec<_> = self.client.ancestry_iter(filter.to_block) + .take_while(|ref hdr| BlockId::Number(hdr.number()) != filter.from_block) + .take_while(|ref hdr| BlockId::Hash(hdr.hash()) != filter.from_block) + .filter(|ref hdr| { + let hdr_bloom = hdr.log_bloom(); + bit_combos.iter().find(|&bloom| hdr_bloom & *bloom == *bloom).is_some() + }) + .map(|hdr| (hdr.number(), request::BlockReceipts(hdr))) + .map(|(num, req)| self.on_demand.block_receipts(ctx, req).map(move |x| (num, x))) + .collect(); + + // as the receipts come in, find logs within them which match the filter. + // insert them into a BTreeMap to maintain order by number and block index. 
+ stream::futures_unordered(receipts_futures) + .fold(BTreeMap::new(), move |mut matches, (num, receipts)| { + for (block_index, log) in receipts.into_iter().flat_map(|r| r.logs).enumerate() { + if filter.matches(&log) { + matches.insert((num, block_index), log.into()); + } + } + future::ok(matches) + }) // and then collect them into a vector. + .map(|matches| matches.into_iter().map(|(_, v)| v).collect()) + .map_err(err_premature_cancel) + }); + + match maybe_future { + Some(fut) => fut.boxed(), + None => future::err(errors::network_disabled()).boxed(), + } + } + + fn pending_logs(&self, _block_number: u64, _filter: &EthcoreFilter) -> Vec { + Vec::new() // light clients don't mine. + } + + fn polls(&self) -> &Mutex> { + &self.polls } } diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 7f21829c7..941263335 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -162,8 +162,8 @@ build_rpc_trait! { fn compile_serpent(&self, String) -> Result; /// Returns logs matching given filter object. - #[rpc(name = "eth_getLogs")] - fn logs(&self, Filter) -> Result, Error>; + #[rpc(async, name = "eth_getLogs")] + fn logs(&self, Filter) -> BoxFuture, Error>; /// Returns the hash of the current block, the seedHash, and the boundary condition to be met. #[rpc(name = "eth_getWork")] From d8893b959dbeb35467cd6e7cb0dfa2a5e989417d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 28 Mar 2017 15:56:51 +0200 Subject: [PATCH 04/24] add eth-filter delegate --- parity/rpc_apis.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index 01a649469..1e726aaf8 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -344,9 +344,8 @@ impl Dependencies for LightDependencies { self.secret_store.clone(), self.cache.clone(), ); - handler.extend_with(client.to_delegate()); - - // TODO: filters. 
+ handler.extend_with(Eth::to_delegate(client.clone())); + handler.extend_with(EthFilter::to_delegate(client)); add_signing_methods!(EthSigning, handler, self); }, Api::Personal => { From 73fa0cdc31f43c36bcaccaa2e65d2cdb61e5fecf Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 28 Mar 2017 17:15:36 +0200 Subject: [PATCH 05/24] eth_block fetching RPCs --- ethcore/light/src/client/header_chain.rs | 22 +++++ ethcore/light/src/client/mod.rs | 14 ++- ethcore/src/types/encoded.rs | 6 ++ rpc/src/v1/impls/light/eth.rs | 114 ++++++++++++++++++++++- 4 files changed, 152 insertions(+), 4 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 3edbf7171..15386b685 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -411,6 +411,28 @@ impl HeaderChain { } } + /// Get a block's chain score. + /// Returns nothing for non-canonical blocks. + pub fn score(&self, id: BlockId) -> Option { + let genesis_hash = self.genesis_hash(); + match id { + BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_header.difficulty()), + BlockId::Hash(hash) if hash == genesis_hash => Some(self.genesis_header.difficulty()), + BlockId::Hash(hash) => match self.block_header(BlockId::Hash(hash)) { + Some(header) => self.candidates.read().get(&header.number()) + .and_then(|era| era.candidates.iter().find(|e| e.hash == hash)) + .map(|c| c.total_difficulty), + None => None, + }, + BlockId::Number(num) => { + let candidates = self.candidates.read(); + if self.best_block.read().number < num { return None } + candidates.get(&num).map(|era| era.candidates[0].total_difficulty) + } + BlockId::Latest | BlockId::Pending => Some(self.best_block.read().total_difficulty) + } + } + /// Get the best block's header. 
pub fn best_header(&self) -> encoded::Header { self.block_header(BlockId::Latest).expect("Header for best block always stored; qed") diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index eb1c5cdcd..fe09f1d20 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -31,7 +31,7 @@ use ethcore::service::ClientIoMessage; use ethcore::encoded; use io::IoChannel; -use util::{H256, Mutex, RwLock}; +use util::{H256, U256, Mutex, RwLock}; use util::kvdb::{KeyValueDB, CompactionProfile}; use self::header_chain::{AncestryIter, HeaderChain}; @@ -74,6 +74,9 @@ pub trait LightChainClient: Send + Sync { /// Get the best block header. fn best_block_header(&self) -> encoded::Header; + /// Get a block's chain score by ID. + fn score(&self, id: BlockId) -> Option; + /// Get an iterator over a block and its ancestry. fn ancestry_iter<'a>(&'a self, start: BlockId) -> Box + 'a>; @@ -199,6 +202,11 @@ impl Client { self.chain.best_header() } + /// Get a block's chain score. + pub fn score(&self, id: BlockId) -> Option { + self.chain.score(id) + } + /// Get an iterator over a block and its ancestry. pub fn ancestry_iter(&self, start: BlockId) -> AncestryIter { self.chain.ancestry_iter(start) @@ -328,6 +336,10 @@ impl LightChainClient for Client { Client::best_block_header(self) } + fn score(&self, id: BlockId) -> Option { + Client::score(self, id) + } + fn ancestry_iter<'a>(&'a self, start: BlockId) -> Box + 'a> { Box::new(Client::ancestry_iter(self, start)) } diff --git a/ethcore/src/types/encoded.rs b/ethcore/src/types/encoded.rs index 0a4164044..125a00fd0 100644 --- a/ethcore/src/types/encoded.rs +++ b/ethcore/src/types/encoded.rs @@ -199,6 +199,12 @@ impl Block { /// Decode to a full block. pub fn decode(&self) -> FullBlock { ::rlp::decode(&self.0) } + /// Decode the header. + pub fn decode_header(&self) -> FullHeader { self.rlp().val_at(0) } + + /// Clone the encoded header. 
+ pub fn header(&self) -> Header { Header(self.rlp().at(0).as_raw().to_vec()) } + /// Get the rlp of this block. #[inline] pub fn rlp(&self) -> Rlp { diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index 11747873e..c341d539a 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -254,6 +254,111 @@ impl EthClient { } }).boxed() } + + fn block(&self, id: BlockId) -> BoxFuture, Error> { + let (on_demand, sync) = (self.on_demand.clone(), self.sync.clone()); + + self.header(id).and_then(move |hdr| { + let req = match hdr { + Some(hdr) => request::Body::new(hdr), + None => return future::ok(None).boxed(), + }; + + match sync.with_context(move |ctx| on_demand.block(ctx, req)) { + Some(fut) => fut.map_err(err_premature_cancel).map(Some).boxed(), + None => future::err(errors::network_disabled()).boxed(), + } + }).boxed() + } + + // get a "rich" block structure + fn rich_block(&self, id: BlockId, include_txs: bool) -> BoxFuture, Error> { + let (on_demand, sync) = (self.on_demand.clone(), self.sync.clone()); + let (client, engine) = (self.client.clone(), self.client.engine().clone()); + + // helper for filling out a rich block once we've got a block and a score. 
+ let fill_rich = move |block: encoded::Block, score: Option| { + let header = block.decode_header(); + let extra_info = engine.extra_info(&header); + RichBlock { + block: Block { + hash: Some(header.hash().into()), + size: Some(block.rlp().as_raw().len().into()), + parent_hash: header.parent_hash().clone().into(), + uncles_hash: header.uncles_hash().clone().into(), + author: header.author().clone().into(), + miner: header.author().clone().into(), + state_root: header.state_root().clone().into(), + transactions_root: header.transactions_root().clone().into(), + receipts_root: header.receipts_root().clone().into(), + number: Some(header.number().into()), + gas_used: header.gas_used().clone().into(), + gas_limit: header.gas_limit().clone().into(), + logs_bloom: header.log_bloom().clone().into(), + timestamp: header.timestamp().into(), + difficulty: header.difficulty().clone().into(), + total_difficulty: score.map(Into::into), + seal_fields: header.seal().into_iter().cloned().map(Into::into).collect(), + uncles: block.uncle_hashes().into_iter().map(Into::into).collect(), + transactions: match include_txs { + true => BlockTransactions::Full(block.view().localized_transactions().into_iter().map(Into::into).collect()), + false => BlockTransactions::Hashes(block.transaction_hashes().into_iter().map(Into::into).collect()), + }, + extra_data: Bytes::new(header.extra_data().to_vec()), + }, + extra_info: extra_info + } + }; + + // get the block itself. + self.block(id).and_then(move |block| match block { + None => return future::ok(None).boxed(), + Some(block) => { + // then fetch the total difficulty (this is much easier after getting the block). + match client.score(id) { + Some(score) => future::ok(fill_rich(block, Some(score))).map(Some).boxed(), + None => { + // make a CHT request to fetch the chain score. 
+ let req = cht::block_to_cht_number(block.number()) + .and_then(|num| client.cht_root(num as usize)) + .and_then(|root| request::HeaderProof::new(block.number(), root)); + + + let req = match req { + Some(req) => req, + None => { + // somehow the genesis block slipped past other checks. + // return it now. + let score = client.block_header(BlockId::Number(0)) + .expect("genesis always stored; qed") + .difficulty(); + + return future::ok(fill_rich(block, Some(score))).map(Some).boxed() + } + }; + + // three possible outcomes: + // - network is down. + // - we get a score, but our hash is non-canonical. + // - we get ascore, and our hash is canonical. + let maybe_fut = sync.with_context(move |ctx| on_demand.hash_and_score_by_number(ctx, req)); + match maybe_fut { + Some(fut) => fut.map(move |(hash, score)| { + let score = if hash == block.hash() { + Some(score) + } else { + None + }; + + Some(fill_rich(block, score)) + }).map_err(err_premature_cancel).boxed(), + None => return future::err(errors::network_disabled()).boxed(), + } + } + } + } + }).boxed() + } } impl Eth for EthClient { @@ -295,7 +400,10 @@ impl Eth for EthClient { } fn gas_price(&self) -> Result { - Ok(Default::default()) + Ok(self.cache.lock().gas_price_corpus() + .and_then(|c| c.median().cloned()) + .map(RpcU256::from) + .unwrap_or_else(Default::default)) } fn accounts(&self, meta: Metadata) -> BoxFuture, Error> { @@ -324,11 +432,11 @@ impl Eth for EthClient { } fn block_by_hash(&self, hash: RpcH256, include_txs: bool) -> BoxFuture, Error> { - future::err(errors::unimplemented(None)).boxed() + self.rich_block(BlockId::Hash(hash.into()), include_txs) } fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture, Error> { - future::err(errors::unimplemented(None)).boxed() + self.rich_block(num.into(), include_txs) } fn transaction_count(&self, address: RpcH160, num: Trailing) -> BoxFuture { From 1b0a369889b1dac101f4b83250dab2d6f755278b Mon Sep 17 00:00:00 2001 From: Robert Habermeier 
Date: Tue, 28 Mar 2017 18:42:45 +0200 Subject: [PATCH 06/24] return default accounts from on_demand --- ethcore/light/src/on_demand/mod.rs | 27 +++++++++++++++++++-------- parity/light_helpers/queue_cull.rs | 4 +--- parity/run.rs | 3 ++- rpc/src/v1/helpers/dispatch.rs | 2 +- rpc/src/v1/impls/light/eth.rs | 11 +++++++---- 5 files changed, 30 insertions(+), 17 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index a7c1ba2c4..c756844c9 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -35,7 +35,7 @@ use futures::sync::oneshot::{self, Sender, Receiver}; use network::PeerId; use rlp::RlpStream; use util::{Bytes, RwLock, Mutex, U256, H256}; -use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; +use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY, SHA3_EMPTY_LIST_RLP}; use net::{self, Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; use cache::Cache; @@ -83,7 +83,7 @@ enum Pending { HeaderByHash(request::HeaderByHash, Sender), Block(request::Body, Sender), BlockReceipts(request::BlockReceipts, Sender>), - Account(request::Account, Sender>), + Account(request::Account, Sender), Code(request::Code, Sender), TxProof(request::TransactionProof, Sender>), } @@ -136,18 +136,20 @@ pub struct OnDemand { pending_requests: RwLock>, cache: Arc>, orphaned_requests: RwLock>, + start_nonce: U256, } const RECEIVER_IN_SCOPE: &'static str = "Receiver is still in scope, so it's not dropped; qed"; impl OnDemand { /// Create a new `OnDemand` service with the given cache. 
- pub fn new(cache: Arc>) -> Self { + pub fn new(cache: Arc>, account_start_nonce: U256) -> Self { OnDemand { peers: RwLock::new(HashMap::new()), pending_requests: RwLock::new(HashMap::new()), cache: cache, orphaned_requests: RwLock::new(Vec::new()), + start_nonce: account_start_nonce, } } @@ -268,7 +270,7 @@ impl OnDemand { /// Request an account by address and block header -- which gives a hash to query and a state root /// to verify against. - pub fn account(&self, ctx: &BasicContext, req: request::Account) -> Receiver> { + pub fn account(&self, ctx: &BasicContext, req: request::Account) -> Receiver { let (sender, receiver) = oneshot::channel(); self.dispatch(ctx, Pending::Account(req, sender)); receiver @@ -279,7 +281,7 @@ impl OnDemand { let (sender, receiver) = oneshot::channel(); // fast path for no code. - if req.code_hash == ::util::sha3::SHA3_EMPTY { + if req.code_hash == SHA3_EMPTY { sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE) } else { self.dispatch(ctx, Pending::Code(req, sender)); @@ -497,10 +499,19 @@ impl Handler for OnDemand { Pending::Account(req, sender) => { if let NetworkResponse::Account(ref response) = *response { match req.check_response(&response.proof) { - Ok(maybe_account) => { + Ok(account) => { + let account = account.unwrap_or_else(|| { + BasicAccount { + balance: 0.into(), + nonce: self.start_nonce, + code_hash: SHA3_EMPTY, + storage_root: SHA3_NULL_RLP + } + }); + // TODO: validate against request outputs. // needs engine + env info as part of request. 
- let _ = sender.send(maybe_account); + let _ = sender.send(account); return } Err(e) => warn!(target: "on_demand", "Error handling response for state request: {:?}", e), @@ -572,7 +583,7 @@ mod tests { #[test] fn detects_hangup() { let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); - let on_demand = OnDemand::new(cache); + let on_demand = OnDemand::new(cache, 0.into()); let result = on_demand.header_by_hash(&FakeContext, request::HeaderByHash(H256::default())); assert!(on_demand.orphaned_requests.read().len() == 1); diff --git a/parity/light_helpers/queue_cull.rs b/parity/light_helpers/queue_cull.rs index 10865d485..548ee33cd 100644 --- a/parity/light_helpers/queue_cull.rs +++ b/parity/light_helpers/queue_cull.rs @@ -67,7 +67,6 @@ impl IoHandler for QueueCull { let (sync, on_demand, txq) = (self.sync.clone(), self.on_demand.clone(), self.txq.clone()); let best_header = self.client.best_block_header(); - let start_nonce = self.client.engine().account_start_nonce(); info!(target: "cull", "Attempting to cull queued transactions from {} senders.", senders.len()); self.remote.spawn_with_timeout(move || { @@ -75,8 +74,7 @@ impl IoHandler for QueueCull { // fetch the nonce of each sender in the queue. let nonce_futures = senders.iter() .map(|&address| request::Account { header: best_header.clone(), address: address }) - .map(|request| on_demand.account(ctx, request)) - .map(move |fut| fut.map(move |x| x.map(|acc| acc.nonce).unwrap_or(start_nonce))) + .map(|request| on_demand.account(ctx, request).map(|acc| acc.nonce)) .zip(senders.iter()) .map(|(fut, &addr)| fut.map(move |nonce| (addr, nonce))); diff --git a/parity/run.rs b/parity/run.rs index cf7d5e82c..9caa4120e 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -225,7 +225,8 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> let cache = Arc::new(::util::Mutex::new(cache)); // start on_demand service. 
- let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone())); + let account_start_nonce = service.client().engine().account_start_nonce(); + let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone(), account_start_nonce)); // set network path. net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned()); diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index e1b298b9f..d58a211ed 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -268,7 +268,7 @@ impl LightDispatcher { match nonce_future { Some(x) => - x.map(|acc| acc.map_or_else(Default::default, |acc| acc.nonce)) + x.map(|acc| acc.nonce) .map_err(|_| errors::no_light_peers()) .boxed(), None => future::err(errors::network_disabled()).boxed() diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index c341d539a..e1014afe8 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -173,12 +173,15 @@ impl EthClient { Some(hdr) => hdr, }; - sync.with_context(|ctx| on_demand.account(ctx, request::Account { + let maybe_fut = sync.with_context(|ctx| on_demand.account(ctx, request::Account { header: header, address: address, - })) - .map(|x| x.map_err(err_premature_cancel).boxed()) - .unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) + })); + + match maybe_fut { + Some(fut) => fut.map(Some).map_err(err_premature_cancel).boxed(), + None => future::err(errors::network_disabled()).boxed(), + } }).boxed() } From 35740456a47275195244aea29c8e0f0bbb558fc2 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 5 Apr 2017 15:02:44 +0200 Subject: [PATCH 07/24] generalize RequestBuilder --- ethcore/light/src/net/context.rs | 2 +- ethcore/light/src/net/mod.rs | 2 +- ethcore/light/src/net/request_set.rs | 2 +- ethcore/light/src/net/tests/mod.rs | 4 +- ethcore/light/src/on_demand/mod.rs | 4 +- ethcore/light/src/types/request/builder.rs | 44 ++++++++------ 
ethcore/light/src/types/request/mod.rs | 67 ++++++++++++++++------ 7 files changed, 82 insertions(+), 43 deletions(-) diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index 64ddd19a3..33009d7f6 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -20,7 +20,7 @@ use network::{NetworkContext, PeerId, NodeId}; use super::{Announcement, LightProtocol, ReqId}; use super::error::Error; -use request::Requests; +use request::NetworkRequests as Requests; /// An I/O context which allows sending and receiving packets as well as /// disconnecting peers. This is used as a generalization of the portions diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index e32e92145..7d006662c 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -33,7 +33,7 @@ use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use provider::Provider; -use request::{Request, Requests, Response}; +use request::{Request, NetworkRequests as Requests, Response}; use self::request_credits::{Credits, FlowParams}; use self::context::{Ctx, TickCtx}; diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index 094fa1894..c5608050f 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -25,7 +25,7 @@ use std::collections::{BTreeMap, HashMap}; use std::iter::FromIterator; use request::Request; -use request::Requests; +use request::NetworkRequests as Requests; use net::{timeout, ReqId}; use util::U256; diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 6dc5fbe7e..94788a727 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -39,14 +39,14 @@ use std::sync::Arc; // helper for encoding a single request into a packet. // panics on bad backreference. 
-fn encode_single(request: Request) -> Requests { +fn encode_single(request: Request) -> NetworkRequests { let mut builder = RequestBuilder::default(); builder.push(request).unwrap(); builder.build() } // helper for making a packet out of `Requests`. -fn make_packet(req_id: usize, requests: &Requests) -> Vec { +fn make_packet(req_id: usize, requests: &NetworkRequests) -> Vec { let mut stream = RlpStream::new_list(2); stream.append(&req_id).append_list(&requests.requests()); stream.out() diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index c756844c9..e3ea28887 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -562,7 +562,7 @@ mod tests { use cache::Cache; use net::{Announcement, BasicContext, ReqId, Error as LesError}; - use request::Requests; + use request::NetworkRequests; use network::{PeerId, NodeId}; use time::Duration; @@ -572,7 +572,7 @@ mod tests { impl BasicContext for FakeContext { fn persistent_peer_id(&self, _: PeerId) -> Option { None } - fn request_from(&self, _: PeerId, _: Requests) -> Result { + fn request_from(&self, _: PeerId, _: NetworkRequests) -> Result { unimplemented!() } fn make_announcement(&self, _: Announcement) { } diff --git a/ethcore/light/src/types/request/builder.rs b/ethcore/light/src/types/request/builder.rs index 77f1389c2..015edf4c4 100644 --- a/ethcore/light/src/types/request/builder.rs +++ b/ethcore/light/src/types/request/builder.rs @@ -20,22 +20,30 @@ use std::collections::HashMap; use request::{ - IncompleteRequest, CompleteRequest, Request, - OutputKind, Output, NoSuchOutput, Response, ResponseError, + IncompleteRequest, OutputKind, Output, NoSuchOutput, ResponseError, ResponseLike, }; /// Build chained requests. Push them onto the series with `push`, /// and produce a `Requests` object with `build`. Outputs are checked for consistency. 
-#[derive(Debug, Default, Clone, PartialEq, Eq)] -pub struct RequestBuilder { +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RequestBuilder { output_kinds: HashMap<(usize, usize), OutputKind>, - requests: Vec, + requests: Vec, } -impl RequestBuilder { +impl Default for RequestBuilder { + fn default() -> Self { + RequestBuilder { + output_kinds: HashMap::new(), + requests: Vec::new(), + } + } +} + +impl RequestBuilder { /// Attempt to push a request onto the request chain. Fails if the request /// references a non-existent output of a prior request. - pub fn push(&mut self, request: Request) -> Result<(), NoSuchOutput> { + pub fn push(&mut self, request: T) -> Result<(), NoSuchOutput> { request.check_outputs(|req, idx, kind| { match self.output_kinds.get(&(req, idx)) { Some(k) if k == &kind => Ok(()), @@ -54,7 +62,7 @@ impl RequestBuilder { } /// Convert this into a "requests" object. - pub fn build(self) -> Requests { + pub fn build(self) -> Requests { Requests { outputs: HashMap::new(), requests: self.requests, @@ -65,18 +73,18 @@ impl RequestBuilder { /// Requests pending responses. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct Requests { +pub struct Requests { outputs: HashMap<(usize, usize), Output>, - requests: Vec, + requests: Vec, answered: usize, } -impl Requests { - /// For each request, produce responses for each. +impl Requests { + /// For each request, produce a response. /// The responses vector produced goes up to the point where the responder /// first returns `None`, an invalid response, or until all requests have been responded to. - pub fn respond_to_all(mut self, responder: F) -> Vec - where F: Fn(CompleteRequest) -> Option + pub fn respond_to_all(mut self, responder: F) -> Vec + where F: Fn(T::Complete) -> Option { let mut responses = Vec::new(); @@ -95,13 +103,13 @@ impl Requests { /// Get access to the underlying slice of requests. // TODO: unimplemented -> Vec, // do we _have to_ allocate? 
- pub fn requests(&self) -> &[Request] { &self.requests } + pub fn requests(&self) -> &[T] { &self.requests } /// Get the number of answered requests. pub fn num_answered(&self) -> usize { self.answered } /// Get the next request as a filled request. Returns `None` when all requests answered. - pub fn next_complete(&self) -> Option { + pub fn next_complete(&self) -> Option { if self.answered == self.requests.len() { None } else { @@ -113,12 +121,12 @@ impl Requests { /// Supply a response for the next request. /// Fails on: wrong request kind, all requests answered already. - pub fn supply_response(&mut self, response: &Response) -> Result<(), ResponseError> { + pub fn supply_response(&mut self, response: &T::Response) -> Result<(), ResponseError> { let idx = self.answered; // check validity. if idx == self.requests.len() { return Err(ResponseError::Unexpected) } - if self.requests[idx].kind() != response.kind() { return Err(ResponseError::WrongKind) } + if !self.requests[idx].check_response(&response) { return Err(ResponseError::WrongKind) } let outputs = &mut self.outputs; response.fill_outputs(|out_idx, output| { diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 3099f8fed..4d0049696 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -197,6 +197,9 @@ impl Encodable for HashOrNumber { } } +/// Type alias for "network requests". +pub type NetworkRequests = Requests; + /// All request types, as they're sent over the network. /// They may be incomplete, with back-references to outputs /// of prior requests. 
@@ -296,6 +299,7 @@ impl Encodable for Request { impl IncompleteRequest for Request { type Complete = CompleteRequest; + type Response = Response; fn check_outputs(&self, f: F) -> Result<(), NoSuchOutput> where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> @@ -338,6 +342,10 @@ impl IncompleteRequest for Request { } } + fn check_response(&self, response: &Response) -> bool { + self.kind() == response.kind() + } + fn complete(self) -> Result { match self { Request::Headers(req) => req.complete().map(CompleteRequest::Headers), @@ -421,9 +429,9 @@ pub enum Response { Execution(ExecutionResponse), } -impl Response { +impl ResponseLike for Response { /// Fill reusable outputs by writing them into the function. - pub fn fill_outputs(&self, f: F) where F: FnMut(usize, Output) { + fn fill_outputs(&self, f: F) where F: FnMut(usize, Output) { match *self { Response::Headers(ref res) => res.fill_outputs(f), Response::HeaderProof(ref res) => res.fill_outputs(f), @@ -435,7 +443,9 @@ impl Response { Response::Execution(ref res) => res.fill_outputs(f), } } +} +impl Response { /// Inspect the kind of this response. pub fn kind(&self) -> Kind { match *self { @@ -490,6 +500,8 @@ impl Encodable for Response { pub trait IncompleteRequest: Sized { /// The complete variant of this request. type Complete; + /// The response to this request. + type Response: ResponseLike; /// Check prior outputs against the needed inputs. /// @@ -508,11 +520,22 @@ pub trait IncompleteRequest: Sized { /// Only outputs previously checked with `check_outputs` may be available. fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result; + /// Check whether the response matches (beyond the type). + fn check_response(&self, _response: &Self::Response) -> bool { true } + /// Attempt to convert this request into its complete variant. /// Will succeed if all fields have been filled, will fail otherwise. fn complete(self) -> Result; } +/// A response-like object. 
+/// +/// These contain re-usable outputs. +pub trait ResponseLike { + /// Write all re-usable outputs into the provided function. + fn fill_outputs(&self, output_store: F) where F: FnMut(usize, Output); +} + /// Header request. pub mod header { use super::{Field, HashOrNumber, NoSuchOutput, OutputKind, Output}; @@ -555,6 +578,7 @@ pub mod header { impl super::IncompleteRequest for Incomplete { type Complete = Complete; + type Response = Response; fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> @@ -608,9 +632,9 @@ pub mod header { pub headers: Vec, } - impl Response { + impl super::ResponseLike for Response { /// Fill reusable outputs by writing them into the function. - pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) { } + fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) { } } impl Decodable for Response { @@ -671,6 +695,7 @@ pub mod header_proof { impl super::IncompleteRequest for Incomplete { type Complete = Complete; + type Response = Response; fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> @@ -719,9 +744,9 @@ pub mod header_proof { pub td: U256, } - impl Response { + impl super::ResponseLike for Response { /// Fill reusable outputs by providing them to the function. 
- pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { f(0, Output::Hash(self.hash)); } } @@ -776,6 +801,7 @@ pub mod block_receipts { impl super::IncompleteRequest for Incomplete { type Complete = Complete; + type Response = Response; fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> @@ -818,9 +844,9 @@ pub mod block_receipts { pub receipts: Vec } - impl Response { + impl super::ResponseLike for Response { /// Fill reusable outputs by providing them to the function. - pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} } impl Decodable for Response { @@ -868,6 +894,7 @@ pub mod block_body { impl super::IncompleteRequest for Incomplete { type Complete = Complete; + type Response = Response; fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> @@ -910,9 +937,9 @@ pub mod block_body { pub body: encoded::Body, } - impl Response { + impl super::ResponseLike for Response { /// Fill reusable outputs by providing them to the function. - pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} } impl Decodable for Response { @@ -971,6 +998,7 @@ pub mod account { impl super::IncompleteRequest for Incomplete { type Complete = Complete; + type Response = Response; fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> @@ -1039,9 +1067,9 @@ pub mod account { pub storage_root: H256, } - impl Response { + impl super::ResponseLike for Response { /// Fill reusable outputs by providing them to the function. 
- pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { f(0, Output::Hash(self.code_hash)); f(1, Output::Hash(self.storage_root)); } @@ -1109,6 +1137,7 @@ pub mod storage { impl super::IncompleteRequest for Incomplete { type Complete = Complete; + type Response = Response; fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> @@ -1184,9 +1213,9 @@ pub mod storage { pub value: H256, } - impl Response { + impl super::ResponseLike for Response { /// Fill reusable outputs by providing them to the function. - pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { f(0, Output::Hash(self.value)); } } @@ -1243,6 +1272,7 @@ pub mod contract_code { impl super::IncompleteRequest for Incomplete { type Complete = Complete; + type Response = Response; fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> @@ -1299,9 +1329,9 @@ pub mod contract_code { pub code: Bytes, } - impl Response { + impl super::ResponseLike for Response { /// Fill reusable outputs by providing them to the function. - pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} } impl Decodable for Response { @@ -1380,6 +1410,7 @@ pub mod execution { impl super::IncompleteRequest for Incomplete { type Complete = Complete; + type Response = Response; fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> @@ -1440,9 +1471,9 @@ pub mod execution { pub items: Vec, } - impl Response { + impl super::ResponseLike for Response { /// Fill reusable outputs by providing them to the function. 
- pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} } impl Decodable for Response { From 08d8709ef6eff186a573a653ef5bd5b03e58d3ac Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 5 Apr 2017 18:56:01 +0200 Subject: [PATCH 08/24] checked request for OnDemand --- ethcore/light/src/on_demand/mod.rs | 18 +-- ethcore/light/src/on_demand/request.rs | 165 ++++++++++++++++++++- ethcore/light/src/types/request/builder.rs | 55 ++++--- ethcore/light/src/types/request/mod.rs | 41 +++-- 4 files changed, 232 insertions(+), 47 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index e3ea28887..fcdebb2d4 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -459,15 +459,13 @@ impl Handler for OnDemand { } Pending::HeaderByHash(req, sender) => { if let NetworkResponse::Headers(ref response) = *response { - if let Some(header) = response.headers.get(0) { - match req.check_response(header) { - Ok(header) => { - self.cache.lock().insert_block_header(req.0, header.clone()); - let _ = sender.send(header); - return - } - Err(e) => warn!(target: "on_demand", "Error handling response for header request: {:?}", e), + match req.check_response(&response.headers) { + Ok(header) => { + self.cache.lock().insert_block_header(req.0, header.clone()); + let _ = sender.send(header); + return } + Err(e) => warn!(target: "on_demand", "Error handling response for header request: {:?}", e), } } } @@ -521,8 +519,8 @@ impl Handler for OnDemand { Pending::Code(req, sender) => { if let NetworkResponse::Code(ref response) = *response { match req.check_response(response.code.as_slice()) { - Ok(()) => { - let _ = sender.send(response.code.clone()); + Ok(code) => { + let _ = sender.send(code); return } Err(e) => warn!(target: "on_demand", "Error handling response for code request: {:?}", e), diff --git 
a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 30337cc2c..8efee1d20 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -26,17 +26,171 @@ use ethcore::receipt::Receipt; use ethcore::state::{self, ProvedExecution}; use ethcore::transaction::SignedTransaction; +use request::{self as net_request, IncompleteRequest, Output, OutputKind}; + use rlp::{RlpStream, UntrustedRlp}; use util::{Address, Bytes, DBValue, HashDB, H256, U256}; use util::memorydb::MemoryDB; use util::sha3::Hashable; use util::trie::{Trie, TrieDB, TrieError}; +/// Core unit of the API: submit batches of these to be answered with `Response`s. +pub enum Request { + /// A request for a header proof. + HeaderProof(HeaderProof), + /// A request for a header by hash. + HeaderByHash(HeaderByHash), + /// A request for block receipts. + Receipts(BlockReceipts), + /// A request for a block body. + Body(Body), + /// A request for an account. + Account(Account), + /// A request for a contract's code. + Code(Code), + /// A request for proof of execution. + Execution(TransactionProof), +} + +/// Requests coupled with their required data for verification. +/// This is used internally but not part of the public API. +#[derive(Clone)] +#[allow(missing_docs)] +pub enum CheckedRequest { + HeaderProof(HeaderProof, net_request::IncompleteHeaderProofRequest), + HeaderByHash(HeaderByHash, net_request::IncompleteHeadersRequest), + Receipts(BlockReceipts, net_request::IncompleteReceiptsRequest), + Body(Body, net_request::IncompleteBodyRequest), + Account(Account, net_request::IncompleteAccountRequest), + Code(Code, net_request::IncompleteCodeRequest), + Execution(TransactionProof, net_request::IncompleteExecutionRequest), +} + +impl IncompleteRequest for CheckedRequest { + type Complete = net_request::CompleteRequest; + type Response = net_request::Response; + + /// Check prior outputs against the needed inputs. 
+ /// + /// This is called to ensure consistency of this request with + /// others in the same packet. + fn check_outputs(&self, f: F) -> Result<(), net_request::NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), net_request::NoSuchOutput> + { + match *self { + CheckedRequest::HeaderProof(_, ref req) => req.check_outputs(f), + CheckedRequest::HeaderByHash(_, ref req) => req.check_outputs(f), + CheckedRequest::Receipts(_, ref req) => req.check_outputs(f), + CheckedRequest::Body(_, ref req) => req.check_outputs(f), + CheckedRequest::Account(_, ref req) => req.check_outputs(f), + CheckedRequest::Code(_, ref req) => req.check_outputs(f), + CheckedRequest::Execution(_, ref req) => req.check_outputs(f), + } + } + + /// Note that this request will produce the following outputs. + fn note_outputs(&self, f: F) where F: FnMut(usize, OutputKind) { + match *self { + CheckedRequest::HeaderProof(_, ref req) => req.note_outputs(f), + CheckedRequest::HeaderByHash(_, ref req) => req.note_outputs(f), + CheckedRequest::Receipts(_, ref req) => req.note_outputs(f), + CheckedRequest::Body(_, ref req) => req.note_outputs(f), + CheckedRequest::Account(_, ref req) => req.note_outputs(f), + CheckedRequest::Code(_, ref req) => req.note_outputs(f), + CheckedRequest::Execution(_, ref req) => req.note_outputs(f), + } + } + + /// Fill fields of the request. + /// + /// This function is provided an "output oracle" which allows fetching of + /// prior request outputs. + /// Only outputs previously checked with `check_outputs` may be available. 
+ fn fill(&mut self, f: F) where F: Fn(usize, usize) -> Result { + match *self { + CheckedRequest::HeaderProof(_, ref mut req) => req.fill(f), + CheckedRequest::HeaderByHash(_, ref mut req) => req.fill(f), + CheckedRequest::Receipts(_, ref mut req) => req.fill(f), + CheckedRequest::Body(_, ref mut req) => req.fill(f), + CheckedRequest::Account(_, ref mut req) => req.fill(f), + CheckedRequest::Code(_, ref mut req) => req.fill(f), + CheckedRequest::Execution(_, ref mut req) => req.fill(f), + } + } + + /// Will succeed if all fields have been filled, will fail otherwise. + fn complete(self) -> Result { + use ::request::CompleteRequest; + + match self { + CheckedRequest::HeaderProof(_, req) => req.complete().map(CompleteRequest::HeaderProof), + CheckedRequest::HeaderByHash(_, req) => req.complete().map(CompleteRequest::Headers), + CheckedRequest::Receipts(_, req) => req.complete().map(CompleteRequest::Receipts), + CheckedRequest::Body(_, req) => req.complete().map(CompleteRequest::Body), + CheckedRequest::Account(_, req) => req.complete().map(CompleteRequest::Account), + CheckedRequest::Code(_, req) => req.complete().map(CompleteRequest::Code), + CheckedRequest::Execution(_, req) => req.complete().map(CompleteRequest::Execution), + } + } +} + +impl net_request::CheckedRequest for CheckedRequest { + type Extract = Response; + type Error = Error; + + /// Check whether the response matches (beyond the type). + fn check_response(&self, response: &Self::Response) -> Result { + use ::request::Response as NetResponse; + + // check response against contained prover. 
+ match (self, response) { + (&CheckedRequest::HeaderProof(ref prover, _), &NetResponse::HeaderProof(ref res)) => + prover.check_response(&res.proof).map(|(h, s)| Response::HeaderProof(h, s)), + (&CheckedRequest::HeaderByHash(ref prover, _), &NetResponse::Headers(ref res)) => + prover.check_response(&res.headers).map(Response::HeaderByHash), + (&CheckedRequest::Receipts(ref prover, _), &NetResponse::Receipts(ref res)) => + prover.check_response(&res.receipts).map(Response::Receipts), + (&CheckedRequest::Body(ref prover, _), &NetResponse::Body(ref res)) => + prover.check_response(&res.body).map(Response::Body), + (&CheckedRequest::Account(ref prover, _), &NetResponse::Account(ref res)) => + prover.check_response(&res.proof).map(Response::Account), + (&CheckedRequest::Code(ref prover, _), &NetResponse::Code(ref res)) => + prover.check_response(&res.code).map(Response::Code), + (&CheckedRequest::Execution(ref prover, _), &NetResponse::Execution(ref res)) => + Ok(Response::Execution(prover.check_response(&res.items))), + _ => Err(Error::WrongKind), + } + } +} + +/// Responses to on-demand requests. +/// All of these are checked. +pub enum Response { + /// Response to a header proof request. + /// Returns the hash and chain score. + HeaderProof(H256, U256), + /// Response to a header-by-hash request. + HeaderByHash(encoded::Header), + /// Response to a receipts request. + Receipts(Vec), + /// Response to a block body request. + Body(encoded::Block), + /// Response to an Account request. + // TODO: `unwrap_or(engine_defaults)` + Account(Option), + /// Response to a request for code. + Code(Vec), + /// Response to a request for proved execution. + Execution(ProvedExecution), // TODO: make into `Result` +} + /// Errors in verification. #[derive(Debug, PartialEq)] pub enum Error { /// RLP decoder error. Decoder(::rlp::DecoderError), + /// Empty response. 
+ Empty, /// Trie lookup error (result of bad proof) Trie(TrieError), /// Bad inclusion proof @@ -47,6 +201,8 @@ pub enum Error { WrongHash(H256, H256), /// Wrong trie root. WrongTrieRoot(H256, H256), + /// Wrong response kind. + WrongKind, } impl From<::rlp::DecoderError> for Error { @@ -107,7 +263,8 @@ pub struct HeaderByHash(pub H256); impl HeaderByHash { /// Check a response for the header. - pub fn check_response(&self, header: &encoded::Header) -> Result { + pub fn check_response(&self, headers: &[encoded::Header]) -> Result { + let header = headers.get(0).ok_or(Error::Empty)?; let hash = header.sha3(); match hash == self.0 { true => Ok(header.clone()), @@ -208,6 +365,7 @@ impl Account { } /// Request for account code. +#[derive(Debug, Clone, PartialEq, Eq)] pub struct Code { /// Block hash, number pair. pub block_id: (H256, u64), @@ -217,10 +375,10 @@ pub struct Code { impl Code { /// Check a response with code against the code hash. - pub fn check_response(&self, code: &[u8]) -> Result<(), Error> { + pub fn check_response(&self, code: &[u8]) -> Result, Error> { let found_hash = code.sha3(); if found_hash == self.code_hash { - Ok(()) + Ok(code.to_vec()) } else { Err(Error::WrongHash(self.code_hash, found_hash)) } @@ -228,6 +386,7 @@ impl Code { } /// Request for transaction execution, along with the parts necessary to verify the proof. +#[derive(Clone)] pub struct TransactionProof { /// The transaction to request proof of. pub tx: SignedTransaction, diff --git a/ethcore/light/src/types/request/builder.rs b/ethcore/light/src/types/request/builder.rs index 015edf4c4..ec271f0a3 100644 --- a/ethcore/light/src/types/request/builder.rs +++ b/ethcore/light/src/types/request/builder.rs @@ -80,27 +80,6 @@ pub struct Requests { } impl Requests { - /// For each request, produce a response. - /// The responses vector produced goes up to the point where the responder - /// first returns `None`, an invalid response, or until all requests have been responded to. 
- pub fn respond_to_all(mut self, responder: F) -> Vec - where F: Fn(T::Complete) -> Option - { - let mut responses = Vec::new(); - - while let Some(response) = self.next_complete().and_then(&responder) { - match self.supply_response(&response) { - Ok(()) => responses.push(response), - Err(e) => { - debug!(target: "pip", "produced bad response to request: {:?}", e); - return responses; - } - } - } - - responses - } - /// Get access to the underlying slice of requests. // TODO: unimplemented -> Vec, // do we _have to_ allocate? pub fn requests(&self) -> &[T] { &self.requests } @@ -118,15 +97,20 @@ impl Requests { .expect("All outputs checked as invariant of `Requests` object; qed")) } } +} +impl Requests { /// Supply a response for the next request. /// Fails on: wrong request kind, all requests answered already. - pub fn supply_response(&mut self, response: &T::Response) -> Result<(), ResponseError> { + pub fn supply_response(&mut self, response: &T::Response) + -> Result> + { let idx = self.answered; // check validity. if idx == self.requests.len() { return Err(ResponseError::Unexpected) } - if !self.requests[idx].check_response(&response) { return Err(ResponseError::WrongKind) } + let extracted = self.requests[idx] + .check_response(&response).map_err(ResponseError::Validity)?; let outputs = &mut self.outputs; response.fill_outputs(|out_idx, output| { @@ -143,7 +127,30 @@ impl Requests { req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) } - Ok(()) + Ok(extracted) + } +} + +impl Requests { + /// For each request, produce a response. + /// The responses vector produced goes up to the point where the responder + /// first returns `None`, an invalid response, or until all requests have been responded to. 
+ pub fn respond_to_all(mut self, responder: F) -> Vec + where F: Fn(super::CompleteRequest) -> Option + { + let mut responses = Vec::new(); + + while let Some(response) = self.next_complete().and_then(&responder) { + match self.supply_response(&response) { + Ok(()) => responses.push(response), + Err(e) => { + debug!(target: "pip", "produced bad response to request: {:?}", e); + return responses; + } + } + } + + responses } } diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 4d0049696..38a54b52c 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -69,11 +69,15 @@ pub use self::builder::{RequestBuilder, Requests}; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct NoSuchOutput; +/// Wrong kind of response corresponding to request. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct WrongKind; + /// Error on processing a response. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum ResponseError { - /// Wrong kind of response. - WrongKind, +pub enum ResponseError { + /// Error in validity. + Validity(T), /// No responses expected. Unexpected, } @@ -342,10 +346,6 @@ impl IncompleteRequest for Request { } } - fn check_response(&self, response: &Response) -> bool { - self.kind() == response.kind() - } - fn complete(self) -> Result { match self { Request::Headers(req) => req.complete().map(CompleteRequest::Headers), @@ -360,6 +360,19 @@ impl IncompleteRequest for Request { } } +impl CheckedRequest for Request { + type Extract = (); + type Error = WrongKind; + + fn check_response(&self, response: &Response) -> Result<(), WrongKind> { + if self.kind() == response.kind() { + Ok(()) + } else { + Err(WrongKind) + } + } +} + /// Kinds of requests. /// Doubles as the "ID" field of the request. #[repr(u8)] @@ -520,14 +533,22 @@ pub trait IncompleteRequest: Sized { /// Only outputs previously checked with `check_outputs` may be available. 
fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result; - /// Check whether the response matches (beyond the type). - fn check_response(&self, _response: &Self::Response) -> bool { true } - /// Attempt to convert this request into its complete variant. /// Will succeed if all fields have been filled, will fail otherwise. fn complete(self) -> Result; } +/// A request which can be checked against its response for more validity. +pub trait CheckedRequest: IncompleteRequest { + /// Data extracted during the check. + type Extract; + /// Error encountered during the check. + type Error; + + /// Check whether the response matches (beyond the type). + fn check_response(&self, _response: &Self::Response) -> Result; +} + /// A response-like object. /// /// These contain re-usable outputs. From 3eea77709b66e3f03c39632102584b94e9645cd3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 5 Apr 2017 19:19:04 +0200 Subject: [PATCH 09/24] convert Request to CheckedRequest --- ethcore/light/src/on_demand/request.rs | 62 +++++++++++++++++++++++++- 1 file changed, 61 insertions(+), 1 deletion(-) diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 8efee1d20..ea5214786 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -66,6 +66,66 @@ pub enum CheckedRequest { Execution(TransactionProof, net_request::IncompleteExecutionRequest), } +impl From for CheckedRequest { + fn from(req: Request) -> Self { + match req { + Request::HeaderByHash(req) => { + let net_req = net_request::IncompleteHeadersRequest { + start: net_request::HashOrNumber::Hash(req.0).into(), + skip: 0, + max: 1, + reverse: false, + }; + CheckedRequest::HeaderByHash(req, net_req) + } + Request::HeaderProof(req) => { + let net_req = net_request::IncompleteHeaderProofRequest { + num: req.num().into(), + }; + CheckedRequest::HeaderProof(req, net_req) + } + Request::Body(req) => { + let net_req = 
net_request::IncompleteBodyRequest { + hash: req.hash.into(), + }; + CheckedRequest::Body(req, net_req) + } + Request::Receipts(req) => { + let net_req = net_request::IncompleteReceiptsRequest { + hash: req.0.hash().into(), + }; + CheckedRequest::Receipts(req, net_req) + } + Request::Account(req) => { + let net_req = net_request::IncompleteAccountRequest { + block_hash: req.header.hash().into(), + address_hash: ::util::Hashable::sha3(&req.address).into(), + }; + CheckedRequest::Account(req, net_req) + } + Request::Code(req) => { + let net_req = net_request::IncompleteCodeRequest { + block_hash: req.block_id.0.into(), + code_hash: req.code_hash.into(), + }; + CheckedRequest::Code(req, net_req) + } + Request::Execution(req) => { + let net_req = net_request::IncompleteExecutionRequest { + block_hash: req.header.hash().into(), + from: req.tx.sender(), + gas: req.tx.gas, + gas_price: req.tx.gas_price, + action: req.tx.action.clone(), + value: req.tx.value, + data: req.tx.data.clone(), + }; + CheckedRequest::Execution(req, net_req) + } + } + } +} + impl IncompleteRequest for CheckedRequest { type Complete = net_request::CompleteRequest; type Response = net_request::Response; @@ -465,7 +525,7 @@ mod tests { let hash = header.hash(); let raw_header = encoded::Header::new(::rlp::encode(&header).to_vec()); - assert!(HeaderByHash(hash).check_response(&raw_header).is_ok()) + assert!(HeaderByHash(hash).check_response(&[raw_header]).is_ok()) } #[test] From 574cfae470396899e6484312d1f6f2fe9623fa58 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 6 Apr 2017 15:34:48 +0200 Subject: [PATCH 10/24] dispatch batched requests --- ethcore/light/src/on_demand/mod.rs | 673 +++++++++------------ ethcore/light/src/on_demand/request.rs | 30 + ethcore/light/src/types/request/builder.rs | 22 +- ethcore/light/src/types/request/mod.rs | 58 ++ 4 files changed, 403 insertions(+), 380 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 
fcdebb2d4..145e66703 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -43,6 +43,8 @@ use request::{self as basic_request, Request as NetworkRequest, Response as Netw pub mod request; +pub use self::request::{CheckedRequest ,Request, Response}; + // relevant peer info. struct Peer { status: Status, @@ -50,23 +52,13 @@ struct Peer { } impl Peer { - // Whether a given peer can handle a specific request. - fn can_handle(&self, pending: &Pending) -> bool { - match *pending { - Pending::HeaderProof(ref req, _) => - self.capabilities.serve_headers && self.status.head_num > req.num(), - Pending::HeaderByHash(_, _) => self.capabilities.serve_headers, - Pending::Block(ref req, _) => - self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x <= req.header.number()), - Pending::BlockReceipts(ref req, _) => - self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x <= req.0.number()), - Pending::Account(ref req, _) => - self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.header.number()), - Pending::Code(ref req, _) => - self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.block_id.1), - Pending::TxProof(ref req, _) => - self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.header.number()), - } + // whether this peer can fulfill the + fn can_fulfill(&self, c: &Capabilities) -> bool { + let caps = &self.capabilities; + + caps.serve_headers == c.serve_headers && + caps.serve_chain_since >= c.serve_chain_since && + caps.serve_state_since >= c.serve_chain_since } } @@ -78,262 +70,256 @@ enum ChtProofSender { } // Attempted request info and sender to put received value. 
-enum Pending { - HeaderProof(request::HeaderProof, ChtProofSender), - HeaderByHash(request::HeaderByHash, Sender), - Block(request::Body, Sender), - BlockReceipts(request::BlockReceipts, Sender>), - Account(request::Account, Sender), - Code(request::Code, Sender), - TxProof(request::TransactionProof, Sender>), +struct Pending { + requests: basic_request::Requests, + net_requests: basic_request::Requests, + required_capabilities: Capabilities, + responses: Vec, + sender: oneshot::Sender>, } -impl Pending { - // Create a network request. - fn make_request(&self) -> NetworkRequest { - match *self { - Pending::HeaderByHash(ref req, _) => NetworkRequest::Headers(basic_request::IncompleteHeadersRequest { - start: basic_request::HashOrNumber::Hash(req.0).into(), - skip: 0, - max: 1, - reverse: false, - }), - Pending::HeaderProof(ref req, _) => NetworkRequest::HeaderProof(basic_request::IncompleteHeaderProofRequest { - num: req.num().into(), - }), - Pending::Block(ref req, _) => NetworkRequest::Body(basic_request::IncompleteBodyRequest { - hash: req.hash.into(), - }), - Pending::BlockReceipts(ref req, _) => NetworkRequest::Receipts(basic_request::IncompleteReceiptsRequest { - hash: req.0.hash().into(), - }), - Pending::Account(ref req, _) => NetworkRequest::Account(basic_request::IncompleteAccountRequest { - block_hash: req.header.hash().into(), - address_hash: ::util::Hashable::sha3(&req.address).into(), - }), - Pending::Code(ref req, _) => NetworkRequest::Code(basic_request::IncompleteCodeRequest { - block_hash: req.block_id.0.into(), - code_hash: req.code_hash.into(), - }), - Pending::TxProof(ref req, _) => NetworkRequest::Execution(basic_request::IncompleteExecutionRequest { - block_hash: req.header.hash().into(), - from: req.tx.sender(), - gas: req.tx.gas, - gas_price: req.tx.gas_price, - action: req.tx.action.clone(), - value: req.tx.value, - data: req.tx.data.clone(), - }), +// helper to guess capabilities required for a given batch of network requests. 
+fn guess_capabilities(requests: &[CheckedRequest]) -> Capabilities { + let mut caps = Capabilities { + serve_headers: false, + serve_chain_since: None, + serve_state_since: None, + tx_relay: false, + }; + + let update_since = |current: &mut Option, new| + *current = match *current { + Some(x) => Some(::std::cmp::min(x, new)), + None => Some(new), + }; + + for request in requests { + match *request { + // TODO: might be worth returning a required block number for this also. + CheckedRequest::HeaderProof(_, _) => + caps.serve_headers = true, + CheckedRequest::HeaderByHash(_, _) => + caps.serve_headers = true, + CheckedRequest::Body(ref req, _) => + update_since(&mut caps.serve_chain_since, req.header.number()), + CheckedRequest::Receipts(ref req, _) => + update_since(&mut caps.serve_chain_since, req.0.number()), + CheckedRequest::Account(ref req, _) => + update_since(&mut caps.serve_state_since, req.header.number()), + CheckedRequest::Code(ref req, _) => + update_since(&mut caps.serve_state_since, req.block_id.1), + CheckedRequest::Execution(ref req, _) => + update_since(&mut caps.serve_state_since, req.header.number()), } } + + caps } /// On demand request service. See module docs for more details. /// Accumulates info about all peers' capabilities and dispatches /// requests to them accordingly. +// lock in declaration order. pub struct OnDemand { + pending: RwLock>, peers: RwLock>, - pending_requests: RwLock>, + in_transit: RwLock>, cache: Arc>, - orphaned_requests: RwLock>, - start_nonce: U256, } const RECEIVER_IN_SCOPE: &'static str = "Receiver is still in scope, so it's not dropped; qed"; impl OnDemand { /// Create a new `OnDemand` service with the given cache. 
- pub fn new(cache: Arc>, account_start_nonce: U256) -> Self { + pub fn new(cache: Arc>) -> Self { OnDemand { + pending: RwLock::new(Vec::new()), peers: RwLock::new(HashMap::new()), - pending_requests: RwLock::new(HashMap::new()), + in_transit: RwLock::new(HashMap::new()), cache: cache, - orphaned_requests: RwLock::new(Vec::new()), - start_nonce: account_start_nonce, } } - /// Request a header's hash by block number and CHT root hash. - /// Returns the hash. - pub fn hash_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver { - let (sender, receiver) = oneshot::channel(); - let cached = { - let mut cache = self.cache.lock(); - cache.block_hash(&req.num()) - }; + // /// Request a header's hash by block number and CHT root hash. + // /// Returns the hash. + // pub fn hash_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver { + // let (sender, receiver) = oneshot::channel(); + // let cached = { + // let mut cache = self.cache.lock(); + // cache.block_hash(&req.num()) + // }; - match cached { - Some(hash) => sender.send(hash).expect(RECEIVER_IN_SCOPE), - None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Hash(sender))), - } - receiver - } + // match cached { + // Some(hash) => sender.send(hash).expect(RECEIVER_IN_SCOPE), + // None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Hash(sender))), + // } + // receiver + // } - /// Request a canonical block's chain score. - /// Returns the chain score. - pub fn chain_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver { - let (sender, receiver) = oneshot::channel(); - let cached = { - let mut cache = self.cache.lock(); - cache.block_hash(&req.num()).and_then(|hash| cache.chain_score(&hash)) - }; + // /// Request a canonical block's chain score. + // /// Returns the chain score. 
+ // pub fn chain_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver { + // let (sender, receiver) = oneshot::channel(); + // let cached = { + // let mut cache = self.cache.lock(); + // cache.block_hash(&req.num()).and_then(|hash| cache.chain_score(&hash)) + // }; - match cached { - Some(score) => sender.send(score).expect(RECEIVER_IN_SCOPE), - None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::ChainScore(sender))), - } + // match cached { + // Some(score) => sender.send(score).expect(RECEIVER_IN_SCOPE), + // None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::ChainScore(sender))), + // } - receiver - } + // receiver + // } - /// Request a canonical block's hash and chain score by number. - /// Returns the hash and chain score. - pub fn hash_and_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver<(H256, U256)> { - let (sender, receiver) = oneshot::channel(); - let cached = { - let mut cache = self.cache.lock(); - let hash = cache.block_hash(&req.num()); - ( - hash.clone(), - hash.and_then(|hash| cache.chain_score(&hash)), - ) - }; + // /// Request a canonical block's hash and chain score by number. + // /// Returns the hash and chain score. 
+ // pub fn hash_and_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver<(H256, U256)> { + // let (sender, receiver) = oneshot::channel(); + // let cached = { + // let mut cache = self.cache.lock(); + // let hash = cache.block_hash(&req.num()); + // ( + // hash.clone(), + // hash.and_then(|hash| cache.chain_score(&hash)), + // ) + // }; - match cached { - (Some(hash), Some(score)) => sender.send((hash, score)).expect(RECEIVER_IN_SCOPE), - _ => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Both(sender))), - } + // match cached { + // (Some(hash), Some(score)) => sender.send((hash, score)).expect(RECEIVER_IN_SCOPE), + // _ => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Both(sender))), + // } - receiver - } + // receiver + // } - /// Request a header by hash. This is less accurate than by-number because we don't know - /// where in the chain this header lies, and therefore can't find a peer who is supposed to have - /// it as easily. - pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> Receiver { - let (sender, receiver) = oneshot::channel(); - match { self.cache.lock().block_header(&req.0) } { - Some(hdr) => sender.send(hdr).expect(RECEIVER_IN_SCOPE), - None => self.dispatch(ctx, Pending::HeaderByHash(req, sender)), - } - receiver - } + // /// Request a header by hash. This is less accurate than by-number because we don't know + // /// where in the chain this header lies, and therefore can't find a peer who is supposed to have + // /// it as easily. + // pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> Receiver { + // let (sender, receiver) = oneshot::channel(); + // match { self.cache.lock().block_header(&req.0) } { + // Some(hdr) => sender.send(hdr).expect(RECEIVER_IN_SCOPE), + // None => self.dispatch(ctx, Pending::HeaderByHash(req, sender)), + // } + // receiver + // } - /// Request a block, given its header. 
Block bodies are requestable by hash only, - /// and the header is required anyway to verify and complete the block body - /// -- this just doesn't obscure the network query. - pub fn block(&self, ctx: &BasicContext, req: request::Body) -> Receiver { + // /// Request a block, given its header. Block bodies are requestable by hash only, + // /// and the header is required anyway to verify and complete the block body + // /// -- this just doesn't obscure the network query. + // pub fn block(&self, ctx: &BasicContext, req: request::Body) -> Receiver { + // let (sender, receiver) = oneshot::channel(); + + // // fast path for empty body. + // if req.header.transactions_root() == SHA3_NULL_RLP && req.header.uncles_hash() == SHA3_EMPTY_LIST_RLP { + // let mut stream = RlpStream::new_list(3); + // stream.append_raw(&req.header.into_inner(), 1); + // stream.begin_list(0); + // stream.begin_list(0); + + // sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE); + // } else { + // match { self.cache.lock().block_body(&req.hash) } { + // Some(body) => { + // let mut stream = RlpStream::new_list(3); + // let body = body.rlp(); + // stream.append_raw(&req.header.into_inner(), 1); + // stream.append_raw(&body.at(0).as_raw(), 1); + // stream.append_raw(&body.at(1).as_raw(), 1); + + // sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE); + // } + // None => self.dispatch(ctx, Pending::Block(req, sender)), + // } + // } + // receiver + // } + + // /// Request the receipts for a block. The header serves two purposes: + // /// provide the block hash to fetch receipts for, and for verification of the receipts root. + // pub fn block_receipts(&self, ctx: &BasicContext, req: request::BlockReceipts) -> Receiver> { + // let (sender, receiver) = oneshot::channel(); + + // // fast path for empty receipts. 
+ // if req.0.receipts_root() == SHA3_NULL_RLP { + // sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE); + // } else { + // match { self.cache.lock().block_receipts(&req.0.hash()) } { + // Some(receipts) => sender.send(receipts).expect(RECEIVER_IN_SCOPE), + // None => self.dispatch(ctx, Pending::BlockReceipts(req, sender)), + // } + // } + + // receiver + // } + + // /// Request an account by address and block header -- which gives a hash to query and a state root + // /// to verify against. + // pub fn account(&self, ctx: &BasicContext, req: request::Account) -> Receiver { + // let (sender, receiver) = oneshot::channel(); + // self.dispatch(ctx, Pending::Account(req, sender)); + // receiver + // } + + // /// Request code by address, known code hash, and block header. + // pub fn code(&self, ctx: &BasicContext, req: request::Code) -> Receiver { + // let (sender, receiver) = oneshot::channel(); + + // // fast path for no code. + // if req.code_hash == SHA3_EMPTY { + // sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE) + // } else { + // self.dispatch(ctx, Pending::Code(req, sender)); + // } + + // receiver + // } + + // /// Request proof-of-execution for a transaction. + // pub fn transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof) -> Receiver> { + // let (sender, receiver) = oneshot::channel(); + + // self.dispatch(ctx, Pending::TxProof(req, sender)); + + // receiver + // } + + /// Submit a batch of requests. + /// + /// Fails if back-references are not coherent. + /// The returned vector of responses will match the requests exactly. + pub fn make_requests(&self, ctx: &BasicContext, requests: Vec) + -> Result>, basic_request::NoSuchOutput> + { let (sender, receiver) = oneshot::channel(); - // fast path for empty body. 
- if req.header.transactions_root() == SHA3_NULL_RLP && req.header.uncles_hash() == SHA3_EMPTY_LIST_RLP { - let mut stream = RlpStream::new_list(3); - stream.append_raw(&req.header.into_inner(), 1); - stream.begin_list(0); - stream.begin_list(0); - - sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE); - } else { - match { self.cache.lock().block_body(&req.hash) } { - Some(body) => { - let mut stream = RlpStream::new_list(3); - let body = body.rlp(); - stream.append_raw(&req.header.into_inner(), 1); - stream.append_raw(&body.at(0).as_raw(), 1); - stream.append_raw(&body.at(1).as_raw(), 1); - - sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE); - } - None => self.dispatch(ctx, Pending::Block(req, sender)), - } - } - receiver - } - - /// Request the receipts for a block. The header serves two purposes: - /// provide the block hash to fetch receipts for, and for verification of the receipts root. - pub fn block_receipts(&self, ctx: &BasicContext, req: request::BlockReceipts) -> Receiver> { - let (sender, receiver) = oneshot::channel(); - - // fast path for empty receipts. - if req.0.receipts_root() == SHA3_NULL_RLP { - sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE); - } else { - match { self.cache.lock().block_receipts(&req.0.hash()) } { - Some(receipts) => sender.send(receipts).expect(RECEIVER_IN_SCOPE), - None => self.dispatch(ctx, Pending::BlockReceipts(req, sender)), - } - } - - receiver - } - - /// Request an account by address and block header -- which gives a hash to query and a state root - /// to verify against. - pub fn account(&self, ctx: &BasicContext, req: request::Account) -> Receiver { - let (sender, receiver) = oneshot::channel(); - self.dispatch(ctx, Pending::Account(req, sender)); - receiver - } - - /// Request code by address, known code hash, and block header. 
- pub fn code(&self, ctx: &BasicContext, req: request::Code) -> Receiver { - let (sender, receiver) = oneshot::channel(); - - // fast path for no code. - if req.code_hash == SHA3_EMPTY { - sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE) - } else { - self.dispatch(ctx, Pending::Code(req, sender)); - } - - receiver - } - - /// Request proof-of-execution for a transaction. - pub fn transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof) -> Receiver> { - let (sender, receiver) = oneshot::channel(); - - self.dispatch(ctx, Pending::TxProof(req, sender)); - - receiver - } - - // dispatch the request, with a "suitability" function to filter acceptable peers. - fn dispatch(&self, ctx: &BasicContext, pending: Pending) { let mut builder = basic_request::RequestBuilder::default(); - builder.push(pending.make_request()) - .expect("make_request always returns fully complete request; qed"); - let complete = builder.build(); - - let kind = complete.requests()[0].kind(); - for (id, peer) in self.peers.read().iter() { - if !peer.can_handle(&pending) { continue } - match ctx.request_from(*id, complete.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "{}: Assigned {:?} to peer {}", - req_id, kind, id); - - self.pending_requests.write().insert( - req_id, - pending, - ); - return - } - Err(net::Error::NoCredits) => {} - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } + let responses = Vec::with_capacity(requests.len()); + for request in requests { + builder.push(CheckedRequest::from(request))?; } - self.orphaned_requests.write().push(pending); + let requests = builder.build(); + let net_requests = requests.clone().map_requests(|req| req.into_net_request()); + let capabilities = guess_capabilities(requests.requests()); + + self.pending.write().push(Pending { + requests: requests, + net_requests: net_requests, + required_capabilities: capabilities, + responses: responses, + sender: sender, + }); + + 
Ok(receiver) } - - // dispatch orphaned requests, and discard those for which the corresponding + // dispatch pending requests, and discard those for which the corresponding // receiver has been dropped. - fn dispatch_orphaned(&self, ctx: &BasicContext) { + fn dispatch_pending(&self, ctx: &BasicContext) { // wrapper future for calling `poll_cancel` on our `Senders` to preserve // the invariant that it's always within a task. struct CheckHangup<'a, T: 'a>(&'a mut Sender); @@ -356,35 +342,42 @@ impl OnDemand { CheckHangup(send).wait().expect("CheckHangup always returns ok; qed") } - if self.orphaned_requests.read().is_empty() { return } + if self.pending.read().is_empty() { return } + let mut pending = self.pending.write(); - let to_dispatch = ::std::mem::replace(&mut *self.orphaned_requests.write(), Vec::new()); + // iterate over all pending requests, and check them for hang-up. + // then, try and find a peer who can serve it. + let peers = self.peers.read(); + *pending = ::std::mem::replace(&mut *pending, Vec::new()).into_iter() + .filter_map(|mut pending| match check_hangup(&mut pending.sender) { + true => Some(pending), + false => None, + }) + .filter_map(|pending| { + for (peer_id, peer) in peers.iter() { // .shuffle? 
+ if !peer.can_fulfill(&pending.required_capabilities) { + continue + } - trace!(target: "on_demand", "Attempting to dispatch {} orphaned requests.", to_dispatch.len()); - for mut orphaned in to_dispatch { - let hung_up = match orphaned { - Pending::HeaderProof(_, ref mut sender) => match *sender { - ChtProofSender::Both(ref mut s) => check_hangup(s), - ChtProofSender::Hash(ref mut s) => check_hangup(s), - ChtProofSender::ChainScore(ref mut s) => check_hangup(s), - }, - Pending::HeaderByHash(_, ref mut sender) => check_hangup(sender), - Pending::Block(_, ref mut sender) => check_hangup(sender), - Pending::BlockReceipts(_, ref mut sender) => check_hangup(sender), - Pending::Account(_, ref mut sender) => check_hangup(sender), - Pending::Code(_, ref mut sender) => check_hangup(sender), - Pending::TxProof(_, ref mut sender) => check_hangup(sender), - }; - - if !hung_up { self.dispatch(ctx, orphaned) } - } + match ctx.request_from(*peer_id, pending.net_requests.clone()) { + Ok(req_id) => { + self.in_transit.write().insert(req_id, pending); + return None + } + Err(net::Error::NoCredits) => {} + Err(e) => debug!(target: "on_demand", "Error dispatching request to peer: {}", e), + } + } + Some(pending) + }) + .collect(); // `pending` now contains all requests we couldn't dispatch. 
} } impl Handler for OnDemand { fn on_connect(&self, ctx: &EventContext, status: &Status, capabilities: &Capabilities) { self.peers.write().insert(ctx.peer(), Peer { status: status.clone(), capabilities: capabilities.clone() }); - self.dispatch_orphaned(ctx.as_basic()); + self.dispatch_pending(ctx.as_basic()); } fn on_disconnect(&self, ctx: &EventContext, unfulfilled: &[ReqId]) { @@ -392,16 +385,16 @@ impl Handler for OnDemand { let ctx = ctx.as_basic(); { - let mut orphaned = self.orphaned_requests.write(); + let mut pending = self.pending.write(); for unfulfilled in unfulfilled { - if let Some(pending) = self.pending_requests.write().remove(unfulfilled) { + if let Some(unfulfilled) = self.in_transit.write().remove(unfulfilled) { trace!(target: "on_demand", "Attempting to reassign dropped request"); - orphaned.push(pending); + pending.push(unfulfilled); } } } - self.dispatch_orphaned(ctx); + self.dispatch_pending(ctx); } fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) { @@ -413,142 +406,68 @@ impl Handler for OnDemand { } } - self.dispatch_orphaned(ctx.as_basic()); + self.dispatch_pending(ctx.as_basic()); } fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[basic_request::Response]) { + use request::IncompleteRequest; + let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { + let mut pending = match self.in_transit.write().remove(&req_id) { Some(req) => req, None => return, }; - let response = match responses.get(0) { - Some(response) => response, - None => { - trace!(target: "on_demand", "Ignoring empty response for request {}", req_id); - self.dispatch(ctx.as_basic(), req); - return; - } - }; + // for each incoming response + // 1. ensure verification data filled. + // 2. pending.requests.supply_response + // 3. 
if extracted on-demand response + for response in responses { + match pending.requests.supply_response(response) { + Ok(response) => pending.responses.push(response), + Err(e) => { + let peer = ctx.peer(); + debug!(target: "on_demand", "Peer {} gave bad response: {:?}", peer, e); + ctx.disable_peer(peer); - trace!(target: "on_demand", "Handling response for request {}, kind={:?}", req_id, response.kind()); - - // handle the response appropriately for the request. - // all branches which do not return early lead to disabling of the peer - // due to misbehavior. - match req { - Pending::HeaderProof(req, sender) => { - if let NetworkResponse::HeaderProof(ref response) = *response { - match req.check_response(&response.proof) { - Ok((hash, score)) => { - let mut cache = self.cache.lock(); - cache.insert_block_hash(req.num(), hash); - cache.insert_chain_score(hash, score); - - match sender { - ChtProofSender::Both(sender) => { let _ = sender.send((hash, score)); } - ChtProofSender::Hash(sender) => { let _ = sender.send(hash); } - ChtProofSender::ChainScore(sender) => { let _ = sender.send(score); } - } - return - } - Err(e) => warn!(target: "on_demand", "Error handling response for header request: {:?}", e), - } - } - } - Pending::HeaderByHash(req, sender) => { - if let NetworkResponse::Headers(ref response) = *response { - match req.check_response(&response.headers) { - Ok(header) => { - self.cache.lock().insert_block_header(req.0, header.clone()); - let _ = sender.send(header); - return - } - Err(e) => warn!(target: "on_demand", "Error handling response for header request: {:?}", e), - } - } - } - Pending::Block(req, sender) => { - if let NetworkResponse::Body(ref response) = *response { - match req.check_response(&response.body) { - Ok(block) => { - self.cache.lock().insert_block_body(req.hash, response.body.clone()); - let _ = sender.send(block); - return - } - Err(e) => warn!(target: "on_demand", "Error handling response for block request: {:?}", e), - } - } - } - 
Pending::BlockReceipts(req, sender) => { - if let NetworkResponse::Receipts(ref response) = *response { - match req.check_response(&response.receipts) { - Ok(receipts) => { - let hash = req.0.hash(); - self.cache.lock().insert_block_receipts(hash, receipts.clone()); - let _ = sender.send(receipts); - return - } - Err(e) => warn!(target: "on_demand", "Error handling response for receipts request: {:?}", e), - } - } - } - Pending::Account(req, sender) => { - if let NetworkResponse::Account(ref response) = *response { - match req.check_response(&response.proof) { - Ok(account) => { - let account = account.unwrap_or_else(|| { - BasicAccount { - balance: 0.into(), - nonce: self.start_nonce, - code_hash: SHA3_EMPTY, - storage_root: SHA3_NULL_RLP - } - }); - - // TODO: validate against request outputs. - // needs engine + env info as part of request. - let _ = sender.send(account); - return - } - Err(e) => warn!(target: "on_demand", "Error handling response for state request: {:?}", e), - } - } - } - Pending::Code(req, sender) => { - if let NetworkResponse::Code(ref response) = *response { - match req.check_response(response.code.as_slice()) { - Ok(code) => { - let _ = sender.send(code); - return - } - Err(e) => warn!(target: "on_demand", "Error handling response for code request: {:?}", e), - } - } - } - Pending::TxProof(req, sender) => { - if let NetworkResponse::Execution(ref response) = *response { - match req.check_response(&response.items) { - ProvedExecution::Complete(executed) => { - let _ = sender.send(Ok(executed)); - return - } - ProvedExecution::Failed(err) => { - let _ = sender.send(Err(err)); - return - } - ProvedExecution::BadProof => warn!(target: "on_demand", "Error handling response for transaction proof request"), - } + break; } } } - ctx.disable_peer(peer); + if pending.requests.is_complete() { + let _ = pending.sender.send(pending.responses); + + return; + } + + // update network requests (unless we're done, in which case fulfill the future.) 
+ let mut builder = basic_request::RequestBuilder::default(); + let num_answered = pending.requests.num_answered(); + let mut mapping = move |idx| idx - num_answered; + + for request in pending.requests.requests().iter().skip(num_answered) { + let mut net_req = request.clone().into_net_request(); + + // all back-references with request index less than `num_answered` have + // been filled by now. all remaining requests point to nothing earlier + // than the next unanswered request. + net_req.adjust_refs(&mut mapping); + builder.push(net_req) + .expect("all back-references to answered requests have been filled; qed"); + } + + // update pending fields and re-queue. + let capabilities = guess_capabilities(&pending.requests.requests()[num_answered..]); + pending.net_requests = builder.build(); + pending.required_capabilities = capabilities; + + self.pending.write().push(pending); + self.dispatch_pending(ctx.as_basic()); } fn tick(&self, ctx: &BasicContext) { - self.dispatch_orphaned(ctx) + self.dispatch_pending(ctx) } } @@ -587,7 +506,7 @@ mod tests { assert!(on_demand.orphaned_requests.read().len() == 1); drop(result); - on_demand.dispatch_orphaned(&FakeContext); + on_demand.dispatch_pending(&FakeContext); assert!(on_demand.orphaned_requests.read().is_empty()); } } diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index ea5214786..ad348445c 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -126,6 +126,23 @@ impl From for CheckedRequest { } } +impl CheckedRequest { + /// Convert this into a network request. 
+ pub fn into_net_request(self) -> net_request::Request { + use ::request::Request as NetRequest; + + match self { + CheckedRequest::HeaderProof(_, req) => NetRequest::HeaderProof(req), + CheckedRequest::HeaderByHash(_, req) => NetRequest::Headers(req), + CheckedRequest::Receipts(_, req) => NetRequest::Receipts(req), + CheckedRequest::Body(_, req) => NetRequest::Body(req), + CheckedRequest::Account(_, req) => NetRequest::Account(req), + CheckedRequest::Code(_, req) => NetRequest::Code(req), + CheckedRequest::Execution(_, req) => NetRequest::Execution(req), + } + } +} + impl IncompleteRequest for CheckedRequest { type Complete = net_request::CompleteRequest; type Response = net_request::Response; @@ -192,6 +209,19 @@ impl IncompleteRequest for CheckedRequest { CheckedRequest::Execution(_, req) => req.complete().map(CompleteRequest::Execution), } } + + + fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { + match *self { + CheckedRequest::HeaderProof(_, ref mut req) => req.adjust_refs(mapping), + CheckedRequest::HeaderByHash(_, ref mut req) => req.adjust_refs(mapping), + CheckedRequest::Receipts(_, ref mut req) => req.adjust_refs(mapping), + CheckedRequest::Body(_, ref mut req) => req.adjust_refs(mapping), + CheckedRequest::Account(_, ref mut req) => req.adjust_refs(mapping), + CheckedRequest::Code(_, ref mut req) => req.adjust_refs(mapping), + CheckedRequest::Execution(_, ref mut req) => req.adjust_refs(mapping), + } + } } impl net_request::CheckedRequest for CheckedRequest { diff --git a/ethcore/light/src/types/request/builder.rs b/ethcore/light/src/types/request/builder.rs index ec271f0a3..6a40d288e 100644 --- a/ethcore/light/src/types/request/builder.rs +++ b/ethcore/light/src/types/request/builder.rs @@ -87,9 +87,14 @@ impl Requests { /// Get the number of answered requests. pub fn num_answered(&self) -> usize { self.answered } + /// Whether the batch is complete. 
+ pub fn is_complete(&self) -> bool { + self.answered == self.requests.len() + } + /// Get the next request as a filled request. Returns `None` when all requests answered. pub fn next_complete(&self) -> Option { - if self.answered == self.requests.len() { + if self.is_complete() { None } else { Some(self.requests[self.answered].clone() @@ -97,6 +102,17 @@ impl Requests { .expect("All outputs checked as invariant of `Requests` object; qed")) } } + + /// Map requests from one type into another. + pub fn map_requests(self, f: F) -> Requests + where F: FnMut(T) -> U, U: IncompleteRequest + { + Requests { + outputs: self.outputs, + requests: self.requests.into_iter().map(f).collect(), + answered: self.answered, + } + } } impl Requests { @@ -122,8 +138,8 @@ impl Requests { self.answered += 1; - // fill as much of the next request as we can. - if let Some(ref mut req) = self.requests.get_mut(self.answered) { + // fill as much of each remaining request as we can. + for req in self.requests.iter_mut().skip(self.answered) { req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) } diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 38a54b52c..f26908acd 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -100,6 +100,12 @@ impl Field { _ => Err(NoSuchOutput), } } + + fn adjust_req(&mut self, mut mapping: F) where F: FnMut(usize) -> usize { + if let Field::BackReference(ref mut req_idx, _) = *self { + *req_idx = mapping(*req_idx) + } + } } impl From for Field { @@ -358,6 +364,19 @@ impl IncompleteRequest for Request { Request::Execution(req) => req.complete().map(CompleteRequest::Execution), } } + + fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { + match *self { + Request::Headers(ref mut req) => req.adjust_refs(mapping), + Request::HeaderProof(ref mut req) => req.adjust_refs(mapping), + Request::Receipts(ref mut req) => 
req.adjust_refs(mapping), + Request::Body(ref mut req) => req.adjust_refs(mapping), + Request::Account(ref mut req) => req.adjust_refs(mapping), + Request::Storage(ref mut req) => req.adjust_refs(mapping), + Request::Code(ref mut req) => req.adjust_refs(mapping), + Request::Execution(ref mut req) => req.adjust_refs(mapping), + } + } } impl CheckedRequest for Request { @@ -536,6 +555,9 @@ pub trait IncompleteRequest: Sized { /// Attempt to convert this request into its complete variant. /// Will succeed if all fields have been filled, will fail otherwise. fn complete(self) -> Result; + + /// Adjust back-reference request indices. + fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize; } /// A request which can be checked against its response for more validity. @@ -631,6 +653,10 @@ pub mod header { reverse: self.reverse, }) } + + fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { + self.start.adjust_req(mapping) + } } /// A complete header request. @@ -745,6 +771,10 @@ pub mod header_proof { num: self.num.into_scalar()?, }) } + + fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { + self.num.adjust_req(mapping) + } } /// A complete header proof request. @@ -849,6 +879,10 @@ pub mod block_receipts { hash: self.hash.into_scalar()?, }) } + + fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { + self.hash.adjust_req(mapping) + } } /// A complete block receipts request. @@ -942,6 +976,10 @@ pub mod block_body { hash: self.hash.into_scalar()?, }) } + + fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { + self.hash.adjust_req(mapping) + } } /// A complete block body request. @@ -1062,6 +1100,11 @@ pub mod account { address_hash: self.address_hash.into_scalar()?, }) } + + fn adjust_refs(&mut self, mut mapping: F) where F: FnMut(usize) -> usize { + self.block_hash.adjust_req(&mut mapping); + self.address_hash.adjust_req(&mut mapping); + } } /// A complete request for an account. 
@@ -1212,6 +1255,12 @@ pub mod storage { key_hash: self.key_hash.into_scalar()?, }) } + + fn adjust_refs(&mut self, mut mapping: F) where F: FnMut(usize) -> usize { + self.block_hash.adjust_req(&mut mapping); + self.address_hash.adjust_req(&mut mapping); + self.key_hash.adjust_req(&mut mapping); + } } /// A complete request for a storage proof. @@ -1332,6 +1381,11 @@ pub mod contract_code { code_hash: self.code_hash.into_scalar()?, }) } + + fn adjust_refs(&mut self, mut mapping: F) where F: FnMut(usize) -> usize { + self.block_hash.adjust_req(&mut mapping); + self.code_hash.adjust_req(&mut mapping); + } } /// A complete request. @@ -1464,6 +1518,10 @@ pub mod execution { data: self.data, }) } + + fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { + self.block_hash.adjust_req(mapping); + } } /// A complete request. From d19232a84804bdd8d95328a88484176159e13a1a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 6 Apr 2017 17:22:05 +0200 Subject: [PATCH 11/24] use cache in on-demand again --- ethcore/light/src/on_demand/mod.rs | 351 ++++++++++++--------- ethcore/light/src/on_demand/request.rs | 92 ++++-- ethcore/light/src/types/request/builder.rs | 6 +- ethcore/light/src/types/request/mod.rs | 7 +- 4 files changed, 268 insertions(+), 188 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 145e66703..f5a1fe02e 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -27,11 +27,10 @@ use std::sync::Arc; use ethcore::basic_account::BasicAccount; use ethcore::encoded; use ethcore::receipt::Receipt; -use ethcore::state::ProvedExecution; use ethcore::executed::{Executed, ExecutionError}; -use futures::{Async, Poll, Future}; -use futures::sync::oneshot::{self, Sender, Receiver}; +use futures::{future, Async, Poll, Future, BoxFuture}; +use futures::sync::oneshot::{self, Sender, Receiver, Canceled}; use network::PeerId; use rlp::RlpStream; use util::{Bytes, 
RwLock, Mutex, U256, H256}; @@ -39,11 +38,14 @@ use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY, SHA3_EMPTY_LIST_RLP}; use net::{self, Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; use cache::Cache; -use request::{self as basic_request, Request as NetworkRequest, Response as NetworkResponse}; +use request::{self as basic_request, Request as NetworkRequest}; pub mod request; -pub use self::request::{CheckedRequest ,Request, Response}; +pub use self::request::{CheckedRequest, Request, Response}; + +/// The result of execution +pub type ExecutionResult = Result; // relevant peer info. struct Peer { @@ -62,13 +64,6 @@ impl Peer { } } -// Which portions of a CHT proof should be sent. -enum ChtProofSender { - Both(Sender<(H256, U256)>), - Hash(Sender), - ChainScore(Sender), -} - // Attempted request info and sender to put received value. struct Pending { requests: basic_request::Requests, @@ -127,7 +122,7 @@ pub struct OnDemand { cache: Arc>, } -const RECEIVER_IN_SCOPE: &'static str = "Receiver is still in scope, so it's not dropped; qed"; +const RESPONSES_MATCH: &'static str = "N requests always leads to N responses; qed"; impl OnDemand { /// Create a new `OnDemand` service with the given cache. @@ -140,151 +135,191 @@ impl OnDemand { } } - // /// Request a header's hash by block number and CHT root hash. - // /// Returns the hash. - // pub fn hash_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver { - // let (sender, receiver) = oneshot::channel(); - // let cached = { - // let mut cache = self.cache.lock(); - // cache.block_hash(&req.num()) - // }; + /// Request a header's hash by block number and CHT root hash. + /// Returns the hash. 
+ pub fn hash_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> BoxFuture { + let cached = { + let mut cache = self.cache.lock(); + cache.block_hash(&req.num()) + }; - // match cached { - // Some(hash) => sender.send(hash).expect(RECEIVER_IN_SCOPE), - // None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Hash(sender))), - // } - // receiver - // } + match cached { + Some(hash) => future::ok(hash).boxed(), + None => { + self.make_requests(ctx, vec![Request::HeaderProof(req)]) + .expect("request given fully fleshed out; qed") + .map(|responses| match responses[0] { + Response::HeaderProof(ref hash, _) => *hash, + _ => panic!("header proof request leads to header proof response; qed") + }) + .boxed() + }, + } + } - // /// Request a canonical block's chain score. - // /// Returns the chain score. - // pub fn chain_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver { - // let (sender, receiver) = oneshot::channel(); - // let cached = { - // let mut cache = self.cache.lock(); - // cache.block_hash(&req.num()).and_then(|hash| cache.chain_score(&hash)) - // }; + /// Request a canonical block's chain score. + /// Returns the chain score. 
+ pub fn chain_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> BoxFuture { + let cached = { + let mut cache = self.cache.lock(); + cache.block_hash(&req.num()).and_then(|hash| cache.chain_score(&hash)) + }; - // match cached { - // Some(score) => sender.send(score).expect(RECEIVER_IN_SCOPE), - // None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::ChainScore(sender))), - // } + match cached { + Some(score) => future::ok(score).boxed(), + None => { + self.make_requests(ctx, vec![Request::HeaderProof(req)]) + .expect("request given fully fleshed out; qed") + .map(|responses| match responses[0] { + Response::HeaderProof(_, ref score) => *score, + _ => panic!("header proof request leads to header proof response; qed") + }) + .boxed() + }, + } + } - // receiver - // } + /// Request a canonical block's hash and chain score by number. + /// Returns the hash and chain score. + pub fn hash_and_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> BoxFuture<(H256, U256), Canceled> { + let cached = { + let mut cache = self.cache.lock(); + let hash = cache.block_hash(&req.num()); + ( + hash.clone(), + hash.and_then(|hash| cache.chain_score(&hash)), + ) + }; - // /// Request a canonical block's hash and chain score by number. - // /// Returns the hash and chain score. 
- // pub fn hash_and_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver<(H256, U256)> { - // let (sender, receiver) = oneshot::channel(); - // let cached = { - // let mut cache = self.cache.lock(); - // let hash = cache.block_hash(&req.num()); - // ( - // hash.clone(), - // hash.and_then(|hash| cache.chain_score(&hash)), - // ) - // }; + match cached { + (Some(hash), Some(score)) => future::ok((hash, score)).boxed(), + _ => { + self.make_requests(ctx, vec![Request::HeaderProof(req)]) + .expect("request given fully fleshed out; qed") + .map(|responses| match responses[0] { + Response::HeaderProof(ref hash, ref score) => (*hash, *score), + _ => panic!("header proof request leads to header proof response; qed") + }) + .boxed() + }, + } + } - // match cached { - // (Some(hash), Some(score)) => sender.send((hash, score)).expect(RECEIVER_IN_SCOPE), - // _ => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Both(sender))), - // } + /// Request a header by hash. This is less accurate than by-number because we don't know + /// where in the chain this header lies, and therefore can't find a peer who is supposed to have + /// it as easily. + pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> BoxFuture { + match { self.cache.lock().block_header(&req.0) } { + Some(hdr) => future::ok(hdr).boxed(), + None => { + self.make_requests(ctx, vec![Request::HeaderByHash(req)]) + .expect("request given fully fleshed out; qed") + .map(|mut responses| match responses.pop().expect(RESPONSES_MATCH) { + Response::HeaderByHash(header) => header, + _ => panic!("header request leads to header response; qed") + }) + .boxed() + }, + } + } - // receiver - // } + /// Request a block, given its header. Block bodies are requestable by hash only, + /// and the header is required anyway to verify and complete the block body + /// -- this just doesn't obscure the network query. 
+ pub fn block(&self, ctx: &BasicContext, req: request::Body) -> BoxFuture { + // fast path for empty body. + if req.header.transactions_root() == SHA3_NULL_RLP && req.header.uncles_hash() == SHA3_EMPTY_LIST_RLP { + let mut stream = RlpStream::new_list(3); + stream.append_raw(&req.header.into_inner(), 1); + stream.begin_list(0); + stream.begin_list(0); - // /// Request a header by hash. This is less accurate than by-number because we don't know - // /// where in the chain this header lies, and therefore can't find a peer who is supposed to have - // /// it as easily. - // pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> Receiver { - // let (sender, receiver) = oneshot::channel(); - // match { self.cache.lock().block_header(&req.0) } { - // Some(hdr) => sender.send(hdr).expect(RECEIVER_IN_SCOPE), - // None => self.dispatch(ctx, Pending::HeaderByHash(req, sender)), - // } - // receiver - // } + future::ok(encoded::Block::new(stream.out())).boxed() + } else { + match { self.cache.lock().block_body(&req.hash) } { + Some(body) => { + let mut stream = RlpStream::new_list(3); + let body = body.rlp(); + stream.append_raw(&req.header.into_inner(), 1); + stream.append_raw(&body.at(0).as_raw(), 1); + stream.append_raw(&body.at(1).as_raw(), 1); - // /// Request a block, given its header. Block bodies are requestable by hash only, - // /// and the header is required anyway to verify and complete the block body - // /// -- this just doesn't obscure the network query. 
- // pub fn block(&self, ctx: &BasicContext, req: request::Body) -> Receiver { - // let (sender, receiver) = oneshot::channel(); + future::ok(encoded::Block::new(stream.out())).boxed() + } + None => { + self.make_requests(ctx, vec![Request::Body(req)]) + .expect("request given fully fleshed out; qed") + .map(|mut responses| match responses.pop().expect(RESPONSES_MATCH) { + Response::Body(body) => body, + _ => panic!("body request leads to body response; qed") + }) + .boxed() + } + } + } + } - // // fast path for empty body. - // if req.header.transactions_root() == SHA3_NULL_RLP && req.header.uncles_hash() == SHA3_EMPTY_LIST_RLP { - // let mut stream = RlpStream::new_list(3); - // stream.append_raw(&req.header.into_inner(), 1); - // stream.begin_list(0); - // stream.begin_list(0); + /// Request the receipts for a block. The header serves two purposes: + /// provide the block hash to fetch receipts for, and for verification of the receipts root. + pub fn block_receipts(&self, ctx: &BasicContext, req: request::BlockReceipts) -> BoxFuture, Canceled> { + // fast path for empty receipts. 
+ if req.0.receipts_root() == SHA3_NULL_RLP { + return future::ok(Vec::new()).boxed() + } - // sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE); - // } else { - // match { self.cache.lock().block_body(&req.hash) } { - // Some(body) => { - // let mut stream = RlpStream::new_list(3); - // let body = body.rlp(); - // stream.append_raw(&req.header.into_inner(), 1); - // stream.append_raw(&body.at(0).as_raw(), 1); - // stream.append_raw(&body.at(1).as_raw(), 1); + match { self.cache.lock().block_receipts(&req.0.hash()) } { + Some(receipts) => future::ok(receipts).boxed(), + None => { + self.make_requests(ctx, vec![Request::Receipts(req)]) + .expect("request given fully fleshed out; qed") + .map(|mut responses| match responses.pop().expect(RESPONSES_MATCH) { + Response::Receipts(receipts) => receipts, + _ => panic!("receipts request leads to receipts response; qed") + }) + .boxed() + }, + } + } - // sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE); - // } - // None => self.dispatch(ctx, Pending::Block(req, sender)), - // } - // } - // receiver - // } + /// Request an account by address and block header -- which gives a hash to query and a state root + /// to verify against. + /// `None` here means that no account by the queried key exists in the queried state. + pub fn account(&self, ctx: &BasicContext, req: request::Account) -> BoxFuture, Canceled> { + self.make_requests(ctx, vec![Request::Account(req)]) + .expect("request given fully fleshed out; qed") + .map(|mut responses| match responses.pop().expect(RESPONSES_MATCH) { + Response::Account(account) => account, + _ => panic!("account request leads to account response; qed") + }) + .boxed() + } - // /// Request the receipts for a block. The header serves two purposes: - // /// provide the block hash to fetch receipts for, and for verification of the receipts root. 
- // pub fn block_receipts(&self, ctx: &BasicContext, req: request::BlockReceipts) -> Receiver> { - // let (sender, receiver) = oneshot::channel(); + /// Request code by address, known code hash, and block header. + pub fn code(&self, ctx: &BasicContext, req: request::Code) -> BoxFuture { + // fast path for no code. + if req.code_hash == SHA3_EMPTY { + future::ok(Vec::new()).boxed() + } else { + self.make_requests(ctx, vec![Request::Code(req)]) + .expect("request given fully fleshed out; qed") + .map(|mut responses| match responses.pop().expect(RESPONSES_MATCH) { + Response::Code(code) => code, + _ => panic!("code request leads to code response; qed") + }) + .boxed() + } + } - // // fast path for empty receipts. - // if req.0.receipts_root() == SHA3_NULL_RLP { - // sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE); - // } else { - // match { self.cache.lock().block_receipts(&req.0.hash()) } { - // Some(receipts) => sender.send(receipts).expect(RECEIVER_IN_SCOPE), - // None => self.dispatch(ctx, Pending::BlockReceipts(req, sender)), - // } - // } - - // receiver - // } - - // /// Request an account by address and block header -- which gives a hash to query and a state root - // /// to verify against. - // pub fn account(&self, ctx: &BasicContext, req: request::Account) -> Receiver { - // let (sender, receiver) = oneshot::channel(); - // self.dispatch(ctx, Pending::Account(req, sender)); - // receiver - // } - - // /// Request code by address, known code hash, and block header. - // pub fn code(&self, ctx: &BasicContext, req: request::Code) -> Receiver { - // let (sender, receiver) = oneshot::channel(); - - // // fast path for no code. - // if req.code_hash == SHA3_EMPTY { - // sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE) - // } else { - // self.dispatch(ctx, Pending::Code(req, sender)); - // } - - // receiver - // } - - // /// Request proof-of-execution for a transaction. 
- // pub fn transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof) -> Receiver> { - // let (sender, receiver) = oneshot::channel(); - - // self.dispatch(ctx, Pending::TxProof(req, sender)); - - // receiver - // } + /// Request proof-of-execution for a transaction. + pub fn transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof) -> BoxFuture { + self.make_requests(ctx, vec![Request::Execution(req)]) + .expect("request given fully fleshed out; qed") + .map(|mut responses| match responses.pop().expect(RESPONSES_MATCH) { + Response::Execution(execution) => execution, + _ => panic!("execution request leads to execution response; qed") + }) + .boxed() + } /// Submit a batch of requests. /// @@ -295,6 +330,11 @@ impl OnDemand { { let (sender, receiver) = oneshot::channel(); + if requests.is_empty() { + assert!(sender.send(Vec::new()).is_ok(), "receiver still in scope; qed"); + return Ok(receiver); + } + let mut builder = basic_request::RequestBuilder::default(); let responses = Vec::with_capacity(requests.len()); @@ -314,6 +354,8 @@ impl OnDemand { sender: sender, }); + self.dispatch_pending(ctx); + Ok(receiver) } @@ -350,11 +392,13 @@ impl OnDemand { let peers = self.peers.read(); *pending = ::std::mem::replace(&mut *pending, Vec::new()).into_iter() .filter_map(|mut pending| match check_hangup(&mut pending.sender) { - true => Some(pending), - false => None, + false => Some(pending), + true => None, }) .filter_map(|pending| { for (peer_id, peer) in peers.iter() { // .shuffle? + // TODO: see which requests can be answered by the cache? 
+ if !peer.can_fulfill(&pending.required_capabilities) { continue } @@ -412,19 +456,20 @@ impl Handler for OnDemand { fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[basic_request::Response]) { use request::IncompleteRequest; - let peer = ctx.peer(); let mut pending = match self.in_transit.write().remove(&req_id) { Some(req) => req, None => return, }; // for each incoming response - // 1. ensure verification data filled. + // 1. ensure verification data filled. (still TODO since on_demand doesn't use back-references yet) // 2. pending.requests.supply_response // 3. if extracted on-demand response for response in responses { - match pending.requests.supply_response(response) { - Ok(response) => pending.responses.push(response), + match pending.requests.supply_response(&*self.cache, response) { + Ok(response) => { + pending.responses.push(response) + } Err(e) => { let peer = ctx.peer(); debug!(target: "on_demand", "Peer {} gave bad response: {:?}", peer, e); @@ -500,13 +545,13 @@ mod tests { #[test] fn detects_hangup() { let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); - let on_demand = OnDemand::new(cache, 0.into()); + let on_demand = OnDemand::new(cache); let result = on_demand.header_by_hash(&FakeContext, request::HeaderByHash(H256::default())); - assert!(on_demand.orphaned_requests.read().len() == 1); + assert!(on_demand.pending.read().len() == 1); drop(result); on_demand.dispatch_pending(&FakeContext); - assert!(on_demand.orphaned_requests.read().is_empty()); + assert!(on_demand.pending.read().is_empty()); } } diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index ad348445c..d6751bc86 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -29,12 +29,13 @@ use ethcore::transaction::SignedTransaction; use request::{self as net_request, IncompleteRequest, Output, OutputKind}; use rlp::{RlpStream, UntrustedRlp}; -use 
util::{Address, Bytes, DBValue, HashDB, H256, U256}; +use util::{Address, Bytes, DBValue, HashDB, Mutex, H256, U256}; use util::memorydb::MemoryDB; use util::sha3::Hashable; use util::trie::{Trie, TrieDB, TrieError}; /// Core unit of the API: submit batches of these to be answered with `Response`s. +#[derive(Clone)] pub enum Request { /// A request for a header proof. HeaderProof(HeaderProof), @@ -227,27 +228,28 @@ impl IncompleteRequest for CheckedRequest { impl net_request::CheckedRequest for CheckedRequest { type Extract = Response; type Error = Error; + type Environment = Mutex<::cache::Cache>; /// Check whether the response matches (beyond the type). - fn check_response(&self, response: &Self::Response) -> Result { + fn check_response(&self, cache: &Mutex<::cache::Cache>, response: &Self::Response) -> Result { use ::request::Response as NetResponse; // check response against contained prover. match (self, response) { (&CheckedRequest::HeaderProof(ref prover, _), &NetResponse::HeaderProof(ref res)) => - prover.check_response(&res.proof).map(|(h, s)| Response::HeaderProof(h, s)), + prover.check_response(cache, &res.proof).map(|(h, s)| Response::HeaderProof(h, s)), (&CheckedRequest::HeaderByHash(ref prover, _), &NetResponse::Headers(ref res)) => - prover.check_response(&res.headers).map(Response::HeaderByHash), + prover.check_response(cache, &res.headers).map(Response::HeaderByHash), (&CheckedRequest::Receipts(ref prover, _), &NetResponse::Receipts(ref res)) => - prover.check_response(&res.receipts).map(Response::Receipts), + prover.check_response(cache, &res.receipts).map(Response::Receipts), (&CheckedRequest::Body(ref prover, _), &NetResponse::Body(ref res)) => - prover.check_response(&res.body).map(Response::Body), + prover.check_response(cache, &res.body).map(Response::Body), (&CheckedRequest::Account(ref prover, _), &NetResponse::Account(ref res)) => - prover.check_response(&res.proof).map(Response::Account), + prover.check_response(cache, 
&res.proof).map(Response::Account), (&CheckedRequest::Code(ref prover, _), &NetResponse::Code(ref res)) => - prover.check_response(&res.code).map(Response::Code), + prover.check_response(cache, &res.code).map(Response::Code), (&CheckedRequest::Execution(ref prover, _), &NetResponse::Execution(ref res)) => - Ok(Response::Execution(prover.check_response(&res.items))), + prover.check_response(cache, &res.items).map(Response::Execution), _ => Err(Error::WrongKind), } } @@ -271,7 +273,7 @@ pub enum Response { /// Response to a request for code. Code(Vec), /// Response to a request for proved execution. - Execution(ProvedExecution), // TODO: make into `Result` + Execution(super::ExecutionResult), } /// Errors in verification. @@ -339,9 +341,15 @@ impl HeaderProof { pub fn cht_root(&self) -> H256 { self.cht_root } /// Check a response with a CHT proof, get a hash and total difficulty back. - pub fn check_response(&self, proof: &[Bytes]) -> Result<(H256, U256), Error> { + pub fn check_response(&self, cache: &Mutex<::cache::Cache>, proof: &[Bytes]) -> Result<(H256, U256), Error> { match ::cht::check_proof(proof, self.num, self.cht_root) { - Some((expected_hash, td)) => Ok((expected_hash, td)), + Some((expected_hash, td)) => { + let mut cache = cache.lock(); + cache.insert_block_hash(self.num, expected_hash); + cache.insert_chain_score(expected_hash, td); + + Ok((expected_hash, td)) + } None => Err(Error::BadProof), } } @@ -353,11 +361,14 @@ pub struct HeaderByHash(pub H256); impl HeaderByHash { /// Check a response for the header. 
- pub fn check_response(&self, headers: &[encoded::Header]) -> Result { + pub fn check_response(&self, cache: &Mutex<::cache::Cache>, headers: &[encoded::Header]) -> Result { let header = headers.get(0).ok_or(Error::Empty)?; let hash = header.sha3(); match hash == self.0 { - true => Ok(header.clone()), + true => { + cache.lock().insert_block_header(hash, header.clone()); + Ok(header.clone()) + } false => Err(Error::WrongHash(self.0, hash)), } } @@ -383,7 +394,7 @@ impl Body { } /// Check a response for this block body. - pub fn check_response(&self, body: &encoded::Body) -> Result { + pub fn check_response(&self, cache: &Mutex<::cache::Cache>, body: &encoded::Body) -> Result { // check the integrity of the the body against the header let tx_root = ::util::triehash::ordered_trie_root(body.rlp().at(0).iter().map(|r| r.as_raw().to_vec())); if tx_root != self.header.transactions_root() { @@ -401,6 +412,8 @@ impl Body { stream.append_raw(body.rlp().at(0).as_raw(), 1); stream.append_raw(body.rlp().at(1).as_raw(), 1); + cache.lock().insert_block_body(self.hash, body.clone()); + Ok(encoded::Block::new(stream.out())) } } @@ -411,12 +424,15 @@ pub struct BlockReceipts(pub encoded::Header); impl BlockReceipts { /// Check a response with receipts against the stored header. - pub fn check_response(&self, receipts: &[Receipt]) -> Result, Error> { + pub fn check_response(&self, cache: &Mutex<::cache::Cache>, receipts: &[Receipt]) -> Result, Error> { let receipts_root = self.0.receipts_root(); let found_root = ::util::triehash::ordered_trie_root(receipts.iter().map(|r| ::rlp::encode(r).to_vec())); match receipts_root == found_root { - true => Ok(receipts.to_vec()), + true => { + cache.lock().insert_block_receipts(receipts_root, receipts.to_vec()); + Ok(receipts.to_vec()) + } false => Err(Error::WrongTrieRoot(receipts_root, found_root)), } } @@ -433,7 +449,7 @@ pub struct Account { impl Account { /// Check a response with an account against the stored header. 
- pub fn check_response(&self, proof: &[Bytes]) -> Result, Error> { + pub fn check_response(&self, _: &Mutex<::cache::Cache>, proof: &[Bytes]) -> Result, Error> { let state_root = self.header.state_root(); let mut db = MemoryDB::new(); @@ -465,7 +481,7 @@ pub struct Code { impl Code { /// Check a response with code against the code hash. - pub fn check_response(&self, code: &[u8]) -> Result, Error> { + pub fn check_response(&self, _: &Mutex<::cache::Cache>, code: &[u8]) -> Result, Error> { let found_hash = code.sha3(); if found_hash == self.code_hash { Ok(code.to_vec()) @@ -490,23 +506,29 @@ pub struct TransactionProof { impl TransactionProof { /// Check the proof, returning the proved execution or indicate that the proof was bad. - pub fn check_response(&self, state_items: &[DBValue]) -> ProvedExecution { + pub fn check_response(&self, _: &Mutex<::cache::Cache>, state_items: &[DBValue]) -> Result { let root = self.header.state_root(); - state::check_proof( + let proved_execution = state::check_proof( state_items, root, &self.tx, &*self.engine, &self.env_info, - ) + ); + + match proved_execution { + ProvedExecution::BadProof => Err(Error::BadProof), + ProvedExecution::Failed(e) => Ok(Err(e)), + ProvedExecution::Complete(e) => Ok(Ok(e)), + } } } #[cfg(test)] mod tests { use super::*; - use util::{MemoryDB, Address, H256}; + use util::{MemoryDB, Address, Mutex, H256}; use util::trie::{Trie, TrieMut, SecTrieDB, SecTrieDBMut}; use util::trie::recorder::Recorder; @@ -515,6 +537,10 @@ mod tests { use ethcore::encoded; use ethcore::receipt::Receipt; + fn make_cache() -> ::cache::Cache { + ::cache::Cache::new(Default::default(), ::time::Duration::seconds(1)) + } + #[test] fn no_invalid_header_by_number() { assert!(HeaderProof::new(0, Default::default()).is_none()) @@ -544,7 +570,8 @@ mod tests { let proof = cht.prove(10_000, 0).unwrap().unwrap(); let req = HeaderProof::new(10_000, cht.root()).unwrap(); - assert!(req.check_response(&proof[..]).is_ok()); + let cache = 
Mutex::new(make_cache()); + assert!(req.check_response(&cache, &proof[..]).is_ok()); } #[test] @@ -555,7 +582,8 @@ mod tests { let hash = header.hash(); let raw_header = encoded::Header::new(::rlp::encode(&header).to_vec()); - assert!(HeaderByHash(hash).check_response(&[raw_header]).is_ok()) + let cache = Mutex::new(make_cache()); + assert!(HeaderByHash(hash).check_response(&cache, &[raw_header]).is_ok()) } #[test] @@ -571,8 +599,9 @@ mod tests { hash: header.hash(), }; + let cache = Mutex::new(make_cache()); let response = encoded::Body::new(body_stream.drain().to_vec()); - assert!(req.check_response(&response).is_ok()) + assert!(req.check_response(&cache, &response).is_ok()) } #[test] @@ -593,7 +622,8 @@ mod tests { let req = BlockReceipts(encoded::Header::new(::rlp::encode(&header).to_vec())); - assert!(req.check_response(&receipts).is_ok()) + let cache = Mutex::new(make_cache()); + assert!(req.check_response(&cache, &receipts).is_ok()) } #[test] @@ -642,7 +672,8 @@ mod tests { address: addr, }; - assert!(req.check_response(&proof[..]).is_ok()); + let cache = Mutex::new(make_cache()); + assert!(req.check_response(&cache, &proof[..]).is_ok()); } #[test] @@ -653,7 +684,8 @@ mod tests { code_hash: ::util::Hashable::sha3(&code), }; - assert!(req.check_response(&code).is_ok()); - assert!(req.check_response(&[]).is_err()); + let cache = Mutex::new(make_cache()); + assert!(req.check_response(&cache, &code).is_ok()); + assert!(req.check_response(&cache, &[]).is_err()); } } diff --git a/ethcore/light/src/types/request/builder.rs b/ethcore/light/src/types/request/builder.rs index 6a40d288e..3dfb9ff40 100644 --- a/ethcore/light/src/types/request/builder.rs +++ b/ethcore/light/src/types/request/builder.rs @@ -118,7 +118,7 @@ impl Requests { impl Requests { /// Supply a response for the next request. /// Fails on: wrong request kind, all requests answered already. 
- pub fn supply_response(&mut self, response: &T::Response) + pub fn supply_response(&mut self, env: &T::Environment, response: &T::Response) -> Result> { let idx = self.answered; @@ -126,7 +126,7 @@ impl Requests { // check validity. if idx == self.requests.len() { return Err(ResponseError::Unexpected) } let extracted = self.requests[idx] - .check_response(&response).map_err(ResponseError::Validity)?; + .check_response(env, response).map_err(ResponseError::Validity)?; let outputs = &mut self.outputs; response.fill_outputs(|out_idx, output| { @@ -157,7 +157,7 @@ impl Requests { let mut responses = Vec::new(); while let Some(response) = self.next_complete().and_then(&responder) { - match self.supply_response(&response) { + match self.supply_response(&(), &response) { Ok(()) => responses.push(response), Err(e) => { debug!(target: "pip", "produced bad response to request: {:?}", e); diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index f26908acd..3dd2db629 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -382,8 +382,9 @@ impl IncompleteRequest for Request { impl CheckedRequest for Request { type Extract = (); type Error = WrongKind; + type Environment = (); - fn check_response(&self, response: &Response) -> Result<(), WrongKind> { + fn check_response(&self, _: &(), response: &Response) -> Result<(), WrongKind> { if self.kind() == response.kind() { Ok(()) } else { @@ -566,9 +567,11 @@ pub trait CheckedRequest: IncompleteRequest { type Extract; /// Error encountered during the check. type Error; + /// Environment passed to response check. + type Environment; /// Check whether the response matches (beyond the type). - fn check_response(&self, _response: &Self::Response) -> Result; + fn check_response(&self, &Self::Environment, &Self::Response) -> Result; } /// A response-like object. 
From 528dbf909a277d0e6d834c4e1b3b0fe3435fa0df Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 6 Apr 2017 17:44:31 +0200 Subject: [PATCH 12/24] fix RPC fallout --- parity/light_helpers/queue_cull.rs | 6 +- parity/run.rs | 3 +- rpc/src/v1/helpers/dispatch.rs | 3 +- rpc/src/v1/impls/light/eth.rs | 139 +++++++++++------------------ 4 files changed, 62 insertions(+), 89 deletions(-) diff --git a/parity/light_helpers/queue_cull.rs b/parity/light_helpers/queue_cull.rs index 548ee33cd..235090052 100644 --- a/parity/light_helpers/queue_cull.rs +++ b/parity/light_helpers/queue_cull.rs @@ -67,6 +67,7 @@ impl IoHandler for QueueCull { let (sync, on_demand, txq) = (self.sync.clone(), self.on_demand.clone(), self.txq.clone()); let best_header = self.client.best_block_header(); + let start_nonce = self.client.engine().account_start_nonce; info!(target: "cull", "Attempting to cull queued transactions from {} senders.", senders.len()); self.remote.spawn_with_timeout(move || { @@ -74,7 +75,10 @@ impl IoHandler for QueueCull { // fetch the nonce of each sender in the queue. let nonce_futures = senders.iter() .map(|&address| request::Account { header: best_header.clone(), address: address }) - .map(|request| on_demand.account(ctx, request).map(|acc| acc.nonce)) + .map(move |request| { + on_demand.account(ctx, request) + .map(move |maybe_acc| maybe_acc.map_or(start_nonce, |acc.nonce|)) + }) .zip(senders.iter()) .map(|(fut, &addr)| fut.map(move |nonce| (addr, nonce))); diff --git a/parity/run.rs b/parity/run.rs index a4fc6ad19..2292bf87d 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -221,8 +221,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> let cache = Arc::new(::util::Mutex::new(cache)); // start on_demand service. 
- let account_start_nonce = service.client().engine().account_start_nonce(); - let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone(), account_start_nonce)); + let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone())); // set network path. net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned()); diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index d58a211ed..5bce4b10c 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -261,6 +261,7 @@ impl LightDispatcher { } let best_header = self.client.best_block_header(); + let account_start_nonce = self.client.engine().account_start_nonce(); let nonce_future = self.sync.with_context(|ctx| self.on_demand.account(ctx, request::Account { header: best_header, address: addr, @@ -268,7 +269,7 @@ impl LightDispatcher { match nonce_future { Some(x) => - x.map(|acc| acc.nonce) + x.map(|acc| acc.map_or(account_start_nonce, |acc| acc.nonce)) .map_err(|_| errors::no_light_peers()) .boxed(), None => future::err(errors::network_disabled()).boxed() diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index e1014afe8..6a051cce0 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -114,16 +114,16 @@ impl EthClient { } /// Get a block header from the on demand service or client, or error. 
- fn header(&self, id: BlockId) -> BoxFuture, Error> { + fn header(&self, id: BlockId) -> BoxFuture { if let Some(h) = self.client.block_header(id) { - return future::ok(Some(h)).boxed() + return future::ok(h).boxed() } let maybe_future = match id { BlockId::Number(n) => { let cht_root = cht::block_to_cht_number(n).and_then(|cn| self.client.cht_root(cn as usize)); match cht_root { - None => return future::ok(None).boxed(), + None => return future::err(errors::unknown_block()).boxed(), Some(root) => { let req = request::HeaderProof::new(n, root) .expect("only fails for 0; client always stores genesis; client already queried; qed"); @@ -139,7 +139,7 @@ impl EthClient { Some(fut) => fut.map_err(err_premature_cancel).boxed(), None => future::err(errors::network_disabled()).boxed(), } - }).map(Some).boxed() + }).boxed() }) } } @@ -148,7 +148,7 @@ impl EthClient { self.sync.with_context(|ctx| self.on_demand.header_by_hash(ctx, request::HeaderByHash(h)) .then(|res| future::done(match res { - Ok(h) => Ok(Some(h)), + Ok(h) => Ok(h), Err(e) => Err(err_premature_cancel(e)), })) .boxed() @@ -164,22 +164,18 @@ impl EthClient { } // helper for getting account info at a given block. + // `None` indicates the account doesn't exist at the given block. fn account(&self, address: Address, id: BlockId) -> BoxFuture, Error> { let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); self.header(id).and_then(move |header| { - let header = match header { - None => return future::ok(None).boxed(), - Some(hdr) => hdr, - }; - let maybe_fut = sync.with_context(|ctx| on_demand.account(ctx, request::Account { header: header, address: address, })); match maybe_fut { - Some(fut) => fut.map(Some).map_err(err_premature_cancel).boxed(), + Some(fut) => fut.map_err(err_premature_cancel).boxed(), None => future::err(errors::network_disabled()).boxed(), } }).boxed() @@ -236,10 +232,11 @@ impl EthClient { }).join(header_fut).and_then(move |(tx, hdr)| { // then request proved execution. 
// TODO: get last-hashes from network. - let (env_info, hdr) = match (client.env_info(id), hdr) { - (Some(env_info), Some(hdr)) => (env_info, hdr), + let env_info = match client.env_info(id) { + Some(env_info) => env_info, _ => return future::err(errors::unknown_block()).boxed(), }; + let request = request::TransactionProof { tx: tx, header: hdr, @@ -258,24 +255,20 @@ impl EthClient { }).boxed() } - fn block(&self, id: BlockId) -> BoxFuture, Error> { + // get a block itself. fails on unknown block ID. + fn block(&self, id: BlockId) -> BoxFuture { let (on_demand, sync) = (self.on_demand.clone(), self.sync.clone()); - self.header(id).and_then(move |hdr| { - let req = match hdr { - Some(hdr) => request::Body::new(hdr), - None => return future::ok(None).boxed(), - }; - + self.header(id).map(request::Body::new).and_then(move |req| { match sync.with_context(move |ctx| on_demand.block(ctx, req)) { - Some(fut) => fut.map_err(err_premature_cancel).map(Some).boxed(), + Some(fut) => fut.map_err(err_premature_cancel).boxed(), None => future::err(errors::network_disabled()).boxed(), } }).boxed() } - // get a "rich" block structure - fn rich_block(&self, id: BlockId, include_txs: bool) -> BoxFuture, Error> { + // get a "rich" block structure. Fails on unknown block. + fn rich_block(&self, id: BlockId, include_txs: bool) -> BoxFuture { let (on_demand, sync) = (self.on_demand.clone(), self.sync.clone()); let (client, engine) = (self.client.clone(), self.client.engine().clone()); @@ -314,49 +307,45 @@ impl EthClient { }; // get the block itself. - self.block(id).and_then(move |block| match block { - None => return future::ok(None).boxed(), - Some(block) => { - // then fetch the total difficulty (this is much easier after getting the block). - match client.score(id) { - Some(score) => future::ok(fill_rich(block, Some(score))).map(Some).boxed(), - None => { - // make a CHT request to fetch the chain score. 
- let req = cht::block_to_cht_number(block.number()) - .and_then(|num| client.cht_root(num as usize)) - .and_then(|root| request::HeaderProof::new(block.number(), root)); + self.block(id).and_then(move |block| { + // then fetch the total difficulty (this is much easier after getting the block). + match client.score(id) { + Some(score) => future::ok(fill_rich(block, Some(score))).boxed(), + None => { + // make a CHT request to fetch the chain score. + let req = cht::block_to_cht_number(block.number()) + .and_then(|num| client.cht_root(num as usize)) + .and_then(|root| request::HeaderProof::new(block.number(), root)); + let req = match req { + Some(req) => req, + None => { + // somehow the genesis block slipped past other checks. + // return it now. + let score = client.block_header(BlockId::Number(0)) + .expect("genesis always stored; qed") + .difficulty(); - let req = match req { - Some(req) => req, - None => { - // somehow the genesis block slipped past other checks. - // return it now. - let score = client.block_header(BlockId::Number(0)) - .expect("genesis always stored; qed") - .difficulty(); - - return future::ok(fill_rich(block, Some(score))).map(Some).boxed() - } - }; - - // three possible outcomes: - // - network is down. - // - we get a score, but our hash is non-canonical. - // - we get ascore, and our hash is canonical. - let maybe_fut = sync.with_context(move |ctx| on_demand.hash_and_score_by_number(ctx, req)); - match maybe_fut { - Some(fut) => fut.map(move |(hash, score)| { - let score = if hash == block.hash() { - Some(score) - } else { - None - }; - - Some(fill_rich(block, score)) - }).map_err(err_premature_cancel).boxed(), - None => return future::err(errors::network_disabled()).boxed(), + return future::ok(fill_rich(block, Some(score))).boxed() } + }; + + // three possible outcomes: + // - network is down. + // - we get a score, but our hash is non-canonical. + // - we get ascore, and our hash is canonical. 
+ let maybe_fut = sync.with_context(move |ctx| on_demand.hash_and_score_by_number(ctx, req)); + match maybe_fut { + Some(fut) => fut.map(move |(hash, score)| { + let score = if hash == block.hash() { + Some(score) + } else { + None + }; + + fill_rich(block, score) + }).map_err(err_premature_cancel).boxed(), + None => return future::err(errors::network_disabled()).boxed(), } } } @@ -435,11 +424,11 @@ impl Eth for EthClient { } fn block_by_hash(&self, hash: RpcH256, include_txs: bool) -> BoxFuture, Error> { - self.rich_block(BlockId::Hash(hash.into()), include_txs) + self.rich_block(BlockId::Hash(hash.into()), include_txs).map(Some).boxed() } fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture, Error> { - self.rich_block(num.into(), include_txs) + self.rich_block(num.into(), include_txs).map(Some).boxed() } fn transaction_count(&self, address: RpcH160, num: Trailing) -> BoxFuture { @@ -451,11 +440,6 @@ impl Eth for EthClient { let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); self.header(BlockId::Hash(hash.into())).and_then(move |hdr| { - let hdr = match hdr { - None => return future::ok(None).boxed(), - Some(hdr) => hdr, - }; - if hdr.transactions_root() == SHA3_NULL_RLP { future::ok(Some(U256::from(0).into())).boxed() } else { @@ -471,11 +455,6 @@ impl Eth for EthClient { let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); self.header(num.into()).and_then(move |hdr| { - let hdr = match hdr { - None => return future::ok(None).boxed(), - Some(hdr) => hdr, - }; - if hdr.transactions_root() == SHA3_NULL_RLP { future::ok(Some(U256::from(0).into())).boxed() } else { @@ -491,11 +470,6 @@ impl Eth for EthClient { let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); self.header(BlockId::Hash(hash.into())).and_then(move |hdr| { - let hdr = match hdr { - None => return future::ok(None).boxed(), - Some(hdr) => hdr, - }; - if hdr.uncles_hash() == SHA3_EMPTY_LIST_RLP { 
future::ok(Some(U256::from(0).into())).boxed() } else { @@ -511,11 +485,6 @@ impl Eth for EthClient { let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); self.header(num.into()).and_then(move |hdr| { - let hdr = match hdr { - None => return future::ok(None).boxed(), - Some(hdr) => hdr, - }; - if hdr.uncles_hash() == SHA3_EMPTY_LIST_RLP { future::ok(Some(U256::from(0).into())).boxed() } else { From 5793bb8facebf639324a5db6bbdd97560c2db72f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 6 Apr 2017 20:01:09 +0200 Subject: [PATCH 13/24] typestrong API --- ethcore/light/src/on_demand/mod.rs | 93 +++++++++---------- ethcore/light/src/on_demand/request.rs | 124 ++++++++++++++++++++++++- rpc/src/v1/impls/light/eth.rs | 1 - 3 files changed, 165 insertions(+), 53 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index f5a1fe02e..9ffd5ef1f 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -22,6 +22,7 @@ #![allow(deprecated)] use std::collections::HashMap; +use std::marker::PhantomData; use std::sync::Arc; use ethcore::basic_account::BasicAccount; @@ -111,6 +112,22 @@ fn guess_capabilities(requests: &[CheckedRequest]) -> Capabilities { caps } +/// A future extracting the concrete output type of the generic adapter +/// from a vector of responses. +pub struct OnResponses { + receiver: Receiver>, + _marker: PhantomData, +} + +impl Future for OnResponses { + type Item = T::Out; + type Error = Canceled; + + fn poll(&mut self) -> Poll { + self.receiver.poll().map(|async| async.map(T::extract_from)) + } +} + /// On demand request service. See module docs for more details. /// Accumulates info about all peers' capabilities and dispatches /// requests to them accordingly. 
@@ -122,8 +139,6 @@ pub struct OnDemand { cache: Arc>, } -const RESPONSES_MATCH: &'static str = "N requests always leads to N responses; qed"; - impl OnDemand { /// Create a new `OnDemand` service with the given cache. pub fn new(cache: Arc>) -> Self { @@ -146,12 +161,9 @@ impl OnDemand { match cached { Some(hash) => future::ok(hash).boxed(), None => { - self.make_requests(ctx, vec![Request::HeaderProof(req)]) + self.request(ctx, req) .expect("request given fully fleshed out; qed") - .map(|responses| match responses[0] { - Response::HeaderProof(ref hash, _) => *hash, - _ => panic!("header proof request leads to header proof response; qed") - }) + .map(|(h, _)| h) .boxed() }, } @@ -168,12 +180,9 @@ impl OnDemand { match cached { Some(score) => future::ok(score).boxed(), None => { - self.make_requests(ctx, vec![Request::HeaderProof(req)]) + self.request(ctx, req) .expect("request given fully fleshed out; qed") - .map(|responses| match responses[0] { - Response::HeaderProof(_, ref score) => *score, - _ => panic!("header proof request leads to header proof response; qed") - }) + .map(|(_, s)| s) .boxed() }, } @@ -194,12 +203,8 @@ impl OnDemand { match cached { (Some(hash), Some(score)) => future::ok((hash, score)).boxed(), _ => { - self.make_requests(ctx, vec![Request::HeaderProof(req)]) + self.request(ctx, req) .expect("request given fully fleshed out; qed") - .map(|responses| match responses[0] { - Response::HeaderProof(ref hash, ref score) => (*hash, *score), - _ => panic!("header proof request leads to header proof response; qed") - }) .boxed() }, } @@ -212,12 +217,8 @@ impl OnDemand { match { self.cache.lock().block_header(&req.0) } { Some(hdr) => future::ok(hdr).boxed(), None => { - self.make_requests(ctx, vec![Request::HeaderByHash(req)]) + self.request(ctx, req) .expect("request given fully fleshed out; qed") - .map(|mut responses| match responses.pop().expect(RESPONSES_MATCH) { - Response::HeaderByHash(header) => header, - _ => panic!("header request leads to 
header response; qed") - }) .boxed() }, } @@ -247,12 +248,8 @@ impl OnDemand { future::ok(encoded::Block::new(stream.out())).boxed() } None => { - self.make_requests(ctx, vec![Request::Body(req)]) + self.request(ctx, req) .expect("request given fully fleshed out; qed") - .map(|mut responses| match responses.pop().expect(RESPONSES_MATCH) { - Response::Body(body) => body, - _ => panic!("body request leads to body response; qed") - }) .boxed() } } @@ -270,12 +267,8 @@ impl OnDemand { match { self.cache.lock().block_receipts(&req.0.hash()) } { Some(receipts) => future::ok(receipts).boxed(), None => { - self.make_requests(ctx, vec![Request::Receipts(req)]) + self.request(ctx, req) .expect("request given fully fleshed out; qed") - .map(|mut responses| match responses.pop().expect(RESPONSES_MATCH) { - Response::Receipts(receipts) => receipts, - _ => panic!("receipts request leads to receipts response; qed") - }) .boxed() }, } @@ -285,12 +278,8 @@ impl OnDemand { /// to verify against. /// `None` here means that no account by the queried key exists in the queried state. pub fn account(&self, ctx: &BasicContext, req: request::Account) -> BoxFuture, Canceled> { - self.make_requests(ctx, vec![Request::Account(req)]) + self.request(ctx, req) .expect("request given fully fleshed out; qed") - .map(|mut responses| match responses.pop().expect(RESPONSES_MATCH) { - Response::Account(account) => account, - _ => panic!("account request leads to account response; qed") - }) .boxed() } @@ -300,32 +289,24 @@ impl OnDemand { if req.code_hash == SHA3_EMPTY { future::ok(Vec::new()).boxed() } else { - self.make_requests(ctx, vec![Request::Code(req)]) + self.request(ctx, req) .expect("request given fully fleshed out; qed") - .map(|mut responses| match responses.pop().expect(RESPONSES_MATCH) { - Response::Code(code) => code, - _ => panic!("code request leads to code response; qed") - }) .boxed() } } /// Request proof-of-execution for a transaction. 
pub fn transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof) -> BoxFuture { - self.make_requests(ctx, vec![Request::Execution(req)]) + self.request(ctx, req) .expect("request given fully fleshed out; qed") - .map(|mut responses| match responses.pop().expect(RESPONSES_MATCH) { - Response::Execution(execution) => execution, - _ => panic!("execution request leads to execution response; qed") - }) .boxed() } - /// Submit a batch of requests. + /// Submit a vector of requests to be processed together. /// /// Fails if back-references are not coherent. - /// The returned vector of responses will match the requests exactly. - pub fn make_requests(&self, ctx: &BasicContext, requests: Vec) + /// The returned vector of responses will correspond to the requests exactly. + pub fn request_raw(&self, ctx: &BasicContext, requests: Vec) -> Result>, basic_request::NoSuchOutput> { let (sender, receiver) = oneshot::channel(); @@ -359,6 +340,18 @@ impl OnDemand { Ok(receiver) } + /// Submit a strongly-typed batch of requests. + /// + /// Fails if back-reference are not coherent. + pub fn request(&self, ctx: &BasicContext, requests: T) -> Result, basic_request::NoSuchOutput> + where T: request::RequestAdapter + { + self.request_raw(ctx, requests.make_requests()).map(|recv| OnResponses { + receiver: recv, + _marker: PhantomData, + }) + } + // dispatch pending requests, and discard those for which the corresponding // receiver has been dropped. 
fn dispatch_pending(&self, ctx: &BasicContext) { diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index f7db825c3..8361661eb 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -34,6 +34,8 @@ use util::memorydb::MemoryDB; use util::sha3::Hashable; use util::trie::{Trie, TrieDB, TrieError}; +const SUPPLIED_MATCHES: &'static str = "supplied responses always match produced requests; qed"; + /// Core unit of the API: submit batches of these to be answered with `Response`s. #[derive(Clone)] pub enum Request { @@ -53,6 +55,124 @@ pub enum Request { Execution(TransactionProof), } +/// A request argument. +pub trait RequestArg { + /// the response type. + type Out; + + /// Create the request type. + /// `extract` must not fail when presented with the corresponding + /// `Response`. + fn make(self) -> Request; + + /// May not panic if the response corresponds with the request + /// from `make`. + /// Is free to panic otherwise. + fn extract(r: Response) -> Self::Out; +} + +/// An adapter can be thought of as a grouping of request argument types. +/// This is implemented for various tuples and convenient types. +pub trait RequestAdapter { + /// The output type. + type Out; + + /// Infallibly produce requests. When `extract_from` is presented + /// with the corresponding response vector, it may not fail. + fn make_requests(self) -> Vec; + + /// Extract the output type from the given responses. + /// If they are the corresponding responses to the requests + /// made by `make_requests`, do not panic. + fn extract_from(Vec) -> Self::Out; +} + +// helper to implement `RequestArg` and `From` for a single request kind. +macro_rules! 
impl_single { + ($variant: ident, $me: ty, $out: ty) => { + impl RequestArg for $me { + type Out = $out; + + fn make(self) -> Request { + Request::$variant(self) + } + + fn extract(r: Response) -> $out { + match r { + Response::$variant(x) => x, + _ => panic!(SUPPLIED_MATCHES), + } + } + } + + impl From<$me> for Request { + fn from(me: $me) -> Request { + Request::$variant(me) + } + } + } +} + +// implement traits for each kind of request. +impl_single!(HeaderProof, HeaderProof, (H256, U256)); +impl_single!(HeaderByHash, HeaderByHash, encoded::Header); +impl_single!(Receipts, BlockReceipts, Vec); +impl_single!(Body, Body, encoded::Block); +impl_single!(Account, Account, Option); +impl_single!(Code, Code, Bytes); +impl_single!(Execution, TransactionProof, super::ExecutionResult); + +macro_rules! impl_args { + () => { + impl RequestAdapter for T { + type Out = T::Out; + + fn make_requests(self) -> Vec { + vec![self.make()] + } + + fn extract_from(mut responses: Vec) -> Self::Out { + T::extract(responses.pop().expect(SUPPLIED_MATCHES)) + } + } + }; + ($first: ident, $($next: ident,)*) => { + impl< + $first: RequestArg, + $($next: RequestArg,)* + > + RequestAdapter for ($first, $($next,)*) { + type Out = ($first::Out, $($next::Out,)*); + + fn make_requests(self) -> Vec { + let ($first, $($next,)*) = self; + + vec![ + $first.make(), + $($next.make(),)* + ] + } + + fn extract_from(responses: Vec) -> Self::Out { + let mut iter = responses.into_iter(); + ( + $first::extract(iter.next().expect(SUPPLIED_MATCHES)), + $($next::extract(iter.next().expect(SUPPLIED_MATCHES)),)* + ) + } + } + impl_args!($($next,)*); + } +} + +mod impls { + #![allow(non_snake_case)] + + use super::{RequestAdapter, RequestArg, Request, Response, SUPPLIED_MATCHES}; + + impl_args!(A, B, C, D, E, F, G, H, I, J, K, L,); +} + /// Requests coupled with their required data for verification. /// This is used internally but not part of the public API. 
#[derive(Clone)] @@ -237,7 +357,7 @@ impl net_request::CheckedRequest for CheckedRequest { // check response against contained prover. match (self, response) { (&CheckedRequest::HeaderProof(ref prover, _), &NetResponse::HeaderProof(ref res)) => - prover.check_response(cache, &res.proof).map(|(h, s)| Response::HeaderProof(h, s)), + prover.check_response(cache, &res.proof).map(Response::HeaderProof), (&CheckedRequest::HeaderByHash(ref prover, _), &NetResponse::Headers(ref res)) => prover.check_response(cache, &res.headers).map(Response::HeaderByHash), (&CheckedRequest::Receipts(ref prover, _), &NetResponse::Receipts(ref res)) => @@ -260,7 +380,7 @@ impl net_request::CheckedRequest for CheckedRequest { pub enum Response { /// Response to a header proof request. /// Returns the hash and chain score. - HeaderProof(H256, U256), + HeaderProof((H256, U256)), /// Response to a header-by-hash request. HeaderByHash(encoded::Header), /// Response to a receipts request. diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index 752e61d64..7e53f66d0 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -115,7 +115,6 @@ impl EthClient { on_demand: self.on_demand.clone(), sync: self.sync.clone(), cache: self.cache.clone(), - } } From 68ec7ae41ec95c7b82b7f4eaf261677cdd1cd87b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 7 Apr 2017 19:35:39 +0200 Subject: [PATCH 14/24] tests for on_demand --- ethcore/light/src/net/mod.rs | 5 + ethcore/light/src/on_demand/mod.rs | 80 +++--- ethcore/light/src/on_demand/tests.rs | 397 +++++++++++++++++++++++++++ ethcore/src/header.rs | 4 +- 4 files changed, 437 insertions(+), 49 deletions(-) create mode 100644 ethcore/light/src/on_demand/tests.rs diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 7d006662c..917ef9049 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -108,9 +108,14 @@ mod timeout { } /// A request id. 
+#[cfg(not(test))] #[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)] pub struct ReqId(usize); +#[cfg(test)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)] +pub struct ReqId(pub usize); + impl fmt::Display for ReqId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Request #{}", self.0) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 9ffd5ef1f..78767f015 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -40,11 +40,15 @@ use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY, SHA3_EMPTY_LIST_RLP}; use net::{self, Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; use cache::Cache; use request::{self as basic_request, Request as NetworkRequest}; +use self::request::CheckedRequest; + +pub use self::request::{Request, Response}; + +#[cfg(test)] +mod tests; pub mod request; -pub use self::request::{CheckedRequest, Request, Response}; - /// The result of execution pub type ExecutionResult = Result; @@ -137,6 +141,7 @@ pub struct OnDemand { peers: RwLock>, in_transit: RwLock>, cache: Arc>, + no_immediate_dispatch: bool, } impl OnDemand { @@ -147,9 +152,20 @@ impl OnDemand { peers: RwLock::new(HashMap::new()), in_transit: RwLock::new(HashMap::new()), cache: cache, + no_immediate_dispatch: true, } } + // make a test version: this doesn't dispatch pending requests + // until you trigger it manually. + #[cfg(test)] + fn new_test(cache: Arc>) -> Self { + let mut me = OnDemand::new(cache); + me.no_immediate_dispatch = true; + + me + } + /// Request a header's hash by block number and CHT root hash. /// Returns the hash. pub fn hash_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> BoxFuture { @@ -335,7 +351,7 @@ impl OnDemand { sender: sender, }); - self.dispatch_pending(ctx); + self.attempt_dispatch(ctx); Ok(receiver) } @@ -352,6 +368,14 @@ impl OnDemand { }) } + // maybe dispatch pending requests. 
+ // sometimes + fn attempt_dispatch(&self, ctx: &BasicContext) { + if !self.no_immediate_dispatch { + self.dispatch_pending(ctx) + } + } + // dispatch pending requests, and discard those for which the corresponding // receiver has been dropped. fn dispatch_pending(&self, ctx: &BasicContext) { @@ -414,7 +438,7 @@ impl OnDemand { impl Handler for OnDemand { fn on_connect(&self, ctx: &EventContext, status: &Status, capabilities: &Capabilities) { self.peers.write().insert(ctx.peer(), Peer { status: status.clone(), capabilities: capabilities.clone() }); - self.dispatch_pending(ctx.as_basic()); + self.attempt_dispatch(ctx.as_basic()); } fn on_disconnect(&self, ctx: &EventContext, unfulfilled: &[ReqId]) { @@ -431,7 +455,7 @@ impl Handler for OnDemand { } } - self.dispatch_pending(ctx); + self.attempt_dispatch(ctx); } fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) { @@ -443,7 +467,7 @@ impl Handler for OnDemand { } } - self.dispatch_pending(ctx.as_basic()); + self.attempt_dispatch(ctx.as_basic()); } fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[basic_request::Response]) { @@ -501,50 +525,10 @@ impl Handler for OnDemand { pending.required_capabilities = capabilities; self.pending.write().push(pending); - self.dispatch_pending(ctx.as_basic()); + self.attempt_dispatch(ctx.as_basic()); } fn tick(&self, ctx: &BasicContext) { - self.dispatch_pending(ctx) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - use std::sync::Arc; - - use cache::Cache; - use net::{Announcement, BasicContext, ReqId, Error as LesError}; - use request::NetworkRequests; - - use network::{PeerId, NodeId}; - use time::Duration; - use util::{H256, Mutex}; - - struct FakeContext; - - impl BasicContext for FakeContext { - fn persistent_peer_id(&self, _: PeerId) -> Option { None } - fn request_from(&self, _: PeerId, _: NetworkRequests) -> Result { - unimplemented!() - } - fn make_announcement(&self, _: Announcement) { } - fn disconnect_peer(&self, _: 
PeerId) { } - fn disable_peer(&self, _: PeerId) { } - } - - #[test] - fn detects_hangup() { - let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); - let on_demand = OnDemand::new(cache); - let result = on_demand.header_by_hash(&FakeContext, request::HeaderByHash(H256::default())); - - assert!(on_demand.pending.read().len() == 1); - drop(result); - - on_demand.dispatch_pending(&FakeContext); - assert!(on_demand.pending.read().is_empty()); + self.attempt_dispatch(ctx) } } diff --git a/ethcore/light/src/on_demand/tests.rs b/ethcore/light/src/on_demand/tests.rs new file mode 100644 index 000000000..d5789c5e1 --- /dev/null +++ b/ethcore/light/src/on_demand/tests.rs @@ -0,0 +1,397 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Tests for the on-demand service. + +use cache::Cache; +use ethcore::encoded; +use ethcore::header::{Header, Seal}; +use futures::Future; +use network::{PeerId, NodeId}; +use net::*; +use util::{H256, Mutex}; +use time::Duration; +use ::request::{self as basic_request, Response}; + +use std::sync::Arc; + +use super::{request, OnDemand, Peer}; + +// useful contexts to give the service. 
+enum Context { + NoOp, + WithPeer(PeerId), + RequestFrom(PeerId, ReqId), + Punish(PeerId), +} + +impl EventContext for Context { + fn peer(&self) -> PeerId { + match *self { + Context::WithPeer(id) + | Context::RequestFrom(id, _) + | Context::Punish(id) => id, + _ => panic!("didn't expect to have peer queried."), + } + } + + fn as_basic(&self) -> &BasicContext { self } +} + +impl BasicContext for Context { + /// Returns the relevant's peer persistent Id (aka NodeId). + fn persistent_peer_id(&self, _: PeerId) -> Option { + panic!("didn't expect to provide persistent ID") + } + + fn request_from(&self, peer_id: PeerId, _: ::request::NetworkRequests) -> Result { + match *self { + Context::RequestFrom(id, req_id) => if peer_id == id { Ok(req_id) } else { Err(Error::NoCredits) }, + _ => panic!("didn't expect to have requests dispatched."), + } + } + + fn make_announcement(&self, _: Announcement) { + panic!("didn't expect to make announcement") + } + + fn disconnect_peer(&self, id: PeerId) { + self.disable_peer(id) + } + + fn disable_peer(&self, peer_id: PeerId) { + match *self { + Context::Punish(id) if id == peer_id => {}, + _ => panic!("Unexpectedly punished peer."), + } + } +} + +// test harness. 
+struct Harness { + service: OnDemand, +} + +impl Harness { + fn create() -> Self { + let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::minutes(1)))); + Harness { + service: OnDemand::new_test(cache), + } + } + + fn inject_peer(&self, id: PeerId, peer: Peer) { + self.service.peers.write().insert(id, peer); + } +} + +fn dummy_status() -> Status { + Status { + protocol_version: 1, + network_id: 999, + head_td: 1.into(), + head_hash: H256::default(), + head_num: 1359, + genesis_hash: H256::default(), + last_head: None, + } +} + +fn dummy_capabilities() -> Capabilities { + Capabilities { + serve_headers: true, + serve_chain_since: Some(1), + serve_state_since: Some(1), + tx_relay: true, + } +} + +#[test] +fn detects_hangup() { + let on_demand = Harness::create().service; + let result = on_demand.header_by_hash(&Context::NoOp, request::HeaderByHash(H256::default())); + + assert_eq!(on_demand.pending.read().len(), 1); + drop(result); + + on_demand.dispatch_pending(&Context::NoOp); + assert!(on_demand.pending.read().is_empty()); +} + +#[test] +fn single_request() { + let harness = Harness::create(); + + let peer_id = 10101; + let req_id = ReqId(14426); + + harness.inject_peer(peer_id, Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }); + + let header = Header::default(); + let encoded = encoded::Header::new(header.rlp(Seal::With)); + + let recv = harness.service.request_raw( + &Context::NoOp, + vec![request::HeaderByHash(header.hash()).into()] + ).unwrap(); + + assert_eq!(harness.service.pending.read().len(), 1); + + harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_id)); + + assert_eq!(harness.service.pending.read().len(), 0); + + harness.service.on_responses( + &Context::WithPeer(peer_id), + req_id, + &[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded] })] + ); + + assert!(recv.wait().is_ok()); +} + +#[test] +fn no_capabilities() { + let harness = Harness::create(); + + let peer_id 
= 10101; + + let mut capabilities = dummy_capabilities(); + capabilities.serve_headers = false; + + harness.inject_peer(peer_id, Peer { + status: dummy_status(), + capabilities: capabilities, + }); + + let _recv = harness.service.request_raw( + &Context::NoOp, + vec![request::HeaderByHash(Default::default()).into()] + ).unwrap(); + + assert_eq!(harness.service.pending.read().len(), 1); + + harness.service.dispatch_pending(&Context::NoOp); + + assert_eq!(harness.service.pending.read().len(), 1); +} + +#[test] +fn reassign() { + let harness = Harness::create(); + + let peer_ids = (10101, 12345); + let req_ids = (ReqId(14426), ReqId(555)); + + harness.inject_peer(peer_ids.0, Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }); + + let header = Header::default(); + let encoded = encoded::Header::new(header.rlp(Seal::With)); + + let recv = harness.service.request_raw( + &Context::NoOp, + vec![request::HeaderByHash(header.hash()).into()] + ).unwrap(); + + assert_eq!(harness.service.pending.read().len(), 1); + + harness.service.dispatch_pending(&Context::RequestFrom(peer_ids.0, req_ids.0)); + assert_eq!(harness.service.pending.read().len(), 0); + + harness.service.on_disconnect(&Context::WithPeer(peer_ids.0), &[req_ids.0]); + assert_eq!(harness.service.pending.read().len(), 1); + + harness.inject_peer(peer_ids.1, Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }); + + harness.service.dispatch_pending(&Context::RequestFrom(peer_ids.1, req_ids.1)); + assert_eq!(harness.service.pending.read().len(), 0); + + harness.service.on_responses( + &Context::WithPeer(peer_ids.1), + req_ids.1, + &[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded] })] + ); + + assert!(recv.wait().is_ok()); +} + +#[test] +fn partial_response() { + let harness = Harness::create(); + + let peer_id = 111; + let req_ids = (ReqId(14426), ReqId(555)); + + harness.inject_peer(peer_id, Peer { + status: dummy_status(), + capabilities: 
dummy_capabilities(), + }); + + let make = |num| { + let mut hdr = Header::default(); + hdr.set_number(num); + + let encoded = encoded::Header::new(hdr.rlp(Seal::With)); + (hdr, encoded) + }; + + let (header1, encoded1) = make(5); + let (header2, encoded2) = make(23452); + + // request two headers. + let recv = harness.service.request_raw( + &Context::NoOp, + vec![ + request::HeaderByHash(header1.hash()).into(), + request::HeaderByHash(header2.hash()).into(), + ], + ).unwrap(); + + assert_eq!(harness.service.pending.read().len(), 1); + + harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0)); + assert_eq!(harness.service.pending.read().len(), 0); + + // supply only the first one. + harness.service.on_responses( + &Context::WithPeer(peer_id), + req_ids.0, + &[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded1] })] + ); + + assert_eq!(harness.service.pending.read().len(), 1); + + harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1)); + assert_eq!(harness.service.pending.read().len(), 0); + + // supply the next one. + harness.service.on_responses( + &Context::WithPeer(peer_id), + req_ids.1, + &[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded2] })] + ); + + assert!(recv.wait().is_ok()); +} + +#[test] +fn part_bad_part_good() { + let harness = Harness::create(); + + let peer_id = 111; + let req_ids = (ReqId(14426), ReqId(555)); + + harness.inject_peer(peer_id, Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }); + + let make = |num| { + let mut hdr = Header::default(); + hdr.set_number(num); + + let encoded = encoded::Header::new(hdr.rlp(Seal::With)); + (hdr, encoded) + }; + + let (header1, encoded1) = make(5); + let (header2, encoded2) = make(23452); + + // request two headers. 
+ let recv = harness.service.request_raw( + &Context::NoOp, + vec![ + request::HeaderByHash(header1.hash()).into(), + request::HeaderByHash(header2.hash()).into(), + ], + ).unwrap(); + + assert_eq!(harness.service.pending.read().len(), 1); + + harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0)); + assert_eq!(harness.service.pending.read().len(), 0); + + // supply only the first one, but followed by the wrong kind of response. + // the first header should be processed. + harness.service.on_responses( + &Context::Punish(peer_id), + req_ids.0, + &[ + Response::Headers(basic_request::HeadersResponse { headers: vec![encoded1] }), + Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] } ), + ] + ); + + assert_eq!(harness.service.pending.read().len(), 1); + + harness.inject_peer(peer_id, Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }); + + harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1)); + assert_eq!(harness.service.pending.read().len(), 0); + + // supply the next one. 
+ harness.service.on_responses( + &Context::WithPeer(peer_id), + req_ids.1, + &[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded2] })] + ); + + assert!(recv.wait().is_ok()); +} + +#[test] +fn wrong_kind() { + let harness = Harness::create(); + + let peer_id = 10101; + let req_id = ReqId(14426); + + harness.inject_peer(peer_id, Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }); + + let _recv = harness.service.request_raw( + &Context::NoOp, + vec![request::HeaderByHash(Default::default()).into()] + ).unwrap(); + + assert_eq!(harness.service.pending.read().len(), 1); + + harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_id)); + + assert_eq!(harness.service.pending.read().len(), 0); + + harness.service.on_responses( + &Context::Punish(peer_id), + req_id, + &[Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] })] + ); + + assert_eq!(harness.service.pending.read().len(), 1); +} diff --git a/ethcore/src/header.rs b/ethcore/src/header.rs index 4517c5764..e8b1fc430 100644 --- a/ethcore/src/header.rs +++ b/ethcore/src/header.rs @@ -17,12 +17,14 @@ //! Block header. 
use util::*; -use basic_types::{LogBloom, Seal, ZERO_LOGBLOOM}; +use basic_types::{LogBloom, ZERO_LOGBLOOM}; use time::get_time; use rlp::*; use std::cell::RefCell; +pub use basic_types::Seal; + /// Type for Block number pub type BlockNumber = u64; From fd4d7c4b68a8c248c987b64d0d4559936f782068 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 13 Apr 2017 17:21:12 +0200 Subject: [PATCH 15/24] correct state test checkout --- ethcore/res/ethereum/tests | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/res/ethereum/tests b/ethcore/res/ethereum/tests index 9028c4801..d52059307 160000 --- a/ethcore/res/ethereum/tests +++ b/ethcore/res/ethereum/tests @@ -1 +1 @@ -Subproject commit 9028c4801fd39fbb71a9796979182549a24e81c8 +Subproject commit d520593078fa0849dcd1f907e44ed0a616892e33 From 9358f81ac1f3673e251d010d04f2825c6bb81be3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 9 May 2017 14:24:45 +0200 Subject: [PATCH 16/24] fix indentation --- ethcore/light/src/lib.rs | 4 +- ethcore/light/src/on_demand/request.rs | 88 +++++++++++++------------- ethcore/light/src/transaction_queue.rs | 2 +- 3 files changed, 47 insertions(+), 47 deletions(-) diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index 82b6ea126..5e970b837 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -44,13 +44,13 @@ pub mod provider; #[cfg(feature = "ipc")] pub mod provider { - #![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues + #![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues include!(concat!(env!("OUT_DIR"), "/provider.rs")); } #[cfg(feature = "ipc")] pub mod remote { - pub use provider::LightProviderClient; + pub use provider::LightProviderClient; } mod types; diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 8361661eb..4039c2cb3 100644 --- a/ethcore/light/src/on_demand/request.rs +++ 
b/ethcore/light/src/on_demand/request.rs @@ -58,33 +58,33 @@ pub enum Request { /// A request argument. pub trait RequestArg { /// the response type. - type Out; + type Out; /// Create the request type. /// `extract` must not fail when presented with the corresponding /// `Response`. - fn make(self) -> Request; + fn make(self) -> Request; /// May not panic if the response corresponds with the request /// from `make`. /// Is free to panic otherwise. - fn extract(r: Response) -> Self::Out; + fn extract(r: Response) -> Self::Out; } /// An adapter can be thought of as a grouping of request argument types. /// This is implemented for various tuples and convenient types. pub trait RequestAdapter { /// The output type. - type Out; + type Out; /// Infallibly produce requests. When `extract_from` is presented /// with the corresponding response vector, it may not fail. - fn make_requests(self) -> Vec; + fn make_requests(self) -> Vec; /// Extract the output type from the given responses. /// If they are the corresponding responses to the requests /// made by `make_requests`, do not panic. - fn extract_from(Vec) -> Self::Out; + fn extract_from(Vec) -> Self::Out; } // helper to implement `RequestArg` and `From` for a single request kind. @@ -123,54 +123,54 @@ impl_single!(Code, Code, Bytes); impl_single!(Execution, TransactionProof, super::ExecutionResult); macro_rules! 
impl_args { - () => { - impl RequestAdapter for T { - type Out = T::Out; + () => { + impl RequestAdapter for T { + type Out = T::Out; - fn make_requests(self) -> Vec { - vec![self.make()] - } + fn make_requests(self) -> Vec { + vec![self.make()] + } - fn extract_from(mut responses: Vec) -> Self::Out { - T::extract(responses.pop().expect(SUPPLIED_MATCHES)) - } - } - }; - ($first: ident, $($next: ident,)*) => { - impl< - $first: RequestArg, - $($next: RequestArg,)* - > - RequestAdapter for ($first, $($next,)*) { - type Out = ($first::Out, $($next::Out,)*); + fn extract_from(mut responses: Vec) -> Self::Out { + T::extract(responses.pop().expect(SUPPLIED_MATCHES)) + } + } + }; + ($first: ident, $($next: ident,)*) => { + impl< + $first: RequestArg, + $($next: RequestArg,)* + > + RequestAdapter for ($first, $($next,)*) { + type Out = ($first::Out, $($next::Out,)*); - fn make_requests(self) -> Vec { - let ($first, $($next,)*) = self; + fn make_requests(self) -> Vec { + let ($first, $($next,)*) = self; - vec![ - $first.make(), - $($next.make(),)* - ] - } + vec![ + $first.make(), + $($next.make(),)* + ] + } - fn extract_from(responses: Vec) -> Self::Out { - let mut iter = responses.into_iter(); - ( - $first::extract(iter.next().expect(SUPPLIED_MATCHES)), - $($next::extract(iter.next().expect(SUPPLIED_MATCHES)),)* - ) - } - } - impl_args!($($next,)*); - } + fn extract_from(responses: Vec) -> Self::Out { + let mut iter = responses.into_iter(); + ( + $first::extract(iter.next().expect(SUPPLIED_MATCHES)), + $($next::extract(iter.next().expect(SUPPLIED_MATCHES)),)* + ) + } + } + impl_args!($($next,)*); + } } mod impls { - #![allow(non_snake_case)] + #![allow(non_snake_case)] - use super::{RequestAdapter, RequestArg, Request, Response, SUPPLIED_MATCHES}; + use super::{RequestAdapter, RequestArg, Request, Response, SUPPLIED_MATCHES}; - impl_args!(A, B, C, D, E, F, G, H, I, J, K, L,); + impl_args!(A, B, C, D, E, F, G, H, I, J, K, L,); } /// Requests coupled with their required data 
for verification. diff --git a/ethcore/light/src/transaction_queue.rs b/ethcore/light/src/transaction_queue.rs index d17a863f5..7ce2bd53d 100644 --- a/ethcore/light/src/transaction_queue.rs +++ b/ethcore/light/src/transaction_queue.rs @@ -131,7 +131,7 @@ impl TransactionQueue { if self.by_hash.contains_key(&hash) { return Err(TransactionError::AlreadyImported) } - let res = match self.by_account.entry(sender) { + let res = match self.by_account.entry(sender) { Entry::Vacant(entry) => { entry.insert(AccountTransactions { cur_nonce: CurrentNonce::Assumed(nonce), From 2d87f562f677aaab6b66a01176482a2fcc5df4ef Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 12 May 2017 17:16:38 +0200 Subject: [PATCH 17/24] address grumbles --- ethcore/light/src/on_demand/request.rs | 41 +++++++++++++--------- ethcore/light/src/types/request/builder.rs | 35 +++++++++--------- rpc/src/v1/impls/light/eth.rs | 2 +- 3 files changed, 45 insertions(+), 33 deletions(-) diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 4039c2cb3..8f4be1632 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -354,23 +354,32 @@ impl net_request::CheckedRequest for CheckedRequest { fn check_response(&self, cache: &Mutex<::cache::Cache>, response: &Self::Response) -> Result { use ::request::Response as NetResponse; + // helper for expecting a specific response for a given request. + macro_rules! expect { + ($res: pat => $e: expr) => { + match *response { + $res => $e, + _ => Err(Error::WrongKind), + } + } + } + // check response against contained prover. 
- match (self, response) { - (&CheckedRequest::HeaderProof(ref prover, _), &NetResponse::HeaderProof(ref res)) => - prover.check_response(cache, &res.proof).map(Response::HeaderProof), - (&CheckedRequest::HeaderByHash(ref prover, _), &NetResponse::Headers(ref res)) => - prover.check_response(cache, &res.headers).map(Response::HeaderByHash), - (&CheckedRequest::Receipts(ref prover, _), &NetResponse::Receipts(ref res)) => - prover.check_response(cache, &res.receipts).map(Response::Receipts), - (&CheckedRequest::Body(ref prover, _), &NetResponse::Body(ref res)) => - prover.check_response(cache, &res.body).map(Response::Body), - (&CheckedRequest::Account(ref prover, _), &NetResponse::Account(ref res)) => - prover.check_response(cache, &res.proof).map(Response::Account), - (&CheckedRequest::Code(ref prover, _), &NetResponse::Code(ref res)) => - prover.check_response(cache, &res.code).map(Response::Code), - (&CheckedRequest::Execution(ref prover, _), &NetResponse::Execution(ref res)) => - prover.check_response(cache, &res.items).map(Response::Execution), - _ => Err(Error::WrongKind), + match *self { + CheckedRequest::HeaderProof(ref prover, _) => expect!(NetResponse::HeaderProof(ref res) => + prover.check_response(cache, &res.proof).map(Response::HeaderProof)), + CheckedRequest::HeaderByHash(ref prover, _) => expect!(NetResponse::Headers(ref res) => + prover.check_response(cache, &res.headers).map(Response::HeaderByHash)), + CheckedRequest::Receipts(ref prover, _) => expect!(NetResponse::Receipts(ref res) => + prover.check_response(cache, &res.receipts).map(Response::Receipts)), + CheckedRequest::Body(ref prover, _) => expect!(NetResponse::Body(ref res) => + prover.check_response(cache, &res.body).map(Response::Body)), + CheckedRequest::Account(ref prover, _) => expect!(NetResponse::Account(ref res) => + prover.check_response(cache, &res.proof).map(Response::Account)), + CheckedRequest::Code(ref prover, _) => expect!(NetResponse::Code(ref res) => + 
prover.check_response(cache, &res.code).map(Response::Code)), + CheckedRequest::Execution(ref prover, _) => expect!(NetResponse::Execution(ref res) => + prover.check_response(cache, &res.items).map(Response::Execution)), } } } diff --git a/ethcore/light/src/types/request/builder.rs b/ethcore/light/src/types/request/builder.rs index 3dfb9ff40..27875dc7b 100644 --- a/ethcore/light/src/types/request/builder.rs +++ b/ethcore/light/src/types/request/builder.rs @@ -26,12 +26,12 @@ use request::{ /// Build chained requests. Push them onto the series with `push`, /// and produce a `Requests` object with `build`. Outputs are checked for consistency. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct RequestBuilder { +pub struct RequestBuilder { output_kinds: HashMap<(usize, usize), OutputKind>, requests: Vec, } -impl Default for RequestBuilder { +impl Default for RequestBuilder { fn default() -> Self { RequestBuilder { output_kinds: HashMap::new(), @@ -73,13 +73,13 @@ impl RequestBuilder { /// Requests pending responses. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct Requests { +pub struct Requests { outputs: HashMap<(usize, usize), Output>, requests: Vec, answered: usize, } -impl Requests { +impl Requests { /// Get access to the underlying slice of requests. // TODO: unimplemented -> Vec, // do we _have to_ allocate? pub fn requests(&self) -> &[T] { &self.requests } @@ -92,17 +92,6 @@ impl Requests { self.answered == self.requests.len() } - /// Get the next request as a filled request. Returns `None` when all requests answered. - pub fn next_complete(&self) -> Option { - if self.is_complete() { - None - } else { - Some(self.requests[self.answered].clone() - .complete() - .expect("All outputs checked as invariant of `Requests` object; qed")) - } - } - /// Map requests from one type into another. 
pub fn map_requests(self, f: F) -> Requests where F: FnMut(T) -> U, U: IncompleteRequest @@ -115,6 +104,19 @@ impl Requests { } } +impl Requests { + /// Get the next request as a filled request. Returns `None` when all requests answered. + pub fn next_complete(&self) -> Option { + if self.is_complete() { + None + } else { + Some(self.requests[self.answered].clone() + .complete() + .expect("All outputs checked as invariant of `Requests` object; qed")) + } + } +} + impl Requests { /// Supply a response for the next request. /// Fails on: wrong request kind, all requests answered already. @@ -124,7 +126,8 @@ impl Requests { let idx = self.answered; // check validity. - if idx == self.requests.len() { return Err(ResponseError::Unexpected) } + if self.is_complete() { return Err(ResponseError::Unexpected) } + let extracted = self.requests[idx] .check_response(env, response).map_err(ResponseError::Validity)?; diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index 37ae164ad..e02ccc987 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -185,7 +185,7 @@ impl EthClient { // three possible outcomes: // - network is down. // - we get a score, but our hash is non-canonical. - // - we get ascore, and our hash is canonical. + // - we get a score, and our hash is canonical. 
let maybe_fut = sync.with_context(move |ctx| on_demand.hash_and_score_by_number(ctx, req)); match maybe_fut { Some(fut) => fut.map(move |(hash, score)| { From 909f3d76d87d924fe059cea7e5987e40ea35ec3c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 12 May 2017 17:25:02 +0200 Subject: [PATCH 18/24] optimize back-reference filling --- ethcore/light/src/on_demand/mod.rs | 4 +++- ethcore/light/src/types/request/builder.rs | 13 +++++++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 78767f015..35aa8c590 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -481,7 +481,7 @@ impl Handler for OnDemand { // for each incoming response // 1. ensure verification data filled. (still TODO since on_demand doesn't use back-references yet) // 2. pending.requests.supply_response - // 3. if extracted on-demand response + // 3. if extracted on-demand response, keep it for later. for response in responses { match pending.requests.supply_response(&*self.cache, response) { Ok(response) => { @@ -497,12 +497,14 @@ impl Handler for OnDemand { } } + pending.requests.fill_unanswered(); if pending.requests.is_complete() { let _ = pending.sender.send(pending.responses); return; } + // update network requests (unless we're done, in which case fulfill the future.) let mut builder = basic_request::RequestBuilder::default(); let num_answered = pending.requests.num_answered(); diff --git a/ethcore/light/src/types/request/builder.rs b/ethcore/light/src/types/request/builder.rs index 27875dc7b..dff33513a 100644 --- a/ethcore/light/src/types/request/builder.rs +++ b/ethcore/light/src/types/request/builder.rs @@ -115,6 +115,15 @@ impl Requests { .expect("All outputs checked as invariant of `Requests` object; qed")) } } + + /// Sweep through all unanswered requests, filling them as necessary. 
+ pub fn fill_unanswered(&mut self) {
+ let outputs = &mut self.outputs;
+
+ for req in self.requests.iter_mut().skip(self.answered) {
+ req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput))
+ }
+ }
 }

 impl Requests {
@@ -141,8 +150,8 @@ impl Requests {

 self.answered += 1;

- // fill as much of each remaining request as we can.
- for req in self.requests.iter_mut().skip(self.answered) {
+ // fill as much of the next request as we can.
+ if let Some(ref mut req) = self.requests.get_mut(self.answered) {
 req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput))
 }

From 2a6f38c58730f7aff4d6d3e5c861e8cf6d2fe48c Mon Sep 17 00:00:00 2001
From: Robert Habermeier
Date: Fri, 12 May 2017 17:53:10 +0200
Subject: [PATCH 19/24] removing silenced warning

---
 ethcore/light/src/on_demand/mod.rs | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs
index 35aa8c590..e61c126d6 100644
--- a/ethcore/light/src/on_demand/mod.rs
+++ b/ethcore/light/src/on_demand/mod.rs
@@ -18,9 +18,6 @@
 //! The request service is implemented using Futures. Higher level request handlers
 //! will take the raw data received here and extract meaningful results from it.

-// TODO [ToDr] Suppressing deprecation warnings. Rob will fix the API anyway.
-#![allow(deprecated)] - use std::collections::HashMap; use std::marker::PhantomData; use std::sync::Arc; From f8279bb7bbd093dcf1d3c53805b4a0083495d06f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 12 May 2017 18:00:01 +0200 Subject: [PATCH 20/24] code cleanup with macro --- ethcore/light/src/on_demand/request.rs | 54 +++++++++----------------- 1 file changed, 18 insertions(+), 36 deletions(-) diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 8f4be1632..e3aa4342b 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -264,6 +264,20 @@ impl CheckedRequest { } } +macro_rules! match_me { + ($me: expr, ($check: pat, $req: pat) => $e: expr) => { + match $me { + CheckedRequest::HeaderProof($check, $req) => $e, + CheckedRequest::HeaderByHash($check, $req) => $e, + CheckedRequest::Receipts($check, $req) => $e, + CheckedRequest::Body($check, $req) => $e, + CheckedRequest::Account($check, $req) => $e, + CheckedRequest::Code($check, $req) => $e, + CheckedRequest::Execution($check, $req) => $e, + } + } +} + impl IncompleteRequest for CheckedRequest { type Complete = net_request::CompleteRequest; type Response = net_request::Response; @@ -275,28 +289,12 @@ impl IncompleteRequest for CheckedRequest { fn check_outputs(&self, f: F) -> Result<(), net_request::NoSuchOutput> where F: FnMut(usize, usize, OutputKind) -> Result<(), net_request::NoSuchOutput> { - match *self { - CheckedRequest::HeaderProof(_, ref req) => req.check_outputs(f), - CheckedRequest::HeaderByHash(_, ref req) => req.check_outputs(f), - CheckedRequest::Receipts(_, ref req) => req.check_outputs(f), - CheckedRequest::Body(_, ref req) => req.check_outputs(f), - CheckedRequest::Account(_, ref req) => req.check_outputs(f), - CheckedRequest::Code(_, ref req) => req.check_outputs(f), - CheckedRequest::Execution(_, ref req) => req.check_outputs(f), - } + match_me!(*self, (_, ref req) => req.check_outputs(f)) } /// 
Note that this request will produce the following outputs. fn note_outputs(&self, f: F) where F: FnMut(usize, OutputKind) { - match *self { - CheckedRequest::HeaderProof(_, ref req) => req.note_outputs(f), - CheckedRequest::HeaderByHash(_, ref req) => req.note_outputs(f), - CheckedRequest::Receipts(_, ref req) => req.note_outputs(f), - CheckedRequest::Body(_, ref req) => req.note_outputs(f), - CheckedRequest::Account(_, ref req) => req.note_outputs(f), - CheckedRequest::Code(_, ref req) => req.note_outputs(f), - CheckedRequest::Execution(_, ref req) => req.note_outputs(f), - } + match_me!(*self, (_, ref req) => req.note_outputs(f)) } /// Fill fields of the request. @@ -305,15 +303,7 @@ impl IncompleteRequest for CheckedRequest { /// prior request outputs. /// Only outputs previously checked with `check_outputs` may be available. fn fill(&mut self, f: F) where F: Fn(usize, usize) -> Result { - match *self { - CheckedRequest::HeaderProof(_, ref mut req) => req.fill(f), - CheckedRequest::HeaderByHash(_, ref mut req) => req.fill(f), - CheckedRequest::Receipts(_, ref mut req) => req.fill(f), - CheckedRequest::Body(_, ref mut req) => req.fill(f), - CheckedRequest::Account(_, ref mut req) => req.fill(f), - CheckedRequest::Code(_, ref mut req) => req.fill(f), - CheckedRequest::Execution(_, ref mut req) => req.fill(f), - } + match_me!(*self, (_, ref mut req) => req.fill(f)) } /// Will succeed if all fields have been filled, will fail otherwise. 
@@ -333,15 +323,7 @@ impl IncompleteRequest for CheckedRequest { fn adjust_refs(&mut self, mapping: F) where F: FnMut(usize) -> usize { - match *self { - CheckedRequest::HeaderProof(_, ref mut req) => req.adjust_refs(mapping), - CheckedRequest::HeaderByHash(_, ref mut req) => req.adjust_refs(mapping), - CheckedRequest::Receipts(_, ref mut req) => req.adjust_refs(mapping), - CheckedRequest::Body(_, ref mut req) => req.adjust_refs(mapping), - CheckedRequest::Account(_, ref mut req) => req.adjust_refs(mapping), - CheckedRequest::Code(_, ref mut req) => req.adjust_refs(mapping), - CheckedRequest::Execution(_, ref mut req) => req.adjust_refs(mapping), - } + match_me!(*self, (_, ref mut req) => req.adjust_refs(mapping)) } } From 7626ddc9c3ced915961b53e51153c0786c6f8f97 Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Tue, 16 May 2017 15:03:40 +0000 Subject: [PATCH 21/24] [ci skip] js-precompiled 20170516-145955 --- Cargo.lock | 2 +- js/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 07a5b58f6..b1041f8ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1783,7 +1783,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#05e0ea878ee54bed2e62a5f434663706bdf1919e" +source = "git+https://github.com/paritytech/js-precompiled.git#3dd953a83569af644c5737a22c0ceb7d5f68b138" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package.json b/js/package.json index c0db9488e..6f2a091c1 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.7.76", + "version": "1.7.77", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", From c7cf43d1c1cb436a19bec0ffb0de98bccb0ae630 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 16 May 2017 17:24:12 +0200 Subject: [PATCH 22/24] improve 
assertion --- ethcore/light/src/on_demand/request.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index e3aa4342b..c9a5c4d9b 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -34,7 +34,7 @@ use util::memorydb::MemoryDB; use util::sha3::Hashable; use util::trie::{Trie, TrieDB, TrieError}; -const SUPPLIED_MATCHES: &'static str = "supplied responses always match produced requests; qed"; +const SUPPLIED_MATCHES: &'static str = "supplied responses always match produced requests; enforced by `check_response`; qed"; /// Core unit of the API: submit batches of these to be answered with `Response`s. #[derive(Clone)] From fa4426c8143bc70511ae5b232b0e0e997f594f89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 17 May 2017 10:07:20 +0200 Subject: [PATCH 23/24] Bump bigint. --- Cargo.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b1041f8ee..947f48fb8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -124,7 +124,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bigint" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -441,7 +441,7 @@ name = "ethcore-bigint" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bigint 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "bigint 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2802,7 +2802,7 @@ dependencies = [ "checksum 
aster 0.41.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfdf7355d9db158df68f976ed030ab0f6578af811f5a7bb6dcf221ec24e0e0" "checksum base-x 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2f59103b47307f76e03bef1633aec7fa9e29bfb5aa6daf5a334f94233c71f6c1" "checksum base32 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1b9605ba46d61df0410d8ac686b0007add8172eba90e8e909c347856fe794d8c" -"checksum bigint 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4865ae66523e00114a17935fc03865323c668381e9e37fa96c525a8bbcc4e04f" +"checksum bigint 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "5d1b3ef6756498df0e2c6bb67c065f4154d0ecd721eb5b3c3f865c8012b9fd74" "checksum bit-set 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e6e1e6fb1c9e3d6fcdec57216a74eaa03e41f52a22f13a16438251d8e88b89da" "checksum bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d9bf6104718e80d7b26a68fdbacff3481cfc05df670821affc7e9cbc1884400c" "checksum bit-vec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "5b97c2c8e8bbb4251754f559df8af22fb264853c7d009084a576cdf12565089d" From 4c5e4ac8da70fdad9071a1c53a5382ea376d1ebd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 17 May 2017 12:41:33 +0200 Subject: [PATCH 24/24] PoA warp implementation (#5488) * separate modules for consensus snapshot chunks * bulk of authority warp logic * finish authority warp implementation * squash warnings and enable authority snapshot mode * test harness for PoA * fiddle with harness * epoch generation proof fixes * update constructor code * store epoch transition proof after block commit * basic snap and restore test * use keyvaluedb in state restoration * decompress chunks * fix encoding issues * fixed-to-contract-to-contract test * implement ancient block import * restore genesis transition in PoW snapshot * add format version method to snapshot components * supported version 
numbers in snapshot_components * allow returning of ancient epoch transitions * genesis hash mismatch check * remove commented code --- devtools/src/random_path.rs | 2 +- ethcore/Cargo.toml | 3 + ethcore/native_contracts/Cargo.toml | 3 + ethcore/native_contracts/build.rs | 8 + ethcore/native_contracts/src/lib.rs | 2 + .../src/test_contracts/mod.rs | 21 + .../src/test_contracts/validator_set.rs | 21 + ethcore/src/blockchain/blockchain.rs | 18 +- ethcore/src/client/ancient_import.rs | 68 +++ ethcore/src/client/client.rs | 94 ++-- ethcore/src/client/mod.rs | 1 + ethcore/src/engines/authority_round.rs | 4 + ethcore/src/engines/basic_authority.rs | 4 + ethcore/src/engines/epoch_verifier.rs | 2 +- ethcore/src/engines/mod.rs | 12 +- ethcore/src/engines/validator_set/multi.rs | 30 +- .../engines/validator_set/safe_contract.rs | 5 +- ethcore/src/error.rs | 3 + ethcore/src/executive.rs | 15 + ethcore/src/snapshot/consensus/authority.rs | 498 ++++++++++++++++++ ethcore/src/snapshot/consensus/mod.rs | 301 +---------- ethcore/src/snapshot/consensus/work.rs | 311 +++++++++++ ethcore/src/snapshot/error.rs | 6 + ethcore/src/snapshot/mod.rs | 16 +- ethcore/src/snapshot/service.rs | 18 +- .../src/snapshot/snapshot_service_trait.rs | 4 + ethcore/src/snapshot/tests/helpers.rs | 79 ++- ethcore/src/snapshot/tests/mod.rs | 3 +- .../src/snapshot/tests/proof_of_authority.rs | 249 +++++++++ .../tests/{blocks.rs => proof_of_work.rs} | 7 +- .../tests/test_validator_contract.json | 51 ++ ethcore/src/state/mod.rs | 20 +- rpc/src/v1/tests/helpers/snapshot_service.rs | 1 + sync/src/chain.rs | 10 +- sync/src/tests/snapshot.rs | 4 + 35 files changed, 1547 insertions(+), 347 deletions(-) create mode 100644 ethcore/native_contracts/src/test_contracts/mod.rs create mode 100644 ethcore/native_contracts/src/test_contracts/validator_set.rs create mode 100644 ethcore/src/client/ancient_import.rs create mode 100644 ethcore/src/snapshot/consensus/authority.rs create mode 100644 
ethcore/src/snapshot/consensus/work.rs create mode 100644 ethcore/src/snapshot/tests/proof_of_authority.rs rename ethcore/src/snapshot/tests/{blocks.rs => proof_of_work.rs} (94%) create mode 100644 ethcore/src/snapshot/tests/test_validator_contract.json diff --git a/devtools/src/random_path.rs b/devtools/src/random_path.rs index 0abc89e6b..9c399115b 100644 --- a/devtools/src/random_path.rs +++ b/devtools/src/random_path.rs @@ -96,7 +96,7 @@ impl Drop for RandomTempPath { pub struct GuardedTempResult { pub result: Option, - pub _temp: RandomTempPath + pub _temp: RandomTempPath, } impl GuardedTempResult { diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 697ac2e1c..c3ea4e844 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -52,6 +52,9 @@ stats = { path = "../util/stats" } time = "0.1" transient-hashmap = "0.4" +[dev-dependencies] +native-contracts = { path = "native_contracts", features = ["test_contracts"] } + [features] jit = ["evmjit"] evm-debug = ["slow-blocks"] diff --git a/ethcore/native_contracts/Cargo.toml b/ethcore/native_contracts/Cargo.toml index 085908509..57cca0923 100644 --- a/ethcore/native_contracts/Cargo.toml +++ b/ethcore/native_contracts/Cargo.toml @@ -13,3 +13,6 @@ ethcore-util = { path = "../../util" } [build-dependencies] native-contract-generator = { path = "generator" } + +[features] +default = [] diff --git a/ethcore/native_contracts/build.rs b/ethcore/native_contracts/build.rs index 9c6fb85c4..a56605f75 100644 --- a/ethcore/native_contracts/build.rs +++ b/ethcore/native_contracts/build.rs @@ -33,6 +33,8 @@ const VALIDATOR_SET_ABI: &'static str = r#"[{"constant":true,"inputs":[],"name": const VALIDATOR_REPORT_ABI: &'static str = 
r#"[{"constant":false,"inputs":[{"name":"validator","type":"address"},{"name":"blockNumber","type":"uint256"},{"name":"proof","type":"bytes"}],"name":"reportMalicious","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"validator","type":"address"},{"name":"blockNumber","type":"uint256"}],"name":"reportBenign","outputs":[],"payable":false,"type":"function"}]"#; +const TEST_VALIDATOR_SET_ABI: &'static str = r#"[{"constant":true,"inputs":[],"name":"transitionNonce","outputs":[{"name":"n","type":"uint256"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"newValidators","type":"address[]"}],"name":"setValidators","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"getValidators","outputs":[{"name":"vals","type":"address[]"}],"payable":false,"type":"function"},{"inputs":[],"payable":false,"type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_parent_hash","type":"bytes32"},{"indexed":true,"name":"_nonce","type":"uint256"},{"indexed":false,"name":"_new_set","type":"address[]"}],"name":"ValidatorsChanged","type":"event"}]"#; + fn build_file(name: &str, abi: &str, filename: &str) { let code = ::native_contract_generator::generate_module(name, abi).unwrap(); @@ -43,10 +45,16 @@ fn build_file(name: &str, abi: &str, filename: &str) { f.write_all(code.as_bytes()).unwrap(); } +fn build_test_contracts() { + build_file("ValidatorSet", TEST_VALIDATOR_SET_ABI, "test_validator_set.rs"); +} + fn main() { build_file("Registry", REGISTRY_ABI, "registry.rs"); build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs"); build_file("SecretStoreAclStorage", SECRETSTORE_ACL_STORAGE_ABI, "secretstore_acl_storage.rs"); build_file("ValidatorSet", VALIDATOR_SET_ABI, "validator_set.rs"); build_file("ValidatorReport", VALIDATOR_REPORT_ABI, "validator_report.rs"); + + build_test_contracts(); } diff --git a/ethcore/native_contracts/src/lib.rs 
b/ethcore/native_contracts/src/lib.rs index 1aaaf07b1..d33d7a22a 100644 --- a/ethcore/native_contracts/src/lib.rs +++ b/ethcore/native_contracts/src/lib.rs @@ -29,6 +29,8 @@ mod secretstore_acl_storage; mod validator_set; mod validator_report; +pub mod test_contracts; + pub use self::registry::Registry; pub use self::service_transaction::ServiceTransactionChecker; pub use self::secretstore_acl_storage::SecretStoreAclStorage; diff --git a/ethcore/native_contracts/src/test_contracts/mod.rs b/ethcore/native_contracts/src/test_contracts/mod.rs new file mode 100644 index 000000000..44810b3b7 --- /dev/null +++ b/ethcore/native_contracts/src/test_contracts/mod.rs @@ -0,0 +1,21 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Contracts used for testing. + +pub mod validator_set; + +pub use self::validator_set::ValidatorSet; diff --git a/ethcore/native_contracts/src/test_contracts/validator_set.rs b/ethcore/native_contracts/src/test_contracts/validator_set.rs new file mode 100644 index 000000000..8a63c90dd --- /dev/null +++ b/ethcore/native_contracts/src/test_contracts/validator_set.rs @@ -0,0 +1,21 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +#![allow(unused_mut, unused_variables, unused_imports)] + +//! Test validator set contract. + +include!(concat!(env!("OUT_DIR"), "/test_validator_set.rs")); diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index 21b2be4cd..631369e87 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -445,7 +445,12 @@ impl<'a> Iterator for EpochTransitionIter<'a> { let is_in_canon_chain = self.chain.block_hash(transition.block_number) .map_or(false, |hash| hash == transition.block_hash); - if is_in_canon_chain { + // if the transition is within the block gap, there will only be + // one candidate, and it will be from a snapshot restored from. + let is_ancient = self.chain.first_block_number() + .map_or(false, |first| first > transition.block_number); + + if is_ancient || is_in_canon_chain { return Some((transitions.number, transition)) } } @@ -864,6 +869,7 @@ impl BlockChain { } /// Iterate over all epoch transitions. + /// This will only return transitions within the canonical chain. pub fn epoch_transitions(&self) -> EpochTransitionIter { let iter = self.db.iter_from_prefix(db::COL_EXTRA, &EPOCH_KEY_PREFIX[..]); EpochTransitionIter { @@ -872,6 +878,16 @@ impl BlockChain { } } + /// Get a specific epoch transition by epoch number and provided block hash. 
+ pub fn epoch_transition(&self, epoch_num: u64, block_hash: H256) -> Option { + trace!(target: "blockchain", "Loading epoch {} transition at block {}", + epoch_num, block_hash); + + self.db.read(db::COL_EXTRA, &epoch_num).and_then(|transitions: EpochTransitions| { + transitions.candidates.into_iter().find(|c| c.block_hash == block_hash) + }) + } + /// Add a child to a given block. Assumes that the block hash is in /// the chain and the child's parent is this block. /// diff --git a/ethcore/src/client/ancient_import.rs b/ethcore/src/client/ancient_import.rs new file mode 100644 index 000000000..b62450641 --- /dev/null +++ b/ethcore/src/client/ancient_import.rs @@ -0,0 +1,68 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Helper for ancient block import. + +use std::sync::Arc; + +use engines::{Engine, EpochVerifier, EpochChange}; +use error::Error; +use header::Header; + +use rand::Rng; +use util::RwLock; + +// do "heavy" verification on ~1/50 blocks, randomly sampled. +const HEAVY_VERIFY_RATE: f32 = 0.02; + +/// Ancient block verifier: import an ancient sequence of blocks in order from a starting +/// epoch. +pub struct AncientVerifier { + cur_verifier: RwLock>, + engine: Arc, +} + +impl AncientVerifier { + /// Create a new ancient block verifier with the given engine and initial verifier. 
+ pub fn new(engine: Arc, start_verifier: Box) -> Self { + AncientVerifier { + cur_verifier: RwLock::new(start_verifier), + engine: engine, + } + } + + /// Verify the next block header, randomly choosing whether to do heavy or light + /// verification. If the block is the end of an epoch, updates the epoch verifier. + pub fn verify Result, Error>>( + &self, + rng: &mut R, + header: &Header, + block: &[u8], + receipts: &[::receipt::Receipt], + load_verifier: F, + ) -> Result<(), ::error::Error> { + match rng.gen::() <= HEAVY_VERIFY_RATE { + true => self.cur_verifier.read().verify_heavy(header)?, + false => self.cur_verifier.read().verify_light(header)?, + } + + if let EpochChange::Yes(num) = self.engine.is_epoch_end(header, Some(block), Some(receipts)) { + *self.cur_verifier.write() = load_verifier(num)?; + } + + Ok(()) + } +} diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index a1f47251c..f48d94cff 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -34,6 +34,7 @@ use basic_types::Seal; use block::*; use blockchain::{BlockChain, BlockProvider, EpochTransition, TreeRoute, ImportRoute}; use blockchain::extras::TransactionAddress; +use client::ancient_import::AncientVerifier; use client::Error as ClientError; use client::{ BlockId, TransactionId, UncleId, TraceId, ClientConfig, BlockChainClient, @@ -61,7 +62,7 @@ use service::ClientIoMessage; use snapshot::{self, io as snapshot_io}; use spec::Spec; use state_db::StateDB; -use state::{self, State, CleanupMode}; +use state::{self, State}; use trace; use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase}; use trace::FlatTransactionTraces; @@ -152,6 +153,7 @@ pub struct Client { factories: Factories, history: u64, rng: Mutex, + ancient_verifier: Mutex>, on_user_defaults_change: Mutex) + 'static + Send>>>, registrar: Mutex>, exit_handler: Mutex) + 'static + Send>>>, @@ -241,6 +243,7 @@ impl Client { factories: factories, 
history: history, rng: Mutex::new(OsRng::new().map_err(::util::UtilError::StdIo)?), + ancient_verifier: Mutex::new(None), on_user_defaults_change: Mutex::new(None), registrar: Mutex::new(None), exit_handler: Mutex::new(None), @@ -256,7 +259,11 @@ impl Client { // ensure genesis epoch proof in the DB. { let chain = client.chain.read(); - client.generate_epoch_proof(&spec.genesis_header(), 0, &*chain); + let gh = spec.genesis_header(); + if chain.epoch_transition(0, spec.genesis_header().hash()).is_none() { + trace!(target: "client", "No genesis transition found."); + client.generate_epoch_proof(&gh, 0, &*chain); + } } if let Some(reg_addr) = client.additional_params().get("registrar").and_then(|s| Address::from_str(s).ok()) { @@ -540,25 +547,56 @@ impl Client { fn import_old_block(&self, block_bytes: Bytes, receipts_bytes: Bytes) -> Result { let block = BlockView::new(&block_bytes); let header = block.header(); + let receipts = ::rlp::decode_list(&receipts_bytes); let hash = header.hash(); let _import_lock = self.import_lock.lock(); + { let _timer = PerfTimer::new("import_old_block"); - let mut rng = self.rng.lock(); let chain = self.chain.read(); + let mut ancient_verifier = self.ancient_verifier.lock(); - // verify block. - ::snapshot::verify_old_block( - &mut *rng, - &header, - &*self.engine, - &*chain, - Some(&block_bytes), - false, - )?; + { + // closure for verifying a block. + let verify_with = |verifier: &AncientVerifier| -> Result<(), ::error::Error> { + // verify the block, passing a closure used to load an epoch verifier + // by number. + verifier.verify( + &mut *self.rng.lock(), + &header, + &block_bytes, + &receipts, + |epoch_num| chain.epoch_transition(epoch_num, hash) + .ok_or(BlockError::UnknownEpochTransition(epoch_num)) + .map_err(Into::into) + .and_then(|t| self.engine.epoch_verifier(&header, &t.proof)) + ) + }; + + // initialize the ancient block verifier if we don't have one already. 
+ match &mut *ancient_verifier { + &mut Some(ref verifier) => { + verify_with(verifier)? + } + x @ &mut None => { + // load most recent epoch. + trace!(target: "client", "Initializing ancient block restoration."); + let current_epoch_data = chain.epoch_transitions() + .take_while(|&(_, ref t)| t.block_number < header.number()) + .last() + .map(|(_, t)| t.proof) + .expect("At least one epoch entry (genesis) always stored; qed"); + + let current_verifier = self.engine.epoch_verifier(&header, ¤t_epoch_data)?; + let current_verifier = AncientVerifier::new(self.engine.clone(), current_verifier); + + verify_with(¤t_verifier)?; + *x = Some(current_verifier); + } + } + } // Commit results - let receipts = ::rlp::decode_list(&receipts_bytes); let mut batch = DBTransaction::new(); chain.insert_unordered_block(&mut batch, &block_bytes, receipts, None, false, true); // Final commit to the DB @@ -590,7 +628,7 @@ impl Client { let entering_new_epoch = { use engines::EpochChange; match self.engine.is_epoch_end(block.header(), Some(block_data), Some(&receipts)) { - EpochChange::Yes(e, _) => Some((block.header().clone(), e)), + EpochChange::Yes(e) => Some((block.header().clone(), e)), EpochChange::No => None, EpochChange::Unsure(_) => { warn!(target: "client", "Detected invalid engine implementation."); @@ -641,7 +679,8 @@ impl Client { let mut batch = DBTransaction::new(); let hash = header.hash(); - debug!(target: "client", "Generating validation proof for block {}", hash); + debug!(target: "client", "Generating validation proof for epoch {} at block {}", + epoch_number, hash); // proof is two-part. state items read in lexicographical order, // and the secondary "proof" part. 
@@ -880,8 +919,8 @@ impl Client { let start_hash = match at { BlockId::Latest => { let start_num = match db.earliest_era() { - Some(era) => ::std::cmp::max(era, best_block_number - history), - None => best_block_number - history, + Some(era) => ::std::cmp::max(era, best_block_number.saturating_sub(history)), + None => best_block_number.saturating_sub(history), }; match self.block_hash(BlockId::Number(start_num)) { @@ -992,16 +1031,9 @@ impl BlockChainClient for Client { let mut state = self.state_at(block).ok_or(CallError::StatePruned)?; let original_state = if analytics.state_diffing { Some(state.clone()) } else { None }; - let sender = t.sender(); - let balance = state.balance(&sender).map_err(|_| CallError::StateCorrupt)?; - let needed_balance = t.value + t.gas * t.gas_price; - if balance < needed_balance { - // give the sender a sufficient balance - state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty) - .map_err(|_| CallError::StateCorrupt)?; - } let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; - let mut ret = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(t, options)?; + let mut ret = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm) + .transact_virtual(t, options)?; // TODO gav move this into Executive. if let Some(original) = original_state { @@ -1023,7 +1055,6 @@ impl BlockChainClient for Client { // that's just a copy of the state. 
let original_state = self.state_at(block).ok_or(CallError::StatePruned)?; let sender = t.sender(); - let balance = original_state.balance(&sender).map_err(ExecutionError::from)?; let options = TransactOptions { tracing: true, vm_tracing: false, check_nonce: false }; let cond = |gas| { @@ -1032,15 +1063,8 @@ impl BlockChainClient for Client { let tx = tx.fake_sign(sender); let mut state = original_state.clone(); - let needed_balance = tx.value + tx.gas * tx.gas_price; - if balance < needed_balance { - // give the sender a sufficient balance - state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty) - .map_err(ExecutionError::from)?; - } - Ok(Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm) - .transact(&tx, options.clone()) + .transact_virtual(&tx, options.clone()) .map(|r| r.exception.is_none()) .unwrap_or(false)) }; diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index 6c1280de7..f768e8d43 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -16,6 +16,7 @@ //! Blockchain database client. 
+mod ancient_import; mod config; mod error; mod test_client; diff --git a/ethcore/src/engines/authority_round.rs b/ethcore/src/engines/authority_round.rs index d3910b851..75a8d58a9 100644 --- a/ethcore/src/engines/authority_round.rs +++ b/ethcore/src/engines/authority_round.rs @@ -452,6 +452,10 @@ impl Engine for AuthorityRound { fn sign(&self, hash: H256) -> Result { self.signer.sign(hash).map_err(Into::into) } + + fn snapshot_components(&self) -> Option> { + Some(Box::new(::snapshot::PoaSnapshot)) + } } #[cfg(test)] diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index 8de683e0a..81a734f04 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -216,6 +216,10 @@ impl Engine for BasicAuthority { fn sign(&self, hash: H256) -> Result { self.signer.sign(hash).map_err(Into::into) } + + fn snapshot_components(&self) -> Option> { + Some(Box::new(::snapshot::PoaSnapshot)) + } } #[cfg(test)] diff --git a/ethcore/src/engines/epoch_verifier.rs b/ethcore/src/engines/epoch_verifier.rs index 0d9c87e53..5fc794ec1 100644 --- a/ethcore/src/engines/epoch_verifier.rs +++ b/ethcore/src/engines/epoch_verifier.rs @@ -22,7 +22,7 @@ use header::Header; /// Verifier for all blocks within an epoch with self-contained state. /// /// See docs on `Engine` relating to proving functions for more details. -pub trait EpochVerifier: Sync { +pub trait EpochVerifier: Send + Sync { /// Get the epoch number. fn epoch_number(&self) -> u64; diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs index f95cdf9f8..d38edd8d3 100644 --- a/ethcore/src/engines/mod.rs +++ b/ethcore/src/engines/mod.rs @@ -107,8 +107,8 @@ pub enum EpochChange { Unsure(Unsure), /// No epoch change. No, - /// Validation proof required, and the new epoch number and expected proof. - Yes(u64, Bytes), + /// Validation proof required, and the new epoch number. 
+ Yes(u64), } /// More data required to determine if an epoch change occurred at a given block. @@ -227,6 +227,9 @@ pub trait Engine : Sync + Send { /// For example, for PoA chains the proof will be a validator set, /// and the corresponding `EpochVerifier` can be used to correctly validate /// all blocks produced under that `ValidatorSet` + /// + /// It must be possible to generate an epoch proof for any block in an epoch, + /// and it should always be equivalent to the proof of the transition block. fn epoch_proof(&self, _header: &Header, _caller: &Call) -> Result, Error> { @@ -234,6 +237,11 @@ pub trait Engine : Sync + Send { } /// Whether an epoch change occurred at the given header. + /// + /// If the block or receipts are required, return `Unsure` and the function will be + /// called again with them. + /// Return `Yes` or `No` when the answer is definitively known. + /// /// Should not interact with state. fn is_epoch_end(&self, _header: &Header, _block: Option<&[u8]>, _receipts: Option<&[Receipt]>) -> EpochChange diff --git a/ethcore/src/engines/validator_set/multi.rs b/ethcore/src/engines/validator_set/multi.rs index df3659ac3..c16a3424f 100644 --- a/ethcore/src/engines/validator_set/multi.rs +++ b/ethcore/src/engines/validator_set/multi.rs @@ -76,23 +76,45 @@ impl ValidatorSet for Multi { -> EpochChange { let (set_block, set) = self.correct_set_by_number(header.number()); + let (next_set_block, _) = self.correct_set_by_number(header.number() + 1); + + // multi-set transitions require epoch changes. 
+ if next_set_block != set_block { + return EpochChange::Yes(next_set_block); + } match set.is_epoch_end(header, block, receipts) { - EpochChange::Yes(num, proof) => EpochChange::Yes(set_block + num, proof), + EpochChange::Yes(num) => EpochChange::Yes(set_block + num), other => other, } } fn epoch_proof(&self, header: &Header, caller: &Call) -> Result, String> { - self.correct_set_by_number(header.number()).1.epoch_proof(header, caller) + let (set_block, set) = self.correct_set_by_number(header.number()); + let (next_set_block, next_set) = self.correct_set_by_number(header.number() + 1); + + if next_set_block != set_block { + return next_set.epoch_proof(header, caller); + } + + set.epoch_proof(header, caller) } fn epoch_set(&self, header: &Header, proof: &[u8]) -> Result<(u64, super::SimpleList), ::error::Error> { // "multi" epoch is the inner set's epoch plus the transition block to that set. // ensures epoch increases monotonically. let (set_block, set) = self.correct_set_by_number(header.number()); - let (inner_epoch, list) = set.epoch_set(header, proof)?; - Ok((set_block + inner_epoch, list)) + let (next_set_block, next_set) = self.correct_set_by_number(header.number() + 1); + + // this block kicks off a new validator set -- get the validator set + // starting there. 
+ if next_set_block != set_block { + let (inner_epoch, list) = next_set.epoch_set(header, proof)?; + Ok((next_set_block + inner_epoch, list)) + } else { + let (inner_epoch, list) = set.epoch_set(header, proof)?; + Ok((set_block + inner_epoch, list)) + } } fn contains_with_caller(&self, bh: &H256, address: &Address, caller: &Call) -> bool { diff --git a/ethcore/src/engines/validator_set/safe_contract.rs b/ethcore/src/engines/validator_set/safe_contract.rs index bfdab65b0..262aa0def 100644 --- a/ethcore/src/engines/validator_set/safe_contract.rs +++ b/ethcore/src/engines/validator_set/safe_contract.rs @@ -182,10 +182,9 @@ impl ValidatorSet for ValidatorSafeContract { ); match (nonce, validators) { - (Some(nonce), Some(validators)) => { - let proof = encode_proof(nonce, &validators); + (Some(nonce), Some(_)) => { let new_epoch = nonce.low_u64(); - ::engines::EpochChange::Yes(new_epoch, proof) + ::engines::EpochChange::Yes(new_epoch) } _ => { debug!(target: "engine", "Successfully decoded log turned out to be bad."); diff --git a/ethcore/src/error.rs b/ethcore/src/error.rs index 984df79bc..dd9b7464c 100644 --- a/ethcore/src/error.rs +++ b/ethcore/src/error.rs @@ -169,6 +169,8 @@ pub enum BlockError { UnknownParent(H256), /// Uncle parent given is unknown. UnknownUncleParent(H256), + /// No transition to epoch number. + UnknownEpochTransition(u64), } impl fmt::Display for BlockError { @@ -202,6 +204,7 @@ impl fmt::Display for BlockError { RidiculousNumber(ref oob) => format!("Implausible block number. 
{}", oob), UnknownParent(ref hash) => format!("Unknown parent: {}", hash), UnknownUncleParent(ref hash) => format!("Unknown uncle parent: {}", hash), + UnknownEpochTransition(ref num) => format!("Unknown transition to epoch number: {}", num), }; f.write_fmt(format_args!("Block error ({})", msg)) diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index 1974a6c8d..bfba4ab3d 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -129,6 +129,21 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { } } + /// Execute a transaction in a "virtual" context. + /// This will ensure the caller has enough balance to execute the desired transaction. + /// Used for extra-block executions for things like consensus contracts and RPCs + pub fn transact_virtual(&'a mut self, t: &SignedTransaction, options: TransactOptions) -> Result { + let sender = t.sender(); + let balance = self.state.balance(&sender)?; + let needed_balance = t.value + t.gas * t.gas_price; + if balance < needed_balance { + // give the sender a sufficient balance + self.state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty)?; + } + + self.transact(t, options) + } + /// Execute transaction/call with tracing enabled pub fn transact_with_tracer( &'a mut self, diff --git a/ethcore/src/snapshot/consensus/authority.rs b/ethcore/src/snapshot/consensus/authority.rs new file mode 100644 index 000000000..4568700b9 --- /dev/null +++ b/ethcore/src/snapshot/consensus/authority.rs @@ -0,0 +1,498 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Secondary chunk creation and restoration, implementation for proof-of-authority +//! based engines. +//! +//! The chunks here contain state proofs of transitions, along with validator proofs. + +use super::{SnapshotComponents, Rebuilder, ChunkSink}; + +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; + +use blockchain::{BlockChain, BlockProvider, EpochTransition}; +use engines::{Engine, EpochVerifier}; +use env_info::EnvInfo; +use ids::BlockId; +use header::Header; +use receipt::Receipt; +use snapshot::{Error, ManifestData}; +use state_db::StateDB; + +use itertools::{Position, Itertools}; +use rlp::{RlpStream, UntrustedRlp}; +use util::{Address, Bytes, H256, KeyValueDB, DBValue}; + +/// Snapshot creation and restoration for PoA chains. +/// Chunk format: +/// +/// [FLAG, [header, epoch_number, epoch data, state proof, last hashes], ...] +/// - Header data at which transition occurred, +/// - epoch data (usually list of validators) +/// - state items required to check epoch data +/// - last 256 hashes before the transition; required for checking state changes. +/// +/// FLAG is a bool: true for last chunk, false otherwise. +/// +/// The last item of the last chunk will be a list containing data for the warp target block: +/// [header, transactions, uncles, receipts, last_hashes, parent_td]. +/// If this block is not a transition block, the epoch data should be the same as that +/// for the last transition. 
+pub struct PoaSnapshot; + +impl SnapshotComponents for PoaSnapshot { + fn chunk_all( + &mut self, + chain: &BlockChain, + block_at: H256, + sink: &mut ChunkSink, + preferred_size: usize, + ) -> Result<(), Error> { + let number = chain.block_number(&block_at) + .ok_or_else(|| Error::InvalidStartingBlock(BlockId::Hash(block_at)))?; + + let mut pending_size = 0; + let mut rlps = Vec::new(); + + // TODO: this will become irrelevant after recent block hashes are moved into + // the state. can we optimize it out in that case? + let make_last_hashes = |parent_hash| chain.ancestry_iter(parent_hash) + .into_iter() + .flat_map(|inner| inner) + .take(255) + .collect::>(); + + for (epoch_number, transition) in chain.epoch_transitions() + .take_while(|&(_, ref t)| t.block_number <= number) + { + let header = chain.block_header_data(&transition.block_hash) + .ok_or(Error::BlockNotFound(transition.block_hash))?; + + let last_hashes: Vec<_> = make_last_hashes(header.parent_hash()); + + let entry = { + let mut entry_stream = RlpStream::new_list(5); + entry_stream + .append_raw(&header.into_inner(), 1) + .append(&epoch_number) + .append(&transition.proof); + + entry_stream.begin_list(transition.state_proof.len()); + for item in transition.state_proof { + entry_stream.append(&&*item); + } + + entry_stream.append_list(&last_hashes); + entry_stream.out() + }; + + // cut of the chunk if too large. 
+ let new_loaded_size = pending_size + entry.len(); + pending_size = if new_loaded_size > preferred_size && !rlps.is_empty() { + write_chunk(false, &mut rlps, sink)?; + entry.len() + } else { + new_loaded_size + }; + + rlps.push(entry); + } + + let (block, receipts) = chain.block(&block_at) + .and_then(|b| chain.block_receipts(&block_at).map(|r| (b, r))) + .ok_or(Error::BlockNotFound(block_at))?; + let block = block.decode(); + + let parent_td = chain.block_details(block.header.parent_hash()) + .map(|d| d.total_difficulty) + .ok_or(Error::BlockNotFound(block_at))?; + + let last_hashes = make_last_hashes(*block.header.parent_hash()); + + rlps.push({ + let mut stream = RlpStream::new_list(6); + stream + .append(&block.header) + .append_list(&block.transactions) + .append_list(&block.uncles) + .append(&receipts) + .append_list(&last_hashes) + .append(&parent_td); + stream.out() + }); + + write_chunk(true, &mut rlps, sink)?; + + Ok(()) + } + + fn rebuilder( + &self, + chain: BlockChain, + db: Arc, + manifest: &ManifestData, + ) -> Result, ::error::Error> { + Ok(Box::new(ChunkRebuilder { + manifest: manifest.clone(), + warp_target: None, + chain: chain, + db: db, + had_genesis: false, + unverified_firsts: Vec::new(), + last_proofs: Vec::new(), + })) + } + + fn min_supported_version(&self) -> u64 { 3 } + fn current_version(&self) -> u64 { 3 } +} + +// writes a chunk composed of the inner RLPs here. +// flag indicates whether the chunk is the last chunk. +fn write_chunk(last: bool, chunk_data: &mut Vec, sink: &mut ChunkSink) -> Result<(), Error> { + let mut stream = RlpStream::new_list(1 + chunk_data.len()); + + stream.append(&last); + for item in chunk_data.drain(..) { + stream.append_raw(&item, 1); + } + + (sink)(stream.out().as_slice()).map_err(Into::into) +} + +// rebuilder checks state proofs for all transitions, and checks that each +// transition header is verifiable from the epoch data of the one prior. 
+struct ChunkRebuilder { + manifest: ManifestData, + warp_target: Option<(Header, Vec)>, + chain: BlockChain, + db: Arc, + had_genesis: bool, + + // sorted vectors of unverified first blocks in a chunk + // and epoch data from last blocks in chunks. + // verification for these will be done at the end. + unverified_firsts: Vec<(u64, Header)>, + last_proofs: Vec<(u64, Header, Bytes)>, +} + +// verified data. +struct Verified { + epoch_number: u64, + epoch_transition: EpochTransition, + header: Header, +} + +// make a transaction and env info. +// TODO: hardcoded 50M to match constants in client. +// would be nice to extract magic numbers, or better yet +// off-chain transaction execution, into its own module. +fn make_tx_and_env( + engine: &Engine, + addr: Address, + data: Bytes, + header: &Header, + last_hashes: Arc>, +) -> (::transaction::SignedTransaction, EnvInfo) { + use transaction::{Action, Transaction}; + + let transaction = Transaction { + nonce: engine.account_start_nonce(), + action: Action::Call(addr), + gas: 50_000_000.into(), + gas_price: 0.into(), + value: 0.into(), + data: data, + }.fake_sign(Default::default()); + + let env = EnvInfo { + number: header.number(), + author: *header.author(), + timestamp: header.timestamp(), + difficulty: *header.difficulty(), + gas_limit: 50_000_000.into(), + last_hashes: last_hashes, + gas_used: 0.into(), + }; + + (transaction, env) +} + +impl ChunkRebuilder { + fn verify_transition( + &mut self, + last_verifier: &mut Option>, + transition_rlp: UntrustedRlp, + engine: &Engine, + ) -> Result { + // decode. + let header: Header = transition_rlp.val_at(0)?; + let epoch_number: u64 = transition_rlp.val_at(1)?; + let epoch_data: Bytes = transition_rlp.val_at(2)?; + let state_proof: Vec = transition_rlp.at(3)? 
+ .iter() + .map(|x| Ok(DBValue::from_slice(x.data()?))) + .collect::>()?; + let last_hashes: Vec = transition_rlp.list_at(4)?; + let last_hashes = Arc::new(last_hashes); + + trace!(target: "snapshot", "verifying transition to epoch {}", epoch_number); + + // check current transition against validators of last epoch. + if let Some(verifier) = last_verifier.as_ref() { + verifier.verify_heavy(&header)?; + } + + { + // check the provided state proof actually leads to the + // given epoch data. + let caller = |addr, data| { + use state::{check_proof, ProvedExecution}; + + let (transaction, env_info) = make_tx_and_env( + engine, + addr, + data, + &header, + last_hashes.clone(), + ); + + let result = check_proof( + &state_proof, + header.state_root().clone(), + &transaction, + engine, + &env_info, + ); + + match result { + ProvedExecution::Complete(executed) => Ok(executed.output), + _ => Err("Bad state proof".into()), + } + }; + + let extracted_proof = engine.epoch_proof(&header, &caller) + .map_err(|_| Error::BadEpochProof(epoch_number))?; + + if extracted_proof != epoch_data { + return Err(Error::BadEpochProof(epoch_number).into()); + } + } + + // create new epoch verifier. + *last_verifier = Some(engine.epoch_verifier(&header, &epoch_data)?); + + Ok(Verified { + epoch_number: epoch_number, + epoch_transition: EpochTransition { + block_hash: header.hash(), + block_number: header.number(), + state_proof: state_proof, + proof: epoch_data, + }, + header: header, + }) + } +} + +impl Rebuilder for ChunkRebuilder { + fn feed( + &mut self, + chunk: &[u8], + engine: &Engine, + abort_flag: &AtomicBool, + ) -> Result<(), ::error::Error> { + let rlp = UntrustedRlp::new(chunk); + let is_last_chunk: bool = rlp.val_at(0)?; + let num_items = rlp.item_count()?; + + // number of transitions in the chunk. 
+ let num_transitions = if is_last_chunk { + num_items - 2 + } else { + num_items - 1 + }; + + let mut last_verifier = None; + let mut last_number = None; + for transition_rlp in rlp.iter().skip(1).take(num_transitions).with_position() { + if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) } + + let (is_first, is_last) = match transition_rlp { + Position::First(_) => (true, false), + Position::Middle(_) => (false, false), + Position::Last(_) => (false, true), + Position::Only(_) => (true, true), + }; + + let transition_rlp = transition_rlp.into_inner(); + let verified = self.verify_transition( + &mut last_verifier, + transition_rlp, + engine, + )?; + + if last_number.map_or(false, |num| verified.header.number() <= num) { + return Err(Error::WrongChunkFormat("Later epoch transition in earlier or same block.".into()).into()); + } + + last_number = Some(verified.header.number()); + + // book-keep borders for verification later. + if is_first { + // make sure the genesis transition was included, + // but it doesn't need verification later. + if verified.epoch_number == 0 && verified.header.number() == 0 { + if verified.header.hash() != self.chain.genesis_hash() { + return Err(Error::WrongBlockHash(0, verified.header.hash(), self.chain.genesis_hash()).into()); + } + + self.had_genesis = true; + } else { + let idx = self.unverified_firsts + .binary_search_by_key(&verified.epoch_number, |&(a, _)| a) + .unwrap_or_else(|x| x); + + let entry = (verified.epoch_number, verified.header.clone()); + self.unverified_firsts.insert(idx, entry); + } + } + if is_last { + let idx = self.last_proofs + .binary_search_by_key(&verified.epoch_number, |&(a, _, _)| a) + .unwrap_or_else(|x| x); + + let entry = ( + verified.epoch_number, + verified.header.clone(), + verified.epoch_transition.proof.clone() + ); + self.last_proofs.insert(idx, entry); + } + + // write epoch transition into database. 
+ let mut batch = self.db.transaction(); + self.chain.insert_epoch_transition(&mut batch, verified.epoch_number, + verified.epoch_transition); + self.db.write_buffered(batch); + + trace!(target: "snapshot", "Verified epoch transition for epoch {}", verified.epoch_number); + } + + if is_last_chunk { + use block::Block; + + let last_rlp = rlp.at(num_items - 1)?; + let block = Block { + header: last_rlp.val_at(0)?, + transactions: last_rlp.list_at(1)?, + uncles: last_rlp.list_at(2)?, + }; + let block_data = block.rlp_bytes(::basic_types::Seal::With); + let receipts: Vec = last_rlp.list_at(3)?; + + { + let hash = block.header.hash(); + let best_hash = self.manifest.block_hash; + if hash != best_hash { + return Err(Error::WrongBlockHash(block.header.number(), best_hash, hash).into()) + } + } + + let last_hashes: Vec = last_rlp.list_at(4)?; + let parent_td: ::util::U256 = last_rlp.val_at(5)?; + + let mut batch = self.db.transaction(); + self.chain.insert_unordered_block(&mut batch, &block_data, receipts, Some(parent_td), true, false); + self.db.write_buffered(batch); + + self.warp_target = Some((block.header, last_hashes)); + } + + Ok(()) + } + + fn finalize(&mut self, db: StateDB, engine: &Engine) -> Result<(), ::error::Error> { + use state::State; + + if !self.had_genesis { + return Err(Error::WrongChunkFormat("No genesis transition included.".into()).into()); + } + + let (target_header, target_last_hashes) = match self.warp_target.take() { + Some(x) => x, + None => return Err(Error::WrongChunkFormat("Warp target block not included.".into()).into()), + }; + + // we store the last data even for the last chunk for easier verification + // of warp target, but we don't store genesis transition data. + // other than that, there should be a one-to-one correspondence of + // chunk ends to chunk beginnings. 
+ if self.last_proofs.len() != self.unverified_firsts.len() + 1 { + return Err(Error::WrongChunkFormat("More than one 'last' chunk".into()).into()); + } + + // verify the first entries of chunks we couldn't before. + let lasts_iter = self.last_proofs.iter().map(|&(_, ref hdr, ref proof)| (hdr, &proof[..])); + let firsts_iter = self.unverified_firsts.iter().map(|&(_, ref hdr)| hdr); + + for ((last_hdr, last_proof), first_hdr) in lasts_iter.zip(firsts_iter) { + let verifier = engine.epoch_verifier(&last_hdr, &last_proof)?; + verifier.verify_heavy(&first_hdr)?; + } + + // verify that the validator set of the warp target is the same as that of the + // most recent transition. if the warp target was a transition itself, + // `last_data` will still be correct + let &(_, _, ref last_data) = self.last_proofs.last() + .expect("last_proofs known to have at least one element by the check above; qed"); + + let target_last_hashes = Arc::new(target_last_hashes); + let caller = |addr, data| { + use executive::{Executive, TransactOptions}; + + let factories = ::factory::Factories::default(); + let mut state = State::from_existing( + db.boxed_clone(), + self.manifest.state_root.clone(), + engine.account_start_nonce(), + factories.clone(), + ).map_err(|e| format!("State root mismatch: {}", e))?; + + let (tx, env_info) = make_tx_and_env( + engine, + addr, + data, + &target_header, + target_last_hashes.clone(), + ); + + let options = TransactOptions { tracing: false, vm_tracing: false, check_nonce: false }; + Executive::new(&mut state, &env_info, engine, &factories.vm) + .transact_virtual(&tx, options) + .map(|e| e.output) + .map_err(|e| format!("Error executing: {}", e)) + }; + + let data = engine.epoch_proof(&target_header, &caller)?; + if &data[..] != &last_data[..] 
{ + return Err(Error::WrongChunkFormat("Warp target has different epoch data than epoch transition.".into()).into()) + } + + Ok(()) + } +} diff --git a/ethcore/src/snapshot/consensus/mod.rs b/ethcore/src/snapshot/consensus/mod.rs index 4d853ca27..0ed8c909d 100644 --- a/ethcore/src/snapshot/consensus/mod.rs +++ b/ethcore/src/snapshot/consensus/mod.rs @@ -17,24 +17,24 @@ //! Secondary chunk creation and restoration, implementations for different consensus //! engines. -use std::collections::VecDeque; -use std::io; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::atomic::AtomicBool; use std::sync::Arc; -use blockchain::{BlockChain, BlockProvider}; +use blockchain::BlockChain; use engines::Engine; use snapshot::{Error, ManifestData}; -use snapshot::block::AbridgedBlock; -use util::{Bytes, H256}; +use util::H256; use util::kvdb::KeyValueDB; -use rand::OsRng; -use rlp::{RlpStream, UntrustedRlp}; +mod authority; +mod work; + +pub use self::authority::*; +pub use self::work::*; /// A sink for produced chunks. -pub type ChunkSink<'a> = FnMut(&[u8]) -> io::Result<()> + 'a; +pub type ChunkSink<'a> = FnMut(&[u8]) -> ::std::io::Result<()> + 'a; /// Components necessary for snapshot creation and restoration. pub trait SnapshotComponents: Send { @@ -57,13 +57,21 @@ pub trait SnapshotComponents: Send { /// order and then be finalized. /// /// The manifest, a database, and fresh `BlockChain` are supplied. - // TODO: supply anything for state? + /// + /// The engine passed to the `Rebuilder` methods will be the same instance + /// that created the `SnapshotComponents`. fn rebuilder( &self, chain: BlockChain, db: Arc, manifest: &ManifestData, ) -> Result, ::error::Error>; + + /// Minimum supported snapshot version number. + fn min_supported_version(&self) -> u64; + + /// Current version number + fn current_version(&self) -> u64; } @@ -82,271 +90,10 @@ pub trait Rebuilder: Send { /// Finalize the restoration. Will be done after all chunks have been /// fed successfully. 
- /// This will apply the necessary "glue" between chunks. - fn finalize(&mut self) -> Result<(), Error>; -} - -/// Snapshot creation and restoration for PoW chains. -/// This includes blocks from the head of the chain as a -/// loose assurance that the chain is valid. -/// -/// The field is the number of blocks from the head of the chain -/// to include in the snapshot. -#[derive(Clone, Copy, PartialEq)] -pub struct PowSnapshot(pub u64); - -impl SnapshotComponents for PowSnapshot { - fn chunk_all( - &mut self, - chain: &BlockChain, - block_at: H256, - chunk_sink: &mut ChunkSink, - preferred_size: usize, - ) -> Result<(), Error> { - PowWorker { - chain: chain, - rlps: VecDeque::new(), - current_hash: block_at, - writer: chunk_sink, - preferred_size: preferred_size, - }.chunk_all(self.0) - } - - fn rebuilder( - &self, - chain: BlockChain, - db: Arc, - manifest: &ManifestData, - ) -> Result, ::error::Error> { - PowRebuilder::new(chain, db, manifest, self.0).map(|r| Box::new(r) as Box<_>) - } -} - -/// Used to build block chunks. -struct PowWorker<'a> { - chain: &'a BlockChain, - // block, receipt rlp pairs. - rlps: VecDeque, - current_hash: H256, - writer: &'a mut ChunkSink<'a>, - preferred_size: usize, -} - -impl<'a> PowWorker<'a> { - // Repeatedly fill the buffers and writes out chunks, moving backwards from starting block hash. - // Loops until we reach the first desired block, and writes out the remainder. 
- fn chunk_all(&mut self, snapshot_blocks: u64) -> Result<(), Error> { - let mut loaded_size = 0; - let mut last = self.current_hash; - - let genesis_hash = self.chain.genesis_hash(); - - for _ in 0..snapshot_blocks { - if self.current_hash == genesis_hash { break } - - let (block, receipts) = self.chain.block(&self.current_hash) - .and_then(|b| self.chain.block_receipts(&self.current_hash).map(|r| (b, r))) - .ok_or(Error::BlockNotFound(self.current_hash))?; - - let abridged_rlp = AbridgedBlock::from_block_view(&block.view()).into_inner(); - - let pair = { - let mut pair_stream = RlpStream::new_list(2); - pair_stream.append_raw(&abridged_rlp, 1).append(&receipts); - pair_stream.out() - }; - - let new_loaded_size = loaded_size + pair.len(); - - // cut off the chunk if too large. - - if new_loaded_size > self.preferred_size && !self.rlps.is_empty() { - self.write_chunk(last)?; - loaded_size = pair.len(); - } else { - loaded_size = new_loaded_size; - } - - self.rlps.push_front(pair); - - last = self.current_hash; - self.current_hash = block.header_view().parent_hash(); - } - - if loaded_size != 0 { - self.write_chunk(last)?; - } - - Ok(()) - } - - // write out the data in the buffers to a chunk on disk - // - // we preface each chunk with the parent of the first block's details, - // obtained from the details of the last block written. 
- fn write_chunk(&mut self, last: H256) -> Result<(), Error> { - trace!(target: "snapshot", "prepared block chunk with {} blocks", self.rlps.len()); - - let (last_header, last_details) = self.chain.block_header(&last) - .and_then(|n| self.chain.block_details(&last).map(|d| (n, d))) - .ok_or(Error::BlockNotFound(last))?; - - let parent_number = last_header.number() - 1; - let parent_hash = last_header.parent_hash(); - let parent_total_difficulty = last_details.total_difficulty - *last_header.difficulty(); - - trace!(target: "snapshot", "parent last written block: {}", parent_hash); - - let num_entries = self.rlps.len(); - let mut rlp_stream = RlpStream::new_list(3 + num_entries); - rlp_stream.append(&parent_number).append(parent_hash).append(&parent_total_difficulty); - - for pair in self.rlps.drain(..) { - rlp_stream.append_raw(&pair, 1); - } - - let raw_data = rlp_stream.out(); - - (self.writer)(&raw_data)?; - - Ok(()) - } -} - -/// Rebuilder for proof-of-work chains. -/// Does basic verification for all blocks, but `PoW` verification for some. -/// Blocks must be fed in-order. -/// -/// The first block in every chunk is disconnected from the last block in the -/// chunk before it, as chunks may be submitted out-of-order. -/// -/// After all chunks have been submitted, we "glue" the chunks together. -pub struct PowRebuilder { - chain: BlockChain, - db: Arc, - rng: OsRng, - disconnected: Vec<(u64, H256)>, - best_number: u64, - best_hash: H256, - best_root: H256, - fed_blocks: u64, - snapshot_blocks: u64, -} - -impl PowRebuilder { - /// Create a new PowRebuilder. 
- fn new(chain: BlockChain, db: Arc, manifest: &ManifestData, snapshot_blocks: u64) -> Result { - Ok(PowRebuilder { - chain: chain, - db: db, - rng: OsRng::new()?, - disconnected: Vec::new(), - best_number: manifest.block_number, - best_hash: manifest.block_hash, - best_root: manifest.state_root, - fed_blocks: 0, - snapshot_blocks: snapshot_blocks, - }) - } -} - -impl Rebuilder for PowRebuilder { - /// Feed the rebuilder an uncompressed block chunk. - /// Returns the number of blocks fed or any errors. - fn feed(&mut self, chunk: &[u8], engine: &Engine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> { - use basic_types::Seal::With; - use views::BlockView; - use snapshot::verify_old_block; - use util::U256; - use util::triehash::ordered_trie_root; - - let rlp = UntrustedRlp::new(chunk); - let item_count = rlp.item_count()?; - let num_blocks = (item_count - 3) as u64; - - trace!(target: "snapshot", "restoring block chunk with {} blocks.", item_count - 3); - - if self.fed_blocks + num_blocks > self.snapshot_blocks { - return Err(Error::TooManyBlocks(self.snapshot_blocks, self.fed_blocks).into()) - } - - // todo: assert here that these values are consistent with chunks being in order. - let mut cur_number = rlp.val_at::(0)? 
+ 1; - let mut parent_hash = rlp.val_at::(1)?; - let parent_total_difficulty = rlp.val_at::(2)?; - - for idx in 3..item_count { - if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) } - - let pair = rlp.at(idx)?; - let abridged_rlp = pair.at(0)?.as_raw().to_owned(); - let abridged_block = AbridgedBlock::from_raw(abridged_rlp); - let receipts: Vec<::receipt::Receipt> = pair.list_at(1)?; - let receipts_root = ordered_trie_root( - pair.at(1)?.iter().map(|r| r.as_raw().to_owned()) - ); - - let block = abridged_block.to_block(parent_hash, cur_number, receipts_root)?; - let block_bytes = block.rlp_bytes(With); - let is_best = cur_number == self.best_number; - - if is_best { - if block.header.hash() != self.best_hash { - return Err(Error::WrongBlockHash(cur_number, self.best_hash, block.header.hash()).into()) - } - - if block.header.state_root() != &self.best_root { - return Err(Error::WrongStateRoot(self.best_root, *block.header.state_root()).into()) - } - } - - verify_old_block( - &mut self.rng, - &block.header, - engine, - &self.chain, - Some(&block_bytes), - is_best - )?; - - let mut batch = self.db.transaction(); - - // special-case the first block in each chunk. - if idx == 3 { - if self.chain.insert_unordered_block(&mut batch, &block_bytes, receipts, Some(parent_total_difficulty), is_best, false) { - self.disconnected.push((cur_number, block.header.hash())); - } - } else { - self.chain.insert_unordered_block(&mut batch, &block_bytes, receipts, None, is_best, false); - } - self.db.write_buffered(batch); - self.chain.commit(); - - parent_hash = BlockView::new(&block_bytes).hash(); - cur_number += 1; - } - - self.fed_blocks += num_blocks; - - Ok(()) - } - - /// Glue together any disconnected chunks and check that the chain is complete. - fn finalize(&mut self) -> Result<(), Error> { - let mut batch = self.db.transaction(); - - for (first_num, first_hash) in self.disconnected.drain(..) 
{ - let parent_num = first_num - 1; - - // check if the parent is even in the chain. - // since we don't restore every single block in the chain, - // the first block of the first chunks has nothing to connect to. - if let Some(parent_hash) = self.chain.block_hash(parent_num) { - // if so, add the child to it. - self.chain.add_child(&mut batch, parent_hash, first_hash); - } - } - self.db.write_buffered(batch); - Ok(()) - } + /// + /// This should apply the necessary "glue" between chunks, + /// and verify against the restored state. + /// + /// The database passed contains the state for the warp target block. + fn finalize(&mut self, db: ::state_db::StateDB, engine: &Engine) -> Result<(), ::error::Error>; } diff --git a/ethcore/src/snapshot/consensus/work.rs b/ethcore/src/snapshot/consensus/work.rs new file mode 100644 index 000000000..ff193ad00 --- /dev/null +++ b/ethcore/src/snapshot/consensus/work.rs @@ -0,0 +1,311 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Secondary chunk creation and restoration, implementation for proof-of-work +//! chains. +//! +//! The secondary chunks in this instance are 30,000 "abridged blocks" from the head +//! of the chain, which serve as an indication of valid chain. 
+ +use super::{SnapshotComponents, Rebuilder, ChunkSink}; + +use std::collections::VecDeque; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; + +use blockchain::{BlockChain, BlockProvider}; +use engines::Engine; +use snapshot::{Error, ManifestData}; +use snapshot::block::AbridgedBlock; +use util::{Bytes, H256, KeyValueDB}; +use rlp::{RlpStream, UntrustedRlp}; +use rand::OsRng; + +/// Snapshot creation and restoration for PoW chains. +/// This includes blocks from the head of the chain as a +/// loose assurance that the chain is valid. +/// +/// The field is the number of blocks from the head of the chain +/// to include in the snapshot. +#[derive(Clone, Copy, PartialEq)] +pub struct PowSnapshot(pub u64); + +impl SnapshotComponents for PowSnapshot { + fn chunk_all( + &mut self, + chain: &BlockChain, + block_at: H256, + chunk_sink: &mut ChunkSink, + preferred_size: usize, + ) -> Result<(), Error> { + PowWorker { + chain: chain, + rlps: VecDeque::new(), + current_hash: block_at, + writer: chunk_sink, + preferred_size: preferred_size, + }.chunk_all(self.0) + } + + fn rebuilder( + &self, + chain: BlockChain, + db: Arc, + manifest: &ManifestData, + ) -> Result, ::error::Error> { + PowRebuilder::new(chain, db, manifest, self.0).map(|r| Box::new(r) as Box<_>) + } + + fn min_supported_version(&self) -> u64 { ::snapshot::MIN_SUPPORTED_STATE_CHUNK_VERSION } + fn current_version(&self) -> u64 { ::snapshot::STATE_CHUNK_VERSION } +} + +/// Used to build block chunks. +struct PowWorker<'a> { + chain: &'a BlockChain, + // block, receipt rlp pairs. + rlps: VecDeque, + current_hash: H256, + writer: &'a mut ChunkSink<'a>, + preferred_size: usize, +} + +impl<'a> PowWorker<'a> { + // Repeatedly fill the buffers and writes out chunks, moving backwards from starting block hash. + // Loops until we reach the first desired block, and writes out the remainder. 
+ fn chunk_all(&mut self, snapshot_blocks: u64) -> Result<(), Error> { + let mut loaded_size = 0; + let mut last = self.current_hash; + + let genesis_hash = self.chain.genesis_hash(); + + for _ in 0..snapshot_blocks { + if self.current_hash == genesis_hash { break } + + let (block, receipts) = self.chain.block(&self.current_hash) + .and_then(|b| self.chain.block_receipts(&self.current_hash).map(|r| (b, r))) + .ok_or(Error::BlockNotFound(self.current_hash))?; + + let abridged_rlp = AbridgedBlock::from_block_view(&block.view()).into_inner(); + + let pair = { + let mut pair_stream = RlpStream::new_list(2); + pair_stream.append_raw(&abridged_rlp, 1).append(&receipts); + pair_stream.out() + }; + + let new_loaded_size = loaded_size + pair.len(); + + // cut off the chunk if too large. + + if new_loaded_size > self.preferred_size && !self.rlps.is_empty() { + self.write_chunk(last)?; + loaded_size = pair.len(); + } else { + loaded_size = new_loaded_size; + } + + self.rlps.push_front(pair); + + last = self.current_hash; + self.current_hash = block.header_view().parent_hash(); + } + + if loaded_size != 0 { + self.write_chunk(last)?; + } + + Ok(()) + } + + // write out the data in the buffers to a chunk on disk + // + // we preface each chunk with the parent of the first block's details, + // obtained from the details of the last block written. 
+ fn write_chunk(&mut self, last: H256) -> Result<(), Error> { + trace!(target: "snapshot", "prepared block chunk with {} blocks", self.rlps.len()); + + let (last_header, last_details) = self.chain.block_header(&last) + .and_then(|n| self.chain.block_details(&last).map(|d| (n, d))) + .ok_or(Error::BlockNotFound(last))?; + + let parent_number = last_header.number() - 1; + let parent_hash = last_header.parent_hash(); + let parent_total_difficulty = last_details.total_difficulty - *last_header.difficulty(); + + trace!(target: "snapshot", "parent last written block: {}", parent_hash); + + let num_entries = self.rlps.len(); + let mut rlp_stream = RlpStream::new_list(3 + num_entries); + rlp_stream.append(&parent_number).append(parent_hash).append(&parent_total_difficulty); + + for pair in self.rlps.drain(..) { + rlp_stream.append_raw(&pair, 1); + } + + let raw_data = rlp_stream.out(); + + (self.writer)(&raw_data)?; + + Ok(()) + } +} + +/// Rebuilder for proof-of-work chains. +/// Does basic verification for all blocks, but `PoW` verification for some. +/// Blocks must be fed in-order. +/// +/// The first block in every chunk is disconnected from the last block in the +/// chunk before it, as chunks may be submitted out-of-order. +/// +/// After all chunks have been submitted, we "glue" the chunks together. +pub struct PowRebuilder { + chain: BlockChain, + db: Arc, + rng: OsRng, + disconnected: Vec<(u64, H256)>, + best_number: u64, + best_hash: H256, + best_root: H256, + fed_blocks: u64, + snapshot_blocks: u64, +} + +impl PowRebuilder { + /// Create a new PowRebuilder. 
+ fn new(chain: BlockChain, db: Arc, manifest: &ManifestData, snapshot_blocks: u64) -> Result { + Ok(PowRebuilder { + chain: chain, + db: db, + rng: OsRng::new()?, + disconnected: Vec::new(), + best_number: manifest.block_number, + best_hash: manifest.block_hash, + best_root: manifest.state_root, + fed_blocks: 0, + snapshot_blocks: snapshot_blocks, + }) + } +} + +impl Rebuilder for PowRebuilder { + /// Feed the rebuilder an uncompressed block chunk. + /// Returns the number of blocks fed or any errors. + fn feed(&mut self, chunk: &[u8], engine: &Engine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> { + use basic_types::Seal::With; + use views::BlockView; + use snapshot::verify_old_block; + use util::U256; + use util::triehash::ordered_trie_root; + + let rlp = UntrustedRlp::new(chunk); + let item_count = rlp.item_count()?; + let num_blocks = (item_count - 3) as u64; + + trace!(target: "snapshot", "restoring block chunk with {} blocks.", item_count - 3); + + if self.fed_blocks + num_blocks > self.snapshot_blocks { + return Err(Error::TooManyBlocks(self.snapshot_blocks, self.fed_blocks).into()) + } + + // todo: assert here that these values are consistent with chunks being in order. + let mut cur_number = rlp.val_at::(0)? 
+ 1; + let mut parent_hash = rlp.val_at::(1)?; + let parent_total_difficulty = rlp.val_at::(2)?; + + for idx in 3..item_count { + if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) } + + let pair = rlp.at(idx)?; + let abridged_rlp = pair.at(0)?.as_raw().to_owned(); + let abridged_block = AbridgedBlock::from_raw(abridged_rlp); + let receipts: Vec<::receipt::Receipt> = pair.list_at(1)?; + let receipts_root = ordered_trie_root( + pair.at(1)?.iter().map(|r| r.as_raw().to_owned()) + ); + + let block = abridged_block.to_block(parent_hash, cur_number, receipts_root)?; + let block_bytes = block.rlp_bytes(With); + let is_best = cur_number == self.best_number; + + if is_best { + if block.header.hash() != self.best_hash { + return Err(Error::WrongBlockHash(cur_number, self.best_hash, block.header.hash()).into()) + } + + if block.header.state_root() != &self.best_root { + return Err(Error::WrongStateRoot(self.best_root, *block.header.state_root()).into()) + } + } + + verify_old_block( + &mut self.rng, + &block.header, + engine, + &self.chain, + Some(&block_bytes), + is_best + )?; + + let mut batch = self.db.transaction(); + + // special-case the first block in each chunk. + if idx == 3 { + if self.chain.insert_unordered_block(&mut batch, &block_bytes, receipts, Some(parent_total_difficulty), is_best, false) { + self.disconnected.push((cur_number, block.header.hash())); + } + } else { + self.chain.insert_unordered_block(&mut batch, &block_bytes, receipts, None, is_best, false); + } + self.db.write_buffered(batch); + self.chain.commit(); + + parent_hash = BlockView::new(&block_bytes).hash(); + cur_number += 1; + } + + self.fed_blocks += num_blocks; + + Ok(()) + } + + /// Glue together any disconnected chunks and check that the chain is complete. + fn finalize(&mut self, _: ::state_db::StateDB, _: &Engine) -> Result<(), ::error::Error> { + let mut batch = self.db.transaction(); + + for (first_num, first_hash) in self.disconnected.drain(..) 
{ + let parent_num = first_num - 1; + + // check if the parent is even in the chain. + // since we don't restore every single block in the chain, + // the first block of the first chunks has nothing to connect to. + if let Some(parent_hash) = self.chain.block_hash(parent_num) { + // if so, add the child to it. + self.chain.add_child(&mut batch, parent_hash, first_hash); + } + } + + let genesis_hash = self.chain.genesis_hash(); + self.chain.insert_epoch_transition(&mut batch, 0, ::blockchain::EpochTransition { + block_number: 0, + block_hash: genesis_hash, + proof: vec![], + state_proof: vec![], + }); + self.db.write_buffered(batch); + Ok(()) + } +} diff --git a/ethcore/src/snapshot/error.rs b/ethcore/src/snapshot/error.rs index 7a3ffdca2..56be84c96 100644 --- a/ethcore/src/snapshot/error.rs +++ b/ethcore/src/snapshot/error.rs @@ -59,6 +59,10 @@ pub enum Error { ChunkTooSmall, /// Snapshots not supported by the consensus engine. SnapshotsUnsupported, + /// Bad epoch transition. + BadEpochProof(u64), + /// Wrong chunk format. 
+ WrongChunkFormat(String), } impl fmt::Display for Error { @@ -82,6 +86,8 @@ impl fmt::Display for Error { Error::VersionNotSupported(ref ver) => write!(f, "Snapshot version {} is not supprted.", ver), Error::ChunkTooSmall => write!(f, "Chunk size is too small."), Error::SnapshotsUnsupported => write!(f, "Snapshots unsupported by consensus engine."), + Error::BadEpochProof(i) => write!(f, "Bad epoch proof for transition to epoch {}", i), + Error::WrongChunkFormat(ref msg) => write!(f, "Wrong chunk format: {}", msg), } } } diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index db3bebde9..36c50f227 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -33,7 +33,7 @@ use util::{Bytes, Hashable, HashDB, DBValue, snappy, U256, Uint}; use util::Mutex; use util::hash::{H256}; use util::journaldb::{self, Algorithm, JournalDB}; -use util::kvdb::Database; +use util::kvdb::KeyValueDB; use util::trie::{TrieDB, TrieDBMut, Trie, TrieMut}; use util::sha3::SHA3_NULL_RLP; use rlp::{RlpStream, UntrustedRlp}; @@ -83,6 +83,11 @@ mod traits { // Try to have chunks be around 4MB (before compression) const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024; +// Minimum supported state chunk version. +const MIN_SUPPORTED_STATE_CHUNK_VERSION: u64 = 1; +// current state chunk version. +const STATE_CHUNK_VERSION: u64 = 2; + /// A progress indicator for snapshots. 
#[derive(Debug, Default)] pub struct Progress { @@ -135,6 +140,7 @@ pub fn take_snapshot( let writer = Mutex::new(writer); let chunker = engine.snapshot_components().ok_or(Error::SnapshotsUnsupported)?; + let snapshot_version = chunker.current_version(); let (state_hashes, block_hashes) = scope(|scope| { let writer = &writer; let block_guard = scope.spawn(move || chunk_secondary(chunker, chain, block_at, writer, p)); @@ -148,7 +154,7 @@ pub fn take_snapshot( info!("produced {} state chunks and {} block chunks.", state_hashes.len(), block_hashes.len()); let manifest_data = ManifestData { - version: 2, + version: snapshot_version, state_hashes: state_hashes, block_hashes: block_hashes, state_root: *state_root, @@ -309,7 +315,7 @@ pub struct StateRebuilder { impl StateRebuilder { /// Create a new state rebuilder to write into the given backing DB. - pub fn new(db: Arc, pruning: Algorithm) -> Self { + pub fn new(db: Arc, pruning: Algorithm) -> Self { StateRebuilder { db: journaldb::new(db.clone(), pruning, ::db::COL_STATE), state_root: SHA3_NULL_RLP, @@ -384,7 +390,7 @@ impl StateRebuilder { /// Finalize the restoration. Check for accounts missing code and make a dummy /// journal entry. /// Once all chunks have been fed, there should be nothing missing. - pub fn finalize(mut self, era: u64, id: H256) -> Result<(), ::error::Error> { + pub fn finalize(mut self, era: u64, id: H256) -> Result, ::error::Error> { let missing = self.missing_code.keys().cloned().collect::>(); if !missing.is_empty() { return Err(Error::MissingCode(missing).into()) } @@ -392,7 +398,7 @@ impl StateRebuilder { self.db.journal_under(&mut batch, era, &id)?; self.db.backing().write_buffered(batch); - Ok(()) + Ok(self.db) } /// Get the state root of the rebuilder. 
diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index dc92b5427..1f2aef9f7 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -106,6 +106,7 @@ impl Restoration { let secondary = components.rebuilder(chain, raw_db.clone(), &manifest)?; let root = manifest.state_root.clone(); + Ok(Restoration { manifest: manifest, state_chunks_left: state_chunks, @@ -150,7 +151,7 @@ impl Restoration { } // finish up restoration. - fn finalize(mut self) -> Result<(), Error> { + fn finalize(mut self, engine: &Engine) -> Result<(), Error> { use util::trie::TrieError; if !self.is_done() { return Ok(()) } @@ -163,10 +164,11 @@ impl Restoration { } // check for missing code. - self.state.finalize(self.manifest.block_number, self.manifest.block_hash)?; + let db = self.state.finalize(self.manifest.block_number, self.manifest.block_hash)?; + let db = ::state_db::StateDB::new(db, 0); // connect out-of-order chunks and verify chain integrity. - self.secondary.finalize()?; + self.secondary.finalize(db, engine)?; if let Some(writer) = self.writer { writer.finish(self.manifest)?; @@ -450,7 +452,10 @@ impl Service { let recover = rest.as_ref().map_or(false, |rest| rest.writer.is_some()); // destroy the restoration before replacing databases and snapshot. 
- rest.take().map(Restoration::finalize).unwrap_or(Ok(()))?; + rest.take() + .map(|r| r.finalize(&*self.engine)) + .unwrap_or(Ok(()))?; + self.replace_client_db()?; if recover { @@ -554,6 +559,11 @@ impl SnapshotService for Service { self.reader.read().as_ref().map(|r| r.manifest().clone()) } + fn min_supported_version(&self) -> Option { + self.engine.snapshot_components() + .map(|c| c.min_supported_version()) + } + fn chunk(&self, hash: H256) -> Option { self.reader.read().as_ref().and_then(|r| r.chunk(hash).ok()) } diff --git a/ethcore/src/snapshot/snapshot_service_trait.rs b/ethcore/src/snapshot/snapshot_service_trait.rs index 67e96398e..e57b39da1 100644 --- a/ethcore/src/snapshot/snapshot_service_trait.rs +++ b/ethcore/src/snapshot/snapshot_service_trait.rs @@ -27,6 +27,10 @@ pub trait SnapshotService : Sync + Send { /// Query the most recent manifest data. fn manifest(&self) -> Option; + /// Get the minimum supported snapshot version number. + /// `None` indicates warp sync isn't supported by the consensus engine. + fn min_supported_version(&self) -> Option; + /// Get raw chunk for a given hash. fn chunk(&self, hash: H256) -> Option; diff --git a/ethcore/src/snapshot/tests/helpers.rs b/ethcore/src/snapshot/tests/helpers.rs index ed356346b..cfd7af9be 100644 --- a/ethcore/src/snapshot/tests/helpers.rs +++ b/ethcore/src/snapshot/tests/helpers.rs @@ -17,13 +17,24 @@ //! Snapshot test helpers. These are used to build blockchains and state tries //! which can be queried before and after a full snapshot/restore cycle. 
-use basic_account::BasicAccount; +use std::sync::Arc; + use account_db::AccountDBMut; +use basic_account::BasicAccount; +use blockchain::BlockChain; +use client::{BlockChainClient, Client}; +use engines::Engine; +use snapshot::{StateRebuilder}; +use snapshot::io::{SnapshotReader, PackedWriter, PackedReader}; +use state_db::StateDB; + +use devtools::{RandomTempPath, GuardedTempResult}; use rand::Rng; -use util::DBValue; +use util::{DBValue, KeyValueDB}; use util::hash::H256; use util::hashdb::HashDB; +use util::journaldb; use util::trie::{Alphabet, StandardMap, SecTrieDBMut, TrieMut, ValueMode}; use util::trie::{TrieDB, TrieDBMut, Trie}; use util::sha3::SHA3_NULL_RLP; @@ -125,3 +136,67 @@ pub fn compare_dbs(one: &HashDB, two: &HashDB) { assert_eq!(one.get(&key).unwrap(), two.get(&key).unwrap()); } } + +/// Take a snapshot from the given client into a temporary file. +/// Return a snapshot reader for it. +pub fn snap(client: &Client) -> GuardedTempResult> { + use ids::BlockId; + + let dir = RandomTempPath::new(); + let writer = PackedWriter::new(dir.as_path()).unwrap(); + let progress = Default::default(); + + let hash = client.chain_info().best_block_hash; + client.take_snapshot(writer, BlockId::Hash(hash), &progress).unwrap(); + + let reader = PackedReader::new(dir.as_path()).unwrap().unwrap(); + + GuardedTempResult { + result: Some(Box::new(reader)), + _temp: dir, + } +} + +/// Restore a snapshot into a given database. This will read chunks from the given reader +/// write into the given database. 
+pub fn restore( + db: Arc, + engine: &Engine, + reader: &SnapshotReader, + genesis: &[u8], +) -> Result<(), ::error::Error> { + use std::sync::atomic::AtomicBool; + use util::snappy; + + let flag = AtomicBool::new(true); + let components = engine.snapshot_components().unwrap(); + let manifest = reader.manifest(); + + let mut state = StateRebuilder::new(db.clone(), journaldb::Algorithm::Archive); + let mut secondary = { + let chain = BlockChain::new(Default::default(), genesis, db.clone()); + components.rebuilder(chain, db, manifest).unwrap() + }; + + let mut snappy_buffer = Vec::new(); + + trace!(target: "snapshot", "restoring state"); + for state_chunk_hash in manifest.state_hashes.iter() { + trace!(target: "snapshot", "state chunk hash: {}", state_chunk_hash); + let chunk = reader.chunk(*state_chunk_hash).unwrap(); + let len = snappy::decompress_into(&chunk, &mut snappy_buffer).unwrap(); + state.feed(&snappy_buffer[..len], &flag)?; + } + + trace!(target: "snapshot", "restoring secondary"); + for chunk_hash in manifest.block_hashes.iter() { + let chunk = reader.chunk(*chunk_hash).unwrap(); + let len = snappy::decompress_into(&chunk, &mut snappy_buffer).unwrap(); + secondary.feed(&snappy_buffer[..len], engine, &flag)?; + } + + let jdb = state.finalize(manifest.block_number, manifest.block_hash)?; + let state_db = StateDB::new(jdb, 0); + + secondary.finalize(state_db, engine) +} diff --git a/ethcore/src/snapshot/tests/mod.rs b/ethcore/src/snapshot/tests/mod.rs index 6530bb42a..6e9398356 100644 --- a/ethcore/src/snapshot/tests/mod.rs +++ b/ethcore/src/snapshot/tests/mod.rs @@ -16,7 +16,8 @@ //! Snapshot tests. 
-mod blocks; +mod proof_of_work; +mod proof_of_authority; mod state; mod service; diff --git a/ethcore/src/snapshot/tests/proof_of_authority.rs b/ethcore/src/snapshot/tests/proof_of_authority.rs new file mode 100644 index 000000000..d82b6f3ae --- /dev/null +++ b/ethcore/src/snapshot/tests/proof_of_authority.rs @@ -0,0 +1,249 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! PoA block chunker and rebuilder tests. + +use std::cell::RefCell; +use std::sync::Arc; +use std::str::FromStr; + +use account_provider::AccountProvider; +use client::{Client, BlockChainClient, MiningBlockChainClient}; +use ethkey::Secret; +use engines::Seal; +use futures::Future; +use miner::MinerService; +use native_contracts::test_contracts::ValidatorSet; +use snapshot::tests::helpers as snapshot_helpers; +use spec::Spec; +use tests::helpers; +use transaction::{Transaction, Action, SignedTransaction}; + +use util::{Address, Hashable}; +use util::kvdb; + +const PASS: &'static str = ""; +const TRANSITION_BLOCK_1: usize = 2; // block at which the contract becomes activated. +const TRANSITION_BLOCK_2: usize = 6; // block at which the second contract activates. + +macro_rules! secret { + ($e: expr) => { Secret::from_slice(&$e.sha3()).expect(format!("sha3({}) not valid secret.", $e).as_str()) } +} + +lazy_static! { + // contract addresses. 
+ static ref CONTRACT_ADDR_1: Address = Address::from_str("0000000000000000000000000000000000000005").unwrap(); + static ref CONTRACT_ADDR_2: Address = Address::from_str("0000000000000000000000000000000000000006").unwrap(); + // secret: `sha3(1)`, and initial validator. + static ref RICH_ADDR: Address = Address::from_str("7d577a597b2742b498cb5cf0c26cdcd726d39e6e").unwrap(); + // rich address' secret. + static ref RICH_SECRET: Secret = secret!("1"); +} + + +/// Contract code used here: https://gist.github.com/rphmeier/2de14fd365a969e3a9e10d77eb9a1e37 +/// Account with secret "1".sha3() is initially the validator. +/// Transitions to the contract at block 2, initially same validator set. +/// Create a new Spec with BasicAuthority which uses a contract at address 5 to determine the current validators using `getValidators`. +/// `native_contracts::test_contracts::ValidatorSet` provides a native wrapper for the ABI. +fn spec_fixed_to_contract() -> Spec { + let data = include_bytes!("test_validator_contract.json"); + Spec::load(&data[..]).unwrap() +} + +// creates an account provider, filling it with accounts from all the given +// secrets and password `PASS`. +// returns addresses corresponding to secrets. +fn make_accounts(secrets: &[Secret]) -> (Arc, Vec
) { + let provider = AccountProvider::transient_provider(); + + let addrs = secrets.iter() + .cloned() + .map(|s| provider.insert_account(s, PASS).unwrap()) + .collect(); + + (Arc::new(provider), addrs) +} + +// validator transition. block number and new validators. must be after `TRANSITION_BLOCK_1`. +// all addresses in the set must be in the account provider. +enum Transition { + // manual transition via transaction + Manual(usize, Vec
), + // implicit transition via multi-set + Implicit(usize, Vec
), +} + +// create a chain with the given transitions and some blocks beyond that transition. +fn make_chain(accounts: Arc, blocks_beyond: usize, transitions: Vec) -> Arc { + let client = helpers::generate_dummy_client_with_spec_and_accounts( + spec_fixed_to_contract, Some(accounts.clone())); + + let mut cur_signers = vec![*RICH_ADDR]; + { + let engine = client.engine(); + engine.register_client(Arc::downgrade(&client)); + } + + { + // push a block with given number, signed by one of the signers, with given transactions. + let push_block = |signers: &[Address], n, txs: Vec| { + use block::IsBlock; + + let engine = client.engine(); + let idx = n as usize % signers.len(); + engine.set_signer(accounts.clone(), signers[idx], PASS.to_owned()); + + trace!(target: "snapshot", "Pushing block #{}, {} txs, author={}", n, txs.len(), signers[idx]); + + let mut open_block = client.prepare_open_block(signers[idx], (5_000_000.into(), 5_000_000.into()), Vec::new()); + for tx in txs { + open_block.push_transaction(tx, None).unwrap(); + } + let block = open_block.close_and_lock(); + let seal = match engine.generate_seal(block.block()) { + Seal::Regular(seal) => seal, + _ => panic!("Unable to generate seal for dummy chain block #{}", n), + }; + let block = block.seal(&*engine, seal).unwrap(); + + client.import_sealed_block(block).unwrap(); + }; + + // execution callback for native contract: push transaction to be sealed. 
+ let nonce = RefCell::new(client.engine().account_start_nonce()); + let exec = |addr, data| { + let mut nonce = nonce.borrow_mut(); + let transaction = Transaction { + nonce: *nonce, + gas_price: 0.into(), + gas: 1_000_000.into(), + action: Action::Call(addr), + value: 0.into(), + data: data, + }.sign(&*RICH_SECRET, client.signing_network_id()); + + client.miner().import_own_transaction(&*client, transaction.into()).unwrap(); + + *nonce = *nonce + 1.into(); + Ok(Vec::new()) + }; + + let contract_1 = ValidatorSet::new(*CONTRACT_ADDR_1); + let contract_2 = ValidatorSet::new(*CONTRACT_ADDR_2); + + // apply all transitions. + for transition in transitions { + let (num, manual, new_set) = match transition { + Transition::Manual(num, new_set) => (num, true, new_set), + Transition::Implicit(num, new_set) => (num, false, new_set), + }; + + if num < TRANSITION_BLOCK_1 { + panic!("Bad test: issued epoch change before transition to contract."); + } + + for number in client.chain_info().best_block_number + 1 .. num as u64 { + push_block(&cur_signers, number, vec![]); + } + + let pending = if manual { + trace!(target: "snapshot", "applying set transition at block #{}", num); + let contract = match num >= TRANSITION_BLOCK_2 { + true => &contract_2, + false => &contract_1, + }; + + contract.set_validators(&exec, new_set.clone()).wait().unwrap(); + client.ready_transactions() + .into_iter() + .map(|x| x.transaction) + .collect() + } else { + Vec::new() + }; + + push_block(&cur_signers, num as u64, pending); + cur_signers = new_set; + } + + // make blocks beyond. 
+ for number in (client.chain_info().best_block_number..).take(blocks_beyond) { + push_block(&cur_signers, number + 1, vec![]); + } + } + + client +} + +#[test] +fn fixed_to_contract() { + let (provider, addrs) = make_accounts(&[ + RICH_SECRET.clone(), + secret!("foo"), + secret!("bar"), + secret!("test"), + secret!("signer"), + secret!("crypto"), + secret!("wizard"), + secret!("dog42"), + ]); + + assert!(provider.has_account(*RICH_ADDR).unwrap()); + + let client = make_chain(provider, 1, vec![ + Transition::Manual(3, vec![addrs[2], addrs[3], addrs[5], addrs[7]]), + Transition::Manual(4, vec![addrs[0], addrs[1], addrs[4], addrs[6]]), + ]); + + assert_eq!(client.chain_info().best_block_number, 5); + let reader = snapshot_helpers::snap(&*client); + + let new_db = kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)); + let spec = spec_fixed_to_contract(); + + snapshot_helpers::restore(Arc::new(new_db), &*spec.engine, &**reader, &spec.genesis_block()).unwrap(); +} + +#[test] +fn fixed_to_contract_to_contract() { + let (provider, addrs) = make_accounts(&[ + RICH_SECRET.clone(), + secret!("foo"), + secret!("bar"), + secret!("test"), + secret!("signer"), + secret!("crypto"), + secret!("wizard"), + secret!("dog42"), + ]); + + assert!(provider.has_account(*RICH_ADDR).unwrap()); + + let client = make_chain(provider, 2, vec![ + Transition::Manual(3, vec![addrs[2], addrs[3], addrs[5], addrs[7]]), + Transition::Manual(4, vec![addrs[0], addrs[1], addrs[4], addrs[6]]), + Transition::Implicit(5, vec![addrs[0]]), + Transition::Manual(8, vec![addrs[2], addrs[4], addrs[6], addrs[7]]), + ]); + + assert_eq!(client.chain_info().best_block_number, 10); + let reader = snapshot_helpers::snap(&*client); + let new_db = kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)); + let spec = spec_fixed_to_contract(); + + snapshot_helpers::restore(Arc::new(new_db), &*spec.engine, &**reader, &spec.genesis_block()).unwrap(); +} diff --git a/ethcore/src/snapshot/tests/blocks.rs 
b/ethcore/src/snapshot/tests/proof_of_work.rs similarity index 94% rename from ethcore/src/snapshot/tests/blocks.rs rename to ethcore/src/snapshot/tests/proof_of_work.rs index 2769644e8..e3b65aec4 100644 --- a/ethcore/src/snapshot/tests/blocks.rs +++ b/ethcore/src/snapshot/tests/proof_of_work.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! Block chunker and rebuilder tests. +//! PoW block chunker and rebuilder tests. use devtools::RandomTempPath; use error::Error; @@ -23,8 +23,10 @@ use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer}; use blockchain::BlockChain; use snapshot::{chunk_secondary, Error as SnapshotError, Progress, SnapshotComponents}; use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}; +use state_db::StateDB; use util::{Mutex, snappy}; +use util::journaldb::{self, Algorithm}; use util::kvdb::{self, KeyValueDB, DBTransaction}; use std::sync::Arc; @@ -81,6 +83,7 @@ fn chunk_and_restore(amount: u64) { // restore it. let new_db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0))); let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone()); + let new_state = StateDB::new(journaldb::new(new_db.clone(), Algorithm::Archive, None), 0); let mut rebuilder = SNAPSHOT_MODE.rebuilder(new_chain, new_db.clone(), &manifest).unwrap(); let reader = PackedReader::new(&snapshot_path).unwrap().unwrap(); @@ -91,7 +94,7 @@ fn chunk_and_restore(amount: u64) { rebuilder.feed(&chunk, engine.as_ref(), &flag).unwrap(); } - rebuilder.finalize().unwrap(); + rebuilder.finalize(new_state, engine.as_ref()).unwrap(); drop(rebuilder); // and test it. 
diff --git a/ethcore/src/snapshot/tests/test_validator_contract.json b/ethcore/src/snapshot/tests/test_validator_contract.json new file mode 100644 index 000000000..b422ebde3 --- /dev/null +++ b/ethcore/src/snapshot/tests/test_validator_contract.json @@ -0,0 +1,51 @@ +{ + "name": "TestValidatorContract", + "engine": { + "basicAuthority": { + "params": { + "gasLimitBoundDivisor": "0x0400", + "durationLimit": "0x0d", + "validators": { + "multi": { + "0": { "list": ["0x7d577a597b2742b498cb5cf0c26cdcd726d39e6e"] }, + "2": { "contract": "0x0000000000000000000000000000000000000005" }, + "6": { "contract": "0x0000000000000000000000000000000000000006" } + } + } + } + } + }, + "params": { + "accountStartNonce": "0x0", + "maximumExtraDataSize": "0x20", + "minGasLimit": "0x1388", + "networkID" : "0x69" + }, + "genesis": { + "seal": { + "generic": "0xc180" + }, + "difficulty": "0x20000", + "author": "0x0000000000000000000000000000000000000000", + "timestamp": "0x00", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "0x", + "gasLimit": "0x2fefd8" + }, + "accounts": { + "0000000000000000000000000000000000000001": { "balance": "1", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, + "0000000000000000000000000000000000000002": { "balance": "1", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, + "0000000000000000000000000000000000000003": { "balance": "1", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, + "0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, + "0000000000000000000000000000000000000005": { + "balance": "1", + "constructor": 
"6060604052604060405190810160405280737d577a597b2742b498cb5cf0c26cdcd726d39e6e73ffffffffffffffffffffffffffffffffffffffff1681526020017382a978b3f5962a5b0957d9ee9eef472ee55b42f173ffffffffffffffffffffffffffffffffffffffff16815250600290600261007e929190610096565b50341561008757fe5b5b60006001819055505b610163565b82805482825590600052602060002090810192821561010f579160200282015b8281111561010e5782518260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550916020019190600101906100b6565b5b50905061011c9190610120565b5090565b61016091905b8082111561015c57600081816101000a81549073ffffffffffffffffffffffffffffffffffffffff021916905550600101610126565b5090565b90565b61045d806101726000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063303e98e5146100675780639300c9261461008d578063b7ab4db5146100e4578063bfc708a014610159578063fd6e1b501461018f575bfe5b341561006f57fe5b6100776101c5565b6040518082815260200191505060405180910390f35b341561009557fe5b6100e26004808035906020019082018035906020019080806020026020016040519081016040528093929190818152602001838360200280828437820191505050505050919050506101d0565b005b34156100ec57fe5b6100f46102b3565b6040518080602001828103825283818151815260200191508051906020019060200280838360008314610146575b80518252602083111561014657602082019150602081019050602083039250610122565b5050509050019250505060405180910390f35b341561016157fe5b61018d600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610348565b005b341561019757fe5b6101c3600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190505061034c565b005b600060015490505b90565b600081600290805190602001906101e8929190610350565b50600143034090506000546000191681600019161415156102ae578060008160001916905550600160016000828254019250508190555060015481600019167f47e91f47ccfdcb578564e1af55da55c5e5d33403372fe68e4fed3dfd385764a184604051808060200182810382528381815181526020019150805190602001906020028083836000831461029
b575b80518252602083111561029b57602082019150602081019050602083039250610277565b5050509050019250505060405180910390a35b5b5050565b6102bb6103da565b600280548060200260200160405190810160405280929190818152602001828054801561033d57602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190600101908083116102f3575b505050505090505b90565b5b50565b5b50565b8280548282559060005260206000209081019282156103c9579160200282015b828111156103c85782518260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555091602001919060010190610370565b5b5090506103d691906103ee565b5090565b602060405190810160405280600081525090565b61042e91905b8082111561042a57600081816101000a81549073ffffffffffffffffffffffffffffffffffffffff0219169055506001016103f4565b5090565b905600a165627a7a723058205c9ed1e1da2b93682907ac47377a662b21a5f9d89c4b21be40b098bdb00254360029" + }, + "0000000000000000000000000000000000000006": { + "balance": "1", + "constructor": 
"6060604052602060405190810160405280737d577a597b2742b498cb5cf0c26cdcd726d39e6e73ffffffffffffffffffffffffffffffffffffffff16815250600290600161004e929190610066565b50341561005757fe5b5b60006001819055505b610133565b8280548282559060005260206000209081019282156100df579160200282015b828111156100de5782518260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555091602001919060010190610086565b5b5090506100ec91906100f0565b5090565b61013091905b8082111561012c57600081816101000a81549073ffffffffffffffffffffffffffffffffffffffff0219169055506001016100f6565b5090565b90565b61045d806101426000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063303e98e5146100675780639300c9261461008d578063b7ab4db5146100e4578063bfc708a014610159578063fd6e1b501461018f575bfe5b341561006f57fe5b6100776101c5565b6040518082815260200191505060405180910390f35b341561009557fe5b6100e26004808035906020019082018035906020019080806020026020016040519081016040528093929190818152602001838360200280828437820191505050505050919050506101d0565b005b34156100ec57fe5b6100f46102b3565b6040518080602001828103825283818151815260200191508051906020019060200280838360008314610146575b80518252602083111561014657602082019150602081019050602083039250610122565b5050509050019250505060405180910390f35b341561016157fe5b61018d600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610348565b005b341561019757fe5b6101c3600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190505061034c565b005b600060015490505b90565b600081600290805190602001906101e8929190610350565b50600143034090506000546000191681600019161415156102ae578060008160001916905550600160016000828254019250508190555060015481600019167f47e91f47ccfdcb578564e1af55da55c5e5d33403372fe68e4fed3dfd385764a184604051808060200182810382528381815181526020019150805190602001906020028083836000831461029b575b80518252602083111561029b57602082019150602081019050602083039250610277565b5050509050019250505
060405180910390a35b5b5050565b6102bb6103da565b600280548060200260200160405190810160405280929190818152602001828054801561033d57602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190600101908083116102f3575b505050505090505b90565b5b50565b5b50565b8280548282559060005260206000209081019282156103c9579160200282015b828111156103c85782518260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555091602001919060010190610370565b5b5090506103d691906103ee565b5090565b602060405190810160405280600081525090565b61042e91905b8082111561042a57600081816101000a81549073ffffffffffffffffffffffffffffffffffffffff0219169055506001016103f4565b5090565b905600a165627a7a723058203070810251dcb89c9838d957eb3dbeef357bef0902e0245e3dc3849b6143c3960029" + }, + "0x7d577a597b2742b498cb5cf0c26cdcd726d39e6e": { "balance": "1606938044258990275541962092341162602522202993782792835301376" }, + "0x82a978b3f5962a5b0957d9ee9eef472ee55b42f1": { "balance": "1606938044258990275541962092341162602522202993782792835301376" } + } +} diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index e710559df..90055f275 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -189,7 +189,7 @@ pub fn check_proof( Err(_) => return ProvedExecution::BadProof, }; - match state.execute(env_info, engine, transaction, false) { + match state.execute(env_info, engine, transaction, false, true) { Ok(executed) => ProvedExecution::Complete(executed), Err(ExecutionError::Internal(_)) => ProvedExecution::BadProof, Err(e) => ProvedExecution::Failed(e), @@ -604,7 +604,7 @@ impl State { pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> ApplyResult { // let old = self.to_pod(); - let e = self.execute(env_info, engine, t, tracing)?; + let e = self.execute(env_info, engine, t, tracing, false)?; // trace!("Applied 
transaction. Diff:\n{}\n", state_diff::diff_pod(&old, &self.to_pod())); let state_root = if env_info.number < engine.params().eip98_transition || env_info.number < engine.params().validate_receipts_transition { self.commit()?; @@ -617,12 +617,22 @@ impl State { Ok(ApplyOutcome{receipt: receipt, trace: e.trace}) } - // Execute a given transaction. - fn execute(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> Result { + // Execute a given transaction without committing changes. + // + // `virt` signals that we are executing outside of a block set and restrictions like + // gas limits and gas costs should be lifted. + fn execute(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool, virt: bool) + -> Result + { let options = TransactOptions { tracing: tracing, vm_tracing: false, check_nonce: true }; let vm_factory = self.factories.vm.clone(); - Executive::new(self, env_info, engine, &vm_factory).transact(t, options) + let mut e = Executive::new(self, env_info, engine, &vm_factory); + + match virt { + true => e.transact_virtual(t, options), + false => e.transact(t, options), + } } diff --git a/rpc/src/v1/tests/helpers/snapshot_service.rs b/rpc/src/v1/tests/helpers/snapshot_service.rs index f03eb3bc3..efecbcb9b 100644 --- a/rpc/src/v1/tests/helpers/snapshot_service.rs +++ b/rpc/src/v1/tests/helpers/snapshot_service.rs @@ -41,6 +41,7 @@ impl TestSnapshotService { impl SnapshotService for TestSnapshotService { fn manifest(&self) -> Option { None } + fn min_supported_version(&self) -> Option { None } fn chunk(&self, _hash: H256) -> Option { None } fn status(&self) -> RestorationStatus { self.status.lock().clone() } fn begin_restore(&self, _manifest: ManifestData) { } diff --git a/sync/src/chain.rs b/sync/src/chain.rs index be2aeada7..f0f98d3dd 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -158,8 +158,6 @@ pub const SNAPSHOT_SYNC_PACKET_COUNT: u8 = 0x16; const 
MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD: usize = 3; -const MIN_SUPPORTED_SNAPSHOT_MANIFEST_VERSION: u64 = 1; - const WAIT_PEERS_TIMEOUT_SEC: u64 = 5; const STATUS_TIMEOUT_SEC: u64 = 5; const HEADERS_TIMEOUT_SEC: u64 = 15; @@ -504,7 +502,7 @@ impl ChainSync { } fn maybe_start_snapshot_sync(&mut self, io: &mut SyncIo) { - if !self.enable_warp_sync { + if !self.enable_warp_sync || io.snapshot_service().min_supported_version().is_none() { return; } if self.state != SyncState::WaitingPeers && self.state != SyncState::Blocks && self.state != SyncState::Waiting { @@ -1042,7 +1040,11 @@ impl ChainSync { } Ok(manifest) => manifest, }; - if manifest.version < MIN_SUPPORTED_SNAPSHOT_MANIFEST_VERSION { + + let is_supported_version = io.snapshot_service().min_supported_version() + .map_or(false, |v| manifest.version >= v); + + if !is_supported_version { trace!(target: "sync", "{}: Snapshot manifest version too low: {}", peer_id, manifest.version); io.disable_peer(peer_id); self.continue_sync(io); diff --git a/sync/src/tests/snapshot.rs b/sync/src/tests/snapshot.rs index 0f97ec913..995d7a056 100644 --- a/sync/src/tests/snapshot.rs +++ b/sync/src/tests/snapshot.rs @@ -71,6 +71,10 @@ impl SnapshotService for TestSnapshotService { self.manifest.as_ref().cloned() } + fn min_supported_version(&self) -> Option { + Some(1) + } + fn chunk(&self, hash: H256) -> Option { self.chunks.get(&hash).cloned() }