diff --git a/Cargo.lock b/Cargo.lock
index 1d81b0474..b9cbff049 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -513,7 +513,9 @@ dependencies = [
  "ethcore-ipc-codegen 1.6.0",
  "ethcore-network 1.6.0",
  "ethcore-util 1.6.0",
+ "futures 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rlp 0.1.0",
  "smallvec 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml
index 9372f522f..534b0e5c1 100644
--- a/ethcore/light/Cargo.toml
+++ b/ethcore/light/Cargo.toml
@@ -20,6 +20,8 @@ ethcore-ipc = { path = "../../ipc/rpc", optional = true }
 rlp = { path = "../../util/rlp" }
 time = "0.1"
 smallvec = "0.3.1"
+futures = "0.1"
+rand = "0.3"
 
 [features]
 default = []
diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs
index 5cdc3addc..0e650f992 100644
--- a/ethcore/light/src/lib.rs
+++ b/ethcore/light/src/lib.rs
@@ -34,6 +34,7 @@
 pub mod client;
 pub mod net;
+pub mod on_demand;
 
 #[cfg(not(feature = "ipc"))]
 pub mod provider;
@@ -64,6 +65,8 @@ extern crate ethcore_io as io;
 extern crate rlp;
 extern crate smallvec;
 extern crate time;
+extern crate futures;
+extern crate rand;
 
 #[cfg(feature = "ipc")]
 extern crate ethcore_ipc as ipc;
diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs
index 03a19e14b..18425152c 100644
--- a/ethcore/light/src/net/mod.rs
+++ b/ethcore/light/src/net/mod.rs
@@ -30,6 +30,7 @@ use util::{Bytes, Mutex, RwLock, U256};
 use time::{Duration, SteadyTime};
 
 use std::collections::HashMap;
+use std::fmt;
 use std::sync::Arc;
 use std::sync::atomic::{AtomicUsize, Ordering};
@@ -123,6 +124,12 @@ mod timeout {
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
 pub struct ReqId(usize);
 
+impl fmt::Display for ReqId {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		write!(f, "Request #{}", self.0)
+	}
+}
+
 // A pending peer: one we've sent our status to but
 // may not have received one for.
 struct PendingPeer {
@@ -186,12 +193,12 @@ pub trait Handler: Send + Sync {
 	fn on_block_headers(&self, _ctx: &EventContext, _req_id: ReqId, _headers: &[Bytes]) { }
 	/// Called when a peer responds with block receipts.
 	fn on_receipts(&self, _ctx: &EventContext, _req_id: ReqId, _receipts: &[Vec<Receipt>]) { }
-	/// Called when a peer responds with state proofs. Each proof is a series of trie
+	/// Called when a peer responds with state proofs. Each proof should be a series of trie
 	/// nodes in ascending order by distance from the root.
 	fn on_state_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[Vec<Bytes>]) { }
 	/// Called when a peer responds with contract code.
 	fn on_code(&self, _ctx: &EventContext, _req_id: ReqId, _codes: &[Bytes]) { }
-	/// Called when a peer responds with header proofs. Each proof is a block header coupled
+	/// Called when a peer responds with header proofs. Each proof should be a block header coupled
 	/// with a series of trie nodes in ascending order by distance from the root.
 	fn on_header_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[(Bytes, Vec<Bytes>)]) { }
 	/// Called to "tick" the handler periodically.
diff --git a/ethcore/light/src/net/status.rs b/ethcore/light/src/net/status.rs
index d058bc2f2..a11ed5b11 100644
--- a/ethcore/light/src/net/status.rs
+++ b/ethcore/light/src/net/status.rs
@@ -158,6 +158,16 @@ pub struct Status {
 	pub last_head: Option<(H256, u64)>,
 }
 
+impl Status {
+	/// Update the status from an announcement.
+	pub fn update_from(&mut self, announcement: &Announcement) {
+		self.last_head = Some((self.head_hash, announcement.reorg_depth));
+		self.head_td = announcement.head_td;
+		self.head_hash = announcement.head_hash;
+		self.head_num = announcement.head_num;
+	}
+}
+
 /// Peer capabilities.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct Capabilities {
diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs
new file mode 100644
index 000000000..aadb43c0e
--- /dev/null
+++ b/ethcore/light/src/on_demand/mod.rs
@@ -0,0 +1,520 @@
+// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! On-demand chain requests over LES. This is a major building block for RPCs.
+//! The request service is implemented using Futures. Higher level request handlers
+//! will take the raw data received here and extract meaningful results from it.
+
+use std::collections::HashMap;
+
+use ethcore::basic_account::BasicAccount;
+use ethcore::encoded;
+use ethcore::receipt::Receipt;
+
+use futures::{Async, Poll, Future};
+use futures::sync::oneshot;
+use network::PeerId;
+
+use net::{Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId};
+use util::{Bytes, RwLock};
+use types::les_request::{self as les_request, Request as LesRequest};
+
+pub mod request;
+
+/// Errors which can occur while trying to fulfill a request.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum Error {
+	/// Request was canceled.
+	Canceled,
+	/// No suitable peers available to serve the request.
+	NoPeersAvailable,
+	/// Request timed out.
+	TimedOut,
+}
+
+impl From<oneshot::Canceled> for Error {
+	fn from(_: oneshot::Canceled) -> Self {
+		Error::Canceled
+	}
+}
+
+/// Future which awaits a response to an on-demand request.
+pub struct Response<T>(oneshot::Receiver<Result<T, Error>>);
+
+impl<T> Future for Response<T> {
+	type Item = T;
+	type Error = Error;
+
+	fn poll(&mut self) -> Poll<T, Error> {
+		match self.0.poll().map_err(Into::into) {
+			Ok(Async::Ready(val)) => val.map(Async::Ready),
+			Ok(Async::NotReady) => Ok(Async::NotReady),
+			Err(e) => Err(e),
+		}
+	}
+}
+
+type Sender<T> = oneshot::Sender<Result<T, Error>>;
+
+// relevant peer info.
+struct Peer {
+	status: Status,
+	capabilities: Capabilities,
+}
+
+// Attempted request info and sender to put received value.
+enum Pending {
+	HeaderByNumber(request::HeaderByNumber, Sender<encoded::Header>), // num + CHT root
+	HeaderByHash(request::HeaderByHash, Sender<encoded::Header>),
+	Block(request::Body, Sender<encoded::Block>),
+	BlockReceipts(request::BlockReceipts, Sender<Vec<Receipt>>),
+	Account(request::Account, Sender<BasicAccount>),
+}
+
+/// On demand request service. See module docs for more details.
+/// Accumulates info about all peers' capabilities and dispatches
+/// requests to them accordingly.
+pub struct OnDemand {
+	peers: RwLock<HashMap<PeerId, Peer>>,
+	pending_requests: RwLock<HashMap<ReqId, Pending>>,
+}
+
+impl Default for OnDemand {
+	fn default() -> Self {
+		OnDemand {
+			peers: RwLock::new(HashMap::new()),
+			pending_requests: RwLock::new(HashMap::new()),
+		}
+	}
+}
+
+impl OnDemand {
+	/// Request a header by block number and CHT root hash.
+	pub fn header_by_number(&self, ctx: &BasicContext, req: request::HeaderByNumber) -> Response<encoded::Header> {
+		let (sender, receiver) = oneshot::channel();
+		self.dispatch_header_by_number(ctx, req, sender);
+		Response(receiver)
+	}
+
+	// dispatch the request, completing the request if no peers available.
+	fn dispatch_header_by_number(&self, ctx: &BasicContext, req: request::HeaderByNumber, sender: Sender<encoded::Header>) {
+		let num = req.num;
+		let cht_num = ::client::cht::block_to_cht_number(req.num);
+		let les_req = LesRequest::HeaderProofs(les_request::HeaderProofs {
+			requests: vec![les_request::HeaderProof {
+				cht_number: cht_num,
+				block_number: req.num,
+				from_level: 0,
+			}],
+		});
+
+		// we're looking for a peer with serveHeaders who's far enough along in the
+		// chain.
+		for (id, peer) in self.peers.read().iter() {
+			if peer.capabilities.serve_headers && peer.status.head_num >= num {
+				match ctx.request_from(*id, les_req.clone()) {
+					Ok(req_id) => {
+						trace!(target: "on_demand", "Assigning request to peer {}", id);
+						self.pending_requests.write().insert(
+							req_id,
+							Pending::HeaderByNumber(req, sender)
+						);
+						return
+					},
+					Err(e) =>
+						trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e),
+				}
+			}
+		}
+
+		// TODO: retrying
+		trace!(target: "on_demand", "No suitable peer for request");
+		sender.complete(Err(Error::NoPeersAvailable));
+	}
+
+	/// Request a header by hash. This is less accurate than by-number because we don't know
+	/// where in the chain this header lies, and therefore can't find a peer who is supposed to have
+	/// it as easily.
+	pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> Response<encoded::Header> {
+		let (sender, receiver) = oneshot::channel();
+		self.dispatch_header_by_hash(ctx, req, sender);
+		Response(receiver)
+	}
+
+	fn dispatch_header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash, sender: Sender<encoded::Header>) {
+		let les_req = LesRequest::Headers(les_request::Headers {
+			start: req.0.into(),
+			max: 1,
+			skip: 0,
+			reverse: false,
+		});
+
+		// all we've got is a hash, so we'll just guess at peers who might have
+		// it randomly.
+		let mut potential_peers = self.peers.read().iter()
+			.filter(|&(_, peer)| peer.capabilities.serve_headers)
+			.map(|(id, _)| *id)
+			.collect::<Vec<_>>();
+
+		let mut rng = ::rand::thread_rng();
+
+		::rand::Rng::shuffle(&mut rng, &mut potential_peers);
+
+		for id in potential_peers {
+			match ctx.request_from(id, les_req.clone()) {
+				Ok(req_id) => {
+					trace!(target: "on_demand", "Assigning request to peer {}", id);
+					self.pending_requests.write().insert(
+						req_id,
+						Pending::HeaderByHash(req, sender),
+					);
+					return
+				}
+				Err(e) =>
+					trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e),
+			}
+		}
+
+		// TODO: retrying
+		trace!(target: "on_demand", "No suitable peer for request");
+		sender.complete(Err(Error::NoPeersAvailable));
+	}
+
+	/// Request a block, given its header. Block bodies are requestable by hash only,
+	/// and the header is required anyway to verify and complete the block body
+	/// -- this just doesn't obscure the network query.
+	pub fn block(&self, ctx: &BasicContext, req: request::Body) -> Response<encoded::Block> {
+		let (sender, receiver) = oneshot::channel();
+		self.dispatch_block(ctx, req, sender);
+		Response(receiver)
+	}
+
+	fn dispatch_block(&self, ctx: &BasicContext, req: request::Body, sender: Sender<encoded::Block>) {
+		let num = req.header.number();
+		let les_req = LesRequest::Bodies(les_request::Bodies {
+			block_hashes: vec![req.hash],
+		});
+
+		// we're looking for a peer with serveChainSince(num)
+		for (id, peer) in self.peers.read().iter() {
+			if peer.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= num) {
+				match ctx.request_from(*id, les_req.clone()) {
+					Ok(req_id) => {
+						trace!(target: "on_demand", "Assigning request to peer {}", id);
+						self.pending_requests.write().insert(
+							req_id,
+							Pending::Block(req, sender)
+						);
+						return
+					}
+					Err(e) =>
+						trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e),
+				}
+			}
+		}
+
+		// TODO: retrying
+		trace!(target: "on_demand", "No suitable peer for request");
+		sender.complete(Err(Error::NoPeersAvailable));
+	}
+
+	/// Request the receipts for a block. The header serves two purposes:
+	/// provide the block hash to fetch receipts for, and for verification of the receipts root.
+	pub fn block_receipts(&self, ctx: &BasicContext, req: request::BlockReceipts) -> Response<Vec<Receipt>> {
+		let (sender, receiver) = oneshot::channel();
+		self.dispatch_block_receipts(ctx, req, sender);
+		Response(receiver)
+	}
+
+	fn dispatch_block_receipts(&self, ctx: &BasicContext, req: request::BlockReceipts, sender: Sender<Vec<Receipt>>) {
+		let num = req.0.number();
+		let les_req = LesRequest::Receipts(les_request::Receipts {
+			block_hashes: vec![req.0.hash()],
+		});
+
+		// we're looking for a peer with serveChainSince(num)
+		for (id, peer) in self.peers.read().iter() {
+			if peer.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= num) {
+				match ctx.request_from(*id, les_req.clone()) {
+					Ok(req_id) => {
+						trace!(target: "on_demand", "Assigning request to peer {}", id);
+						self.pending_requests.write().insert(
+							req_id,
+							Pending::BlockReceipts(req, sender)
+						);
+						return
+					}
+					Err(e) =>
+						trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e),
+				}
+			}
+		}
+
+		// TODO: retrying
+		trace!(target: "on_demand", "No suitable peer for request");
+		sender.complete(Err(Error::NoPeersAvailable));
+	}
+
+	/// Request an account by address and block header -- which gives a hash to query and a state root
+	/// to verify against.
+	pub fn account(&self, ctx: &BasicContext, req: request::Account) -> Response<BasicAccount> {
+		let (sender, receiver) = oneshot::channel();
+		self.dispatch_account(ctx, req, sender);
+		Response(receiver)
+	}
+
+	fn dispatch_account(&self, ctx: &BasicContext, req: request::Account, sender: Sender<BasicAccount>) {
+		let num = req.header.number();
+		let les_req = LesRequest::StateProofs(les_request::StateProofs {
+			requests: vec![les_request::StateProof {
+				block: req.header.hash(),
+				key1: ::util::Hashable::sha3(&req.address),
+				key2: None,
+				from_level: 0,
+			}],
+		});
+
+		// we're looking for a peer with serveStateSince(num)
+		for (id, peer) in self.peers.read().iter() {
+			if peer.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= num) {
+				match ctx.request_from(*id, les_req.clone()) {
+					Ok(req_id) => {
+						trace!(target: "on_demand", "Assigning request to peer {}", id);
+						self.pending_requests.write().insert(
+							req_id,
+							Pending::Account(req, sender)
+						);
+						return
+					}
+					Err(e) =>
+						trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e),
+				}
+			}
+		}
+
+		// TODO: retrying
+		trace!(target: "on_demand", "No suitable peer for request");
+		sender.complete(Err(Error::NoPeersAvailable));
+	}
+}
+
+impl Handler for OnDemand {
+	fn on_connect(&self, ctx: &EventContext, status: &Status, capabilities: &Capabilities) {
+		self.peers.write().insert(ctx.peer(), Peer { status: status.clone(), capabilities: capabilities.clone() });
+	}
+
+	fn on_disconnect(&self, ctx: &EventContext, unfulfilled: &[ReqId]) {
+		self.peers.write().remove(&ctx.peer());
+		let ctx = ctx.as_basic();
+
+		for unfulfilled in unfulfilled {
+			if let Some(pending) = self.pending_requests.write().remove(unfulfilled) {
+				trace!(target: "on_demand", "Attempting to reassign dropped request");
+				match pending {
+					Pending::HeaderByNumber(req, sender)
+						=> self.dispatch_header_by_number(ctx, req, sender),
+					Pending::HeaderByHash(req, sender)
+						=> self.dispatch_header_by_hash(ctx, req, sender),
+					Pending::Block(req, sender)
+						=> self.dispatch_block(ctx, req, sender),
+					Pending::BlockReceipts(req, sender)
+						=> self.dispatch_block_receipts(ctx, req, sender),
+					Pending::Account(req, sender)
+						=> self.dispatch_account(ctx, req, sender),
+				}
+			}
+		}
+	}
+
+	fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) {
+		let mut peers = self.peers.write();
+		if let Some(ref mut peer) = peers.get_mut(&ctx.peer()) {
+			peer.status.update_from(&announcement);
+			peer.capabilities.update_from(&announcement);
+		}
+	}
+
+	fn on_header_proofs(&self, ctx: &EventContext, req_id: ReqId, proofs: &[(Bytes, Vec<Bytes>)]) {
+		let peer = ctx.peer();
+		let req = match self.pending_requests.write().remove(&req_id) {
+			Some(req) => req,
+			None => return,
+		};
+
+		match req {
+			Pending::HeaderByNumber(req, sender) => {
+				if let Some(&(ref header, ref proof)) = proofs.get(0) {
+					match req.check_response(header, proof) {
+						Ok(header) => {
+							sender.complete(Ok(header));
+							return
+						}
+						Err(e) => {
+							warn!("Error handling response for header request: {:?}", e);
+							ctx.disable_peer(peer);
+						}
+					}
+				}
+
+				self.dispatch_header_by_number(ctx.as_basic(), req, sender);
+			}
+			_ => panic!("Only header by number request fetches header proofs; qed"),
+		}
+	}
+
+	fn on_block_headers(&self, ctx: &EventContext, req_id: ReqId, headers: &[Bytes]) {
+		let peer = ctx.peer();
+		let req = match self.pending_requests.write().remove(&req_id) {
+			Some(req) => req,
+			None => return,
+		};
+
+		match req {
+			Pending::HeaderByHash(req, sender) => {
+				if let Some(ref header) = headers.get(0) {
+					match req.check_response(header) {
+						Ok(header) => {
+							sender.complete(Ok(header));
+							return
+						}
+						Err(e) => {
+							warn!("Error handling response for header request: {:?}", e);
+							ctx.disable_peer(peer);
+						}
+					}
+				}
+
+				self.dispatch_header_by_hash(ctx.as_basic(), req, sender);
+			}
+			_ => panic!("Only header by hash request fetches headers; qed"),
+		}
+	}
+
+	fn on_block_bodies(&self, ctx: &EventContext, req_id: ReqId, bodies: &[Bytes]) {
+		let peer = ctx.peer();
+		let req = match self.pending_requests.write().remove(&req_id) {
+			Some(req) => req,
+			None => return,
+		};
+
+		match req {
+			Pending::Block(req, sender) => {
+				if let Some(ref block) = bodies.get(0) {
+					match req.check_response(block) {
+						Ok(block) => {
+							sender.complete(Ok(block));
+							return
+						}
+						Err(e) => {
+							warn!("Error handling response for block request: {:?}", e);
+							ctx.disable_peer(peer);
+						}
+					}
+				}
+
+				self.dispatch_block(ctx.as_basic(), req, sender);
+			}
+			_ => panic!("Only block request fetches bodies; qed"),
+		}
+	}
+
+	fn on_receipts(&self, ctx: &EventContext, req_id: ReqId, receipts: &[Vec<Receipt>]) {
+		let peer = ctx.peer();
+		let req = match self.pending_requests.write().remove(&req_id) {
+			Some(req) => req,
+			None => return,
+		};
+
+		match req {
+			Pending::BlockReceipts(req, sender) => {
+				if let Some(ref receipts) = receipts.get(0) {
+					match req.check_response(receipts) {
+						Ok(receipts) => {
+							sender.complete(Ok(receipts));
+							return
+						}
+						Err(e) => {
+							warn!("Error handling response for receipts request: {:?}", e);
+							ctx.disable_peer(peer);
+						}
+					}
+				}
+
+				self.dispatch_block_receipts(ctx.as_basic(), req, sender);
+			}
+			_ => panic!("Only receipts request fetches receipts; qed"),
+		}
+	}
+
+	fn on_state_proofs(&self, ctx: &EventContext, req_id: ReqId, proofs: &[Vec<Bytes>]) {
+		let peer = ctx.peer();
+		let req = match self.pending_requests.write().remove(&req_id) {
+			Some(req) => req,
+			None => return,
+		};
+
+		match req {
+			Pending::Account(req, sender) => {
+				if let Some(ref proof) = proofs.get(0) {
+					match req.check_response(proof) {
+						Ok(proof) => {
+							sender.complete(Ok(proof));
+							return
+						}
+						Err(e) => {
+							warn!("Error handling response for state request: {:?}", e);
+							ctx.disable_peer(peer);
+						}
+					}
+				}
+
+				self.dispatch_account(ctx.as_basic(), req, sender);
+			}
+			_ => panic!("Only account request fetches state proof; qed"),
+		}
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use net::{Announcement, BasicContext, ReqId, Error as LesError};
+	use request::{Request as LesRequest, Kind as LesRequestKind};
+	use network::{PeerId, NodeId};
+	use futures::Future;
+	use util::H256;
+
+	struct FakeContext;
+
+	impl BasicContext for FakeContext {
+		fn persistent_peer_id(&self, _: PeerId) -> Option<NodeId> { None }
+		fn request_from(&self, _: PeerId, _: LesRequest) -> Result<ReqId, LesError> {
+			unimplemented!()
+		}
+		fn make_announcement(&self, _: Announcement) { }
+		fn max_requests(&self, _: PeerId, _: LesRequestKind) -> usize { 0 }
+		fn disconnect_peer(&self, _: PeerId) { }
+		fn disable_peer(&self, _: PeerId) { }
+	}
+
+	#[test]
+	fn no_peers() {
+		let on_demand = OnDemand::default();
+		let result = on_demand.header_by_hash(&FakeContext, request::HeaderByHash(H256::default()));
+
+		assert_eq!(result.wait().unwrap_err(), Error::NoPeersAvailable);
+	}
+}
diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs
new file mode 100644
index 000000000..86fcc5d05
--- /dev/null
+++ b/ethcore/light/src/on_demand/request.rs
@@ -0,0 +1,337 @@
+// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Request types, verification, and verification errors.
+
+use ethcore::basic_account::BasicAccount;
+use ethcore::encoded;
+use ethcore::receipt::Receipt;
+
+use rlp::{RlpStream, Stream, UntrustedRlp, View};
+use util::{Address, Bytes, HashDB, H256};
+use util::memorydb::MemoryDB;
+use util::sha3::Hashable;
+use util::trie::{Trie, TrieDB, TrieError};
+
+/// Errors in verification.
+#[derive(Debug, PartialEq)]
+pub enum Error {
+	/// RLP decoder error.
+	Decoder(::rlp::DecoderError),
+	/// Trie lookup error (result of bad proof)
+	Trie(TrieError),
+	/// Bad inclusion proof
+	BadProof,
+	/// Wrong header number.
+	WrongNumber(u64, u64),
+	/// Wrong header hash.
+	WrongHash(H256, H256),
+	/// Wrong trie root.
+	WrongTrieRoot(H256, H256),
+}
+
+impl From<::rlp::DecoderError> for Error {
+	fn from(err: ::rlp::DecoderError) -> Self {
+		Error::Decoder(err)
+	}
+}
+
+impl From<Box<TrieError>> for Error {
+	fn from(err: Box<TrieError>) -> Self {
+		Error::Trie(*err)
+	}
+}
+
+/// Request for a header by number.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct HeaderByNumber {
+	/// The header's number.
+	pub num: u64,
+	/// The root of the CHT containing this header.
+	pub cht_root: H256,
+}
+
+impl HeaderByNumber {
+	/// Check a response with a header and cht proof.
+	pub fn check_response(&self, header: &[u8], proof: &[Bytes]) -> Result<encoded::Header, Error> {
+		use util::trie::{Trie, TrieDB};
+
+		// check the proof
+		let mut db = MemoryDB::new();
+
+		for node in proof { db.insert(&node[..]); }
+		let key = ::rlp::encode(&self.num);
+
+		let expected_hash: H256 = match TrieDB::new(&db, &self.cht_root).and_then(|t| t.get(&*key))? {
+			Some(val) => ::rlp::decode(&val),
+			None => return Err(Error::BadProof)
+		};
+
+		// and compare the hash to the found header.
+		let found_hash = header.sha3();
+		match expected_hash == found_hash {
+			true => Ok(encoded::Header::new(header.to_vec())),
+			false => Err(Error::WrongHash(expected_hash, found_hash)),
+		}
+	}
+}
+
+/// Request for a header by hash.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct HeaderByHash(pub H256);
+
+impl HeaderByHash {
+	/// Check a response for the header.
+	pub fn check_response(&self, header: &[u8]) -> Result<encoded::Header, Error> {
+		let hash = header.sha3();
+		match hash == self.0 {
+			true => Ok(encoded::Header::new(header.to_vec())),
+			false => Err(Error::WrongHash(self.0, hash)),
+		}
+	}
+}
+
+/// Request for a block, with header and precomputed hash.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct Body {
+	/// The block's header.
+	pub header: encoded::Header,
+	/// The block's hash.
+	pub hash: H256,
+}
+
+impl Body {
+	/// Check a response for this block body.
+	pub fn check_response(&self, body: &[u8]) -> Result<encoded::Block, Error> {
+		let body_view = UntrustedRlp::new(&body);
+
+		// check the integrity of the body against the header
+		let tx_root = ::util::triehash::ordered_trie_root(body_view.at(0)?.iter().map(|r| r.as_raw().to_vec()));
+		if tx_root != self.header.transactions_root() {
+			return Err(Error::WrongTrieRoot(self.header.transactions_root(), tx_root));
+		}
+
+		let uncles_hash = body_view.at(1)?.as_raw().sha3();
+		if uncles_hash != self.header.uncles_hash() {
+			return Err(Error::WrongHash(self.header.uncles_hash(), uncles_hash));
+		}
+
+		// concatenate the header and the body.
+		let mut stream = RlpStream::new_list(3);
+		stream.append_raw(self.header.rlp().as_raw(), 1);
+		stream.append_raw(body, 2);
+
+		Ok(encoded::Block::new(stream.out()))
+	}
+}
+
+/// Request for a block's receipts with header for verification.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct BlockReceipts(pub encoded::Header);
+
+impl BlockReceipts {
+	/// Check a response with receipts against the stored header.
+	pub fn check_response(&self, receipts: &[Receipt]) -> Result<Vec<Receipt>, Error> {
+		let receipts_root = self.0.receipts_root();
+		let found_root = ::util::triehash::ordered_trie_root(receipts.iter().map(|r| ::rlp::encode(r).to_vec()));
+
+		match receipts_root == found_root {
+			true => Ok(receipts.to_vec()),
+			false => Err(Error::WrongTrieRoot(receipts_root, found_root)),
+		}
+	}
+}
+
+/// Request for an account structure.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct Account {
+	/// Header for verification.
+	pub header: encoded::Header,
+	/// Address requested.
+	pub address: Address,
+}
+
+impl Account {
+	/// Check a response with an account against the stored header.
+	pub fn check_response(&self, proof: &[Bytes]) -> Result<BasicAccount, Error> {
+		let state_root = self.header.state_root();
+
+		let mut db = MemoryDB::new();
+		for node in proof { db.insert(&node[..]); }
+
+		match TrieDB::new(&db, &state_root).and_then(|t| t.get(&self.address.sha3()))? {
+			Some(val) => {
+				let rlp = UntrustedRlp::new(&val);
+				Ok(BasicAccount {
+					nonce: rlp.val_at(0)?,
+					balance: rlp.val_at(1)?,
+					storage_root: rlp.val_at(2)?,
+					code_hash: rlp.val_at(3)?,
+				})
+			},
+			None => Err(Error::BadProof)
+		}
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use util::{MemoryDB, Address, H256, FixedHash};
+	use util::trie::{Trie, TrieMut, TrieDB, SecTrieDB, TrieDBMut, SecTrieDBMut};
+	use util::trie::recorder::Recorder;
+
+	use ethcore::header::Header;
+	use ethcore::encoded;
+	use ethcore::receipt::Receipt;
+
+	#[test]
+	fn check_header_by_number() {
+		let mut root = H256::default();
+		let mut db = MemoryDB::new();
+		let mut header = Header::new();
+		header.set_number(10_000);
+		header.set_extra_data(b"test_header".to_vec());
+
+		{
+			let mut trie = TrieDBMut::new(&mut db, &mut root);
+			for i in (0..2048u64).map(|x| x + 8192) {
+				let hash = if i == 10_000 {
+					header.hash()
+				} else {
+					H256::random()
+				};
+				trie.insert(&*::rlp::encode(&i), &*::rlp::encode(&hash)).unwrap();
+			}
+		}
+
+		let proof = {
+			let trie = TrieDB::new(&db, &root).unwrap();
+			let key = ::rlp::encode(&10_000u64);
+			let mut recorder = Recorder::new();
+
+			trie.get_with(&*key, &mut recorder).unwrap().unwrap();
+
+			recorder.drain().into_iter().map(|r| r.data).collect::<Vec<_>>()
+		};
+
+		let req = HeaderByNumber {
+			num: 10_000,
+			cht_root: root,
+		};
+
+		let raw_header = ::rlp::encode(&header);
+
+		assert!(req.check_response(&*raw_header, &proof[..]).is_ok());
+	}
+
+	#[test]
+	fn check_header_by_hash() {
+		let mut header = Header::new();
+		header.set_number(10_000);
+		header.set_extra_data(b"test_header".to_vec());
+		let hash = header.hash();
+		let raw_header = ::rlp::encode(&header);
+
+		assert!(HeaderByHash(hash).check_response(&*raw_header).is_ok())
+	}
+
+	#[test]
+	fn check_body() {
+		use rlp::{RlpStream, Stream};
+
+		let header = Header::new();
+		let mut body_stream = RlpStream::new_list(2);
+		body_stream.begin_list(0).begin_list(0);
+
+		let req = Body {
+			header: encoded::Header::new(::rlp::encode(&header).to_vec()),
+			hash: header.hash(),
+		};
+
+		assert!(req.check_response(&*body_stream.drain()).is_ok())
+	}
+
+	#[test]
+	fn check_receipts() {
+		let receipts = (0..5).map(|_| Receipt {
+			state_root: H256::random(),
+			gas_used: 21_000u64.into(),
+			log_bloom: Default::default(),
+			logs: Vec::new(),
+		}).collect::<Vec<_>>();
+
+		let mut header = Header::new();
+		let receipts_root = ::util::triehash::ordered_trie_root(
+			receipts.iter().map(|x| ::rlp::encode(x).to_vec())
+		);
+
+		header.set_receipts_root(receipts_root);
+
+		let req = BlockReceipts(encoded::Header::new(::rlp::encode(&header).to_vec()));
+
+		assert!(req.check_response(&receipts).is_ok())
+	}
+
+	#[test]
+	fn check_state_proof() {
+		use rlp::{RlpStream, Stream};
+
+		let mut root = H256::default();
+		let mut db = MemoryDB::new();
+		let mut header = Header::new();
+		header.set_number(123_456);
+		header.set_extra_data(b"test_header".to_vec());
+
+		let addr = Address::random();
+		let rand_acc = || {
+			let mut stream = RlpStream::new_list(4);
+			stream.append(&2u64)
+				.append(&100_000_000u64)
+				.append(&H256::random())
+				.append(&H256::random());
+
+			stream.out()
+		};
+		{
+			let mut trie = SecTrieDBMut::new(&mut db, &mut root);
+			for _ in 0..100 {
+				let address = Address::random();
+				trie.insert(&*address, &rand_acc()).unwrap();
+			}
+
+			trie.insert(&*addr, &rand_acc()).unwrap();
+		}
+
+		let proof = {
+			let trie = SecTrieDB::new(&db, &root).unwrap();
+			let mut recorder = Recorder::new();
+
+			trie.get_with(&*addr, &mut recorder).unwrap().unwrap();
+
+			recorder.drain().into_iter().map(|r| r.data).collect::<Vec<_>>()
+		};
+
+		header.set_state_root(root.clone());
+
+		let req = Account {
+			header: encoded::Header::new(::rlp::encode(&header).to_vec()),
+			address: addr,
+		};
+
+		assert!(req.check_response(&proof[..]).is_ok());
+	}
+}
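
Usage illustration (not part of the patch): a caller holding an OnDemand and a BasicContext -- typically available inside an LES Handler callback -- dispatches a request and treats the returned Response as a future. This is a minimal sketch under assumptions: the fetch_header function name, the way ctx and cht_root are obtained, and the `light`/`util` crate paths are placeholders; only OnDemand::header_by_number, request::HeaderByNumber, and the Future impl on Response come from this diff.

	// Sketch only: assumes the crate is imported as `light` and that the caller
	// already has a BasicContext and the CHT root covering the requested block.
	use futures::Future; // for `.wait()`
	use light::net::BasicContext;
	use light::on_demand::{OnDemand, request};
	use util::H256;

	fn fetch_header(on_demand: &OnDemand, ctx: &BasicContext, cht_root: H256) {
		let req = request::HeaderByNumber { num: 10_000, cht_root: cht_root };
		// `header_by_number` dispatches to a suitable peer (or fails fast with
		// Error::NoPeersAvailable) and returns a future resolving to a verified header.
		match on_demand.header_by_number(ctx, req).wait() {
			Ok(header) => println!("got header #{}", header.number()),
			Err(e) => println!("header request failed: {:?}", e),
		}
	}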