Remote transaction execution (#4684)

* return errors on database corruption

* fix tests, json tests

* fix remainder of build

* buffer flow -> request credits

* proving state backend

* generate transaction proofs from provider

* network messages for transaction proof

* transaction proof test

* test for transaction proof message

* fix call bug

* request transaction proofs from on_demand

* most of proved_execution rpc

* proved execution future
Robert Habermeier, 2017-03-08 14:39:44 +01:00 (committed by Gav Wood)
parent 5bbcf0482b
commit 8a3b5c6332
25 changed files with 993 additions and 253 deletions

View File

@@ -31,7 +31,7 @@ use ethcore::service::ClientIoMessage;
 use ethcore::encoded;
 use io::IoChannel;
-use util::{Bytes, H256, Mutex, RwLock};
+use util::{Bytes, DBValue, H256, Mutex, RwLock};
 use self::header_chain::{AncestryIter, HeaderChain};
@@ -230,22 +230,32 @@ impl Client {
 	}
 
 	/// Get a handle to the verification engine.
-	pub fn engine(&self) -> &Engine {
-		&*self.engine
+	pub fn engine(&self) -> &Arc<Engine> {
+		&self.engine
 	}
 
-	fn latest_env_info(&self) -> EnvInfo {
-		let header = self.best_block_header();
+	/// Get the latest environment info.
+	pub fn latest_env_info(&self) -> EnvInfo {
+		self.env_info(BlockId::Latest)
+			.expect("Best block header and recent hashes always stored; qed")
+	}
 
-		EnvInfo {
+	/// Get environment info for a given block.
+	pub fn env_info(&self, id: BlockId) -> Option<EnvInfo> {
+		let header = match self.block_header(id) {
+			Some(hdr) => hdr,
+			None => return None,
+		};
+
+		Some(EnvInfo {
 			number: header.number(),
 			author: header.author(),
 			timestamp: header.timestamp(),
 			difficulty: header.difficulty(),
-			last_hashes: self.build_last_hashes(header.hash()),
+			last_hashes: self.build_last_hashes(header.parent_hash()),
 			gas_used: Default::default(),
 			gas_limit: header.gas_limit(),
-		}
+		})
 	}
 
 	fn build_last_hashes(&self, mut parent_hash: H256) -> Arc<Vec<H256>> {
@@ -344,6 +354,10 @@ impl ::provider::Provider for Client {
 		None
 	}
 
+	fn transaction_proof(&self, _req: ::request::TransactionProof) -> Option<Vec<DBValue>> {
+		None
+	}
+
 	fn ready_transactions(&self) -> Vec<::ethcore::transaction::PendingTransaction> {
 		Vec::new()
 	}
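Note (illustration, not part of the diff): besides the new `env_info` accessor, the behavioral change above is that `last_hashes` is now built from `header.parent_hash()` rather than `header.hash()`. For execution inside block N, the BLOCKHASH window consists of N's ancestors starting at its parent; the block's own hash is not available while it executes. A minimal standalone sketch of that windowing with toy block numbers (not the ethcore API):

// Toy sketch: the BLOCKHASH window for executing inside block `n` starts at
// its parent and walks back `depth` ancestors; block `n` itself is excluded.
fn last_hash_numbers(n: u64, depth: u64) -> Vec<u64> {
	(1..=depth).filter_map(|i| n.checked_sub(i)).collect()
}

fn main() {
	// executing in block 10: the window is [9, 8, 7, 6], most recent first
	assert_eq!(last_hash_numbers(10, 4), vec![9, 8, 7, 6]);
}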

View File

@@ -44,8 +44,8 @@ pub enum Error {
 	Rlp(DecoderError),
 	/// A network error.
 	Network(NetworkError),
-	/// Out of buffer.
-	BufferEmpty,
+	/// Out of credits.
+	NoCredits,
 	/// Unrecognized packet code.
 	UnrecognizedPacket(u8),
 	/// Unexpected handshake.
@@ -72,7 +72,7 @@ impl Error {
 		match *self {
 			Error::Rlp(_) => Punishment::Disable,
 			Error::Network(_) => Punishment::None,
-			Error::BufferEmpty => Punishment::Disable,
+			Error::NoCredits => Punishment::Disable,
 			Error::UnrecognizedPacket(_) => Punishment::Disconnect,
 			Error::UnexpectedHandshake => Punishment::Disconnect,
 			Error::WrongNetwork => Punishment::Disable,
@@ -103,7 +103,7 @@ impl fmt::Display for Error {
 		match *self {
 			Error::Rlp(ref err) => err.fmt(f),
 			Error::Network(ref err) => err.fmt(f),
-			Error::BufferEmpty => write!(f, "Out of buffer"),
+			Error::NoCredits => write!(f, "Out of request credits"),
 			Error::UnrecognizedPacket(code) => write!(f, "Unrecognized packet: 0x{:x}", code),
 			Error::UnexpectedHandshake => write!(f, "Unexpected handshake"),
 			Error::WrongNetwork => write!(f, "Wrong network"),

View File

@@ -19,14 +19,14 @@
 //! This uses a "Provider" to answer requests.
 //! See https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES)
 
-use ethcore::transaction::UnverifiedTransaction;
+use ethcore::transaction::{Action, UnverifiedTransaction};
 use ethcore::receipt::Receipt;
 
 use io::TimerToken;
 use network::{NetworkProtocolHandler, NetworkContext, PeerId};
 use rlp::{RlpStream, Stream, UntrustedRlp, View};
 use util::hash::H256;
-use util::{Bytes, Mutex, RwLock, U256};
+use util::{Bytes, DBValue, Mutex, RwLock, U256};
 use time::{Duration, SteadyTime};
 
 use std::collections::HashMap;
@@ -37,7 +37,7 @@ use std::sync::atomic::{AtomicUsize, Ordering};
 use provider::Provider;
 use request::{self, HashOrNumber, Request};
 
-use self::buffer_flow::{Buffer, FlowParams};
+use self::request_credits::{Credits, FlowParams};
 use self::context::{Ctx, TickCtx};
 use self::error::Punishment;
 use self::request_set::RequestSet;
@@ -51,7 +51,7 @@ mod request_set;
 #[cfg(test)]
 mod tests;
 
-pub mod buffer_flow;
+pub mod request_credits;
 
 pub use self::error::Error;
 pub use self::context::{BasicContext, EventContext, IoContext};
@@ -73,7 +73,7 @@ pub const PROTOCOL_VERSIONS: &'static [u8] = &[1];
 pub const MAX_PROTOCOL_VERSION: u8 = 1;
 
 /// Packet count for LES.
-pub const PACKET_COUNT: u8 = 15;
+pub const PACKET_COUNT: u8 = 17;
 
 // packet ID definitions.
 mod packet {
@@ -109,6 +109,10 @@ mod packet {
 	// request and response for header proofs in a CHT.
 	pub const GET_HEADER_PROOFS: u8 = 0x0d;
 	pub const HEADER_PROOFS: u8 = 0x0e;
+
+	// request and response for transaction proof.
+	pub const GET_TRANSACTION_PROOF: u8 = 0x0f;
+	pub const TRANSACTION_PROOF: u8 = 0x10;
 }
 
 // timeouts for different kinds of requests. all values are in milliseconds.
@@ -121,6 +125,7 @@ mod timeout {
 	pub const PROOFS: i64 = 4000;
 	pub const CONTRACT_CODES: i64 = 5000;
 	pub const HEADER_PROOFS: i64 = 3500;
+	pub const TRANSACTION_PROOF: i64 = 5000;
 }
 
 /// A request id.
@@ -143,10 +148,10 @@ struct PendingPeer {
 /// Relevant data to each peer. Not accessible publicly, only `pub` due to
 /// limitations of the privacy system.
 pub struct Peer {
-	local_buffer: Buffer, // their buffer relative to us
+	local_credits: Credits, // their credits relative to us
 	status: Status,
 	capabilities: Capabilities,
-	remote_flow: Option<(Buffer, FlowParams)>,
+	remote_flow: Option<(Credits, FlowParams)>,
 	sent_head: H256, // last chain head we've given them.
 	last_update: SteadyTime,
 	pending_requests: RequestSet,
@@ -155,21 +160,21 @@ pub struct Peer {
 impl Peer {
 	// check the maximum cost of a request, returning an error if there's
-	// not enough buffer left.
+	// not enough credits left.
 	// returns the calculated maximum cost.
 	fn deduct_max(&mut self, flow_params: &FlowParams, kind: request::Kind, max: usize) -> Result<U256, Error> {
-		flow_params.recharge(&mut self.local_buffer);
+		flow_params.recharge(&mut self.local_credits);
 
 		let max_cost = flow_params.compute_cost(kind, max);
-		self.local_buffer.deduct_cost(max_cost)?;
+		self.local_credits.deduct_cost(max_cost)?;
 		Ok(max_cost)
 	}
 
-	// refund buffer for a request. returns new buffer amount.
+	// refund credits for a request. returns new amount of credits.
 	fn refund(&mut self, flow_params: &FlowParams, amount: U256) -> U256 {
-		flow_params.refund(&mut self.local_buffer, amount);
-		self.local_buffer.current()
+		flow_params.refund(&mut self.local_credits, amount);
+		self.local_credits.current()
 	}
 }
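Note (illustration, not part of the diff): `deduct_max` and `refund` implement the accounting pattern every request handler below relies on: recharge the peer's credits, charge the worst-case cost up front, serve the request, then refund the difference between the worst-case and actual cost. A minimal standalone sketch of that pattern with plain integers (hypothetical toy types, not `FlowParams`/`Credits`):

// Toy sketch of "charge the maximum, refund the unused part".
struct Credits { current: u64, limit: u64 }

impl Credits {
	fn deduct(&mut self, cost: u64) -> Result<(), &'static str> {
		if cost > self.current { return Err("out of credits"); }
		self.current -= cost;
		Ok(())
	}
	fn refund(&mut self, amount: u64) {
		self.current = (self.current + amount).min(self.limit);
	}
}

fn main() -> Result<(), &'static str> {
	let mut peer = Credits { current: 1_000, limit: 1_000 };
	let max_cost = 300;          // worst case for the request as asked
	peer.deduct(max_cost)?;      // charged before doing any work
	let actual_cost = 120;       // what serving it really cost
	peer.refund(max_cost - actual_cost);
	assert_eq!(peer.current, 1_000 - actual_cost);
	Ok(())
}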
@@ -206,6 +211,8 @@ pub trait Handler: Send + Sync {
 	/// Called when a peer responds with header proofs. Each proof should be a block header coupled
 	/// with a series of trie nodes is ascending order by distance from the root.
 	fn on_header_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[(Bytes, Vec<Bytes>)]) { }
+	/// Called when a peer responds with a transaction proof. Each proof is a vector of state items.
+	fn on_transaction_proof(&self, _ctx: &EventContext, _req_id: ReqId, _state_items: &[DBValue]) { }
 	/// Called to "tick" the handler periodically.
 	fn tick(&self, _ctx: &BasicContext) { }
 	/// Called on abort. This signals to handlers that they should clean up
@@ -218,7 +225,7 @@ pub trait Handler: Send + Sync {
 pub struct Params {
 	/// Network id.
 	pub network_id: u64,
-	/// Buffer flow parameters.
+	/// Request credits parameters.
 	pub flow_params: FlowParams,
 	/// Initial capabilities.
 	pub capabilities: Capabilities,
@@ -334,14 +341,14 @@ impl LightProtocol {
 	/// Check the maximum amount of requests of a specific type
 	/// which a peer would be able to serve. Returns zero if the
-	/// peer is unknown or has no buffer flow parameters.
+	/// peer is unknown or has no credit parameters.
 	fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize {
 		self.peers.read().get(&peer).and_then(|peer| {
 			let mut peer = peer.lock();
 			match peer.remote_flow {
-				Some((ref mut buf, ref flow)) => {
-					flow.recharge(buf);
-					Some(flow.max_amount(&*buf, kind))
+				Some((ref mut c, ref flow)) => {
+					flow.recharge(c);
+					Some(flow.max_amount(&*c, kind))
 				}
 				None => None,
 			}
@@ -351,7 +358,7 @@ impl LightProtocol {
 	/// Make a request to a peer.
 	///
 	/// Fails on: nonexistent peer, network error, peer not server,
-	/// insufficient buffer. Does not check capabilities before sending.
+	/// insufficient credits. Does not check capabilities before sending.
 	/// On success, returns a request id which can later be coordinated
 	/// with an event.
 	pub fn request_from(&self, io: &IoContext, peer_id: &PeerId, request: Request) -> Result<ReqId, Error> {
@@ -360,10 +367,10 @@ impl LightProtocol {
 		let mut peer = peer.lock();
 		match peer.remote_flow {
-			Some((ref mut buf, ref flow)) => {
-				flow.recharge(buf);
+			Some((ref mut c, ref flow)) => {
+				flow.recharge(c);
 				let max = flow.compute_cost(request.kind(), request.amount());
-				buf.deduct_cost(max)?;
+				c.deduct_cost(max)?;
 			}
 			None => return Err(Error::NotServer),
 		}
@@ -380,6 +387,7 @@ impl LightProtocol {
 			request::Kind::StateProofs => packet::GET_PROOFS,
 			request::Kind::Codes => packet::GET_CONTRACT_CODES,
 			request::Kind::HeaderProofs => packet::GET_HEADER_PROOFS,
+			request::Kind::TransactionProof => packet::GET_TRANSACTION_PROOF,
 		};
 
 		io.send(*peer_id, packet_id, packet_data);
@@ -464,7 +472,7 @@ impl LightProtocol {
 	// - check whether request kinds match
 	fn pre_verify_response(&self, peer: &PeerId, kind: request::Kind, raw: &UntrustedRlp) -> Result<IdGuard, Error> {
 		let req_id = ReqId(raw.val_at(0)?);
-		let cur_buffer: U256 = raw.val_at(1)?;
+		let cur_credits: U256 = raw.val_at(1)?;
 
 		trace!(target: "les", "pre-verifying response from peer {}, kind={:?}", peer, kind);
@@ -480,9 +488,9 @@ impl LightProtocol {
 			(Some(request), Some(flow_info)) => {
 				had_req = true;
 
-				let &mut (ref mut buf, ref mut flow) = flow_info;
-				let actual_buffer = ::std::cmp::min(cur_buffer, *flow.limit());
-				buf.update_to(actual_buffer);
+				let &mut (ref mut c, ref mut flow) = flow_info;
+				let actual_credits = ::std::cmp::min(cur_credits, *flow.limit());
+				c.update_to(actual_credits);
 
 				if request.kind() != kind {
 					Some(Error::UnsolicitedResponse)
@@ -539,6 +547,9 @@ impl LightProtocol {
 			packet::GET_HEADER_PROOFS => self.get_header_proofs(peer, io, rlp),
 			packet::HEADER_PROOFS => self.header_proofs(peer, io, rlp),
 
+			packet::GET_TRANSACTION_PROOF => self.get_transaction_proof(peer, io, rlp),
+			packet::TRANSACTION_PROOF => self.transaction_proof(peer, io, rlp),
+
 			packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, rlp),
 
 			other => {
@@ -685,10 +696,10 @@ impl LightProtocol {
 			return Err(Error::BadProtocolVersion);
 		}
 
-		let remote_flow = flow_params.map(|params| (params.create_buffer(), params));
+		let remote_flow = flow_params.map(|params| (params.create_credits(), params));
 
 		self.peers.write().insert(*peer, Mutex::new(Peer {
-			local_buffer: self.flow_params.create_buffer(),
+			local_credits: self.flow_params.create_credits(),
 			status: status.clone(),
 			capabilities: capabilities.clone(),
 			remote_flow: remote_flow,
@@ -793,10 +804,10 @@ impl LightProtocol {
 		let actual_cost = self.flow_params.compute_cost(request::Kind::Headers, response.len());
 		assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost.");
 
-		let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost);
+		let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost);
 		io.respond(packet::BLOCK_HEADERS, {
 			let mut stream = RlpStream::new_list(3);
-			stream.append(&req_id).append(&cur_buffer).begin_list(response.len());
+			stream.append(&req_id).append(&cur_credits).begin_list(response.len());
 
 			for header in response {
 				stream.append_raw(&header.into_inner(), 1);
@@ -855,11 +866,11 @@ impl LightProtocol {
 		let actual_cost = self.flow_params.compute_cost(request::Kind::Bodies, response_len);
 		assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost.");
 
-		let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost);
+		let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost);
 
 		io.respond(packet::BLOCK_BODIES, {
 			let mut stream = RlpStream::new_list(3);
-			stream.append(&req_id).append(&cur_buffer).begin_list(response.len());
+			stream.append(&req_id).append(&cur_credits).begin_list(response.len());
 
 			for body in response {
 				match body {
@@ -921,11 +932,11 @@ impl LightProtocol {
 		let actual_cost = self.flow_params.compute_cost(request::Kind::Receipts, response_len);
 		assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost.");
 
-		let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost);
+		let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost);
 
 		io.respond(packet::RECEIPTS, {
 			let mut stream = RlpStream::new_list(3);
-			stream.append(&req_id).append(&cur_buffer).begin_list(response.len());
+			stream.append(&req_id).append(&cur_credits).begin_list(response.len());
 
 			for receipts in response {
 				stream.append_raw(&receipts, 1);
@@ -995,11 +1006,11 @@ impl LightProtocol {
 		let actual_cost = self.flow_params.compute_cost(request::Kind::StateProofs, response_len);
 		assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost.");
 
-		let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost);
+		let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost);
 
 		io.respond(packet::PROOFS, {
 			let mut stream = RlpStream::new_list(3);
-			stream.append(&req_id).append(&cur_buffer).begin_list(response.len());
+			stream.append(&req_id).append(&cur_credits).begin_list(response.len());
 
 			for proof in response {
 				stream.append_raw(&proof, 1);
@@ -1067,11 +1078,11 @@ impl LightProtocol {
 		let actual_cost = self.flow_params.compute_cost(request::Kind::Codes, response_len);
 		assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost.");
 
-		let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost);
+		let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost);
 
 		io.respond(packet::CONTRACT_CODES, {
 			let mut stream = RlpStream::new_list(3);
-			stream.append(&req_id).append(&cur_buffer).begin_list(response.len());
+			stream.append(&req_id).append(&cur_credits).begin_list(response.len());
 
 			for code in response {
 				stream.append(&code);
@@ -1140,11 +1151,11 @@ impl LightProtocol {
 		let actual_cost = self.flow_params.compute_cost(request::Kind::HeaderProofs, response_len);
 		assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost.");
 
-		let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost);
+		let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost);
 
 		io.respond(packet::HEADER_PROOFS, {
 			let mut stream = RlpStream::new_list(3);
-			stream.append(&req_id).append(&cur_buffer).begin_list(response.len());
+			stream.append(&req_id).append(&cur_credits).begin_list(response.len());
 
 			for proof in response {
 				stream.append_raw(&proof, 1);
@@ -1182,6 +1193,90 @@ impl LightProtocol {
 		Ok(())
 	}
 
+	// Receive a request for proof-of-execution.
+	fn get_transaction_proof(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> {
+		// refuse to execute more than this amount of gas at once.
+		// this is appx. the point at which the proof of execution would no longer fit in
+		// a single Devp2p packet.
+		const MAX_GAS: usize = 50_000_000;
+		use util::Uint;
+
+		let peers = self.peers.read();
+		let peer = match peers.get(peer) {
+			Some(peer) => peer,
+			None => {
+				debug!(target: "les", "Ignoring request from unknown peer");
+				return Ok(())
+			}
+		};
+		let mut peer = peer.lock();
+
+		let req_id: u64 = raw.val_at(0)?;
+
+		let req = {
+			let req_rlp = raw.at(1)?;
+			request::TransactionProof {
+				at: req_rlp.val_at(0)?,
+				from: req_rlp.val_at(1)?,
+				action: if req_rlp.at(2)?.is_empty() {
+					Action::Create
+				} else {
+					Action::Call(req_rlp.val_at(2)?)
+				},
+				gas: ::std::cmp::min(req_rlp.val_at(3)?, MAX_GAS.into()),
+				gas_price: req_rlp.val_at(4)?,
+				value: req_rlp.val_at(5)?,
+				data: req_rlp.val_at(6)?,
+			}
+		};
+
+		// always charge the peer for all the gas.
+		peer.deduct_max(&self.flow_params, request::Kind::TransactionProof, req.gas.low_u64() as usize)?;
+
+		let response = match self.provider.transaction_proof(req) {
+			Some(res) => res,
+			None => vec![],
+		};
+
+		let cur_credits = peer.local_credits.current();
+
+		io.respond(packet::TRANSACTION_PROOF, {
+			let mut stream = RlpStream::new_list(3);
+			stream.append(&req_id).append(&cur_credits).begin_list(response.len());
+
+			for state_item in response {
+				stream.append(&&state_item[..]);
+			}
+
+			stream.out()
+		});
+
+		Ok(())
+	}
+
+	// Receive a response for proof-of-execution.
+	fn transaction_proof(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> {
+		let id_guard = self.pre_verify_response(peer, request::Kind::HeaderProofs, &raw)?;
+		let raw_proof: Vec<DBValue> = raw.at(2)?.iter()
+			.map(|rlp| {
+				let mut db_val = DBValue::new();
+				db_val.append_slice(rlp.data()?);
+				Ok(db_val)
+			})
+			.collect::<Result<Vec<_>, ::rlp::DecoderError>>()?;
+
+		let req_id = id_guard.defuse();
+		for handler in &self.handlers {
+			handler.on_transaction_proof(&Ctx {
+				peer: *peer,
+				io: io,
+				proto: self,
+			}, req_id, &raw_proof);
+		}
+
+		Ok(())
+	}
+
 	// Receive a set of transactions to relay.
 	fn relay_transactions(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> {
 		const MAX_TRANSACTIONS: usize = 256;
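Note (illustration, not part of the diff): `get_transaction_proof` decodes the `action` slot purely by emptiness: an empty RLP item means contract creation, anything else is read as the callee address. A standalone sketch of that convention with raw byte slices (toy types, not the rlp crate):

// Toy sketch of the `action` field convention used above: an empty item means
// contract creation, a non-empty item is interpreted as the 20-byte callee address.
#[derive(Debug, PartialEq)]
enum Action {
	Create,
	Call([u8; 20]),
}

fn decode_action(item: &[u8]) -> Result<Action, &'static str> {
	match item.len() {
		0 => Ok(Action::Create),
		20 => {
			let mut addr = [0u8; 20];
			addr.copy_from_slice(item);
			Ok(Action::Call(addr))
		}
		_ => Err("action item must be empty or a 20-byte address"),
	}
}

fn main() {
	assert_eq!(decode_action(&[]), Ok(Action::Create));
	assert!(matches!(decode_action(&[0u8; 20]), Ok(Action::Call(_))));
}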
@@ -1330,6 +1425,25 @@ fn encode_request(req: &Request, req_id: usize) -> Vec<u8> {
 					.append(&proof_req.from_level);
 			}
 
+			stream.out()
+		}
+		Request::TransactionProof(ref request) => {
+			let mut stream = RlpStream::new_list(2);
+			stream.append(&req_id).begin_list(7)
+				.append(&request.at)
+				.append(&request.from);
+
+			match request.action {
+				Action::Create => stream.append_empty_data(),
+				Action::Call(ref to) => stream.append(to),
+			};
+
+			stream
+				.append(&request.gas)
+				.append(&request.gas_price)
+				.append(&request.value)
+				.append(&request.data);
+
 			stream.out()
 		}
 	}
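Note (summary, not part of the diff): combining the encoder above with the handlers earlier in the file, the two new packets carry the payloads sketched below. The constants are the ids from the `packet` module; the field order is as encoded above.

// Summary of the new packet payloads (ids from the `packet` module above).
pub const GET_TRANSACTION_PROOF: u8 = 0x0f;
// request payload:  [ req_id, [ at, from, action, gas, gas_price, value, data ] ]
//   `action` is an empty item for a contract creation, or the callee address for a call.

pub const TRANSACTION_PROOF: u8 = 0x10;
// response payload: [ req_id, cur_credits, [ state_item, ... ] ]
//   an empty inner list means the provider could not produce a proof.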

View File

@@ -14,14 +14,14 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-//! LES buffer flow management.
+//! Request credit management.
 //!
-//! Every request in the LES protocol leads to a reduction
-//! of the requester's buffer value as a rate-limiting mechanism.
-//! This buffer value will recharge at a set rate.
+//! Every request in the light protocol leads to a reduction
+//! of the requester's amount of credits as a rate-limiting mechanism.
+//! The amount of credits will recharge at a set rate.
 //!
-//! This module provides an interface for configuration of buffer
-//! flow costs and recharge rates.
+//! This module provides an interface for configuration of
+//! costs and recharge rates of request credits.
 //!
 //! Current default costs are picked completely arbitrarily, not based
 //! on any empirical timings or mathematical models.
@@ -38,19 +38,19 @@ use time::{Duration, SteadyTime};
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct Cost(pub U256, pub U256);
 
-/// Buffer value.
+/// Credits value.
 ///
 /// Produced and recharged using `FlowParams`.
 /// Definitive updates can be made as well -- these will reset the recharge
 /// point to the time of the update.
 #[derive(Debug, Clone, PartialEq, Eq)]
-pub struct Buffer {
+pub struct Credits {
 	estimate: U256,
 	recharge_point: SteadyTime,
 }
 
-impl Buffer {
-	/// Get the current buffer value.
+impl Credits {
+	/// Get the current amount of credits..
 	pub fn current(&self) -> U256 { self.estimate.clone() }
 
 	/// Make a definitive update.
@@ -61,7 +61,7 @@ impl Buffer {
 		self.recharge_point = SteadyTime::now();
 	}
 
-	/// Attempt to apply the given cost to the buffer.
+	/// Attempt to apply the given cost to the amount of credits.
 	///
 	/// If successful, the cost will be deducted successfully.
 	///
@@ -69,7 +69,7 @@
 	/// error will be produced.
 	pub fn deduct_cost(&mut self, cost: U256) -> Result<(), Error> {
 		match cost > self.estimate {
-			true => Err(Error::BufferEmpty),
+			true => Err(Error::NoCredits),
 			false => {
 				self.estimate = self.estimate - cost;
 				Ok(())
@@ -81,12 +81,13 @@
 /// A cost table, mapping requests to base and per-request costs.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct CostTable {
-	headers: Cost,
+	headers: Cost, // cost per header
 	bodies: Cost,
 	receipts: Cost,
 	state_proofs: Cost,
 	contract_codes: Cost,
 	header_proofs: Cost,
+	transaction_proof: Cost, // cost per gas.
 }
 
 impl Default for CostTable {
@@ -99,6 +100,7 @@ impl Default for CostTable {
 			state_proofs: Cost(250000.into(), 25000.into()),
 			contract_codes: Cost(200000.into(), 20000.into()),
 			header_proofs: Cost(150000.into(), 15000.into()),
+			transaction_proof: Cost(100000.into(), 2.into()),
 		}
 	}
 }
@@ -112,7 +114,7 @@ impl RlpEncodable for CostTable {
 				.append(&cost.1);
 		}
 
-		s.begin_list(6);
+		s.begin_list(7);
 
 		append_cost(s, packet::GET_BLOCK_HEADERS, &self.headers);
 		append_cost(s, packet::GET_BLOCK_BODIES, &self.bodies);
@@ -120,6 +122,7 @@ impl RlpEncodable for CostTable {
 		append_cost(s, packet::GET_PROOFS, &self.state_proofs);
 		append_cost(s, packet::GET_CONTRACT_CODES, &self.contract_codes);
 		append_cost(s, packet::GET_HEADER_PROOFS, &self.header_proofs);
+		append_cost(s, packet::GET_TRANSACTION_PROOF, &self.transaction_proof);
 	}
 }
@@ -133,6 +136,7 @@ impl RlpDecodable for CostTable {
 		let mut state_proofs = None;
 		let mut contract_codes = None;
 		let mut header_proofs = None;
+		let mut transaction_proof = None;
 
 		for row in rlp.iter() {
 			let msg_id: u8 = row.val_at(0)?;
@@ -150,6 +154,7 @@ impl RlpDecodable for CostTable {
 				packet::GET_PROOFS => state_proofs = Some(cost),
 				packet::GET_CONTRACT_CODES => contract_codes = Some(cost),
 				packet::GET_HEADER_PROOFS => header_proofs = Some(cost),
+				packet::GET_TRANSACTION_PROOF => transaction_proof = Some(cost),
 				_ => return Err(DecoderError::Custom("Unrecognized message in cost table")),
 			}
 		}
@@ -161,11 +166,12 @@ impl RlpDecodable for CostTable {
 			state_proofs: state_proofs.ok_or(DecoderError::Custom("No proofs cost specified"))?,
 			contract_codes: contract_codes.ok_or(DecoderError::Custom("No contract codes specified"))?,
 			header_proofs: header_proofs.ok_or(DecoderError::Custom("No header proofs cost specified"))?,
+			transaction_proof: transaction_proof.ok_or(DecoderError::Custom("No transaction proof gas cost specified"))?,
 		})
 	}
 }
 
-/// A buffer-flow manager handles costs, recharge, limits
+/// Handles costs, recharge, limits of request credits.
 #[derive(Debug, Clone, PartialEq)]
 pub struct FlowParams {
 	costs: CostTable,
@@ -175,7 +181,7 @@ pub struct FlowParams {
 impl FlowParams {
 	/// Create new flow parameters from a request cost table,
-	/// buffer limit, and (minimum) rate of recharge.
+	/// credit limit, and (minimum) rate of recharge.
 	pub fn new(limit: U256, costs: CostTable, recharge: U256) -> Self {
 		FlowParams {
 			costs: costs,
@@ -197,11 +203,12 @@ impl FlowParams {
 				state_proofs: free_cost.clone(),
 				contract_codes: free_cost.clone(),
 				header_proofs: free_cost.clone(),
+				transaction_proof: free_cost,
 			}
 		}
 	}
 
-	/// Get a reference to the buffer limit.
+	/// Get a reference to the credit limit.
 	pub fn limit(&self) -> &U256 { &self.limit }
 
 	/// Get a reference to the cost table.
@@ -220,6 +227,7 @@ impl FlowParams {
 			request::Kind::StateProofs => &self.costs.state_proofs,
 			request::Kind::Codes => &self.costs.contract_codes,
 			request::Kind::HeaderProofs => &self.costs.header_proofs,
+			request::Kind::TransactionProof => &self.costs.transaction_proof,
 		};
 
 		let amount: U256 = amount.into();
@@ -227,10 +235,10 @@ impl FlowParams {
 	}
 
 	/// Compute the maximum number of costs of a specific kind which can be made
-	/// with the given buffer.
+	/// with the given amount of credits
 	/// Saturates at `usize::max()`. This is not a problem in practice because
 	/// this amount of requests is already prohibitively large.
-	pub fn max_amount(&self, buffer: &Buffer, kind: request::Kind) -> usize {
+	pub fn max_amount(&self, credits: &Credits, kind: request::Kind) -> usize {
 		use util::Uint;
 		use std::usize;
 
@@ -241,9 +249,10 @@ impl FlowParams {
 			request::Kind::StateProofs => &self.costs.state_proofs,
 			request::Kind::Codes => &self.costs.contract_codes,
 			request::Kind::HeaderProofs => &self.costs.header_proofs,
+			request::Kind::TransactionProof => &self.costs.transaction_proof,
 		};
 
-		let start = buffer.current();
+		let start = credits.current();
 
 		if start <= cost.0 {
 			return 0;
@@ -259,36 +268,36 @@ impl FlowParams {
 		}
 	}
 
-	/// Create initial buffer parameter.
-	pub fn create_buffer(&self) -> Buffer {
-		Buffer {
+	/// Create initial credits..
+	pub fn create_credits(&self) -> Credits {
+		Credits {
 			estimate: self.limit,
 			recharge_point: SteadyTime::now(),
 		}
 	}
 
-	/// Recharge the buffer based on time passed since last
+	/// Recharge the given credits based on time passed since last
 	/// update.
-	pub fn recharge(&self, buf: &mut Buffer) {
+	pub fn recharge(&self, credits: &mut Credits) {
 		let now = SteadyTime::now();
 
 		// recompute and update only in terms of full seconds elapsed
 		// in order to keep the estimate as an underestimate.
-		let elapsed = (now - buf.recharge_point).num_seconds();
-		buf.recharge_point = buf.recharge_point + Duration::seconds(elapsed);
+		let elapsed = (now - credits.recharge_point).num_seconds();
+		credits.recharge_point = credits.recharge_point + Duration::seconds(elapsed);
 
 		let elapsed: U256 = elapsed.into();
 
-		buf.estimate = ::std::cmp::min(self.limit, buf.estimate + (elapsed * self.recharge));
+		credits.estimate = ::std::cmp::min(self.limit, credits.estimate + (elapsed * self.recharge));
 	}
 
-	/// Refund some buffer which was previously deducted.
+	/// Refund some credits which were previously deducted.
 	/// Does not update the recharge timestamp.
-	pub fn refund(&self, buf: &mut Buffer, refund_amount: U256) {
-		buf.estimate = buf.estimate + refund_amount;
+	pub fn refund(&self, credits: &mut Credits, refund_amount: U256) {
+		credits.estimate = credits.estimate + refund_amount;
 
-		if buf.estimate > self.limit {
-			buf.estimate = self.limit
+		if credits.estimate > self.limit {
+			credits.estimate = self.limit
 		}
 	}
 }
@@ -318,20 +327,20 @@ mod tests {
 	}
 
 	#[test]
-	fn buffer_mechanism() {
+	fn credits_mechanism() {
 		use std::thread;
 		use std::time::Duration;
 
 		let flow_params = FlowParams::new(100.into(), Default::default(), 20.into());
-		let mut buffer = flow_params.create_buffer();
+		let mut credits = flow_params.create_credits();
 
-		assert!(buffer.deduct_cost(101.into()).is_err());
-		assert!(buffer.deduct_cost(10.into()).is_ok());
+		assert!(credits.deduct_cost(101.into()).is_err());
+		assert!(credits.deduct_cost(10.into()).is_ok());
 
 		thread::sleep(Duration::from_secs(1));
 
-		flow_params.recharge(&mut buffer);
+		flow_params.recharge(&mut credits);
 
-		assert_eq!(buffer.estimate, 100.into());
+		assert_eq!(credits.estimate, 100.into());
 	}
 }
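Note (illustration, not part of the diff): as the module docs and the `credits_mechanism` test describe, a peer's credit estimate is reduced per request and drifts back toward the limit at the recharge rate, counting only whole elapsed seconds so the estimate stays an underestimate. A minimal standalone sketch of that estimate using std::time (toy types, not `Credits`/`FlowParams`):

use std::time::{Duration, Instant};

// Toy credit estimate: deduct on use, recharge toward `limit` over whole seconds.
struct Estimate {
	value: u64,
	limit: u64,
	recharge_per_sec: u64,
	recharge_point: Instant,
}

impl Estimate {
	fn recharge(&mut self, now: Instant) {
		// only whole seconds count, keeping the estimate an underestimate
		let secs = now.duration_since(self.recharge_point).as_secs();
		self.recharge_point += Duration::from_secs(secs);
		self.value = (self.value + secs * self.recharge_per_sec).min(self.limit);
	}

	fn deduct(&mut self, cost: u64) -> Result<(), &'static str> {
		if cost > self.value { return Err("no credits"); }
		self.value -= cost;
		Ok(())
	}
}

fn main() {
	let start = Instant::now();
	let mut est = Estimate { value: 100, limit: 100, recharge_per_sec: 20, recharge_point: start };
	assert!(est.deduct(101).is_err());   // over the current estimate
	assert!(est.deduct(10).is_ok());     // 90 left
	est.recharge(start + Duration::from_secs(1));
	assert_eq!(est.value, 100);          // 90 + 20, capped at the limit
}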

View File

@@ -101,6 +101,7 @@ impl RequestSet {
 			request::Kind::StateProofs => timeout::PROOFS,
 			request::Kind::Codes => timeout::CONTRACT_CODES,
 			request::Kind::HeaderProofs => timeout::HEADER_PROOFS,
+			request::Kind::TransactionProof => timeout::TRANSACTION_PROOF,
 		};
 
 		base + Duration::milliseconds(kind_timeout) <= now

View File

@@ -19,7 +19,7 @@
 use rlp::{DecoderError, RlpDecodable, RlpEncodable, RlpStream, Stream, UntrustedRlp, View};
 use util::{H256, U256};
 
-use super::buffer_flow::FlowParams;
+use super::request_credits::FlowParams;
 
 // recognized handshake/announcement keys.
 // unknown keys are to be skipped, known keys have a defined order.
@@ -207,7 +207,7 @@ impl Capabilities {
 /// Attempt to parse a handshake message into its three parts:
 /// - chain status
 /// - serving capabilities
-/// - buffer flow parameters
+/// - request credit parameters
 pub fn parse_handshake(rlp: UntrustedRlp) -> Result<(Status, Capabilities, Option<FlowParams>), DecoderError> {
 	let mut parser = Parser {
 		pos: 0,
@@ -300,7 +300,7 @@ pub struct Announcement {
 	pub serve_chain_since: Option<u64>,
 	/// optional new transaction-relay capability. false means "no change"
 	pub tx_relay: bool,
-	// TODO: changes in buffer flow?
+	// TODO: changes in request credits.
 }
 
 /// Parse an announcement.
@@ -372,7 +372,7 @@ pub fn write_announcement(announcement: &Announcement) -> Vec<u8> {
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use super::super::buffer_flow::FlowParams;
+	use super::super::request_credits::FlowParams;
 	use util::{U256, H256, FixedHash};
 	use rlp::{RlpStream, Stream ,UntrustedRlp, View};

View File

@@ -20,11 +20,11 @@
 use ethcore::blockchain_info::BlockChainInfo;
 use ethcore::client::{EachBlockWith, TestBlockChainClient};
 use ethcore::ids::BlockId;
-use ethcore::transaction::PendingTransaction;
+use ethcore::transaction::{Action, PendingTransaction};
 use ethcore::encoded;
 use network::{PeerId, NodeId};
 
-use net::buffer_flow::FlowParams;
+use net::request_credits::FlowParams;
 use net::context::IoContext;
 use net::status::{Capabilities, Status, write_handshake};
 use net::{encode_request, LightProtocol, Params, packet, Peer};
@@ -32,7 +32,7 @@ use provider::Provider;
 use request::{self, Request, Headers};
 
 use rlp::*;
-use util::{Bytes, H256, U256};
+use util::{Address, Bytes, DBValue, H256, U256};
 use std::sync::Arc;
@@ -127,6 +127,10 @@ impl Provider for TestProvider {
 		None
 	}
 
+	fn transaction_proof(&self, _req: request::TransactionProof) -> Option<Vec<DBValue>> {
+		None
+	}
+
 	fn ready_transactions(&self) -> Vec<PendingTransaction> {
 		self.0.client.ready_transactions()
 	}
@@ -203,7 +207,7 @@ fn genesis_mismatch() {
 }
 
 #[test]
-fn buffer_overflow() {
+fn credit_overflow() {
 	let flow_params = make_flow_params();
 	let capabilities = capabilities();
@@ -268,11 +272,11 @@ fn get_block_headers() {
 	let headers: Vec<_> = (0..10).map(|i| provider.client.block_header(BlockId::Number(i + 1)).unwrap()).collect();
 	assert_eq!(headers.len(), 10);
 
-	let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Headers, 10);
+	let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Headers, 10);
 
 	let mut response_stream = RlpStream::new_list(3);
-	response_stream.append(&req_id).append(&new_buf).begin_list(10);
+	response_stream.append(&req_id).append(&new_creds).begin_list(10);
 	for header in headers {
 		response_stream.append_raw(&header.into_inner(), 1);
 	}
@@ -317,11 +321,11 @@ fn get_block_bodies() {
 	let bodies: Vec<_> = (0..10).map(|i| provider.client.block_body(BlockId::Number(i + 1)).unwrap()).collect();
 	assert_eq!(bodies.len(), 10);
 
-	let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Bodies, 10);
+	let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Bodies, 10);
 
 	let mut response_stream = RlpStream::new_list(3);
-	response_stream.append(&req_id).append(&new_buf).begin_list(10);
+	response_stream.append(&req_id).append(&new_creds).begin_list(10);
 	for body in bodies {
 		response_stream.append_raw(&body.into_inner(), 1);
 	}
@@ -371,11 +375,11 @@ fn get_block_receipts() {
 		.map(|hash| provider.client.block_receipts(hash).unwrap())
 		.collect();
 
-	let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Receipts, receipts.len());
+	let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Receipts, receipts.len());
 
 	let mut response_stream = RlpStream::new_list(3);
-	response_stream.append(&req_id).append(&new_buf).begin_list(receipts.len());
+	response_stream.append(&req_id).append(&new_creds).begin_list(receipts.len());
 	for block_receipts in receipts {
 		response_stream.append_raw(&block_receipts, 1);
 	}
@@ -420,11 +424,11 @@ fn get_state_proofs() {
 		vec![::util::sha3::SHA3_NULL_RLP.to_vec()],
 	];
 
-	let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::StateProofs, 2);
+	let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::StateProofs, 2);
 
 	let mut response_stream = RlpStream::new_list(3);
-	response_stream.append(&req_id).append(&new_buf).begin_list(2);
+	response_stream.append(&req_id).append(&new_creds).begin_list(2);
 	for proof in proofs {
 		response_stream.begin_list(proof.len());
 		for node in proof {
@@ -472,11 +476,11 @@ fn get_contract_code() {
 		key2.iter().chain(key2.iter()).cloned().collect(),
 	];
 
-	let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Codes, 2);
+	let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Codes, 2);
 
 	let mut response_stream = RlpStream::new_list(3);
-	response_stream.append(&req_id).append(&new_buf).begin_list(2);
+	response_stream.append(&req_id).append(&new_creds).begin_list(2);
 	for code in codes {
 		response_stream.append(&code);
 	}
@@ -488,6 +492,56 @@ fn get_contract_code() {
 	proto.handle_packet(&expected, &1, packet::GET_CONTRACT_CODES, &request_body);
 }
 
+#[test]
+fn proof_of_execution() {
+	let flow_params = FlowParams::new(5_000_000.into(), Default::default(), 0.into());
+	let capabilities = capabilities();
+
+	let (provider, proto) = setup(flow_params.clone(), capabilities.clone());
+
+	let cur_status = status(provider.client.chain_info());
+
+	{
+		let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params));
+		proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body.clone()));
+		proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &packet_body);
+	}
+
+	let req_id = 112;
+	let mut request = Request::TransactionProof (request::TransactionProof {
+		at: H256::default(),
+		from: Address::default(),
+		action: Action::Call(Address::default()),
+		gas: 100.into(),
+		gas_price: 0.into(),
+		value: 0.into(),
+		data: Vec::new(),
+	});
+
+	// first: a valid amount to request execution of.
+	let request_body = encode_request(&request, req_id);
+	let response = {
+		let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::TransactionProof, 100);
+
+		let mut response_stream = RlpStream::new_list(3);
+		response_stream.append(&req_id).append(&new_creds).begin_list(0);
+
+		response_stream.out()
+	};
+
+	let expected = Expect::Respond(packet::TRANSACTION_PROOF, response);
+	proto.handle_packet(&expected, &1, packet::GET_TRANSACTION_PROOF, &request_body);
+
+	// next: way too much requested gas.
+	if let Request::TransactionProof(ref mut req) = request {
+		req.gas = 100_000_000.into();
+	}
+	let req_id = 113;
+	let request_body = encode_request(&request, req_id);
+
+	let expected = Expect::Punish(1);
+	proto.handle_packet(&expected, &1, packet::GET_TRANSACTION_PROOF, &request_body);
+}
+
 #[test]
 fn id_guard() {
 	use super::request_set::RequestSet;
@@ -515,10 +569,10 @@ fn id_guard() {
 	pending_requests.insert(req_id_2, req, ::time::SteadyTime::now());
 
 	proto.peers.write().insert(peer_id, ::util::Mutex::new(Peer {
-		local_buffer: flow_params.create_buffer(),
+		local_credits: flow_params.create_credits(),
 		status: status(provider.client.chain_info()),
 		capabilities: capabilities.clone(),
-		remote_flow: Some((flow_params.create_buffer(), flow_params)),
+		remote_flow: Some((flow_params.create_credits(), flow_params)),
 		sent_head: provider.client.chain_info().best_block_hash,
 		last_update: ::time::SteadyTime::now(),
 		pending_requests: pending_requests,
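Note (worked example, not part of the diff): the expected credit values in `proof_of_execution` follow from the default cost table added in this commit, transaction_proof = Cost(100000, 2), read as a 100 000 base cost plus 2 credits per gas (assuming compute_cost = base + per_unit * amount, as the CostTable docs describe), against the 5 000 000 credit limit and a zero recharge rate. The second request is punished because even after clamping to MAX_GAS the charge exceeds the available credits:

fn tx_proof_cost(gas: u64) -> u64 {
	// assumed default cost table from this commit: base 100_000, 2 credits per gas
	100_000 + 2 * gas
}

fn main() {
	let limit = 5_000_000u64;

	// first request: 100 gas, well within the limit
	let first = tx_proof_cost(100);
	assert_eq!(first, 100_200);
	let remaining = limit - first;
	assert_eq!(remaining, 4_899_800);

	// second request: 100_000_000 gas is clamped to MAX_GAS = 50_000_000,
	// but the charge still dwarfs the remaining credits, so deduct_cost fails.
	let clamped = 50_000_000u64;
	assert!(tx_proof_cost(clamped) > remaining);
}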

View File

@@ -24,12 +24,14 @@ use std::sync::Arc;
 use ethcore::basic_account::BasicAccount;
 use ethcore::encoded;
 use ethcore::receipt::Receipt;
+use ethcore::state::ProvedExecution;
+use ethcore::executed::{Executed, ExecutionError};
 
 use futures::{Async, Poll, Future};
 use futures::sync::oneshot::{self, Sender, Receiver};
 use network::PeerId;
 use rlp::{RlpStream, Stream};
-use util::{Bytes, RwLock, Mutex, U256};
+use util::{Bytes, DBValue, RwLock, Mutex, U256};
 use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP};
 
 use net::{Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId};
@@ -59,6 +61,7 @@ enum Pending {
 	BlockReceipts(request::BlockReceipts, Sender<Vec<Receipt>>),
 	Account(request::Account, Sender<BasicAccount>),
 	Code(request::Code, Sender<Bytes>),
+	TxProof(request::TransactionProof, Sender<Result<Executed, ExecutionError>>),
 }
 
 /// On demand request service. See module docs for more details.
@@ -418,6 +421,50 @@ impl OnDemand {
 		self.orphaned_requests.write().push(pending)
 	}
 
+	/// Request proof-of-execution for a transaction.
+	pub fn transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof) -> Receiver<Result<Executed, ExecutionError>> {
+		let (sender, receiver) = oneshot::channel();
+
+		self.dispatch_transaction_proof(ctx, req, sender);
+
+		receiver
+	}
+
+	fn dispatch_transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof, sender: Sender<Result<Executed, ExecutionError>>) {
+		let num = req.header.number();
+		let les_req = LesRequest::TransactionProof(les_request::TransactionProof {
+			at: req.header.hash(),
+			from: req.tx.sender(),
+			gas: req.tx.gas,
+			gas_price: req.tx.gas_price,
+			action: req.tx.action.clone(),
+			value: req.tx.value,
+			data: req.tx.data.clone(),
+		});
+		let pending = Pending::TxProof(req, sender);
+
+		// we're looking for a peer with serveStateSince(num)
+		for (id, peer) in self.peers.read().iter() {
+			if peer.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= num) {
+				match ctx.request_from(*id, les_req.clone()) {
+					Ok(req_id) => {
+						trace!(target: "on_demand", "Assigning request to peer {}", id);
+						self.pending_requests.write().insert(
+							req_id,
+							pending
+						);
+						return
+					}
+					Err(e) =>
+						trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e),
+				}
+			}
+		}
+
+		trace!(target: "on_demand", "No suitable peer for request");
+		self.orphaned_requests.write().push(pending)
+	}
+
 	// dispatch orphaned requests, and discard those for which the corresponding
 	// receiver has been dropped.
 	fn dispatch_orphaned(&self, ctx: &BasicContext) {
@@ -468,6 +515,8 @@ impl OnDemand {
 				if !check_hangup(&mut sender) { self.dispatch_account(ctx, req, sender) },
 			Pending::Code(req, mut sender) =>
 				if !check_hangup(&mut sender) { self.dispatch_code(ctx, req, sender) },
+			Pending::TxProof(req, mut sender) =>
+				if !check_hangup(&mut sender) { self.dispatch_transaction_proof(ctx, req, sender) }
 		}
 	}
 }
@@ -690,6 +739,36 @@ impl Handler for OnDemand {
 		}
 	}
 
+	fn on_transaction_proof(&self, ctx: &EventContext, req_id: ReqId, items: &[DBValue]) {
+		let peer = ctx.peer();
+		let req = match self.pending_requests.write().remove(&req_id) {
+			Some(req) => req,
+			None => return,
+		};
+
+		match req {
+			Pending::TxProof(req, sender) => {
+				match req.check_response(items) {
+					ProvedExecution::Complete(executed) => {
+						sender.complete(Ok(executed));
+						return
+					}
+					ProvedExecution::Failed(err) => {
+						sender.complete(Err(err));
+						return
+					}
+					ProvedExecution::BadProof => {
+						warn!("Error handling response for transaction proof request");
+						ctx.disable_peer(peer);
+					}
+				}
+
+				self.dispatch_transaction_proof(ctx.as_basic(), req, sender);
+			}
+			_ => panic!("Only transaction proof request dispatches transaction proof requests; qed"),
+		}
+	}
+
 	fn tick(&self, ctx: &BasicContext) {
 		self.dispatch_orphaned(ctx)
 	}
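Note (illustration, not part of the diff): `dispatch_transaction_proof` follows the same try-then-park pattern as the other on_demand dispatchers: offer the request to each peer whose announced capabilities cover the needed block, and if nobody accepts, push it onto `orphaned_requests` so it can be retried on the next tick (unless the caller has dropped the receiver). A minimal standalone sketch of that loop (hypothetical toy types, not the on_demand API):

// Toy sketch: offer a request to capable peers, otherwise park it for retry.
struct Peer { id: u32, serves_block: bool }

enum Outcome { Assigned(u32), Orphaned }

fn dispatch(peers: &[Peer], mut try_send: impl FnMut(u32) -> Result<(), ()>) -> Outcome {
	for peer in peers.iter().filter(|p| p.serves_block) {
		if try_send(peer.id).is_ok() {
			return Outcome::Assigned(peer.id); // track it under the returned request id
		}
		// on error: try the next capable peer
	}
	Outcome::Orphaned // retried on the next tick
}

fn main() {
	let peers = [Peer { id: 1, serves_block: false }, Peer { id: 2, serves_block: true }];
	// peer 2 is capable and accepts the request
	assert!(matches!(dispatch(&peers, |_| Ok(())), Outcome::Assigned(2)));
	// nobody accepts: the request is parked
	assert!(matches!(dispatch(&peers, |_| Err(())), Outcome::Orphaned));
}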

View File

@@ -16,12 +16,18 @@
 //! Request types, verification, and verification errors.
 
+use std::sync::Arc;
+
 use ethcore::basic_account::BasicAccount;
 use ethcore::encoded;
+use ethcore::engines::Engine;
+use ethcore::env_info::EnvInfo;
 use ethcore::receipt::Receipt;
+use ethcore::state::{self, ProvedExecution};
+use ethcore::transaction::SignedTransaction;
 use rlp::{RlpStream, Stream, UntrustedRlp, View};
-use util::{Address, Bytes, HashDB, H256, U256};
+use util::{Address, Bytes, DBValue, HashDB, H256, U256};
 use util::memorydb::MemoryDB;
 use util::sha3::Hashable;
 use util::trie::{Trie, TrieDB, TrieError};
@@ -231,6 +237,33 @@ impl Code {
 	}
 }
 
+/// Request for transaction execution, along with the parts necessary to verify the proof.
+pub struct TransactionProof {
+	/// The transaction to request proof of.
+	pub tx: SignedTransaction,
+	/// Block header.
+	pub header: encoded::Header,
+	/// Transaction environment info.
+	pub env_info: EnvInfo,
+	/// Consensus engine.
+	pub engine: Arc<Engine>,
+}
+
+impl TransactionProof {
+	/// Check the proof, returning the proved execution or indicate that the proof was bad.
+	pub fn check_response(&self, state_items: &[DBValue]) -> ProvedExecution {
+		let root = self.header.state_root();
+
+		state::check_proof(
+			state_items,
+			root,
+			&self.tx,
+			&*self.engine,
+			&self.env_info,
+		)
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use super::*;

View File

@@ -24,7 +24,7 @@ use ethcore::client::{BlockChainClient, ProvingBlockChainClient};
 use ethcore::transaction::PendingTransaction;
 use ethcore::ids::BlockId;
 use ethcore::encoded;
-use util::{Bytes, RwLock, H256};
+use util::{Bytes, DBValue, RwLock, H256};
 
 use cht::{self, BlockInfo};
 use client::{LightChainClient, AsLightClient};
@@ -193,6 +193,10 @@ pub trait Provider: Send + Sync {
 
 	/// Provide pending transactions.
 	fn ready_transactions(&self) -> Vec<PendingTransaction>;
+
+	/// Provide a proof-of-execution for the given transaction proof request.
+	/// Returns a vector of all state items necessary to execute the transaction.
+	fn transaction_proof(&self, req: request::TransactionProof) -> Option<Vec<DBValue>>;
 }
 
 // Implementation of a light client data provider for a client.
@@ -283,6 +287,26 @@ impl<T: ProvingBlockChainClient + ?Sized> Provider for T {
 		}
 	}
 
+	fn transaction_proof(&self, req: request::TransactionProof) -> Option<Vec<DBValue>> {
+		use ethcore::transaction::Transaction;
+
+		let id = BlockId::Hash(req.at);
+		let nonce = match self.nonce(&req.from, id.clone()) {
+			Some(nonce) => nonce,
+			None => return None,
+		};
+		let transaction = Transaction {
+			nonce: nonce,
+			gas: req.gas,
+			gas_price: req.gas_price,
+			action: req.action,
+			value: req.value,
+			data: req.data,
+		}.fake_sign(req.from);
+
+		self.prove_transaction(transaction, id)
+	}
+
 	fn ready_transactions(&self) -> Vec<PendingTransaction> {
 		BlockChainClient::ready_transactions(self)
 	}
@@ -343,6 +367,10 @@ impl<L: AsLightClient + Send + Sync> Provider for LightProvider<L> {
 		None
 	}
 
+	fn transaction_proof(&self, _req: request::TransactionProof) -> Option<Vec<DBValue>> {
+		None
+	}
+
 	fn ready_transactions(&self) -> Vec<PendingTransaction> {
 		let chain_info = self.chain_info();
 		self.txqueue.read().ready_transactions(chain_info.best_block_number, chain_info.best_block_timestamp)

View File

@ -16,7 +16,8 @@
//! LES request types. //! LES request types.
use util::H256; use ethcore::transaction::Action;
use util::{Address, H256, U256, Uint};
/// Either a hash or a number. /// Either a hash or a number.
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
@ -134,6 +135,26 @@ pub struct HeaderProofs {
pub requests: Vec<HeaderProof>, pub requests: Vec<HeaderProof>,
} }
/// A request for proof of (simulated) transaction execution.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "ipc", binary)]
pub struct TransactionProof {
/// Block hash to request for.
pub at: H256,
/// Address to treat as the caller.
pub from: Address,
/// Action to take: either a call or a create.
pub action: Action,
/// Amount of gas to request proof-of-execution for.
pub gas: U256,
/// Price for each gas.
pub gas_price: U256,
/// Value to simulate sending.
pub value: U256,
/// Transaction data.
pub data: Vec<u8>,
}
/// Kinds of requests. /// Kinds of requests.
#[derive(Debug, Clone, Copy, PartialEq, Eq)] #[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "ipc", binary)] #[cfg_attr(feature = "ipc", binary)]
@ -150,6 +171,8 @@ pub enum Kind {
Codes, Codes,
/// Requesting header proofs (from the CHT). /// Requesting header proofs (from the CHT).
HeaderProofs, HeaderProofs,
/// Requesting proof of transaction execution.
TransactionProof,
} }
/// Encompasses all possible types of requests in a single structure. /// Encompasses all possible types of requests in a single structure.
@ -168,6 +191,8 @@ pub enum Request {
Codes(ContractCodes), Codes(ContractCodes),
/// Requesting header proofs. /// Requesting header proofs.
HeaderProofs(HeaderProofs), HeaderProofs(HeaderProofs),
/// Requesting proof of transaction execution.
TransactionProof(TransactionProof),
} }
impl Request { impl Request {
@ -180,10 +205,12 @@ impl Request {
Request::StateProofs(_) => Kind::StateProofs, Request::StateProofs(_) => Kind::StateProofs,
Request::Codes(_) => Kind::Codes, Request::Codes(_) => Kind::Codes,
Request::HeaderProofs(_) => Kind::HeaderProofs, Request::HeaderProofs(_) => Kind::HeaderProofs,
Request::TransactionProof(_) => Kind::TransactionProof,
} }
} }
/// Get the amount of requests being made. /// Get the amount of requests being made.
/// In the case of `TransactionProof`, this is the amount of gas being requested.
pub fn amount(&self) -> usize { pub fn amount(&self) -> usize {
match *self { match *self {
Request::Headers(ref req) => req.max, Request::Headers(ref req) => req.max,
@ -192,6 +219,10 @@ impl Request {
Request::StateProofs(ref req) => req.requests.len(), Request::StateProofs(ref req) => req.requests.len(),
Request::Codes(ref req) => req.code_requests.len(), Request::Codes(ref req) => req.code_requests.len(),
Request::HeaderProofs(ref req) => req.requests.len(), Request::HeaderProofs(ref req) => req.requests.len(),
Request::TransactionProof(ref req) => match req.gas > usize::max_value().into() {
true => usize::max_value(),
false => req.gas.low_u64() as usize,
}
} }
} }
} }

View File

@ -24,7 +24,7 @@ use time::precise_time_ns;
// util // util
use util::{Bytes, PerfTimer, Itertools, Mutex, RwLock, MutexGuard, Hashable}; use util::{Bytes, PerfTimer, Itertools, Mutex, RwLock, MutexGuard, Hashable};
use util::{journaldb, TrieFactory, Trie}; use util::{journaldb, DBValue, TrieFactory, Trie};
use util::{U256, H256, Address, H2048, Uint, FixedHash}; use util::{U256, H256, Address, H2048, Uint, FixedHash};
use util::trie::TrieSpec; use util::trie::TrieSpec;
use util::kvdb::*; use util::kvdb::*;
@ -34,7 +34,7 @@ use io::*;
use views::BlockView; use views::BlockView;
use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError}; use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError};
use header::BlockNumber; use header::BlockNumber;
use state::{State, CleanupMode}; use state::{self, State, CleanupMode};
use spec::Spec; use spec::Spec;
use basic_types::Seal; use basic_types::Seal;
use engines::Engine; use engines::Engine;
@ -308,18 +308,24 @@ impl Client {
} }
/// The env info as of the best block. /// The env info as of the best block.
fn latest_env_info(&self) -> EnvInfo { pub fn latest_env_info(&self) -> EnvInfo {
let header = self.best_block_header(); self.env_info(BlockId::Latest).expect("Best block header always stored; qed")
}
/// The env info as of a given block.
/// returns `None` if the block unknown.
pub fn env_info(&self, id: BlockId) -> Option<EnvInfo> {
self.block_header(id).map(|header| {
EnvInfo { EnvInfo {
number: header.number(), number: header.number(),
author: header.author(), author: header.author(),
timestamp: header.timestamp(), timestamp: header.timestamp(),
difficulty: header.difficulty(), difficulty: header.difficulty(),
last_hashes: self.build_last_hashes(header.hash()), last_hashes: self.build_last_hashes(header.parent_hash()),
gas_used: U256::default(), gas_used: U256::default(),
gas_limit: header.gas_limit(), gas_limit: header.gas_limit(),
} }
})
} }
fn build_last_hashes(&self, parent_hash: H256) -> Arc<LastHashes> { fn build_last_hashes(&self, parent_hash: H256) -> Arc<LastHashes> {
@ -874,17 +880,9 @@ impl snapshot::DatabaseRestore for Client {
impl BlockChainClient for Client { impl BlockChainClient for Client {
fn call(&self, t: &SignedTransaction, block: BlockId, analytics: CallAnalytics) -> Result<Executed, CallError> { fn call(&self, t: &SignedTransaction, block: BlockId, analytics: CallAnalytics) -> Result<Executed, CallError> {
let header = self.block_header(block).ok_or(CallError::StatePruned)?; let mut env_info = self.env_info(block).ok_or(CallError::StatePruned)?;
let last_hashes = self.build_last_hashes(header.parent_hash()); env_info.gas_limit = U256::max_value();
let env_info = EnvInfo {
number: header.number(),
author: header.author(),
timestamp: header.timestamp(),
difficulty: header.difficulty(),
last_hashes: last_hashes,
gas_used: U256::zero(),
gas_limit: U256::max_value(),
};
// that's just a copy of the state. // that's just a copy of the state.
let mut state = self.state_at(block).ok_or(CallError::StatePruned)?; let mut state = self.state_at(block).ok_or(CallError::StatePruned)?;
let original_state = if analytics.state_diffing { Some(state.clone()) } else { None }; let original_state = if analytics.state_diffing { Some(state.clone()) } else { None };
@ -910,17 +908,13 @@ impl BlockChainClient for Client {
fn estimate_gas(&self, t: &SignedTransaction, block: BlockId) -> Result<U256, CallError> { fn estimate_gas(&self, t: &SignedTransaction, block: BlockId) -> Result<U256, CallError> {
const UPPER_CEILING: u64 = 1_000_000_000_000u64; const UPPER_CEILING: u64 = 1_000_000_000_000u64;
let header = self.block_header(block).ok_or(CallError::StatePruned)?; let (mut upper, env_info) = {
let last_hashes = self.build_last_hashes(header.parent_hash()); let mut env_info = self.env_info(block).ok_or(CallError::StatePruned)?;
let env_info = EnvInfo { let initial_upper = env_info.gas_limit;
number: header.number(), env_info.gas_limit = UPPER_CEILING.into();
author: header.author(), (initial_upper, env_info)
timestamp: header.timestamp(),
difficulty: header.difficulty(),
last_hashes: last_hashes,
gas_used: U256::zero(),
gas_limit: UPPER_CEILING.into(),
}; };
// that's just a copy of the state. // that's just a copy of the state.
let original_state = self.state_at(block).ok_or(CallError::StatePruned)?; let original_state = self.state_at(block).ok_or(CallError::StatePruned)?;
let sender = t.sender(); let sender = t.sender();
@ -946,7 +940,6 @@ impl BlockChainClient for Client {
.unwrap_or(false)) .unwrap_or(false))
}; };
let mut upper = header.gas_limit();
if !cond(upper)? { if !cond(upper)? {
// impossible at block gas limit - try `UPPER_CEILING` instead. // impossible at block gas limit - try `UPPER_CEILING` instead.
// TODO: consider raising limit by powers of two. // TODO: consider raising limit by powers of two.
@ -989,7 +982,7 @@ impl BlockChainClient for Client {
fn replay(&self, id: TransactionId, analytics: CallAnalytics) -> Result<Executed, CallError> { fn replay(&self, id: TransactionId, analytics: CallAnalytics) -> Result<Executed, CallError> {
let address = self.transaction_address(id).ok_or(CallError::TransactionNotFound)?; let address = self.transaction_address(id).ok_or(CallError::TransactionNotFound)?;
let header = self.block_header(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?; let mut env_info = self.env_info(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?;
let body = self.block_body(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?; let body = self.block_body(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?;
let mut state = self.state_at_beginning(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?; let mut state = self.state_at_beginning(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?;
let mut txs = body.transactions(); let mut txs = body.transactions();
@ -999,16 +992,6 @@ impl BlockChainClient for Client {
} }
let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
let last_hashes = self.build_last_hashes(header.hash());
let mut env_info = EnvInfo {
number: header.number(),
author: header.author(),
timestamp: header.timestamp(),
difficulty: header.difficulty(),
last_hashes: last_hashes,
gas_used: U256::default(),
gas_limit: header.gas_limit(),
};
const PROOF: &'static str = "Transactions fetched from blockchain; blockchain transactions are valid; qed"; const PROOF: &'static str = "Transactions fetched from blockchain; blockchain transactions are valid; qed";
let rest = txs.split_off(address.index); let rest = txs.split_off(address.index);
for t in txs { for t in txs {
@ -1620,6 +1603,25 @@ impl ::client::ProvingBlockChainClient for Client {
.and_then(|x| x) .and_then(|x| x)
.unwrap_or_else(Vec::new) .unwrap_or_else(Vec::new)
} }
fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<Vec<DBValue>> {
let (state, env_info) = match (self.state_at(id), self.env_info(id)) {
(Some(s), Some(e)) => (s, e),
_ => return None,
};
let mut jdb = self.state_db.lock().journal_db().boxed_clone();
let backend = state::backend::Proving::new(jdb.as_hashdb_mut());
let mut state = state.replace_backend(backend);
let options = TransactOptions { tracing: false, vm_tracing: false, check_nonce: false };
let res = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(&transaction, options);
match res {
Err(ExecutionError::Internal(_)) => return None,
_ => return Some(state.drop().1.extract_proof()),
}
}
} }
impl Drop for Client { impl Drop for Client {

View File

@ -765,6 +765,10 @@ impl ProvingBlockChainClient for TestBlockChainClient {
fn code_by_hash(&self, _: H256, _: BlockId) -> Bytes { fn code_by_hash(&self, _: H256, _: BlockId) -> Bytes {
Vec::new() Vec::new()
} }
fn prove_transaction(&self, _: SignedTransaction, _: BlockId) -> Option<Vec<DBValue>> {
None
}
} }
impl EngineClient for TestBlockChainClient { impl EngineClient for TestBlockChainClient {

View File

@ -16,6 +16,7 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use util::{U256, Address, H256, H2048, Bytes, Itertools}; use util::{U256, Address, H256, H2048, Bytes, Itertools};
use util::hashdb::DBValue;
use blockchain::TreeRoute; use blockchain::TreeRoute;
use verification::queue::QueueInfo as BlockQueueInfo; use verification::queue::QueueInfo as BlockQueueInfo;
use block::{OpenBlock, SealedBlock}; use block::{OpenBlock, SealedBlock};
@ -321,4 +322,7 @@ pub trait ProvingBlockChainClient: BlockChainClient {
/// Get code by address hash. /// Get code by address hash.
fn code_by_hash(&self, account_key: H256, id: BlockId) -> Bytes; fn code_by_hash(&self, account_key: H256, id: BlockId) -> Bytes;
/// Prove execution of a transaction at the given block.
fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<Vec<DBValue>>;
} }

View File

@ -14,6 +14,8 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Environment information for transaction execution.
use std::cmp; use std::cmp;
use std::sync::Arc; use std::sync::Arc;
use util::{U256, Address, H256, Hashable}; use util::{U256, Address, H256, Hashable};
@ -25,7 +27,7 @@ use ethjson;
pub type LastHashes = Vec<H256>; pub type LastHashes = Vec<H256>;
/// Information concerning the execution environment for a message-call/contract-creation. /// Information concerning the execution environment for a message-call/contract-creation.
#[derive(Debug)] #[derive(Debug, Clone)]
pub struct EnvInfo { pub struct EnvInfo {
/// The block number. /// The block number.
pub number: BlockNumber, pub number: BlockNumber,

View File

@ -79,7 +79,6 @@
//! cargo build --release //! cargo build --release
//! ``` //! ```
extern crate ethcore_io as io; extern crate ethcore_io as io;
extern crate rustc_serialize; extern crate rustc_serialize;
extern crate crypto; extern crate crypto;
@ -141,12 +140,12 @@ pub mod action_params;
pub mod db; pub mod db;
pub mod verification; pub mod verification;
pub mod state; pub mod state;
pub mod env_info;
#[macro_use] pub mod evm; #[macro_use] pub mod evm;
mod cache_manager; mod cache_manager;
mod blooms; mod blooms;
mod basic_types; mod basic_types;
mod env_info;
mod pod_account; mod pod_account;
mod state_db; mod state_db;
mod account_db; mod account_db;

View File

@ -21,10 +21,12 @@
//! should become general over time to the point where not even a //! should become general over time to the point where not even a
//! merkle trie is strictly necessary. //! merkle trie is strictly necessary.
use std::collections::{HashSet, HashMap};
use std::sync::Arc; use std::sync::Arc;
use state::Account; use state::Account;
use util::{Address, AsHashDB, HashDB, H256}; use util::{Address, MemoryDB, Mutex, H256};
use util::hashdb::{AsHashDB, HashDB, DBValue};
/// State backend. See module docs for more details. /// State backend. See module docs for more details.
pub trait Backend: Send { pub trait Backend: Send {
@ -64,21 +66,48 @@ pub trait Backend: Send {
fn is_known_null(&self, address: &Address) -> bool; fn is_known_null(&self, address: &Address) -> bool;
} }
/// A raw backend which simply wraps a hashdb and does no caching. /// A raw backend used to check proofs of execution.
#[derive(Debug, Clone, PartialEq, Eq)] ///
pub struct NoCache<T>(T); /// This doesn't delete anything since execution proofs won't have mangled keys
/// and we want to avoid collisions.
// TODO: when account lookup moved into backends, this won't rely as tenuously on intended
// usage.
#[derive(Clone, PartialEq)]
pub struct ProofCheck(MemoryDB);
impl<T> NoCache<T> { impl ProofCheck {
/// Create a new `NoCache` backend. /// Create a new `ProofCheck` backend from the given state items.
pub fn new(inner: T) -> Self { NoCache(inner) } pub fn new(proof: &[DBValue]) -> Self {
let mut db = MemoryDB::new();
/// Consume the backend, yielding the inner database. for item in proof { db.insert(item); }
pub fn into_inner(self) -> T { self.0 } ProofCheck(db)
}
} }
impl<T: AsHashDB + Send> Backend for NoCache<T> { impl HashDB for ProofCheck {
fn as_hashdb(&self) -> &HashDB { self.0.as_hashdb() } fn keys(&self) -> HashMap<H256, i32> { self.0.keys() }
fn as_hashdb_mut(&mut self) -> &mut HashDB { self.0.as_hashdb_mut() } fn get(&self, key: &H256) -> Option<DBValue> {
self.0.get(key)
}
fn contains(&self, key: &H256) -> bool {
self.0.contains(key)
}
fn insert(&mut self, value: &[u8]) -> H256 {
self.0.insert(value)
}
fn emplace(&mut self, key: H256, value: DBValue) {
self.0.emplace(key, value)
}
fn remove(&mut self, _key: &H256) { }
}
impl Backend for ProofCheck {
fn as_hashdb(&self) -> &HashDB { self }
fn as_hashdb_mut(&mut self) -> &mut HashDB { self }
fn add_to_account_cache(&mut self, _addr: Address, _data: Option<Account>, _modified: bool) {} fn add_to_account_cache(&mut self, _addr: Address, _data: Option<Account>, _modified: bool) {}
fn cache_code(&self, _hash: H256, _code: Arc<Vec<u8>>) {} fn cache_code(&self, _hash: H256, _code: Arc<Vec<u8>>) {}
fn get_cached_account(&self, _addr: &Address) -> Option<Option<Account>> { None } fn get_cached_account(&self, _addr: &Address) -> Option<Option<Account>> { None }
@ -91,3 +120,104 @@ impl<T: AsHashDB + Send> Backend for NoCache<T> {
fn note_non_null_account(&self, _address: &Address) {} fn note_non_null_account(&self, _address: &Address) {}
fn is_known_null(&self, _address: &Address) -> bool { false } fn is_known_null(&self, _address: &Address) -> bool { false }
} }
/// Proving state backend.
/// This keeps track of all state values loaded during usage of this backend.
/// The proof-of-execution can be extracted with `extract_proof`.
///
/// This doesn't cache anything or rely on the canonical state caches.
pub struct Proving<H: AsHashDB> {
base: H, // state we're proving values from.
changed: MemoryDB, // changed state via insertions.
proof: Mutex<HashSet<DBValue>>,
}
impl<H: AsHashDB + Send + Sync> HashDB for Proving<H> {
fn keys(&self) -> HashMap<H256, i32> {
let mut keys = self.base.as_hashdb().keys();
keys.extend(self.changed.keys());
keys
}
fn get(&self, key: &H256) -> Option<DBValue> {
match self.base.as_hashdb().get(key) {
Some(val) => {
self.proof.lock().insert(val.clone());
Some(val)
}
None => self.changed.get(key)
}
}
fn contains(&self, key: &H256) -> bool {
self.get(key).is_some()
}
fn insert(&mut self, value: &[u8]) -> H256 {
self.changed.insert(value)
}
fn emplace(&mut self, key: H256, value: DBValue) {
self.changed.emplace(key, value)
}
fn remove(&mut self, key: &H256) {
// only remove from `changed`
if self.changed.contains(key) {
self.changed.remove(key)
}
}
}
impl<H: AsHashDB + Send + Sync> Backend for Proving<H> {
fn as_hashdb(&self) -> &HashDB {
self
}
fn as_hashdb_mut(&mut self) -> &mut HashDB {
self
}
fn add_to_account_cache(&mut self, _: Address, _: Option<Account>, _: bool) { }
fn cache_code(&self, _: H256, _: Arc<Vec<u8>>) { }
fn get_cached_account(&self, _: &Address) -> Option<Option<Account>> { None }
fn get_cached<F, U>(&self, _: &Address, _: F) -> Option<U>
where F: FnOnce(Option<&mut Account>) -> U
{
None
}
fn get_cached_code(&self, _: &H256) -> Option<Arc<Vec<u8>>> { None }
fn note_non_null_account(&self, _: &Address) { }
fn is_known_null(&self, _: &Address) -> bool { false }
}
impl<H: AsHashDB> Proving<H> {
/// Create a new `Proving` over a base database.
/// This will store all values ever fetched from that base.
pub fn new(base: H) -> Self {
Proving {
base: base,
changed: MemoryDB::new(),
proof: Mutex::new(HashSet::new()),
}
}
/// Consume the backend, extracting the gathered proof.
pub fn extract_proof(self) -> Vec<DBValue> {
self.proof.into_inner().into_iter().collect()
}
}
impl<H: AsHashDB + Clone> Clone for Proving<H> {
fn clone(&self) -> Self {
Proving {
base: self.base.clone(),
changed: self.changed.clone(),
proof: Mutex::new(self.proof.lock().clone()),
}
}
}

View File

@ -31,6 +31,7 @@ use factory::Factories;
use trace::FlatTrace; use trace::FlatTrace;
use pod_account::*; use pod_account::*;
use pod_state::{self, PodState}; use pod_state::{self, PodState};
use types::executed::{Executed, ExecutionError};
use types::state_diff::StateDiff; use types::state_diff::StateDiff;
use transaction::SignedTransaction; use transaction::SignedTransaction;
use state_db::StateDB; use state_db::StateDB;
@ -60,6 +61,17 @@ pub struct ApplyOutcome {
/// Result type for the execution ("application") of a transaction. /// Result type for the execution ("application") of a transaction.
pub type ApplyResult = Result<ApplyOutcome, Error>; pub type ApplyResult = Result<ApplyOutcome, Error>;
/// Return type of proof validity check.
#[derive(Debug, Clone)]
pub enum ProvedExecution {
/// Proof wasn't enough to complete execution.
BadProof,
/// The transaction failed, but not due to a bad proof.
Failed(ExecutionError),
/// The transaction successfully completd with the given proof.
Complete(Executed),
}
#[derive(Eq, PartialEq, Clone, Copy, Debug)] #[derive(Eq, PartialEq, Clone, Copy, Debug)]
/// Account modification state. Used to check if the account was /// Account modification state. Used to check if the account was
/// Modified in between commits and overall. /// Modified in between commits and overall.
@ -150,6 +162,39 @@ impl AccountEntry {
} }
} }
/// Check the given proof of execution.
/// `Err(ExecutionError::Internal)` indicates failure, everything else indicates
/// a successful proof (as the transaction itself may be poorly chosen).
pub fn check_proof(
proof: &[::util::DBValue],
root: H256,
transaction: &SignedTransaction,
engine: &Engine,
env_info: &EnvInfo,
) -> ProvedExecution {
let backend = self::backend::ProofCheck::new(proof);
let mut factories = Factories::default();
factories.accountdb = ::account_db::Factory::Plain;
let res = State::from_existing(
backend,
root,
engine.account_start_nonce(),
factories
);
let mut state = match res {
Ok(state) => state,
Err(_) => return ProvedExecution::BadProof,
};
match state.execute(env_info, engine, transaction, false) {
Ok(executed) => ProvedExecution::Complete(executed),
Err(ExecutionError::Internal(_)) => ProvedExecution::BadProof,
Err(e) => ProvedExecution::Failed(e),
}
}
/// Representation of the entire state of all accounts in the system. /// Representation of the entire state of all accounts in the system.
/// ///
/// `State` can work together with `StateDB` to share account cache. /// `State` can work together with `StateDB` to share account cache.
@ -264,6 +309,19 @@ impl<B: Backend> State<B> {
Ok(state) Ok(state)
} }
/// Swap the current backend for another.
// TODO: [rob] find a less hacky way to avoid duplication of `Client::state_at`.
pub fn replace_backend<T: Backend>(self, backend: T) -> State<T> {
State {
db: backend,
root: self.root,
cache: self.cache,
checkpoints: self.checkpoints,
account_start_nonce: self.account_start_nonce,
factories: self.factories,
}
}
/// Create a recoverable checkpoint of this state. /// Create a recoverable checkpoint of this state.
pub fn checkpoint(&mut self) { pub fn checkpoint(&mut self) {
self.checkpoints.get_mut().push(HashMap::new()); self.checkpoints.get_mut().push(HashMap::new());
@ -535,16 +593,12 @@ impl<B: Backend> State<B> {
Ok(()) Ok(())
} }
/// Execute a given transaction. /// Execute a given transaction, producing a receipt and an optional trace.
/// This will change the state accordingly. /// This will change the state accordingly.
pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> ApplyResult { pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> ApplyResult {
// let old = self.to_pod(); // let old = self.to_pod();
let options = TransactOptions { tracing: tracing, vm_tracing: false, check_nonce: true }; let e = self.execute(env_info, engine, t, tracing)?;
let vm_factory = self.factories.vm.clone();
let e = Executive::new(self, env_info, engine, &vm_factory).transact(t, options)?;
// TODO uncomment once to_pod() works correctly.
// trace!("Applied transaction. Diff:\n{}\n", state_diff::diff_pod(&old, &self.to_pod())); // trace!("Applied transaction. Diff:\n{}\n", state_diff::diff_pod(&old, &self.to_pod()));
let state_root = if env_info.number < engine.params().eip98_transition { let state_root = if env_info.number < engine.params().eip98_transition {
self.commit()?; self.commit()?;
@ -557,6 +611,15 @@ impl<B: Backend> State<B> {
Ok(ApplyOutcome{receipt: receipt, trace: e.trace}) Ok(ApplyOutcome{receipt: receipt, trace: e.trace})
} }
// Execute a given transaction.
fn execute(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> Result<Executed, ExecutionError> {
let options = TransactOptions { tracing: tracing, vm_tracing: false, check_nonce: true };
let vm_factory = self.factories.vm.clone();
Executive::new(self, env_info, engine, &vm_factory).transact(t, options)
}
/// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit. /// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit.
/// `accounts` is mutable because we may need to commit the code or storage and record that. /// `accounts` is mutable because we may need to commit the code or storage and record that.
#[cfg_attr(feature="dev", allow(match_ref_pats))] #[cfg_attr(feature="dev", allow(match_ref_pats))]

View File

@ -16,7 +16,8 @@
use io::IoChannel; use io::IoChannel;
use client::{BlockChainClient, MiningBlockChainClient, Client, ClientConfig, BlockId}; use client::{BlockChainClient, MiningBlockChainClient, Client, ClientConfig, BlockId};
use state::CleanupMode; use state::{self, State, CleanupMode};
use executive::Executive;
use ethereum; use ethereum;
use block::IsBlock; use block::IsBlock;
use tests::helpers::*; use tests::helpers::*;
@ -341,3 +342,43 @@ fn does_not_propagate_delayed_transactions() {
assert_eq!(2, client.ready_transactions().len()); assert_eq!(2, client.ready_transactions().len());
assert_eq!(2, client.miner().pending_transactions().len()); assert_eq!(2, client.miner().pending_transactions().len());
} }
#[test]
fn transaction_proof() {
use ::client::ProvingBlockChainClient;
let client_result = generate_dummy_client(0);
let client = client_result.reference();
let address = Address::random();
let test_spec = Spec::new_test();
for _ in 0..20 {
let mut b = client.prepare_open_block(Address::default(), (3141562.into(), 31415620.into()), vec![]);
b.block_mut().fields_mut().state.add_balance(&address, &5.into(), CleanupMode::NoEmpty).unwrap();
b.block_mut().fields_mut().state.commit().unwrap();
let b = b.close_and_lock().seal(&*test_spec.engine, vec![]).unwrap();
client.import_sealed_block(b).unwrap(); // account change is in the journal overlay
}
let transaction = Transaction {
nonce: 0.into(),
gas_price: 0.into(),
gas: 21000.into(),
action: Action::Call(Address::default()),
value: 5.into(),
data: Vec::new(),
}.fake_sign(address);
let proof = client.prove_transaction(transaction.clone(), BlockId::Latest).unwrap();
let backend = state::backend::ProofCheck::new(&proof);
let mut factories = ::factory::Factories::default();
factories.accountdb = ::account_db::Factory::Plain; // raw state values, no mangled keys.
let root = client.best_block_header().state_root();
let mut state = State::from_existing(backend, root, 0.into(), factories.clone()).unwrap();
Executive::new(&mut state, &client.latest_env_info(), &*test_spec.engine, &factories.vm)
.transact(&transaction, Default::default()).unwrap();
assert_eq!(state.balance(&Address::default()).unwrap(), 5.into());
assert_eq!(state.balance(&address).unwrap(), 95.into());
}

View File

@ -158,6 +158,54 @@ impl<C: MiningBlockChainClient, M: MinerService> Dispatcher for FullDispatcher<C
} }
} }
/// Get a recent gas price corpus.
// TODO: this could be `impl Trait`.
pub fn fetch_gas_price_corpus(
sync: Arc<LightSync>,
client: Arc<LightChainClient>,
on_demand: Arc<OnDemand>,
cache: Arc<Mutex<LightDataCache>>,
) -> BoxFuture<Corpus<U256>, Error> {
const GAS_PRICE_SAMPLE_SIZE: usize = 100;
if let Some(cached) = cache.lock().gas_price_corpus() {
return future::ok(cached).boxed()
}
let cache = cache.clone();
let eventual_corpus = sync.with_context(|ctx| {
// get some recent headers with gas used,
// and request each of the blocks from the network.
let block_futures = client.ancestry_iter(BlockId::Latest)
.filter(|hdr| hdr.gas_used() != U256::default())
.take(GAS_PRICE_SAMPLE_SIZE)
.map(request::Body::new)
.map(|req| on_demand.block(ctx, req));
// as the blocks come in, collect gas prices into a vector
stream::futures_unordered(block_futures)
.fold(Vec::new(), |mut v, block| {
for t in block.transaction_views().iter() {
v.push(t.gas_price())
}
future::ok(v)
})
.map(move |v| {
// produce a corpus from the vector, cache it, and return
// the median as the intended gas price.
let corpus: ::stats::Corpus<_> = v.into();
cache.lock().set_gas_price_corpus(corpus.clone());
corpus
})
});
match eventual_corpus {
Some(corp) => corp.map_err(|_| errors::no_light_peers()).boxed(),
None => future::err(errors::network_disabled()).boxed(),
}
}
/// Dispatcher for light clients -- fetches default gas price, next nonce, etc. from network. /// Dispatcher for light clients -- fetches default gas price, next nonce, etc. from network.
/// Light client `ETH` RPC. /// Light client `ETH` RPC.
#[derive(Clone)] #[derive(Clone)]
@ -197,44 +245,12 @@ impl LightDispatcher {
/// Get a recent gas price corpus. /// Get a recent gas price corpus.
// TODO: this could be `impl Trait`. // TODO: this could be `impl Trait`.
pub fn gas_price_corpus(&self) -> BoxFuture<Corpus<U256>, Error> { pub fn gas_price_corpus(&self) -> BoxFuture<Corpus<U256>, Error> {
const GAS_PRICE_SAMPLE_SIZE: usize = 100; fetch_gas_price_corpus(
self.sync.clone(),
if let Some(cached) = self.cache.lock().gas_price_corpus() { self.client.clone(),
return future::ok(cached).boxed() self.on_demand.clone(),
} self.cache.clone(),
)
let cache = self.cache.clone();
let eventual_corpus = self.sync.with_context(|ctx| {
// get some recent headers with gas used,
// and request each of the blocks from the network.
let block_futures = self.client.ancestry_iter(BlockId::Latest)
.filter(|hdr| hdr.gas_used() != U256::default())
.take(GAS_PRICE_SAMPLE_SIZE)
.map(request::Body::new)
.map(|req| self.on_demand.block(ctx, req));
// as the blocks come in, collect gas prices into a vector
stream::futures_unordered(block_futures)
.fold(Vec::new(), |mut v, block| {
for t in block.transaction_views().iter() {
v.push(t.gas_price())
}
future::ok(v)
})
.map(move |v| {
// produce a corpus from the vector, cache it, and return
// the median as the intended gas price.
let corpus: ::stats::Corpus<_> = v.into();
cache.lock().set_gas_price_corpus(corpus.clone());
corpus
})
});
match eventual_corpus {
Some(corp) => corp.map_err(|_| errors::no_light_peers()).boxed(),
None => future::err(errors::network_disabled()).boxed(),
}
} }
/// Get an account's next nonce. /// Get an account's next nonce.
@ -285,7 +301,12 @@ impl Dispatcher for LightDispatcher {
// fast path for known gas price. // fast path for known gas price.
match request_gas_price { match request_gas_price {
Some(gas_price) => future::ok(with_gas_price(gas_price)).boxed(), Some(gas_price) => future::ok(with_gas_price(gas_price)).boxed(),
None => self.gas_price_corpus().and_then(|corp| match corp.median() { None => fetch_gas_price_corpus(
self.sync.clone(),
self.client.clone(),
self.on_demand.clone(),
self.cache.clone()
).and_then(|corp| match corp.median() {
Some(median) => future::ok(*median), Some(median) => future::ok(*median),
None => future::ok(DEFAULT_GAS_PRICE), // fall back to default on error. None => future::ok(DEFAULT_GAS_PRICE), // fall back to default on error.
}).map(with_gas_price).boxed() }).map(with_gas_price).boxed()

View File

@ -632,26 +632,34 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM> Eth for EthClient<C, SN, S, M, EM> where
self.send_raw_transaction(raw) self.send_raw_transaction(raw)
} }
fn call(&self, request: CallRequest, num: Trailing<BlockNumber>) -> Result<Bytes, Error> { fn call(&self, request: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<Bytes, Error> {
let request = CallRequest::into(request); let request = CallRequest::into(request);
let signed = self.sign_call(request)?; let signed = match self.sign_call(request) {
Ok(signed) => signed,
let result = match num.0 { Err(e) => return future::err(e).boxed(),
BlockNumber::Pending => take_weak!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()),
num => take_weak!(self.client).call(&signed, num.into(), Default::default()),
}; };
result let result = match num.0 {
BlockNumber::Pending => take_weakf!(self.miner).call(&*take_weakf!(self.client), &signed, Default::default()),
num => take_weakf!(self.client).call(&signed, num.into(), Default::default()),
};
future::done(result
.map(|b| b.output.into()) .map(|b| b.output.into())
.map_err(errors::from_call_error) .map_err(errors::from_call_error)
).boxed()
} }
fn estimate_gas(&self, request: CallRequest, num: Trailing<BlockNumber>) -> Result<RpcU256, Error> { fn estimate_gas(&self, request: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> {
let request = CallRequest::into(request); let request = CallRequest::into(request);
let signed = self.sign_call(request)?; let signed = match self.sign_call(request) {
take_weak!(self.client).estimate_gas(&signed, num.0.into()) Ok(signed) => signed,
Err(e) => return future::err(e).boxed(),
};
future::done(take_weakf!(self.client).estimate_gas(&signed, num.0.into())
.map(Into::into) .map(Into::into)
.map_err(errors::from_call_error) .map_err(errors::from_call_error)
).boxed()
} }
fn compile_lll(&self, _: String) -> Result<Bytes, Error> { fn compile_lll(&self, _: String) -> Result<Bytes, Error> {

View File

@ -24,6 +24,7 @@ use std::sync::Arc;
use jsonrpc_core::Error; use jsonrpc_core::Error;
use jsonrpc_macros::Trailing; use jsonrpc_macros::Trailing;
use light::cache::Cache as LightDataCache;
use light::client::Client as LightClient; use light::client::Client as LightClient;
use light::{cht, TransactionQueue}; use light::{cht, TransactionQueue};
use light::on_demand::{request, OnDemand}; use light::on_demand::{request, OnDemand};
@ -31,17 +32,18 @@ use light::on_demand::{request, OnDemand};
use ethcore::account_provider::{AccountProvider, DappId}; use ethcore::account_provider::{AccountProvider, DappId};
use ethcore::basic_account::BasicAccount; use ethcore::basic_account::BasicAccount;
use ethcore::encoded; use ethcore::encoded;
use ethcore::executed::{Executed, ExecutionError};
use ethcore::ids::BlockId; use ethcore::ids::BlockId;
use ethcore::transaction::SignedTransaction; use ethcore::transaction::{Action, SignedTransaction, Transaction as EthTransaction};
use ethsync::LightSync; use ethsync::LightSync;
use rlp::{UntrustedRlp, View}; use rlp::{UntrustedRlp, View};
use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP};
use util::{RwLock, U256}; use util::{RwLock, Mutex, FixedHash, Uint, U256};
use futures::{future, Future, BoxFuture}; use futures::{future, Future, BoxFuture, IntoFuture};
use futures::sync::oneshot; use futures::sync::oneshot;
use v1::helpers::{CallRequest as CRequest, errors, limit_logs}; use v1::helpers::{CallRequest as CRequest, errors, limit_logs, dispatch};
use v1::helpers::block_import::is_major_importing; use v1::helpers::block_import::is_major_importing;
use v1::traits::Eth; use v1::traits::Eth;
use v1::types::{ use v1::types::{
@ -60,6 +62,7 @@ pub struct EthClient {
on_demand: Arc<OnDemand>, on_demand: Arc<OnDemand>,
transaction_queue: Arc<RwLock<TransactionQueue>>, transaction_queue: Arc<RwLock<TransactionQueue>>,
accounts: Arc<AccountProvider>, accounts: Arc<AccountProvider>,
cache: Arc<Mutex<LightDataCache>>,
} }
// helper for internal error: on demand sender cancelled. // helper for internal error: on demand sender cancelled.
@ -67,6 +70,8 @@ fn err_premature_cancel(_cancel: oneshot::Canceled) -> Error {
errors::internal("on-demand sender prematurely cancelled", "") errors::internal("on-demand sender prematurely cancelled", "")
} }
type ExecutionResult = Result<Executed, ExecutionError>;
impl EthClient { impl EthClient {
/// Create a new `EthClient` with a handle to the light sync instance, client, /// Create a new `EthClient` with a handle to the light sync instance, client,
/// and on-demand request service, which is assumed to be attached as a handler. /// and on-demand request service, which is assumed to be attached as a handler.
@ -76,6 +81,7 @@ impl EthClient {
on_demand: Arc<OnDemand>, on_demand: Arc<OnDemand>,
transaction_queue: Arc<RwLock<TransactionQueue>>, transaction_queue: Arc<RwLock<TransactionQueue>>,
accounts: Arc<AccountProvider>, accounts: Arc<AccountProvider>,
cache: Arc<Mutex<LightDataCache>>,
) -> Self { ) -> Self {
EthClient { EthClient {
sync: sync, sync: sync,
@ -83,6 +89,7 @@ impl EthClient {
on_demand: on_demand, on_demand: on_demand,
transaction_queue: transaction_queue, transaction_queue: transaction_queue,
accounts: accounts, accounts: accounts,
cache: cache,
} }
} }
@ -147,6 +154,80 @@ impl EthClient {
.unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) .unwrap_or_else(|| future::err(errors::network_disabled()).boxed())
}).boxed() }).boxed()
} }
// helper for getting proved execution.
fn proved_execution(&self, req: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<ExecutionResult, Error> {
const DEFAULT_GAS_PRICE: U256 = U256([0, 0, 0, 21_000_000]);
let (sync, on_demand, client) = (self.sync.clone(), self.on_demand.clone(), self.client.clone());
let req: CRequest = req.into();
let id = num.0.into();
let from = req.from.unwrap_or(Address::zero());
let nonce_fut = match req.nonce {
Some(nonce) => future::ok(Some(nonce)).boxed(),
None => self.account(from, id).map(|acc| acc.map(|a| a.nonce)).boxed(),
};
let gas_price_fut = match req.gas_price {
Some(price) => future::ok(price).boxed(),
None => dispatch::fetch_gas_price_corpus(
self.sync.clone(),
self.client.clone(),
self.on_demand.clone(),
self.cache.clone(),
).map(|corp| match corp.median() {
Some(median) => *median,
None => DEFAULT_GAS_PRICE,
}).boxed()
};
// if nonce resolves, this should too since it'll be in the LRU-cache.
let header_fut = self.header(id);
// fetch missing transaction fields from the network.
nonce_fut.join(gas_price_fut).and_then(move |(nonce, gas_price)| {
let action = req.to.map_or(Action::Create, Action::Call);
let gas = req.gas.unwrap_or(U256::from(10_000_000)); // better gas amount?
let value = req.value.unwrap_or_else(U256::zero);
let data = req.data.map_or_else(Vec::new, |d| d.to_vec());
future::done(match nonce {
Some(n) => Ok(EthTransaction {
nonce: n,
action: action,
gas: gas,
gas_price: gas_price,
value: value,
data: data,
}.fake_sign(from)),
None => Err(errors::unknown_block()),
})
}).join(header_fut).and_then(move |(tx, hdr)| {
// then request proved execution.
// TODO: get last-hashes from network.
let (env_info, hdr) = match (client.env_info(id), hdr) {
(Some(env_info), Some(hdr)) => (env_info, hdr),
_ => return future::err(errors::unknown_block()).boxed(),
};
let request = request::TransactionProof {
tx: tx,
header: hdr,
env_info: env_info,
engine: client.engine().clone(),
};
let proved_future = sync.with_context(move |ctx| {
on_demand.transaction_proof(ctx, request).map_err(err_premature_cancel).boxed()
});
match proved_future {
Some(fut) => fut.boxed(),
None => future::err(errors::network_disabled()).boxed(),
}
}).boxed()
}
} }
impl Eth for EthClient { impl Eth for EthClient {
@ -322,12 +403,23 @@ impl Eth for EthClient {
self.send_raw_transaction(raw) self.send_raw_transaction(raw)
} }
fn call(&self, req: CallRequest, num: Trailing<BlockNumber>) -> Result<Bytes, Error> { fn call(&self, req: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<Bytes, Error> {
Err(errors::unimplemented(None)) self.proved_execution(req, num).and_then(|res| {
match res {
Ok(exec) => Ok(exec.output.into()),
Err(e) => Err(errors::execution(e)),
}
}).boxed()
} }
fn estimate_gas(&self, req: CallRequest, num: Trailing<BlockNumber>) -> Result<RpcU256, Error> { fn estimate_gas(&self, req: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> {
Err(errors::unimplemented(None)) // TODO: binary chop for more accurate estimates.
self.proved_execution(req, num).and_then(|res| {
match res {
Ok(exec) => Ok((exec.refunded + exec.gas_used).into()),
Err(e) => Err(errors::execution(e)),
}
}).boxed()
} }
fn transaction_by_hash(&self, hash: RpcH256) -> Result<Option<Transaction>, Error> { fn transaction_by_hash(&self, hash: RpcH256) -> Result<Option<Transaction>, Error> {
@ -355,19 +447,20 @@ impl Eth for EthClient {
} }
fn compilers(&self) -> Result<Vec<String>, Error> { fn compilers(&self) -> Result<Vec<String>, Error> {
Err(errors::unimplemented(None)) Err(errors::deprecated("Compilation functionality is deprecated.".to_string()))
} }
fn compile_lll(&self, _code: String) -> Result<Bytes, Error> { fn compile_lll(&self, _: String) -> Result<Bytes, Error> {
Err(errors::unimplemented(None)) Err(errors::deprecated("Compilation of LLL via RPC is deprecated".to_string()))
} }
fn compile_solidity(&self, _code: String) -> Result<Bytes, Error> { fn compile_serpent(&self, _: String) -> Result<Bytes, Error> {
Err(errors::unimplemented(None)) Err(errors::deprecated("Compilation of Serpent via RPC is deprecated".to_string()))
} }
fn compile_serpent(&self, _code: String) -> Result<Bytes, Error> { fn compile_solidity(&self, _: String) -> Result<Bytes, Error> {
Err(errors::unimplemented(None)) Err(errors::deprecated("Compilation of Solidity via RPC is deprecated".to_string()))
} }
fn logs(&self, _filter: Filter) -> Result<Vec<Log>, Error> { fn logs(&self, _filter: Filter) -> Result<Vec<Log>, Error> {

View File

@ -110,12 +110,12 @@ build_rpc_trait! {
fn submit_transaction(&self, Bytes) -> Result<H256, Error>; fn submit_transaction(&self, Bytes) -> Result<H256, Error>;
/// Call contract, returning the output data. /// Call contract, returning the output data.
#[rpc(name = "eth_call")] #[rpc(async, name = "eth_call")]
fn call(&self, CallRequest, Trailing<BlockNumber>) -> Result<Bytes, Error>; fn call(&self, CallRequest, Trailing<BlockNumber>) -> BoxFuture<Bytes, Error>;
/// Estimate gas needed for execution of given contract. /// Estimate gas needed for execution of given contract.
#[rpc(name = "eth_estimateGas")] #[rpc(async, name = "eth_estimateGas")]
fn estimate_gas(&self, CallRequest, Trailing<BlockNumber>) -> Result<U256, Error>; fn estimate_gas(&self, CallRequest, Trailing<BlockNumber>) -> BoxFuture<U256, Error>;
/// Get transaction by its hash. /// Get transaction by its hash.
#[rpc(name = "eth_getTransactionByHash")] #[rpc(name = "eth_getTransactionByHash")]

View File

@ -27,7 +27,7 @@ use ethcore::spec::Spec;
use io::IoChannel; use io::IoChannel;
use light::client::Client as LightClient; use light::client::Client as LightClient;
use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams}; use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams};
use light::net::buffer_flow::FlowParams; use light::net::request_credits::FlowParams;
use network::{NodeId, PeerId}; use network::{NodeId, PeerId};
use util::RwLock; use util::RwLock;

View File

@ -125,3 +125,13 @@ impl<T: HashDB> AsHashDB for T {
self self
} }
} }
impl<'a> AsHashDB for &'a mut HashDB {
fn as_hashdb(&self) -> &HashDB {
&**self
}
fn as_hashdb_mut(&mut self) -> &mut HashDB {
&mut **self
}
}