Merge branch 'master' into ui-2

commit ab86f9e696

Cargo.lock (generated)
@@ -124,7 +124,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "bigint"
-version = "1.0.4"
+version = "1.0.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -441,7 +441,7 @@ name = "ethcore-bigint"
 version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "bigint 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bigint 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1783,7 +1783,7 @@ dependencies = [
 [[package]]
 name = "parity-ui-precompiled"
 version = "1.4.0"
-source = "git+https://github.com/paritytech/js-precompiled.git#05e0ea878ee54bed2e62a5f434663706bdf1919e"
+source = "git+https://github.com/paritytech/js-precompiled.git#3dd953a83569af644c5737a22c0ceb7d5f68b138"
 dependencies = [
  "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -2802,7 +2802,7 @@ dependencies = [
 "checksum aster 0.41.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfdf7355d9db158df68f976ed030ab0f6578af811f5a7bb6dcf221ec24e0e0"
 "checksum base-x 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2f59103b47307f76e03bef1633aec7fa9e29bfb5aa6daf5a334f94233c71f6c1"
 "checksum base32 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1b9605ba46d61df0410d8ac686b0007add8172eba90e8e909c347856fe794d8c"
-"checksum bigint 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4865ae66523e00114a17935fc03865323c668381e9e37fa96c525a8bbcc4e04f"
+"checksum bigint 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "5d1b3ef6756498df0e2c6bb67c065f4154d0ecd721eb5b3c3f865c8012b9fd74"
 "checksum bit-set 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e6e1e6fb1c9e3d6fcdec57216a74eaa03e41f52a22f13a16438251d8e88b89da"
 "checksum bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d9bf6104718e80d7b26a68fdbacff3481cfc05df670821affc7e9cbc1884400c"
 "checksum bit-vec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "5b97c2c8e8bbb4251754f559df8af22fb264853c7d009084a576cdf12565089d"

@@ -96,7 +96,7 @@ impl Drop for RandomTempPath {
 
 pub struct GuardedTempResult<T> {
 	pub result: Option<T>,
-	pub _temp: RandomTempPath
+	pub _temp: RandomTempPath,
 }
 
 impl<T> GuardedTempResult<T> {
@@ -52,6 +52,9 @@ stats = { path = "../util/stats" }
 time = "0.1"
 transient-hashmap = "0.4"
 
+[dev-dependencies]
+native-contracts = { path = "native_contracts", features = ["test_contracts"] }
+
 [features]
 jit = ["evmjit"]
 evm-debug = ["slow-blocks"]
@@ -44,13 +44,13 @@ pub mod provider;
 
 #[cfg(feature = "ipc")]
 pub mod provider {
-    #![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues
+	#![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues
 	include!(concat!(env!("OUT_DIR"), "/provider.rs"));
 }
 
 #[cfg(feature = "ipc")]
 pub mod remote {
-    pub use provider::LightProviderClient;
+	pub use provider::LightProviderClient;
 }
 
 mod types;
@@ -20,7 +20,7 @@ use network::{NetworkContext, PeerId, NodeId};
 
 use super::{Announcement, LightProtocol, ReqId};
 use super::error::Error;
-use request::Requests;
+use request::NetworkRequests as Requests;
 
 /// An I/O context which allows sending and receiving packets as well as
 /// disconnecting peers. This is used as a generalization of the portions
@@ -33,7 +33,7 @@ use std::sync::Arc;
 use std::sync::atomic::{AtomicUsize, Ordering};
 
 use provider::Provider;
-use request::{Request, Requests, Response};
+use request::{Request, NetworkRequests as Requests, Response};
 
 use self::request_credits::{Credits, FlowParams};
 use self::context::{Ctx, TickCtx};
@@ -108,9 +108,14 @@ mod timeout {
 }
 
 /// A request id.
+#[cfg(not(test))]
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]
 pub struct ReqId(usize);
 
+#[cfg(test)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]
+pub struct ReqId(pub usize);
+
 impl fmt::Display for ReqId {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		write!(f, "Request #{}", self.0)
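The duplicated definition above is a small visibility trick: the tuple field stays private in normal builds but becomes `pub` under `cfg(test)`, so tests can fabricate request ids directly. A minimal illustrative sketch (not part of this diff):

```rust
#[cfg(test)]
mod reqid_sketch {
	use super::ReqId;

	#[test]
	fn fabricate_id() {
		// legal only in test builds, where the inner field is `pub`
		let id = ReqId(42);
		assert_eq!(format!("{}", id), "Request #42");
	}
}
```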
@@ -25,7 +25,7 @@ use std::collections::{BTreeMap, HashMap};
 use std::iter::FromIterator;
 
 use request::Request;
-use request::Requests;
+use request::NetworkRequests as Requests;
 use net::{timeout, ReqId};
 use util::U256;
 
@@ -39,14 +39,14 @@ use std::sync::Arc;
 
 // helper for encoding a single request into a packet.
 // panics on bad backreference.
-fn encode_single(request: Request) -> Requests {
+fn encode_single(request: Request) -> NetworkRequests {
 	let mut builder = RequestBuilder::default();
 	builder.push(request).unwrap();
 	builder.build()
 }
 
 // helper for making a packet out of `Requests`.
-fn make_packet(req_id: usize, requests: &Requests) -> Vec<u8> {
+fn make_packet(req_id: usize, requests: &NetworkRequests) -> Vec<u8> {
 	let mut stream = RlpStream::new_list(2);
 	stream.append(&req_id).append_list(&requests.requests());
 	stream.out()
@@ -18,20 +18,17 @@
 //! The request service is implemented using Futures. Higher level request handlers
 //! will take the raw data received here and extract meaningful results from it.
 
-// TODO [ToDr] Suppressing deprecation warnings. Rob will fix the API anyway.
-#![allow(deprecated)]
-
 use std::collections::HashMap;
+use std::marker::PhantomData;
 use std::sync::Arc;
 
 use ethcore::basic_account::BasicAccount;
 use ethcore::encoded;
 use ethcore::receipt::Receipt;
-use ethcore::state::ProvedExecution;
 use ethcore::executed::{Executed, ExecutionError};
 
-use futures::{Async, Poll, Future};
-use futures::sync::oneshot::{self, Sender, Receiver};
+use futures::{future, Async, Poll, Future, BoxFuture};
+use futures::sync::oneshot::{self, Sender, Receiver, Canceled};
 use network::PeerId;
 use rlp::RlpStream;
 use util::{Bytes, RwLock, Mutex, U256, H256};
@@ -39,10 +36,19 @@ use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY, SHA3_EMPTY_LIST_RLP};
 
 use net::{self, Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId};
 use cache::Cache;
-use request::{self as basic_request, Request as NetworkRequest, Response as NetworkResponse};
+use request::{self as basic_request, Request as NetworkRequest};
+use self::request::CheckedRequest;
+
+pub use self::request::{Request, Response};
+
+#[cfg(test)]
+mod tests;
 
 pub mod request;
 
+/// The result of execution
+pub type ExecutionResult = Result<Executed, ExecutionError>;
+
 // relevant peer info.
 struct Peer {
 	status: Status,
@@ -50,146 +56,154 @@ struct Peer {
 }
 
 impl Peer {
-	// Whether a given peer can handle a specific request.
-	fn can_handle(&self, pending: &Pending) -> bool {
-		match *pending {
-			Pending::HeaderProof(ref req, _) =>
-				self.capabilities.serve_headers && self.status.head_num > req.num(),
-			Pending::HeaderByHash(_, _) => self.capabilities.serve_headers,
-			Pending::Block(ref req, _) =>
-				self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x <= req.header.number()),
-			Pending::BlockReceipts(ref req, _) =>
-				self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x <= req.0.number()),
-			Pending::Account(ref req, _) =>
-				self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.header.number()),
-			Pending::Code(ref req, _) =>
-				self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.block_id.1),
-			Pending::TxProof(ref req, _) =>
-				self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.header.number()),
-		}
-	}
+	// whether this peer can fulfill the
+	fn can_fulfill(&self, c: &Capabilities) -> bool {
+		let caps = &self.capabilities;
+
+		caps.serve_headers == c.serve_headers &&
+			caps.serve_chain_since >= c.serve_chain_since &&
+			caps.serve_state_since >= c.serve_chain_since
+	}
 }
 
-// Which portions of a CHT proof should be sent.
-enum ChtProofSender {
-	Both(Sender<(H256, U256)>),
-	Hash(Sender<H256>),
-	ChainScore(Sender<U256>),
-}
-
 // Attempted request info and sender to put received value.
-enum Pending {
-	HeaderProof(request::HeaderProof, ChtProofSender),
-	HeaderByHash(request::HeaderByHash, Sender<encoded::Header>),
-	Block(request::Body, Sender<encoded::Block>),
-	BlockReceipts(request::BlockReceipts, Sender<Vec<Receipt>>),
-	Account(request::Account, Sender<BasicAccount>),
-	Code(request::Code, Sender<Bytes>),
-	TxProof(request::TransactionProof, Sender<Result<Executed, ExecutionError>>),
-}
-
-impl Pending {
-	// Create a network request.
-	fn make_request(&self) -> NetworkRequest {
-		match *self {
-			Pending::HeaderByHash(ref req, _) => NetworkRequest::Headers(basic_request::IncompleteHeadersRequest {
-				start: basic_request::HashOrNumber::Hash(req.0).into(),
-				skip: 0,
-				max: 1,
-				reverse: false,
-			}),
-			Pending::HeaderProof(ref req, _) => NetworkRequest::HeaderProof(basic_request::IncompleteHeaderProofRequest {
-				num: req.num().into(),
-			}),
-			Pending::Block(ref req, _) => NetworkRequest::Body(basic_request::IncompleteBodyRequest {
-				hash: req.hash.into(),
-			}),
-			Pending::BlockReceipts(ref req, _) => NetworkRequest::Receipts(basic_request::IncompleteReceiptsRequest {
-				hash: req.0.hash().into(),
-			}),
-			Pending::Account(ref req, _) => NetworkRequest::Account(basic_request::IncompleteAccountRequest {
-				block_hash: req.header.hash().into(),
-				address_hash: ::util::Hashable::sha3(&req.address).into(),
-			}),
-			Pending::Code(ref req, _) => NetworkRequest::Code(basic_request::IncompleteCodeRequest {
-				block_hash: req.block_id.0.into(),
-				code_hash: req.code_hash.into(),
-			}),
-			Pending::TxProof(ref req, _) => NetworkRequest::Execution(basic_request::IncompleteExecutionRequest {
-				block_hash: req.header.hash().into(),
-				from: req.tx.sender(),
-				gas: req.tx.gas,
-				gas_price: req.tx.gas_price,
-				action: req.tx.action.clone(),
-				value: req.tx.value,
-				data: req.tx.data.clone(),
-			}),
-		}
-	}
-}
+struct Pending {
+	requests: basic_request::Requests<CheckedRequest>,
+	net_requests: basic_request::Requests<NetworkRequest>,
+	required_capabilities: Capabilities,
+	responses: Vec<Response>,
+	sender: oneshot::Sender<Vec<Response>>,
+}
+
+// helper to guess capabilities required for a given batch of network requests.
+fn guess_capabilities(requests: &[CheckedRequest]) -> Capabilities {
+	let mut caps = Capabilities {
+		serve_headers: false,
+		serve_chain_since: None,
+		serve_state_since: None,
+		tx_relay: false,
+	};
+
+	let update_since = |current: &mut Option<u64>, new|
+		*current = match *current {
+			Some(x) => Some(::std::cmp::min(x, new)),
+			None => Some(new),
+		};
+
+	for request in requests {
+		match *request {
+			// TODO: might be worth returning a required block number for this also.
+			CheckedRequest::HeaderProof(_, _) =>
+				caps.serve_headers = true,
+			CheckedRequest::HeaderByHash(_, _) =>
+				caps.serve_headers = true,
+			CheckedRequest::Body(ref req, _) =>
+				update_since(&mut caps.serve_chain_since, req.header.number()),
+			CheckedRequest::Receipts(ref req, _) =>
+				update_since(&mut caps.serve_chain_since, req.0.number()),
+			CheckedRequest::Account(ref req, _) =>
+				update_since(&mut caps.serve_state_since, req.header.number()),
+			CheckedRequest::Code(ref req, _) =>
+				update_since(&mut caps.serve_state_since, req.block_id.1),
+			CheckedRequest::Execution(ref req, _) =>
+				update_since(&mut caps.serve_state_since, req.header.number()),
+		}
+	}
+
+	caps
+}
+
+/// A future extracting the concrete output type of the generic adapter
+/// from a vector of responses.
+pub struct OnResponses<T: request::RequestAdapter> {
+	receiver: Receiver<Vec<Response>>,
+	_marker: PhantomData<T>,
+}
+
+impl<T: request::RequestAdapter> Future for OnResponses<T> {
+	type Item = T::Out;
+	type Error = Canceled;
+
+	fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+		self.receiver.poll().map(|async| async.map(T::extract_from))
+	}
+}
 
 /// On demand request service. See module docs for more details.
 /// Accumulates info about all peers' capabilities and dispatches
 /// requests to them accordingly.
 // lock in declaration order.
 pub struct OnDemand {
+	pending: RwLock<Vec<Pending>>,
 	peers: RwLock<HashMap<PeerId, Peer>>,
-	pending_requests: RwLock<HashMap<ReqId, Pending>>,
+	in_transit: RwLock<HashMap<ReqId, Pending>>,
 	cache: Arc<Mutex<Cache>>,
-	orphaned_requests: RwLock<Vec<Pending>>,
-	start_nonce: U256,
+	no_immediate_dispatch: bool,
 }
 
 const RECEIVER_IN_SCOPE: &'static str = "Receiver is still in scope, so it's not dropped; qed";
 
 impl OnDemand {
 	/// Create a new `OnDemand` service with the given cache.
-	pub fn new(cache: Arc<Mutex<Cache>>, account_start_nonce: U256) -> Self {
+	pub fn new(cache: Arc<Mutex<Cache>>) -> Self {
 		OnDemand {
+			pending: RwLock::new(Vec::new()),
 			peers: RwLock::new(HashMap::new()),
-			pending_requests: RwLock::new(HashMap::new()),
+			in_transit: RwLock::new(HashMap::new()),
 			cache: cache,
-			orphaned_requests: RwLock::new(Vec::new()),
-			start_nonce: account_start_nonce,
+			no_immediate_dispatch: true,
 		}
 	}
 
+	// make a test version: this doesn't dispatch pending requests
+	// until you trigger it manually.
+	#[cfg(test)]
+	fn new_test(cache: Arc<Mutex<Cache>>) -> Self {
+		let mut me = OnDemand::new(cache);
+		me.no_immediate_dispatch = true;
+
+		me
+	}
+
 	/// Request a header's hash by block number and CHT root hash.
 	/// Returns the hash.
-	pub fn hash_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver<H256> {
-		let (sender, receiver) = oneshot::channel();
+	pub fn hash_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> BoxFuture<H256, Canceled> {
 		let cached = {
 			let mut cache = self.cache.lock();
 			cache.block_hash(&req.num())
 		};
 
 		match cached {
-			Some(hash) => sender.send(hash).expect(RECEIVER_IN_SCOPE),
-			None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Hash(sender))),
+			Some(hash) => future::ok(hash).boxed(),
+			None => {
+				self.request(ctx, req)
+					.expect("request given fully fleshed out; qed")
+					.map(|(h, _)| h)
+					.boxed()
+			},
 		}
-		receiver
 	}
 
 	/// Request a canonical block's chain score.
 	/// Returns the chain score.
-	pub fn chain_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver<U256> {
-		let (sender, receiver) = oneshot::channel();
+	pub fn chain_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> BoxFuture<U256, Canceled> {
 		let cached = {
 			let mut cache = self.cache.lock();
 			cache.block_hash(&req.num()).and_then(|hash| cache.chain_score(&hash))
 		};
 
 		match cached {
-			Some(score) => sender.send(score).expect(RECEIVER_IN_SCOPE),
-			None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::ChainScore(sender))),
+			Some(score) => future::ok(score).boxed(),
+			None => {
+				self.request(ctx, req)
+					.expect("request given fully fleshed out; qed")
+					.map(|(_, s)| s)
+					.boxed()
+			},
 		}
-
-		receiver
 	}
 
 	/// Request a canonical block's hash and chain score by number.
 	/// Returns the hash and chain score.
-	pub fn hash_and_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver<(H256, U256)> {
-		let (sender, receiver) = oneshot::channel();
+	pub fn hash_and_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> BoxFuture<(H256, U256), Canceled> {
 		let cached = {
 			let mut cache = self.cache.lock();
 			let hash = cache.block_hash(&req.num());
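The methods above change from returning a raw oneshot `Receiver` to a `BoxFuture`. A minimal sketch of how a caller consumes the reworked API under futures 0.1 (illustrative only; `canonical_hash` and its blocking use of `wait` are assumptions, not part of this diff):

```rust
use futures::Future;
use futures::sync::oneshot::Canceled;

// hypothetical helper: block until the on-demand service answers.
fn canonical_hash(od: &OnDemand, ctx: &BasicContext, req: request::HeaderProof)
	-> Result<H256, Canceled>
{
	// cache hits resolve immediately via `future::ok`; misses resolve once a
	// verified response arrives, or err with `Canceled` if the sender is dropped.
	od.hash_by_number(ctx, req).wait()
}
```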
@@ -200,31 +214,33 @@ impl OnDemand {
 		};
 
 		match cached {
-			(Some(hash), Some(score)) => sender.send((hash, score)).expect(RECEIVER_IN_SCOPE),
-			_ => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Both(sender))),
+			(Some(hash), Some(score)) => future::ok((hash, score)).boxed(),
+			_ => {
+				self.request(ctx, req)
+					.expect("request given fully fleshed out; qed")
+					.boxed()
+			},
 		}
-
-		receiver
 	}
 
 	/// Request a header by hash. This is less accurate than by-number because we don't know
 	/// where in the chain this header lies, and therefore can't find a peer who is supposed to have
 	/// it as easily.
-	pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> Receiver<encoded::Header> {
-		let (sender, receiver) = oneshot::channel();
+	pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> BoxFuture<encoded::Header, Canceled> {
 		match { self.cache.lock().block_header(&req.0) } {
-			Some(hdr) => sender.send(hdr).expect(RECEIVER_IN_SCOPE),
-			None => self.dispatch(ctx, Pending::HeaderByHash(req, sender)),
+			Some(hdr) => future::ok(hdr).boxed(),
+			None => {
+				self.request(ctx, req)
+					.expect("request given fully fleshed out; qed")
+					.boxed()
+			},
 		}
-		receiver
 	}
 
 	/// Request a block, given its header. Block bodies are requestable by hash only,
 	/// and the header is required anyway to verify and complete the block body
 	/// -- this just doesn't obscure the network query.
-	pub fn block(&self, ctx: &BasicContext, req: request::Body) -> Receiver<encoded::Block> {
-		let (sender, receiver) = oneshot::channel();
-
+	pub fn block(&self, ctx: &BasicContext, req: request::Body) -> BoxFuture<encoded::Block, Canceled> {
 		// fast path for empty body.
 		if req.header.transactions_root() == SHA3_NULL_RLP && req.header.uncles_hash() == SHA3_EMPTY_LIST_RLP {
 			let mut stream = RlpStream::new_list(3);
@@ -232,7 +248,7 @@ impl OnDemand {
 			stream.begin_list(0);
 			stream.begin_list(0);
 
-			sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE);
+			future::ok(encoded::Block::new(stream.out())).boxed()
 		} else {
 			match { self.cache.lock().block_body(&req.hash) } {
 				Some(body) => {
@@ -242,98 +258,124 @@ impl OnDemand {
 					stream.append_raw(&body.at(0).as_raw(), 1);
 					stream.append_raw(&body.at(1).as_raw(), 1);
 
-					sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE);
+					future::ok(encoded::Block::new(stream.out())).boxed()
 				}
-				None => self.dispatch(ctx, Pending::Block(req, sender)),
+				None => {
+					self.request(ctx, req)
+						.expect("request given fully fleshed out; qed")
+						.boxed()
+				}
 			}
 		}
-		receiver
 	}
 
 	/// Request the receipts for a block. The header serves two purposes:
 	/// provide the block hash to fetch receipts for, and for verification of the receipts root.
-	pub fn block_receipts(&self, ctx: &BasicContext, req: request::BlockReceipts) -> Receiver<Vec<Receipt>> {
-		let (sender, receiver) = oneshot::channel();
-
+	pub fn block_receipts(&self, ctx: &BasicContext, req: request::BlockReceipts) -> BoxFuture<Vec<Receipt>, Canceled> {
 		// fast path for empty receipts.
 		if req.0.receipts_root() == SHA3_NULL_RLP {
-			sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE);
-		} else {
-			match { self.cache.lock().block_receipts(&req.0.hash()) } {
-				Some(receipts) => sender.send(receipts).expect(RECEIVER_IN_SCOPE),
-				None => self.dispatch(ctx, Pending::BlockReceipts(req, sender)),
-			}
+			return future::ok(Vec::new()).boxed()
 		}
 
-		receiver
+		match { self.cache.lock().block_receipts(&req.0.hash()) } {
+			Some(receipts) => future::ok(receipts).boxed(),
+			None => {
+				self.request(ctx, req)
+					.expect("request given fully fleshed out; qed")
+					.boxed()
+			},
+		}
 	}
 
 	/// Request an account by address and block header -- which gives a hash to query and a state root
 	/// to verify against.
-	pub fn account(&self, ctx: &BasicContext, req: request::Account) -> Receiver<BasicAccount> {
-		let (sender, receiver) = oneshot::channel();
-		self.dispatch(ctx, Pending::Account(req, sender));
-		receiver
+	/// `None` here means that no account by the queried key exists in the queried state.
+	pub fn account(&self, ctx: &BasicContext, req: request::Account) -> BoxFuture<Option<BasicAccount>, Canceled> {
+		self.request(ctx, req)
+			.expect("request given fully fleshed out; qed")
+			.boxed()
 	}
 
 	/// Request code by address, known code hash, and block header.
-	pub fn code(&self, ctx: &BasicContext, req: request::Code) -> Receiver<Bytes> {
-		let (sender, receiver) = oneshot::channel();
-
+	pub fn code(&self, ctx: &BasicContext, req: request::Code) -> BoxFuture<Bytes, Canceled> {
 		// fast path for no code.
 		if req.code_hash == SHA3_EMPTY {
-			sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE)
+			future::ok(Vec::new()).boxed()
 		} else {
-			self.dispatch(ctx, Pending::Code(req, sender));
+			self.request(ctx, req)
+				.expect("request given fully fleshed out; qed")
+				.boxed()
 		}
-
-		receiver
 	}
 
 	/// Request proof-of-execution for a transaction.
-	pub fn transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof) -> Receiver<Result<Executed, ExecutionError>> {
-		let (sender, receiver) = oneshot::channel();
-
-		self.dispatch(ctx, Pending::TxProof(req, sender));
-
-		receiver
-	}
-
-	// dispatch the request, with a "suitability" function to filter acceptable peers.
-	fn dispatch(&self, ctx: &BasicContext, pending: Pending) {
-		let mut builder = basic_request::RequestBuilder::default();
-		builder.push(pending.make_request())
-			.expect("make_request always returns fully complete request; qed");
-
-		let complete = builder.build();
-
-		let kind = complete.requests()[0].kind();
-		for (id, peer) in self.peers.read().iter() {
-			if !peer.can_handle(&pending) { continue }
-			match ctx.request_from(*id, complete.clone()) {
-				Ok(req_id) => {
-					trace!(target: "on_demand", "{}: Assigned {:?} to peer {}",
-						req_id, kind, id);
-
-					self.pending_requests.write().insert(
-						req_id,
-						pending,
-					);
-					return
-				}
-				Err(net::Error::NoCredits) => {}
-				Err(e) =>
-					trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e),
-			}
-		}
-
-		self.orphaned_requests.write().push(pending);
-	}
-
-	// dispatch orphaned requests, and discard those for which the corresponding
+	pub fn transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof) -> BoxFuture<ExecutionResult, Canceled> {
+		self.request(ctx, req)
+			.expect("request given fully fleshed out; qed")
+			.boxed()
+	}
+
+	/// Submit a vector of requests to be processed together.
+	///
+	/// Fails if back-references are not coherent.
+	/// The returned vector of responses will correspond to the requests exactly.
+	pub fn request_raw(&self, ctx: &BasicContext, requests: Vec<Request>)
+		-> Result<Receiver<Vec<Response>>, basic_request::NoSuchOutput>
+	{
+		let (sender, receiver) = oneshot::channel();
+
+		if requests.is_empty() {
+			assert!(sender.send(Vec::new()).is_ok(), "receiver still in scope; qed");
+			return Ok(receiver);
+		}
+
+		let mut builder = basic_request::RequestBuilder::default();
+
+		let responses = Vec::with_capacity(requests.len());
+		for request in requests {
+			builder.push(CheckedRequest::from(request))?;
+		}
+
+		let requests = builder.build();
+		let net_requests = requests.clone().map_requests(|req| req.into_net_request());
+		let capabilities = guess_capabilities(requests.requests());
+
+		self.pending.write().push(Pending {
+			requests: requests,
+			net_requests: net_requests,
+			required_capabilities: capabilities,
+			responses: responses,
+			sender: sender,
+		});
+
+		self.attempt_dispatch(ctx);
+
+		Ok(receiver)
+	}
+
+	/// Submit a strongly-typed batch of requests.
+	///
+	/// Fails if back-reference are not coherent.
+	pub fn request<T>(&self, ctx: &BasicContext, requests: T) -> Result<OnResponses<T>, basic_request::NoSuchOutput>
+		where T: request::RequestAdapter
+	{
+		self.request_raw(ctx, requests.make_requests()).map(|recv| OnResponses {
+			receiver: recv,
+			_marker: PhantomData,
+		})
+	}
+
+	// maybe dispatch pending requests.
+	// sometimes
+	fn attempt_dispatch(&self, ctx: &BasicContext) {
+		if !self.no_immediate_dispatch {
+			self.dispatch_pending(ctx)
+		}
+	}
+
+	// dispatch pending requests, and discard those for which the corresponding
 	// receiver has been dropped.
-	fn dispatch_orphaned(&self, ctx: &BasicContext) {
+	fn dispatch_pending(&self, ctx: &BasicContext) {
 		// wrapper future for calling `poll_cancel` on our `Senders` to preserve
 		// the invariant that it's always within a task.
 		struct CheckHangup<'a, T: 'a>(&'a mut Sender<T>);
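With `request_raw` and the typed `request` wrapper above, batching becomes the primary entry point. A sketch of a two-request batch via the tuple adapter (the helper function and its blocking `wait` are illustrative assumptions, not part of this diff):

```rust
use futures::Future;
use futures::sync::oneshot::Canceled;

// fetch a header and another block's receipts in one round-trip.
fn header_and_receipts(od: &OnDemand, ctx: &BasicContext, hash: H256, header: encoded::Header)
	-> Result<(encoded::Header, Vec<Receipt>), Canceled>
{
	let fut = od.request(ctx, (
		request::HeaderByHash(hash),
		request::BlockReceipts(header),
	)).expect("fully fleshed out requests have no back-references; qed");

	// `OnResponses<T>` resolves to the adapter's `Out` type.
	fut.wait()
}
```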
@@ -356,35 +398,44 @@ impl OnDemand {
 			CheckHangup(send).wait().expect("CheckHangup always returns ok; qed")
 		}
 
-		if self.orphaned_requests.read().is_empty() { return }
-
-		let to_dispatch = ::std::mem::replace(&mut *self.orphaned_requests.write(), Vec::new());
-
-		trace!(target: "on_demand", "Attempting to dispatch {} orphaned requests.", to_dispatch.len());
-		for mut orphaned in to_dispatch {
-			let hung_up = match orphaned {
-				Pending::HeaderProof(_, ref mut sender) => match *sender {
-					ChtProofSender::Both(ref mut s) => check_hangup(s),
-					ChtProofSender::Hash(ref mut s) => check_hangup(s),
-					ChtProofSender::ChainScore(ref mut s) => check_hangup(s),
-				},
-				Pending::HeaderByHash(_, ref mut sender) => check_hangup(sender),
-				Pending::Block(_, ref mut sender) => check_hangup(sender),
-				Pending::BlockReceipts(_, ref mut sender) => check_hangup(sender),
-				Pending::Account(_, ref mut sender) => check_hangup(sender),
-				Pending::Code(_, ref mut sender) => check_hangup(sender),
-				Pending::TxProof(_, ref mut sender) => check_hangup(sender),
-			};
-
-			if !hung_up { self.dispatch(ctx, orphaned) }
-		}
+		if self.pending.read().is_empty() { return }
+		let mut pending = self.pending.write();
+
+		// iterate over all pending requests, and check them for hang-up.
+		// then, try and find a peer who can serve it.
+		let peers = self.peers.read();
+		*pending = ::std::mem::replace(&mut *pending, Vec::new()).into_iter()
+			.filter_map(|mut pending| match check_hangup(&mut pending.sender) {
+				false => Some(pending),
+				true => None,
+			})
+			.filter_map(|pending| {
+				for (peer_id, peer) in peers.iter() { // .shuffle?
+					// TODO: see which requests can be answered by the cache?
+
+					if !peer.can_fulfill(&pending.required_capabilities) {
+						continue
+					}
+
+					match ctx.request_from(*peer_id, pending.net_requests.clone()) {
+						Ok(req_id) => {
+							self.in_transit.write().insert(req_id, pending);
+							return None
+						}
+						Err(net::Error::NoCredits) => {}
+						Err(e) => debug!(target: "on_demand", "Error dispatching request to peer: {}", e),
+					}
+				}
+				Some(pending)
+			})
+			.collect(); // `pending` now contains all requests we couldn't dispatch.
 	}
 }
 
 impl Handler for OnDemand {
 	fn on_connect(&self, ctx: &EventContext, status: &Status, capabilities: &Capabilities) {
 		self.peers.write().insert(ctx.peer(), Peer { status: status.clone(), capabilities: capabilities.clone() });
-		self.dispatch_orphaned(ctx.as_basic());
+		self.attempt_dispatch(ctx.as_basic());
 	}
 
 	fn on_disconnect(&self, ctx: &EventContext, unfulfilled: &[ReqId]) {
@@ -392,16 +443,16 @@ impl Handler for OnDemand {
 		let ctx = ctx.as_basic();
 
 		{
-			let mut orphaned = self.orphaned_requests.write();
+			let mut pending = self.pending.write();
 			for unfulfilled in unfulfilled {
-				if let Some(pending) = self.pending_requests.write().remove(unfulfilled) {
+				if let Some(unfulfilled) = self.in_transit.write().remove(unfulfilled) {
 					trace!(target: "on_demand", "Attempting to reassign dropped request");
-					orphaned.push(pending);
+					pending.push(unfulfilled);
 				}
 			}
 		}
 
-		self.dispatch_orphaned(ctx);
+		self.attempt_dispatch(ctx);
 	}
 
 	fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) {
@@ -413,183 +464,70 @@ impl Handler for OnDemand {
 			}
 		}
 
-		self.dispatch_orphaned(ctx.as_basic());
+		self.attempt_dispatch(ctx.as_basic());
 	}
 
 	fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[basic_request::Response]) {
-		let peer = ctx.peer();
-		let req = match self.pending_requests.write().remove(&req_id) {
+		use request::IncompleteRequest;
+
+		let mut pending = match self.in_transit.write().remove(&req_id) {
 			Some(req) => req,
 			None => return,
 		};
 
-		let response = match responses.get(0) {
-			Some(response) => response,
-			None => {
-				trace!(target: "on_demand", "Ignoring empty response for request {}", req_id);
-				self.dispatch(ctx.as_basic(), req);
-				return;
-			}
-		};
-
-		trace!(target: "on_demand", "Handling response for request {}, kind={:?}", req_id, response.kind());
-
-		// handle the response appropriately for the request.
-		// all branches which do not return early lead to disabling of the peer
-		// due to misbehavior.
-		match req {
-			Pending::HeaderProof(req, sender) => {
-				if let NetworkResponse::HeaderProof(ref response) = *response {
-					match req.check_response(&response.proof) {
-						Ok((hash, score)) => {
-							let mut cache = self.cache.lock();
-							cache.insert_block_hash(req.num(), hash);
-							cache.insert_chain_score(hash, score);
-
-							match sender {
-								ChtProofSender::Both(sender) => { let _ = sender.send((hash, score)); }
-								ChtProofSender::Hash(sender) => { let _ = sender.send(hash); }
-								ChtProofSender::ChainScore(sender) => { let _ = sender.send(score); }
-							}
-							return
-						}
-						Err(e) => warn!(target: "on_demand", "Error handling response for header request: {:?}", e),
-					}
-				}
-			}
-			Pending::HeaderByHash(req, sender) => {
-				if let NetworkResponse::Headers(ref response) = *response {
-					if let Some(header) = response.headers.get(0) {
-						match req.check_response(header) {
-							Ok(header) => {
-								self.cache.lock().insert_block_header(req.0, header.clone());
-								let _ = sender.send(header);
-								return
-							}
-							Err(e) => warn!(target: "on_demand", "Error handling response for header request: {:?}", e),
-						}
-					}
-				}
-			}
-			Pending::Block(req, sender) => {
-				if let NetworkResponse::Body(ref response) = *response {
-					match req.check_response(&response.body) {
-						Ok(block) => {
-							self.cache.lock().insert_block_body(req.hash, response.body.clone());
-							let _ = sender.send(block);
-							return
-						}
-						Err(e) => warn!(target: "on_demand", "Error handling response for block request: {:?}", e),
-					}
-				}
-			}
-			Pending::BlockReceipts(req, sender) => {
-				if let NetworkResponse::Receipts(ref response) = *response {
-					match req.check_response(&response.receipts) {
-						Ok(receipts) => {
-							let hash = req.0.hash();
-							self.cache.lock().insert_block_receipts(hash, receipts.clone());
-							let _ = sender.send(receipts);
-							return
-						}
-						Err(e) => warn!(target: "on_demand", "Error handling response for receipts request: {:?}", e),
-					}
-				}
-			}
-			Pending::Account(req, sender) => {
-				if let NetworkResponse::Account(ref response) = *response {
-					match req.check_response(&response.proof) {
-						Ok(account) => {
-							let account = account.unwrap_or_else(|| {
-								BasicAccount {
-									balance: 0.into(),
-									nonce: self.start_nonce,
-									code_hash: SHA3_EMPTY,
-									storage_root: SHA3_NULL_RLP
-								}
-							});
-
-							// TODO: validate against request outputs.
-							// needs engine + env info as part of request.
-							let _ = sender.send(account);
-							return
-						}
-						Err(e) => warn!(target: "on_demand", "Error handling response for state request: {:?}", e),
-					}
-				}
-			}
-			Pending::Code(req, sender) => {
-				if let NetworkResponse::Code(ref response) = *response {
-					match req.check_response(response.code.as_slice()) {
-						Ok(()) => {
-							let _ = sender.send(response.code.clone());
-							return
-						}
-						Err(e) => warn!(target: "on_demand", "Error handling response for code request: {:?}", e),
-					}
-				}
-			}
-			Pending::TxProof(req, sender) => {
-				if let NetworkResponse::Execution(ref response) = *response {
-					match req.check_response(&response.items) {
-						ProvedExecution::Complete(executed) => {
-							let _ = sender.send(Ok(executed));
-							return
-						}
-						ProvedExecution::Failed(err) => {
-							let _ = sender.send(Err(err));
-							return
-						}
-						ProvedExecution::BadProof => warn!(target: "on_demand", "Error handling response for transaction proof request"),
-					}
-				}
-			}
-		}
-
-		ctx.disable_peer(peer);
+		// for each incoming response
+		//   1. ensure verification data filled. (still TODO since on_demand doesn't use back-references yet)
+		//   2. pending.requests.supply_response
+		//   3. if extracted on-demand response, keep it for later.
+		for response in responses {
+			match pending.requests.supply_response(&*self.cache, response) {
+				Ok(response) => {
+					pending.responses.push(response)
+				}
+				Err(e) => {
+					let peer = ctx.peer();
+					debug!(target: "on_demand", "Peer {} gave bad response: {:?}", peer, e);
+					ctx.disable_peer(peer);
+
+					break;
+				}
+			}
+		}
+
+		pending.requests.fill_unanswered();
+		if pending.requests.is_complete() {
+			let _ = pending.sender.send(pending.responses);
+
+			return;
+		}
+
+		// update network requests (unless we're done, in which case fulfill the future.)
+		let mut builder = basic_request::RequestBuilder::default();
+		let num_answered = pending.requests.num_answered();
+		let mut mapping = move |idx| idx - num_answered;
+
+		for request in pending.requests.requests().iter().skip(num_answered) {
+			let mut net_req = request.clone().into_net_request();
+
+			// all back-references with request index less than `num_answered` have
+			// been filled by now. all remaining requests point to nothing earlier
+			// than the next unanswered request.
+			net_req.adjust_refs(&mut mapping);
+			builder.push(net_req)
+				.expect("all back-references to answered requests have been filled; qed");
+		}
+
+		// update pending fields and re-queue.
+		let capabilities = guess_capabilities(&pending.requests.requests()[num_answered..]);
+		pending.net_requests = builder.build();
+		pending.required_capabilities = capabilities;
+
+		self.pending.write().push(pending);
+		self.attempt_dispatch(ctx.as_basic());
 	}
 
 	fn tick(&self, ctx: &BasicContext) {
-		self.dispatch_orphaned(ctx)
+		self.attempt_dispatch(ctx)
 	}
 }
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-
-	use std::sync::Arc;
-
-	use cache::Cache;
-	use net::{Announcement, BasicContext, ReqId, Error as LesError};
-	use request::Requests;
-
-	use network::{PeerId, NodeId};
-	use time::Duration;
-	use util::{H256, Mutex};
-
-	struct FakeContext;
-
-	impl BasicContext for FakeContext {
-		fn persistent_peer_id(&self, _: PeerId) -> Option<NodeId> { None }
-		fn request_from(&self, _: PeerId, _: Requests) -> Result<ReqId, LesError> {
-			unimplemented!()
-		}
-		fn make_announcement(&self, _: Announcement) { }
-		fn disconnect_peer(&self, _: PeerId) { }
-		fn disable_peer(&self, _: PeerId) { }
-	}
-
-	#[test]
-	fn detects_hangup() {
-		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
-		let on_demand = OnDemand::new(cache, 0.into());
-		let result = on_demand.header_by_hash(&FakeContext, request::HeaderByHash(H256::default()));
-
-		assert!(on_demand.orphaned_requests.read().len() == 1);
-		drop(result);
-
-		on_demand.dispatch_orphaned(&FakeContext);
-		assert!(on_demand.orphaned_requests.read().is_empty());
-	}
-}
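When a batch comes back only partially answered, the code above rebuilds the remaining requests and shifts their back-references by `num_answered`. A tiny worked example of that index mapping (illustrative only, not part of the diff):

```rust
fn main() {
	// suppose 2 of 5 requests were answered; the rebuilt batch starts at
	// what used to be index 2, so every surviving back-reference shifts down.
	let num_answered = 2;
	let mapping = |idx: usize| idx - num_answered;

	assert_eq!(mapping(2), 0); // first unanswered request becomes index 0
	assert_eq!(mapping(4), 2); // later references shift uniformly
}
```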
@@ -26,17 +26,374 @@ use ethcore::receipt::Receipt;
 use ethcore::state::{self, ProvedExecution};
 use ethcore::transaction::SignedTransaction;
 
+use request::{self as net_request, IncompleteRequest, Output, OutputKind};
+
 use rlp::{RlpStream, UntrustedRlp};
-use util::{Address, Bytes, DBValue, HashDB, H256, U256};
+use util::{Address, Bytes, DBValue, HashDB, Mutex, H256, U256};
 use util::memorydb::MemoryDB;
 use util::sha3::Hashable;
 use util::trie::{Trie, TrieDB, TrieError};
 
+const SUPPLIED_MATCHES: &'static str = "supplied responses always match produced requests; enforced by `check_response`; qed";
+
+/// Core unit of the API: submit batches of these to be answered with `Response`s.
+#[derive(Clone)]
+pub enum Request {
+	/// A request for a header proof.
+	HeaderProof(HeaderProof),
+	/// A request for a header by hash.
+	HeaderByHash(HeaderByHash),
+	/// A request for block receipts.
+	Receipts(BlockReceipts),
+	/// A request for a block body.
+	Body(Body),
+	/// A request for an account.
+	Account(Account),
+	/// A request for a contract's code.
+	Code(Code),
+	/// A request for proof of execution.
+	Execution(TransactionProof),
+}
+
+/// A request argument.
+pub trait RequestArg {
+	/// the response type.
+	type Out;
+
+	/// Create the request type.
+	/// `extract` must not fail when presented with the corresponding
+	/// `Response`.
+	fn make(self) -> Request;
+
+	/// May not panic if the response corresponds with the request
+	/// from `make`.
+	/// Is free to panic otherwise.
+	fn extract(r: Response) -> Self::Out;
+}
+
+/// An adapter can be thought of as a grouping of request argument types.
+/// This is implemented for various tuples and convenient types.
+pub trait RequestAdapter {
+	/// The output type.
+	type Out;
+
+	/// Infallibly produce requests. When `extract_from` is presented
+	/// with the corresponding response vector, it may not fail.
+	fn make_requests(self) -> Vec<Request>;
+
+	/// Extract the output type from the given responses.
+	/// If they are the corresponding responses to the requests
+	/// made by `make_requests`, do not panic.
+	fn extract_from(Vec<Response>) -> Self::Out;
+}
+
+// helper to implement `RequestArg` and `From` for a single request kind.
+macro_rules! impl_single {
+	($variant: ident, $me: ty, $out: ty) => {
+		impl RequestArg for $me {
+			type Out = $out;
+
+			fn make(self) -> Request {
+				Request::$variant(self)
+			}
+
+			fn extract(r: Response) -> $out {
+				match r {
+					Response::$variant(x) => x,
+					_ => panic!(SUPPLIED_MATCHES),
+				}
+			}
+		}
+
+		impl From<$me> for Request {
+			fn from(me: $me) -> Request {
+				Request::$variant(me)
+			}
+		}
+	}
+}
+
+// implement traits for each kind of request.
+impl_single!(HeaderProof, HeaderProof, (H256, U256));
+impl_single!(HeaderByHash, HeaderByHash, encoded::Header);
+impl_single!(Receipts, BlockReceipts, Vec<Receipt>);
+impl_single!(Body, Body, encoded::Block);
+impl_single!(Account, Account, Option<BasicAccount>);
+impl_single!(Code, Code, Bytes);
+impl_single!(Execution, TransactionProof, super::ExecutionResult);
+
+macro_rules! impl_args {
+	() => {
+		impl<T: RequestArg> RequestAdapter for T {
+			type Out = T::Out;
+
+			fn make_requests(self) -> Vec<Request> {
+				vec![self.make()]
+			}
+
+			fn extract_from(mut responses: Vec<Response>) -> Self::Out {
+				T::extract(responses.pop().expect(SUPPLIED_MATCHES))
+			}
+		}
+	};
+	($first: ident, $($next: ident,)*) => {
+		impl<
+			$first: RequestArg,
+			$($next: RequestArg,)*
+		>
+		RequestAdapter for ($first, $($next,)*) {
+			type Out = ($first::Out, $($next::Out,)*);
+
+			fn make_requests(self) -> Vec<Request> {
+				let ($first, $($next,)*) = self;
+
+				vec![
+					$first.make(),
+					$($next.make(),)*
+				]
+			}
+
+			fn extract_from(responses: Vec<Response>) -> Self::Out {
+				let mut iter = responses.into_iter();
+				(
+					$first::extract(iter.next().expect(SUPPLIED_MATCHES)),
+					$($next::extract(iter.next().expect(SUPPLIED_MATCHES)),)*
+				)
+			}
+		}
+		impl_args!($($next,)*);
+	}
+}
+
+mod impls {
+	#![allow(non_snake_case)]
+
+	use super::{RequestAdapter, RequestArg, Request, Response, SUPPLIED_MATCHES};
+
+	impl_args!(A, B, C, D, E, F, G, H, I, J, K, L,);
+}
+
+/// Requests coupled with their required data for verification.
+/// This is used internally but not part of the public API.
+#[derive(Clone)]
+#[allow(missing_docs)]
+pub enum CheckedRequest {
+	HeaderProof(HeaderProof, net_request::IncompleteHeaderProofRequest),
+	HeaderByHash(HeaderByHash, net_request::IncompleteHeadersRequest),
+	Receipts(BlockReceipts, net_request::IncompleteReceiptsRequest),
+	Body(Body, net_request::IncompleteBodyRequest),
+	Account(Account, net_request::IncompleteAccountRequest),
+	Code(Code, net_request::IncompleteCodeRequest),
+	Execution(TransactionProof, net_request::IncompleteExecutionRequest),
+}
+
+impl From<Request> for CheckedRequest {
+	fn from(req: Request) -> Self {
+		match req {
+			Request::HeaderByHash(req) => {
+				let net_req = net_request::IncompleteHeadersRequest {
+					start: net_request::HashOrNumber::Hash(req.0).into(),
+					skip: 0,
+					max: 1,
+					reverse: false,
+				};
+				CheckedRequest::HeaderByHash(req, net_req)
+			}
+			Request::HeaderProof(req) => {
+				let net_req = net_request::IncompleteHeaderProofRequest {
+					num: req.num().into(),
+				};
+				CheckedRequest::HeaderProof(req, net_req)
+			}
+			Request::Body(req) => {
+				let net_req = net_request::IncompleteBodyRequest {
+					hash: req.hash.into(),
+				};
+				CheckedRequest::Body(req, net_req)
+			}
+			Request::Receipts(req) => {
+				let net_req = net_request::IncompleteReceiptsRequest {
+					hash: req.0.hash().into(),
+				};
+				CheckedRequest::Receipts(req, net_req)
+			}
+			Request::Account(req) => {
+				let net_req = net_request::IncompleteAccountRequest {
+					block_hash: req.header.hash().into(),
+					address_hash: ::util::Hashable::sha3(&req.address).into(),
+				};
+				CheckedRequest::Account(req, net_req)
+			}
+			Request::Code(req) => {
+				let net_req = net_request::IncompleteCodeRequest {
+					block_hash: req.block_id.0.into(),
+					code_hash: req.code_hash.into(),
+				};
+				CheckedRequest::Code(req, net_req)
+			}
+			Request::Execution(req) => {
+				let net_req = net_request::IncompleteExecutionRequest {
+					block_hash: req.header.hash().into(),
+					from: req.tx.sender(),
+					gas: req.tx.gas,
+					gas_price: req.tx.gas_price,
+					action: req.tx.action.clone(),
+					value: req.tx.value,
+					data: req.tx.data.clone(),
+				};
+				CheckedRequest::Execution(req, net_req)
+			}
+		}
+	}
+}
+
+impl CheckedRequest {
+	/// Convert this into a network request.
+	pub fn into_net_request(self) -> net_request::Request {
+		use ::request::Request as NetRequest;
+
+		match self {
+			CheckedRequest::HeaderProof(_, req) => NetRequest::HeaderProof(req),
+			CheckedRequest::HeaderByHash(_, req) => NetRequest::Headers(req),
+			CheckedRequest::Receipts(_, req) => NetRequest::Receipts(req),
+			CheckedRequest::Body(_, req) => NetRequest::Body(req),
+			CheckedRequest::Account(_, req) => NetRequest::Account(req),
+			CheckedRequest::Code(_, req) => NetRequest::Code(req),
+			CheckedRequest::Execution(_, req) => NetRequest::Execution(req),
+		}
+	}
+}
+
+macro_rules! match_me {
+	($me: expr, ($check: pat, $req: pat) => $e: expr) => {
+		match $me {
+			CheckedRequest::HeaderProof($check, $req) => $e,
+			CheckedRequest::HeaderByHash($check, $req) => $e,
+			CheckedRequest::Receipts($check, $req) => $e,
+			CheckedRequest::Body($check, $req) => $e,
+			CheckedRequest::Account($check, $req) => $e,
+			CheckedRequest::Code($check, $req) => $e,
+			CheckedRequest::Execution($check, $req) => $e,
+		}
+	}
+}
+
+impl IncompleteRequest for CheckedRequest {
+	type Complete = net_request::CompleteRequest;
+	type Response = net_request::Response;
+
+	/// Check prior outputs against the needed inputs.
+	///
+	/// This is called to ensure consistency of this request with
+	/// others in the same packet.
+	fn check_outputs<F>(&self, f: F) -> Result<(), net_request::NoSuchOutput>
+		where F: FnMut(usize, usize, OutputKind) -> Result<(), net_request::NoSuchOutput>
+	{
+		match_me!(*self, (_, ref req) => req.check_outputs(f))
+	}
+
+	/// Note that this request will produce the following outputs.
+	fn note_outputs<F>(&self, f: F) where F: FnMut(usize, OutputKind) {
+		match_me!(*self, (_, ref req) => req.note_outputs(f))
+	}
+
+	/// Fill fields of the request.
+	///
+	/// This function is provided an "output oracle" which allows fetching of
+	/// prior request outputs.
+	/// Only outputs previously checked with `check_outputs` may be available.
+	fn fill<F>(&mut self, f: F) where F: Fn(usize, usize) -> Result<Output, net_request::NoSuchOutput> {
+		match_me!(*self, (_, ref mut req) => req.fill(f))
+	}
+
+	/// Will succeed if all fields have been filled, will fail otherwise.
+	fn complete(self) -> Result<Self::Complete, net_request::NoSuchOutput> {
+		use ::request::CompleteRequest;
+
+		match self {
+			CheckedRequest::HeaderProof(_, req) => req.complete().map(CompleteRequest::HeaderProof),
+			CheckedRequest::HeaderByHash(_, req) => req.complete().map(CompleteRequest::Headers),
+			CheckedRequest::Receipts(_, req) => req.complete().map(CompleteRequest::Receipts),
+			CheckedRequest::Body(_, req) => req.complete().map(CompleteRequest::Body),
+			CheckedRequest::Account(_, req) => req.complete().map(CompleteRequest::Account),
+			CheckedRequest::Code(_, req) => req.complete().map(CompleteRequest::Code),
+			CheckedRequest::Execution(_, req) => req.complete().map(CompleteRequest::Execution),
+		}
+	}
+
+	fn adjust_refs<F>(&mut self, mapping: F) where F: FnMut(usize) -> usize {
+		match_me!(*self, (_, ref mut req) => req.adjust_refs(mapping))
+	}
+}
+
+impl net_request::CheckedRequest for CheckedRequest {
+	type Extract = Response;
+	type Error = Error;
+	type Environment = Mutex<::cache::Cache>;
+
+	/// Check whether the response matches (beyond the type).
+	fn check_response(&self, cache: &Mutex<::cache::Cache>, response: &Self::Response) -> Result<Response, Error> {
+		use ::request::Response as NetResponse;
+
+		// helper for expecting a specific response for a given request.
+		macro_rules! expect {
+			($res: pat => $e: expr) => {
+				match *response {
+					$res => $e,
+					_ => Err(Error::WrongKind),
+				}
+			}
+		}
+
+		// check response against contained prover.
+		match *self {
+			CheckedRequest::HeaderProof(ref prover, _) => expect!(NetResponse::HeaderProof(ref res) =>
+				prover.check_response(cache, &res.proof).map(Response::HeaderProof)),
+			CheckedRequest::HeaderByHash(ref prover, _) => expect!(NetResponse::Headers(ref res) =>
+				prover.check_response(cache, &res.headers).map(Response::HeaderByHash)),
+			CheckedRequest::Receipts(ref prover, _) => expect!(NetResponse::Receipts(ref res) =>
+				prover.check_response(cache, &res.receipts).map(Response::Receipts)),
+			CheckedRequest::Body(ref prover, _) => expect!(NetResponse::Body(ref res) =>
+				prover.check_response(cache, &res.body).map(Response::Body)),
+			CheckedRequest::Account(ref prover, _) => expect!(NetResponse::Account(ref res) =>
+				prover.check_response(cache, &res.proof).map(Response::Account)),
+			CheckedRequest::Code(ref prover, _) => expect!(NetResponse::Code(ref res) =>
+				prover.check_response(cache, &res.code).map(Response::Code)),
+			CheckedRequest::Execution(ref prover, _) => expect!(NetResponse::Execution(ref res) =>
+				prover.check_response(cache, &res.items).map(Response::Execution)),
+		}
+	}
+}
+
+/// Responses to on-demand requests.
+/// All of these are checked.
+pub enum Response {
+	/// Response to a header proof request.
+	/// Returns the hash and chain score.
+	HeaderProof((H256, U256)),
+	/// Response to a header-by-hash request.
+	HeaderByHash(encoded::Header),
+	/// Response to a receipts request.
+	Receipts(Vec<Receipt>),
+	/// Response to a block body request.
+	Body(encoded::Block),
+	/// Response to an Account request.
+	// TODO: `unwrap_or(engine_defaults)`
+	Account(Option<BasicAccount>),
+	/// Response to a request for code.
+	Code(Vec<u8>),
+	/// Response to a request for proved execution.
+	Execution(super::ExecutionResult),
+}
+
 /// Errors in verification.
 #[derive(Debug, PartialEq)]
 pub enum Error {
 	/// RLP decoder error.
 	Decoder(::rlp::DecoderError),
+	/// Empty response.
+	Empty,
 	/// Trie lookup error (result of bad proof)
 	Trie(TrieError),
 	/// Bad inclusion proof
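For reference, the recursive `impl_args!` invocation above generates one `RequestAdapter` impl per tuple arity up to twelve. A hand-written sketch of roughly what the two-element expansion looks like, assuming the traits as defined in this diff:

```rust
impl<A: RequestArg, B: RequestArg> RequestAdapter for (A, B) {
	type Out = (A::Out, B::Out);

	fn make_requests(self) -> Vec<Request> {
		let (a, b) = self;
		vec![a.make(), b.make()]
	}

	fn extract_from(responses: Vec<Response>) -> Self::Out {
		// responses arrive in request order; extraction may panic only if
		// they don't correspond to the requests made above.
		let mut iter = responses.into_iter();
		(
			A::extract(iter.next().expect(SUPPLIED_MATCHES)),
			B::extract(iter.next().expect(SUPPLIED_MATCHES)),
		)
	}
}
```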
@@ -47,6 +404,8 @@ pub enum Error {
 	WrongHash(H256, H256),
 	/// Wrong trie root.
 	WrongTrieRoot(H256, H256),
+	/// Wrong response kind.
+	WrongKind,
 }
 
 impl From<::rlp::DecoderError> for Error {
@@ -93,9 +452,15 @@ impl HeaderProof {
 	pub fn cht_root(&self) -> H256 { self.cht_root }
 
 	/// Check a response with a CHT proof, get a hash and total difficulty back.
-	pub fn check_response(&self, proof: &[Bytes]) -> Result<(H256, U256), Error> {
+	pub fn check_response(&self, cache: &Mutex<::cache::Cache>, proof: &[Bytes]) -> Result<(H256, U256), Error> {
 		match ::cht::check_proof(proof, self.num, self.cht_root) {
-			Some((expected_hash, td)) => Ok((expected_hash, td)),
+			Some((expected_hash, td)) => {
+				let mut cache = cache.lock();
+				cache.insert_block_hash(self.num, expected_hash);
+				cache.insert_chain_score(expected_hash, td);
+
+				Ok((expected_hash, td))
+			}
 			None => Err(Error::BadProof),
 		}
 	}
@@ -107,10 +472,14 @@ pub struct HeaderByHash(pub H256);
 
 impl HeaderByHash {
 	/// Check a response for the header.
-	pub fn check_response(&self, header: &encoded::Header) -> Result<encoded::Header, Error> {
+	pub fn check_response(&self, cache: &Mutex<::cache::Cache>, headers: &[encoded::Header]) -> Result<encoded::Header, Error> {
+		let header = headers.get(0).ok_or(Error::Empty)?;
 		let hash = header.sha3();
 		match hash == self.0 {
-			true => Ok(header.clone()),
+			true => {
+				cache.lock().insert_block_header(hash, header.clone());
+				Ok(header.clone())
+			}
 			false => Err(Error::WrongHash(self.0, hash)),
 		}
 	}
@@ -136,7 +505,7 @@ impl Body {
 	}
 
 	/// Check a response for this block body.
-	pub fn check_response(&self, body: &encoded::Body) -> Result<encoded::Block, Error> {
+	pub fn check_response(&self, cache: &Mutex<::cache::Cache>, body: &encoded::Body) -> Result<encoded::Block, Error> {
 		// check the integrity of the the body against the header
 		let tx_root = ::util::triehash::ordered_trie_root(body.rlp().at(0).iter().map(|r| r.as_raw().to_vec()));
 		if tx_root != self.header.transactions_root() {
@@ -154,6 +523,8 @@ impl Body {
 		stream.append_raw(body.rlp().at(0).as_raw(), 1);
 		stream.append_raw(body.rlp().at(1).as_raw(), 1);
 
+		cache.lock().insert_block_body(self.hash, body.clone());
+
 		Ok(encoded::Block::new(stream.out()))
 	}
 }
@@ -164,12 +535,15 @@ pub struct BlockReceipts(pub encoded::Header);
 
 impl BlockReceipts {
 	/// Check a response with receipts against the stored header.
-	pub fn check_response(&self, receipts: &[Receipt]) -> Result<Vec<Receipt>, Error> {
+	pub fn check_response(&self, cache: &Mutex<::cache::Cache>, receipts: &[Receipt]) -> Result<Vec<Receipt>, Error> {
 		let receipts_root = self.0.receipts_root();
 		let found_root = ::util::triehash::ordered_trie_root(receipts.iter().map(|r| ::rlp::encode(r).to_vec()));
 
 		match receipts_root == found_root {
-			true => Ok(receipts.to_vec()),
+			true => {
+				cache.lock().insert_block_receipts(receipts_root, receipts.to_vec());
+				Ok(receipts.to_vec())
+			}
 			false => Err(Error::WrongTrieRoot(receipts_root, found_root)),
 		}
 	}
@ -186,7 +560,7 @@ pub struct Account {
|
||||
|
||||
impl Account {
|
||||
/// Check a response with an account against the stored header.
|
||||
pub fn check_response(&self, proof: &[Bytes]) -> Result<Option<BasicAccount>, Error> {
|
||||
pub fn check_response(&self, _: &Mutex<::cache::Cache>, proof: &[Bytes]) -> Result<Option<BasicAccount>, Error> {
|
||||
let state_root = self.header.state_root();
|
||||
|
||||
let mut db = MemoryDB::new();
|
||||
@ -208,6 +582,7 @@ impl Account {
|
||||
}
|
||||
|
||||
/// Request for account code.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct Code {
|
||||
/// Block hash, number pair.
|
||||
pub block_id: (H256, u64),
|
||||
@ -217,10 +592,10 @@ pub struct Code {
|
||||
|
||||
impl Code {
|
||||
/// Check a response with code against the code hash.
|
||||
pub fn check_response(&self, code: &[u8]) -> Result<(), Error> {
|
||||
pub fn check_response(&self, _: &Mutex<::cache::Cache>, code: &[u8]) -> Result<Vec<u8>, Error> {
|
||||
let found_hash = code.sha3();
|
||||
if found_hash == self.code_hash {
|
||||
Ok(())
|
||||
Ok(code.to_vec())
|
||||
} else {
|
||||
Err(Error::WrongHash(self.code_hash, found_hash))
|
||||
}
|
||||
@ -228,6 +603,7 @@ impl Code {
|
||||
}
|
||||
|
||||
/// Request for transaction execution, along with the parts necessary to verify the proof.
|
||||
#[derive(Clone)]
|
||||
pub struct TransactionProof {
|
||||
/// The transaction to request proof of.
|
||||
pub tx: SignedTransaction,
|
||||
@ -241,25 +617,32 @@ pub struct TransactionProof {
|
||||
|
||||
impl TransactionProof {
|
||||
/// Check the proof, returning the proved execution or indicate that the proof was bad.
|
||||
pub fn check_response(&self, state_items: &[DBValue]) -> ProvedExecution {
|
||||
pub fn check_response(&self, _: &Mutex<::cache::Cache>, state_items: &[DBValue]) -> Result<super::ExecutionResult, Error> {
|
||||
let root = self.header.state_root();
|
||||
|
||||
let mut env_info = self.env_info.clone();
|
||||
env_info.gas_limit = self.tx.gas.clone();
|
||||
state::check_proof(
|
||||
|
||||
let proved_execution = state::check_proof(
|
||||
state_items,
|
||||
root,
|
||||
&self.tx,
|
||||
&*self.engine,
|
||||
&env_info,
|
||||
)
|
||||
&self.env_info,
|
||||
);
|
||||
|
||||
match proved_execution {
|
||||
ProvedExecution::BadProof => Err(Error::BadProof),
|
||||
ProvedExecution::Failed(e) => Ok(Err(e)),
|
||||
ProvedExecution::Complete(e) => Ok(Ok(e)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use util::{MemoryDB, Address, H256};
|
||||
use util::{MemoryDB, Address, Mutex, H256};
|
||||
use util::trie::{Trie, TrieMut, SecTrieDB, SecTrieDBMut};
|
||||
use util::trie::recorder::Recorder;
|
||||
|
||||
@ -268,6 +651,10 @@ mod tests {
|
||||
use ethcore::encoded;
|
||||
use ethcore::receipt::Receipt;
|
||||
|
||||
fn make_cache() -> ::cache::Cache {
|
||||
::cache::Cache::new(Default::default(), ::time::Duration::seconds(1))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn no_invalid_header_by_number() {
|
||||
assert!(HeaderProof::new(0, Default::default()).is_none())
|
||||
@ -297,7 +684,8 @@ mod tests {
|
||||
let proof = cht.prove(10_000, 0).unwrap().unwrap();
|
||||
let req = HeaderProof::new(10_000, cht.root()).unwrap();
|
||||
|
||||
assert!(req.check_response(&proof[..]).is_ok());
|
||||
let cache = Mutex::new(make_cache());
|
||||
assert!(req.check_response(&cache, &proof[..]).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -308,7 +696,8 @@ mod tests {
|
||||
let hash = header.hash();
|
||||
let raw_header = encoded::Header::new(::rlp::encode(&header).to_vec());
|
||||
|
||||
assert!(HeaderByHash(hash).check_response(&raw_header).is_ok())
|
||||
let cache = Mutex::new(make_cache());
|
||||
assert!(HeaderByHash(hash).check_response(&cache, &[raw_header]).is_ok())
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -324,8 +713,9 @@ mod tests {
|
||||
hash: header.hash(),
|
||||
};
|
||||
|
||||
let cache = Mutex::new(make_cache());
|
||||
let response = encoded::Body::new(body_stream.drain().to_vec());
|
||||
assert!(req.check_response(&response).is_ok())
|
||||
assert!(req.check_response(&cache, &response).is_ok())
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -346,7 +736,8 @@ mod tests {
|
||||
|
||||
let req = BlockReceipts(encoded::Header::new(::rlp::encode(&header).to_vec()));
|
||||
|
||||
assert!(req.check_response(&receipts).is_ok())
|
||||
let cache = Mutex::new(make_cache());
|
||||
assert!(req.check_response(&cache, &receipts).is_ok())
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -395,7 +786,8 @@ mod tests {
|
||||
address: addr,
|
||||
};
|
||||
|
||||
assert!(req.check_response(&proof[..]).is_ok());
|
||||
let cache = Mutex::new(make_cache());
|
||||
assert!(req.check_response(&cache, &proof[..]).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -406,7 +798,8 @@ mod tests {
|
||||
code_hash: ::util::Hashable::sha3(&code),
|
||||
};
|
||||
|
||||
assert!(req.check_response(&code).is_ok());
|
||||
assert!(req.check_response(&[]).is_err());
|
||||
let cache = Mutex::new(make_cache());
|
||||
assert!(req.check_response(&cache, &code).is_ok());
|
||||
assert!(req.check_response(&cache, &[]).is_err());
|
||||
}
|
||||
}
|
||||
|
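Every checker above now threads the shared `Mutex<Cache>` through verification, so a successful check also warms the light client's cache. A minimal sketch of the new calling convention, mirroring the test setup (`expected_hash` and `raw_header` are placeholder values, not part of this change):

	let cache = Mutex::new(Cache::new(Default::default(), Duration::seconds(1)));
	let req = HeaderByHash(expected_hash);
	// On success the header is both returned and inserted into the shared cache.
	let header = req.check_response(&cache, &[raw_header])?;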
397
ethcore/light/src/on_demand/tests.rs
Normal file
@ -0,0 +1,397 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Tests for the on-demand service.

use cache::Cache;
use ethcore::encoded;
use ethcore::header::{Header, Seal};
use futures::Future;
use network::{PeerId, NodeId};
use net::*;
use util::{H256, Mutex};
use time::Duration;
use ::request::{self as basic_request, Response};

use std::sync::Arc;

use super::{request, OnDemand, Peer};

// useful contexts to give the service.
enum Context {
	NoOp,
	WithPeer(PeerId),
	RequestFrom(PeerId, ReqId),
	Punish(PeerId),
}

impl EventContext for Context {
	fn peer(&self) -> PeerId {
		match *self {
			Context::WithPeer(id)
				| Context::RequestFrom(id, _)
				| Context::Punish(id) => id,
			_ => panic!("didn't expect to have peer queried."),
		}
	}

	fn as_basic(&self) -> &BasicContext { self }
}

impl BasicContext for Context {
	/// Returns the relevant peer's persistent ID (aka NodeId).
	fn persistent_peer_id(&self, _: PeerId) -> Option<NodeId> {
		panic!("didn't expect to provide persistent ID")
	}

	fn request_from(&self, peer_id: PeerId, _: ::request::NetworkRequests) -> Result<ReqId, Error> {
		match *self {
			Context::RequestFrom(id, req_id) => if peer_id == id { Ok(req_id) } else { Err(Error::NoCredits) },
			_ => panic!("didn't expect to have requests dispatched."),
		}
	}

	fn make_announcement(&self, _: Announcement) {
		panic!("didn't expect to make announcement")
	}

	fn disconnect_peer(&self, id: PeerId) {
		self.disable_peer(id)
	}

	fn disable_peer(&self, peer_id: PeerId) {
		match *self {
			Context::Punish(id) if id == peer_id => {},
			_ => panic!("Unexpectedly punished peer."),
		}
	}
}

// test harness.
struct Harness {
	service: OnDemand,
}

impl Harness {
	fn create() -> Self {
		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::minutes(1))));
		Harness {
			service: OnDemand::new_test(cache),
		}
	}

	fn inject_peer(&self, id: PeerId, peer: Peer) {
		self.service.peers.write().insert(id, peer);
	}
}

fn dummy_status() -> Status {
	Status {
		protocol_version: 1,
		network_id: 999,
		head_td: 1.into(),
		head_hash: H256::default(),
		head_num: 1359,
		genesis_hash: H256::default(),
		last_head: None,
	}
}

fn dummy_capabilities() -> Capabilities {
	Capabilities {
		serve_headers: true,
		serve_chain_since: Some(1),
		serve_state_since: Some(1),
		tx_relay: true,
	}
}

#[test]
fn detects_hangup() {
	let on_demand = Harness::create().service;
	let result = on_demand.header_by_hash(&Context::NoOp, request::HeaderByHash(H256::default()));

	assert_eq!(on_demand.pending.read().len(), 1);
	drop(result);

	on_demand.dispatch_pending(&Context::NoOp);
	assert!(on_demand.pending.read().is_empty());
}

#[test]
fn single_request() {
	let harness = Harness::create();

	let peer_id = 10101;
	let req_id = ReqId(14426);

	harness.inject_peer(peer_id, Peer {
		status: dummy_status(),
		capabilities: dummy_capabilities(),
	});

	let header = Header::default();
	let encoded = encoded::Header::new(header.rlp(Seal::With));

	let recv = harness.service.request_raw(
		&Context::NoOp,
		vec![request::HeaderByHash(header.hash()).into()]
	).unwrap();

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_id));

	assert_eq!(harness.service.pending.read().len(), 0);

	harness.service.on_responses(
		&Context::WithPeer(peer_id),
		req_id,
		&[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded] })]
	);

	assert!(recv.wait().is_ok());
}

#[test]
fn no_capabilities() {
	let harness = Harness::create();

	let peer_id = 10101;

	let mut capabilities = dummy_capabilities();
	capabilities.serve_headers = false;

	harness.inject_peer(peer_id, Peer {
		status: dummy_status(),
		capabilities: capabilities,
	});

	let _recv = harness.service.request_raw(
		&Context::NoOp,
		vec![request::HeaderByHash(Default::default()).into()]
	).unwrap();

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.service.dispatch_pending(&Context::NoOp);

	assert_eq!(harness.service.pending.read().len(), 1);
}

#[test]
fn reassign() {
	let harness = Harness::create();

	let peer_ids = (10101, 12345);
	let req_ids = (ReqId(14426), ReqId(555));

	harness.inject_peer(peer_ids.0, Peer {
		status: dummy_status(),
		capabilities: dummy_capabilities(),
	});

	let header = Header::default();
	let encoded = encoded::Header::new(header.rlp(Seal::With));

	let recv = harness.service.request_raw(
		&Context::NoOp,
		vec![request::HeaderByHash(header.hash()).into()]
	).unwrap();

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.service.dispatch_pending(&Context::RequestFrom(peer_ids.0, req_ids.0));
	assert_eq!(harness.service.pending.read().len(), 0);

	harness.service.on_disconnect(&Context::WithPeer(peer_ids.0), &[req_ids.0]);
	assert_eq!(harness.service.pending.read().len(), 1);

	harness.inject_peer(peer_ids.1, Peer {
		status: dummy_status(),
		capabilities: dummy_capabilities(),
	});

	harness.service.dispatch_pending(&Context::RequestFrom(peer_ids.1, req_ids.1));
	assert_eq!(harness.service.pending.read().len(), 0);

	harness.service.on_responses(
		&Context::WithPeer(peer_ids.1),
		req_ids.1,
		&[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded] })]
	);

	assert!(recv.wait().is_ok());
}

#[test]
fn partial_response() {
	let harness = Harness::create();

	let peer_id = 111;
	let req_ids = (ReqId(14426), ReqId(555));

	harness.inject_peer(peer_id, Peer {
		status: dummy_status(),
		capabilities: dummy_capabilities(),
	});

	let make = |num| {
		let mut hdr = Header::default();
		hdr.set_number(num);

		let encoded = encoded::Header::new(hdr.rlp(Seal::With));
		(hdr, encoded)
	};

	let (header1, encoded1) = make(5);
	let (header2, encoded2) = make(23452);

	// request two headers.
	let recv = harness.service.request_raw(
		&Context::NoOp,
		vec![
			request::HeaderByHash(header1.hash()).into(),
			request::HeaderByHash(header2.hash()).into(),
		],
	).unwrap();

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0));
	assert_eq!(harness.service.pending.read().len(), 0);

	// supply only the first one.
	harness.service.on_responses(
		&Context::WithPeer(peer_id),
		req_ids.0,
		&[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded1] })]
	);

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1));
	assert_eq!(harness.service.pending.read().len(), 0);

	// supply the next one.
	harness.service.on_responses(
		&Context::WithPeer(peer_id),
		req_ids.1,
		&[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded2] })]
	);

	assert!(recv.wait().is_ok());
}

#[test]
fn part_bad_part_good() {
	let harness = Harness::create();

	let peer_id = 111;
	let req_ids = (ReqId(14426), ReqId(555));

	harness.inject_peer(peer_id, Peer {
		status: dummy_status(),
		capabilities: dummy_capabilities(),
	});

	let make = |num| {
		let mut hdr = Header::default();
		hdr.set_number(num);

		let encoded = encoded::Header::new(hdr.rlp(Seal::With));
		(hdr, encoded)
	};

	let (header1, encoded1) = make(5);
	let (header2, encoded2) = make(23452);

	// request two headers.
	let recv = harness.service.request_raw(
		&Context::NoOp,
		vec![
			request::HeaderByHash(header1.hash()).into(),
			request::HeaderByHash(header2.hash()).into(),
		],
	).unwrap();

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0));
	assert_eq!(harness.service.pending.read().len(), 0);

	// supply only the first one, but followed by the wrong kind of response.
	// the first header should be processed.
	harness.service.on_responses(
		&Context::Punish(peer_id),
		req_ids.0,
		&[
			Response::Headers(basic_request::HeadersResponse { headers: vec![encoded1] }),
			Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] }),
		]
	);

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.inject_peer(peer_id, Peer {
		status: dummy_status(),
		capabilities: dummy_capabilities(),
	});

	harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1));
	assert_eq!(harness.service.pending.read().len(), 0);

	// supply the next one.
	harness.service.on_responses(
		&Context::WithPeer(peer_id),
		req_ids.1,
		&[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded2] })]
	);

	assert!(recv.wait().is_ok());
}

#[test]
fn wrong_kind() {
	let harness = Harness::create();

	let peer_id = 10101;
	let req_id = ReqId(14426);

	harness.inject_peer(peer_id, Peer {
		status: dummy_status(),
		capabilities: dummy_capabilities(),
	});

	let _recv = harness.service.request_raw(
		&Context::NoOp,
		vec![request::HeaderByHash(Default::default()).into()]
	).unwrap();

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_id));

	assert_eq!(harness.service.pending.read().len(), 0);

	harness.service.on_responses(
		&Context::Punish(peer_id),
		req_id,
		&[Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] })]
	);

	assert_eq!(harness.service.pending.read().len(), 1);
}
@ -131,7 +131,7 @@ impl TransactionQueue {

		if self.by_hash.contains_key(&hash) { return Err(TransactionError::AlreadyImported) }

		let res = match self.by_account.entry(sender) {
			Entry::Vacant(entry) => {
				entry.insert(AccountTransactions {
					cur_nonce: CurrentNonce::Assumed(nonce),
@ -20,22 +20,30 @@

use std::collections::HashMap;
use request::{
	IncompleteRequest, CompleteRequest, Request,
	OutputKind, Output, NoSuchOutput, Response, ResponseError,
	IncompleteRequest, OutputKind, Output, NoSuchOutput, ResponseError, ResponseLike,
};

/// Build chained requests. Push them onto the series with `push`,
/// and produce a `Requests` object with `build`. Outputs are checked for consistency.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct RequestBuilder {
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RequestBuilder<T> {
	output_kinds: HashMap<(usize, usize), OutputKind>,
	requests: Vec<Request>,
	requests: Vec<T>,
}

impl RequestBuilder {
impl<T> Default for RequestBuilder<T> {
	fn default() -> Self {
		RequestBuilder {
			output_kinds: HashMap::new(),
			requests: Vec::new(),
		}
	}
}

impl<T: IncompleteRequest> RequestBuilder<T> {
	/// Attempt to push a request onto the request chain. Fails if the request
	/// references a non-existent output of a prior request.
	pub fn push(&mut self, request: Request) -> Result<(), NoSuchOutput> {
	pub fn push(&mut self, request: T) -> Result<(), NoSuchOutput> {
		request.check_outputs(|req, idx, kind| {
			match self.output_kinds.get(&(req, idx)) {
				Some(k) if k == &kind => Ok(()),
@ -54,7 +62,7 @@ impl RequestBuilder {
	}

	/// Convert this into a "requests" object.
	pub fn build(self) -> Requests {
	pub fn build(self) -> Requests<T> {
		Requests {
			outputs: HashMap::new(),
			requests: self.requests,
@ -65,44 +73,41 @@ impl RequestBuilder {

/// Requests pending responses.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Requests {
pub struct Requests<T> {
	outputs: HashMap<(usize, usize), Output>,
	requests: Vec<Request>,
	requests: Vec<T>,
	answered: usize,
}

impl Requests {
	/// For each request, produce responses for each.
	/// The responses vector produced goes up to the point where the responder
	/// first returns `None`, an invalid response, or until all requests have been responded to.
	pub fn respond_to_all<F>(mut self, responder: F) -> Vec<Response>
		where F: Fn(CompleteRequest) -> Option<Response>
	{
		let mut responses = Vec::new();

		while let Some(response) = self.next_complete().and_then(&responder) {
			match self.supply_response(&response) {
				Ok(()) => responses.push(response),
				Err(e) => {
					debug!(target: "pip", "produced bad response to request: {:?}", e);
					return responses;
				}
			}
		}

		responses
	}

impl<T> Requests<T> {
	/// Get access to the underlying slice of requests.
	// TODO: unimplemented -> Vec<Request>, // do we _have to_ allocate?
	pub fn requests(&self) -> &[Request] { &self.requests }
	pub fn requests(&self) -> &[T] { &self.requests }

	/// Get the number of answered requests.
	pub fn num_answered(&self) -> usize { self.answered }

	/// Whether the batch is complete.
	pub fn is_complete(&self) -> bool {
		self.answered == self.requests.len()
	}

	/// Map requests from one type into another.
	pub fn map_requests<F, U>(self, f: F) -> Requests<U>
		where F: FnMut(T) -> U, U: IncompleteRequest
	{
		Requests {
			outputs: self.outputs,
			requests: self.requests.into_iter().map(f).collect(),
			answered: self.answered,
		}
	}
}
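The `map_requests` helper above makes batch-wide conversions one-liners; a hedged sketch (the `CheckedHeaderByHash` wrapper type and its `From` impl are assumptions for illustration only, not part of this change):

	// Progress (`answered`) and already-recorded outputs survive the
	// conversion, since only the request vector itself is mapped.
	let checked: Requests<CheckedHeaderByHash> = net_requests.map_requests(CheckedHeaderByHash::from);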

impl<T: IncompleteRequest + Clone> Requests<T> {
	/// Get the next request as a filled request. Returns `None` when all requests answered.
	pub fn next_complete(&self) -> Option<CompleteRequest> {
		if self.answered == self.requests.len() {
	pub fn next_complete(&self) -> Option<T::Complete> {
		if self.is_complete() {
			None
		} else {
			Some(self.requests[self.answered].clone()
@ -111,14 +116,29 @@ impl Requests {
		}
	}

	/// Sweep through all unanswered requests, filling them as necessary.
	pub fn fill_unanswered(&mut self) {
		let outputs = &mut self.outputs;

		for req in self.requests.iter_mut().skip(self.answered) {
			req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput))
		}
	}
}

impl<T: super::CheckedRequest> Requests<T> {
	/// Supply a response for the next request.
	/// Fails on: wrong request kind, all requests answered already.
	pub fn supply_response(&mut self, response: &Response) -> Result<(), ResponseError> {
	pub fn supply_response(&mut self, env: &T::Environment, response: &T::Response)
		-> Result<T::Extract, ResponseError<T::Error>>
	{
		let idx = self.answered;

		// check validity.
		if idx == self.requests.len() { return Err(ResponseError::Unexpected) }
		if self.requests[idx].kind() != response.kind() { return Err(ResponseError::WrongKind) }
		if self.is_complete() { return Err(ResponseError::Unexpected) }

		let extracted = self.requests[idx]
			.check_response(env, response).map_err(ResponseError::Validity)?;

		let outputs = &mut self.outputs;
		response.fill_outputs(|out_idx, output| {
@ -135,7 +155,30 @@ impl Requests {
			req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput))
		}

		Ok(())
		Ok(extracted)
	}
}

impl Requests<super::Request> {
	/// For each request, produce a response.
	/// The responses vector produced goes up to the point where the responder
	/// first returns `None`, an invalid response, or until all requests have been responded to.
	pub fn respond_to_all<F>(mut self, responder: F) -> Vec<super::Response>
		where F: Fn(super::CompleteRequest) -> Option<super::Response>
	{
		let mut responses = Vec::new();

		while let Some(response) = self.next_complete().and_then(&responder) {
			match self.supply_response(&(), &response) {
				Ok(()) => responses.push(response),
				Err(e) => {
					debug!(target: "pip", "produced bad response to request: {:?}", e);
					return responses;
				}
			}
		}

		responses
	}
}

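A condensed sketch of the generic builder flow defined above; `first`, `second`, the `respond` closure, and `env` are placeholders standing in for real requests, a responder, and a `T::Environment` value:

	let mut builder = RequestBuilder::default();
	builder.push(first)?;  // Err(NoSuchOutput) if a back-reference dangles
	builder.push(second)?; // may back-reference an output of `first`

	let mut requests = builder.build();
	while let Some(complete) = requests.next_complete() {
		let response = respond(complete);
		// On success this records the response's outputs, fills later
		// requests' back-references, and advances `answered`.
		requests.supply_response(&env, &response).expect("valid response");
	}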
@ -69,11 +69,15 @@ pub use self::builder::{RequestBuilder, Requests};
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct NoSuchOutput;

/// Wrong kind of response corresponding to request.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct WrongKind;

/// Error on processing a response.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ResponseError {
	/// Wrong kind of response.
	WrongKind,
pub enum ResponseError<T> {
	/// Error in validity.
	Validity(T),
	/// No responses expected.
	Unexpected,
}
@ -96,6 +100,12 @@ impl<T> Field<T> {
			_ => Err(NoSuchOutput),
		}
	}

	fn adjust_req<F>(&mut self, mut mapping: F) where F: FnMut(usize) -> usize {
		if let Field::BackReference(ref mut req_idx, _) = *self {
			*req_idx = mapping(*req_idx)
		}
	}
}

impl<T> From<T> for Field<T> {
@ -197,6 +207,9 @@ impl Encodable for HashOrNumber {
	}
}

/// Type alias for "network requests".
pub type NetworkRequests = Requests<Request>;

/// All request types, as they're sent over the network.
/// They may be incomplete, with back-references to outputs
/// of prior requests.
@ -296,6 +309,7 @@ impl Encodable for Request {

impl IncompleteRequest for Request {
	type Complete = CompleteRequest;
	type Response = Response;

	fn check_outputs<F>(&self, f: F) -> Result<(), NoSuchOutput>
		where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>
@ -350,6 +364,33 @@ impl IncompleteRequest for Request {
			Request::Execution(req) => req.complete().map(CompleteRequest::Execution),
		}
	}

	fn adjust_refs<F>(&mut self, mapping: F) where F: FnMut(usize) -> usize {
		match *self {
			Request::Headers(ref mut req) => req.adjust_refs(mapping),
			Request::HeaderProof(ref mut req) => req.adjust_refs(mapping),
			Request::Receipts(ref mut req) => req.adjust_refs(mapping),
			Request::Body(ref mut req) => req.adjust_refs(mapping),
			Request::Account(ref mut req) => req.adjust_refs(mapping),
			Request::Storage(ref mut req) => req.adjust_refs(mapping),
			Request::Code(ref mut req) => req.adjust_refs(mapping),
			Request::Execution(ref mut req) => req.adjust_refs(mapping),
		}
	}
}

impl CheckedRequest for Request {
	type Extract = ();
	type Error = WrongKind;
	type Environment = ();

	fn check_response(&self, _: &(), response: &Response) -> Result<(), WrongKind> {
		if self.kind() == response.kind() {
			Ok(())
		} else {
			Err(WrongKind)
		}
	}
}

/// Kinds of requests.
@ -421,9 +462,9 @@ pub enum Response {
	Execution(ExecutionResponse),
}

impl Response {
impl ResponseLike for Response {
	/// Fill reusable outputs by writing them into the function.
	pub fn fill_outputs<F>(&self, f: F) where F: FnMut(usize, Output) {
	fn fill_outputs<F>(&self, f: F) where F: FnMut(usize, Output) {
		match *self {
			Response::Headers(ref res) => res.fill_outputs(f),
			Response::HeaderProof(ref res) => res.fill_outputs(f),
@ -435,7 +476,9 @@ impl Response {
			Response::Execution(ref res) => res.fill_outputs(f),
		}
	}
}

impl Response {
	/// Inspect the kind of this response.
	pub fn kind(&self) -> Kind {
		match *self {
@ -490,6 +533,8 @@ impl Encodable for Response {
pub trait IncompleteRequest: Sized {
	/// The complete variant of this request.
	type Complete;
	/// The response to this request.
	type Response: ResponseLike;

	/// Check prior outputs against the needed inputs.
	///
@ -511,6 +556,30 @@ pub trait IncompleteRequest: Sized {
	/// Attempt to convert this request into its complete variant.
	/// Will succeed if all fields have been filled, will fail otherwise.
	fn complete(self) -> Result<Self::Complete, NoSuchOutput>;

	/// Adjust back-reference request indices.
	fn adjust_refs<F>(&mut self, mapping: F) where F: FnMut(usize) -> usize;
}

/// A request which can be checked against its response for more validity.
pub trait CheckedRequest: IncompleteRequest {
	/// Data extracted during the check.
	type Extract;
	/// Error encountered during the check.
	type Error;
	/// Environment passed to response check.
	type Environment;

	/// Check whether the response matches (beyond the type).
	fn check_response(&self, &Self::Environment, &Self::Response) -> Result<Self::Extract, Self::Error>;
}

/// A response-like object.
///
/// These contain re-usable outputs.
pub trait ResponseLike {
	/// Write all re-usable outputs into the provided function.
	fn fill_outputs<F>(&self, output_store: F) where F: FnMut(usize, Output);
}

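For the plain network `Request`, the check above is just a kind comparison (`Extract = ()`, `Error = WrongKind`, `Environment = ()`); a sketch of how a caller might interpret `supply_response` under those associated types:

	match requests.supply_response(&(), &response) {
		Ok(()) => {}                                  // kinds matched; outputs recorded
		Err(ResponseError::Validity(WrongKind)) => {} // mismatched kind, e.g. punish the peer
		Err(ResponseError::Unexpected) => {}          // batch already fully answered
	}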
/// Header request.
@ -555,6 +624,7 @@ pub mod header {

	impl super::IncompleteRequest for Incomplete {
		type Complete = Complete;
		type Response = Response;

		fn check_outputs<F>(&self, mut f: F) -> Result<(), NoSuchOutput>
			where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>
@ -586,6 +656,10 @@ pub mod header {
				reverse: self.reverse,
			})
		}

		fn adjust_refs<F>(&mut self, mapping: F) where F: FnMut(usize) -> usize {
			self.start.adjust_req(mapping)
		}
	}

	/// A complete header request.
@ -608,9 +682,9 @@ pub mod header {
		pub headers: Vec<encoded::Header>,
	}

	impl Response {
	impl super::ResponseLike for Response {
		/// Fill reusable outputs by writing them into the function.
		pub fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) { }
		fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) { }
	}

	impl Decodable for Response {
@ -671,6 +745,7 @@ pub mod header_proof {

	impl super::IncompleteRequest for Incomplete {
		type Complete = Complete;
		type Response = Response;

		fn check_outputs<F>(&self, mut f: F) -> Result<(), NoSuchOutput>
			where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>
@ -699,6 +774,10 @@ pub mod header_proof {
				num: self.num.into_scalar()?,
			})
		}

		fn adjust_refs<F>(&mut self, mapping: F) where F: FnMut(usize) -> usize {
			self.num.adjust_req(mapping)
		}
	}

	/// A complete header proof request.
@ -719,9 +798,9 @@ pub mod header_proof {
		pub td: U256,
	}

	impl Response {
	impl super::ResponseLike for Response {
		/// Fill reusable outputs by providing them to the function.
		pub fn fill_outputs<F>(&self, mut f: F) where F: FnMut(usize, Output) {
		fn fill_outputs<F>(&self, mut f: F) where F: FnMut(usize, Output) {
			f(0, Output::Hash(self.hash));
		}
	}
@ -776,6 +855,7 @@ pub mod block_receipts {

	impl super::IncompleteRequest for Incomplete {
		type Complete = Complete;
		type Response = Response;

		fn check_outputs<F>(&self, mut f: F) -> Result<(), NoSuchOutput>
			where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>
@ -802,6 +882,10 @@ pub mod block_receipts {
				hash: self.hash.into_scalar()?,
			})
		}

		fn adjust_refs<F>(&mut self, mapping: F) where F: FnMut(usize) -> usize {
			self.hash.adjust_req(mapping)
		}
	}

	/// A complete block receipts request.
@ -818,9 +902,9 @@ pub mod block_receipts {
		pub receipts: Vec<Receipt>
	}

	impl Response {
	impl super::ResponseLike for Response {
		/// Fill reusable outputs by providing them to the function.
		pub fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) {}
		fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) {}
	}

	impl Decodable for Response {
@ -868,6 +952,7 @@ pub mod block_body {

	impl super::IncompleteRequest for Incomplete {
		type Complete = Complete;
		type Response = Response;

		fn check_outputs<F>(&self, mut f: F) -> Result<(), NoSuchOutput>
			where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>
@ -894,6 +979,10 @@ pub mod block_body {
				hash: self.hash.into_scalar()?,
			})
		}

		fn adjust_refs<F>(&mut self, mapping: F) where F: FnMut(usize) -> usize {
			self.hash.adjust_req(mapping)
		}
	}

	/// A complete block body request.
@ -910,9 +999,9 @@ pub mod block_body {
		pub body: encoded::Body,
	}

	impl Response {
	impl super::ResponseLike for Response {
		/// Fill reusable outputs by providing them to the function.
		pub fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) {}
		fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) {}
	}

	impl Decodable for Response {
@ -971,6 +1060,7 @@ pub mod account {

	impl super::IncompleteRequest for Incomplete {
		type Complete = Complete;
		type Response = Response;

		fn check_outputs<F>(&self, mut f: F) -> Result<(), NoSuchOutput>
			where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>
@ -1013,6 +1103,11 @@ pub mod account {
				address_hash: self.address_hash.into_scalar()?,
			})
		}

		fn adjust_refs<F>(&mut self, mut mapping: F) where F: FnMut(usize) -> usize {
			self.block_hash.adjust_req(&mut mapping);
			self.address_hash.adjust_req(&mut mapping);
		}
	}

	/// A complete request for an account.
@ -1039,9 +1134,9 @@ pub mod account {
		pub storage_root: H256,
	}

	impl Response {
	impl super::ResponseLike for Response {
		/// Fill reusable outputs by providing them to the function.
		pub fn fill_outputs<F>(&self, mut f: F) where F: FnMut(usize, Output) {
		fn fill_outputs<F>(&self, mut f: F) where F: FnMut(usize, Output) {
			f(0, Output::Hash(self.code_hash));
			f(1, Output::Hash(self.storage_root));
		}
@ -1109,6 +1204,7 @@ pub mod storage {

	impl super::IncompleteRequest for Incomplete {
		type Complete = Complete;
		type Response = Response;

		fn check_outputs<F>(&self, mut f: F) -> Result<(), NoSuchOutput>
			where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>
@ -1162,6 +1258,12 @@ pub mod storage {
				key_hash: self.key_hash.into_scalar()?,
			})
		}

		fn adjust_refs<F>(&mut self, mut mapping: F) where F: FnMut(usize) -> usize {
			self.block_hash.adjust_req(&mut mapping);
			self.address_hash.adjust_req(&mut mapping);
			self.key_hash.adjust_req(&mut mapping);
		}
	}

	/// A complete request for a storage proof.
@ -1184,9 +1286,9 @@ pub mod storage {
		pub value: H256,
	}

	impl Response {
	impl super::ResponseLike for Response {
		/// Fill reusable outputs by providing them to the function.
		pub fn fill_outputs<F>(&self, mut f: F) where F: FnMut(usize, Output) {
		fn fill_outputs<F>(&self, mut f: F) where F: FnMut(usize, Output) {
			f(0, Output::Hash(self.value));
		}
	}
@ -1243,6 +1345,7 @@ pub mod contract_code {

	impl super::IncompleteRequest for Incomplete {
		type Complete = Complete;
		type Response = Response;

		fn check_outputs<F>(&self, mut f: F) -> Result<(), NoSuchOutput>
			where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>
@ -1281,6 +1384,11 @@ pub mod contract_code {
				code_hash: self.code_hash.into_scalar()?,
			})
		}

		fn adjust_refs<F>(&mut self, mut mapping: F) where F: FnMut(usize) -> usize {
			self.block_hash.adjust_req(&mut mapping);
			self.code_hash.adjust_req(&mut mapping);
		}
	}

	/// A complete request.
@ -1299,9 +1407,9 @@ pub mod contract_code {
		pub code: Bytes,
	}

	impl Response {
	impl super::ResponseLike for Response {
		/// Fill reusable outputs by providing them to the function.
		pub fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) {}
		fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) {}
	}

	impl Decodable for Response {
@ -1380,6 +1488,7 @@ pub mod execution {

	impl super::IncompleteRequest for Incomplete {
		type Complete = Complete;
		type Response = Response;

		fn check_outputs<F>(&self, mut f: F) -> Result<(), NoSuchOutput>
			where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>
@ -1412,6 +1521,10 @@ pub mod execution {
				data: self.data,
			})
		}

		fn adjust_refs<F>(&mut self, mapping: F) where F: FnMut(usize) -> usize {
			self.block_hash.adjust_req(mapping);
		}
	}

	/// A complete request.
@ -1440,9 +1553,9 @@ pub mod execution {
		pub items: Vec<DBValue>,
	}

	impl Response {
	impl super::ResponseLike for Response {
		/// Fill reusable outputs by providing them to the function.
		pub fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) {}
		fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) {}
	}

	impl Decodable for Response {
@ -13,3 +13,6 @@ ethcore-util = { path = "../../util" }

[build-dependencies]
native-contract-generator = { path = "generator" }

[features]
default = []
@ -33,6 +33,8 @@ const VALIDATOR_SET_ABI: &'static str = r#"[{"constant":true,"inputs":[],"name":

const VALIDATOR_REPORT_ABI: &'static str = r#"[{"constant":false,"inputs":[{"name":"validator","type":"address"},{"name":"blockNumber","type":"uint256"},{"name":"proof","type":"bytes"}],"name":"reportMalicious","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"validator","type":"address"},{"name":"blockNumber","type":"uint256"}],"name":"reportBenign","outputs":[],"payable":false,"type":"function"}]"#;

const TEST_VALIDATOR_SET_ABI: &'static str = r#"[{"constant":true,"inputs":[],"name":"transitionNonce","outputs":[{"name":"n","type":"uint256"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"newValidators","type":"address[]"}],"name":"setValidators","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"getValidators","outputs":[{"name":"vals","type":"address[]"}],"payable":false,"type":"function"},{"inputs":[],"payable":false,"type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_parent_hash","type":"bytes32"},{"indexed":true,"name":"_nonce","type":"uint256"},{"indexed":false,"name":"_new_set","type":"address[]"}],"name":"ValidatorsChanged","type":"event"}]"#;

fn build_file(name: &str, abi: &str, filename: &str) {
	let code = ::native_contract_generator::generate_module(name, abi).unwrap();

@ -43,10 +45,16 @@ fn build_file(name: &str, abi: &str, filename: &str) {
	f.write_all(code.as_bytes()).unwrap();
}

fn build_test_contracts() {
	build_file("ValidatorSet", TEST_VALIDATOR_SET_ABI, "test_validator_set.rs");
}

fn main() {
	build_file("Registry", REGISTRY_ABI, "registry.rs");
	build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs");
	build_file("SecretStoreAclStorage", SECRETSTORE_ACL_STORAGE_ABI, "secretstore_acl_storage.rs");
	build_file("ValidatorSet", VALIDATOR_SET_ABI, "validator_set.rs");
	build_file("ValidatorReport", VALIDATOR_REPORT_ABI, "validator_report.rs");

	build_test_contracts();
}
@ -29,6 +29,8 @@ mod secretstore_acl_storage;
mod validator_set;
mod validator_report;

pub mod test_contracts;

pub use self::registry::Registry;
pub use self::service_transaction::ServiceTransactionChecker;
pub use self::secretstore_acl_storage::SecretStoreAclStorage;
21
ethcore/native_contracts/src/test_contracts/mod.rs
Normal file
@ -0,0 +1,21 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Contracts used for testing.

pub mod validator_set;

pub use self::validator_set::ValidatorSet;
21
ethcore/native_contracts/src/test_contracts/validator_set.rs
Normal file
@ -0,0 +1,21 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

#![allow(unused_mut, unused_variables, unused_imports)]

//! Test validator set contract.

include!(concat!(env!("OUT_DIR"), "/test_validator_set.rs"));
@ -445,7 +445,12 @@ impl<'a> Iterator for EpochTransitionIter<'a> {
			let is_in_canon_chain = self.chain.block_hash(transition.block_number)
				.map_or(false, |hash| hash == transition.block_hash);

			if is_in_canon_chain {
			// if the transition is within the block gap, there will only be
			// one candidate, and it will come from the restored snapshot.
			let is_ancient = self.chain.first_block_number()
				.map_or(false, |first| first > transition.block_number);

			if is_ancient || is_in_canon_chain {
				return Some((transitions.number, transition))
			}
		}
@ -864,6 +869,7 @@ impl BlockChain {
	}

	/// Iterate over all epoch transitions.
	/// This will only return transitions within the canonical chain.
	pub fn epoch_transitions(&self) -> EpochTransitionIter {
		let iter = self.db.iter_from_prefix(db::COL_EXTRA, &EPOCH_KEY_PREFIX[..]);
		EpochTransitionIter {
@ -872,6 +878,16 @@ impl BlockChain {
		}
	}

	/// Get a specific epoch transition by epoch number and provided block hash.
	pub fn epoch_transition(&self, epoch_num: u64, block_hash: H256) -> Option<EpochTransition> {
		trace!(target: "blockchain", "Loading epoch {} transition at block {}",
			epoch_num, block_hash);

		self.db.read(db::COL_EXTRA, &epoch_num).and_then(|transitions: EpochTransitions| {
			transitions.candidates.into_iter().find(|c| c.block_hash == block_hash)
		})
	}

	/// Add a child to a given block. Assumes that the block hash is in
	/// the chain and the child's parent is this block.
	///
68
ethcore/src/client/ancient_import.rs
Normal file
@ -0,0 +1,68 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Helper for ancient block import.

use std::sync::Arc;

use engines::{Engine, EpochVerifier, EpochChange};
use error::Error;
use header::Header;

use rand::Rng;
use util::RwLock;

// do "heavy" verification on ~1/50 blocks, randomly sampled.
const HEAVY_VERIFY_RATE: f32 = 0.02;

/// Ancient block verifier: import an ancient sequence of blocks in order from a starting
/// epoch.
pub struct AncientVerifier {
	cur_verifier: RwLock<Box<EpochVerifier>>,
	engine: Arc<Engine>,
}

impl AncientVerifier {
	/// Create a new ancient block verifier with the given engine and initial verifier.
	pub fn new(engine: Arc<Engine>, start_verifier: Box<EpochVerifier>) -> Self {
		AncientVerifier {
			cur_verifier: RwLock::new(start_verifier),
			engine: engine,
		}
	}

	/// Verify the next block header, randomly choosing whether to do heavy or light
	/// verification. If the block is the end of an epoch, updates the epoch verifier.
	pub fn verify<R: Rng, F: Fn(u64) -> Result<Box<EpochVerifier>, Error>>(
		&self,
		rng: &mut R,
		header: &Header,
		block: &[u8],
		receipts: &[::receipt::Receipt],
		load_verifier: F,
	) -> Result<(), ::error::Error> {
		match rng.gen::<f32>() <= HEAVY_VERIFY_RATE {
			true => self.cur_verifier.read().verify_heavy(header)?,
			false => self.cur_verifier.read().verify_light(header)?,
		}

		if let EpochChange::Yes(num) = self.engine.is_epoch_end(header, Some(block), Some(receipts)) {
			*self.cur_verifier.write() = load_verifier(num)?;
		}

		Ok(())
	}
}
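A condensed sketch of the verifier's contract; `rng`, `initial_verifier`, and `load_epoch_verifier` are placeholders, and the real wiring lives in the client changes below:

	let verifier = AncientVerifier::new(engine.clone(), initial_verifier);
	verifier.verify(
		&mut rng,      // drives the ~1/50 heavy-verification sampling
		&header,
		&block_bytes,
		&receipts,
		|epoch_num| load_epoch_verifier(epoch_num), // e.g. from stored epoch transitions
	)?;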
@ -34,6 +34,7 @@ use basic_types::Seal;
use block::*;
use blockchain::{BlockChain, BlockProvider, EpochTransition, TreeRoute, ImportRoute};
use blockchain::extras::TransactionAddress;
use client::ancient_import::AncientVerifier;
use client::Error as ClientError;
use client::{
	BlockId, TransactionId, UncleId, TraceId, ClientConfig, BlockChainClient,
@ -61,7 +62,7 @@ use service::ClientIoMessage;
use snapshot::{self, io as snapshot_io};
use spec::Spec;
use state_db::StateDB;
use state::{self, State, CleanupMode};
use state::{self, State};
use trace;
use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase};
use trace::FlatTransactionTraces;
@ -152,6 +153,7 @@ pub struct Client {
	factories: Factories,
	history: u64,
	rng: Mutex<OsRng>,
	ancient_verifier: Mutex<Option<AncientVerifier>>,
	on_user_defaults_change: Mutex<Option<Box<FnMut(Option<Mode>) + 'static + Send>>>,
	registrar: Mutex<Option<Registry>>,
	exit_handler: Mutex<Option<Box<Fn(bool, Option<String>) + 'static + Send>>>,
@ -241,6 +243,7 @@ impl Client {
			factories: factories,
			history: history,
			rng: Mutex::new(OsRng::new().map_err(::util::UtilError::StdIo)?),
			ancient_verifier: Mutex::new(None),
			on_user_defaults_change: Mutex::new(None),
			registrar: Mutex::new(None),
			exit_handler: Mutex::new(None),
@ -256,7 +259,11 @@ impl Client {
		// ensure genesis epoch proof in the DB.
		{
			let chain = client.chain.read();
			client.generate_epoch_proof(&spec.genesis_header(), 0, &*chain);
			let gh = spec.genesis_header();
			if chain.epoch_transition(0, spec.genesis_header().hash()).is_none() {
				trace!(target: "client", "No genesis transition found.");
				client.generate_epoch_proof(&gh, 0, &*chain);
			}
		}

		if let Some(reg_addr) = client.additional_params().get("registrar").and_then(|s| Address::from_str(s).ok()) {
@ -540,25 +547,56 @@ impl Client {
	fn import_old_block(&self, block_bytes: Bytes, receipts_bytes: Bytes) -> Result<H256, ::error::Error> {
		let block = BlockView::new(&block_bytes);
		let header = block.header();
		let receipts = ::rlp::decode_list(&receipts_bytes);
		let hash = header.hash();
		let _import_lock = self.import_lock.lock();

		{
			let _timer = PerfTimer::new("import_old_block");
			let mut rng = self.rng.lock();
			let chain = self.chain.read();
			let mut ancient_verifier = self.ancient_verifier.lock();

			// verify block.
			::snapshot::verify_old_block(
				&mut *rng,
				&header,
				&*self.engine,
				&*chain,
				Some(&block_bytes),
				false,
			)?;
			{
				// closure for verifying a block.
				let verify_with = |verifier: &AncientVerifier| -> Result<(), ::error::Error> {
					// verify the block, passing a closure used to load an epoch verifier
					// by number.
					verifier.verify(
						&mut *self.rng.lock(),
						&header,
						&block_bytes,
						&receipts,
						|epoch_num| chain.epoch_transition(epoch_num, hash)
							.ok_or(BlockError::UnknownEpochTransition(epoch_num))
							.map_err(Into::into)
							.and_then(|t| self.engine.epoch_verifier(&header, &t.proof))
					)
				};

				// initialize the ancient block verifier if we don't have one already.
				match &mut *ancient_verifier {
					&mut Some(ref verifier) => {
						verify_with(verifier)?
					}
					x @ &mut None => {
						// load most recent epoch.
						trace!(target: "client", "Initializing ancient block restoration.");
						let current_epoch_data = chain.epoch_transitions()
							.take_while(|&(_, ref t)| t.block_number < header.number())
							.last()
							.map(|(_, t)| t.proof)
							.expect("At least one epoch entry (genesis) always stored; qed");

						let current_verifier = self.engine.epoch_verifier(&header, &current_epoch_data)?;
						let current_verifier = AncientVerifier::new(self.engine.clone(), current_verifier);

						verify_with(&current_verifier)?;
						*x = Some(current_verifier);
					}
				}
			}

			// Commit results
			let receipts = ::rlp::decode_list(&receipts_bytes);
			let mut batch = DBTransaction::new();
			chain.insert_unordered_block(&mut batch, &block_bytes, receipts, None, false, true);
			// Final commit to the DB
@ -590,7 +628,7 @@ impl Client {
		let entering_new_epoch = {
			use engines::EpochChange;
			match self.engine.is_epoch_end(block.header(), Some(block_data), Some(&receipts)) {
				EpochChange::Yes(e, _) => Some((block.header().clone(), e)),
				EpochChange::Yes(e) => Some((block.header().clone(), e)),
				EpochChange::No => None,
				EpochChange::Unsure(_) => {
					warn!(target: "client", "Detected invalid engine implementation.");
@ -641,7 +679,8 @@ impl Client {

		let mut batch = DBTransaction::new();
		let hash = header.hash();
		debug!(target: "client", "Generating validation proof for block {}", hash);
		debug!(target: "client", "Generating validation proof for epoch {} at block {}",
			epoch_number, hash);

		// proof is two-part. state items read in lexicographical order,
		// and the secondary "proof" part.
@ -880,8 +919,8 @@ impl Client {
		let start_hash = match at {
			BlockId::Latest => {
				let start_num = match db.earliest_era() {
					Some(era) => ::std::cmp::max(era, best_block_number - history),
					None => best_block_number - history,
					Some(era) => ::std::cmp::max(era, best_block_number.saturating_sub(history)),
					None => best_block_number.saturating_sub(history),
				};

				match self.block_hash(BlockId::Number(start_num)) {
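The switch to `saturating_sub` guards against underflow on young chains, where `history` can exceed the best block number; a quick illustration:

	let best_block_number: u64 = 12;
	let history: u64 = 64;
	// `best_block_number - history` would panic in debug builds (and wrap in
	// release builds); `saturating_sub` clamps the result at zero instead.
	assert_eq!(best_block_number.saturating_sub(history), 0);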
@ -992,16 +1031,9 @@ impl BlockChainClient for Client {
		let mut state = self.state_at(block).ok_or(CallError::StatePruned)?;
		let original_state = if analytics.state_diffing { Some(state.clone()) } else { None };

		let sender = t.sender();
		let balance = state.balance(&sender).map_err(|_| CallError::StateCorrupt)?;
		let needed_balance = t.value + t.gas * t.gas_price;
		if balance < needed_balance {
			// give the sender a sufficient balance
			state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty)
				.map_err(|_| CallError::StateCorrupt)?;
		}
		let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
		let mut ret = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(t, options)?;
		let mut ret = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm)
			.transact_virtual(t, options)?;

		// TODO gav move this into Executive.
		if let Some(original) = original_state {
@ -1023,7 +1055,6 @@ impl BlockChainClient for Client {
		// that's just a copy of the state.
		let original_state = self.state_at(block).ok_or(CallError::StatePruned)?;
		let sender = t.sender();
		let balance = original_state.balance(&sender).map_err(ExecutionError::from)?;
		let options = TransactOptions { tracing: true, vm_tracing: false, check_nonce: false };

		let cond = |gas| {
@ -1032,15 +1063,8 @@ impl BlockChainClient for Client {
			let tx = tx.fake_sign(sender);

			let mut state = original_state.clone();
			let needed_balance = tx.value + tx.gas * tx.gas_price;
			if balance < needed_balance {
				// give the sender a sufficient balance
				state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty)
					.map_err(ExecutionError::from)?;
			}

			Ok(Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm)
				.transact(&tx, options.clone())
				.transact_virtual(&tx, options.clone())
				.map(|r| r.exception.is_none())
				.unwrap_or(false))
		};
@ -16,6 +16,7 @@
|
||||
|
||||
//! Blockchain database client.
|
||||
|
||||
mod ancient_import;
|
||||
mod config;
|
||||
mod error;
|
||||
mod test_client;
|
||||
|
@ -452,6 +452,10 @@ impl Engine for AuthorityRound {
|
||||
fn sign(&self, hash: H256) -> Result<Signature, Error> {
|
||||
self.signer.sign(hash).map_err(Into::into)
|
||||
}
|
||||
|
||||
fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> {
|
||||
Some(Box::new(::snapshot::PoaSnapshot))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
@ -216,6 +216,10 @@ impl Engine for BasicAuthority {
|
||||
fn sign(&self, hash: H256) -> Result<Signature, Error> {
|
||||
self.signer.sign(hash).map_err(Into::into)
|
||||
}
|
||||
|
||||
fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> {
|
||||
Some(Box::new(::snapshot::PoaSnapshot))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
@ -22,7 +22,7 @@ use header::Header;
|
||||
/// Verifier for all blocks within an epoch with self-contained state.
|
||||
///
|
||||
/// See docs on `Engine` relating to proving functions for more details.
|
||||
pub trait EpochVerifier: Sync {
|
||||
pub trait EpochVerifier: Send + Sync {
|
||||
/// Get the epoch number.
|
||||
fn epoch_number(&self) -> u64;
|
||||
|
||||
|
@ -107,8 +107,8 @@ pub enum EpochChange {
|
||||
Unsure(Unsure),
|
||||
/// No epoch change.
|
||||
No,
|
||||
/// Validation proof required, and the new epoch number and expected proof.
|
||||
Yes(u64, Bytes),
|
||||
/// Validation proof required, and the new epoch number.
|
||||
Yes(u64),
|
||||
}
|
||||
|
||||
/// More data required to determine if an epoch change occurred at a given block.
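With the expected proof dropped from `Yes`, callers now obtain the proof lazily via `epoch_proof`. A minimal sketch of the new match shape (hypothetical caller, mirroring the `Client` change above; `start_transition` is a placeholder for whatever the caller does next):

    match engine.is_epoch_end(header, Some(block_data), Some(&receipts)) {
        // the proof is no longer carried here; fetch it later via engine.epoch_proof(..)
        EpochChange::Yes(epoch_number) => start_transition(epoch_number),
        EpochChange::No => {}
        EpochChange::Unsure(_) => {} // call again once block/receipts are available
    }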
@ -227,6 +227,9 @@ pub trait Engine : Sync + Send {
/// For example, for PoA chains the proof will be a validator set,
/// and the corresponding `EpochVerifier` can be used to correctly validate
/// all blocks produced under that `ValidatorSet`
///
/// It must be possible to generate an epoch proof for any block in an epoch,
/// and it should always be equivalent to the proof of the transition block.
fn epoch_proof(&self, _header: &Header, _caller: &Call)
-> Result<Vec<u8>, Error>
{
@ -234,6 +237,11 @@ pub trait Engine : Sync + Send {
}

/// Whether an epoch change occurred at the given header.
///
/// If the block or receipts are required, return `Unsure` and the function will be
/// called again with them.
/// Return `Yes` or `No` when the answer is definitively known.
///
/// Should not interact with state.
fn is_epoch_end(&self, _header: &Header, _block: Option<&[u8]>, _receipts: Option<&[Receipt]>)
-> EpochChange

@ -76,23 +76,45 @@ impl ValidatorSet for Multi {
-> EpochChange
{
let (set_block, set) = self.correct_set_by_number(header.number());
let (next_set_block, _) = self.correct_set_by_number(header.number() + 1);

// multi-set transitions require epoch changes.
if next_set_block != set_block {
return EpochChange::Yes(next_set_block);
}

match set.is_epoch_end(header, block, receipts) {
EpochChange::Yes(num, proof) => EpochChange::Yes(set_block + num, proof),
EpochChange::Yes(num) => EpochChange::Yes(set_block + num),
other => other,
}
}

fn epoch_proof(&self, header: &Header, caller: &Call) -> Result<Vec<u8>, String> {
self.correct_set_by_number(header.number()).1.epoch_proof(header, caller)
let (set_block, set) = self.correct_set_by_number(header.number());
let (next_set_block, next_set) = self.correct_set_by_number(header.number() + 1);

if next_set_block != set_block {
return next_set.epoch_proof(header, caller);
}

set.epoch_proof(header, caller)
}

fn epoch_set(&self, header: &Header, proof: &[u8]) -> Result<(u64, super::SimpleList), ::error::Error> {
// "multi" epoch is the inner set's epoch plus the transition block to that set.
// ensures epoch increases monotonically.
let (set_block, set) = self.correct_set_by_number(header.number());
let (inner_epoch, list) = set.epoch_set(header, proof)?;
Ok((set_block + inner_epoch, list))
let (next_set_block, next_set) = self.correct_set_by_number(header.number() + 1);

// this block kicks off a new validator set -- get the validator set
// starting there.
if next_set_block != set_block {
let (inner_epoch, list) = next_set.epoch_set(header, proof)?;
Ok((next_set_block + inner_epoch, list))
} else {
let (inner_epoch, list) = set.epoch_set(header, proof)?;
Ok((set_block + inner_epoch, list))
}
}
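A worked example of the monotonic mapping above (hypothetical numbers): if one validator set starts at block 0 and the next at block 100, an inner epoch 5 reported by the second set maps to outer epoch 100 + 5 = 105, which is always greater than any epoch derived from the earlier set.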
fn contains_with_caller(&self, bh: &H256, address: &Address, caller: &Call) -> bool {

@ -182,10 +182,9 @@ impl ValidatorSet for ValidatorSafeContract {
);

match (nonce, validators) {
(Some(nonce), Some(validators)) => {
let proof = encode_proof(nonce, &validators);
(Some(nonce), Some(_)) => {
let new_epoch = nonce.low_u64();
::engines::EpochChange::Yes(new_epoch, proof)
::engines::EpochChange::Yes(new_epoch)
}
_ => {
debug!(target: "engine", "Successfully decoded log turned out to be bad.");

@ -169,6 +169,8 @@ pub enum BlockError {
UnknownParent(H256),
/// Uncle parent given is unknown.
UnknownUncleParent(H256),
/// No transition to epoch number.
UnknownEpochTransition(u64),
}

impl fmt::Display for BlockError {
@ -202,6 +204,7 @@ impl fmt::Display for BlockError {
RidiculousNumber(ref oob) => format!("Implausible block number. {}", oob),
UnknownParent(ref hash) => format!("Unknown parent: {}", hash),
UnknownUncleParent(ref hash) => format!("Unknown uncle parent: {}", hash),
UnknownEpochTransition(ref num) => format!("Unknown transition to epoch number: {}", num),
};

f.write_fmt(format_args!("Block error ({})", msg))

@ -129,6 +129,21 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
}
}

/// Execute a transaction in a "virtual" context.
/// This will ensure the caller has enough balance to execute the desired transaction.
/// Used for extra-block executions for things like consensus contracts and RPCs
pub fn transact_virtual(&'a mut self, t: &SignedTransaction, options: TransactOptions) -> Result<Executed, ExecutionError> {
let sender = t.sender();
let balance = self.state.balance(&sender)?;
let needed_balance = t.value + t.gas * t.gas_price;
if balance < needed_balance {
// give the sender a sufficient balance
self.state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty)?;
}

self.transact(t, options)
}
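A sketch of a call site for the new method, under the same assumptions as the `Client::call` hunk above (a `state`, `env_info`, `engine`, and VM factory already in scope). Topping up the balance first means read-only executions cannot fail for lack of funds:

    let options = TransactOptions { tracing: false, vm_tracing: false, check_nonce: false };
    // the virtual variant credits the sender before executing
    let executed = Executive::new(&mut state, &env_info, &*engine, &factories.vm)
        .transact_virtual(&t, options)?;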

/// Execute transaction/call with tracing enabled
pub fn transact_with_tracer<T, V>(
&'a mut self,

@ -17,12 +17,14 @@
//! Block header.

use util::*;
use basic_types::{LogBloom, Seal, ZERO_LOGBLOOM};
use basic_types::{LogBloom, ZERO_LOGBLOOM};
use time::get_time;
use rlp::*;

use std::cell::RefCell;

pub use basic_types::Seal;

/// Type for Block number
pub type BlockNumber = u64;

498 ethcore/src/snapshot/consensus/authority.rs (Normal file)
@ -0,0 +1,498 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Secondary chunk creation and restoration, implementation for proof-of-authority
//! based engines.
//!
//! The chunks here contain state proofs of transitions, along with validator proofs.

use super::{SnapshotComponents, Rebuilder, ChunkSink};

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

use blockchain::{BlockChain, BlockProvider, EpochTransition};
use engines::{Engine, EpochVerifier};
use env_info::EnvInfo;
use ids::BlockId;
use header::Header;
use receipt::Receipt;
use snapshot::{Error, ManifestData};
use state_db::StateDB;

use itertools::{Position, Itertools};
use rlp::{RlpStream, UntrustedRlp};
use util::{Address, Bytes, H256, KeyValueDB, DBValue};

/// Snapshot creation and restoration for PoA chains.
/// Chunk format:
///
/// [FLAG, [header, epoch_number, epoch data, state proof, last hashes], ...]
/// - Header data at which transition occurred,
/// - epoch data (usually list of validators)
/// - state items required to check epoch data
/// - last 256 hashes before the transition; required for checking state changes.
///
/// FLAG is a bool: true for last chunk, false otherwise.
///
/// The last item of the last chunk will be a list containing data for the warp target block:
/// [header, transactions, uncles, receipts, last_hashes, parent_td].
/// If this block is not a transition block, the epoch data should be the same as that
/// for the last transition.
pub struct PoaSnapshot;
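Given the format above, each transition entry is a five-item RLP list; a hedged sketch of decoding one entry (mirroring `ChunkRebuilder::verify_transition` below, with `entry` a hypothetical `UntrustedRlp`):

    let header: Header = entry.val_at(0)?;
    let epoch_number: u64 = entry.val_at(1)?;
    let epoch_data: Bytes = entry.val_at(2)?;
    let state_proof = entry.at(3)?;            // list of state items backing the epoch data
    let last_hashes: Vec<H256> = entry.list_at(4)?;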

impl SnapshotComponents for PoaSnapshot {
fn chunk_all(
&mut self,
chain: &BlockChain,
block_at: H256,
sink: &mut ChunkSink,
preferred_size: usize,
) -> Result<(), Error> {
let number = chain.block_number(&block_at)
.ok_or_else(|| Error::InvalidStartingBlock(BlockId::Hash(block_at)))?;

let mut pending_size = 0;
let mut rlps = Vec::new();

// TODO: this will become irrelevant after recent block hashes are moved into
// the state. can we optimize it out in that case?
let make_last_hashes = |parent_hash| chain.ancestry_iter(parent_hash)
.into_iter()
.flat_map(|inner| inner)
.take(255)
.collect::<Vec<_>>();

for (epoch_number, transition) in chain.epoch_transitions()
.take_while(|&(_, ref t)| t.block_number <= number)
{
let header = chain.block_header_data(&transition.block_hash)
.ok_or(Error::BlockNotFound(transition.block_hash))?;

let last_hashes: Vec<_> = make_last_hashes(header.parent_hash());

let entry = {
let mut entry_stream = RlpStream::new_list(5);
entry_stream
.append_raw(&header.into_inner(), 1)
.append(&epoch_number)
.append(&transition.proof);

entry_stream.begin_list(transition.state_proof.len());
for item in transition.state_proof {
entry_stream.append(&&*item);
}

entry_stream.append_list(&last_hashes);
entry_stream.out()
};
// cut off the chunk if too large.
let new_loaded_size = pending_size + entry.len();
pending_size = if new_loaded_size > preferred_size && !rlps.is_empty() {
write_chunk(false, &mut rlps, sink)?;
entry.len()
} else {
new_loaded_size
};

rlps.push(entry);
}

let (block, receipts) = chain.block(&block_at)
.and_then(|b| chain.block_receipts(&block_at).map(|r| (b, r)))
.ok_or(Error::BlockNotFound(block_at))?;
let block = block.decode();

let parent_td = chain.block_details(block.header.parent_hash())
.map(|d| d.total_difficulty)
.ok_or(Error::BlockNotFound(block_at))?;

let last_hashes = make_last_hashes(*block.header.parent_hash());

rlps.push({
let mut stream = RlpStream::new_list(6);
stream
.append(&block.header)
.append_list(&block.transactions)
.append_list(&block.uncles)
.append(&receipts)
.append_list(&last_hashes)
.append(&parent_td);
stream.out()
});

write_chunk(true, &mut rlps, sink)?;

Ok(())
}

fn rebuilder(
&self,
chain: BlockChain,
db: Arc<KeyValueDB>,
manifest: &ManifestData,
) -> Result<Box<Rebuilder>, ::error::Error> {
Ok(Box::new(ChunkRebuilder {
manifest: manifest.clone(),
warp_target: None,
chain: chain,
db: db,
had_genesis: false,
unverified_firsts: Vec::new(),
last_proofs: Vec::new(),
}))
}

fn min_supported_version(&self) -> u64 { 3 }
fn current_version(&self) -> u64 { 3 }
}

// writes a chunk composed of the inner RLPs here.
// flag indicates whether the chunk is the last chunk.
fn write_chunk(last: bool, chunk_data: &mut Vec<Bytes>, sink: &mut ChunkSink) -> Result<(), Error> {
let mut stream = RlpStream::new_list(1 + chunk_data.len());

stream.append(&last);
for item in chunk_data.drain(..) {
stream.append_raw(&item, 1);
}

(sink)(stream.out().as_slice()).map_err(Into::into)
}

// rebuilder checks state proofs for all transitions, and checks that each
// transition header is verifiable from the epoch data of the one prior.
struct ChunkRebuilder {
manifest: ManifestData,
warp_target: Option<(Header, Vec<H256>)>,
chain: BlockChain,
db: Arc<KeyValueDB>,
had_genesis: bool,

// sorted vectors of unverified first blocks in a chunk
// and epoch data from last blocks in chunks.
// verification for these will be done at the end.
unverified_firsts: Vec<(u64, Header)>,
last_proofs: Vec<(u64, Header, Bytes)>,
}

// verified data.
struct Verified {
epoch_number: u64,
epoch_transition: EpochTransition,
header: Header,
}

// make a transaction and env info.
// TODO: hardcoded 50M to match constants in client.
// would be nice to extract magic numbers, or better yet
// off-chain transaction execution, into its own module.
fn make_tx_and_env(
engine: &Engine,
addr: Address,
data: Bytes,
header: &Header,
last_hashes: Arc<Vec<H256>>,
) -> (::transaction::SignedTransaction, EnvInfo) {
use transaction::{Action, Transaction};

let transaction = Transaction {
nonce: engine.account_start_nonce(),
action: Action::Call(addr),
gas: 50_000_000.into(),
gas_price: 0.into(),
value: 0.into(),
data: data,
}.fake_sign(Default::default());

let env = EnvInfo {
number: header.number(),
author: *header.author(),
timestamp: header.timestamp(),
difficulty: *header.difficulty(),
gas_limit: 50_000_000.into(),
last_hashes: last_hashes,
gas_used: 0.into(),
};

(transaction, env)
}

impl ChunkRebuilder {
fn verify_transition(
&mut self,
last_verifier: &mut Option<Box<EpochVerifier>>,
transition_rlp: UntrustedRlp,
engine: &Engine,
) -> Result<Verified, ::error::Error> {
// decode.
let header: Header = transition_rlp.val_at(0)?;
let epoch_number: u64 = transition_rlp.val_at(1)?;
let epoch_data: Bytes = transition_rlp.val_at(2)?;
let state_proof: Vec<DBValue> = transition_rlp.at(3)?
.iter()
.map(|x| Ok(DBValue::from_slice(x.data()?)))
.collect::<Result<_, ::rlp::DecoderError>>()?;
let last_hashes: Vec<H256> = transition_rlp.list_at(4)?;
let last_hashes = Arc::new(last_hashes);

trace!(target: "snapshot", "verifying transition to epoch {}", epoch_number);

// check current transition against validators of last epoch.
if let Some(verifier) = last_verifier.as_ref() {
verifier.verify_heavy(&header)?;
}

{
// check the provided state proof actually leads to the
// given epoch data.
let caller = |addr, data| {
use state::{check_proof, ProvedExecution};

let (transaction, env_info) = make_tx_and_env(
engine,
addr,
data,
&header,
last_hashes.clone(),
);

let result = check_proof(
&state_proof,
header.state_root().clone(),
&transaction,
engine,
&env_info,
);

match result {
ProvedExecution::Complete(executed) => Ok(executed.output),
_ => Err("Bad state proof".into()),
}
};

let extracted_proof = engine.epoch_proof(&header, &caller)
.map_err(|_| Error::BadEpochProof(epoch_number))?;

if extracted_proof != epoch_data {
return Err(Error::BadEpochProof(epoch_number).into());
}
}

// create new epoch verifier.
*last_verifier = Some(engine.epoch_verifier(&header, &epoch_data)?);

Ok(Verified {
epoch_number: epoch_number,
epoch_transition: EpochTransition {
block_hash: header.hash(),
block_number: header.number(),
state_proof: state_proof,
proof: epoch_data,
},
header: header,
})
}
}

impl Rebuilder for ChunkRebuilder {
fn feed(
&mut self,
chunk: &[u8],
engine: &Engine,
abort_flag: &AtomicBool,
) -> Result<(), ::error::Error> {
let rlp = UntrustedRlp::new(chunk);
let is_last_chunk: bool = rlp.val_at(0)?;
let num_items = rlp.item_count()?;

// number of transitions in the chunk.
let num_transitions = if is_last_chunk {
num_items - 2
} else {
num_items - 1
};

let mut last_verifier = None;
let mut last_number = None;
for transition_rlp in rlp.iter().skip(1).take(num_transitions).with_position() {
if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) }

let (is_first, is_last) = match transition_rlp {
Position::First(_) => (true, false),
Position::Middle(_) => (false, false),
Position::Last(_) => (false, true),
Position::Only(_) => (true, true),
};

let transition_rlp = transition_rlp.into_inner();
let verified = self.verify_transition(
&mut last_verifier,
transition_rlp,
engine,
)?;

if last_number.map_or(false, |num| verified.header.number() <= num) {
return Err(Error::WrongChunkFormat("Later epoch transition in earlier or same block.".into()).into());
}

last_number = Some(verified.header.number());

// book-keep borders for verification later.
if is_first {
// make sure the genesis transition was included,
// but it doesn't need verification later.
if verified.epoch_number == 0 && verified.header.number() == 0 {
if verified.header.hash() != self.chain.genesis_hash() {
return Err(Error::WrongBlockHash(0, verified.header.hash(), self.chain.genesis_hash()).into());
}

self.had_genesis = true;
} else {
let idx = self.unverified_firsts
.binary_search_by_key(&verified.epoch_number, |&(a, _)| a)
.unwrap_or_else(|x| x);

let entry = (verified.epoch_number, verified.header.clone());
self.unverified_firsts.insert(idx, entry);
}
}
if is_last {
let idx = self.last_proofs
.binary_search_by_key(&verified.epoch_number, |&(a, _, _)| a)
.unwrap_or_else(|x| x);

let entry = (
verified.epoch_number,
verified.header.clone(),
verified.epoch_transition.proof.clone()
);
self.last_proofs.insert(idx, entry);
}

// write epoch transition into database.
let mut batch = self.db.transaction();
self.chain.insert_epoch_transition(&mut batch, verified.epoch_number,
verified.epoch_transition);
self.db.write_buffered(batch);

trace!(target: "snapshot", "Verified epoch transition for epoch {}", verified.epoch_number);
}

if is_last_chunk {
use block::Block;

let last_rlp = rlp.at(num_items - 1)?;
let block = Block {
header: last_rlp.val_at(0)?,
transactions: last_rlp.list_at(1)?,
uncles: last_rlp.list_at(2)?,
};
let block_data = block.rlp_bytes(::basic_types::Seal::With);
let receipts: Vec<Receipt> = last_rlp.list_at(3)?;

{
let hash = block.header.hash();
let best_hash = self.manifest.block_hash;
if hash != best_hash {
return Err(Error::WrongBlockHash(block.header.number(), best_hash, hash).into())
}
}

let last_hashes: Vec<H256> = last_rlp.list_at(4)?;
let parent_td: ::util::U256 = last_rlp.val_at(5)?;

let mut batch = self.db.transaction();
self.chain.insert_unordered_block(&mut batch, &block_data, receipts, Some(parent_td), true, false);
self.db.write_buffered(batch);

self.warp_target = Some((block.header, last_hashes));
}

Ok(())
}

fn finalize(&mut self, db: StateDB, engine: &Engine) -> Result<(), ::error::Error> {
use state::State;

if !self.had_genesis {
return Err(Error::WrongChunkFormat("No genesis transition included.".into()).into());
}

let (target_header, target_last_hashes) = match self.warp_target.take() {
Some(x) => x,
None => return Err(Error::WrongChunkFormat("Warp target block not included.".into()).into()),
};

// we store the last data even for the last chunk for easier verification
// of warp target, but we don't store genesis transition data.
// other than that, there should be a one-to-one correspondence of
// chunk ends to chunk beginnings.
if self.last_proofs.len() != self.unverified_firsts.len() + 1 {
return Err(Error::WrongChunkFormat("More than one 'last' chunk".into()).into());
}

// verify the first entries of chunks we couldn't before.
let lasts_iter = self.last_proofs.iter().map(|&(_, ref hdr, ref proof)| (hdr, &proof[..]));
let firsts_iter = self.unverified_firsts.iter().map(|&(_, ref hdr)| hdr);

for ((last_hdr, last_proof), first_hdr) in lasts_iter.zip(firsts_iter) {
let verifier = engine.epoch_verifier(&last_hdr, &last_proof)?;
verifier.verify_heavy(&first_hdr)?;
}

// verify that the validator set of the warp target is the same as that of the
// most recent transition. if the warp target was a transition itself,
// `last_data` will still be correct
let &(_, _, ref last_data) = self.last_proofs.last()
.expect("last_proofs known to have at least one element by the check above; qed");

let target_last_hashes = Arc::new(target_last_hashes);
let caller = |addr, data| {
use executive::{Executive, TransactOptions};

let factories = ::factory::Factories::default();
let mut state = State::from_existing(
db.boxed_clone(),
self.manifest.state_root.clone(),
engine.account_start_nonce(),
factories.clone(),
).map_err(|e| format!("State root mismatch: {}", e))?;

let (tx, env_info) = make_tx_and_env(
engine,
addr,
data,
&target_header,
target_last_hashes.clone(),
);

let options = TransactOptions { tracing: false, vm_tracing: false, check_nonce: false };
Executive::new(&mut state, &env_info, engine, &factories.vm)
.transact_virtual(&tx, options)
.map(|e| e.output)
.map_err(|e| format!("Error executing: {}", e))
};

let data = engine.epoch_proof(&target_header, &caller)?;
if &data[..] != &last_data[..] {
return Err(Error::WrongChunkFormat("Warp target has different epoch data than epoch transition.".into()).into())
}

Ok(())
}
}
@ -17,24 +17,24 @@
//! Secondary chunk creation and restoration, implementations for different consensus
//! engines.

use std::collections::VecDeque;
use std::io;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::atomic::AtomicBool;
use std::sync::Arc;

use blockchain::{BlockChain, BlockProvider};
use blockchain::BlockChain;
use engines::Engine;
use snapshot::{Error, ManifestData};
use snapshot::block::AbridgedBlock;

use util::{Bytes, H256};
use util::H256;
use util::kvdb::KeyValueDB;
use rand::OsRng;
use rlp::{RlpStream, UntrustedRlp};

mod authority;
mod work;

pub use self::authority::*;
pub use self::work::*;

/// A sink for produced chunks.
pub type ChunkSink<'a> = FnMut(&[u8]) -> io::Result<()> + 'a;
pub type ChunkSink<'a> = FnMut(&[u8]) -> ::std::io::Result<()> + 'a;
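Since `ChunkSink` is just an `FnMut` over byte slices, any closure can serve as a sink; for instance, a test could collect chunks in memory (hypothetical snippet):

    let mut chunks: Vec<Vec<u8>> = Vec::new();
    let mut sink = |data: &[u8]| -> ::std::io::Result<()> {
        chunks.push(data.to_vec()); // keep each produced chunk
        Ok(())
    };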

/// Components necessary for snapshot creation and restoration.
pub trait SnapshotComponents: Send {
@ -57,13 +57,21 @@ pub trait SnapshotComponents: Send {
/// order and then be finalized.
///
/// The manifest, a database, and fresh `BlockChain` are supplied.
// TODO: supply anything for state?
///
/// The engine passed to the `Rebuilder` methods will be the same instance
/// that created the `SnapshotComponents`.
fn rebuilder(
&self,
chain: BlockChain,
db: Arc<KeyValueDB>,
manifest: &ManifestData,
) -> Result<Box<Rebuilder>, ::error::Error>;

/// Minimum supported snapshot version number.
fn min_supported_version(&self) -> u64;

/// Current version number
fn current_version(&self) -> u64;
}

@ -82,271 +90,10 @@ pub trait Rebuilder: Send {

/// Finalize the restoration. Will be done after all chunks have been
/// fed successfully.
/// This will apply the necessary "glue" between chunks.
fn finalize(&mut self) -> Result<(), Error>;
}

/// Snapshot creation and restoration for PoW chains.
/// This includes blocks from the head of the chain as a
/// loose assurance that the chain is valid.
///
/// The field is the number of blocks from the head of the chain
/// to include in the snapshot.
#[derive(Clone, Copy, PartialEq)]
pub struct PowSnapshot(pub u64);

impl SnapshotComponents for PowSnapshot {
fn chunk_all(
&mut self,
chain: &BlockChain,
block_at: H256,
chunk_sink: &mut ChunkSink,
preferred_size: usize,
) -> Result<(), Error> {
PowWorker {
chain: chain,
rlps: VecDeque::new(),
current_hash: block_at,
writer: chunk_sink,
preferred_size: preferred_size,
}.chunk_all(self.0)
}

fn rebuilder(
&self,
chain: BlockChain,
db: Arc<KeyValueDB>,
manifest: &ManifestData,
) -> Result<Box<Rebuilder>, ::error::Error> {
PowRebuilder::new(chain, db, manifest, self.0).map(|r| Box::new(r) as Box<_>)
}
}

/// Used to build block chunks.
struct PowWorker<'a> {
chain: &'a BlockChain,
// block, receipt rlp pairs.
rlps: VecDeque<Bytes>,
current_hash: H256,
writer: &'a mut ChunkSink<'a>,
preferred_size: usize,
}

impl<'a> PowWorker<'a> {
// Repeatedly fills the buffers and writes out chunks, moving backwards from the starting block hash.
// Loops until we reach the first desired block, and writes out the remainder.
fn chunk_all(&mut self, snapshot_blocks: u64) -> Result<(), Error> {
let mut loaded_size = 0;
let mut last = self.current_hash;

let genesis_hash = self.chain.genesis_hash();

for _ in 0..snapshot_blocks {
if self.current_hash == genesis_hash { break }

let (block, receipts) = self.chain.block(&self.current_hash)
.and_then(|b| self.chain.block_receipts(&self.current_hash).map(|r| (b, r)))
.ok_or(Error::BlockNotFound(self.current_hash))?;

let abridged_rlp = AbridgedBlock::from_block_view(&block.view()).into_inner();

let pair = {
let mut pair_stream = RlpStream::new_list(2);
pair_stream.append_raw(&abridged_rlp, 1).append(&receipts);
pair_stream.out()
};

let new_loaded_size = loaded_size + pair.len();

// cut off the chunk if too large.

if new_loaded_size > self.preferred_size && !self.rlps.is_empty() {
self.write_chunk(last)?;
loaded_size = pair.len();
} else {
loaded_size = new_loaded_size;
}

self.rlps.push_front(pair);

last = self.current_hash;
self.current_hash = block.header_view().parent_hash();
}

if loaded_size != 0 {
self.write_chunk(last)?;
}

Ok(())
}

// write out the data in the buffers to a chunk on disk
//
// we preface each chunk with the parent of the first block's details,
// obtained from the details of the last block written.
fn write_chunk(&mut self, last: H256) -> Result<(), Error> {
trace!(target: "snapshot", "prepared block chunk with {} blocks", self.rlps.len());

let (last_header, last_details) = self.chain.block_header(&last)
.and_then(|n| self.chain.block_details(&last).map(|d| (n, d)))
.ok_or(Error::BlockNotFound(last))?;

let parent_number = last_header.number() - 1;
let parent_hash = last_header.parent_hash();
let parent_total_difficulty = last_details.total_difficulty - *last_header.difficulty();

trace!(target: "snapshot", "parent last written block: {}", parent_hash);

let num_entries = self.rlps.len();
let mut rlp_stream = RlpStream::new_list(3 + num_entries);
rlp_stream.append(&parent_number).append(parent_hash).append(&parent_total_difficulty);

for pair in self.rlps.drain(..) {
rlp_stream.append_raw(&pair, 1);
}

let raw_data = rlp_stream.out();

(self.writer)(&raw_data)?;

Ok(())
}
}

/// Rebuilder for proof-of-work chains.
/// Does basic verification for all blocks, but `PoW` verification for some.
/// Blocks must be fed in-order.
///
/// The first block in every chunk is disconnected from the last block in the
/// chunk before it, as chunks may be submitted out-of-order.
///
/// After all chunks have been submitted, we "glue" the chunks together.
pub struct PowRebuilder {
chain: BlockChain,
db: Arc<KeyValueDB>,
rng: OsRng,
disconnected: Vec<(u64, H256)>,
best_number: u64,
best_hash: H256,
best_root: H256,
fed_blocks: u64,
snapshot_blocks: u64,
}

impl PowRebuilder {
/// Create a new PowRebuilder.
fn new(chain: BlockChain, db: Arc<KeyValueDB>, manifest: &ManifestData, snapshot_blocks: u64) -> Result<Self, ::error::Error> {
Ok(PowRebuilder {
chain: chain,
db: db,
rng: OsRng::new()?,
disconnected: Vec::new(),
best_number: manifest.block_number,
best_hash: manifest.block_hash,
best_root: manifest.state_root,
fed_blocks: 0,
snapshot_blocks: snapshot_blocks,
})
}
}

impl Rebuilder for PowRebuilder {
/// Feed the rebuilder an uncompressed block chunk.
/// Returns the number of blocks fed or any errors.
fn feed(&mut self, chunk: &[u8], engine: &Engine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> {
use basic_types::Seal::With;
use views::BlockView;
use snapshot::verify_old_block;
use util::U256;
use util::triehash::ordered_trie_root;

let rlp = UntrustedRlp::new(chunk);
let item_count = rlp.item_count()?;
let num_blocks = (item_count - 3) as u64;

trace!(target: "snapshot", "restoring block chunk with {} blocks.", item_count - 3);

if self.fed_blocks + num_blocks > self.snapshot_blocks {
return Err(Error::TooManyBlocks(self.snapshot_blocks, self.fed_blocks).into())
}

// todo: assert here that these values are consistent with chunks being in order.
let mut cur_number = rlp.val_at::<u64>(0)? + 1;
let mut parent_hash = rlp.val_at::<H256>(1)?;
let parent_total_difficulty = rlp.val_at::<U256>(2)?;

for idx in 3..item_count {
if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) }

let pair = rlp.at(idx)?;
let abridged_rlp = pair.at(0)?.as_raw().to_owned();
let abridged_block = AbridgedBlock::from_raw(abridged_rlp);
let receipts: Vec<::receipt::Receipt> = pair.list_at(1)?;
let receipts_root = ordered_trie_root(
pair.at(1)?.iter().map(|r| r.as_raw().to_owned())
);

let block = abridged_block.to_block(parent_hash, cur_number, receipts_root)?;
let block_bytes = block.rlp_bytes(With);
let is_best = cur_number == self.best_number;

if is_best {
if block.header.hash() != self.best_hash {
return Err(Error::WrongBlockHash(cur_number, self.best_hash, block.header.hash()).into())
}

if block.header.state_root() != &self.best_root {
return Err(Error::WrongStateRoot(self.best_root, *block.header.state_root()).into())
}
}

verify_old_block(
&mut self.rng,
&block.header,
engine,
&self.chain,
Some(&block_bytes),
is_best
)?;

let mut batch = self.db.transaction();

// special-case the first block in each chunk.
if idx == 3 {
if self.chain.insert_unordered_block(&mut batch, &block_bytes, receipts, Some(parent_total_difficulty), is_best, false) {
self.disconnected.push((cur_number, block.header.hash()));
}
} else {
self.chain.insert_unordered_block(&mut batch, &block_bytes, receipts, None, is_best, false);
}
self.db.write_buffered(batch);
self.chain.commit();

parent_hash = BlockView::new(&block_bytes).hash();
cur_number += 1;
}

self.fed_blocks += num_blocks;

Ok(())
}

/// Glue together any disconnected chunks and check that the chain is complete.
fn finalize(&mut self) -> Result<(), Error> {
let mut batch = self.db.transaction();

for (first_num, first_hash) in self.disconnected.drain(..) {
let parent_num = first_num - 1;

// check if the parent is even in the chain.
// since we don't restore every single block in the chain,
// the first block of the first chunk has nothing to connect to.
if let Some(parent_hash) = self.chain.block_hash(parent_num) {
// if so, add the child to it.
self.chain.add_child(&mut batch, parent_hash, first_hash);
}
}
self.db.write_buffered(batch);
Ok(())
}
///
/// This should apply the necessary "glue" between chunks,
/// and verify against the restored state.
///
/// The database passed contains the state for the warp target block.
fn finalize(&mut self, db: ::state_db::StateDB, engine: &Engine) -> Result<(), ::error::Error>;
}

311 ethcore/src/snapshot/consensus/work.rs (Normal file)
@ -0,0 +1,311 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Secondary chunk creation and restoration, implementation for proof-of-work
//! chains.
//!
//! The secondary chunks in this instance are 30,000 "abridged blocks" from the head
//! of the chain, which serve as an indication of a valid chain.

use super::{SnapshotComponents, Rebuilder, ChunkSink};

use std::collections::VecDeque;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

use blockchain::{BlockChain, BlockProvider};
use engines::Engine;
use snapshot::{Error, ManifestData};
use snapshot::block::AbridgedBlock;
use util::{Bytes, H256, KeyValueDB};
use rlp::{RlpStream, UntrustedRlp};
use rand::OsRng;

/// Snapshot creation and restoration for PoW chains.
/// This includes blocks from the head of the chain as a
/// loose assurance that the chain is valid.
///
/// The field is the number of blocks from the head of the chain
/// to include in the snapshot.
#[derive(Clone, Copy, PartialEq)]
pub struct PowSnapshot(pub u64);

impl SnapshotComponents for PowSnapshot {
fn chunk_all(
&mut self,
chain: &BlockChain,
block_at: H256,
chunk_sink: &mut ChunkSink,
preferred_size: usize,
) -> Result<(), Error> {
PowWorker {
chain: chain,
rlps: VecDeque::new(),
current_hash: block_at,
writer: chunk_sink,
preferred_size: preferred_size,
}.chunk_all(self.0)
}

fn rebuilder(
&self,
chain: BlockChain,
db: Arc<KeyValueDB>,
manifest: &ManifestData,
) -> Result<Box<Rebuilder>, ::error::Error> {
PowRebuilder::new(chain, db, manifest, self.0).map(|r| Box::new(r) as Box<_>)
}

fn min_supported_version(&self) -> u64 { ::snapshot::MIN_SUPPORTED_STATE_CHUNK_VERSION }
fn current_version(&self) -> u64 { ::snapshot::STATE_CHUNK_VERSION }
}

/// Used to build block chunks.
struct PowWorker<'a> {
chain: &'a BlockChain,
// block, receipt rlp pairs.
rlps: VecDeque<Bytes>,
current_hash: H256,
writer: &'a mut ChunkSink<'a>,
preferred_size: usize,
}

impl<'a> PowWorker<'a> {
// Repeatedly fills the buffers and writes out chunks, moving backwards from the starting block hash.
// Loops until we reach the first desired block, and writes out the remainder.
fn chunk_all(&mut self, snapshot_blocks: u64) -> Result<(), Error> {
let mut loaded_size = 0;
let mut last = self.current_hash;

let genesis_hash = self.chain.genesis_hash();

for _ in 0..snapshot_blocks {
if self.current_hash == genesis_hash { break }

let (block, receipts) = self.chain.block(&self.current_hash)
.and_then(|b| self.chain.block_receipts(&self.current_hash).map(|r| (b, r)))
.ok_or(Error::BlockNotFound(self.current_hash))?;

let abridged_rlp = AbridgedBlock::from_block_view(&block.view()).into_inner();

let pair = {
let mut pair_stream = RlpStream::new_list(2);
pair_stream.append_raw(&abridged_rlp, 1).append(&receipts);
pair_stream.out()
};

let new_loaded_size = loaded_size + pair.len();

// cut off the chunk if too large.

if new_loaded_size > self.preferred_size && !self.rlps.is_empty() {
self.write_chunk(last)?;
loaded_size = pair.len();
} else {
loaded_size = new_loaded_size;
}

self.rlps.push_front(pair);

last = self.current_hash;
self.current_hash = block.header_view().parent_hash();
}

if loaded_size != 0 {
self.write_chunk(last)?;
}

Ok(())
}

// write out the data in the buffers to a chunk on disk
//
// we preface each chunk with the parent of the first block's details,
// obtained from the details of the last block written.
fn write_chunk(&mut self, last: H256) -> Result<(), Error> {
trace!(target: "snapshot", "prepared block chunk with {} blocks", self.rlps.len());

let (last_header, last_details) = self.chain.block_header(&last)
.and_then(|n| self.chain.block_details(&last).map(|d| (n, d)))
.ok_or(Error::BlockNotFound(last))?;

let parent_number = last_header.number() - 1;
let parent_hash = last_header.parent_hash();
let parent_total_difficulty = last_details.total_difficulty - *last_header.difficulty();

trace!(target: "snapshot", "parent last written block: {}", parent_hash);

let num_entries = self.rlps.len();
let mut rlp_stream = RlpStream::new_list(3 + num_entries);
rlp_stream.append(&parent_number).append(parent_hash).append(&parent_total_difficulty);

for pair in self.rlps.drain(..) {
rlp_stream.append_raw(&pair, 1);
}

let raw_data = rlp_stream.out();

(self.writer)(&raw_data)?;

Ok(())
}
}
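As the comments above describe, each block chunk is prefixed with the parent details of its first block; reading the prefix back (as `PowRebuilder::feed` below does) looks roughly like:

    let parent_number = rlp.val_at::<u64>(0)?;
    let parent_hash = rlp.val_at::<H256>(1)?;
    let parent_total_difficulty = rlp.val_at::<U256>(2)?;
    // items 3.. are (abridged block, receipts) pairs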

/// Rebuilder for proof-of-work chains.
/// Does basic verification for all blocks, but `PoW` verification for some.
/// Blocks must be fed in-order.
///
/// The first block in every chunk is disconnected from the last block in the
/// chunk before it, as chunks may be submitted out-of-order.
///
/// After all chunks have been submitted, we "glue" the chunks together.
pub struct PowRebuilder {
chain: BlockChain,
db: Arc<KeyValueDB>,
rng: OsRng,
disconnected: Vec<(u64, H256)>,
best_number: u64,
best_hash: H256,
best_root: H256,
fed_blocks: u64,
snapshot_blocks: u64,
}

impl PowRebuilder {
/// Create a new PowRebuilder.
fn new(chain: BlockChain, db: Arc<KeyValueDB>, manifest: &ManifestData, snapshot_blocks: u64) -> Result<Self, ::error::Error> {
Ok(PowRebuilder {
chain: chain,
db: db,
rng: OsRng::new()?,
disconnected: Vec::new(),
best_number: manifest.block_number,
best_hash: manifest.block_hash,
best_root: manifest.state_root,
fed_blocks: 0,
snapshot_blocks: snapshot_blocks,
})
}
}

impl Rebuilder for PowRebuilder {
/// Feed the rebuilder an uncompressed block chunk.
/// Returns the number of blocks fed or any errors.
fn feed(&mut self, chunk: &[u8], engine: &Engine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> {
use basic_types::Seal::With;
use views::BlockView;
use snapshot::verify_old_block;
use util::U256;
use util::triehash::ordered_trie_root;

let rlp = UntrustedRlp::new(chunk);
let item_count = rlp.item_count()?;
let num_blocks = (item_count - 3) as u64;

trace!(target: "snapshot", "restoring block chunk with {} blocks.", item_count - 3);

if self.fed_blocks + num_blocks > self.snapshot_blocks {
return Err(Error::TooManyBlocks(self.snapshot_blocks, self.fed_blocks).into())
}

// todo: assert here that these values are consistent with chunks being in order.
let mut cur_number = rlp.val_at::<u64>(0)? + 1;
let mut parent_hash = rlp.val_at::<H256>(1)?;
let parent_total_difficulty = rlp.val_at::<U256>(2)?;

for idx in 3..item_count {
if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) }

let pair = rlp.at(idx)?;
let abridged_rlp = pair.at(0)?.as_raw().to_owned();
let abridged_block = AbridgedBlock::from_raw(abridged_rlp);
let receipts: Vec<::receipt::Receipt> = pair.list_at(1)?;
let receipts_root = ordered_trie_root(
pair.at(1)?.iter().map(|r| r.as_raw().to_owned())
);

let block = abridged_block.to_block(parent_hash, cur_number, receipts_root)?;
let block_bytes = block.rlp_bytes(With);
let is_best = cur_number == self.best_number;

if is_best {
if block.header.hash() != self.best_hash {
return Err(Error::WrongBlockHash(cur_number, self.best_hash, block.header.hash()).into())
}

if block.header.state_root() != &self.best_root {
return Err(Error::WrongStateRoot(self.best_root, *block.header.state_root()).into())
}
}

verify_old_block(
&mut self.rng,
&block.header,
engine,
&self.chain,
Some(&block_bytes),
is_best
)?;

let mut batch = self.db.transaction();

// special-case the first block in each chunk.
if idx == 3 {
if self.chain.insert_unordered_block(&mut batch, &block_bytes, receipts, Some(parent_total_difficulty), is_best, false) {
self.disconnected.push((cur_number, block.header.hash()));
}
} else {
self.chain.insert_unordered_block(&mut batch, &block_bytes, receipts, None, is_best, false);
}
self.db.write_buffered(batch);
self.chain.commit();

parent_hash = BlockView::new(&block_bytes).hash();
cur_number += 1;
}

self.fed_blocks += num_blocks;

Ok(())
}

/// Glue together any disconnected chunks and check that the chain is complete.
fn finalize(&mut self, _: ::state_db::StateDB, _: &Engine) -> Result<(), ::error::Error> {
let mut batch = self.db.transaction();

for (first_num, first_hash) in self.disconnected.drain(..) {
let parent_num = first_num - 1;

// check if the parent is even in the chain.
// since we don't restore every single block in the chain,
// the first block of the first chunk has nothing to connect to.
if let Some(parent_hash) = self.chain.block_hash(parent_num) {
// if so, add the child to it.
self.chain.add_child(&mut batch, parent_hash, first_hash);
}
}

let genesis_hash = self.chain.genesis_hash();
self.chain.insert_epoch_transition(&mut batch, 0, ::blockchain::EpochTransition {
block_number: 0,
block_hash: genesis_hash,
proof: vec![],
state_proof: vec![],
});
self.db.write_buffered(batch);
Ok(())
}
}

@ -59,6 +59,10 @@ pub enum Error {
ChunkTooSmall,
/// Snapshots not supported by the consensus engine.
SnapshotsUnsupported,
/// Bad epoch transition.
BadEpochProof(u64),
/// Wrong chunk format.
WrongChunkFormat(String),
}

impl fmt::Display for Error {
@ -82,6 +86,8 @@ impl fmt::Display for Error {
Error::VersionNotSupported(ref ver) => write!(f, "Snapshot version {} is not supported.", ver),
Error::ChunkTooSmall => write!(f, "Chunk size is too small."),
Error::SnapshotsUnsupported => write!(f, "Snapshots unsupported by consensus engine."),
Error::BadEpochProof(i) => write!(f, "Bad epoch proof for transition to epoch {}", i),
Error::WrongChunkFormat(ref msg) => write!(f, "Wrong chunk format: {}", msg),
}
}
}

@ -33,7 +33,7 @@ use util::{Bytes, Hashable, HashDB, DBValue, snappy, U256, Uint};
use util::Mutex;
use util::hash::{H256};
use util::journaldb::{self, Algorithm, JournalDB};
use util::kvdb::Database;
use util::kvdb::KeyValueDB;
use util::trie::{TrieDB, TrieDBMut, Trie, TrieMut};
use util::sha3::SHA3_NULL_RLP;
use rlp::{RlpStream, UntrustedRlp};
@ -83,6 +83,11 @@ mod traits {
// Try to have chunks be around 4MB (before compression)
const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024;

// Minimum supported state chunk version.
const MIN_SUPPORTED_STATE_CHUNK_VERSION: u64 = 1;
// current state chunk version.
const STATE_CHUNK_VERSION: u64 = 2;
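A hedged sketch of how a restorer might gate on these constants (compare `min_supported_version` on the service further down; `manifest` and `chunker` are assumed in scope):

    if manifest.version < chunker.min_supported_version()
        || manifest.version > chunker.current_version() {
        return Err(Error::VersionNotSupported(manifest.version).into());
    }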

/// A progress indicator for snapshots.
#[derive(Debug, Default)]
pub struct Progress {
@ -135,6 +140,7 @@ pub fn take_snapshot<W: SnapshotWriter + Send>(

let writer = Mutex::new(writer);
let chunker = engine.snapshot_components().ok_or(Error::SnapshotsUnsupported)?;
let snapshot_version = chunker.current_version();
let (state_hashes, block_hashes) = scope(|scope| {
let writer = &writer;
let block_guard = scope.spawn(move || chunk_secondary(chunker, chain, block_at, writer, p));
@ -148,7 +154,7 @@ pub fn take_snapshot<W: SnapshotWriter + Send>(
info!("produced {} state chunks and {} block chunks.", state_hashes.len(), block_hashes.len());

let manifest_data = ManifestData {
version: 2,
version: snapshot_version,
state_hashes: state_hashes,
block_hashes: block_hashes,
state_root: *state_root,
@ -309,7 +315,7 @@ pub struct StateRebuilder {

impl StateRebuilder {
/// Create a new state rebuilder to write into the given backing DB.
pub fn new(db: Arc<Database>, pruning: Algorithm) -> Self {
pub fn new(db: Arc<KeyValueDB>, pruning: Algorithm) -> Self {
StateRebuilder {
db: journaldb::new(db.clone(), pruning, ::db::COL_STATE),
state_root: SHA3_NULL_RLP,
@ -384,7 +390,7 @@ impl StateRebuilder {
/// Finalize the restoration. Check for accounts missing code and make a dummy
/// journal entry.
/// Once all chunks have been fed, there should be nothing missing.
pub fn finalize(mut self, era: u64, id: H256) -> Result<(), ::error::Error> {
pub fn finalize(mut self, era: u64, id: H256) -> Result<Box<JournalDB>, ::error::Error> {
let missing = self.missing_code.keys().cloned().collect::<Vec<_>>();
if !missing.is_empty() { return Err(Error::MissingCode(missing).into()) }

@ -392,7 +398,7 @@ impl StateRebuilder {
self.db.journal_under(&mut batch, era, &id)?;
self.db.backing().write_buffered(batch);

Ok(())
Ok(self.db)
}

/// Get the state root of the rebuilder.

@ -106,6 +106,7 @@ impl Restoration {
let secondary = components.rebuilder(chain, raw_db.clone(), &manifest)?;

let root = manifest.state_root.clone();

Ok(Restoration {
manifest: manifest,
state_chunks_left: state_chunks,
@ -150,7 +151,7 @@ impl Restoration {
}

// finish up restoration.
fn finalize(mut self) -> Result<(), Error> {
fn finalize(mut self, engine: &Engine) -> Result<(), Error> {
use util::trie::TrieError;

if !self.is_done() { return Ok(()) }
@ -163,10 +164,11 @@ impl Restoration {
}

// check for missing code.
self.state.finalize(self.manifest.block_number, self.manifest.block_hash)?;
let db = self.state.finalize(self.manifest.block_number, self.manifest.block_hash)?;
let db = ::state_db::StateDB::new(db, 0);

// connect out-of-order chunks and verify chain integrity.
self.secondary.finalize()?;
self.secondary.finalize(db, engine)?;

if let Some(writer) = self.writer {
writer.finish(self.manifest)?;
@ -450,7 +452,10 @@ impl Service {
let recover = rest.as_ref().map_or(false, |rest| rest.writer.is_some());

// destroy the restoration before replacing databases and snapshot.
rest.take().map(Restoration::finalize).unwrap_or(Ok(()))?;
rest.take()
.map(|r| r.finalize(&*self.engine))
.unwrap_or(Ok(()))?;

self.replace_client_db()?;

if recover {
@ -554,6 +559,11 @@ impl SnapshotService for Service {
self.reader.read().as_ref().map(|r| r.manifest().clone())
}

fn min_supported_version(&self) -> Option<u64> {
self.engine.snapshot_components()
.map(|c| c.min_supported_version())
}

fn chunk(&self, hash: H256) -> Option<Bytes> {
self.reader.read().as_ref().and_then(|r| r.chunk(hash).ok())
}

@ -27,6 +27,10 @@ pub trait SnapshotService : Sync + Send {
/// Query the most recent manifest data.
fn manifest(&self) -> Option<ManifestData>;

/// Get the minimum supported snapshot version number.
/// `None` indicates warp sync isn't supported by the consensus engine.
fn min_supported_version(&self) -> Option<u64>;

/// Get raw chunk for a given hash.
fn chunk(&self, hash: H256) -> Option<Bytes>;

@ -17,13 +17,24 @@
//! Snapshot test helpers. These are used to build blockchains and state tries
//! which can be queried before and after a full snapshot/restore cycle.

use basic_account::BasicAccount;
use std::sync::Arc;

use account_db::AccountDBMut;
use basic_account::BasicAccount;
use blockchain::BlockChain;
use client::{BlockChainClient, Client};
use engines::Engine;
use snapshot::{StateRebuilder};
use snapshot::io::{SnapshotReader, PackedWriter, PackedReader};
use state_db::StateDB;

use devtools::{RandomTempPath, GuardedTempResult};
use rand::Rng;

use util::DBValue;
use util::{DBValue, KeyValueDB};
use util::hash::H256;
use util::hashdb::HashDB;
use util::journaldb;
use util::trie::{Alphabet, StandardMap, SecTrieDBMut, TrieMut, ValueMode};
use util::trie::{TrieDB, TrieDBMut, Trie};
use util::sha3::SHA3_NULL_RLP;
@ -125,3 +136,67 @@ pub fn compare_dbs(one: &HashDB, two: &HashDB) {
assert_eq!(one.get(&key).unwrap(), two.get(&key).unwrap());
}
}

/// Take a snapshot from the given client into a temporary file.
/// Return a snapshot reader for it.
pub fn snap(client: &Client) -> GuardedTempResult<Box<SnapshotReader>> {
use ids::BlockId;

let dir = RandomTempPath::new();
let writer = PackedWriter::new(dir.as_path()).unwrap();
let progress = Default::default();

let hash = client.chain_info().best_block_hash;
client.take_snapshot(writer, BlockId::Hash(hash), &progress).unwrap();

let reader = PackedReader::new(dir.as_path()).unwrap().unwrap();

GuardedTempResult {
result: Some(Box::new(reader)),
_temp: dir,
}
}

/// Restore a snapshot into a given database. This will read chunks from the given reader
/// and write into the given database.
pub fn restore(
	db: Arc<KeyValueDB>,
	engine: &Engine,
	reader: &SnapshotReader,
	genesis: &[u8],
) -> Result<(), ::error::Error> {
	use std::sync::atomic::AtomicBool;
	use util::snappy;

	let flag = AtomicBool::new(true);
	let components = engine.snapshot_components().unwrap();
	let manifest = reader.manifest();

	let mut state = StateRebuilder::new(db.clone(), journaldb::Algorithm::Archive);
	let mut secondary = {
		let chain = BlockChain::new(Default::default(), genesis, db.clone());
		components.rebuilder(chain, db, manifest).unwrap()
	};

	let mut snappy_buffer = Vec::new();

	trace!(target: "snapshot", "restoring state");
	for state_chunk_hash in manifest.state_hashes.iter() {
		trace!(target: "snapshot", "state chunk hash: {}", state_chunk_hash);
		let chunk = reader.chunk(*state_chunk_hash).unwrap();
		let len = snappy::decompress_into(&chunk, &mut snappy_buffer).unwrap();
		state.feed(&snappy_buffer[..len], &flag)?;
	}

	trace!(target: "snapshot", "restoring secondary");
	for chunk_hash in manifest.block_hashes.iter() {
		let chunk = reader.chunk(*chunk_hash).unwrap();
		let len = snappy::decompress_into(&chunk, &mut snappy_buffer).unwrap();
		secondary.feed(&snappy_buffer[..len], engine, &flag)?;
	}

	let jdb = state.finalize(manifest.block_number, manifest.block_hash)?;
	let state_db = StateDB::new(jdb, 0);

	secondary.finalize(state_db, engine)
}

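A minimal usage sketch of these two helpers, matching what the PoA tests below do: snapshot a client, then replay the chunks into a fresh in-memory database.

	// `client` and `spec` are assumed to come from the test fixtures below.
	let reader = snap(&*client);
	let new_db = kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0));
	restore(Arc::new(new_db), &*spec.engine, &**reader, &spec.genesis_block()).unwrap();
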
@ -16,7 +16,8 @@

//! Snapshot tests.

mod blocks;
mod proof_of_work;
mod proof_of_authority;
mod state;
mod service;

249
ethcore/src/snapshot/tests/proof_of_authority.rs
Normal file
@ -0,0 +1,249 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! PoA block chunker and rebuilder tests.

use std::cell::RefCell;
use std::sync::Arc;
use std::str::FromStr;

use account_provider::AccountProvider;
use client::{Client, BlockChainClient, MiningBlockChainClient};
use ethkey::Secret;
use engines::Seal;
use futures::Future;
use miner::MinerService;
use native_contracts::test_contracts::ValidatorSet;
use snapshot::tests::helpers as snapshot_helpers;
use spec::Spec;
use tests::helpers;
use transaction::{Transaction, Action, SignedTransaction};

use util::{Address, Hashable};
use util::kvdb;

const PASS: &'static str = "";
const TRANSITION_BLOCK_1: usize = 2; // block at which the contract becomes activated.
const TRANSITION_BLOCK_2: usize = 6; // block at which the second contract activates.

macro_rules! secret {
	($e: expr) => { Secret::from_slice(&$e.sha3()).expect(format!("sha3({}) not valid secret.", $e).as_str()) }
}

lazy_static! {
	// contract addresses.
	static ref CONTRACT_ADDR_1: Address = Address::from_str("0000000000000000000000000000000000000005").unwrap();
	static ref CONTRACT_ADDR_2: Address = Address::from_str("0000000000000000000000000000000000000006").unwrap();
	// secret: `sha3(1)`, and initial validator.
	static ref RICH_ADDR: Address = Address::from_str("7d577a597b2742b498cb5cf0c26cdcd726d39e6e").unwrap();
	// rich address' secret.
	static ref RICH_SECRET: Secret = secret!("1");
}

/// Contract code used here: https://gist.github.com/rphmeier/2de14fd365a969e3a9e10d77eb9a1e37
/// Account with secret "1".sha3() is initially the validator.
/// Transitions to the contract at block 2, initially same validator set.
/// Create a new Spec with BasicAuthority which uses a contract at address 5 to determine the current validators using `getValidators`.
/// `native_contracts::test_contracts::ValidatorSet` provides a native wrapper for the ABI.
fn spec_fixed_to_contract() -> Spec {
	let data = include_bytes!("test_validator_contract.json");
	Spec::load(&data[..]).unwrap()
}

// creates an account provider, filling it with accounts from all the given
// secrets and password `PASS`.
// returns addresses corresponding to secrets.
fn make_accounts(secrets: &[Secret]) -> (Arc<AccountProvider>, Vec<Address>) {
	let provider = AccountProvider::transient_provider();

	let addrs = secrets.iter()
		.cloned()
		.map(|s| provider.insert_account(s, PASS).unwrap())
		.collect();

	(Arc::new(provider), addrs)
}

// validator transition. block number and new validators. must be after `TRANSITION_BLOCK`.
// all addresses in the set must be in the account provider.
enum Transition {
	// manual transition via transaction
	Manual(usize, Vec<Address>),
	// implicit transition via multi-set
	Implicit(usize, Vec<Address>),
}

// create a chain with the given transitions and some blocks beyond that transition.
fn make_chain(accounts: Arc<AccountProvider>, blocks_beyond: usize, transitions: Vec<Transition>) -> Arc<Client> {
	let client = helpers::generate_dummy_client_with_spec_and_accounts(
		spec_fixed_to_contract, Some(accounts.clone()));

	let mut cur_signers = vec![*RICH_ADDR];
	{
		let engine = client.engine();
		engine.register_client(Arc::downgrade(&client));
	}

	{
		// push a block with given number, signed by one of the signers, with given transactions.
		let push_block = |signers: &[Address], n, txs: Vec<SignedTransaction>| {
			use block::IsBlock;

			let engine = client.engine();
			let idx = n as usize % signers.len();
			engine.set_signer(accounts.clone(), signers[idx], PASS.to_owned());

			trace!(target: "snapshot", "Pushing block #{}, {} txs, author={}", n, txs.len(), signers[idx]);

			let mut open_block = client.prepare_open_block(signers[idx], (5_000_000.into(), 5_000_000.into()), Vec::new());
			for tx in txs {
				open_block.push_transaction(tx, None).unwrap();
			}
			let block = open_block.close_and_lock();
			let seal = match engine.generate_seal(block.block()) {
				Seal::Regular(seal) => seal,
				_ => panic!("Unable to generate seal for dummy chain block #{}", n),
			};
			let block = block.seal(&*engine, seal).unwrap();

			client.import_sealed_block(block).unwrap();
		};

		// execution callback for native contract: push transaction to be sealed.
		let nonce = RefCell::new(client.engine().account_start_nonce());
		let exec = |addr, data| {
			let mut nonce = nonce.borrow_mut();
			let transaction = Transaction {
				nonce: *nonce,
				gas_price: 0.into(),
				gas: 1_000_000.into(),
				action: Action::Call(addr),
				value: 0.into(),
				data: data,
			}.sign(&*RICH_SECRET, client.signing_network_id());

			client.miner().import_own_transaction(&*client, transaction.into()).unwrap();

			*nonce = *nonce + 1.into();
			Ok(Vec::new())
		};

		let contract_1 = ValidatorSet::new(*CONTRACT_ADDR_1);
		let contract_2 = ValidatorSet::new(*CONTRACT_ADDR_2);

		// apply all transitions.
		for transition in transitions {
			let (num, manual, new_set) = match transition {
				Transition::Manual(num, new_set) => (num, true, new_set),
				Transition::Implicit(num, new_set) => (num, false, new_set),
			};

			if num < TRANSITION_BLOCK_1 {
				panic!("Bad test: issued epoch change before transition to contract.");
			}

			for number in client.chain_info().best_block_number + 1 .. num as u64 {
				push_block(&cur_signers, number, vec![]);
			}

			let pending = if manual {
				trace!(target: "snapshot", "applying set transition at block #{}", num);
				let contract = match num >= TRANSITION_BLOCK_2 {
					true => &contract_2,
					false => &contract_1,
				};

				contract.set_validators(&exec, new_set.clone()).wait().unwrap();
				client.ready_transactions()
					.into_iter()
					.map(|x| x.transaction)
					.collect()
			} else {
				Vec::new()
			};

			push_block(&cur_signers, num as u64, pending);
			cur_signers = new_set;
		}

		// make blocks beyond.
		for number in (client.chain_info().best_block_number..).take(blocks_beyond) {
			push_block(&cur_signers, number + 1, vec![]);
		}
	}

	client
}

#[test]
fn fixed_to_contract() {
	let (provider, addrs) = make_accounts(&[
		RICH_SECRET.clone(),
		secret!("foo"),
		secret!("bar"),
		secret!("test"),
		secret!("signer"),
		secret!("crypto"),
		secret!("wizard"),
		secret!("dog42"),
	]);

	assert!(provider.has_account(*RICH_ADDR).unwrap());

	let client = make_chain(provider, 1, vec![
		Transition::Manual(3, vec![addrs[2], addrs[3], addrs[5], addrs[7]]),
		Transition::Manual(4, vec![addrs[0], addrs[1], addrs[4], addrs[6]]),
	]);

	assert_eq!(client.chain_info().best_block_number, 5);
	let reader = snapshot_helpers::snap(&*client);

	let new_db = kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0));
	let spec = spec_fixed_to_contract();

	snapshot_helpers::restore(Arc::new(new_db), &*spec.engine, &**reader, &spec.genesis_block()).unwrap();
}

#[test]
fn fixed_to_contract_to_contract() {
	let (provider, addrs) = make_accounts(&[
		RICH_SECRET.clone(),
		secret!("foo"),
		secret!("bar"),
		secret!("test"),
		secret!("signer"),
		secret!("crypto"),
		secret!("wizard"),
		secret!("dog42"),
	]);

	assert!(provider.has_account(*RICH_ADDR).unwrap());

	let client = make_chain(provider, 2, vec![
		Transition::Manual(3, vec![addrs[2], addrs[3], addrs[5], addrs[7]]),
		Transition::Manual(4, vec![addrs[0], addrs[1], addrs[4], addrs[6]]),
		Transition::Implicit(5, vec![addrs[0]]),
		Transition::Manual(8, vec![addrs[2], addrs[4], addrs[6], addrs[7]]),
	]);

	assert_eq!(client.chain_info().best_block_number, 10);
	let reader = snapshot_helpers::snap(&*client);
	let new_db = kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0));
	let spec = spec_fixed_to_contract();

	snapshot_helpers::restore(Arc::new(new_db), &*spec.engine, &**reader, &spec.genesis_block()).unwrap();
}
@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Block chunker and rebuilder tests.
//! PoW block chunker and rebuilder tests.

use devtools::RandomTempPath;
use error::Error;
@ -23,8 +23,10 @@ use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
use blockchain::BlockChain;
use snapshot::{chunk_secondary, Error as SnapshotError, Progress, SnapshotComponents};
use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
use state_db::StateDB;

use util::{Mutex, snappy};
use util::journaldb::{self, Algorithm};
use util::kvdb::{self, KeyValueDB, DBTransaction};

use std::sync::Arc;
@ -81,6 +83,7 @@ fn chunk_and_restore(amount: u64) {
	// restore it.
	let new_db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
	let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone());
	let new_state = StateDB::new(journaldb::new(new_db.clone(), Algorithm::Archive, None), 0);
	let mut rebuilder = SNAPSHOT_MODE.rebuilder(new_chain, new_db.clone(), &manifest).unwrap();

	let reader = PackedReader::new(&snapshot_path).unwrap().unwrap();
@ -91,7 +94,7 @@ fn chunk_and_restore(amount: u64) {
		rebuilder.feed(&chunk, engine.as_ref(), &flag).unwrap();
	}

	rebuilder.finalize().unwrap();
	rebuilder.finalize(new_state, engine.as_ref()).unwrap();
	drop(rebuilder);

	// and test it.

51
ethcore/src/snapshot/tests/test_validator_contract.json
Normal file
@ -0,0 +1,51 @@
{
	"name": "TestValidatorContract",
	"engine": {
		"basicAuthority": {
			"params": {
				"gasLimitBoundDivisor": "0x0400",
				"durationLimit": "0x0d",
				"validators": {
					"multi": {
						"0": { "list": ["0x7d577a597b2742b498cb5cf0c26cdcd726d39e6e"] },
						"2": { "contract": "0x0000000000000000000000000000000000000005" },
						"6": { "contract": "0x0000000000000000000000000000000000000006" }
					}
				}
			}
		}
	},
	"params": {
		"accountStartNonce": "0x0",
		"maximumExtraDataSize": "0x20",
		"minGasLimit": "0x1388",
		"networkID" : "0x69"
	},
	"genesis": {
		"seal": {
			"generic": "0xc180"
		},
		"difficulty": "0x20000",
		"author": "0x0000000000000000000000000000000000000000",
		"timestamp": "0x00",
		"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
		"extraData": "0x",
		"gasLimit": "0x2fefd8"
	},
	"accounts": {
		"0000000000000000000000000000000000000001": { "balance": "1", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
		"0000000000000000000000000000000000000002": { "balance": "1", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
		"0000000000000000000000000000000000000003": { "balance": "1", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
		"0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
		"0000000000000000000000000000000000000005": {
			"balance": "1",
			"constructor": "6060604052604060405190810160405280737d577a597b2742b498cb5cf0c26cdcd726d39e6e73ffffffffffffffffffffffffffffffffffffffff1681526020017382a978b3f5962a5b0957d9ee9eef472ee55b42f173ffffffffffffffffffffffffffffffffffffffff16815250600290600261007e929190610096565b50341561008757fe5b5b60006001819055505b610163565b82805482825590600052602060002090810192821561010f579160200282015b8281111561010e5782518260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550916020019190600101906100b6565b5b50905061011c9190610120565b5090565b61016091905b8082111561015c57600081816101000a81549073ffffffffffffffffffffffffffffffffffffffff021916905550600101610126565b5090565b90565b61045d806101726000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063303e98e5146100675780639300c9261461008d578063b7ab4db5146100e4578063bfc708a014610159578063fd6e1b501461018f575bfe5b341561006f57fe5b6100776101c5565b6040518082815260200191505060405180910390f35b341561009557fe5b6100e26004808035906020019082018035906020019080806020026020016040519081016040528093929190818152602001838360200280828437820191505050505050919050506101d0565b005b34156100ec57fe5b6100f46102b3565b6040518080602001828103825283818151815260200191508051906020019060200280838360008314610146575b80518252602083111561014657602082019150602081019050602083039250610122565b5050509050019250505060405180910390f35b341561016157fe5b61018d600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610348565b005b341561019757fe5b6101c3600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190505061034c565b005b600060015490505b90565b600081600290805190602001906101e8929190610350565b50600143034090506000546000191681600019161415156102ae578060008160001916905550600160016000828254019250508190555060015481600019167f47e91f47ccfdcb578564e1af55da55c5e5d33403372fe68e4fed3dfd385764a184604051808060200182810382528381815181526020019150805190602001906020028083836000831461029b575b80518252602083111561029b57602082019150602081019050602083039250610277565b5050509050019250505060405180910390a35b5b5050565b6102bb6103da565b600280548060200260200160405190810160405280929190818152602001828054801561033d57602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190600101908083116102f3575b505050505090505b90565b5b50565b5b50565b8280548282559060005260206000209081019282156103c9579160200282015b828111156103c85782518260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555091602001919060010190610370565b5b5090506103d691906103ee565b5090565b602060405190810160405280600081525090565b61042e91905b8082111561042a57600081816101000a81549073ffffffffffffffffffffffffffffffffffffffff0219169055506001016103f4565b5090565b905600a165627a7a723058205c9ed1e1da2b93682907ac47377a662b21a5f9d89c4b21be40b098bdb00254360029"
		},
		"0000000000000000000000000000000000000006": {
			"balance": "1",
			"constructor": "6060604052602060405190810160405280737d577a597b2742b498cb5cf0c26cdcd726d39e6e73ffffffffffffffffffffffffffffffffffffffff16815250600290600161004e929190610066565b50341561005757fe5b5b60006001819055505b610133565b8280548282559060005260206000209081019282156100df579160200282015b828111156100de5782518260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555091602001919060010190610086565b5b5090506100ec91906100f0565b5090565b61013091905b8082111561012c57600081816101000a81549073ffffffffffffffffffffffffffffffffffffffff0219169055506001016100f6565b5090565b90565b61045d806101426000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063303e98e5146100675780639300c9261461008d578063b7ab4db5146100e4578063bfc708a014610159578063fd6e1b501461018f575bfe5b341561006f57fe5b6100776101c5565b6040518082815260200191505060405180910390f35b341561009557fe5b6100e26004808035906020019082018035906020019080806020026020016040519081016040528093929190818152602001838360200280828437820191505050505050919050506101d0565b005b34156100ec57fe5b6100f46102b3565b6040518080602001828103825283818151815260200191508051906020019060200280838360008314610146575b80518252602083111561014657602082019150602081019050602083039250610122565b5050509050019250505060405180910390f35b341561016157fe5b61018d600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610348565b005b341561019757fe5b6101c3600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190505061034c565b005b600060015490505b90565b600081600290805190602001906101e8929190610350565b50600143034090506000546000191681600019161415156102ae578060008160001916905550600160016000828254019250508190555060015481600019167f47e91f47ccfdcb578564e1af55da55c5e5d33403372fe68e4fed3dfd385764a184604051808060200182810382528381815181526020019150805190602001906020028083836000831461029b575b80518252602083111561029b57602082019150602081019050602083039250610277565b5050509050019250505060405180910390a35b5b5050565b6102bb6103da565b600280548060200260200160405190810160405280929190818152602001828054801561033d57602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190600101908083116102f3575b505050505090505b90565b5b50565b5b50565b8280548282559060005260206000209081019282156103c9579160200282015b828111156103c85782518260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555091602001919060010190610370565b5b5090506103d691906103ee565b5090565b602060405190810160405280600081525090565b61042e91905b8082111561042a57600081816101000a81549073ffffffffffffffffffffffffffffffffffffffff0219169055506001016103f4565b5090565b905600a165627a7a723058203070810251dcb89c9838d957eb3dbeef357bef0902e0245e3dc3849b6143c3960029"
		},
		"0x7d577a597b2742b498cb5cf0c26cdcd726d39e6e": { "balance": "1606938044258990275541962092341162602522202993782792835301376" },
		"0x82a978b3f5962a5b0957d9ee9eef472ee55b42f1": { "balance": "1606938044258990275541962092341162602522202993782792835301376" }
	}
}
@ -189,7 +189,7 @@ pub fn check_proof(
		Err(_) => return ProvedExecution::BadProof,
	};

	match state.execute(env_info, engine, transaction, false) {
	match state.execute(env_info, engine, transaction, false, true) {
		Ok(executed) => ProvedExecution::Complete(executed),
		Err(ExecutionError::Internal(_)) => ProvedExecution::BadProof,
		Err(e) => ProvedExecution::Failed(e),
@ -604,7 +604,7 @@ impl<B: Backend> State<B> {
	pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> ApplyResult {
		// let old = self.to_pod();

		let e = self.execute(env_info, engine, t, tracing)?;
		let e = self.execute(env_info, engine, t, tracing, false)?;
		// trace!("Applied transaction. Diff:\n{}\n", state_diff::diff_pod(&old, &self.to_pod()));
		let state_root = if env_info.number < engine.params().eip98_transition || env_info.number < engine.params().validate_receipts_transition {
			self.commit()?;
@ -617,12 +617,22 @@ impl<B: Backend> State<B> {
		Ok(ApplyOutcome{receipt: receipt, trace: e.trace})
	}

	// Execute a given transaction.
	fn execute(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> Result<Executed, ExecutionError> {
	// Execute a given transaction without committing changes.
	//
	// `virt` signals that we are executing outside of a block set and restrictions like
	// gas limits and gas costs should be lifted.
	fn execute(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool, virt: bool)
		-> Result<Executed, ExecutionError>
	{
		let options = TransactOptions { tracing: tracing, vm_tracing: false, check_nonce: true };
		let vm_factory = self.factories.vm.clone();

		Executive::new(self, env_info, engine, &vm_factory).transact(t, options)
		let mut e = Executive::new(self, env_info, engine, &vm_factory);

		match virt {
			true => e.transact_virtual(t, options),
			false => e.transact(t, options),
		}
	}

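Both call sites are visible above: `apply` keeps full validation with `virt = false`, while `check_proof` passes `virt = true` so a proved execution can't fail merely because the sender lacks balance for gas. A condensed sketch of the assumed semantics, with names taken from this hunk:

	// virt = true:  proof-checking execution; balance and gas-cost
	//               restrictions are lifted via Executive::transact_virtual.
	// virt = false: consensus-critical execution with full checks.
	let proved = state.execute(&env_info, engine, &tx, false, true);
	let mined = state.execute(&env_info, engine, &tx, false, false);
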
@ -1,6 +1,6 @@
{
	"name": "parity.js",
	"version": "1.7.76",
	"version": "1.7.77",
	"main": "release/index.js",
	"jsnext:main": "src/index.js",
	"author": "Parity Team <admin@parity.io>",

@ -67,6 +67,7 @@ impl IoHandler<ClientIoMessage> for QueueCull {

		let (sync, on_demand, txq) = (self.sync.clone(), self.on_demand.clone(), self.txq.clone());
		let best_header = self.client.best_block_header();
		let start_nonce = self.client.engine().account_start_nonce();

		info!(target: "cull", "Attempting to cull queued transactions from {} senders.", senders.len());
		self.remote.spawn_with_timeout(move || {
@ -74,7 +75,10 @@ impl IoHandler<ClientIoMessage> for QueueCull {
			// fetch the nonce of each sender in the queue.
			let nonce_futures = senders.iter()
				.map(|&address| request::Account { header: best_header.clone(), address: address })
				.map(|request| on_demand.account(ctx, request).map(|acc| acc.nonce))
				.map(move |request| {
					on_demand.account(ctx, request)
						.map(move |maybe_acc| maybe_acc.map_or(start_nonce, |acc| acc.nonce))
				})
				.zip(senders.iter())
				.map(|(fut, &addr)| fut.map(move |nonce| (addr, nonce)));

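The behavioral point of this hunk: `OnDemand::account` now resolves to an `Option`, and a sender with no on-chain account is treated as having the engine's starting nonce rather than failing the whole cull. A one-line sketch of the fallback:

	// Absent account => fall back to the engine's account_start_nonce.
	let nonce = maybe_acc.map_or(start_nonce, |acc| acc.nonce);
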
@ -228,8 +228,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
	}

	// start on_demand service.
	let account_start_nonce = service.client().engine().account_start_nonce();
	let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone(), account_start_nonce));
	let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone()));

	// set network path.
	net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned());

@ -281,6 +281,7 @@ impl LightDispatcher {
	}

	let best_header = self.client.best_block_header();
	let account_start_nonce = self.client.engine().account_start_nonce();
	let nonce_future = self.sync.with_context(|ctx| self.on_demand.account(ctx, request::Account {
		header: best_header,
		address: addr,
@ -288,7 +289,7 @@ impl LightDispatcher {

	match nonce_future {
		Some(x) =>
			x.map(|acc| acc.nonce)
			x.map(move |acc| acc.map_or(account_start_nonce, |acc| acc.nonce))
				.map_err(|_| errors::no_light_peers())
				.boxed(),
		None => future::err(errors::network_disabled()).boxed()

@ -57,16 +57,16 @@ pub type ExecutionResult = Result<Executed, ExecutionError>;

impl LightFetch {
	/// Get a block header from the on demand service or client, or error.
	pub fn header(&self, id: BlockId) -> BoxFuture<Option<encoded::Header>, Error> {
	pub fn header(&self, id: BlockId) -> BoxFuture<encoded::Header, Error> {
		if let Some(h) = self.client.block_header(id) {
			return future::ok(Some(h)).boxed()
			return future::ok(h).boxed()
		}

		let maybe_future = match id {
			BlockId::Number(n) => {
				let cht_root = cht::block_to_cht_number(n).and_then(|cn| self.client.cht_root(cn as usize));
				match cht_root {
					None => return future::ok(None).boxed(),
					None => return future::err(errors::unknown_block()).boxed(),
					Some(root) => {
						let req = request::HeaderProof::new(n, root)
							.expect("only fails for 0; client always stores genesis; client already queried; qed");
@ -82,7 +82,7 @@ impl LightFetch {
					Some(fut) => fut.map_err(errors::on_demand_cancel).boxed(),
					None => future::err(errors::network_disabled()).boxed(),
				}
			}).map(Some).boxed()
			}).boxed()
		})
	}
}
@ -91,7 +91,7 @@ impl LightFetch {
		self.sync.with_context(|ctx|
			self.on_demand.header_by_hash(ctx, request::HeaderByHash(h))
				.then(|res| future::done(match res {
					Ok(h) => Ok(Some(h)),
					Ok(h) => Ok(h),
					Err(e) => Err(errors::on_demand_cancel(e)),
				}))
				.boxed()
@ -106,22 +106,21 @@ impl LightFetch {
		}
	}

	// Get account info at a given block. `None` signifies no such account existing.
	/// helper for getting account info at a given block.
	/// `None` indicates the account doesn't exist at the given block.
	pub fn account(&self, address: Address, id: BlockId) -> BoxFuture<Option<BasicAccount>, Error> {
		let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());

		self.header(id).and_then(move |header| {
			let header = match header {
				None => return future::ok(None).boxed(),
				Some(hdr) => hdr,
			};

			sync.with_context(|ctx| on_demand.account(ctx, request::Account {
			let maybe_fut = sync.with_context(|ctx| on_demand.account(ctx, request::Account {
				header: header,
				address: address,
			}).map(Some))
				.map(|x| x.map_err(errors::on_demand_cancel).boxed())
				.unwrap_or_else(|| future::err(errors::network_disabled()).boxed())
			}));

			match maybe_fut {
				Some(fut) => fut.map_err(errors::on_demand_cancel).boxed(),
				None => future::err(errors::network_disabled()).boxed(),
			}
		}).boxed()
	}

@ -176,10 +175,11 @@ impl LightFetch {
		}).join(header_fut).and_then(move |(tx, hdr)| {
			// then request proved execution.
			// TODO: get last-hashes from network.
			let (env_info, hdr) = match (client.env_info(id), hdr) {
				(Some(env_info), Some(hdr)) => (env_info, hdr),
			let env_info = match client.env_info(id) {
				Some(env_info) => env_info,
				_ => return future::err(errors::unknown_block()).boxed(),
			};

			let request = request::TransactionProof {
				tx: tx,
				header: hdr,
@ -198,18 +198,13 @@ impl LightFetch {
		}).boxed()
	}

	/// Get a block.
	pub fn block(&self, id: BlockId) -> BoxFuture<Option<encoded::Block>, Error> {
	/// get a block itself. fails on unknown block ID.
	pub fn block(&self, id: BlockId) -> BoxFuture<encoded::Block, Error> {
		let (on_demand, sync) = (self.on_demand.clone(), self.sync.clone());

		self.header(id).and_then(move |hdr| {
			let req = match hdr {
				Some(hdr) => request::Body::new(hdr),
				None => return future::ok(None).boxed(),
			};

		self.header(id).map(request::Body::new).and_then(move |req| {
			match sync.with_context(move |ctx| on_demand.block(ctx, req)) {
				Some(fut) => fut.map_err(errors::on_demand_cancel).map(Some).boxed(),
				Some(fut) => fut.map_err(errors::on_demand_cancel).boxed(),
				None => future::err(errors::network_disabled()).boxed(),
			}
		}).boxed()

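The pattern running through these hunks: the fetcher's futures now yield the value directly and report missing data through the error path (`errors::unknown_block()`), so callers no longer thread `Option` through every combinator. A hedged caller-side sketch, with `fetcher` being a `LightFetch` as in this file:

	// Hypothetical caller: chain directly on the header future; an unknown
	// block becomes an error rather than an Ok(None) to unwrap.
	let hash_fut = fetcher.header(BlockId::Number(100)).map(|hdr| hdr.hash());
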
@ -115,12 +115,11 @@ impl EthClient {
			on_demand: self.on_demand.clone(),
			sync: self.sync.clone(),
			cache: self.cache.clone(),
		}
	}

	// get a "rich" block structure
	fn rich_block(&self, id: BlockId, include_txs: bool) -> BoxFuture<Option<RichBlock>, Error> {
	// get a "rich" block structure. Fails on unknown block.
	fn rich_block(&self, id: BlockId, include_txs: bool) -> BoxFuture<RichBlock, Error> {
		let (on_demand, sync) = (self.on_demand.clone(), self.sync.clone());
		let (client, engine) = (self.client.clone(), self.client.engine().clone());
		let eip86_transition = self.client.eip86_transition();
@ -160,49 +159,45 @@ impl EthClient {
		};

		// get the block itself.
		self.fetcher().block(id).and_then(move |block| match block {
			None => return future::ok(None).boxed(),
			Some(block) => {
				// then fetch the total difficulty (this is much easier after getting the block).
				match client.score(id) {
					Some(score) => future::ok(fill_rich(block, Some(score))).map(Some).boxed(),
					None => {
						// make a CHT request to fetch the chain score.
						let req = cht::block_to_cht_number(block.number())
							.and_then(|num| client.cht_root(num as usize))
							.and_then(|root| request::HeaderProof::new(block.number(), root));
		self.fetcher().block(id).and_then(move |block| {
			// then fetch the total difficulty (this is much easier after getting the block).
			match client.score(id) {
				Some(score) => future::ok(fill_rich(block, Some(score))).boxed(),
				None => {
					// make a CHT request to fetch the chain score.
					let req = cht::block_to_cht_number(block.number())
						.and_then(|num| client.cht_root(num as usize))
						.and_then(|root| request::HeaderProof::new(block.number(), root));

						let req = match req {
							Some(req) => req,
							None => {
								// somehow the genesis block slipped past other checks.
								// return it now.
								let score = client.block_header(BlockId::Number(0))
									.expect("genesis always stored; qed")
									.difficulty();

					let req = match req {
						Some(req) => req,
						None => {
							// somehow the genesis block slipped past other checks.
							// return it now.
							let score = client.block_header(BlockId::Number(0))
								.expect("genesis always stored; qed")
								.difficulty();

								return future::ok(fill_rich(block, Some(score))).map(Some).boxed()
							}
						};

						// three possible outcomes:
						// - network is down.
						// - we get a score, but our hash is non-canonical.
						// - we get a score, and our hash is canonical.
						let maybe_fut = sync.with_context(move |ctx| on_demand.hash_and_score_by_number(ctx, req));
						match maybe_fut {
							Some(fut) => fut.map(move |(hash, score)| {
								let score = if hash == block.hash() {
									Some(score)
								} else {
									None
								};

								Some(fill_rich(block, score))
							}).map_err(errors::on_demand_cancel).boxed(),
							None => return future::err(errors::network_disabled()).boxed(),
							return future::ok(fill_rich(block, Some(score))).boxed()
						}
					};

					// three possible outcomes:
					// - network is down.
					// - we get a score, but our hash is non-canonical.
					// - we get a score, and our hash is canonical.
					let maybe_fut = sync.with_context(move |ctx| on_demand.hash_and_score_by_number(ctx, req));
					match maybe_fut {
						Some(fut) => fut.map(move |(hash, score)| {
							let score = if hash == block.hash() {
								Some(score)
							} else {
								None
							};

							fill_rich(block, score)
						}).map_err(errors::on_demand_cancel).boxed(),
						None => return future::err(errors::network_disabled()).boxed(),
					}
				}
			}
@ -281,11 +276,11 @@ impl Eth for EthClient {
	}

	fn block_by_hash(&self, hash: RpcH256, include_txs: bool) -> BoxFuture<Option<RichBlock>, Error> {
		self.rich_block(BlockId::Hash(hash.into()), include_txs)
		self.rich_block(BlockId::Hash(hash.into()), include_txs).map(Some).boxed()
	}

	fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture<Option<RichBlock>, Error> {
		self.rich_block(num.into(), include_txs)
		self.rich_block(num.into(), include_txs).map(Some).boxed()
	}

	fn transaction_count(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> {
@ -297,11 +292,6 @@ impl Eth for EthClient {
		let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());

		self.fetcher().header(BlockId::Hash(hash.into())).and_then(move |hdr| {
			let hdr = match hdr {
				None => return future::ok(None).boxed(),
				Some(hdr) => hdr,
			};

			if hdr.transactions_root() == SHA3_NULL_RLP {
				future::ok(Some(U256::from(0).into())).boxed()
			} else {
@ -317,11 +307,6 @@ impl Eth for EthClient {
		let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());

		self.fetcher().header(num.into()).and_then(move |hdr| {
			let hdr = match hdr {
				None => return future::ok(None).boxed(),
				Some(hdr) => hdr,
			};

			if hdr.transactions_root() == SHA3_NULL_RLP {
				future::ok(Some(U256::from(0).into())).boxed()
			} else {
@ -337,11 +322,6 @@ impl Eth for EthClient {
		let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());

		self.fetcher().header(BlockId::Hash(hash.into())).and_then(move |hdr| {
			let hdr = match hdr {
				None => return future::ok(None).boxed(),
				Some(hdr) => hdr,
			};

			if hdr.uncles_hash() == SHA3_EMPTY_LIST_RLP {
				future::ok(Some(U256::from(0).into())).boxed()
			} else {
@ -357,11 +337,6 @@ impl Eth for EthClient {
		let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());

		self.fetcher().header(num.into()).and_then(move |hdr| {
			let hdr = match hdr {
				None => return future::ok(None).boxed(),
				Some(hdr) => hdr,
			};

			if hdr.uncles_hash() == SHA3_EMPTY_LIST_RLP {
				future::ok(Some(U256::from(0).into())).boxed()
			} else {

@ -360,7 +360,7 @@ impl Parity for ParityClient {
		})
	}

	fn block_header(&self, number: Trailing<BlockNumber>) -> BoxFuture<Option<RichHeader>, Error> {
	fn block_header(&self, number: Trailing<BlockNumber>) -> BoxFuture<RichHeader, Error> {
		use ethcore::encoded;

		let engine = self.light_dispatch.client.engine().clone();
@ -391,7 +391,7 @@ impl Parity for ParityClient {
			}
		};

		self.fetcher().header(number.0.into()).map(move |encoded| encoded.map(from_encoded)).boxed()
		self.fetcher().header(number.0.into()).map(from_encoded).boxed()
	}

	fn ipfs_cid(&self, content: Bytes) -> Result<String, Error> {

@ -400,17 +400,17 @@ impl<C, M, S: ?Sized, U> Parity for ParityClient<C, M, S, U> where
		})
	}

	fn block_header(&self, number: Trailing<BlockNumber>) -> BoxFuture<Option<RichHeader>, Error> {
	fn block_header(&self, number: Trailing<BlockNumber>) -> BoxFuture<RichHeader, Error> {
		const EXTRA_INFO_PROOF: &'static str = "Object exists in blockchain (fetched earlier), extra_info is always available if object exists; qed";

		let client = take_weakf!(self.client);
		let id: BlockId = number.0.into();
		let encoded = match client.block_header(id.clone()) {
			Some(encoded) => encoded,
			None => return future::ok(None).boxed(),
			None => return future::err(errors::unknown_block()).boxed(),
		};

		future::ok(Some(RichHeader {
		future::ok(RichHeader {
			inner: Header {
				hash: Some(encoded.hash().into()),
				size: Some(encoded.rlp().as_raw().len().into()),
@ -431,7 +431,7 @@ impl<C, M, S: ?Sized, U> Parity for ParityClient<C, M, S, U> where
				extra_data: Bytes::new(encoded.extra_data()),
			},
			extra_info: client.block_extra_info(id).expect(EXTRA_INFO_PROOF),
		})).boxed()
		}).boxed()
	}

	fn ipfs_cid(&self, content: Bytes) -> Result<String, Error> {

@ -41,6 +41,7 @@ impl TestSnapshotService {

impl SnapshotService for TestSnapshotService {
	fn manifest(&self) -> Option<ManifestData> { None }
	fn min_supported_version(&self) -> Option<u64> { None }
	fn chunk(&self, _hash: H256) -> Option<Bytes> { None }
	fn status(&self) -> RestorationStatus { self.status.lock().clone() }
	fn begin_restore(&self, _manifest: ManifestData) { }

@ -202,7 +202,7 @@ build_rpc_trait! {
		/// Get block header.
		/// Same as `eth_getBlockByNumber` but without uncles and transactions.
		#[rpc(async, name = "parity_getBlockHeaderByNumber")]
		fn block_header(&self, Trailing<BlockNumber>) -> BoxFuture<Option<RichHeader>, Error>;
		fn block_header(&self, Trailing<BlockNumber>) -> BoxFuture<RichHeader, Error>;

		/// Get IPFS CIDv0 given protobuf encoded bytes.
		#[rpc(name = "parity_cidV0")]

@ -158,8 +158,6 @@ pub const SNAPSHOT_SYNC_PACKET_COUNT: u8 = 0x16;

const MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD: usize = 3;

const MIN_SUPPORTED_SNAPSHOT_MANIFEST_VERSION: u64 = 1;

const WAIT_PEERS_TIMEOUT_SEC: u64 = 5;
const STATUS_TIMEOUT_SEC: u64 = 5;
const HEADERS_TIMEOUT_SEC: u64 = 15;
@ -504,7 +502,7 @@ impl ChainSync {
	}

	fn maybe_start_snapshot_sync(&mut self, io: &mut SyncIo) {
		if !self.enable_warp_sync {
		if !self.enable_warp_sync || io.snapshot_service().min_supported_version().is_none() {
			return;
		}
		if self.state != SyncState::WaitingPeers && self.state != SyncState::Blocks && self.state != SyncState::Waiting {
@ -1042,7 +1040,11 @@ impl ChainSync {
			}
			Ok(manifest) => manifest,
		};
		if manifest.version < MIN_SUPPORTED_SNAPSHOT_MANIFEST_VERSION {

		let is_supported_version = io.snapshot_service().min_supported_version()
			.map_or(false, |v| manifest.version >= v);

		if !is_supported_version {
			trace!(target: "sync", "{}: Snapshot manifest version too low: {}", peer_id, manifest.version);
			io.disable_peer(peer_id);
			self.continue_sync(io);

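The hard-coded `MIN_SUPPORTED_SNAPSHOT_MANIFEST_VERSION` is gone; the floor now comes from the engine via the snapshot service, and `None` short-circuits warp sync entirely. A small behavioral sketch of the new predicate, with assumed values that are not from this diff:

	let min = Some(2u64); // engine supports snapshots from version 2
	assert!(min.map_or(false, |v| 2 >= v)); // manifest v2: accepted
	assert!(!min.map_or(false, |v| 1 >= v)); // manifest v1: rejected
	assert!(!None::<u64>.map_or(false, |v| 9 >= v)); // no support: always rejected
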
@ -71,6 +71,10 @@ impl SnapshotService for TestSnapshotService {
		self.manifest.as_ref().cloned()
	}

	fn min_supported_version(&self) -> Option<u64> {
		Some(1)
	}

	fn chunk(&self, hash: H256) -> Option<Bytes> {
		self.chunks.get(&hash).cloned()
	}