fixed master (#6465)

* fixed master

* Revert "Merge pull request #6370 from paritytech/light-poa"

This reverts commit 3c60f99def, reversing
changes made to b731ccea18.
Author: Marek Kotewicz, 2017-09-05 14:53:09 +02:00 (committed by Arkadiy Paronyan)
parent e5bbabb2ba
commit 899538ae25
41 changed files with 265 additions and 1269 deletions

View File

@@ -1,74 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Trait for fetching chain data.
use std::sync::Arc;
use ethcore::encoded;
use ethcore::engines::{Engine, StateDependentProof};
use ethcore::header::Header;
use ethcore::receipt::Receipt;
use futures::future::IntoFuture;
use bigint::hash::H256;
/// Provides full chain data.
pub trait ChainDataFetcher: Send + Sync + 'static {
/// Error type when data unavailable.
type Error: ::std::fmt::Debug;
/// Future for fetching block body.
type Body: IntoFuture<Item=encoded::Block, Error=Self::Error>;
/// Future for fetching block receipts.
type Receipts: IntoFuture<Item=Vec<Receipt>, Error=Self::Error>;
/// Future for fetching epoch transition
type Transition: IntoFuture<Item=Vec<u8>, Error=Self::Error>;
/// Fetch a block body.
fn block_body(&self, header: &Header) -> Self::Body;
/// Fetch block receipts.
fn block_receipts(&self, header: &Header) -> Self::Receipts;
/// Fetch epoch transition proof at given header.
fn epoch_transition(&self, hash: H256, engine: Arc<Engine>, checker: Arc<StateDependentProof>) -> Self::Transition;
}
/// Fetcher implementation which cannot fetch anything.
pub struct Unavailable;
/// Create a fetcher which has all data unavailable.
pub fn unavailable() -> Unavailable { Unavailable }
impl ChainDataFetcher for Unavailable {
type Error = &'static str;
type Body = Result<encoded::Block, &'static str>;
type Receipts = Result<Vec<Receipt>, &'static str>;
type Transition = Result<Vec<u8>, &'static str>;
fn block_body(&self, _header: &Header) -> Self::Body {
Err("fetching block bodies unavailable")
}
fn block_receipts(&self, _header: &Header) -> Self::Receipts {
Err("fetching block receipts unavailable")
}
fn epoch_transition(&self, _h: H256, _e: Arc<Engine>, _check: Arc<StateDependentProof>) -> Self::Transition {
Err("fetching epoch transition proofs unavailable")
}
}
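For reference, the `ChainDataFetcher` trait deleted above was the light client's hook for pulling block bodies, receipts, and epoch-transition proofs from the network. A minimal sketch of another implementation, mirroring the shape of `Unavailable`; the closure-backed struct, its field names, and its error strings are illustrative assumptions (they are not part of this commit), and the imports at the top of the removed file are assumed to be in scope:

// Illustrative sketch only: a fetcher driven by caller-supplied closures.
// `Result` implements `IntoFuture` (futures 0.1), so synchronous sources work.
pub struct ClosureFetcher<B, R> {
	body: B,
	receipts: R,
}

impl<B, R> ChainDataFetcher for ClosureFetcher<B, R>
where
	B: Fn(&Header) -> Result<encoded::Block, &'static str> + Send + Sync + 'static,
	R: Fn(&Header) -> Result<Vec<Receipt>, &'static str> + Send + Sync + 'static,
{
	type Error = &'static str;
	type Body = Result<encoded::Block, &'static str>;
	type Receipts = Result<Vec<Receipt>, &'static str>;
	type Transition = Result<Vec<u8>, &'static str>;

	fn block_body(&self, header: &Header) -> Self::Body {
		(self.body)(header)
	}

	fn block_receipts(&self, header: &Header) -> Self::Receipts {
		(self.receipts)(header)
	}

	fn epoch_transition(&self, _h: H256, _e: Arc<Engine>, _c: Arc<StateDependentProof>) -> Self::Transition {
		// This sketch has no source for state-dependent proofs.
		Err("no epoch transition source configured")
	}
}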

View File

@@ -18,12 +18,11 @@
 //!
 //! Unlike a full node's `BlockChain` this doesn't store much in the database.
 //! It stores candidates for the last 2048-4096 blocks as well as CHT roots for
-//! historical blocks all the way to the genesis. If the engine makes use
-//! of epoch transitions, those are stored as well.
+//! historical blocks all the way to the genesis.
 //!
 //! This is separate from the `BlockChain` for two reasons:
 //! - It stores only headers (and a pruned subset of them)
-//! - To allow for flexibility in the database layout..
+//! - To allow for flexibility in the database layout once that's incorporated.
 use std::collections::BTreeMap;
 use std::sync::Arc;
@@ -31,20 +30,15 @@ use std::sync::Arc;
 use cht;
 use ethcore::block_status::BlockStatus;
-use ethcore::error::{BlockImportError, BlockError};
+use ethcore::error::BlockError;
 use ethcore::encoded;
 use ethcore::header::Header;
 use ethcore::ids::BlockId;
-use ethcore::spec::Spec;
-use ethcore::engines::epoch::{
-	Transition as EpochTransition,
-	PendingTransition as PendingEpochTransition
-};
 use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp, UntrustedRlp};
 use heapsize::HeapSizeOf;
 use bigint::prelude::U256;
-use bigint::hash::{H256, H256FastMap, H264};
+use bigint::hash::H256;
 use util::kvdb::{DBTransaction, KeyValueDB};
 use cache::Cache;
@@ -60,9 +54,6 @@ const HISTORY: u64 = 2048;
 /// The best block key. Maps to an RLP list: [best_era, last_era]
 const CURRENT_KEY: &'static [u8] = &*b"best_and_latest";
-/// Key storing the last canonical epoch transition.
-const LAST_CANONICAL_TRANSITION: &'static [u8] = &*b"canonical_transition";
 /// Information about a block.
 #[derive(Debug, Clone)]
 pub struct BlockDescriptor {
@@ -110,6 +101,7 @@ impl Encodable for Entry {
 impl Decodable for Entry {
 	fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
 		let mut candidates = SmallVec::<[Candidate; 3]>::new();
 		for item in rlp.iter() {
@@ -139,42 +131,6 @@ fn era_key(number: u64) -> String {
 	format!("candidates_{}", number)
 }
fn pending_transition_key(block_hash: H256) -> H264 {
const LEADING: u8 = 1;
let mut key = H264::default();
key[0] = LEADING;
key.0[1..].copy_from_slice(&block_hash.0[..]);
key
}
fn transition_key(block_hash: H256) -> H264 {
const LEADING: u8 = 2;
let mut key = H264::default();
key[0] = LEADING;
key.0[1..].copy_from_slice(&block_hash.0[..]);
key
}
// encode last canonical transition entry: header and proof.
fn encode_canonical_transition(header: &Header, proof: &[u8]) -> Vec<u8> {
let mut stream = RlpStream::new_list(2);
stream.append(header).append(&proof);
stream.out()
}
// decode last canonical transition entry.
fn decode_canonical_transition(t: &[u8]) -> Result<(Header, &[u8]), DecoderError> {
let rlp = UntrustedRlp::new(t);
Ok((rlp.val_at(0)?, rlp.at(1)?.data()?))
}
 /// Pending changes from `insert` to be applied after the database write has finished.
 pub struct PendingChanges {
 	best_block: Option<BlockDescriptor>, // new best block.
@@ -185,7 +141,6 @@ pub struct HeaderChain {
 	genesis_header: encoded::Header, // special-case the genesis.
 	candidates: RwLock<BTreeMap<u64, Entry>>,
 	best_block: RwLock<BlockDescriptor>,
-	live_epoch_proofs: RwLock<H256FastMap<EpochTransition>>,
 	db: Arc<KeyValueDB>,
 	col: Option<u32>,
 	cache: Arc<Mutex<Cache>>,
@@ -193,16 +148,8 @@
 impl HeaderChain {
 	/// Create a new header chain given this genesis block and database to read from.
-	pub fn new(
-		db: Arc<KeyValueDB>,
-		col: Option<u32>,
-		spec: &Spec,
-		cache: Arc<Mutex<Cache>>,
-	) -> Result<Self, String> {
-		let mut live_epoch_proofs = ::std::collections::HashMap::default();
-		let genesis = ::rlp::encode(&spec.genesis_header()).into_vec();
-		let decoded_header = spec.genesis_header();
+	pub fn new(db: Arc<KeyValueDB>, col: Option<u32>, genesis: &[u8], cache: Arc<Mutex<Cache>>) -> Result<Self, String> {
+		use ethcore::views::HeaderView;
 		let chain = if let Some(current) = db.get(col, CURRENT_KEY)? {
 			let (best_number, highest_number) = {
@@ -213,24 +160,12 @@
 			let mut cur_number = highest_number;
 			let mut candidates = BTreeMap::new();
-			// load all era entries, referenced headers within them,
-			// and live epoch proofs.
+			// load all era entries and referenced headers within them.
 			while let Some(entry) = db.get(col, era_key(cur_number).as_bytes())? {
 				let entry: Entry = ::rlp::decode(&entry);
 				trace!(target: "chain", "loaded header chain entry for era {} with {} candidates",
 					cur_number, entry.candidates.len());
-				for c in &entry.candidates {
-					let key = transition_key(c.hash);
-					if let Some(proof) = db.get(col, &*key)? {
-						live_epoch_proofs.insert(c.hash, EpochTransition {
-							block_hash: c.hash,
-							block_number: cur_number,
-							proof: proof.into_vec(),
-						});
-					}
-				}
 				candidates.insert(cur_number, entry);
 				cur_number -= 1;
@@ -252,42 +187,29 @@
 			};
 			HeaderChain {
-				genesis_header: encoded::Header::new(genesis),
+				genesis_header: encoded::Header::new(genesis.to_owned()),
 				best_block: RwLock::new(best_block),
 				candidates: RwLock::new(candidates),
-				live_epoch_proofs: RwLock::new(live_epoch_proofs),
 				db: db,
 				col: col,
 				cache: cache,
 			}
 		} else {
+			let g_view = HeaderView::new(genesis);
 			HeaderChain {
-				genesis_header: encoded::Header::new(genesis),
+				genesis_header: encoded::Header::new(genesis.to_owned()),
 				best_block: RwLock::new(BlockDescriptor {
-					hash: decoded_header.hash(),
+					hash: g_view.hash(),
 					number: 0,
-					total_difficulty: *decoded_header.difficulty(),
+					total_difficulty: g_view.difficulty(),
 				}),
 				candidates: RwLock::new(BTreeMap::new()),
-				live_epoch_proofs: RwLock::new(live_epoch_proofs),
 				db: db,
 				col: col,
 				cache: cache,
 			}
 		};
-		// instantiate genesis epoch data if it doesn't exist.
-		if let None = chain.db.get(col, LAST_CANONICAL_TRANSITION)? {
-			let genesis_data = spec.genesis_epoch_data()?;
-			{
-				let mut batch = chain.db.transaction();
-				let data = encode_canonical_transition(&decoded_header, &genesis_data);
-				batch.put_vec(col, LAST_CANONICAL_TRANSITION, data);
-				chain.db.write(batch)?;
-			}
-		}
 		Ok(chain)
 	}
@@ -296,24 +218,10 @@
 	/// This blindly trusts that the data given to it is sensible.
 	/// Returns a set of pending changes to be applied with `apply_pending`
 	/// before the next call to insert and after the transaction has been written.
-	///
-	/// If the block is an epoch transition, provide the transition along with
-	/// the header.
-	pub fn insert(
-		&self,
-		transaction: &mut DBTransaction,
-		header: Header,
-		transition_proof: Option<Vec<u8>>,
-	) -> Result<PendingChanges, BlockImportError> {
+	pub fn insert(&self, transaction: &mut DBTransaction, header: Header) -> Result<PendingChanges, BlockError> {
 		let hash = header.hash();
 		let number = header.number();
 		let parent_hash = *header.parent_hash();
-		let transition = transition_proof.map(|proof| EpochTransition {
-			block_hash: hash,
-			block_number: number,
-			proof: proof,
-		});
 		let mut pending = PendingChanges {
 			best_block: None,
 		};
@@ -329,8 +237,7 @@
 			candidates.get(&(number - 1))
 				.and_then(|entry| entry.candidates.iter().find(|c| c.hash == parent_hash))
 				.map(|c| c.total_difficulty)
-				.ok_or_else(|| BlockError::UnknownParent(parent_hash))
-				.map_err(BlockImportError::Block)?
+				.ok_or_else(|| BlockError::UnknownParent(parent_hash))?
 		};
 		let total_difficulty = parent_td + *header.difficulty();
@@ -355,13 +262,8 @@
 			transaction.put(self.col, era_key(number).as_bytes(), &::rlp::encode(&*cur_era))
 		}
-		if let Some(transition) = transition {
-			transaction.put(self.col, &*transition_key(hash), &transition.proof);
-			self.live_epoch_proofs.write().insert(hash, transition);
-		}
-		let raw = header.encoded().into_inner();
-		transaction.put_vec(self.col, &hash[..], raw);
+		let raw = ::rlp::encode(&header);
+		transaction.put(self.col, &hash[..], &*raw);
 		let (best_num, is_new_best) = {
 			let cur_best = self.best_block.read();
@@ -414,10 +316,8 @@
 			let cht_num = cht::block_to_cht_number(earliest_era)
 				.expect("fails only for number == 0; genesis never imported; qed");
-			let mut last_canonical_transition = None;
 			let cht_root = {
 				let mut i = earliest_era;
-				let mut live_epoch_proofs = self.live_epoch_proofs.write();
 				// iterable function which removes the candidates as it goes
 				// along. this will only be called until the CHT is complete.
@@ -428,25 +328,7 @@
 					i += 1;
-					// prune old blocks and epoch proofs.
 					for ancient in &era_entry.candidates {
let maybe_transition = live_epoch_proofs.remove(&ancient.hash);
if let Some(epoch_transition) = maybe_transition {
transaction.delete(self.col, &*transition_key(ancient.hash));
if ancient.hash == era_entry.canonical_hash {
last_canonical_transition = match self.db.get(self.col, &ancient.hash) {
Err(e) => {
warn!(target: "chain", "Error reading from DB: {}\n
", e);
None
}
Ok(None) => panic!("stored candidates always have corresponding headers; qed"),
Ok(Some(header)) => Some((epoch_transition, ::rlp::decode(&header))),
};
}
}
 						transaction.delete(self.col, &ancient.hash);
 					}
@@ -460,12 +342,6 @@
 			// write the CHT root to the database.
 			debug!(target: "chain", "Produced CHT {} root: {:?}", cht_num, cht_root);
 			transaction.put(self.col, cht_key(cht_num).as_bytes(), &::rlp::encode(&cht_root));
-			// update the last canonical transition proof
-			if let Some((epoch_transition, header)) = last_canonical_transition {
-				let x = encode_canonical_transition(&header, &epoch_transition.proof);
-				transaction.put_vec(self.col, LAST_CANONICAL_TRANSITION, x);
-			}
 		}
 	}
@@ -491,7 +367,7 @@
 	/// will be returned.
 	pub fn block_hash(&self, id: BlockId) -> Option<H256> {
 		match id {
-			BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_hash()),
+			BlockId::Earliest => Some(self.genesis_hash()),
 			BlockId::Hash(hash) => Some(hash),
 			BlockId::Number(num) => {
 				if self.best_block.read().number < num { return None }
@@ -642,56 +518,6 @@
 			false => BlockStatus::Unknown,
 		}
 	}
/// Insert a pending transition.
pub fn insert_pending_transition(&self, batch: &mut DBTransaction, hash: H256, t: PendingEpochTransition) {
let key = pending_transition_key(hash);
batch.put(self.col, &*key, &*::rlp::encode(&t));
}
/// Get pending transition for a specific block hash.
pub fn pending_transition(&self, hash: H256) -> Option<PendingEpochTransition> {
let key = pending_transition_key(hash);
match self.db.get(self.col, &*key) {
Ok(val) => val.map(|x| ::rlp::decode(&x)),
Err(e) => {
warn!(target: "chain", "Error reading from database: {}", e);
None
}
}
}
/// Get the transition to the epoch the given parent hash is part of
/// or transitions to.
/// This will give the epoch that any children of this parent belong to.
///
/// The header corresponding the the parent hash must be stored already.
pub fn epoch_transition_for(&self, parent_hash: H256) -> Option<(Header, Vec<u8>)> {
// slow path: loop back block by block
let live_proofs = self.live_epoch_proofs.read();
for hdr in self.ancestry_iter(BlockId::Hash(parent_hash)) {
if let Some(transition) = live_proofs.get(&hdr.hash()).cloned() {
return Some((hdr.decode(), transition.proof))
}
}
// any blocks left must be descendants of the last canonical transition block.
match self.db.get(self.col, LAST_CANONICAL_TRANSITION) {
Ok(x) => {
let x = x.expect("last canonical transition always instantiated; qed");
let (hdr, proof) = decode_canonical_transition(&x)
.expect("last canonical transition always encoded correctly; qed");
Some((hdr, proof.to_vec()))
}
Err(e) => {
warn!("Error reading from DB: {}", e);
None
}
}
}
 }
 impl HeapSizeOf for HeaderChain {
@@ -744,7 +570,7 @@ mod tests {
 		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
-		let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache).unwrap();
 		let mut parent_hash = genesis_header.hash();
 		let mut rolling_timestamp = genesis_header.timestamp();
@@ -757,7 +583,7 @@ mod tests {
 			parent_hash = header.hash();
 			let mut tx = db.transaction();
-			let pending = chain.insert(&mut tx, header, None).unwrap();
+			let pending = chain.insert(&mut tx, header).unwrap();
 			db.write(tx).unwrap();
 			chain.apply_pending(pending);
@@ -777,7 +603,7 @@ mod tests {
 		let db = make_db();
 		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
-		let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache).unwrap();
 		let mut parent_hash = genesis_header.hash();
 		let mut rolling_timestamp = genesis_header.timestamp();
@@ -790,7 +616,7 @@ mod tests {
 			parent_hash = header.hash();
 			let mut tx = db.transaction();
-			let pending = chain.insert(&mut tx, header, None).unwrap();
+			let pending = chain.insert(&mut tx, header).unwrap();
 			db.write(tx).unwrap();
 			chain.apply_pending(pending);
@@ -809,7 +635,7 @@ mod tests {
 			parent_hash = header.hash();
 			let mut tx = db.transaction();
-			let pending = chain.insert(&mut tx, header, None).unwrap();
+			let pending = chain.insert(&mut tx, header).unwrap();
 			db.write(tx).unwrap();
 			chain.apply_pending(pending);
@@ -833,7 +659,7 @@ mod tests {
 			parent_hash = header.hash();
 			let mut tx = db.transaction();
-			let pending = chain.insert(&mut tx, header, None).unwrap();
+			let pending = chain.insert(&mut tx, header).unwrap();
 			db.write(tx).unwrap();
 			chain.apply_pending(pending);
@@ -856,10 +682,12 @@
 	#[test]
 	fn earliest_is_latest() {
 		let spec = Spec::new_test();
+		let genesis_header = spec.genesis_header();
 		let db = make_db();
 		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
-		let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache).unwrap();
 		assert!(chain.block_header(BlockId::Earliest).is_some());
 		assert!(chain.block_header(BlockId::Latest).is_some());
@@ -874,7 +702,7 @@ mod tests {
 		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
 		{
-			let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
+			let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap();
 			let mut parent_hash = genesis_header.hash();
 			let mut rolling_timestamp = genesis_header.timestamp();
 			for i in 1..10000 {
@@ -886,7 +714,7 @@ mod tests {
 				parent_hash = header.hash();
 				let mut tx = db.transaction();
-				let pending = chain.insert(&mut tx, header, None).unwrap();
+				let pending = chain.insert(&mut tx, header).unwrap();
 				db.write(tx).unwrap();
 				chain.apply_pending(pending);
@@ -894,7 +722,7 @@ mod tests {
 			}
 		}
-		let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap();
 		assert!(chain.block_header(BlockId::Number(10)).is_none());
 		assert!(chain.block_header(BlockId::Number(9000)).is_some());
 		assert!(chain.cht_root(2).is_some());
@@ -910,7 +738,7 @@ mod tests {
 		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
 		{
-			let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
+			let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap();
 			let mut parent_hash = genesis_header.hash();
 			let mut rolling_timestamp = genesis_header.timestamp();
@@ -924,7 +752,7 @@ mod tests {
 				parent_hash = header.hash();
 				let mut tx = db.transaction();
-				let pending = chain.insert(&mut tx, header, None).unwrap();
+				let pending = chain.insert(&mut tx, header).unwrap();
 				db.write(tx).unwrap();
 				chain.apply_pending(pending);
@@ -941,7 +769,7 @@ mod tests {
 				parent_hash = header.hash();
 				let mut tx = db.transaction();
-				let pending = chain.insert(&mut tx, header, None).unwrap();
+				let pending = chain.insert(&mut tx, header).unwrap();
 				db.write(tx).unwrap();
 				chain.apply_pending(pending);
@@ -952,7 +780,7 @@ mod tests {
 		}
 		// after restoration, non-canonical eras should still be loaded.
-		let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap();
 		assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10);
 		assert!(chain.candidates.read().get(&100).is_some())
 	}
@@ -964,76 +792,10 @@
 		let db = make_db();
 		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
-		let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap();
 		assert!(chain.block_header(BlockId::Earliest).is_some());
 		assert!(chain.block_header(BlockId::Number(0)).is_some());
 		assert!(chain.block_header(BlockId::Hash(genesis_header.hash())).is_some());
 	}
#[test]
fn epoch_transitions_available_after_cht() {
let spec = Spec::new_test();
let genesis_header = spec.genesis_header();
let db = make_db();
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
let mut parent_hash = genesis_header.hash();
let mut rolling_timestamp = genesis_header.timestamp();
for i in 1..6 {
let mut header = Header::new();
header.set_parent_hash(parent_hash);
header.set_number(i);
header.set_timestamp(rolling_timestamp);
header.set_difficulty(*genesis_header.difficulty() * i.into());
parent_hash = header.hash();
let mut tx = db.transaction();
let epoch_proof = if i == 3 {
Some(vec![1, 2, 3, 4])
} else {
None
};
let pending = chain.insert(&mut tx, header, epoch_proof).unwrap();
db.write(tx).unwrap();
chain.apply_pending(pending);
rolling_timestamp += 10;
}
// these 3 should end up falling back to the genesis epoch proof in DB
for i in 0..3 {
let hash = chain.block_hash(BlockId::Number(i)).unwrap();
assert_eq!(chain.epoch_transition_for(hash).unwrap().1, Vec::<u8>::new());
}
// these are live.
for i in 3..6 {
let hash = chain.block_hash(BlockId::Number(i)).unwrap();
assert_eq!(chain.epoch_transition_for(hash).unwrap().1, vec![1, 2, 3, 4]);
}
for i in 6..10000 {
let mut header = Header::new();
header.set_parent_hash(parent_hash);
header.set_number(i);
header.set_timestamp(rolling_timestamp);
header.set_difficulty(*genesis_header.difficulty() * i.into());
parent_hash = header.hash();
let mut tx = db.transaction();
let pending = chain.insert(&mut tx, header, None).unwrap();
db.write(tx).unwrap();
chain.apply_pending(pending);
rolling_timestamp += 10;
}
// no live blocks have associated epoch proofs -- make sure we aren't leaking memory.
assert!(chain.live_epoch_proofs.read().is_empty());
assert_eq!(chain.epoch_transition_for(parent_hash).unwrap().1, vec![1, 2, 3, 4]);
}
 }
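As an aside, the hunks above touch most of the keys `HeaderChain` keeps in its single database column. A rough summary of that layout, with the entries this revert removes marked; the comments paraphrase the code shown above rather than quoting it, and the value descriptions are inferred from the reads and writes in this file:

// Sketch of the HeaderChain column layout, per the code above.
// CURRENT_KEY               = b"best_and_latest"       -> RLP list [best_era, last_era]
// era_key(n)                = "candidates_{n}"         -> RLP-encoded `Entry` (candidate headers for era n)
// <block hash> (32 bytes)                              -> RLP-encoded header
// cht_key(n)                                           -> RLP-encoded CHT root for chunk n
//
// Removed by this revert:
// LAST_CANONICAL_TRANSITION = b"canonical_transition"  -> RLP list [header, proof]
// pending_transition_key(h) = 0x01 ++ h  (33 bytes)    -> RLP-encoded `PendingEpochTransition`
// transition_key(h)         = 0x02 ++ h  (33 bytes)    -> raw epoch transition proof bytes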

View File

@@ -19,11 +19,11 @@
 use std::sync::{Weak, Arc};
 use ethcore::block_status::BlockStatus;
-use ethcore::client::{TransactionImportResult, ClientReport, EnvInfo};
-use ethcore::engines::{epoch, Engine, EpochChange, EpochTransition, Proof, Unsure};
-use ethcore::error::{TransactionError, BlockImportError, Error as EthcoreError};
+use ethcore::client::{ClientReport, EnvInfo};
+use ethcore::engines::Engine;
+use ethcore::error::BlockImportError;
 use ethcore::ids::BlockId;
-use ethcore::header::{BlockNumber, Header};
+use ethcore::header::Header;
 use ethcore::verification::queue::{self, HeaderQueue};
 use ethcore::blockchain_info::BlockChainInfo;
 use ethcore::spec::Spec;
@@ -33,12 +33,9 @@ use io::IoChannel;
 use parking_lot::{Mutex, RwLock};
 use bigint::prelude::U256;
 use bigint::hash::H256;
-use futures::{IntoFuture, Future};
-use util::Address;
 use util::kvdb::{KeyValueDB, CompactionProfile};
-use self::fetch::ChainDataFetcher;
 use self::header_chain::{AncestryIter, HeaderChain};
 use cache::Cache;
@@ -48,8 +45,6 @@ pub use self::service::Service;
 mod header_chain;
 mod service;
-pub mod fetch;
 /// Configuration for the light client.
 #[derive(Debug, Clone)]
 pub struct Config {
@@ -85,9 +80,6 @@ impl Default for Config {
 /// Trait for interacting with the header chain abstractly.
 pub trait LightChainClient: Send + Sync {
-	/// Adds a new `LightChainNotify` listener.
-	fn add_listener(&self, listener: Weak<LightChainNotify>);
 	/// Get chain info.
 	fn chain_info(&self) -> BlockChainInfo;
@@ -136,7 +128,7 @@ pub trait LightChainClient: Send + Sync {
 	fn cht_root(&self, i: usize) -> Option<H256>;
 	/// Get the EIP-86 transition block number.
-	fn eip86_transition(&self) -> BlockNumber;
+	fn eip86_transition(&self) -> u64;
 	/// Get a report of import activity since the last call.
 	fn report(&self) -> ClientReport;
@@ -164,7 +156,7 @@ impl<T: LightChainClient> AsLightClient for T {
 }
 /// Light client implementation.
-pub struct Client<T> {
+pub struct Client {
 	queue: HeaderQueue,
 	engine: Arc<Engine>,
 	chain: HeaderChain,
@@ -172,30 +164,22 @@ pub struct Client<T> {
 	import_lock: Mutex<()>,
 	db: Arc<KeyValueDB>,
 	listeners: RwLock<Vec<Weak<LightChainNotify>>>,
-	fetcher: T,
 	verify_full: bool,
 }
-impl<T: ChainDataFetcher> Client<T> {
+impl Client {
 	/// Create a new `Client`.
-	pub fn new(
-		config: Config,
-		db: Arc<KeyValueDB>,
-		chain_col: Option<u32>,
-		spec: &Spec,
-		fetcher: T,
-		io_channel: IoChannel<ClientIoMessage>,
-		cache: Arc<Mutex<Cache>>
-	) -> Result<Self, String> {
+	pub fn new(config: Config, db: Arc<KeyValueDB>, chain_col: Option<u32>, spec: &Spec, io_channel: IoChannel<ClientIoMessage>, cache: Arc<Mutex<Cache>>) -> Result<Self, String> {
+		let gh = ::rlp::encode(&spec.genesis_header());
 		Ok(Client {
 			queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, config.check_seal),
 			engine: spec.engine.clone(),
-			chain: HeaderChain::new(db.clone(), chain_col, &spec, cache)?,
+			chain: HeaderChain::new(db.clone(), chain_col, &gh, cache)?,
 			report: RwLock::new(ClientReport::default()),
 			import_lock: Mutex::new(()),
 			db: db,
 			listeners: RwLock::new(vec![]),
-			fetcher: fetcher,
 			verify_full: config.verify_full,
 		})
 	}
@@ -207,24 +191,10 @@ impl<T: ChainDataFetcher> Client<T> {
 	/// Create a new `Client` backed purely in-memory.
 	/// This will ignore all database options in the configuration.
-	pub fn in_memory(
-		config: Config,
-		spec: &Spec,
-		fetcher: T,
-		io_channel: IoChannel<ClientIoMessage>,
-		cache: Arc<Mutex<Cache>>
-	) -> Self {
+	pub fn in_memory(config: Config, spec: &Spec, io_channel: IoChannel<ClientIoMessage>, cache: Arc<Mutex<Cache>>) -> Self {
 		let db = ::util::kvdb::in_memory(0);
-		Client::new(
-			config,
-			Arc::new(db),
-			None,
-			spec,
-			fetcher,
-			io_channel,
-			cache
-		).expect("New DB creation infallible; qed")
+		Client::new(config, Arc::new(db), None, spec, io_channel, cache).expect("New DB creation infallible; qed")
 	}
 	/// Import a header to the queue for additional verification.
@@ -323,33 +293,19 @@ impl<T: ChainDataFetcher> Client<T> {
 				continue
 			}
-			let write_proof_result = match self.check_epoch_signal(&verified_header) {
-				Ok(Some(proof)) => self.write_pending_proof(&verified_header, proof),
-				Ok(None) => Ok(()),
-				Err(e) =>
-					panic!("Unable to fetch epoch transition proof: {:?}", e),
-			};
-			if let Err(e) = write_proof_result {
-				warn!(target: "client", "Error writing pending transition proof to DB: {:?} \
-					The node may not be able to synchronize further.", e);
-			}
-			let epoch_proof = self.engine.is_epoch_end(
-				&verified_header,
-				&|h| self.chain.block_header(BlockId::Hash(h)).map(|hdr| hdr.decode()),
-				&|h| self.chain.pending_transition(h),
-			);
+			// TODO: `epoch_end_signal`, `is_epoch_end`.
+			// proofs we get from the network would be _complete_, whereas we need
+			// _incomplete_ signals
 			let mut tx = self.db.transaction();
-			let pending = match self.chain.insert(&mut tx, verified_header, epoch_proof) {
+			let pending = match self.chain.insert(&mut tx, verified_header) {
 				Ok(pending) => {
 					good.push(hash);
 					self.report.write().blocks_imported += 1;
 					pending
 				}
 				Err(e) => {
-					debug!(target: "client", "Error importing header {:?}: {:?}", (num, hash), e);
+					debug!(target: "client", "Error importing header {:?}: {}", (num, hash), e);
 					bad.push(hash);
 					continue;
 				}
@@ -465,76 +421,9 @@ impl<T: ChainDataFetcher> Client<T> {
 		true
 	}
fn check_epoch_signal(&self, verified_header: &Header) -> Result<Option<Proof>, T::Error> {
let (mut block, mut receipts) = (None, None);
// First, check without providing auxiliary data.
match self.engine.signals_epoch_end(verified_header, None, None) {
EpochChange::No => return Ok(None),
EpochChange::Yes(proof) => return Ok(Some(proof)),
EpochChange::Unsure(unsure) => {
let (b, r) = match unsure {
Unsure::NeedsBody =>
(Some(self.fetcher.block_body(verified_header)), None),
Unsure::NeedsReceipts =>
(None, Some(self.fetcher.block_receipts(verified_header))),
Unsure::NeedsBoth => (
Some(self.fetcher.block_body(verified_header)),
Some(self.fetcher.block_receipts(verified_header)),
),
};
if let Some(b) = b {
block = Some(b.into_future().wait()?.into_inner());
}
if let Some(r) = r {
receipts = Some(r.into_future().wait()?);
}
}
}
let block = block.as_ref().map(|x| &x[..]);
let receipts = receipts.as_ref().map(|x| &x[..]);
// Check again now that required data has been fetched.
match self.engine.signals_epoch_end(verified_header, block, receipts) {
EpochChange::No => return Ok(None),
EpochChange::Yes(proof) => return Ok(Some(proof)),
EpochChange::Unsure(_) =>
panic!("Detected faulty engine implementation: requests additional \
data to check epoch end signal when everything necessary provided"),
}
}
// attempts to fetch the epoch proof from the network until successful.
fn write_pending_proof(&self, header: &Header, proof: Proof) -> Result<(), T::Error> {
let proof = match proof {
Proof::Known(known) => known,
Proof::WithState(state_dependent) => {
self.fetcher.epoch_transition(
header.hash(),
self.engine.clone(),
state_dependent
).into_future().wait()?
}
};
let mut batch = self.db.transaction();
self.chain.insert_pending_transition(&mut batch, header.hash(), epoch::PendingTransition {
proof: proof,
});
self.db.write_buffered(batch);
Ok(())
}
 }
-impl<T: ChainDataFetcher> LightChainClient for Client<T> {
-	fn add_listener(&self, listener: Weak<LightChainNotify>) {
-		Client::add_listener(self, listener)
-	}
+impl LightChainClient for Client {
 	fn chain_info(&self) -> BlockChainInfo { Client::chain_info(self) }
 	fn queue_header(&self, header: Header) -> Result<H256, BlockImportError> {
@@ -593,7 +482,7 @@ impl<T: ChainDataFetcher> LightChainClient for Client<T> {
 		Client::cht_root(self, i)
 	}
-	fn eip86_transition(&self) -> BlockNumber {
+	fn eip86_transition(&self) -> u64 {
 		self.engine().params().eip86_transition
 	}
@@ -601,38 +490,3 @@ impl<T: ChainDataFetcher> LightChainClient for Client<T> {
 		Client::report(self)
 	}
 }
impl<T: ChainDataFetcher> ::ethcore::client::EngineClient for Client<T> {
fn update_sealing(&self) { }
fn submit_seal(&self, _block_hash: H256, _seal: Vec<Vec<u8>>) { }
fn broadcast_consensus_message(&self, _message: Vec<u8>) { }
fn epoch_transition_for(&self, parent_hash: H256) -> Option<EpochTransition> {
self.chain.epoch_transition_for(parent_hash).map(|(hdr, proof)| EpochTransition {
block_hash: hdr.hash(),
block_number: hdr.number(),
proof: proof,
})
}
fn chain_info(&self) -> BlockChainInfo {
Client::chain_info(self)
}
fn call_contract(&self, _id: BlockId, _address: Address, _data: Vec<u8>) -> Result<Vec<u8>, String> {
Err("Contract calling not supported by light client".into())
}
fn transact_contract(&self, _address: Address, _data: Vec<u8>)
-> Result<TransactionImportResult, EthcoreError>
{
// TODO: these are only really used for misbehavior reporting.
// no relevant clients will be running light clients, but maybe
// they could be at some point?
Err(TransactionError::LimitReached.into())
}
fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
self.block_header(id).map(|hdr| hdr.number())
}
}
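Removing the fetcher parameter changes every `Client` constructor call site. Roughly, with placeholder argument names (`config`, `db`, `spec`, `fetcher`, `io_channel`, `cache` stand in for values the caller already has; this is a sketch of the call shape, not code from the diff):

// Before this revert (light-poa): a ChainDataFetcher was threaded through so
// the client could fetch epoch-transition data it cannot derive locally.
//   let client = Client::new(config, db, None, &spec, fetcher, io_channel, cache)?;
//   let client = Client::in_memory(config, &spec, fetcher, io_channel, cache);
//
// After this revert: no fetcher, and the header chain is initialised from the
// RLP-encoded genesis header rather than the full Spec.
//   let client = Client::new(config, db, None, &spec, io_channel, cache)?;
//   let client = Client::in_memory(config, &spec, io_channel, cache);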

View File

@@ -30,7 +30,7 @@ use util::kvdb::{Database, DatabaseConfig};
 use cache::Cache;
 use parking_lot::Mutex;
-use super::{ChainDataFetcher, Client, Config as ClientConfig};
+use super::{Client, Config as ClientConfig};
 /// Errors on service initialization.
 #[derive(Debug)]
@@ -51,14 +51,14 @@ impl fmt::Display for Error {
 }
 /// Light client service.
-pub struct Service<T> {
-	client: Arc<Client<T>>,
+pub struct Service {
+	client: Arc<Client>,
 	io_service: IoService<ClientIoMessage>,
 }
-impl<T: ChainDataFetcher> Service<T> {
+impl Service {
 	/// Start the service: initialize I/O workers and client itself.
-	pub fn start(config: ClientConfig, spec: &Spec, fetcher: T, path: &Path, cache: Arc<Mutex<Cache>>) -> Result<Self, Error> {
+	pub fn start(config: ClientConfig, spec: &Spec, path: &Path, cache: Arc<Mutex<Cache>>) -> Result<Self, Error> {
 		// initialize database.
 		let mut db_config = DatabaseConfig::with_columns(db::NUM_COLUMNS);
@@ -81,14 +81,10 @@ impl<T: ChainDataFetcher> Service<T> {
 			db,
 			db::COL_LIGHT_CHAIN,
 			spec,
-			fetcher,
 			io_service.channel(),
 			cache,
 		).map_err(Error::Database)?);
 		io_service.register_handler(Arc::new(ImportBlocks(client.clone()))).map_err(Error::Io)?;
-		spec.engine.register_client(Arc::downgrade(&client) as _);
 		Ok(Service {
 			client: client,
 			io_service: io_service,
@@ -101,14 +97,14 @@ impl<T: ChainDataFetcher> Service<T> {
 	}
 	/// Get a handle to the client.
-	pub fn client(&self) -> &Arc<Client<T>> {
+	pub fn client(&self) -> &Arc<Client> {
 		&self.client
 	}
 }
-struct ImportBlocks<T>(Arc<Client<T>>);
+struct ImportBlocks(Arc<Client>);
-impl<T: ChainDataFetcher> IoHandler<ClientIoMessage> for ImportBlocks<T> {
+impl IoHandler<ClientIoMessage> for ImportBlocks {
 	fn message(&self, _io: &IoContext<ClientIoMessage>, message: &ClientIoMessage) {
 		if let ClientIoMessage::BlockVerified = *message {
 			self.0.import_verified();
@@ -124,7 +120,6 @@ mod tests {
 	use std::sync::Arc;
 	use cache::Cache;
-	use client::fetch;
 	use time::Duration;
 	use parking_lot::Mutex;
@@ -134,6 +129,6 @@
 		let temp_path = RandomTempPath::new();
 		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
-		Service::start(Default::default(), &spec, fetch::unavailable(), temp_path.as_path(), cache).unwrap();
+		Service::start(Default::default(), &spec, temp_path.as_path(), cache).unwrap();
 	}
 }
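The same change at the service level, as the test above exercises; both lines are taken verbatim from that test, the dropped fetcher argument being the only difference:

// before: Service::start(Default::default(), &spec, fetch::unavailable(), temp_path.as_path(), cache).unwrap();
// after:  Service::start(Default::default(), &spec, temp_path.as_path(), cache).unwrap();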

View File

@@ -62,7 +62,6 @@ fn hardcoded_serve_time(kind: Kind) -> u64 {
 		Kind::Storage => 2_000_000,
 		Kind::Code => 1_500_000,
 		Kind::Execution => 250, // per gas.
-		Kind::Signal => 500_000,
 	}
 }

View File

@@ -104,8 +104,9 @@ mod packet {
 	// relay transactions to peers.
 	pub const SEND_TRANSACTIONS: u8 = 0x06;
-	// two packets were previously meant to be reserved for epoch proofs.
-	// these have since been moved to requests.
+	// request and respond with epoch transition proof
+	pub const REQUEST_EPOCH_PROOF: u8 = 0x07;
+	pub const EPOCH_PROOF: u8 = 0x08;
 }
 // timeouts for different kinds of requests. all values are in milliseconds.
@@ -123,7 +124,6 @@ mod timeout {
 	pub const CONTRACT_CODE: i64 = 100;
 	pub const HEADER_PROOF: i64 = 100;
 	pub const TRANSACTION_PROOF: i64 = 1000; // per gas?
-	pub const EPOCH_SIGNAL: i64 = 200;
 }
 /// A request id.
@@ -584,6 +584,12 @@ impl LightProtocol {
 			packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, rlp),
+			packet::REQUEST_EPOCH_PROOF | packet::EPOCH_PROOF => {
+				// ignore these for now, but leave them specified.
+				debug!(target: "pip", "Ignoring request/response for epoch proof");
+				Ok(())
+			}
 			other => {
 				Err(Error::UnrecognizedPacket(other))
 			}
@@ -946,7 +952,6 @@ impl LightProtocol {
 				CompleteRequest::Storage(req) => self.provider.storage_proof(req).map(Response::Storage),
 				CompleteRequest::Code(req) => self.provider.contract_code(req).map(Response::Code),
 				CompleteRequest::Execution(req) => self.provider.transaction_proof(req).map(Response::Execution),
-				CompleteRequest::Signal(req) => self.provider.epoch_signal(req).map(Response::Signal),
 			}
 		});

View File

@@ -91,7 +91,6 @@ pub struct CostTable {
 	code: U256,
 	header_proof: U256,
 	transaction_proof: U256, // cost per gas.
-	epoch_signal: U256,
 }
 impl Default for CostTable {
@@ -108,7 +107,6 @@ impl Default for CostTable {
 			code: 20000.into(),
 			header_proof: 15000.into(),
 			transaction_proof: 2.into(),
-			epoch_signal: 10000.into(),
 		}
 	}
 }
@@ -123,7 +121,7 @@ impl Encodable for CostTable {
 			s.append(cost);
 		}
-		s.begin_list(11).append(&self.base);
+		s.begin_list(10).append(&self.base);
 		append_cost(s, &self.headers, request::Kind::Headers);
 		append_cost(s, &self.transaction_index, request::Kind::TransactionIndex);
 		append_cost(s, &self.body, request::Kind::Body);
@@ -133,7 +131,6 @@ impl Encodable for CostTable {
 		append_cost(s, &self.code, request::Kind::Code);
 		append_cost(s, &self.header_proof, request::Kind::HeaderProof);
 		append_cost(s, &self.transaction_proof, request::Kind::Execution);
-		append_cost(s, &self.epoch_signal, request::Kind::Signal);
 	}
 }
@@ -150,7 +147,6 @@ impl Decodable for CostTable {
 		let mut code = None;
 		let mut header_proof = None;
 		let mut transaction_proof = None;
-		let mut epoch_signal = None;
 		for cost_list in rlp.iter().skip(1) {
 			let cost = cost_list.val_at(1)?;
@@ -164,7 +160,6 @@ impl Decodable for CostTable {
 				request::Kind::Code => code = Some(cost),
 				request::Kind::HeaderProof => header_proof = Some(cost),
 				request::Kind::Execution => transaction_proof = Some(cost),
-				request::Kind::Signal => epoch_signal = Some(cost),
 			}
 		}
@@ -181,7 +176,6 @@ impl Decodable for CostTable {
 			code: unwrap_cost(code)?,
 			header_proof: unwrap_cost(header_proof)?,
 			transaction_proof: unwrap_cost(transaction_proof)?,
-			epoch_signal: unwrap_cost(epoch_signal)?,
 		})
 	}
 }
@@ -244,7 +238,6 @@ impl FlowParams {
 			code: cost_for_kind(Kind::Code),
 			header_proof: cost_for_kind(Kind::HeaderProof),
 			transaction_proof: cost_for_kind(Kind::Execution),
-			epoch_signal: cost_for_kind(Kind::Signal),
 		};
 		FlowParams {
@@ -270,8 +263,7 @@ impl FlowParams {
 			storage: free_cost.clone(),
 			code: free_cost.clone(),
 			header_proof: free_cost.clone(),
-			transaction_proof: free_cost.clone(),
-			epoch_signal: free_cost,
+			transaction_proof: free_cost,
 		}
 	}
 }
@@ -301,7 +293,6 @@ impl FlowParams {
 			Request::Storage(_) => self.costs.storage,
 			Request::Code(_) => self.costs.code,
 			Request::Execution(ref req) => self.costs.transaction_proof * req.gas,
-			Request::Signal(_) => self.costs.epoch_signal,
 		}
 	}
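To make the credit arithmetic concrete: per-kind costs are charged per request, and proof-of-execution requests scale with their gas allowance (see the match above). A worked example using the default table from this file; how the flat `base` charge is applied is not shown in these hunks, so it is left out here:

// Default cost table (from `impl Default for CostTable` above):
//   transaction_proof = 2 credits per gas
// A proof-of-execution request allowing 100_000 gas therefore costs
//   2 * 100_000 = 200_000 credits,
// while the removed epoch-signal request had a flat cost of 10_000 credits.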

View File

@@ -139,7 +139,6 @@ fn compute_timeout(reqs: &Requests) -> Duration {
 			Request::Storage(_) => timeout::PROOF,
 			Request::Code(_) => timeout::CONTRACT_CODE,
 			Request::Execution(_) => timeout::TRANSACTION_PROOF,
-			Request::Signal(_) => timeout::EPOCH_SIGNAL,
 		}
 	}))
 }

View File

@@ -158,12 +158,6 @@ impl Provider for TestProvider {
 		None
 	}
-	fn epoch_signal(&self, _req: request::CompleteSignalRequest) -> Option<request::SignalResponse> {
-		Some(request::SignalResponse {
-			signal: vec![1, 2, 3, 4],
-		})
-	}
 	fn ready_transactions(&self) -> Vec<PendingTransaction> {
 		self.0.client.ready_transactions()
 	}
@@ -529,50 +523,6 @@ fn get_contract_code() {
 	proto.handle_packet(&expected, &1, packet::REQUEST, &request_body);
 }
#[test]
fn epoch_signal() {
let capabilities = capabilities();
let (provider, proto) = setup(capabilities.clone());
let flow_params = proto.flow_params.read().clone();
let cur_status = status(provider.client.chain_info());
{
let packet_body = write_handshake(&cur_status, &capabilities, &proto);
proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body.clone()));
proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &packet_body);
}
let req_id = 112;
let request = Request::Signal(request::IncompleteSignalRequest {
block_hash: H256([1; 32]).into(),
});
let requests = encode_single(request.clone());
let request_body = make_packet(req_id, &requests);
let response = {
let response = vec![Response::Signal(SignalResponse {
signal: vec![1, 2, 3, 4],
})];
let limit = *flow_params.limit();
let cost = flow_params.compute_cost_multi(requests.requests());
println!("limit = {}, cost = {}", limit, cost);
let new_creds = limit - cost;
let mut response_stream = RlpStream::new_list(3);
response_stream.append(&req_id).append(&new_creds).append_list(&response);
response_stream.out()
};
let expected = Expect::Respond(packet::RESPONSE, response);
proto.handle_packet(&expected, &1, packet::REQUEST, &request_body);
}
 #[test]
 fn proof_of_execution() {
 	let capabilities = capabilities();

View File

@@ -195,8 +195,6 @@ fn guess_capabilities(requests: &[CheckedRequest]) -> Capabilities {
 				caps.serve_headers = true,
 			CheckedRequest::HeaderByHash(_, _) =>
 				caps.serve_headers = true,
-			CheckedRequest::Signal(_, _) =>
-				caps.serve_headers = true,
 			CheckedRequest::Body(ref req, _) => if let Ok(ref hdr) = req.0.as_ref() {
 				update_since(&mut caps.serve_chain_since, hdr.number());
 			},

View File

@@ -20,7 +20,7 @@ use std::sync::Arc;
 use ethcore::basic_account::BasicAccount;
 use ethcore::encoded;
-use ethcore::engines::{Engine, StateDependentProof};
+use ethcore::engines::Engine;
 use ethcore::receipt::Receipt;
 use ethcore::state::{self, ProvedExecution};
 use ethcore::transaction::SignedTransaction;
@@ -56,8 +56,6 @@ pub enum Request {
 	Code(Code),
 	/// A request for proof of execution.
 	Execution(TransactionProof),
-	/// A request for epoch change signal.
-	Signal(Signal),
 }
 /// A request argument.
@@ -138,7 +136,6 @@ impl_single!(Body, Body, encoded::Block);
 impl_single!(Account, Account, Option<BasicAccount>);
 impl_single!(Code, Code, Bytes);
 impl_single!(Execution, TransactionProof, super::ExecutionResult);
-impl_single!(Signal, Signal, Vec<u8>);
 macro_rules! impl_args {
 	() => {
@@ -247,7 +244,6 @@ pub enum CheckedRequest {
 	Account(Account, net_request::IncompleteAccountRequest),
 	Code(Code, net_request::IncompleteCodeRequest),
 	Execution(TransactionProof, net_request::IncompleteExecutionRequest),
-	Signal(Signal, net_request::IncompleteSignalRequest)
 }
 impl From<Request> for CheckedRequest {
@@ -306,12 +302,6 @@ impl From<Request> for CheckedRequest {
 				};
 				CheckedRequest::Execution(req, net_req)
 			}
-			Request::Signal(req) => {
-				let net_req = net_request::IncompleteSignalRequest {
-					block_hash: req.hash.into(),
-				};
-				CheckedRequest::Signal(req, net_req)
-			}
 		}
 	}
 }
@@ -329,7 +319,6 @@ impl CheckedRequest {
 			CheckedRequest::Account(_, req) => NetRequest::Account(req),
 			CheckedRequest::Code(_, req) => NetRequest::Code(req),
 			CheckedRequest::Execution(_, req) => NetRequest::Execution(req),
-			CheckedRequest::Signal(_, req) => NetRequest::Signal(req),
 		}
 	}
@@ -457,7 +446,6 @@ macro_rules! match_me {
 			CheckedRequest::Account($check, $req) => $e,
 			CheckedRequest::Code($check, $req) => $e,
 			CheckedRequest::Execution($check, $req) => $e,
-			CheckedRequest::Signal($check, $req) => $e,
 		}
 	}
 }
@@ -485,7 +473,6 @@ impl IncompleteRequest for CheckedRequest {
 			CheckedRequest::Account(_, ref req) => req.check_outputs(f),
 			CheckedRequest::Code(_, ref req) => req.check_outputs(f),
 			CheckedRequest::Execution(_, ref req) => req.check_outputs(f),
-			CheckedRequest::Signal(_, ref req) => req.check_outputs(f),
 		}
 	}
@@ -506,7 +493,6 @@ impl IncompleteRequest for CheckedRequest {
 			CheckedRequest::Account(_, req) => req.complete().map(CompleteRequest::Account),
 			CheckedRequest::Code(_, req) => req.complete().map(CompleteRequest::Code),
 			CheckedRequest::Execution(_, req) => req.complete().map(CompleteRequest::Execution),
-			CheckedRequest::Signal(_, req) => req.complete().map(CompleteRequest::Signal),
 		}
 	}
@@ -558,9 +544,6 @@ impl net_request::CheckedRequest for CheckedRequest {
 			CheckedRequest::Execution(ref prover, _) =>
 				expect!((&NetResponse::Execution(ref res), _) =>
 					prover.check_response(cache, &res.items).map(Response::Execution)),
CheckedRequest::Signal(ref prover, _) =>
expect!((&NetResponse::Signal(ref res), _) =>
prover.check_response(cache, &res.signal).map(Response::Signal)),
 		}
 	}
 }
@@ -584,8 +567,6 @@ pub enum Response {
 	Code(Vec<u8>),
 	/// Response to a request for proved execution.
 	Execution(super::ExecutionResult),
-	/// Response to a request for epoch change signal.
-	Signal(Vec<u8>),
 }
 impl net_request::ResponseLike for Response {
@@ -869,27 +850,6 @@ impl TransactionProof {
 	}
 }
/// Request for epoch signal.
/// Provide engine and state-dependent proof checker.
#[derive(Clone)]
pub struct Signal {
/// Block hash and number to fetch proof for.
pub hash: H256,
/// Consensus engine, used to check the proof.
pub engine: Arc<Engine>,
/// Special checker for the proof.
pub proof_check: Arc<StateDependentProof>,
}
impl Signal {
/// Check the signal, returning the signal or indicate that it's bad.
pub fn check_response(&self, _: &Mutex<::cache::Cache>, signal: &[u8]) -> Result<Vec<u8>, Error> {
self.proof_check.check_proof(&*self.engine, signal)
.map(|_| signal.to_owned())
.map_err(|_| Error::BadProof)
}
}
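For context on what the removed request did: the checker validates the opaque proof bytes and, on success, passes the signal through unchanged. A minimal standalone sketch of that shape, using a hypothetical ProofChecker trait and BadProof error in place of the real ethcore types:

// Hypothetical stand-ins for the engine-side types the removed `Signal` used.
#[derive(Debug)]
struct BadProof;

trait ProofChecker {
    /// Ok(()) when the raw signal bytes verify against the engine's rules.
    fn check_proof(&self, signal: &[u8]) -> Result<(), String>;
}

struct Signal<C: ProofChecker> {
    proof_check: C,
}

impl<C: ProofChecker> Signal<C> {
    // Mirrors the removed check_response: pass the bytes on only if the proof checks out.
    fn check_response(&self, signal: &[u8]) -> Result<Vec<u8>, BadProof> {
        self.proof_check
            .check_proof(signal)
            .map(|_| signal.to_vec())
            .map_err(|_| BadProof)
    }
}

struct AlwaysOk;
impl ProofChecker for AlwaysOk {
    fn check_proof(&self, _signal: &[u8]) -> Result<(), String> { Ok(()) }
}

fn main() {
    let req = Signal { proof_check: AlwaysOk };
    assert_eq!(req.check_response(&[1, 2, 3]).unwrap(), vec![1, 2, 3]);
}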
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;

View File

@ -127,9 +127,6 @@ pub trait Provider: Send + Sync {
/// Provide a proof-of-execution for the given transaction proof request. /// Provide a proof-of-execution for the given transaction proof request.
/// Returns a vector of all state items necessary to execute the transaction. /// Returns a vector of all state items necessary to execute the transaction.
fn transaction_proof(&self, req: request::CompleteExecutionRequest) -> Option<request::ExecutionResponse>; fn transaction_proof(&self, req: request::CompleteExecutionRequest) -> Option<request::ExecutionResponse>;
/// Provide epoch signal data at given block hash. This should be just the
fn epoch_signal(&self, req: request::CompleteSignalRequest) -> Option<request::SignalResponse>;
} }
// Implementation of a light client data provider for a client. // Implementation of a light client data provider for a client.
@ -268,12 +265,6 @@ impl<T: ProvingBlockChainClient + ?Sized> Provider for T {
fn ready_transactions(&self) -> Vec<PendingTransaction> { fn ready_transactions(&self) -> Vec<PendingTransaction> {
BlockChainClient::ready_transactions(self) BlockChainClient::ready_transactions(self)
} }
fn epoch_signal(&self, req: request::CompleteSignalRequest) -> Option<request::SignalResponse> {
self.epoch_signal(req.block_hash).map(|signal| request::SignalResponse {
signal: signal,
})
}
} }
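The removed provider hook only surfaced whatever proof the full client had already stored for a pending transition; the light provider always answered with nothing. A rough standalone sketch of that split, with a hypothetical SignalStore standing in for the chain's pending-transition store:

use std::collections::HashMap;

type Hash = [u8; 32];

// Hypothetical stand-in for the full client's pending-transition store.
struct SignalStore {
    pending: HashMap<Hash, Vec<u8>>,
}

trait Provider {
    // The raw epoch-signal proof for a block hash, if this node can serve it.
    fn epoch_signal(&self, hash: &Hash) -> Option<Vec<u8>>;
}

// A full node serves proofs straight out of its store.
impl Provider for SignalStore {
    fn epoch_signal(&self, hash: &Hash) -> Option<Vec<u8>> {
        self.pending.get(hash).cloned()
    }
}

// A light node has no state to prove against, so it always declines.
struct LightProvider;
impl Provider for LightProvider {
    fn epoch_signal(&self, _hash: &Hash) -> Option<Vec<u8>> {
        None
    }
}

fn main() {
    let mut pending = HashMap::new();
    pending.insert([0u8; 32], vec![0xde, 0xad]);
    let full = SignalStore { pending };
    assert_eq!(full.epoch_signal(&[0u8; 32]), Some(vec![0xde, 0xad]));
    assert_eq!(LightProvider.epoch_signal(&[0u8; 32]), None);
}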
/// The light client "provider" implementation. This wraps a `LightClient` and /// The light client "provider" implementation. This wraps a `LightClient` and
@ -339,10 +330,6 @@ impl<L: AsLightClient + Send + Sync> Provider for LightProvider<L> {
None None
} }
fn epoch_signal(&self, _req: request::CompleteSignalRequest) -> Option<request::SignalResponse> {
None
}
fn ready_transactions(&self) -> Vec<PendingTransaction> { fn ready_transactions(&self) -> Vec<PendingTransaction> {
let chain_info = self.chain_info(); let chain_info = self.chain_info();
self.txqueue.read().ready_transactions(chain_info.best_block_number, chain_info.best_block_timestamp) self.txqueue.read().ready_transactions(chain_info.best_block_number, chain_info.best_block_timestamp)

View File

@ -67,11 +67,6 @@ pub use self::execution::{
Incomplete as IncompleteExecutionRequest, Incomplete as IncompleteExecutionRequest,
Response as ExecutionResponse, Response as ExecutionResponse,
}; };
pub use self::epoch_signal::{
Complete as CompleteSignalRequest,
Incomplete as IncompleteSignalRequest,
Response as SignalResponse,
};
pub use self::builder::{RequestBuilder, Requests}; pub use self::builder::{RequestBuilder, Requests};
@ -266,8 +261,6 @@ pub enum Request {
Code(IncompleteCodeRequest), Code(IncompleteCodeRequest),
/// A request for proof of execution, /// A request for proof of execution,
Execution(IncompleteExecutionRequest), Execution(IncompleteExecutionRequest),
/// A request for an epoch signal.
Signal(IncompleteSignalRequest),
} }
/// All request types, in an answerable state. /// All request types, in an answerable state.
@ -291,8 +284,6 @@ pub enum CompleteRequest {
Code(CompleteCodeRequest), Code(CompleteCodeRequest),
/// A request for proof of execution, /// A request for proof of execution,
Execution(CompleteExecutionRequest), Execution(CompleteExecutionRequest),
/// A request for an epoch signal.
Signal(CompleteSignalRequest),
} }
impl CompleteRequest { impl CompleteRequest {
@ -308,7 +299,6 @@ impl CompleteRequest {
CompleteRequest::Storage(_) => Kind::Storage, CompleteRequest::Storage(_) => Kind::Storage,
CompleteRequest::Code(_) => Kind::Code, CompleteRequest::Code(_) => Kind::Code,
CompleteRequest::Execution(_) => Kind::Execution, CompleteRequest::Execution(_) => Kind::Execution,
CompleteRequest::Signal(_) => Kind::Signal,
} }
} }
} }
@ -326,7 +316,6 @@ impl Request {
Request::Storage(_) => Kind::Storage, Request::Storage(_) => Kind::Storage,
Request::Code(_) => Kind::Code, Request::Code(_) => Kind::Code,
Request::Execution(_) => Kind::Execution, Request::Execution(_) => Kind::Execution,
Request::Signal(_) => Kind::Signal,
} }
} }
} }
@ -343,7 +332,6 @@ impl Decodable for Request {
Kind::Storage => Ok(Request::Storage(rlp.val_at(1)?)), Kind::Storage => Ok(Request::Storage(rlp.val_at(1)?)),
Kind::Code => Ok(Request::Code(rlp.val_at(1)?)), Kind::Code => Ok(Request::Code(rlp.val_at(1)?)),
Kind::Execution => Ok(Request::Execution(rlp.val_at(1)?)), Kind::Execution => Ok(Request::Execution(rlp.val_at(1)?)),
Kind::Signal => Ok(Request::Signal(rlp.val_at(1)?)),
} }
} }
} }
@ -365,7 +353,6 @@ impl Encodable for Request {
Request::Storage(ref req) => s.append(req), Request::Storage(ref req) => s.append(req),
Request::Code(ref req) => s.append(req), Request::Code(ref req) => s.append(req),
Request::Execution(ref req) => s.append(req), Request::Execution(ref req) => s.append(req),
Request::Signal(ref req) => s.append(req),
}; };
} }
} }
@ -387,7 +374,6 @@ impl IncompleteRequest for Request {
Request::Storage(ref req) => req.check_outputs(f), Request::Storage(ref req) => req.check_outputs(f),
Request::Code(ref req) => req.check_outputs(f), Request::Code(ref req) => req.check_outputs(f),
Request::Execution(ref req) => req.check_outputs(f), Request::Execution(ref req) => req.check_outputs(f),
Request::Signal(ref req) => req.check_outputs(f),
} }
} }
@ -402,7 +388,6 @@ impl IncompleteRequest for Request {
Request::Storage(ref req) => req.note_outputs(f), Request::Storage(ref req) => req.note_outputs(f),
Request::Code(ref req) => req.note_outputs(f), Request::Code(ref req) => req.note_outputs(f),
Request::Execution(ref req) => req.note_outputs(f), Request::Execution(ref req) => req.note_outputs(f),
Request::Signal(ref req) => req.note_outputs(f),
} }
} }
@ -417,7 +402,6 @@ impl IncompleteRequest for Request {
Request::Storage(ref mut req) => req.fill(oracle), Request::Storage(ref mut req) => req.fill(oracle),
Request::Code(ref mut req) => req.fill(oracle), Request::Code(ref mut req) => req.fill(oracle),
Request::Execution(ref mut req) => req.fill(oracle), Request::Execution(ref mut req) => req.fill(oracle),
Request::Signal(ref mut req) => req.fill(oracle),
} }
} }
@ -432,7 +416,6 @@ impl IncompleteRequest for Request {
Request::Storage(req) => req.complete().map(CompleteRequest::Storage), Request::Storage(req) => req.complete().map(CompleteRequest::Storage),
Request::Code(req) => req.complete().map(CompleteRequest::Code), Request::Code(req) => req.complete().map(CompleteRequest::Code),
Request::Execution(req) => req.complete().map(CompleteRequest::Execution), Request::Execution(req) => req.complete().map(CompleteRequest::Execution),
Request::Signal(req) => req.complete().map(CompleteRequest::Signal),
} }
} }
@ -447,7 +430,6 @@ impl IncompleteRequest for Request {
Request::Storage(ref mut req) => req.adjust_refs(mapping), Request::Storage(ref mut req) => req.adjust_refs(mapping),
Request::Code(ref mut req) => req.adjust_refs(mapping), Request::Code(ref mut req) => req.adjust_refs(mapping),
Request::Execution(ref mut req) => req.adjust_refs(mapping), Request::Execution(ref mut req) => req.adjust_refs(mapping),
Request::Signal(ref mut req) => req.adjust_refs(mapping),
} }
} }
} }
@ -489,8 +471,6 @@ pub enum Kind {
Code = 7, Code = 7,
/// A request for transaction execution + state proof. /// A request for transaction execution + state proof.
Execution = 8, Execution = 8,
/// A request for epoch transition signal.
Signal = 9,
} }
impl Decodable for Kind { impl Decodable for Kind {
@ -505,7 +485,6 @@ impl Decodable for Kind {
6 => Ok(Kind::Storage), 6 => Ok(Kind::Storage),
7 => Ok(Kind::Code), 7 => Ok(Kind::Code),
8 => Ok(Kind::Execution), 8 => Ok(Kind::Execution),
9 => Ok(Kind::Signal),
_ => Err(DecoderError::Custom("Unknown PIP request ID.")), _ => Err(DecoderError::Custom("Unknown PIP request ID.")),
} }
} }
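Each PIP request kind travels as a single numeric ID, so dropping Signal = 9 shrinks the accepted range back to the kinds shown above. A small standalone sketch of that mapping over a plain u8, using only the IDs visible in this hunk (the lower IDs are elided here):

// A subset of the request kinds, with the same explicit wire IDs as above.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Kind {
    Storage = 6,
    Code = 7,
    Execution = 8,
}

// Decode a wire ID; anything outside the known set is rejected, matching the
// `_ => Err(DecoderError::Custom("Unknown PIP request ID."))` arm above.
fn kind_from_id(id: u8) -> Result<Kind, &'static str> {
    match id {
        6 => Ok(Kind::Storage),
        7 => Ok(Kind::Code),
        8 => Ok(Kind::Execution),
        _ => Err("Unknown PIP request ID."),
    }
}

fn main() {
    assert_eq!(kind_from_id(8), Ok(Kind::Execution));
    // With the Signal variant gone, ID 9 is unknown again.
    assert!(kind_from_id(9).is_err());
}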
@ -538,8 +517,6 @@ pub enum Response {
Code(CodeResponse), Code(CodeResponse),
/// A response for proof of execution, /// A response for proof of execution,
Execution(ExecutionResponse), Execution(ExecutionResponse),
/// A response for epoch change signal.
Signal(SignalResponse),
} }
impl ResponseLike for Response { impl ResponseLike for Response {
@ -555,7 +532,6 @@ impl ResponseLike for Response {
Response::Storage(ref res) => res.fill_outputs(f), Response::Storage(ref res) => res.fill_outputs(f),
Response::Code(ref res) => res.fill_outputs(f), Response::Code(ref res) => res.fill_outputs(f),
Response::Execution(ref res) => res.fill_outputs(f), Response::Execution(ref res) => res.fill_outputs(f),
Response::Signal(ref res) => res.fill_outputs(f),
} }
} }
} }
@ -573,7 +549,6 @@ impl Response {
Response::Storage(_) => Kind::Storage, Response::Storage(_) => Kind::Storage,
Response::Code(_) => Kind::Code, Response::Code(_) => Kind::Code,
Response::Execution(_) => Kind::Execution, Response::Execution(_) => Kind::Execution,
Response::Signal(_) => Kind::Signal,
} }
} }
} }
@ -590,7 +565,6 @@ impl Decodable for Response {
Kind::Storage => Ok(Response::Storage(rlp.val_at(1)?)), Kind::Storage => Ok(Response::Storage(rlp.val_at(1)?)),
Kind::Code => Ok(Response::Code(rlp.val_at(1)?)), Kind::Code => Ok(Response::Code(rlp.val_at(1)?)),
Kind::Execution => Ok(Response::Execution(rlp.val_at(1)?)), Kind::Execution => Ok(Response::Execution(rlp.val_at(1)?)),
Kind::Signal => Ok(Response::Signal(rlp.val_at(1)?)),
} }
} }
} }
@ -612,7 +586,6 @@ impl Encodable for Response {
Response::Storage(ref res) => s.append(res), Response::Storage(ref res) => s.append(res),
Response::Code(ref res) => s.append(res), Response::Code(ref res) => s.append(res),
Response::Execution(ref res) => s.append(res), Response::Execution(ref res) => s.append(res),
Response::Signal(ref res) => s.append(res),
}; };
} }
} }
@ -787,8 +760,8 @@ pub mod header {
pub mod header_proof { pub mod header_proof {
use super::{Field, NoSuchOutput, OutputKind, Output}; use super::{Field, NoSuchOutput, OutputKind, Output};
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
use bigint::prelude::U256;
use bigint::hash::H256; use bigint::hash::H256;
use bigint::prelude::U256;
use util::Bytes; use util::Bytes;
/// Potentially incomplete header proof request. /// Potentially incomplete header proof request.
@ -1118,8 +1091,8 @@ pub mod block_body {
/// A request for an account proof. /// A request for an account proof.
pub mod account { pub mod account {
use super::{Field, NoSuchOutput, OutputKind, Output}; use super::{Field, NoSuchOutput, OutputKind, Output};
use bigint::prelude::U256;
use bigint::hash::H256; use bigint::hash::H256;
use bigint::prelude::U256;
use util::Bytes; use util::Bytes;
/// Potentially incomplete request for an account proof. /// Potentially incomplete request for an account proof.
@ -1415,8 +1388,8 @@ pub mod execution {
use super::{Field, NoSuchOutput, OutputKind, Output}; use super::{Field, NoSuchOutput, OutputKind, Output};
use ethcore::transaction::Action; use ethcore::transaction::Action;
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
use bigint::prelude::U256;
use bigint::hash::H256; use bigint::hash::H256;
use bigint::prelude::U256;
use util::{Bytes, Address, DBValue}; use util::{Bytes, Address, DBValue};
/// Potentially incomplete execution proof request. /// Potentially incomplete execution proof request.
@ -1536,105 +1509,6 @@ pub mod execution {
} }
} }
/// A request for epoch signal data.
pub mod epoch_signal {
use super::{Field, NoSuchOutput, OutputKind, Output};
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
use bigint::hash::H256;
use util::Bytes;
/// Potentially incomplete epoch signal request.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Incomplete {
/// The block hash to request the signal for.
pub block_hash: Field<H256>,
}
impl Decodable for Incomplete {
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
Ok(Incomplete {
block_hash: rlp.val_at(0)?,
})
}
}
impl Encodable for Incomplete {
fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(1).append(&self.block_hash);
}
}
impl super::IncompleteRequest for Incomplete {
type Complete = Complete;
type Response = Response;
fn check_outputs<F>(&self, mut f: F) -> Result<(), NoSuchOutput>
where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>
{
if let Field::BackReference(req, idx) = self.block_hash {
f(req, idx, OutputKind::Hash)?;
}
Ok(())
}
fn note_outputs<F>(&self, _: F) where F: FnMut(usize, OutputKind) {}
fn fill<F>(&mut self, oracle: F) where F: Fn(usize, usize) -> Result<Output, NoSuchOutput> {
if let Field::BackReference(req, idx) = self.block_hash {
self.block_hash = match oracle(req, idx) {
Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash.into()),
_ => Field::BackReference(req, idx),
}
}
}
fn complete(self) -> Result<Self::Complete, NoSuchOutput> {
Ok(Complete {
block_hash: self.block_hash.into_scalar()?,
})
}
fn adjust_refs<F>(&mut self, mut mapping: F) where F: FnMut(usize) -> usize {
self.block_hash.adjust_req(&mut mapping);
}
}
/// A complete request.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Complete {
/// The block hash to request the epoch signal for.
pub block_hash: H256,
}
/// The output of a request for an epoch signal.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Response {
/// The requested epoch signal.
pub signal: Bytes,
}
impl super::ResponseLike for Response {
/// Fill reusable outputs by providing them to the function.
fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) {}
}
impl Decodable for Response {
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
Ok(Response {
signal: rlp.as_val()?,
})
}
}
impl Encodable for Response {
fn rlp_append(&self, s: &mut RlpStream) {
s.append(&self.signal);
}
}
}
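The removed module follows the same incomplete-to-complete pattern as every other request type: a field is either a literal scalar or a back-reference to an earlier request's output, `fill` resolves back-references through an oracle, and only a fully resolved request can be completed. A minimal standalone sketch of that mechanism with a toy Field and hash type:

type Hash = [u8; 32];

// A request field is either known up front or borrowed from a prior response.
#[derive(Debug, Clone, PartialEq)]
enum Field {
    Scalar(Hash),
    BackReference { req: usize, idx: usize },
}

#[derive(Debug)]
struct IncompleteSignalRequest {
    block_hash: Field,
}

impl IncompleteSignalRequest {
    // Resolve back-references through an oracle that can look up earlier outputs.
    fn fill<F>(&mut self, oracle: F)
    where
        F: Fn(usize, usize) -> Option<Hash>,
    {
        if let Field::BackReference { req, idx } = self.block_hash {
            if let Some(hash) = oracle(req, idx) {
                self.block_hash = Field::Scalar(hash);
            }
        }
    }

    // Only a fully resolved request can be completed and put on the wire.
    fn complete(self) -> Result<Hash, &'static str> {
        match self.block_hash {
            Field::Scalar(hash) => Ok(hash),
            Field::BackReference { .. } => Err("unresolved back-reference"),
        }
    }
}

fn main() {
    let mut req = IncompleteSignalRequest {
        block_hash: Field::BackReference { req: 0, idx: 0 },
    };
    // Pretend request 0 produced a header hash as its first output.
    req.fill(|_req, _idx| Some([0xab; 32]));
    assert_eq!(req.complete().unwrap(), [0xab; 32]);
}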
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@ -1923,22 +1797,4 @@ mod tests {
let raw = ::rlp::encode_list(&reqs); let raw = ::rlp::encode_list(&reqs);
assert_eq!(::rlp::decode_list::<Response>(&raw), reqs); assert_eq!(::rlp::decode_list::<Response>(&raw), reqs);
} }
#[test]
fn epoch_signal_roundtrip() {
let req = IncompleteSignalRequest {
block_hash: Field::Scalar(Default::default()),
};
let full_req = Request::Signal(req.clone());
let res = SignalResponse {
signal: vec![1, 2, 3, 4, 5, 6, 7, 6, 5, 4],
};
let full_res = Response::Signal(res.clone());
check_roundtrip(req);
check_roundtrip(full_req);
check_roundtrip(res);
check_roundtrip(full_res);
}
} }

View File

@ -42,7 +42,7 @@ use client::ancient_import::AncientVerifier;
use client::Error as ClientError; use client::Error as ClientError;
use client::{ use client::{
BlockId, TransactionId, UncleId, TraceId, ClientConfig, BlockChainClient, BlockId, TransactionId, UncleId, TraceId, ClientConfig, BlockChainClient,
MiningBlockChainClient, TraceFilter, CallAnalytics, BlockImportError, Mode, MiningBlockChainClient, EngineClient, TraceFilter, CallAnalytics, BlockImportError, Mode,
ChainNotify, PruningInfo, ProvingBlockChainClient, ChainNotify, PruningInfo, ProvingBlockChainClient,
}; };
use encoded; use encoded;
@ -770,7 +770,7 @@ impl Client {
res.map(|(output, proof)| (output, proof.into_iter().map(|x| x.into_vec()).collect())) res.map(|(output, proof)| (output, proof.into_iter().map(|x| x.into_vec()).collect()))
}; };
match with_state.generate_proof(&call) { match (with_state)(&call) {
Ok(proof) => proof, Ok(proof) => proof,
Err(e) => { Err(e) => {
warn!(target: "client", "Failed to generate transition proof for block {}: {}", hash, e); warn!(target: "client", "Failed to generate transition proof for block {}: {}", hash, e);
@ -1936,7 +1936,7 @@ impl MiningBlockChainClient for Client {
} }
} }
impl super::traits::EngineClient for Client { impl EngineClient for Client {
fn update_sealing(&self) { fn update_sealing(&self) {
self.miner.update_sealing(self) self.miner.update_sealing(self)
} }
@ -1954,22 +1954,6 @@ impl super::traits::EngineClient for Client {
fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition> { fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition> {
self.chain.read().epoch_transition_for(parent_hash) self.chain.read().epoch_transition_for(parent_hash)
} }
fn chain_info(&self) -> BlockChainInfo {
BlockChainClient::chain_info(self)
}
fn call_contract(&self, id: BlockId, address: Address, data: Bytes) -> Result<Bytes, String> {
BlockChainClient::call_contract(self, id, address, data)
}
fn transact_contract(&self, address: Address, data: Bytes) -> Result<TransactionImportResult, EthcoreError> {
BlockChainClient::transact_contract(self, address, data)
}
fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
BlockChainClient::block_number(self, id)
}
} }
impl ProvingBlockChainClient for Client { impl ProvingBlockChainClient for Client {
@ -1984,30 +1968,27 @@ impl ProvingBlockChainClient for Client {
} }
fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<(Bytes, Vec<DBValue>)> { fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<(Bytes, Vec<DBValue>)> {
let (header, mut env_info) = match (self.block_header(id), self.env_info(id)) { let (state, mut env_info) = match (self.state_at(id), self.env_info(id)) {
(Some(s), Some(e)) => (s, e), (Some(s), Some(e)) => (s, e),
_ => return None, _ => return None,
}; };
env_info.gas_limit = transaction.gas.clone(); env_info.gas_limit = transaction.gas.clone();
let mut jdb = self.state_db.lock().journal_db().boxed_clone(); let mut jdb = self.state_db.lock().journal_db().boxed_clone();
let backend = state::backend::Proving::new(jdb.as_hashdb_mut());
state::prove_transaction( let mut state = state.replace_backend(backend);
jdb.as_hashdb_mut(), let options = TransactOptions::with_no_tracing().dont_check_nonce();
header.state_root().clone(), let res = Executive::new(&mut state, &env_info, &*self.engine).transact(&transaction, options);
&transaction,
&*self.engine,
&env_info,
self.factories.clone(),
false,
)
}
match res {
fn epoch_signal(&self, hash: H256) -> Option<Vec<u8>> { Err(ExecutionError::Internal(_)) => None,
// pending transitions are never deleted, and do not contain Err(e) => {
// finality proofs by definition. trace!(target: "client", "Proved call failed: {}", e);
self.chain.read().get_pending_transition(hash).map(|pending| pending.proof) Some((Vec::new(), state.drop().1.extract_proof()))
}
Ok(res) => Some((res.output, state.drop().1.extract_proof())),
}
} }
} }
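The reverted prove_transaction relies on a proving backend: every state item touched while executing the transaction is recorded, and the recorded items are returned as the proof next to the call output. A simplified standalone sketch of that recording idea over a plain key-value map (a toy stand-in, not the real state::backend::Proving):

use std::cell::RefCell;
use std::collections::HashMap;

// A toy state database.
type Db = HashMap<&'static str, u64>;

// Wraps a database and records every value read through it.
struct Recording<'a> {
    db: &'a Db,
    proof: RefCell<Vec<(&'static str, u64)>>,
}

impl<'a> Recording<'a> {
    fn new(db: &'a Db) -> Self {
        Recording { db, proof: RefCell::new(Vec::new()) }
    }

    fn get(&self, key: &'static str) -> Option<u64> {
        let value = self.db.get(key).copied();
        if let Some(v) = value {
            // Remember what execution touched; this becomes the "proof".
            self.proof.borrow_mut().push((key, v));
        }
        value
    }

    fn into_proof(self) -> Vec<(&'static str, u64)> {
        self.proof.into_inner()
    }
}

fn main() {
    let mut db = Db::new();
    db.insert("sender.balance", 100);
    db.insert("receiver.balance", 5);
    db.insert("unrelated", 42);

    let backend = Recording::new(&db);
    // "Execute" a transfer: only the entries it reads end up in the proof.
    let sender = backend.get("sender.balance").unwrap();
    let _receiver = backend.get("receiver.balance").unwrap();
    let output = sender - 10;
    let proof = backend.into_proof();

    assert_eq!(output, 90);
    assert_eq!(proof.len(), 2); // "unrelated" was never read, so it is not proved.
}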

View File

@ -33,7 +33,7 @@ use devtools::*;
use transaction::{Transaction, LocalizedTransaction, PendingTransaction, SignedTransaction, Action}; use transaction::{Transaction, LocalizedTransaction, PendingTransaction, SignedTransaction, Action};
use blockchain::TreeRoute; use blockchain::TreeRoute;
use client::{ use client::{
BlockChainClient, MiningBlockChainClient, BlockChainInfo, BlockStatus, BlockId, BlockChainClient, MiningBlockChainClient, EngineClient, BlockChainInfo, BlockStatus, BlockId,
TransactionId, UncleId, TraceId, TraceFilter, LastHashes, CallAnalytics, BlockImportError, TransactionId, UncleId, TraceId, TraceFilter, LastHashes, CallAnalytics, BlockImportError,
ProvingBlockChainClient, ProvingBlockChainClient,
}; };
@ -801,13 +801,9 @@ impl ProvingBlockChainClient for TestBlockChainClient {
fn prove_transaction(&self, _: SignedTransaction, _: BlockId) -> Option<(Bytes, Vec<DBValue>)> { fn prove_transaction(&self, _: SignedTransaction, _: BlockId) -> Option<(Bytes, Vec<DBValue>)> {
None None
} }
fn epoch_signal(&self, _: H256) -> Option<Vec<u8>> {
None
}
} }
impl super::traits::EngineClient for TestBlockChainClient { impl EngineClient for TestBlockChainClient {
fn update_sealing(&self) { fn update_sealing(&self) {
self.miner.update_sealing(self) self.miner.update_sealing(self)
} }
@ -823,20 +819,4 @@ impl super::traits::EngineClient for TestBlockChainClient {
fn epoch_transition_for(&self, _block_hash: H256) -> Option<::engines::EpochTransition> { fn epoch_transition_for(&self, _block_hash: H256) -> Option<::engines::EpochTransition> {
None None
} }
fn chain_info(&self) -> BlockChainInfo {
BlockChainClient::chain_info(self)
}
fn call_contract(&self, id: BlockId, address: Address, data: Bytes) -> Result<Bytes, String> {
BlockChainClient::call_contract(self, id, address, data)
}
fn transact_contract(&self, address: Address, data: Bytes) -> Result<TransactionImportResult, EthcoreError> {
BlockChainClient::transact_contract(self, address, data)
}
fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
BlockChainClient::block_number(self, id)
}
} }

View File

@ -317,7 +317,7 @@ pub trait MiningBlockChainClient: BlockChainClient {
} }
/// Client facilities used by internally sealing Engines. /// Client facilities used by internally sealing Engines.
pub trait EngineClient: Sync + Send { pub trait EngineClient: MiningBlockChainClient {
/// Make a new block and seal it. /// Make a new block and seal it.
fn update_sealing(&self); fn update_sealing(&self);
@ -333,17 +333,6 @@ pub trait EngineClient: Sync + Send {
/// ///
/// The block corresponding the the parent hash must be stored already. /// The block corresponding the the parent hash must be stored already.
fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition>; fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition>;
/// Get block chain info.
fn chain_info(&self) -> BlockChainInfo;
/// Like `call`, but with various defaults. Designed to be used for calling contracts.
fn call_contract(&self, id: BlockId, address: Address, data: Bytes) -> Result<Bytes, String>;
/// Import a transaction: used for misbehaviour reporting.
fn transact_contract(&self, address: Address, data: Bytes) -> Result<TransactionImportResult, EthcoreError>;
fn block_number(&self, id: BlockId) -> Option<BlockNumber>;
} }
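Whichever shape the trait takes, engines only ever hold a Weak handle to the client and upgrade it on demand, doing nothing if the client has already gone away. A minimal sketch of that register/upgrade pattern with a toy client type:

use std::sync::{Arc, RwLock, Weak};

// Stand-in for the client facilities an engine needs.
trait EngineClient: Send + Sync {
    fn update_sealing(&self);
}

struct DummyClient;
impl EngineClient for DummyClient {
    fn update_sealing(&self) {
        println!("sealing update requested");
    }
}

// The engine keeps a weak handle so it never keeps the client alive on its own.
struct Engine {
    client: RwLock<Option<Weak<dyn EngineClient>>>,
}

impl Engine {
    fn register_client(&self, client: Weak<dyn EngineClient>) {
        *self.client.write().unwrap() = Some(client);
    }

    fn step(&self) {
        // Upgrade before use; a dropped client is silently ignored.
        if let Some(client) = self.client.read().unwrap().as_ref().and_then(|w| w.upgrade()) {
            client.update_sealing();
        }
    }
}

fn main() {
    let engine = Engine { client: RwLock::new(None) };
    let client: Arc<dyn EngineClient> = Arc::new(DummyClient);
    engine.register_client(Arc::downgrade(&client));
    engine.step(); // requests a sealing update
    drop(client);
    engine.step(); // no-op once the client is gone
}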
/// Extended client interface for providing proofs of the state. /// Extended client interface for providing proofs of the state.
@ -363,7 +352,4 @@ pub trait ProvingBlockChainClient: BlockChainClient {
/// Returns the output of the call and a vector of database items necessary /// Returns the output of the call and a vector of database items necessary
/// to reproduce it. /// to reproduce it.
fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<(Bytes, Vec<DBValue>)>; fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<(Bytes, Vec<DBValue>)>;
/// Get an epoch change signal by block hash.
fn epoch_signal(&self, hash: H256) -> Option<Vec<u8>>;
} }

View File

@ -25,7 +25,7 @@ use std::cmp;
use account_provider::AccountProvider; use account_provider::AccountProvider;
use block::*; use block::*;
use builtin::Builtin; use builtin::Builtin;
use client::EngineClient; use client::{Client, EngineClient};
use engines::{Call, Engine, Seal, EngineError, ConstructedVerifier}; use engines::{Call, Engine, Seal, EngineError, ConstructedVerifier};
use error::{Error, TransactionError, BlockError}; use error::{Error, TransactionError, BlockError};
use ethjson; use ethjson;
@ -646,8 +646,6 @@ impl Engine for AuthorityRound {
(&active_set as &_, epoch_manager.epoch_transition_number) (&active_set as &_, epoch_manager.epoch_transition_number)
}; };
// always report with "self.validators" so that the report actually gets
// to the contract.
let report = |report| match report { let report = |report| match report {
Report::Benign(address, block_number) => Report::Benign(address, block_number) =>
self.validators.report_benign(&address, set_number, block_number), self.validators.report_benign(&address, set_number, block_number),
@ -740,18 +738,13 @@ impl Engine for AuthorityRound {
{ {
if let Ok(finalized) = epoch_manager.finality_checker.push_hash(chain_head.hash(), *chain_head.author()) { if let Ok(finalized) = epoch_manager.finality_checker.push_hash(chain_head.hash(), *chain_head.author()) {
let mut finalized = finalized.into_iter(); let mut finalized = finalized.into_iter();
while let Some(finalized_hash) = finalized.next() { while let Some(hash) = finalized.next() {
if let Some(pending) = transition_store(finalized_hash) { if let Some(pending) = transition_store(hash) {
let finality_proof = ::std::iter::once(finalized_hash) let finality_proof = ::std::iter::once(hash)
.chain(finalized) .chain(finalized)
.chain(epoch_manager.finality_checker.unfinalized_hashes()) .chain(epoch_manager.finality_checker.unfinalized_hashes())
.map(|h| if h == chain_head.hash() { .map(|hash| chain(hash)
// chain closure only stores ancestry, but the chain head is also .expect("these headers fetched before when constructing finality checker; qed"))
// unfinalized.
chain_head.clone()
} else {
chain(h).expect("these headers fetched before when constructing finality checker; qed")
})
.collect::<Vec<Header>>(); .collect::<Vec<Header>>();
// this gives us the block number for `hash`, assuming it's in the ancestry. // this gives us the block number for `hash`, assuming it's in the ancestry.
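In the removed branch, the finality proof is assembled from the newly finalized hash, the remaining finalized hashes, and the still-unfinalized ancestors, each resolved to a header, with the chain head substituted directly because the ancestry lookup does not cover it. A rough standalone sketch of that assembly over string hashes:

// Toy header: just its own hash.
#[derive(Debug, Clone, PartialEq)]
struct Header {
    hash: &'static str,
}

// Assemble a finality proof: the finalized hash, the remaining newly finalized
// hashes, then the still-unfinalized ancestors, each resolved to a header. The
// chain head is substituted directly because the ancestry lookup omits it.
fn finality_proof<F>(
    finalized_hash: &'static str,
    rest_finalized: Vec<&'static str>,
    unfinalized: Vec<&'static str>,
    chain_head: Header,
    lookup: F,
) -> Vec<Header>
where
    F: Fn(&'static str) -> Option<Header>,
{
    std::iter::once(finalized_hash)
        .chain(rest_finalized)
        .chain(unfinalized)
        .map(|h| {
            if h == chain_head.hash {
                chain_head.clone()
            } else {
                lookup(h).expect("ancestry headers were fetched earlier")
            }
        })
        .collect()
}

fn main() {
    let proof = finality_proof(
        "A",
        vec![],
        vec!["B", "C"],
        Header { hash: "C" },
        |h| Some(Header { hash: h }),
    );
    assert_eq!(proof.len(), 3);
}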
@ -815,9 +808,9 @@ impl Engine for AuthorityRound {
Ok(()) Ok(())
} }
fn register_client(&self, client: Weak<EngineClient>) { fn register_client(&self, client: Weak<Client>) {
*self.client.write() = Some(client.clone()); *self.client.write() = Some(client.clone());
self.validators.register_client(client); self.validators.register_contract(client);
} }
fn set_signer(&self, ap: Arc<AccountProvider>, address: Address, password: String) { fn set_signer(&self, ap: Arc<AccountProvider>, address: Address, password: String) {

View File

@ -33,7 +33,7 @@ use error::{BlockError, Error};
use evm::Schedule; use evm::Schedule;
use ethjson; use ethjson;
use header::{Header, BlockNumber}; use header::{Header, BlockNumber};
use client::EngineClient; use client::Client;
use semantic_version::SemanticVersion; use semantic_version::SemanticVersion;
use super::signer::EngineSigner; use super::signer::EngineSigner;
use super::validator_set::{ValidatorSet, SimpleList, new_validator_set}; use super::validator_set::{ValidatorSet, SimpleList, new_validator_set};
@ -236,8 +236,8 @@ impl Engine for BasicAuthority {
} }
} }
fn register_client(&self, client: Weak<EngineClient>) { fn register_client(&self, client: Weak<Client>) {
self.validators.register_client(client); self.validators.register_contract(client);
} }
fn set_signer(&self, ap: Arc<AccountProvider>, address: Address, password: String) { fn set_signer(&self, ap: Arc<AccountProvider>, address: Address, password: String) {

View File

@ -44,7 +44,7 @@ use self::epoch::PendingTransition;
use account_provider::AccountProvider; use account_provider::AccountProvider;
use block::ExecutedBlock; use block::ExecutedBlock;
use builtin::Builtin; use builtin::Builtin;
use client::EngineClient; use client::Client;
use vm::{EnvInfo, LastHashes, Schedule, CreateContractAddress}; use vm::{EnvInfo, LastHashes, Schedule, CreateContractAddress};
use error::Error; use error::Error;
use header::{Header, BlockNumber}; use header::{Header, BlockNumber};
@ -123,22 +123,12 @@ pub type Headers<'a> = Fn(H256) -> Option<Header> + 'a;
/// Type alias for a function we can query pending transitions by block hash through. /// Type alias for a function we can query pending transitions by block hash through.
pub type PendingTransitionStore<'a> = Fn(H256) -> Option<PendingTransition> + 'a; pub type PendingTransitionStore<'a> = Fn(H256) -> Option<PendingTransition> + 'a;
/// Proof dependent on state.
pub trait StateDependentProof: Send + Sync {
/// Generate a proof, given the state.
fn generate_proof(&self, caller: &Call) -> Result<Vec<u8>, String>;
/// Check a proof generated elsewhere (potentially by a peer).
// `engine` needed to check state proofs, while really this should
// just be state machine params.
fn check_proof(&self, engine: &Engine, proof: &[u8]) -> Result<(), String>;
}
/// Proof generated on epoch change. /// Proof generated on epoch change.
pub enum Proof { pub enum Proof {
/// Known proof (extracted from signal) /// Known proof (extracted from signal)
Known(Vec<u8>), Known(Vec<u8>),
/// State dependent proof. /// Extract proof from caller.
WithState(Arc<StateDependentProof>), WithState(Box<Fn(&Call) -> Result<Vec<u8>, String>>),
} }
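After the revert an epoch-change proof is either raw bytes lifted straight from the signal, or a closure that generates the proof on demand through a state call; the intermediate StateDependentProof trait object (which also carried a check method) is what disappears here. A small sketch of consuming the two reverted variants, with a toy Call type:

// Toy stand-ins: a "call" takes contract input bytes and returns output bytes.
type Call = dyn Fn(Vec<u8>) -> Vec<u8>;

enum Proof {
    // Proof already known, extracted from the signal itself.
    Known(Vec<u8>),
    // Proof must be generated by querying state through the provided caller.
    WithState(Box<dyn Fn(&Call) -> Result<Vec<u8>, String>>),
}

fn resolve(proof: Proof, caller: &Call) -> Result<Vec<u8>, String> {
    match proof {
        Proof::Known(bytes) => Ok(bytes),
        Proof::WithState(generate) => generate(caller),
    }
}

// A generator that simply forwards some input to the caller.
fn from_state(call: &Call) -> Result<Vec<u8>, String> {
    Ok(call(vec![9, 8]))
}

fn main() {
    let caller: Box<Call> = Box::new(|input: Vec<u8>| -> Vec<u8> {
        input.iter().rev().cloned().collect()
    });

    let known = Proof::Known(vec![1, 2, 3]);
    assert_eq!(resolve(known, &*caller).unwrap(), vec![1, 2, 3]);

    let with_state = Proof::WithState(Box::new(from_state));
    assert_eq!(resolve(with_state, &*caller).unwrap(), vec![8, 9]);
}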
/// Generated epoch verifier. /// Generated epoch verifier.
@ -370,7 +360,7 @@ pub trait Engine : Sync + Send {
fn sign(&self, _hash: H256) -> Result<Signature, Error> { unimplemented!() } fn sign(&self, _hash: H256) -> Result<Signature, Error> { unimplemented!() }
/// Add Client which can be used for sealing, querying the state and sending messages. /// Add Client which can be used for sealing, querying the state and sending messages.
fn register_client(&self, _client: Weak<EngineClient>) {} fn register_client(&self, _client: Weak<Client>) {}
/// Trigger next step of the consensus engine. /// Trigger next step of the consensus engine.
fn step(&self) {} fn step(&self) {}

View File

@ -34,7 +34,7 @@ use bigint::prelude::{U128, U256};
use bigint::hash::{H256, H520}; use bigint::hash::{H256, H520};
use parking_lot::RwLock; use parking_lot::RwLock;
use util::*; use util::*;
use client::EngineClient; use client::{Client, EngineClient};
use error::{Error, BlockError}; use error::{Error, BlockError};
use header::{Header, BlockNumber}; use header::{Header, BlockNumber};
use builtin::Builtin; use builtin::Builtin;
@ -570,35 +570,18 @@ impl Engine for Tendermint {
Ok(()) Ok(())
} }
/// Verify gas limit. /// Verify validators and gas limit.
fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> { fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
if header.number() == 0 { if header.number() == 0 {
return Err(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() }).into()); return Err(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() }).into());
} }
let gas_limit_divisor = self.params().gas_limit_bound_divisor;
let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor;
let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor;
if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas {
self.validators.report_malicious(header.author(), header.number(), header.number(), Default::default());
return Err(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() }).into());
}
Ok(())
}
fn verify_block_external(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
if let Ok(proposal) = ConsensusMessage::new_proposal(header) { if let Ok(proposal) = ConsensusMessage::new_proposal(header) {
let proposer = proposal.verify()?; let proposer = proposal.verify()?;
if !self.is_authority(&proposer) { if !self.is_authority(&proposer) {
return Err(EngineError::NotAuthorized(proposer).into()); return Err(EngineError::NotAuthorized(proposer).into());
} }
self.check_view_proposer( self.check_view_proposer(header.parent_hash(), proposal.vote_step.height, proposal.vote_step.view, &proposer)?;
header.parent_hash(),
proposal.vote_step.height,
proposal.vote_step.view,
&proposer
).map_err(Into::into)
} else { } else {
let vote_step = VoteStep::new(header.number() as usize, consensus_view(header)?, Step::Precommit); let vote_step = VoteStep::new(header.number() as usize, consensus_view(header)?, Step::Precommit);
let precommit_hash = message_hash(vote_step.clone(), header.bare_hash()); let precommit_hash = message_hash(vote_step.clone(), header.bare_hash());
@ -624,8 +607,18 @@ impl Engine for Tendermint {
} }
} }
self.check_above_threshold(origins.len()).map_err(Into::into) self.check_above_threshold(origins.len())?
} }
let gas_limit_divisor = self.params().gas_limit_bound_divisor;
let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor;
let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor;
if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas {
self.validators.report_malicious(header.author(), header.number(), header.number(), Default::default());
return Err(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() }).into());
}
Ok(())
} }
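The gas-limit rule bounds each block by parent ± parent/divisor, with both bounds exclusive since the check rejects on <= min or >= max. As an illustration (assuming a bound divisor of 1024; the real value comes from the chain spec), a parent gas limit of 8,000,000 admits anything from 7,992,189 up to 8,007,811:

// Compute the exclusive gas-limit bounds used by the check above.
fn gas_bounds(parent_gas_limit: u64, divisor: u64) -> (u64, u64) {
    let delta = parent_gas_limit / divisor;
    (parent_gas_limit - delta, parent_gas_limit + delta)
}

fn accepted(header_gas_limit: u64, parent_gas_limit: u64, divisor: u64) -> bool {
    let (min_gas, max_gas) = gas_bounds(parent_gas_limit, divisor);
    // The engine rejects <= min_gas or >= max_gas, so both bounds are exclusive.
    header_gas_limit > min_gas && header_gas_limit < max_gas
}

fn main() {
    assert_eq!(gas_bounds(8_000_000, 1024), (7_992_188, 8_007_812));
    assert!(accepted(8_000_000, 8_000_000, 1024));
    assert!(!accepted(7_992_188, 8_000_000, 1024)); // equal to min_gas: rejected
}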
fn signals_epoch_end(&self, header: &Header, block: Option<&[u8]>, receipts: Option<&[::receipt::Receipt]>) fn signals_epoch_end(&self, header: &Header, block: Option<&[u8]>, receipts: Option<&[::receipt::Receipt]>)
@ -760,12 +753,13 @@ impl Engine for Tendermint {
self.to_step(next_step); self.to_step(next_step);
} }
fn register_client(&self, client: Weak<EngineClient>) { fn register_client(&self, client: Weak<Client>) {
use client::BlockChainClient;
if let Some(c) = client.upgrade() { if let Some(c) = client.upgrade() {
self.height.store(c.chain_info().best_block_number as usize + 1, AtomicOrdering::SeqCst); self.height.store(c.chain_info().best_block_number as usize + 1, AtomicOrdering::SeqCst);
} }
*self.client.write() = Some(client.clone()); *self.client.write() = Some(client.clone());
self.validators.register_client(client); self.validators.register_contract(client);
} }
} }
@ -893,14 +887,14 @@ mod tests {
let seal = proposal_seal(&tap, &header, 0); let seal = proposal_seal(&tap, &header, 0);
header.set_seal(seal); header.set_seal(seal);
// Good proposer. // Good proposer.
assert!(engine.verify_block_external(&header, None).is_ok()); assert!(engine.verify_block_family(&header, &parent_header, None).is_ok());
let validator = insert_and_unlock(&tap, "0"); let validator = insert_and_unlock(&tap, "0");
header.set_author(validator); header.set_author(validator);
let seal = proposal_seal(&tap, &header, 0); let seal = proposal_seal(&tap, &header, 0);
header.set_seal(seal); header.set_seal(seal);
// Bad proposer. // Bad proposer.
match engine.verify_block_external(&header, None) { match engine.verify_block_family(&header, &parent_header, None) {
Err(Error::Engine(EngineError::NotProposer(_))) => {}, Err(Error::Engine(EngineError::NotProposer(_))) => {},
_ => panic!(), _ => panic!(),
} }
@ -910,7 +904,7 @@ mod tests {
let seal = proposal_seal(&tap, &header, 0); let seal = proposal_seal(&tap, &header, 0);
header.set_seal(seal); header.set_seal(seal);
// Not authority. // Not authority.
match engine.verify_block_external(&header, None) { match engine.verify_block_family(&header, &parent_header, None) {
Err(Error::Engine(EngineError::NotAuthorized(_))) => {}, Err(Error::Engine(EngineError::NotAuthorized(_))) => {},
_ => panic!(), _ => panic!(),
}; };
@ -940,7 +934,7 @@ mod tests {
header.set_seal(seal.clone()); header.set_seal(seal.clone());
// One good signature is not enough. // One good signature is not enough.
match engine.verify_block_external(&header, None) { match engine.verify_block_family(&header, &parent_header, None) {
Err(Error::Engine(EngineError::BadSealFieldSize(_))) => {}, Err(Error::Engine(EngineError::BadSealFieldSize(_))) => {},
_ => panic!(), _ => panic!(),
} }
@ -951,7 +945,7 @@ mod tests {
seal[2] = ::rlp::encode_list(&vec![H520::from(signature1.clone()), H520::from(signature0.clone())]).into_vec(); seal[2] = ::rlp::encode_list(&vec![H520::from(signature1.clone()), H520::from(signature0.clone())]).into_vec();
header.set_seal(seal.clone()); header.set_seal(seal.clone());
assert!(engine.verify_block_external(&header, None).is_ok()); assert!(engine.verify_block_family(&header, &parent_header, None).is_ok());
let bad_voter = insert_and_unlock(&tap, "101"); let bad_voter = insert_and_unlock(&tap, "101");
let bad_signature = tap.sign(bad_voter, None, keccak(vote_info)).unwrap(); let bad_signature = tap.sign(bad_voter, None, keccak(vote_info)).unwrap();
@ -960,7 +954,7 @@ mod tests {
header.set_seal(seal); header.set_seal(seal);
// One good and one bad signature. // One good and one bad signature.
match engine.verify_block_external(&header, None) { match engine.verify_block_family(&header, &parent_header, None) {
Err(Error::Engine(EngineError::NotAuthorized(_))) => {}, Err(Error::Engine(EngineError::NotAuthorized(_))) => {},
_ => panic!(), _ => panic!(),
}; };
@ -1006,7 +1000,7 @@ mod tests {
let client = generate_dummy_client(0); let client = generate_dummy_client(0);
let notify = Arc::new(TestNotify::default()); let notify = Arc::new(TestNotify::default());
client.add_notify(notify.clone()); client.add_notify(notify.clone());
engine.register_client(Arc::downgrade(&client) as _); engine.register_client(Arc::downgrade(&client));
let prevote_current = vote(engine.as_ref(), |mh| tap.sign(v0, None, mh).map(H520::from), h, r, Step::Prevote, proposal); let prevote_current = vote(engine.as_ref(), |mh| tap.sign(v0, None, mh).map(H520::from), h, r, Step::Prevote, proposal);
@ -1024,6 +1018,7 @@ mod tests {
fn seal_submission() { fn seal_submission() {
use ethkey::{Generator, Random}; use ethkey::{Generator, Random};
use transaction::{Transaction, Action}; use transaction::{Transaction, Action};
use client::BlockChainClient;
let tap = Arc::new(AccountProvider::transient_provider()); let tap = Arc::new(AccountProvider::transient_provider());
// Accounts for signing votes. // Accounts for signing votes.
@ -1036,7 +1031,7 @@ mod tests {
let notify = Arc::new(TestNotify::default()); let notify = Arc::new(TestNotify::default());
client.add_notify(notify.clone()); client.add_notify(notify.clone());
engine.register_client(Arc::downgrade(&client) as _); engine.register_client(Arc::downgrade(&client));
let keypair = Random.generate().unwrap(); let keypair = Random.generate().unwrap();
let transaction = Transaction { let transaction = Transaction {

View File

@ -25,7 +25,7 @@ use util::*;
use futures::Future; use futures::Future;
use native_contracts::ValidatorReport as Provider; use native_contracts::ValidatorReport as Provider;
use client::EngineClient; use client::{Client, BlockChainClient};
use engines::{Call, Engine}; use engines::{Call, Engine};
use header::{Header, BlockNumber}; use header::{Header, BlockNumber};
@ -36,7 +36,7 @@ use super::safe_contract::ValidatorSafeContract;
pub struct ValidatorContract { pub struct ValidatorContract {
validators: ValidatorSafeContract, validators: ValidatorSafeContract,
provider: Provider, provider: Provider,
client: RwLock<Option<Weak<EngineClient>>>, // TODO [keorn]: remove client: RwLock<Option<Weak<Client>>>, // TODO [keorn]: remove
} }
impl ValidatorContract { impl ValidatorContract {
@ -120,8 +120,8 @@ impl ValidatorSet for ValidatorContract {
} }
} }
fn register_client(&self, client: Weak<EngineClient>) { fn register_contract(&self, client: Weak<Client>) {
self.validators.register_client(client.clone()); self.validators.register_contract(client.clone());
*self.client.write() = Some(client); *self.client.write() = Some(client);
} }
} }
@ -148,7 +148,7 @@ mod tests {
fn fetches_validators() { fn fetches_validators() {
let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_contract, None); let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_contract, None);
let vc = Arc::new(ValidatorContract::new("0000000000000000000000000000000000000005".parse::<Address>().unwrap())); let vc = Arc::new(ValidatorContract::new("0000000000000000000000000000000000000005".parse::<Address>().unwrap()));
vc.register_client(Arc::downgrade(&client) as _); vc.register_contract(Arc::downgrade(&client));
let last_hash = client.best_block_header().hash(); let last_hash = client.best_block_header().hash();
assert!(vc.contains(&last_hash, &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e".parse::<Address>().unwrap())); assert!(vc.contains(&last_hash, &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e".parse::<Address>().unwrap()));
assert!(vc.contains(&last_hash, &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1".parse::<Address>().unwrap())); assert!(vc.contains(&last_hash, &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1".parse::<Address>().unwrap()));
@ -159,7 +159,7 @@ mod tests {
let tap = Arc::new(AccountProvider::transient_provider()); let tap = Arc::new(AccountProvider::transient_provider());
let v1 = tap.insert_account(keccak("1").into(), "").unwrap(); let v1 = tap.insert_account(keccak("1").into(), "").unwrap();
let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_contract, Some(tap.clone())); let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_contract, Some(tap.clone()));
client.engine().register_client(Arc::downgrade(&client) as _); client.engine().register_client(Arc::downgrade(&client));
let validator_contract = "0000000000000000000000000000000000000005".parse::<Address>().unwrap(); let validator_contract = "0000000000000000000000000000000000000005".parse::<Address>().unwrap();
// Make sure reporting can be done. // Make sure reporting can be done.

View File

@ -28,7 +28,7 @@ use ids::BlockId;
use bigint::hash::H256; use bigint::hash::H256;
use util::{Bytes, Address}; use util::{Bytes, Address};
use ethjson::spec::ValidatorSet as ValidatorSpec; use ethjson::spec::ValidatorSet as ValidatorSpec;
use client::EngineClient; use client::Client;
use header::{Header, BlockNumber}; use header::{Header, BlockNumber};
#[cfg(test)] #[cfg(test)]
@ -142,5 +142,5 @@ pub trait ValidatorSet: Send + Sync {
/// Notifies about benign misbehaviour. /// Notifies about benign misbehaviour.
fn report_benign(&self, _validator: &Address, _set_block: BlockNumber, _block: BlockNumber) {} fn report_benign(&self, _validator: &Address, _set_block: BlockNumber, _block: BlockNumber) {}
/// Allows blockchain state access. /// Allows blockchain state access.
fn register_client(&self, _client: Weak<EngineClient>) {} fn register_contract(&self, _client: Weak<Client>) {}
} }
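register_contract defaults to a no-op so static validator lists can ignore it, while composite sets forward the same weak client handle to every child, as the Multi set in the next file does. A bare-bones sketch of that dispatch (using &mut self for brevity where the real sets use interior mutability):

use std::sync::{Arc, Weak};

// Stand-in for the client handle a validator set may need for state access.
struct Client;

trait ValidatorSet {
    // Default is a no-op: static lists never need blockchain state.
    fn register_contract(&mut self, _client: Weak<Client>) {}
}

struct SimpleList;
impl ValidatorSet for SimpleList {}

// A composite set hands the same weak handle to each of its children.
struct Multi {
    sets: Vec<Box<dyn ValidatorSet>>,
}

impl ValidatorSet for Multi {
    fn register_contract(&mut self, client: Weak<Client>) {
        for set in &mut self.sets {
            set.register_contract(client.clone());
        }
    }
}

fn main() {
    let client = Arc::new(Client);
    let mut multi = Multi { sets: Vec::new() };
    multi.sets.push(Box::new(SimpleList));
    multi.sets.push(Box::new(SimpleList));
    multi.register_contract(Arc::downgrade(&client));
}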

View File

@ -24,7 +24,7 @@ use parking_lot::RwLock;
use util::{Bytes, Address}; use util::{Bytes, Address};
use ids::BlockId; use ids::BlockId;
use header::{BlockNumber, Header}; use header::{BlockNumber, Header};
use client::EngineClient; use client::{Client, BlockChainClient};
use super::{SystemCall, ValidatorSet}; use super::{SystemCall, ValidatorSet};
type BlockNumberLookup = Box<Fn(BlockId) -> Result<BlockNumber, String> + Send + Sync + 'static>; type BlockNumberLookup = Box<Fn(BlockId) -> Result<BlockNumber, String> + Send + Sync + 'static>;
@ -131,9 +131,9 @@ impl ValidatorSet for Multi {
self.correct_set_by_number(set_block).1.report_benign(validator, set_block, block); self.correct_set_by_number(set_block).1.report_benign(validator, set_block, block);
} }
fn register_client(&self, client: Weak<EngineClient>) { fn register_contract(&self, client: Weak<Client>) {
for set in self.sets.values() { for set in self.sets.values() {
set.register_client(client.clone()); set.register_contract(client.clone());
} }
*self.block_number.write() = Box::new(move |id| client *self.block_number.write() = Box::new(move |id| client
.upgrade() .upgrade()
@ -148,7 +148,7 @@ mod tests {
use std::collections::BTreeMap; use std::collections::BTreeMap;
use hash::keccak; use hash::keccak;
use account_provider::AccountProvider; use account_provider::AccountProvider;
use client::BlockChainClient; use client::{BlockChainClient, EngineClient};
use engines::EpochChange; use engines::EpochChange;
use engines::validator_set::ValidatorSet; use engines::validator_set::ValidatorSet;
use ethkey::Secret; use ethkey::Secret;
@ -170,7 +170,7 @@ mod tests {
let v0 = tap.insert_account(s0.clone(), "").unwrap(); let v0 = tap.insert_account(s0.clone(), "").unwrap();
let v1 = tap.insert_account(keccak("1").into(), "").unwrap(); let v1 = tap.insert_account(keccak("1").into(), "").unwrap();
let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_multi, Some(tap)); let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_multi, Some(tap));
client.engine().register_client(Arc::downgrade(&client) as _); client.engine().register_client(Arc::downgrade(&client));
// Make sure txs go through. // Make sure txs go through.
client.miner().set_gas_floor_target(1_000_000.into()); client.miner().set_gas_floor_target(1_000_000.into());
@ -178,27 +178,27 @@ mod tests {
// Wrong signer for the first block. // Wrong signer for the first block.
client.miner().set_engine_signer(v1, "".into()).unwrap(); client.miner().set_engine_signer(v1, "".into()).unwrap();
client.transact_contract(Default::default(), Default::default()).unwrap(); client.transact_contract(Default::default(), Default::default()).unwrap();
::client::EngineClient::update_sealing(&*client); client.update_sealing();
assert_eq!(client.chain_info().best_block_number, 0); assert_eq!(client.chain_info().best_block_number, 0);
// Right signer for the first block. // Right signer for the first block.
client.miner().set_engine_signer(v0, "".into()).unwrap(); client.miner().set_engine_signer(v0, "".into()).unwrap();
::client::EngineClient::update_sealing(&*client); client.update_sealing();
assert_eq!(client.chain_info().best_block_number, 1); assert_eq!(client.chain_info().best_block_number, 1);
// This time v0 is wrong. // This time v0 is wrong.
client.transact_contract(Default::default(), Default::default()).unwrap(); client.transact_contract(Default::default(), Default::default()).unwrap();
::client::EngineClient::update_sealing(&*client); client.update_sealing();
assert_eq!(client.chain_info().best_block_number, 1); assert_eq!(client.chain_info().best_block_number, 1);
client.miner().set_engine_signer(v1, "".into()).unwrap(); client.miner().set_engine_signer(v1, "".into()).unwrap();
::client::EngineClient::update_sealing(&*client); client.update_sealing();
assert_eq!(client.chain_info().best_block_number, 2); assert_eq!(client.chain_info().best_block_number, 2);
// v1 is still good. // v1 is still good.
client.transact_contract(Default::default(), Default::default()).unwrap(); client.transact_contract(Default::default(), Default::default()).unwrap();
::client::EngineClient::update_sealing(&*client); client.update_sealing();
assert_eq!(client.chain_info().best_block_number, 3); assert_eq!(client.chain_info().best_block_number, 3);
// Check syncing. // Check syncing.
let sync_client = generate_dummy_client_with_spec_and_data(Spec::new_validator_multi, 0, 0, &[]); let sync_client = generate_dummy_client_with_spec_and_data(Spec::new_validator_multi, 0, 0, &[]);
sync_client.engine().register_client(Arc::downgrade(&sync_client) as _); sync_client.engine().register_client(Arc::downgrade(&sync_client));
for i in 1..4 { for i in 1..4 {
sync_client.import_block(client.block(BlockId::Number(i)).unwrap().into_inner()).unwrap(); sync_client.import_block(client.block(BlockId::Number(i)).unwrap().into_inner()).unwrap();
} }

View File

@ -23,14 +23,13 @@ use hash::keccak;
use bigint::prelude::U256; use bigint::prelude::U256;
use bigint::hash::{H160, H256}; use bigint::hash::{H160, H256};
use parking_lot::{Mutex, RwLock}; use parking_lot::RwLock;
use util::*; use util::*;
use util::cache::MemoryLruCache; use util::cache::MemoryLruCache;
use rlp::{UntrustedRlp, RlpStream}; use rlp::{UntrustedRlp, RlpStream};
use basic_types::LogBloom; use basic_types::LogBloom;
use client::EngineClient; use client::{Client, BlockChainClient};
use engines::{Call, Engine}; use engines::{Call, Engine};
use header::Header; use header::Header;
use ids::BlockId; use ids::BlockId;
@ -49,35 +48,12 @@ lazy_static! {
static ref EVENT_NAME_HASH: H256 = keccak(EVENT_NAME); static ref EVENT_NAME_HASH: H256 = keccak(EVENT_NAME);
} }
// state-dependent proofs for the safe contract:
// only "first" proofs are such.
struct StateProof {
header: Mutex<Header>,
provider: Provider,
}
impl ::engines::StateDependentProof for StateProof {
fn generate_proof(&self, caller: &Call) -> Result<Vec<u8>, String> {
prove_initial(&self.provider, &*self.header.lock(), caller)
}
fn check_proof(&self, engine: &Engine, proof: &[u8]) -> Result<(), String> {
let (header, state_items) = decode_first_proof(&UntrustedRlp::new(proof))
.map_err(|e| format!("proof incorrectly encoded: {}", e))?;
if &header != &*self.header.lock(){
return Err("wrong header in proof".into());
}
check_first_proof(engine, &self.provider, header, &state_items).map(|_| ())
}
}
/// The validator contract should have the following interface: /// The validator contract should have the following interface:
pub struct ValidatorSafeContract { pub struct ValidatorSafeContract {
pub address: Address, pub address: Address,
validators: RwLock<MemoryLruCache<H256, SimpleList>>, validators: RwLock<MemoryLruCache<H256, SimpleList>>,
provider: Provider, provider: Provider,
client: RwLock<Option<Weak<EngineClient>>>, // TODO [keorn]: remove client: RwLock<Option<Weak<Client>>>, // TODO [keorn]: remove
} }
// first proof is just a state proof call of `getValidators` at header's state. // first proof is just a state proof call of `getValidators` at header's state.
@ -91,59 +67,6 @@ fn encode_first_proof(header: &Header, state_items: &[Vec<u8>]) -> Bytes {
stream.out() stream.out()
} }
// check a first proof: fetch the validator set at the given block.
fn check_first_proof(engine: &Engine, provider: &Provider, old_header: Header, state_items: &[DBValue])
-> Result<Vec<Address>, String>
{
use transaction::{Action, Transaction};
// TODO: match client contract_call_tx more cleanly without duplication.
const PROVIDED_GAS: u64 = 50_000_000;
let env_info = ::vm::EnvInfo {
number: old_header.number(),
author: *old_header.author(),
difficulty: *old_header.difficulty(),
gas_limit: PROVIDED_GAS.into(),
timestamp: old_header.timestamp(),
last_hashes: {
// this will break if we don't include all 256 last hashes.
let mut last_hashes: Vec<_> = (0..256).map(|_| H256::default()).collect();
last_hashes[255] = *old_header.parent_hash();
Arc::new(last_hashes)
},
gas_used: 0.into(),
};
// check state proof using given engine.
let number = old_header.number();
provider.get_validators(move |a, d| {
let from = Address::default();
let tx = Transaction {
nonce: engine.account_start_nonce(number),
action: Action::Call(a),
gas: PROVIDED_GAS.into(),
gas_price: U256::default(),
value: U256::default(),
data: d,
}.fake_sign(from);
let res = ::state::check_proof(
state_items,
*old_header.state_root(),
&tx,
engine,
&env_info,
);
match res {
::state::ProvedExecution::BadProof => Err("Bad proof".into()),
::state::ProvedExecution::Failed(e) => Err(format!("Failed call: {}", e)),
::state::ProvedExecution::Complete(e) => Ok(e.output),
}
}).wait()
}
fn decode_first_proof(rlp: &UntrustedRlp) -> Result<(Header, Vec<DBValue>), ::error::Error> { fn decode_first_proof(rlp: &UntrustedRlp) -> Result<(Header, Vec<DBValue>), ::error::Error> {
let header = rlp.val_at(0)?; let header = rlp.val_at(0)?;
let state_items = rlp.at(1)?.iter().map(|x| { let state_items = rlp.at(1)?.iter().map(|x| {
@ -181,7 +104,8 @@ fn prove_initial(provider: &Provider, header: &Header, caller: &Call) -> Result<
Ok(result) Ok(result)
}; };
provider.get_validators(caller).wait() provider.get_validators(caller)
.wait()
}; };
res.map(|validators| { res.map(|validators| {
@ -335,11 +259,9 @@ impl ValidatorSet for ValidatorSafeContract {
// transition to the first block of a contract requires finality but has no log event. // transition to the first block of a contract requires finality but has no log event.
if first { if first {
debug!(target: "engine", "signalling transition to fresh contract."); debug!(target: "engine", "signalling transition to fresh contract.");
let state_proof = Arc::new(StateProof { let (provider, header) = (self.provider.clone(), header.clone());
header: Mutex::new(header.clone()), let with_caller: Box<Fn(&Call) -> _> = Box::new(move |caller| prove_initial(&provider, &header, caller));
provider: self.provider.clone(), return ::engines::EpochChange::Yes(::engines::Proof::WithState(with_caller))
});
return ::engines::EpochChange::Yes(::engines::Proof::WithState(state_proof as Arc<_>));
} }
// otherwise, we're checking for logs. // otherwise, we're checking for logs.
@ -368,16 +290,61 @@ impl ValidatorSet for ValidatorSafeContract {
fn epoch_set(&self, first: bool, engine: &Engine, _number: ::header::BlockNumber, proof: &[u8]) fn epoch_set(&self, first: bool, engine: &Engine, _number: ::header::BlockNumber, proof: &[u8])
-> Result<(SimpleList, Option<H256>), ::error::Error> -> Result<(SimpleList, Option<H256>), ::error::Error>
{ {
use transaction::{Action, Transaction};
let rlp = UntrustedRlp::new(proof); let rlp = UntrustedRlp::new(proof);
if first { if first {
trace!(target: "engine", "Recovering initial epoch set"); trace!(target: "engine", "Recovering initial epoch set");
// TODO: match client contract_call_tx more cleanly without duplication.
const PROVIDED_GAS: u64 = 50_000_000;
let (old_header, state_items) = decode_first_proof(&rlp)?; let (old_header, state_items) = decode_first_proof(&rlp)?;
let number = old_header.number();
let old_hash = old_header.hash(); let old_hash = old_header.hash();
let addresses = check_first_proof(engine, &self.provider, old_header, &state_items)
.map_err(::engines::EngineError::InsufficientProof)?; let env_info = ::vm::EnvInfo {
number: old_header.number(),
author: *old_header.author(),
difficulty: *old_header.difficulty(),
gas_limit: PROVIDED_GAS.into(),
timestamp: old_header.timestamp(),
last_hashes: {
// this will break if we don't include all 256 last hashes.
let mut last_hashes: Vec<_> = (0..256).map(|_| H256::default()).collect();
last_hashes[255] = *old_header.parent_hash();
Arc::new(last_hashes)
},
gas_used: 0.into(),
};
// check state proof using given engine.
let number = old_header.number();
let addresses = self.provider.get_validators(move |a, d| {
let from = Address::default();
let tx = Transaction {
nonce: engine.account_start_nonce(number),
action: Action::Call(a),
gas: PROVIDED_GAS.into(),
gas_price: U256::default(),
value: U256::default(),
data: d,
}.fake_sign(from);
let res = ::state::check_proof(
&state_items,
*old_header.state_root(),
&tx,
engine,
&env_info,
);
match res {
::state::ProvedExecution::BadProof => Err("Bad proof".into()),
::state::ProvedExecution::Failed(e) => Err(format!("Failed call: {}", e)),
::state::ProvedExecution::Complete(e) => Ok(e.output),
}
}).wait().map_err(::engines::EngineError::InsufficientProof)?;
trace!(target: "engine", "extracted epoch set at #{}: {} addresses", trace!(target: "engine", "extracted epoch set at #{}: {} addresses",
number, addresses.len()); number, addresses.len());
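Recovering the initial epoch set is a state-proof check: the first proof carries the old header plus the state items a getValidators call touches, and the call is re-executed against those items under the old header's state root. A much simplified standalone sketch of that shape, with a toy hash in place of the real Merkle-proof verification and a plain map lookup in place of getValidators:

use std::collections::BTreeMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Toy "state root": a hash over the sorted key/value pairs. The real check
// verifies a Merkle proof against the header's state root; this only mirrors
// the shape of the check.
fn toy_root(items: &BTreeMap<&'static str, u64>) -> u64 {
    let mut h = DefaultHasher::new();
    for (k, v) in items {
        k.hash(&mut h);
        v.hash(&mut h);
    }
    h.finish()
}

struct OldHeader {
    state_root: u64,
}

// Check a "first proof": the supplied state items must match the old header's
// root, and the validator-getter must succeed against them.
fn check_first_proof(
    header: &OldHeader,
    state_items: &BTreeMap<&'static str, u64>,
) -> Result<Vec<u64>, String> {
    if toy_root(state_items) != header.state_root {
        return Err("Bad proof".into());
    }
    // Stand-in for executing getValidators against the proved state.
    state_items
        .get("validators")
        .map(|&v| vec![v])
        .ok_or_else(|| "Failed call: validator list missing".into())
}

fn main() {
    let mut items = BTreeMap::new();
    items.insert("validators", 3u64);
    let header = OldHeader { state_root: toy_root(&items) };
    assert_eq!(check_first_proof(&header, &items).unwrap(), vec![3]);
}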
@ -451,7 +418,7 @@ impl ValidatorSet for ValidatorSafeContract {
})) }))
} }
fn register_client(&self, client: Weak<EngineClient>) { fn register_contract(&self, client: Weak<Client>) {
trace!(target: "engine", "Setting up contract caller."); trace!(target: "engine", "Setting up contract caller.");
*self.client.write() = Some(client); *self.client.write() = Some(client);
} }
@ -467,7 +434,7 @@ mod tests {
use spec::Spec; use spec::Spec;
use account_provider::AccountProvider; use account_provider::AccountProvider;
use transaction::{Transaction, Action}; use transaction::{Transaction, Action};
use client::BlockChainClient; use client::{BlockChainClient, EngineClient};
use ethkey::Secret; use ethkey::Secret;
use miner::MinerService; use miner::MinerService;
use tests::helpers::{generate_dummy_client_with_spec_and_accounts, generate_dummy_client_with_spec_and_data}; use tests::helpers::{generate_dummy_client_with_spec_and_accounts, generate_dummy_client_with_spec_and_data};
@ -478,7 +445,7 @@ mod tests {
fn fetches_validators() { fn fetches_validators() {
let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_safe_contract, None); let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_safe_contract, None);
let vc = Arc::new(ValidatorSafeContract::new("0000000000000000000000000000000000000005".parse::<Address>().unwrap())); let vc = Arc::new(ValidatorSafeContract::new("0000000000000000000000000000000000000005".parse::<Address>().unwrap()));
vc.register_client(Arc::downgrade(&client) as _); vc.register_contract(Arc::downgrade(&client));
let last_hash = client.best_block_header().hash(); let last_hash = client.best_block_header().hash();
assert!(vc.contains(&last_hash, &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e".parse::<Address>().unwrap())); assert!(vc.contains(&last_hash, &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e".parse::<Address>().unwrap()));
assert!(vc.contains(&last_hash, &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1".parse::<Address>().unwrap())); assert!(vc.contains(&last_hash, &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1".parse::<Address>().unwrap()));
@ -492,7 +459,7 @@ mod tests {
let v1 = tap.insert_account(keccak("0").into(), "").unwrap(); let v1 = tap.insert_account(keccak("0").into(), "").unwrap();
let chain_id = Spec::new_validator_safe_contract().chain_id(); let chain_id = Spec::new_validator_safe_contract().chain_id();
let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_safe_contract, Some(tap)); let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_safe_contract, Some(tap));
client.engine().register_client(Arc::downgrade(&client) as _); client.engine().register_client(Arc::downgrade(&client));
let validator_contract = "0000000000000000000000000000000000000005".parse::<Address>().unwrap(); let validator_contract = "0000000000000000000000000000000000000005".parse::<Address>().unwrap();
client.miner().set_engine_signer(v1, "".into()).unwrap(); client.miner().set_engine_signer(v1, "".into()).unwrap();
@ -506,7 +473,7 @@ mod tests {
data: "bfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(), data: "bfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(),
}.sign(&s0, Some(chain_id)); }.sign(&s0, Some(chain_id));
client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap(); client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap();
::client::EngineClient::update_sealing(&*client); client.update_sealing();
assert_eq!(client.chain_info().best_block_number, 1); assert_eq!(client.chain_info().best_block_number, 1);
// Add "1" validator back in. // Add "1" validator back in.
let tx = Transaction { let tx = Transaction {
@ -518,13 +485,13 @@ mod tests {
data: "4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(), data: "4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(),
}.sign(&s0, Some(chain_id)); }.sign(&s0, Some(chain_id));
client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap(); client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap();
::client::EngineClient::update_sealing(&*client); client.update_sealing();
// The transaction is not yet included so still unable to seal. // The transaction is not yet included so still unable to seal.
assert_eq!(client.chain_info().best_block_number, 1); assert_eq!(client.chain_info().best_block_number, 1);
// Switch to the validator that is still there. // Switch to the validator that is still there.
client.miner().set_engine_signer(v0, "".into()).unwrap(); client.miner().set_engine_signer(v0, "".into()).unwrap();
::client::EngineClient::update_sealing(&*client); client.update_sealing();
assert_eq!(client.chain_info().best_block_number, 2); assert_eq!(client.chain_info().best_block_number, 2);
// Switch back to the added validator, since the state is updated. // Switch back to the added validator, since the state is updated.
client.miner().set_engine_signer(v1, "".into()).unwrap(); client.miner().set_engine_signer(v1, "".into()).unwrap();
@ -537,13 +504,13 @@ mod tests {
data: Vec::new(), data: Vec::new(),
}.sign(&s0, Some(chain_id)); }.sign(&s0, Some(chain_id));
client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap(); client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap();
::client::EngineClient::update_sealing(&*client); client.update_sealing();
// Able to seal again. // Able to seal again.
assert_eq!(client.chain_info().best_block_number, 3); assert_eq!(client.chain_info().best_block_number, 3);
// Check syncing. // Check syncing.
let sync_client = generate_dummy_client_with_spec_and_data(Spec::new_validator_safe_contract, 0, 0, &[]); let sync_client = generate_dummy_client_with_spec_and_data(Spec::new_validator_safe_contract, 0, 0, &[]);
sync_client.engine().register_client(Arc::downgrade(&sync_client) as _); sync_client.engine().register_client(Arc::downgrade(&sync_client));
for i in 1..4 { for i in 1..4 {
sync_client.import_block(client.block(BlockId::Number(i)).unwrap().into_inner()).unwrap(); sync_client.import_block(client.block(BlockId::Number(i)).unwrap().into_inner()).unwrap();
} }
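The `as _` on the light-PoA side of these calls converts the `Weak<Client>` produced by `Arc::downgrade` into the trait-object `Weak` that `register_client` expects. A minimal stand-alone sketch of that conversion, with stand-in types rather than the real `Client`/`EngineClient`, written as an explicit binding rather than a cast:

// Sketch only: stand-in types, not parity's.
use std::sync::{Arc, Weak};

trait EngineClient {}
struct Client;
impl EngineClient for Client {}

fn register_client(_client: Weak<dyn EngineClient>) {}

fn main() {
    let client = Arc::new(Client);
    // Weak<Client> coerces to the trait-object Weak<dyn EngineClient>,
    // which is the shape the engine-facing register_client takes.
    let weak: Weak<dyn EngineClient> = Arc::downgrade(&client);
    register_client(weak);
}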


@@ -261,13 +261,8 @@ impl Header {
s.out()
}
/// Get the SHA3 (Keccak) of this header, optionally `with_seal`.
/// Get the KECCAK (Keccak) of this header, optionally `with_seal`.
pub fn rlp_keccak(&self, with_seal: Seal) -> H256 { keccak(self.rlp(with_seal)) }
/// Encode the header, getting a type-safe wrapper around the RLP.
pub fn encoded(&self) -> ::encoded::Header {
::encoded::Header::new(self.rlp(Seal::With))
}
}
impl Decodable for Header {


@@ -116,7 +116,7 @@ impl ClientService {
});
io_service.register_handler(client_io)?;
spec.engine.register_client(Arc::downgrade(&client) as _);
spec.engine.register_client(Arc::downgrade(&client));
let stop_guard = ::devtools::StopGuard::new();
run_ipc(ipc_path, client.clone(), snapshot.clone(), stop_guard.share());


@@ -93,7 +93,7 @@ fn make_chain(accounts: Arc<AccountProvider>, blocks_beyond: usize, transitions:
let mut cur_signers = vec![*RICH_ADDR];
{
let engine = client.engine();
engine.register_client(Arc::downgrade(&client) as _);
engine.register_client(Arc::downgrade(&client));
}
{


@@ -36,6 +36,7 @@ use factory::Factories;
use header::{BlockNumber, Header};
use pod_state::*;
use rlp::{Rlp, RlpStream};
use state_db::StateDB;
use state::{Backend, State, Substate};
use state::backend::Basic as BasicBackend;
use trace::{NoopTracer, NoopVMTracer};
@@ -464,7 +465,7 @@ impl Spec {
}
/// Ensure that the given state DB has the trie nodes in for the genesis state.
pub fn ensure_db_good<T: Backend>(&self, db: T, factories: &Factories) -> Result<T, Error> {
pub fn ensure_db_good(&self, db: StateDB, factories: &Factories) -> Result<StateDB, Error> {
if db.as_hashdb().contains(&self.state_root()) {
return Ok(db)
}
@@ -486,63 +487,6 @@ impl Spec {
.and_then(|x| load_from(cache_dir, x).map_err(fmt))
}
/// Initialize genesis epoch data, using an in-memory database for
/// the constructor.
pub fn genesis_epoch_data(&self) -> Result<Vec<u8>, String> {
use transaction::{Action, Transaction};
use util::{journaldb, kvdb};
let genesis = self.genesis_header();
let factories = Default::default();
let mut db = journaldb::new(
Arc::new(kvdb::in_memory(0)),
journaldb::Algorithm::Archive,
None,
);
self.ensure_db_good(BasicBackend(db.as_hashdb_mut()), &factories)
.map_err(|e| format!("Unable to initialize genesis state: {}", e))?;
let call = |a, d| {
let mut db = db.boxed_clone();
let env_info = ::evm::EnvInfo {
number: 0,
author: *genesis.author(),
timestamp: genesis.timestamp(),
difficulty: *genesis.difficulty(),
gas_limit: *genesis.gas_limit(),
last_hashes: Arc::new(Vec::new()),
gas_used: 0.into()
};
let from = Address::default();
let tx = Transaction {
nonce: self.engine.account_start_nonce(0),
action: Action::Call(a),
gas: U256::from(50_000_000), // TODO: share with client.
gas_price: U256::default(),
value: U256::default(),
data: d,
}.fake_sign(from);
let res = ::state::prove_transaction(
db.as_hashdb_mut(),
*genesis.state_root(),
&tx,
&*self.engine,
&env_info,
factories.clone(),
true,
);
res.map(|(out, proof)| (out, proof.into_iter().map(|x| x.into_vec()).collect()))
.ok_or_else(|| "Failed to prove call: insufficient state".into())
};
self.engine.genesis_epoch_data(&genesis, &call)
}
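For orientation, a hedged sketch of how the method removed above was meant to be called (signature as shown in this hunk; `spec` is an already-loaded `Spec`, error handling simplified):

// Illustrative only; relies on the light-PoA-era API deleted by this commit.
fn genesis_epoch_proof(spec: &Spec) -> Result<Vec<u8>, String> {
    // proves the engine's epoch-set constructor against the in-memory genesis state
    spec.genesis_epoch_data()
}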
/// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus.
pub fn new_test() -> Spec { load_bundled!("null_morden") }


@@ -21,7 +21,8 @@ use std::collections::HashMap;
use std::collections::hash_map::Entry;
use native_contracts::TransactAcl as Contract;
use client::{BlockChainClient, BlockId, ChainNotify};
use util::{Address, H256, Bytes};
use util::{Address, Bytes};
use bigint::hash::H256;
use parking_lot::{Mutex, RwLock};
use futures::{self, Future};
use spec::CommonParams;


@@ -208,9 +208,7 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> {
config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
config.queue.verifier_settings = cmd.verifier_settings;
// TODO: could epoch signals be available at the end of the file?
let service = LightClientService::start(config, &spec, &client_path, cache)
let fetch = ::light::client::fetch::unavailable();
let service = LightClientService::start(config, &spec, fetch, &client_path, cache)
.map_err(|e| format!("Failed to start client: {}", e))?;
// free up the spec in memory.


@@ -25,7 +25,7 @@ use futures::{future, IntoFuture, Future, BoxFuture};
use hash_fetch::fetch::Client as FetchClient;
use hash_fetch::urlhint::ContractClient;
use helpers::replace_home;
use light::client::LightChainClient;
use light::client::Client as LightClient;
use light::on_demand::{self, OnDemand};
use node_health::{SyncStatus, NodeHealth};
use rpc;
@@ -87,16 +87,16 @@ impl ContractClient for FullRegistrar {
}
/// Registrar implementation for the light client.
pub struct LightRegistrar<T> {
pub struct LightRegistrar {
/// The light client.
pub client: Arc<T>,
pub client: Arc<LightClient>,
/// Handle to the on-demand service.
pub on_demand: Arc<OnDemand>,
/// Handle to the light network service.
pub sync: Arc<LightSync>,
}
impl<T: LightChainClient + 'static> ContractClient for LightRegistrar<T> {
impl ContractClient for LightRegistrar {
fn registrar(&self) -> Result<Address, String> {
self.client.engine().additional_params().get("registrar")
.ok_or_else(|| "Registrar not defined.".into())
@@ -106,14 +106,7 @@ impl<T: LightChainClient + 'static> ContractClient for LightRegistrar<T> {
}
fn call(&self, address: Address, data: Bytes) -> BoxFuture<Bytes, String> {
let header = self.client.best_block_header();
let (header, env_info) = (self.client.best_block_header(), self.client.latest_env_info());
let env_info = self.client.env_info(BlockId::Hash(header.hash()))
.ok_or_else(|| format!("Cannot fetch env info for header {}", header.hash()));
let env_info = match env_info {
Ok(x) => x,
Err(e) => return future::err(e).boxed(),
};
let maybe_future = self.sync.with_context(move |ctx| {
self.on_demand
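The removed `env_info` lines above bail out with a ready error future instead of using `?`, because `call` must return a `BoxFuture` rather than a `Result`. A small stand-alone sketch of that pattern using the futures combinators this file already imports (`future`, `Future`, `BoxFuture`); the helper and its types are hypothetical:

use futures::{future, Future, BoxFuture};

// Hypothetical helper: return early with an error future when a lookup fails.
fn with_env(env: Result<u64, String>) -> BoxFuture<u64, String> {
    let env = match env {
        Ok(x) => x,
        // `?` would not compile here: the return type is a future, not a Result
        Err(e) => return future::err(e).boxed(),
    };
    future::ok(env + 1).boxed()
}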


@@ -22,7 +22,7 @@ use std::sync::{Arc};
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
use std::time::{Instant, Duration};
use ethcore::client::{BlockId, BlockChainClient, BlockChainInfo, BlockQueueInfo, ChainNotify, ClientReport, Client};
use ethcore::client::*;
use ethcore::header::BlockNumber;
use ethcore::service::ClientIoMessage;
use ethcore::snapshot::{RestorationStatus, SnapshotService as SS};


@@ -1,90 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::{Arc, Weak};
use ethcore::encoded;
use ethcore::engines::{Engine, StateDependentProof};
use ethcore::header::Header;
use ethcore::receipt::Receipt;
use ethsync::LightSync;
use futures::{future, Future, BoxFuture};
use light::client::fetch::ChainDataFetcher;
use light::on_demand::{request, OnDemand};
use parking_lot::RwLock;
use bigint::hash::H256;
const ALL_VALID_BACKREFS: &str = "no back-references, therefore all back-references valid; qed";
/// Allows on-demand fetch of data useful for the light client.
pub struct EpochFetch {
/// A handle to the sync service.
pub sync: Arc<RwLock<Weak<LightSync>>>,
/// The on-demand request service.
pub on_demand: Arc<OnDemand>,
}
impl EpochFetch {
fn request<T>(&self, req: T) -> BoxFuture<T::Out, &'static str>
where T: Send + request::RequestAdapter + 'static, T::Out: Send + 'static
{
match self.sync.read().upgrade() {
Some(sync) => {
let on_demand = &self.on_demand;
let maybe_future = sync.with_context(move |ctx| {
on_demand.request(ctx, req).expect(ALL_VALID_BACKREFS)
});
match maybe_future {
Some(x) => x.map_err(|_| "Request canceled").boxed(),
None => future::err("Unable to access network.").boxed(),
}
}
None => future::err("Unable to access network").boxed(),
}
}
}
impl ChainDataFetcher for EpochFetch {
type Error = &'static str;
type Body = BoxFuture<encoded::Block, &'static str>;
type Receipts = BoxFuture<Vec<Receipt>, &'static str>;
type Transition = BoxFuture<Vec<u8>, &'static str>;
fn block_body(&self, header: &Header) -> Self::Body {
self.request(request::Body(header.encoded().into()))
}
/// Fetch block receipts.
fn block_receipts(&self, header: &Header) -> Self::Receipts {
self.request(request::BlockReceipts(header.encoded().into()))
}
/// Fetch epoch transition proof at given header.
fn epoch_transition(&self, hash: H256, engine: Arc<Engine>, checker: Arc<StateDependentProof>)
-> Self::Transition
{
self.request(request::Signal {
hash: hash,
engine: engine,
proof_check: checker,
})
}
}
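For context, a hedged sketch of how a caller could drive one of these fetcher futures to completion (trait and types as used in this file; `wait()` blocks the current thread, so this is for illustration only, not for use on an event loop):

use futures::{Future, IntoFuture};

// Illustrative only: resolve a fetcher's block-body request synchronously.
fn fetch_body_blocking<F: ChainDataFetcher>(
    fetcher: &F,
    header: &Header,
) -> Result<encoded::Block, F::Error> {
    fetcher.block_body(header).into_future().wait()
}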


@@ -16,8 +16,6 @@
//! Utilities and helpers for the light client.
mod epoch_fetch;
mod queue_cull;
pub use self::epoch_fetch::EpochFetch;
pub use self::queue_cull::QueueCull;


@@ -23,7 +23,7 @@ use ethcore::service::ClientIoMessage;
use ethsync::LightSync;
use io::{IoContext, IoHandler, TimerToken};
use light::client::LightChainClient;
use light::client::Client;
use light::on_demand::{request, OnDemand};
use light::TransactionQueue;
@@ -41,9 +41,9 @@ const TIMEOUT_MS: u64 = 1000 * 60 * 10;
const PURGE_TIMEOUT_MS: u64 = 1000 * 60 * 9;
/// Periodically culls the transaction queue of mined transactions.
pub struct QueueCull<T> {
pub struct QueueCull {
/// A handle to the client, for getting the latest block header.
pub client: Arc<T>,
pub client: Arc<Client>,
/// A handle to the sync service.
pub sync: Arc<LightSync>,
/// The on-demand request service.
@@ -54,7 +54,7 @@ pub struct QueueCull<T> {
pub remote: Remote,
}
impl<T: LightChainClient + 'static> IoHandler<ClientIoMessage> for QueueCull<T> {
impl IoHandler<ClientIoMessage> for QueueCull {
fn initialize(&self, io: &IoContext<ClientIoMessage>) {
io.register_timer(TOKEN, TIMEOUT_MS).expect("Error registering timer");
}


@@ -32,7 +32,6 @@ use ethsync::{ManageNetwork, SyncProvider, LightSync};
use hash_fetch::fetch::Client as FetchClient;
use jsonrpc_core::{self as core, MetaIoHandler};
use light::{TransactionQueue as LightTransactionQueue, Cache as LightDataCache};
use light::client::LightChainClient;
use node_health::NodeHealth;
use parity_reactor;
use parity_rpc::dispatch::{FullDispatcher, LightDispatcher};
@@ -399,9 +398,9 @@ impl ActivityNotifier for LightClientNotifier {
}
/// RPC dependencies for a light client.
pub struct LightDependencies<T> {
pub struct LightDependencies {
pub signer_service: Arc<SignerService>,
pub client: Arc<T>,
pub client: Arc<::light::client::Client>,
pub sync: Arc<LightSync>,
pub net: Arc<ManageNetwork>,
pub secret_store: Arc<AccountProvider>,
@@ -420,7 +419,7 @@ pub struct LightDependencies<T> {
pub whisper_rpc: Option<::whisper::RpcFactory>,
}
impl<C: LightChainClient + 'static> LightDependencies<C> {
impl LightDependencies {
fn extend_api<T: core::Middleware<Metadata>>(
&self,
handler: &mut MetaIoHandler<Metadata, T>,
@@ -569,7 +568,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
}
}
impl<T: LightChainClient + 'static> Dependencies for LightDependencies<T> {
impl Dependencies for LightDependencies {
type Notifier = LightClientNotifier;
fn activity_notifier(&self) -> Self::Notifier { LightClientNotifier }


@@ -223,16 +223,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
config.queue.verifier_settings = cmd.verifier_settings;
// start on_demand service.
let service = light_client::Service::start(config, &spec, &db_dirs.client_path(algorithm), cache.clone())
let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone()));
let sync_handle = Arc::new(RwLock::new(Weak::new()));
let fetch = ::light_helpers::EpochFetch {
on_demand: on_demand.clone(),
sync: sync_handle.clone(),
};
let service = light_client::Service::start(config, &spec, fetch, &db_dirs.client_path(algorithm), cache.clone())
.map_err(|e| format!("Error starting light client: {}", e))?;
let txq = Arc::new(RwLock::new(::light::transaction_queue::TransactionQueue::default()));
let provider = ::light::provider::LightProvider::new(service.client().clone(), txq.clone());
@@ -244,6 +235,9 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
net_conf.boot_nodes = spec.nodes.clone();
}
// start on_demand service.
let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone()));
let mut attached_protos = Vec::new();
let whisper_factory = if cmd.whisper.enabled {
let (whisper_net, whisper_factory) = ::whisper::setup(cmd.whisper.target_message_pool_size)
@@ -267,7 +261,6 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
};
let light_sync = LightSync::new(sync_params).map_err(|e| format!("Error starting network: {}", e))?;
let light_sync = Arc::new(light_sync);
*sync_handle.write() = Arc::downgrade(&light_sync);
// spin up event loop
let event_loop = EventLoop::spawn();
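The removed `sync_handle` lines above, together with the `EpochFetch` construction earlier in this file's diff, implement a late-binding handle: the fetcher is built before `LightSync` exists, so it holds a lock-wrapped `Weak` that is filled in once the sync service is up. A minimal stand-alone sketch of the pattern, using std's `RwLock` instead of parking_lot's and stand-in types:

use std::sync::{Arc, RwLock, Weak};

struct LightSyncStub;                                          // stand-in for ethsync::LightSync
struct EpochFetchStub { sync: Arc<RwLock<Weak<LightSyncStub>>> }

fn main() {
    let handle = Arc::new(RwLock::new(Weak::new()));
    let _fetch = EpochFetchStub { sync: handle.clone() };      // handed to the client first
    let sync = Arc::new(LightSyncStub);                        // the sync service comes up later
    *handle.write().unwrap() = Arc::downgrade(&sync);          // now the fetcher can reach it
    assert!(handle.read().unwrap().upgrade().is_some());
}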


@@ -25,7 +25,7 @@ use jsonrpc_core::Error;
use jsonrpc_macros::Trailing;
use light::cache::Cache as LightDataCache;
use light::client::LightChainClient;
use light::client::{Client as LightClient, LightChainClient};
use light::{cht, TransactionQueue};
use light::on_demand::{request, OnDemand};
@@ -63,9 +63,9 @@ use util::Address;
const NO_INVALID_BACK_REFS: &'static str = "Fails only on invalid back-references; back-references here known to be valid; qed";
/// Light client `ETH` (and filter) RPC.
pub struct EthClient<T> {
pub struct EthClient {
sync: Arc<LightSync>,
client: Arc<T>,
client: Arc<LightClient>,
on_demand: Arc<OnDemand>,
transaction_queue: Arc<RwLock<TransactionQueue>>,
accounts: Arc<AccountProvider>,
@@ -73,7 +73,7 @@ pub struct EthClient<T> {
polls: Mutex<PollManager<PollFilter>>,
}
impl<T> Clone for EthClient<T> {
impl Clone for EthClient {
fn clone(&self) -> Self {
// each instance should have its own poll manager.
EthClient {
@@ -89,12 +89,12 @@ impl<T> Clone for EthClient<T> {
}
impl<T: LightChainClient + 'static> EthClient<T> {
impl EthClient {
/// Create a new `EthClient` with a handle to the light sync instance, client,
/// and on-demand request service, which is assumed to be attached as a handler.
pub fn new(
sync: Arc<LightSync>,
client: Arc<T>,
client: Arc<LightClient>,
on_demand: Arc<OnDemand>,
transaction_queue: Arc<RwLock<TransactionQueue>>,
accounts: Arc<AccountProvider>,
@@ -209,7 +209,7 @@ impl<T: LightChainClient + 'static> EthClient<T> {
}
}
impl<T: LightChainClient + 'static> Eth for EthClient<T> {
impl Eth for EthClient {
type Metadata = Metadata;
fn protocol_version(&self) -> Result<String, Error> {
@@ -466,7 +466,7 @@ impl<T: LightChainClient + 'static> Eth for EthClient<T> {
}
// This trait implementation triggers a blanket impl of `EthFilter`.
impl<T: LightChainClient + 'static> Filterable for EthClient<T> {
impl Filterable for EthClient {
fn best_block_number(&self) -> u64 { self.client.chain_info().best_block_number }
fn block_hash(&self, id: BlockId) -> Option<RpcH256> {


@@ -2244,7 +2244,7 @@ mod tests {
use super::{PeerInfo, PeerAsking};
use ethkey;
use ethcore::header::*;
use ethcore::client::{BlockChainClient, EachBlockWith, TestBlockChainClient};
use ethcore::client::*;
use ethcore::transaction::UnverifiedTransaction;
use ethcore::miner::MinerService;


@@ -25,7 +25,7 @@ use tests::helpers::{TestNet, Peer as PeerLike, TestPacket};
use ethcore::client::TestBlockChainClient;
use ethcore::spec::Spec;
use io::IoChannel;
use light::client::fetch::{self, Unavailable};
use light::client::Client as LightClient;
use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams};
use light::provider::LightProvider;
use network::{NodeId, PeerId};
@@ -36,8 +36,6 @@ use light::cache::Cache;
const NETWORK_ID: u64 = 0xcafebabe;
pub type LightClient = ::light::client::Client<Unavailable>;
struct TestIoContext<'a> {
queue: &'a RwLock<VecDeque<TestPacket>>,
sender: Option<PeerId>,
@@ -218,14 +216,7 @@ impl TestNet<Peer> {
// skip full verification because the blocks are bad.
config.verify_full = false;
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
let client = LightClient::in_memory(
let client = LightClient::in_memory(config, &Spec::new_test(), IoChannel::disconnected(), cache);
config,
&Spec::new_test(),
fetch::unavailable(), // TODO: allow fetch from full nodes.
IoChannel::disconnected(),
cache
);
peers.push(Arc::new(Peer::new_light(Arc::new(client))))
}


@@ -71,8 +71,8 @@ fn authority_round() {
// Push transaction to both clients. Only one of them gets lucky to produce a block.
net.peer(0).chain.miner().set_engine_signer(s0.address(), "".to_owned()).unwrap();
net.peer(1).chain.miner().set_engine_signer(s1.address(), "".to_owned()).unwrap();
net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain) as _);
net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain));
net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain) as _);
net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain));
net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1)));
net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0)));
// exchange statuses
@@ -160,8 +160,8 @@ fn tendermint() {
trace!(target: "poa", "Peer 0 is {}.", s0.address());
net.peer(1).chain.miner().set_engine_signer(s1.address(), "".to_owned()).unwrap();
trace!(target: "poa", "Peer 1 is {}.", s1.address());
net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain) as _);
net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain));
net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain) as _);
net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain));
net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0)));
net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1)));
// Exchange statuses