From ad39446e87645f8cc40d63a804d44d50f7e22976 Mon Sep 17 00:00:00 2001
From: Robert Habermeier
Date: Tue, 5 Sep 2017 17:54:05 +0200
Subject: [PATCH 01/10] Revert "fixed master (#6465)"

This reverts commit 899538ae2549990c410c28fdbaf870b3f06445cf.
---
 ethcore/light/src/client/fetch.rs             |  74 +++++
 ethcore/light/src/client/header_chain.rs      | 308 ++++++++++++++++--
 ethcore/light/src/client/mod.rs               | 186 +++++++++--
 ethcore/light/src/client/service.rs           |  23 +-
 ethcore/light/src/net/load_timer.rs           |   1 +
 ethcore/light/src/net/mod.rs                  |  13 +-
 ethcore/light/src/net/request_credits.rs      |  13 +-
 ethcore/light/src/net/request_set.rs          |   1 +
 ethcore/light/src/net/tests/mod.rs            |  50 +++
 ethcore/light/src/on_demand/mod.rs            |   2 +
 ethcore/light/src/on_demand/request.rs        |  42 ++-
 ethcore/light/src/provider.rs                 |  13 +
 ethcore/light/src/types/request/mod.rs        | 150 ++++++++-
 ethcore/src/client/client.rs                  |  51 ++-
 ethcore/src/client/test_client.rs             |  24 +-
 ethcore/src/client/traits.rs                  |  16 +-
 ethcore/src/engines/authority_round/mod.rs    |  23 +-
 ethcore/src/engines/basic_authority.rs        |   6 +-
 ethcore/src/engines/mod.rs                    |  20 +-
 ethcore/src/engines/tendermint/mod.rs         |  55 ++--
 ethcore/src/engines/validator_set/contract.rs |  12 +-
 ethcore/src/engines/validator_set/mod.rs      |   4 +-
 ethcore/src/engines/validator_set/multi.rs    |  22 +-
 .../engines/validator_set/safe_contract.rs    | 163 +++++----
 ethcore/src/header.rs                         |   7 +-
 ethcore/src/service.rs                        |   2 +-
 .../src/snapshot/tests/proof_of_authority.rs  |   2 +-
 ethcore/src/spec/spec.rs                      |  60 +++-
 ethcore/src/tx_filter.rs                      |   3 +-
 parity/blockchain.rs                          |   4 +-
 parity/dapps.rs                               |  17 +-
 parity/informant.rs                           |   2 +-
 parity/light_helpers/epoch_fetch.rs           |  90 +++++
 parity/light_helpers/mod.rs                   |   2 +
 parity/light_helpers/queue_cull.rs            |   8 +-
 parity/rpc_apis.rs                            |   9 +-
 parity/run.rs                                 |  15 +-
 rpc/src/v1/impls/light/eth.rs                 |  16 +-
 sync/src/chain.rs                             |   2 +-
 sync/src/light_sync/tests/test_net.rs         |  13 +-
 sync/src/tests/consensus.rs                   |   8 +-
 41 files changed, 1268 insertions(+), 264 deletions(-)
 create mode 100644 ethcore/light/src/client/fetch.rs
 create mode 100644 parity/light_helpers/epoch_fetch.rs

diff --git a/ethcore/light/src/client/fetch.rs b/ethcore/light/src/client/fetch.rs
new file mode 100644
index 000000000..93a2cde11
--- /dev/null
+++ b/ethcore/light/src/client/fetch.rs
@@ -0,0 +1,74 @@
+// Copyright 2015-2017 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Trait for fetching chain data.
+
+use std::sync::Arc;
+
+use ethcore::encoded;
+use ethcore::engines::{Engine, StateDependentProof};
+use ethcore::header::Header;
+use ethcore::receipt::Receipt;
+use futures::future::IntoFuture;
+use bigint::hash::H256;
+
+/// Provides full chain data.
+pub trait ChainDataFetcher: Send + Sync + 'static {
+	/// Error type when data unavailable.
+	type Error: ::std::fmt::Debug;
+
+	/// Future for fetching block body.
+	type Body: IntoFuture<Item=encoded::Block, Error=Self::Error>;
+	/// Future for fetching block receipts.
+	type Receipts: IntoFuture<Item=Vec<Receipt>, Error=Self::Error>;
+	/// Future for fetching epoch transition
+	type Transition: IntoFuture<Item=Vec<u8>, Error=Self::Error>;
+
+	/// Fetch a block body.
+	fn block_body(&self, header: &Header) -> Self::Body;
+
+	/// Fetch block receipts.
+	fn block_receipts(&self, header: &Header) -> Self::Receipts;
+
+	/// Fetch epoch transition proof at given header.
+	fn epoch_transition(&self, hash: H256, engine: Arc<Engine>, checker: Arc<StateDependentProof>) -> Self::Transition;
+}
+
+/// Fetcher implementation which cannot fetch anything.
+pub struct Unavailable;
+
+/// Create a fetcher which has all data unavailable.
+pub fn unavailable() -> Unavailable { Unavailable }
+
+impl ChainDataFetcher for Unavailable {
+	type Error = &'static str;
+
+	type Body = Result<encoded::Block, &'static str>;
+	type Receipts = Result<Vec<Receipt>, &'static str>;
+	type Transition = Result<Vec<u8>, &'static str>;
+
+	fn block_body(&self, _header: &Header) -> Self::Body {
+		Err("fetching block bodies unavailable")
+	}
+
+	fn block_receipts(&self, _header: &Header) -> Self::Receipts {
+		Err("fetching block receipts unavailable")
+	}
+
+	fn epoch_transition(&self, _h: H256, _e: Arc<Engine>, _check: Arc<StateDependentProof>) -> Self::Transition {
+		Err("fetching epoch transition proofs unavailable")
+	}
+}
diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs
index 3828e6954..7320eddb4 100644
--- a/ethcore/light/src/client/header_chain.rs
+++ b/ethcore/light/src/client/header_chain.rs
@@ -18,11 +18,12 @@
 //!
 //! Unlike a full node's `BlockChain` this doesn't store much in the database.
 //! It stores candidates for the last 2048-4096 blocks as well as CHT roots for
-//! historical blocks all the way to the genesis.
+//! historical blocks all the way to the genesis. If the engine makes use
+//! of epoch transitions, those are stored as well.
 //!
 //! This is separate from the `BlockChain` for two reasons:
 //! - It stores only headers (and a pruned subset of them)
-//! - To allow for flexibility in the database layout once that's incorporated.
+//! - To allow for flexibility in the database layout.
 
 use std::collections::BTreeMap;
 use std::sync::Arc;
@@ -30,15 +31,20 @@ use std::sync::Arc;
 use cht;
 
 use ethcore::block_status::BlockStatus;
-use ethcore::error::BlockError;
+use ethcore::error::{BlockImportError, BlockError};
 use ethcore::encoded;
 use ethcore::header::Header;
 use ethcore::ids::BlockId;
+use ethcore::spec::Spec;
+use ethcore::engines::epoch::{
+	Transition as EpochTransition,
+	PendingTransition as PendingEpochTransition
+};
 
 use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp, UntrustedRlp};
 use heapsize::HeapSizeOf;
 use bigint::prelude::U256;
-use bigint::hash::H256;
+use bigint::hash::{H256, H256FastMap, H264};
 use util::kvdb::{DBTransaction, KeyValueDB};
 
 use cache::Cache;
@@ -54,6 +60,9 @@ const HISTORY: u64 = 2048;
 /// The best block key. Maps to an RLP list: [best_era, last_era]
 const CURRENT_KEY: &'static [u8] = &*b"best_and_latest";
 
+/// Key storing the last canonical epoch transition.
+const LAST_CANONICAL_TRANSITION: &'static [u8] = &*b"canonical_transition";
+
 /// Information about a block.
#[derive(Debug, Clone)] pub struct BlockDescriptor { @@ -101,7 +110,6 @@ impl Encodable for Entry { impl Decodable for Entry { fn decode(rlp: &UntrustedRlp) -> Result { - let mut candidates = SmallVec::<[Candidate; 3]>::new(); for item in rlp.iter() { @@ -131,6 +139,42 @@ fn era_key(number: u64) -> String { format!("candidates_{}", number) } +fn pending_transition_key(block_hash: H256) -> H264 { + const LEADING: u8 = 1; + + let mut key = H264::default(); + + key[0] = LEADING; + key.0[1..].copy_from_slice(&block_hash.0[..]); + + key +} + +fn transition_key(block_hash: H256) -> H264 { + const LEADING: u8 = 2; + + let mut key = H264::default(); + + key[0] = LEADING; + key.0[1..].copy_from_slice(&block_hash.0[..]); + + key +} + +// encode last canonical transition entry: header and proof. +fn encode_canonical_transition(header: &Header, proof: &[u8]) -> Vec { + let mut stream = RlpStream::new_list(2); + stream.append(header).append(&proof); + stream.out() +} + +// decode last canonical transition entry. +fn decode_canonical_transition(t: &[u8]) -> Result<(Header, &[u8]), DecoderError> { + let rlp = UntrustedRlp::new(t); + + Ok((rlp.val_at(0)?, rlp.at(1)?.data()?)) +} + /// Pending changes from `insert` to be applied after the database write has finished. pub struct PendingChanges { best_block: Option, // new best block. @@ -141,6 +185,7 @@ pub struct HeaderChain { genesis_header: encoded::Header, // special-case the genesis. candidates: RwLock>, best_block: RwLock, + live_epoch_proofs: RwLock>, db: Arc, col: Option, cache: Arc>, @@ -148,8 +193,16 @@ pub struct HeaderChain { impl HeaderChain { /// Create a new header chain given this genesis block and database to read from. - pub fn new(db: Arc, col: Option, genesis: &[u8], cache: Arc>) -> Result { - use ethcore::views::HeaderView; + pub fn new( + db: Arc, + col: Option, + spec: &Spec, + cache: Arc>, + ) -> Result { + let mut live_epoch_proofs = ::std::collections::HashMap::default(); + + let genesis = ::rlp::encode(&spec.genesis_header()).into_vec(); + let decoded_header = spec.genesis_header(); let chain = if let Some(current) = db.get(col, CURRENT_KEY)? { let (best_number, highest_number) = { @@ -160,12 +213,24 @@ impl HeaderChain { let mut cur_number = highest_number; let mut candidates = BTreeMap::new(); - // load all era entries and referenced headers within them. + // load all era entries, referenced headers within them, + // and live epoch proofs. while let Some(entry) = db.get(col, era_key(cur_number).as_bytes())? { let entry: Entry = ::rlp::decode(&entry); trace!(target: "chain", "loaded header chain entry for era {} with {} candidates", cur_number, entry.candidates.len()); + for c in &entry.candidates { + let key = transition_key(c.hash); + + if let Some(proof) = db.get(col, &*key)? 
{ + live_epoch_proofs.insert(c.hash, EpochTransition { + block_hash: c.hash, + block_number: cur_number, + proof: proof.into_vec(), + }); + } + } candidates.insert(cur_number, entry); cur_number -= 1; @@ -187,29 +252,42 @@ impl HeaderChain { }; HeaderChain { - genesis_header: encoded::Header::new(genesis.to_owned()), + genesis_header: encoded::Header::new(genesis), best_block: RwLock::new(best_block), candidates: RwLock::new(candidates), + live_epoch_proofs: RwLock::new(live_epoch_proofs), db: db, col: col, cache: cache, } } else { - let g_view = HeaderView::new(genesis); HeaderChain { - genesis_header: encoded::Header::new(genesis.to_owned()), + genesis_header: encoded::Header::new(genesis), best_block: RwLock::new(BlockDescriptor { - hash: g_view.hash(), + hash: decoded_header.hash(), number: 0, - total_difficulty: g_view.difficulty(), + total_difficulty: *decoded_header.difficulty(), }), candidates: RwLock::new(BTreeMap::new()), + live_epoch_proofs: RwLock::new(live_epoch_proofs), db: db, col: col, cache: cache, } }; + // instantiate genesis epoch data if it doesn't exist. + if let None = chain.db.get(col, LAST_CANONICAL_TRANSITION)? { + let genesis_data = spec.genesis_epoch_data()?; + + { + let mut batch = chain.db.transaction(); + let data = encode_canonical_transition(&decoded_header, &genesis_data); + batch.put_vec(col, LAST_CANONICAL_TRANSITION, data); + chain.db.write(batch)?; + } + } + Ok(chain) } @@ -218,10 +296,24 @@ impl HeaderChain { /// This blindly trusts that the data given to it is sensible. /// Returns a set of pending changes to be applied with `apply_pending` /// before the next call to insert and after the transaction has been written. - pub fn insert(&self, transaction: &mut DBTransaction, header: Header) -> Result { + /// + /// If the block is an epoch transition, provide the transition along with + /// the header. + pub fn insert( + &self, + transaction: &mut DBTransaction, + header: Header, + transition_proof: Option>, + ) -> Result { let hash = header.hash(); let number = header.number(); let parent_hash = *header.parent_hash(); + let transition = transition_proof.map(|proof| EpochTransition { + block_hash: hash, + block_number: number, + proof: proof, + }); + let mut pending = PendingChanges { best_block: None, }; @@ -237,7 +329,8 @@ impl HeaderChain { candidates.get(&(number - 1)) .and_then(|entry| entry.candidates.iter().find(|c| c.hash == parent_hash)) .map(|c| c.total_difficulty) - .ok_or_else(|| BlockError::UnknownParent(parent_hash))? + .ok_or_else(|| BlockError::UnknownParent(parent_hash)) + .map_err(BlockImportError::Block)? 
}; let total_difficulty = parent_td + *header.difficulty(); @@ -262,8 +355,13 @@ impl HeaderChain { transaction.put(self.col, era_key(number).as_bytes(), &::rlp::encode(&*cur_era)) } - let raw = ::rlp::encode(&header); - transaction.put(self.col, &hash[..], &*raw); + if let Some(transition) = transition { + transaction.put(self.col, &*transition_key(hash), &transition.proof); + self.live_epoch_proofs.write().insert(hash, transition); + } + + let raw = header.encoded().into_inner(); + transaction.put_vec(self.col, &hash[..], raw); let (best_num, is_new_best) = { let cur_best = self.best_block.read(); @@ -316,8 +414,10 @@ impl HeaderChain { let cht_num = cht::block_to_cht_number(earliest_era) .expect("fails only for number == 0; genesis never imported; qed"); + let mut last_canonical_transition = None; let cht_root = { let mut i = earliest_era; + let mut live_epoch_proofs = self.live_epoch_proofs.write(); // iterable function which removes the candidates as it goes // along. this will only be called until the CHT is complete. @@ -328,7 +428,25 @@ impl HeaderChain { i += 1; + // prune old blocks and epoch proofs. for ancient in &era_entry.candidates { + let maybe_transition = live_epoch_proofs.remove(&ancient.hash); + if let Some(epoch_transition) = maybe_transition { + transaction.delete(self.col, &*transition_key(ancient.hash)); + + if ancient.hash == era_entry.canonical_hash { + last_canonical_transition = match self.db.get(self.col, &ancient.hash) { + Err(e) => { + warn!(target: "chain", "Error reading from DB: {}\n + ", e); + None + } + Ok(None) => panic!("stored candidates always have corresponding headers; qed"), + Ok(Some(header)) => Some((epoch_transition, ::rlp::decode(&header))), + }; + } + } + transaction.delete(self.col, &ancient.hash); } @@ -342,6 +460,12 @@ impl HeaderChain { // write the CHT root to the database. debug!(target: "chain", "Produced CHT {} root: {:?}", cht_num, cht_root); transaction.put(self.col, cht_key(cht_num).as_bytes(), &::rlp::encode(&cht_root)); + + // update the last canonical transition proof + if let Some((epoch_transition, header)) = last_canonical_transition { + let x = encode_canonical_transition(&header, &epoch_transition.proof); + transaction.put_vec(self.col, LAST_CANONICAL_TRANSITION, x); + } } } @@ -367,7 +491,7 @@ impl HeaderChain { /// will be returned. pub fn block_hash(&self, id: BlockId) -> Option { match id { - BlockId::Earliest => Some(self.genesis_hash()), + BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_hash()), BlockId::Hash(hash) => Some(hash), BlockId::Number(num) => { if self.best_block.read().number < num { return None } @@ -518,6 +642,56 @@ impl HeaderChain { false => BlockStatus::Unknown, } } + + /// Insert a pending transition. + pub fn insert_pending_transition(&self, batch: &mut DBTransaction, hash: H256, t: PendingEpochTransition) { + let key = pending_transition_key(hash); + batch.put(self.col, &*key, &*::rlp::encode(&t)); + } + + /// Get pending transition for a specific block hash. + pub fn pending_transition(&self, hash: H256) -> Option { + let key = pending_transition_key(hash); + match self.db.get(self.col, &*key) { + Ok(val) => val.map(|x| ::rlp::decode(&x)), + Err(e) => { + warn!(target: "chain", "Error reading from database: {}", e); + None + } + } + } + + /// Get the transition to the epoch the given parent hash is part of + /// or transitions to. + /// This will give the epoch that any children of this parent belong to. + /// + /// The header corresponding the the parent hash must be stored already. 
+ pub fn epoch_transition_for(&self, parent_hash: H256) -> Option<(Header, Vec)> { + // slow path: loop back block by block + let live_proofs = self.live_epoch_proofs.read(); + + for hdr in self.ancestry_iter(BlockId::Hash(parent_hash)) { + if let Some(transition) = live_proofs.get(&hdr.hash()).cloned() { + return Some((hdr.decode(), transition.proof)) + } + } + + // any blocks left must be descendants of the last canonical transition block. + match self.db.get(self.col, LAST_CANONICAL_TRANSITION) { + Ok(x) => { + let x = x.expect("last canonical transition always instantiated; qed"); + + let (hdr, proof) = decode_canonical_transition(&x) + .expect("last canonical transition always encoded correctly; qed"); + + Some((hdr, proof.to_vec())) + } + Err(e) => { + warn!("Error reading from DB: {}", e); + None + } + } + } } impl HeapSizeOf for HeaderChain { @@ -570,7 +744,7 @@ mod tests { let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); - let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache).unwrap(); + let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap(); let mut parent_hash = genesis_header.hash(); let mut rolling_timestamp = genesis_header.timestamp(); @@ -583,7 +757,7 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header, None).unwrap(); db.write(tx).unwrap(); chain.apply_pending(pending); @@ -603,7 +777,7 @@ mod tests { let db = make_db(); let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); - let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache).unwrap(); + let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap(); let mut parent_hash = genesis_header.hash(); let mut rolling_timestamp = genesis_header.timestamp(); @@ -616,7 +790,7 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header, None).unwrap(); db.write(tx).unwrap(); chain.apply_pending(pending); @@ -635,7 +809,7 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header, None).unwrap(); db.write(tx).unwrap(); chain.apply_pending(pending); @@ -659,7 +833,7 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header, None).unwrap(); db.write(tx).unwrap(); chain.apply_pending(pending); @@ -682,12 +856,10 @@ mod tests { #[test] fn earliest_is_latest() { let spec = Spec::new_test(); - let genesis_header = spec.genesis_header(); let db = make_db(); let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); - let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache).unwrap(); - + let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap(); assert!(chain.block_header(BlockId::Earliest).is_some()); assert!(chain.block_header(BlockId::Latest).is_some()); @@ -702,7 +874,7 @@ mod tests { let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); { - let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap(); + let chain = HeaderChain::new(db.clone(), None, &spec, 
cache.clone()).unwrap(); let mut parent_hash = genesis_header.hash(); let mut rolling_timestamp = genesis_header.timestamp(); for i in 1..10000 { @@ -714,7 +886,7 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header, None).unwrap(); db.write(tx).unwrap(); chain.apply_pending(pending); @@ -722,7 +894,7 @@ mod tests { } } - let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap(); + let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap(); assert!(chain.block_header(BlockId::Number(10)).is_none()); assert!(chain.block_header(BlockId::Number(9000)).is_some()); assert!(chain.cht_root(2).is_some()); @@ -738,7 +910,7 @@ mod tests { let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); { - let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap(); + let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap(); let mut parent_hash = genesis_header.hash(); let mut rolling_timestamp = genesis_header.timestamp(); @@ -752,7 +924,7 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header, None).unwrap(); db.write(tx).unwrap(); chain.apply_pending(pending); @@ -769,7 +941,7 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header, None).unwrap(); db.write(tx).unwrap(); chain.apply_pending(pending); @@ -780,7 +952,7 @@ mod tests { } // after restoration, non-canonical eras should still be loaded. 
- let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap(); + let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap(); assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10); assert!(chain.candidates.read().get(&100).is_some()) } @@ -792,10 +964,76 @@ mod tests { let db = make_db(); let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); - let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap(); + let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap(); assert!(chain.block_header(BlockId::Earliest).is_some()); assert!(chain.block_header(BlockId::Number(0)).is_some()); assert!(chain.block_header(BlockId::Hash(genesis_header.hash())).is_some()); } + + #[test] + fn epoch_transitions_available_after_cht() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = make_db(); + let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); + + let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap(); + + let mut parent_hash = genesis_header.hash(); + let mut rolling_timestamp = genesis_header.timestamp(); + for i in 1..6 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i.into()); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + let epoch_proof = if i == 3 { + Some(vec![1, 2, 3, 4]) + } else { + None + }; + + let pending = chain.insert(&mut tx, header, epoch_proof).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); + + rolling_timestamp += 10; + } + + // these 3 should end up falling back to the genesis epoch proof in DB + for i in 0..3 { + let hash = chain.block_hash(BlockId::Number(i)).unwrap(); + assert_eq!(chain.epoch_transition_for(hash).unwrap().1, Vec::::new()); + } + + // these are live. + for i in 3..6 { + let hash = chain.block_hash(BlockId::Number(i)).unwrap(); + assert_eq!(chain.epoch_transition_for(hash).unwrap().1, vec![1, 2, 3, 4]); + } + + for i in 6..10000 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i.into()); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, header, None).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); + + rolling_timestamp += 10; + } + + // no live blocks have associated epoch proofs -- make sure we aren't leaking memory. 
+ assert!(chain.live_epoch_proofs.read().is_empty()); + assert_eq!(chain.epoch_transition_for(parent_hash).unwrap().1, vec![1, 2, 3, 4]); + } } diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 2067a23c2..2b77685bd 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -19,11 +19,11 @@ use std::sync::{Weak, Arc}; use ethcore::block_status::BlockStatus; -use ethcore::client::{ClientReport, EnvInfo}; -use ethcore::engines::Engine; -use ethcore::error::BlockImportError; +use ethcore::client::{TransactionImportResult, ClientReport, EnvInfo}; +use ethcore::engines::{epoch, Engine, EpochChange, EpochTransition, Proof, Unsure}; +use ethcore::error::{TransactionError, BlockImportError, Error as EthcoreError}; use ethcore::ids::BlockId; -use ethcore::header::Header; +use ethcore::header::{BlockNumber, Header}; use ethcore::verification::queue::{self, HeaderQueue}; use ethcore::blockchain_info::BlockChainInfo; use ethcore::spec::Spec; @@ -33,9 +33,12 @@ use io::IoChannel; use parking_lot::{Mutex, RwLock}; use bigint::prelude::U256; use bigint::hash::H256; +use futures::{IntoFuture, Future}; +use util::Address; use util::kvdb::{KeyValueDB, CompactionProfile}; +use self::fetch::ChainDataFetcher; use self::header_chain::{AncestryIter, HeaderChain}; use cache::Cache; @@ -45,6 +48,8 @@ pub use self::service::Service; mod header_chain; mod service; +pub mod fetch; + /// Configuration for the light client. #[derive(Debug, Clone)] pub struct Config { @@ -80,6 +85,9 @@ impl Default for Config { /// Trait for interacting with the header chain abstractly. pub trait LightChainClient: Send + Sync { + /// Adds a new `LightChainNotify` listener. + fn add_listener(&self, listener: Weak); + /// Get chain info. fn chain_info(&self) -> BlockChainInfo; @@ -128,7 +136,7 @@ pub trait LightChainClient: Send + Sync { fn cht_root(&self, i: usize) -> Option; /// Get the EIP-86 transition block number. - fn eip86_transition(&self) -> u64; + fn eip86_transition(&self) -> BlockNumber; /// Get a report of import activity since the last call. fn report(&self) -> ClientReport; @@ -156,7 +164,7 @@ impl AsLightClient for T { } /// Light client implementation. -pub struct Client { +pub struct Client { queue: HeaderQueue, engine: Arc, chain: HeaderChain, @@ -164,22 +172,30 @@ pub struct Client { import_lock: Mutex<()>, db: Arc, listeners: RwLock>>, + fetcher: T, verify_full: bool, } -impl Client { +impl Client { /// Create a new `Client`. - pub fn new(config: Config, db: Arc, chain_col: Option, spec: &Spec, io_channel: IoChannel, cache: Arc>) -> Result { - let gh = ::rlp::encode(&spec.genesis_header()); - + pub fn new( + config: Config, + db: Arc, + chain_col: Option, + spec: &Spec, + fetcher: T, + io_channel: IoChannel, + cache: Arc> + ) -> Result { Ok(Client { queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, config.check_seal), engine: spec.engine.clone(), - chain: HeaderChain::new(db.clone(), chain_col, &gh, cache)?, + chain: HeaderChain::new(db.clone(), chain_col, &spec, cache)?, report: RwLock::new(ClientReport::default()), import_lock: Mutex::new(()), db: db, listeners: RwLock::new(vec![]), + fetcher: fetcher, verify_full: config.verify_full, }) } @@ -191,10 +207,24 @@ impl Client { /// Create a new `Client` backed purely in-memory. /// This will ignore all database options in the configuration. 
- pub fn in_memory(config: Config, spec: &Spec, io_channel: IoChannel, cache: Arc>) -> Self { + pub fn in_memory( + config: Config, + spec: &Spec, + fetcher: T, + io_channel: IoChannel, + cache: Arc> + ) -> Self { let db = ::util::kvdb::in_memory(0); - Client::new(config, Arc::new(db), None, spec, io_channel, cache).expect("New DB creation infallible; qed") + Client::new( + config, + Arc::new(db), + None, + spec, + fetcher, + io_channel, + cache + ).expect("New DB creation infallible; qed") } /// Import a header to the queue for additional verification. @@ -293,19 +323,33 @@ impl Client { continue } - // TODO: `epoch_end_signal`, `is_epoch_end`. - // proofs we get from the network would be _complete_, whereas we need - // _incomplete_ signals + let write_proof_result = match self.check_epoch_signal(&verified_header) { + Ok(Some(proof)) => self.write_pending_proof(&verified_header, proof), + Ok(None) => Ok(()), + Err(e) => + panic!("Unable to fetch epoch transition proof: {:?}", e), + }; + + if let Err(e) = write_proof_result { + warn!(target: "client", "Error writing pending transition proof to DB: {:?} \ + The node may not be able to synchronize further.", e); + } + + let epoch_proof = self.engine.is_epoch_end( + &verified_header, + &|h| self.chain.block_header(BlockId::Hash(h)).map(|hdr| hdr.decode()), + &|h| self.chain.pending_transition(h), + ); let mut tx = self.db.transaction(); - let pending = match self.chain.insert(&mut tx, verified_header) { + let pending = match self.chain.insert(&mut tx, verified_header, epoch_proof) { Ok(pending) => { good.push(hash); self.report.write().blocks_imported += 1; pending } Err(e) => { - debug!(target: "client", "Error importing header {:?}: {}", (num, hash), e); + debug!(target: "client", "Error importing header {:?}: {:?}", (num, hash), e); bad.push(hash); continue; } @@ -421,9 +465,76 @@ impl Client { true } + + fn check_epoch_signal(&self, verified_header: &Header) -> Result, T::Error> { + let (mut block, mut receipts) = (None, None); + + // First, check without providing auxiliary data. + match self.engine.signals_epoch_end(verified_header, None, None) { + EpochChange::No => return Ok(None), + EpochChange::Yes(proof) => return Ok(Some(proof)), + EpochChange::Unsure(unsure) => { + let (b, r) = match unsure { + Unsure::NeedsBody => + (Some(self.fetcher.block_body(verified_header)), None), + Unsure::NeedsReceipts => + (None, Some(self.fetcher.block_receipts(verified_header))), + Unsure::NeedsBoth => ( + Some(self.fetcher.block_body(verified_header)), + Some(self.fetcher.block_receipts(verified_header)), + ), + }; + + if let Some(b) = b { + block = Some(b.into_future().wait()?.into_inner()); + } + + if let Some(r) = r { + receipts = Some(r.into_future().wait()?); + } + } + } + + let block = block.as_ref().map(|x| &x[..]); + let receipts = receipts.as_ref().map(|x| &x[..]); + + // Check again now that required data has been fetched. + match self.engine.signals_epoch_end(verified_header, block, receipts) { + EpochChange::No => return Ok(None), + EpochChange::Yes(proof) => return Ok(Some(proof)), + EpochChange::Unsure(_) => + panic!("Detected faulty engine implementation: requests additional \ + data to check epoch end signal when everything necessary provided"), + } + } + + // attempts to fetch the epoch proof from the network until successful. 
+ fn write_pending_proof(&self, header: &Header, proof: Proof) -> Result<(), T::Error> { + let proof = match proof { + Proof::Known(known) => known, + Proof::WithState(state_dependent) => { + self.fetcher.epoch_transition( + header.hash(), + self.engine.clone(), + state_dependent + ).into_future().wait()? + } + }; + + let mut batch = self.db.transaction(); + self.chain.insert_pending_transition(&mut batch, header.hash(), epoch::PendingTransition { + proof: proof, + }); + self.db.write_buffered(batch); + Ok(()) + } } -impl LightChainClient for Client { +impl LightChainClient for Client { + fn add_listener(&self, listener: Weak) { + Client::add_listener(self, listener) + } + fn chain_info(&self) -> BlockChainInfo { Client::chain_info(self) } fn queue_header(&self, header: Header) -> Result { @@ -482,7 +593,7 @@ impl LightChainClient for Client { Client::cht_root(self, i) } - fn eip86_transition(&self) -> u64 { + fn eip86_transition(&self) -> BlockNumber { self.engine().params().eip86_transition } @@ -490,3 +601,38 @@ impl LightChainClient for Client { Client::report(self) } } + +impl ::ethcore::client::EngineClient for Client { + fn update_sealing(&self) { } + fn submit_seal(&self, _block_hash: H256, _seal: Vec>) { } + fn broadcast_consensus_message(&self, _message: Vec) { } + + fn epoch_transition_for(&self, parent_hash: H256) -> Option { + self.chain.epoch_transition_for(parent_hash).map(|(hdr, proof)| EpochTransition { + block_hash: hdr.hash(), + block_number: hdr.number(), + proof: proof, + }) + } + + fn chain_info(&self) -> BlockChainInfo { + Client::chain_info(self) + } + + fn call_contract(&self, _id: BlockId, _address: Address, _data: Vec) -> Result, String> { + Err("Contract calling not supported by light client".into()) + } + + fn transact_contract(&self, _address: Address, _data: Vec) + -> Result + { + // TODO: these are only really used for misbehavior reporting. + // no relevant clients will be running light clients, but maybe + // they could be at some point? + Err(TransactionError::LimitReached.into()) + } + + fn block_number(&self, id: BlockId) -> Option { + self.block_header(id).map(|hdr| hdr.number()) + } +} diff --git a/ethcore/light/src/client/service.rs b/ethcore/light/src/client/service.rs index 99dccc999..20aea69ce 100644 --- a/ethcore/light/src/client/service.rs +++ b/ethcore/light/src/client/service.rs @@ -30,7 +30,7 @@ use util::kvdb::{Database, DatabaseConfig}; use cache::Cache; use parking_lot::Mutex; -use super::{Client, Config as ClientConfig}; +use super::{ChainDataFetcher, Client, Config as ClientConfig}; /// Errors on service initialization. #[derive(Debug)] @@ -51,14 +51,14 @@ impl fmt::Display for Error { } /// Light client service. -pub struct Service { - client: Arc, +pub struct Service { + client: Arc>, io_service: IoService, } -impl Service { +impl Service { /// Start the service: initialize I/O workers and client itself. - pub fn start(config: ClientConfig, spec: &Spec, path: &Path, cache: Arc>) -> Result { + pub fn start(config: ClientConfig, spec: &Spec, fetcher: T, path: &Path, cache: Arc>) -> Result { // initialize database. 
let mut db_config = DatabaseConfig::with_columns(db::NUM_COLUMNS); @@ -81,10 +81,14 @@ impl Service { db, db::COL_LIGHT_CHAIN, spec, + fetcher, io_service.channel(), cache, ).map_err(Error::Database)?); + io_service.register_handler(Arc::new(ImportBlocks(client.clone()))).map_err(Error::Io)?; + spec.engine.register_client(Arc::downgrade(&client) as _); + Ok(Service { client: client, io_service: io_service, @@ -97,14 +101,14 @@ impl Service { } /// Get a handle to the client. - pub fn client(&self) -> &Arc { + pub fn client(&self) -> &Arc> { &self.client } } -struct ImportBlocks(Arc); +struct ImportBlocks(Arc>); -impl IoHandler for ImportBlocks { +impl IoHandler for ImportBlocks { fn message(&self, _io: &IoContext, message: &ClientIoMessage) { if let ClientIoMessage::BlockVerified = *message { self.0.import_verified(); @@ -120,6 +124,7 @@ mod tests { use std::sync::Arc; use cache::Cache; + use client::fetch; use time::Duration; use parking_lot::Mutex; @@ -129,6 +134,6 @@ mod tests { let temp_path = RandomTempPath::new(); let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); - Service::start(Default::default(), &spec, temp_path.as_path(), cache).unwrap(); + Service::start(Default::default(), &spec, fetch::unavailable(), temp_path.as_path(), cache).unwrap(); } } diff --git a/ethcore/light/src/net/load_timer.rs b/ethcore/light/src/net/load_timer.rs index 7b78fc693..8df8fdf17 100644 --- a/ethcore/light/src/net/load_timer.rs +++ b/ethcore/light/src/net/load_timer.rs @@ -62,6 +62,7 @@ fn hardcoded_serve_time(kind: Kind) -> u64 { Kind::Storage => 2_000_000, Kind::Code => 1_500_000, Kind::Execution => 250, // per gas. + Kind::Signal => 500_000, } } diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index cccb32458..968b98281 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -104,9 +104,8 @@ mod packet { // relay transactions to peers. pub const SEND_TRANSACTIONS: u8 = 0x06; - // request and respond with epoch transition proof - pub const REQUEST_EPOCH_PROOF: u8 = 0x07; - pub const EPOCH_PROOF: u8 = 0x08; + // two packets were previously meant to be reserved for epoch proofs. + // these have since been moved to requests. } // timeouts for different kinds of requests. all values are in milliseconds. @@ -124,6 +123,7 @@ mod timeout { pub const CONTRACT_CODE: i64 = 100; pub const HEADER_PROOF: i64 = 100; pub const TRANSACTION_PROOF: i64 = 1000; // per gas? + pub const EPOCH_SIGNAL: i64 = 200; } /// A request id. @@ -584,12 +584,6 @@ impl LightProtocol { packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, rlp), - packet::REQUEST_EPOCH_PROOF | packet::EPOCH_PROOF => { - // ignore these for now, but leave them specified. 
- debug!(target: "pip", "Ignoring request/response for epoch proof"); - Ok(()) - } - other => { Err(Error::UnrecognizedPacket(other)) } @@ -952,6 +946,7 @@ impl LightProtocol { CompleteRequest::Storage(req) => self.provider.storage_proof(req).map(Response::Storage), CompleteRequest::Code(req) => self.provider.contract_code(req).map(Response::Code), CompleteRequest::Execution(req) => self.provider.transaction_proof(req).map(Response::Execution), + CompleteRequest::Signal(req) => self.provider.epoch_signal(req).map(Response::Signal), } }); diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index 39eb33106..8c2e89eec 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -91,6 +91,7 @@ pub struct CostTable { code: U256, header_proof: U256, transaction_proof: U256, // cost per gas. + epoch_signal: U256, } impl Default for CostTable { @@ -107,6 +108,7 @@ impl Default for CostTable { code: 20000.into(), header_proof: 15000.into(), transaction_proof: 2.into(), + epoch_signal: 10000.into(), } } } @@ -121,7 +123,7 @@ impl Encodable for CostTable { s.append(cost); } - s.begin_list(10).append(&self.base); + s.begin_list(11).append(&self.base); append_cost(s, &self.headers, request::Kind::Headers); append_cost(s, &self.transaction_index, request::Kind::TransactionIndex); append_cost(s, &self.body, request::Kind::Body); @@ -131,6 +133,7 @@ impl Encodable for CostTable { append_cost(s, &self.code, request::Kind::Code); append_cost(s, &self.header_proof, request::Kind::HeaderProof); append_cost(s, &self.transaction_proof, request::Kind::Execution); + append_cost(s, &self.epoch_signal, request::Kind::Signal); } } @@ -147,6 +150,7 @@ impl Decodable for CostTable { let mut code = None; let mut header_proof = None; let mut transaction_proof = None; + let mut epoch_signal = None; for cost_list in rlp.iter().skip(1) { let cost = cost_list.val_at(1)?; @@ -160,6 +164,7 @@ impl Decodable for CostTable { request::Kind::Code => code = Some(cost), request::Kind::HeaderProof => header_proof = Some(cost), request::Kind::Execution => transaction_proof = Some(cost), + request::Kind::Signal => epoch_signal = Some(cost), } } @@ -176,6 +181,7 @@ impl Decodable for CostTable { code: unwrap_cost(code)?, header_proof: unwrap_cost(header_proof)?, transaction_proof: unwrap_cost(transaction_proof)?, + epoch_signal: unwrap_cost(epoch_signal)?, }) } } @@ -238,6 +244,7 @@ impl FlowParams { code: cost_for_kind(Kind::Code), header_proof: cost_for_kind(Kind::HeaderProof), transaction_proof: cost_for_kind(Kind::Execution), + epoch_signal: cost_for_kind(Kind::Signal), }; FlowParams { @@ -263,7 +270,8 @@ impl FlowParams { storage: free_cost.clone(), code: free_cost.clone(), header_proof: free_cost.clone(), - transaction_proof: free_cost, + transaction_proof: free_cost.clone(), + epoch_signal: free_cost, } } } @@ -293,6 +301,7 @@ impl FlowParams { Request::Storage(_) => self.costs.storage, Request::Code(_) => self.costs.code, Request::Execution(ref req) => self.costs.transaction_proof * req.gas, + Request::Signal(_) => self.costs.epoch_signal, } } diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index e83c33bff..7ec668884 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -139,6 +139,7 @@ fn compute_timeout(reqs: &Requests) -> Duration { Request::Storage(_) => timeout::PROOF, Request::Code(_) => timeout::CONTRACT_CODE, Request::Execution(_) => 
timeout::TRANSACTION_PROOF, + Request::Signal(_) => timeout::EPOCH_SIGNAL, } })) } diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 8e928dd22..539a60ffb 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -158,6 +158,12 @@ impl Provider for TestProvider { None } + fn epoch_signal(&self, _req: request::CompleteSignalRequest) -> Option { + Some(request::SignalResponse { + signal: vec![1, 2, 3, 4], + }) + } + fn ready_transactions(&self) -> Vec { self.0.client.ready_transactions() } @@ -523,6 +529,50 @@ fn get_contract_code() { proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } +#[test] +fn epoch_signal() { + let capabilities = capabilities(); + + let (provider, proto) = setup(capabilities.clone()); + let flow_params = proto.flow_params.read().clone(); + + let cur_status = status(provider.client.chain_info()); + + { + let packet_body = write_handshake(&cur_status, &capabilities, &proto); + proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body.clone())); + proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &packet_body); + } + + let req_id = 112; + let request = Request::Signal(request::IncompleteSignalRequest { + block_hash: H256([1; 32]).into(), + }); + + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); + + let response = { + let response = vec![Response::Signal(SignalResponse { + signal: vec![1, 2, 3, 4], + })]; + + let limit = *flow_params.limit(); + let cost = flow_params.compute_cost_multi(requests.requests()); + + println!("limit = {}, cost = {}", limit, cost); + let new_creds = limit - cost; + + let mut response_stream = RlpStream::new_list(3); + response_stream.append(&req_id).append(&new_creds).append_list(&response); + + response_stream.out() + }; + + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); +} + #[test] fn proof_of_execution() { let capabilities = capabilities(); diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index d67b7dc4e..6a9ecb4d1 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -195,6 +195,8 @@ fn guess_capabilities(requests: &[CheckedRequest]) -> Capabilities { caps.serve_headers = true, CheckedRequest::HeaderByHash(_, _) => caps.serve_headers = true, + CheckedRequest::Signal(_, _) => + caps.serve_headers = true, CheckedRequest::Body(ref req, _) => if let Ok(ref hdr) = req.0.as_ref() { update_since(&mut caps.serve_chain_since, hdr.number()); }, diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 9f03955da..d9afd5582 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -20,7 +20,7 @@ use std::sync::Arc; use ethcore::basic_account::BasicAccount; use ethcore::encoded; -use ethcore::engines::Engine; +use ethcore::engines::{Engine, StateDependentProof}; use ethcore::receipt::Receipt; use ethcore::state::{self, ProvedExecution}; use ethcore::transaction::SignedTransaction; @@ -56,6 +56,8 @@ pub enum Request { Code(Code), /// A request for proof of execution. Execution(TransactionProof), + /// A request for epoch change signal. + Signal(Signal), } /// A request argument. 
@@ -136,6 +138,7 @@ impl_single!(Body, Body, encoded::Block); impl_single!(Account, Account, Option); impl_single!(Code, Code, Bytes); impl_single!(Execution, TransactionProof, super::ExecutionResult); +impl_single!(Signal, Signal, Vec); macro_rules! impl_args { () => { @@ -244,6 +247,7 @@ pub enum CheckedRequest { Account(Account, net_request::IncompleteAccountRequest), Code(Code, net_request::IncompleteCodeRequest), Execution(TransactionProof, net_request::IncompleteExecutionRequest), + Signal(Signal, net_request::IncompleteSignalRequest) } impl From for CheckedRequest { @@ -302,6 +306,12 @@ impl From for CheckedRequest { }; CheckedRequest::Execution(req, net_req) } + Request::Signal(req) => { + let net_req = net_request::IncompleteSignalRequest { + block_hash: req.hash.into(), + }; + CheckedRequest::Signal(req, net_req) + } } } } @@ -319,6 +329,7 @@ impl CheckedRequest { CheckedRequest::Account(_, req) => NetRequest::Account(req), CheckedRequest::Code(_, req) => NetRequest::Code(req), CheckedRequest::Execution(_, req) => NetRequest::Execution(req), + CheckedRequest::Signal(_, req) => NetRequest::Signal(req), } } @@ -446,6 +457,7 @@ macro_rules! match_me { CheckedRequest::Account($check, $req) => $e, CheckedRequest::Code($check, $req) => $e, CheckedRequest::Execution($check, $req) => $e, + CheckedRequest::Signal($check, $req) => $e, } } } @@ -473,6 +485,7 @@ impl IncompleteRequest for CheckedRequest { CheckedRequest::Account(_, ref req) => req.check_outputs(f), CheckedRequest::Code(_, ref req) => req.check_outputs(f), CheckedRequest::Execution(_, ref req) => req.check_outputs(f), + CheckedRequest::Signal(_, ref req) => req.check_outputs(f), } } @@ -493,6 +506,7 @@ impl IncompleteRequest for CheckedRequest { CheckedRequest::Account(_, req) => req.complete().map(CompleteRequest::Account), CheckedRequest::Code(_, req) => req.complete().map(CompleteRequest::Code), CheckedRequest::Execution(_, req) => req.complete().map(CompleteRequest::Execution), + CheckedRequest::Signal(_, req) => req.complete().map(CompleteRequest::Signal), } } @@ -544,6 +558,9 @@ impl net_request::CheckedRequest for CheckedRequest { CheckedRequest::Execution(ref prover, _) => expect!((&NetResponse::Execution(ref res), _) => prover.check_response(cache, &res.items).map(Response::Execution)), + CheckedRequest::Signal(ref prover, _) => + expect!((&NetResponse::Signal(ref res), _) => + prover.check_response(cache, &res.signal).map(Response::Signal)), } } } @@ -567,6 +584,8 @@ pub enum Response { Code(Vec), /// Response to a request for proved execution. Execution(super::ExecutionResult), + /// Response to a request for epoch change signal. + Signal(Vec), } impl net_request::ResponseLike for Response { @@ -850,6 +869,27 @@ impl TransactionProof { } } +/// Request for epoch signal. +/// Provide engine and state-dependent proof checker. +#[derive(Clone)] +pub struct Signal { + /// Block hash and number to fetch proof for. + pub hash: H256, + /// Consensus engine, used to check the proof. + pub engine: Arc, + /// Special checker for the proof. + pub proof_check: Arc, +} + +impl Signal { + /// Check the signal, returning the signal or indicate that it's bad. 
+ pub fn check_response(&self, _: &Mutex<::cache::Cache>, signal: &[u8]) -> Result, Error> { + self.proof_check.check_proof(&*self.engine, signal) + .map(|_| signal.to_owned()) + .map_err(|_| Error::BadProof) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index 6db81dcdd..d71a5fff0 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -127,6 +127,9 @@ pub trait Provider: Send + Sync { /// Provide a proof-of-execution for the given transaction proof request. /// Returns a vector of all state items necessary to execute the transaction. fn transaction_proof(&self, req: request::CompleteExecutionRequest) -> Option; + + /// Provide epoch signal data at given block hash. This should be just the + fn epoch_signal(&self, req: request::CompleteSignalRequest) -> Option; } // Implementation of a light client data provider for a client. @@ -265,6 +268,12 @@ impl Provider for T { fn ready_transactions(&self) -> Vec { BlockChainClient::ready_transactions(self) } + + fn epoch_signal(&self, req: request::CompleteSignalRequest) -> Option { + self.epoch_signal(req.block_hash).map(|signal| request::SignalResponse { + signal: signal, + }) + } } /// The light client "provider" implementation. This wraps a `LightClient` and @@ -330,6 +339,10 @@ impl Provider for LightProvider { None } + fn epoch_signal(&self, _req: request::CompleteSignalRequest) -> Option { + None + } + fn ready_transactions(&self) -> Vec { let chain_info = self.chain_info(); self.txqueue.read().ready_transactions(chain_info.best_block_number, chain_info.best_block_timestamp) diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 38e736673..c623ca656 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -67,6 +67,11 @@ pub use self::execution::{ Incomplete as IncompleteExecutionRequest, Response as ExecutionResponse, }; +pub use self::epoch_signal::{ + Complete as CompleteSignalRequest, + Incomplete as IncompleteSignalRequest, + Response as SignalResponse, +}; pub use self::builder::{RequestBuilder, Requests}; @@ -261,6 +266,8 @@ pub enum Request { Code(IncompleteCodeRequest), /// A request for proof of execution, Execution(IncompleteExecutionRequest), + /// A request for an epoch signal. + Signal(IncompleteSignalRequest), } /// All request types, in an answerable state. @@ -284,6 +291,8 @@ pub enum CompleteRequest { Code(CompleteCodeRequest), /// A request for proof of execution, Execution(CompleteExecutionRequest), + /// A request for an epoch signal. 
+ Signal(CompleteSignalRequest), } impl CompleteRequest { @@ -299,6 +308,7 @@ impl CompleteRequest { CompleteRequest::Storage(_) => Kind::Storage, CompleteRequest::Code(_) => Kind::Code, CompleteRequest::Execution(_) => Kind::Execution, + CompleteRequest::Signal(_) => Kind::Signal, } } } @@ -316,6 +326,7 @@ impl Request { Request::Storage(_) => Kind::Storage, Request::Code(_) => Kind::Code, Request::Execution(_) => Kind::Execution, + Request::Signal(_) => Kind::Signal, } } } @@ -332,6 +343,7 @@ impl Decodable for Request { Kind::Storage => Ok(Request::Storage(rlp.val_at(1)?)), Kind::Code => Ok(Request::Code(rlp.val_at(1)?)), Kind::Execution => Ok(Request::Execution(rlp.val_at(1)?)), + Kind::Signal => Ok(Request::Signal(rlp.val_at(1)?)), } } } @@ -353,6 +365,7 @@ impl Encodable for Request { Request::Storage(ref req) => s.append(req), Request::Code(ref req) => s.append(req), Request::Execution(ref req) => s.append(req), + Request::Signal(ref req) => s.append(req), }; } } @@ -374,6 +387,7 @@ impl IncompleteRequest for Request { Request::Storage(ref req) => req.check_outputs(f), Request::Code(ref req) => req.check_outputs(f), Request::Execution(ref req) => req.check_outputs(f), + Request::Signal(ref req) => req.check_outputs(f), } } @@ -388,6 +402,7 @@ impl IncompleteRequest for Request { Request::Storage(ref req) => req.note_outputs(f), Request::Code(ref req) => req.note_outputs(f), Request::Execution(ref req) => req.note_outputs(f), + Request::Signal(ref req) => req.note_outputs(f), } } @@ -402,6 +417,7 @@ impl IncompleteRequest for Request { Request::Storage(ref mut req) => req.fill(oracle), Request::Code(ref mut req) => req.fill(oracle), Request::Execution(ref mut req) => req.fill(oracle), + Request::Signal(ref mut req) => req.fill(oracle), } } @@ -416,6 +432,7 @@ impl IncompleteRequest for Request { Request::Storage(req) => req.complete().map(CompleteRequest::Storage), Request::Code(req) => req.complete().map(CompleteRequest::Code), Request::Execution(req) => req.complete().map(CompleteRequest::Execution), + Request::Signal(req) => req.complete().map(CompleteRequest::Signal), } } @@ -430,6 +447,7 @@ impl IncompleteRequest for Request { Request::Storage(ref mut req) => req.adjust_refs(mapping), Request::Code(ref mut req) => req.adjust_refs(mapping), Request::Execution(ref mut req) => req.adjust_refs(mapping), + Request::Signal(ref mut req) => req.adjust_refs(mapping), } } } @@ -471,6 +489,8 @@ pub enum Kind { Code = 7, /// A request for transaction execution + state proof. Execution = 8, + /// A request for epoch transition signal. + Signal = 9, } impl Decodable for Kind { @@ -485,6 +505,7 @@ impl Decodable for Kind { 6 => Ok(Kind::Storage), 7 => Ok(Kind::Code), 8 => Ok(Kind::Execution), + 9 => Ok(Kind::Signal), _ => Err(DecoderError::Custom("Unknown PIP request ID.")), } } @@ -517,6 +538,8 @@ pub enum Response { Code(CodeResponse), /// A response for proof of execution, Execution(ExecutionResponse), + /// A response for epoch change signal. 
+ Signal(SignalResponse), } impl ResponseLike for Response { @@ -532,6 +555,7 @@ impl ResponseLike for Response { Response::Storage(ref res) => res.fill_outputs(f), Response::Code(ref res) => res.fill_outputs(f), Response::Execution(ref res) => res.fill_outputs(f), + Response::Signal(ref res) => res.fill_outputs(f), } } } @@ -549,6 +573,7 @@ impl Response { Response::Storage(_) => Kind::Storage, Response::Code(_) => Kind::Code, Response::Execution(_) => Kind::Execution, + Response::Signal(_) => Kind::Signal, } } } @@ -565,6 +590,7 @@ impl Decodable for Response { Kind::Storage => Ok(Response::Storage(rlp.val_at(1)?)), Kind::Code => Ok(Response::Code(rlp.val_at(1)?)), Kind::Execution => Ok(Response::Execution(rlp.val_at(1)?)), + Kind::Signal => Ok(Response::Signal(rlp.val_at(1)?)), } } } @@ -586,6 +612,7 @@ impl Encodable for Response { Response::Storage(ref res) => s.append(res), Response::Code(ref res) => s.append(res), Response::Execution(ref res) => s.append(res), + Response::Signal(ref res) => s.append(res), }; } } @@ -760,8 +787,8 @@ pub mod header { pub mod header_proof { use super::{Field, NoSuchOutput, OutputKind, Output}; use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; - use bigint::hash::H256; use bigint::prelude::U256; + use bigint::hash::H256; use util::Bytes; /// Potentially incomplete header proof request. @@ -1091,8 +1118,8 @@ pub mod block_body { /// A request for an account proof. pub mod account { use super::{Field, NoSuchOutput, OutputKind, Output}; - use bigint::hash::H256; use bigint::prelude::U256; + use bigint::hash::H256; use util::Bytes; /// Potentially incomplete request for an account proof. @@ -1388,8 +1415,8 @@ pub mod execution { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::transaction::Action; use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; - use bigint::hash::H256; use bigint::prelude::U256; + use bigint::hash::H256; use util::{Bytes, Address, DBValue}; /// Potentially incomplete execution proof request. @@ -1509,6 +1536,105 @@ pub mod execution { } } +/// A request for epoch signal data. +pub mod epoch_signal { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; + use bigint::hash::H256; + use util::Bytes; + + /// Potentially incomplete epoch signal request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// The block hash to request the signal for. 
+ pub block_hash: Field, + } + + impl Decodable for Incomplete { + fn decode(rlp: &UntrustedRlp) -> Result { + Ok(Incomplete { + block_hash: rlp.val_at(0)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(1).append(&self.block_hash); + } + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + type Response = Response; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)?; + } + + Ok(()) + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash.into()), + _ => Field::BackReference(req, idx), + } + } + } + + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + }) + } + + fn adjust_refs(&mut self, mut mapping: F) where F: FnMut(usize) -> usize { + self.block_hash.adjust_req(&mut mapping); + } + } + + /// A complete request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The block hash to request the epoch signal for. + pub block_hash: H256, + } + + /// The output of a request for an epoch signal. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// The requested epoch signal. + pub signal: Bytes, + } + + impl super::ResponseLike for Response { + /// Fill reusable outputs by providing them to the function. + fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + } + + impl Decodable for Response { + fn decode(rlp: &UntrustedRlp) -> Result { + + Ok(Response { + signal: rlp.as_val()?, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.append(&self.signal); + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -1797,4 +1923,22 @@ mod tests { let raw = ::rlp::encode_list(&reqs); assert_eq!(::rlp::decode_list::(&raw), reqs); } + + #[test] + fn epoch_signal_roundtrip() { + let req = IncompleteSignalRequest { + block_hash: Field::Scalar(Default::default()), + }; + + let full_req = Request::Signal(req.clone()); + let res = SignalResponse { + signal: vec![1, 2, 3, 4, 5, 6, 7, 6, 5, 4], + }; + let full_res = Response::Signal(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 8948faa25..0eda5048e 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -43,7 +43,7 @@ use client::ancient_import::AncientVerifier; use client::Error as ClientError; use client::{ BlockId, TransactionId, UncleId, TraceId, ClientConfig, BlockChainClient, - MiningBlockChainClient, EngineClient, TraceFilter, CallAnalytics, BlockImportError, Mode, + MiningBlockChainClient, TraceFilter, CallAnalytics, BlockImportError, Mode, ChainNotify, PruningInfo, ProvingBlockChainClient, }; use encoded; @@ -771,7 +771,7 @@ impl Client { res.map(|(output, proof)| (output, proof.into_iter().map(|x| x.into_vec()).collect())) }; - match (with_state)(&call) { + match with_state.generate_proof(&call) { Ok(proof) => proof, Err(e) => { warn!(target: "client", "Failed to generate transition proof for block {}: {}", hash, e); @@ 
-1937,7 +1937,7 @@ impl MiningBlockChainClient for Client { } } -impl EngineClient for Client { +impl super::traits::EngineClient for Client { fn update_sealing(&self) { self.miner.update_sealing(self) } @@ -1955,6 +1955,22 @@ impl EngineClient for Client { fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition> { self.chain.read().epoch_transition_for(parent_hash) } + + fn chain_info(&self) -> BlockChainInfo { + BlockChainClient::chain_info(self) + } + + fn call_contract(&self, id: BlockId, address: Address, data: Bytes) -> Result { + BlockChainClient::call_contract(self, id, address, data) + } + + fn transact_contract(&self, address: Address, data: Bytes) -> Result { + BlockChainClient::transact_contract(self, address, data) + } + + fn block_number(&self, id: BlockId) -> Option { + BlockChainClient::block_number(self, id) + } } impl ProvingBlockChainClient for Client { @@ -1969,27 +1985,30 @@ impl ProvingBlockChainClient for Client { } fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<(Bytes, Vec)> { - let (state, mut env_info) = match (self.state_at(id), self.env_info(id)) { + let (header, mut env_info) = match (self.block_header(id), self.env_info(id)) { (Some(s), Some(e)) => (s, e), _ => return None, }; env_info.gas_limit = transaction.gas.clone(); let mut jdb = self.state_db.lock().journal_db().boxed_clone(); - let backend = state::backend::Proving::new(jdb.as_hashdb_mut()); - let mut state = state.replace_backend(backend); - let options = TransactOptions::with_no_tracing().dont_check_nonce(); - let res = Executive::new(&mut state, &env_info, &*self.engine).transact(&transaction, options); + state::prove_transaction( + jdb.as_hashdb_mut(), + header.state_root().clone(), + &transaction, + &*self.engine, + &env_info, + self.factories.clone(), + false, + ) + } - match res { - Err(ExecutionError::Internal(_)) => None, - Err(e) => { - trace!(target: "client", "Proved call failed: {}", e); - Some((Vec::new(), state.drop().1.extract_proof())) - } - Ok(res) => Some((res.output, state.drop().1.extract_proof())), - } + + fn epoch_signal(&self, hash: H256) -> Option> { + // pending transitions are never deleted, and do not contain + // finality proofs by definition. 
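+		// `None` is returned when no pending transition exists for `hash`.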
+ self.chain.read().get_pending_transition(hash).map(|pending| pending.proof) } } diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index aabd744f9..0cd9fedc8 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -33,7 +33,7 @@ use devtools::*; use transaction::{Transaction, LocalizedTransaction, PendingTransaction, SignedTransaction, Action}; use blockchain::TreeRoute; use client::{ - BlockChainClient, MiningBlockChainClient, EngineClient, BlockChainInfo, BlockStatus, BlockId, + BlockChainClient, MiningBlockChainClient, BlockChainInfo, BlockStatus, BlockId, TransactionId, UncleId, TraceId, TraceFilter, LastHashes, CallAnalytics, BlockImportError, ProvingBlockChainClient, }; @@ -801,9 +801,13 @@ impl ProvingBlockChainClient for TestBlockChainClient { fn prove_transaction(&self, _: SignedTransaction, _: BlockId) -> Option<(Bytes, Vec)> { None } + + fn epoch_signal(&self, _: H256) -> Option> { + None + } } -impl EngineClient for TestBlockChainClient { +impl super::traits::EngineClient for TestBlockChainClient { fn update_sealing(&self) { self.miner.update_sealing(self) } @@ -819,4 +823,20 @@ impl EngineClient for TestBlockChainClient { fn epoch_transition_for(&self, _block_hash: H256) -> Option<::engines::EpochTransition> { None } + + fn chain_info(&self) -> BlockChainInfo { + BlockChainClient::chain_info(self) + } + + fn call_contract(&self, id: BlockId, address: Address, data: Bytes) -> Result { + BlockChainClient::call_contract(self, id, address, data) + } + + fn transact_contract(&self, address: Address, data: Bytes) -> Result { + BlockChainClient::transact_contract(self, address, data) + } + + fn block_number(&self, id: BlockId) -> Option { + BlockChainClient::block_number(self, id) + } } diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index 4a1f8a0c5..45736e2c5 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -317,7 +317,7 @@ pub trait MiningBlockChainClient: BlockChainClient { } /// Client facilities used by internally sealing Engines. -pub trait EngineClient: MiningBlockChainClient { +pub trait EngineClient: Sync + Send { /// Make a new block and seal it. fn update_sealing(&self); @@ -333,6 +333,17 @@ pub trait EngineClient: MiningBlockChainClient { /// /// The block corresponding the the parent hash must be stored already. fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition>; + + /// Get block chain info. + fn chain_info(&self) -> BlockChainInfo; + + /// Like `call`, but with various defaults. Designed to be used for calling contracts. + fn call_contract(&self, id: BlockId, address: Address, data: Bytes) -> Result; + + /// Import a transaction: used for misbehaviour reporting. + fn transact_contract(&self, address: Address, data: Bytes) -> Result; + + fn block_number(&self, id: BlockId) -> Option; } /// Extended client interface for providing proofs of the state. @@ -352,4 +363,7 @@ pub trait ProvingBlockChainClient: BlockChainClient { /// Returns the output of the call and a vector of database items necessary /// to reproduce it. fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<(Bytes, Vec)>; + + /// Get an epoch change signal by block hash. 
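+	/// Returns `None` if no epoch signal is stored for the given block.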
+ fn epoch_signal(&self, hash: H256) -> Option>; } diff --git a/ethcore/src/engines/authority_round/mod.rs b/ethcore/src/engines/authority_round/mod.rs index fc0080f8a..b50ebbc4f 100644 --- a/ethcore/src/engines/authority_round/mod.rs +++ b/ethcore/src/engines/authority_round/mod.rs @@ -25,7 +25,7 @@ use std::cmp; use account_provider::AccountProvider; use block::*; use builtin::Builtin; -use client::{Client, EngineClient}; +use client::EngineClient; use engines::{Call, Engine, Seal, EngineError, ConstructedVerifier}; use error::{Error, TransactionError, BlockError}; use ethjson; @@ -647,6 +647,8 @@ impl Engine for AuthorityRound { (&active_set as &_, epoch_manager.epoch_transition_number) }; + // always report with "self.validators" so that the report actually gets + // to the contract. let report = |report| match report { Report::Benign(address, block_number) => self.validators.report_benign(&address, set_number, block_number), @@ -739,13 +741,18 @@ impl Engine for AuthorityRound { { if let Ok(finalized) = epoch_manager.finality_checker.push_hash(chain_head.hash(), *chain_head.author()) { let mut finalized = finalized.into_iter(); - while let Some(hash) = finalized.next() { - if let Some(pending) = transition_store(hash) { - let finality_proof = ::std::iter::once(hash) + while let Some(finalized_hash) = finalized.next() { + if let Some(pending) = transition_store(finalized_hash) { + let finality_proof = ::std::iter::once(finalized_hash) .chain(finalized) .chain(epoch_manager.finality_checker.unfinalized_hashes()) - .map(|hash| chain(hash) - .expect("these headers fetched before when constructing finality checker; qed")) + .map(|h| if h == chain_head.hash() { + // chain closure only stores ancestry, but the chain head is also + // unfinalized. + chain_head.clone() + } else { + chain(h).expect("these headers fetched before when constructing finality checker; qed") + }) .collect::>(); // this gives us the block number for `hash`, assuming it's ancestry. 
@@ -809,9 +816,9 @@ impl Engine for AuthorityRound { Ok(()) } - fn register_client(&self, client: Weak) { + fn register_client(&self, client: Weak) { *self.client.write() = Some(client.clone()); - self.validators.register_contract(client); + self.validators.register_client(client); } fn set_signer(&self, ap: Arc, address: Address, password: String) { diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index 49dc71c0e..b96769837 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -34,7 +34,7 @@ use error::{BlockError, Error}; use evm::Schedule; use ethjson; use header::{Header, BlockNumber}; -use client::Client; +use client::EngineClient; use semantic_version::SemanticVersion; use super::signer::EngineSigner; use super::validator_set::{ValidatorSet, SimpleList, new_validator_set}; @@ -237,8 +237,8 @@ impl Engine for BasicAuthority { } } - fn register_client(&self, client: Weak) { - self.validators.register_contract(client); + fn register_client(&self, client: Weak) { + self.validators.register_client(client); } fn set_signer(&self, ap: Arc, address: Address, password: String) { diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs index 2f48150ab..fcf387f8b 100644 --- a/ethcore/src/engines/mod.rs +++ b/ethcore/src/engines/mod.rs @@ -44,7 +44,7 @@ use self::epoch::PendingTransition; use account_provider::AccountProvider; use block::ExecutedBlock; use builtin::Builtin; -use client::Client; +use client::EngineClient; use vm::{EnvInfo, LastHashes, Schedule, CreateContractAddress}; use error::Error; use header::{Header, BlockNumber}; @@ -124,12 +124,22 @@ pub type Headers<'a> = Fn(H256) -> Option
+ 'a; /// Type alias for a function we can query pending transitions by block hash through. pub type PendingTransitionStore<'a> = Fn(H256) -> Option + 'a; +/// Proof dependent on state. +pub trait StateDependentProof: Send + Sync { + /// Generate a proof, given the state. + fn generate_proof(&self, caller: &Call) -> Result, String>; + /// Check a proof generated elsewhere (potentially by a peer). + // `engine` needed to check state proofs, while really this should + // just be state machine params. + fn check_proof(&self, engine: &Engine, proof: &[u8]) -> Result<(), String>; +} + /// Proof generated on epoch change. pub enum Proof { - /// Known proof (exctracted from signal) + /// Known proof (extracted from signal) Known(Vec), - /// Extract proof from caller. - WithState(Box Result, String>>), + /// State dependent proof. + WithState(Arc), } /// Generated epoch verifier. @@ -361,7 +371,7 @@ pub trait Engine : Sync + Send { fn sign(&self, _hash: H256) -> Result { unimplemented!() } /// Add Client which can be used for sealing, querying the state and sending messages. - fn register_client(&self, _client: Weak) {} + fn register_client(&self, _client: Weak) {} /// Trigger next step of the consensus engine. fn step(&self) {} diff --git a/ethcore/src/engines/tendermint/mod.rs b/ethcore/src/engines/tendermint/mod.rs index ee447d8da..dff678c92 100644 --- a/ethcore/src/engines/tendermint/mod.rs +++ b/ethcore/src/engines/tendermint/mod.rs @@ -571,18 +571,35 @@ impl Engine for Tendermint { Ok(()) } - /// Verify validators and gas limit. + /// Verify gas limit. fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> { if header.number() == 0 { return Err(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() }).into()); } + let gas_limit_divisor = self.params().gas_limit_bound_divisor; + let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor; + let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor; + if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas { + self.validators.report_malicious(header.author(), header.number(), header.number(), Default::default()); + return Err(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() }).into()); + } + + Ok(()) + } + + fn verify_block_external(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> { if let Ok(proposal) = ConsensusMessage::new_proposal(header) { let proposer = proposal.verify()?; if !self.is_authority(&proposer) { return Err(EngineError::NotAuthorized(proposer).into()); } - self.check_view_proposer(header.parent_hash(), proposal.vote_step.height, proposal.vote_step.view, &proposer)?; + self.check_view_proposer( + header.parent_hash(), + proposal.vote_step.height, + proposal.vote_step.view, + &proposer + ).map_err(Into::into) } else { let vote_step = VoteStep::new(header.number() as usize, consensus_view(header)?, Step::Precommit); let precommit_hash = message_hash(vote_step.clone(), header.bare_hash()); @@ -608,18 +625,8 @@ impl Engine for Tendermint { } } - self.check_above_threshold(origins.len())? 
+ self.check_above_threshold(origins.len()).map_err(Into::into) } - - let gas_limit_divisor = self.params().gas_limit_bound_divisor; - let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor; - let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor; - if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas { - self.validators.report_malicious(header.author(), header.number(), header.number(), Default::default()); - return Err(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() }).into()); - } - - Ok(()) } fn signals_epoch_end(&self, header: &Header, block: Option<&[u8]>, receipts: Option<&[::receipt::Receipt]>) @@ -754,13 +761,12 @@ impl Engine for Tendermint { self.to_step(next_step); } - fn register_client(&self, client: Weak) { - use client::BlockChainClient; + fn register_client(&self, client: Weak) { if let Some(c) = client.upgrade() { self.height.store(c.chain_info().best_block_number as usize + 1, AtomicOrdering::SeqCst); } *self.client.write() = Some(client.clone()); - self.validators.register_contract(client); + self.validators.register_client(client); } } @@ -888,14 +894,14 @@ mod tests { let seal = proposal_seal(&tap, &header, 0); header.set_seal(seal); // Good proposer. - assert!(engine.verify_block_family(&header, &parent_header, None).is_ok()); + assert!(engine.verify_block_external(&header, None).is_ok()); let validator = insert_and_unlock(&tap, "0"); header.set_author(validator); let seal = proposal_seal(&tap, &header, 0); header.set_seal(seal); // Bad proposer. - match engine.verify_block_family(&header, &parent_header, None) { + match engine.verify_block_external(&header, None) { Err(Error::Engine(EngineError::NotProposer(_))) => {}, _ => panic!(), } @@ -905,7 +911,7 @@ mod tests { let seal = proposal_seal(&tap, &header, 0); header.set_seal(seal); // Not authority. - match engine.verify_block_family(&header, &parent_header, None) { + match engine.verify_block_external(&header, None) { Err(Error::Engine(EngineError::NotAuthorized(_))) => {}, _ => panic!(), }; @@ -935,7 +941,7 @@ mod tests { header.set_seal(seal.clone()); // One good signature is not enough. - match engine.verify_block_family(&header, &parent_header, None) { + match engine.verify_block_external(&header, None) { Err(Error::Engine(EngineError::BadSealFieldSize(_))) => {}, _ => panic!(), } @@ -946,7 +952,7 @@ mod tests { seal[2] = ::rlp::encode_list(&vec![H520::from(signature1.clone()), H520::from(signature0.clone())]).into_vec(); header.set_seal(seal.clone()); - assert!(engine.verify_block_family(&header, &parent_header, None).is_ok()); + assert!(engine.verify_block_external(&header, None).is_ok()); let bad_voter = insert_and_unlock(&tap, "101"); let bad_signature = tap.sign(bad_voter, None, keccak(vote_info)).unwrap(); @@ -955,7 +961,7 @@ mod tests { header.set_seal(seal); // One good and one bad signature. 
- match engine.verify_block_family(&header, &parent_header, None) { + match engine.verify_block_external(&header, None) { Err(Error::Engine(EngineError::NotAuthorized(_))) => {}, _ => panic!(), }; @@ -1001,7 +1007,7 @@ mod tests { let client = generate_dummy_client(0); let notify = Arc::new(TestNotify::default()); client.add_notify(notify.clone()); - engine.register_client(Arc::downgrade(&client)); + engine.register_client(Arc::downgrade(&client) as _); let prevote_current = vote(engine.as_ref(), |mh| tap.sign(v0, None, mh).map(H520::from), h, r, Step::Prevote, proposal); @@ -1019,7 +1025,6 @@ mod tests { fn seal_submission() { use ethkey::{Generator, Random}; use transaction::{Transaction, Action}; - use client::BlockChainClient; let tap = Arc::new(AccountProvider::transient_provider()); // Accounts for signing votes. @@ -1032,7 +1037,7 @@ mod tests { let notify = Arc::new(TestNotify::default()); client.add_notify(notify.clone()); - engine.register_client(Arc::downgrade(&client)); + engine.register_client(Arc::downgrade(&client) as _); let keypair = Random.generate().unwrap(); let transaction = Transaction { diff --git a/ethcore/src/engines/validator_set/contract.rs b/ethcore/src/engines/validator_set/contract.rs index c84c6e448..9a3705b63 100644 --- a/ethcore/src/engines/validator_set/contract.rs +++ b/ethcore/src/engines/validator_set/contract.rs @@ -25,7 +25,7 @@ use util::*; use futures::Future; use native_contracts::ValidatorReport as Provider; -use client::{Client, BlockChainClient}; +use client::EngineClient; use engines::{Call, Engine}; use header::{Header, BlockNumber}; @@ -36,7 +36,7 @@ use super::safe_contract::ValidatorSafeContract; pub struct ValidatorContract { validators: ValidatorSafeContract, provider: Provider, - client: RwLock>>, // TODO [keorn]: remove + client: RwLock>>, // TODO [keorn]: remove } impl ValidatorContract { @@ -120,8 +120,8 @@ impl ValidatorSet for ValidatorContract { } } - fn register_contract(&self, client: Weak) { - self.validators.register_contract(client.clone()); + fn register_client(&self, client: Weak) { + self.validators.register_client(client.clone()); *self.client.write() = Some(client); } } @@ -148,7 +148,7 @@ mod tests { fn fetches_validators() { let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_contract, None); let vc = Arc::new(ValidatorContract::new("0000000000000000000000000000000000000005".parse::
().unwrap())); - vc.register_contract(Arc::downgrade(&client)); + vc.register_client(Arc::downgrade(&client) as _); let last_hash = client.best_block_header().hash(); assert!(vc.contains(&last_hash, &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e".parse::
().unwrap())); assert!(vc.contains(&last_hash, &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1".parse::
().unwrap())); @@ -159,7 +159,7 @@ mod tests { let tap = Arc::new(AccountProvider::transient_provider()); let v1 = tap.insert_account(keccak("1").into(), "").unwrap(); let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_contract, Some(tap.clone())); - client.engine().register_client(Arc::downgrade(&client)); + client.engine().register_client(Arc::downgrade(&client) as _); let validator_contract = "0000000000000000000000000000000000000005".parse::
().unwrap(); // Make sure reporting can be done. diff --git a/ethcore/src/engines/validator_set/mod.rs b/ethcore/src/engines/validator_set/mod.rs index d60518c45..451abe6f2 100644 --- a/ethcore/src/engines/validator_set/mod.rs +++ b/ethcore/src/engines/validator_set/mod.rs @@ -28,7 +28,7 @@ use ids::BlockId; use bigint::hash::H256; use util::{Bytes, Address}; use ethjson::spec::ValidatorSet as ValidatorSpec; -use client::Client; +use client::EngineClient; use header::{Header, BlockNumber}; #[cfg(test)] @@ -142,5 +142,5 @@ pub trait ValidatorSet: Send + Sync { /// Notifies about benign misbehaviour. fn report_benign(&self, _validator: &Address, _set_block: BlockNumber, _block: BlockNumber) {} /// Allows blockchain state access. - fn register_contract(&self, _client: Weak) {} + fn register_client(&self, _client: Weak) {} } diff --git a/ethcore/src/engines/validator_set/multi.rs b/ethcore/src/engines/validator_set/multi.rs index c115d1596..043f8aab6 100644 --- a/ethcore/src/engines/validator_set/multi.rs +++ b/ethcore/src/engines/validator_set/multi.rs @@ -24,7 +24,7 @@ use parking_lot::RwLock; use util::{Bytes, Address}; use ids::BlockId; use header::{BlockNumber, Header}; -use client::{Client, BlockChainClient}; +use client::EngineClient; use super::{SystemCall, ValidatorSet}; type BlockNumberLookup = Box Result + Send + Sync + 'static>; @@ -131,9 +131,9 @@ impl ValidatorSet for Multi { self.correct_set_by_number(set_block).1.report_benign(validator, set_block, block); } - fn register_contract(&self, client: Weak) { + fn register_client(&self, client: Weak) { for set in self.sets.values() { - set.register_contract(client.clone()); + set.register_client(client.clone()); } *self.block_number.write() = Box::new(move |id| client .upgrade() @@ -148,7 +148,7 @@ mod tests { use std::collections::BTreeMap; use hash::keccak; use account_provider::AccountProvider; - use client::{BlockChainClient, EngineClient}; + use client::BlockChainClient; use engines::EpochChange; use engines::validator_set::ValidatorSet; use ethkey::Secret; @@ -170,7 +170,7 @@ mod tests { let v0 = tap.insert_account(s0.clone(), "").unwrap(); let v1 = tap.insert_account(keccak("1").into(), "").unwrap(); let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_multi, Some(tap)); - client.engine().register_client(Arc::downgrade(&client)); + client.engine().register_client(Arc::downgrade(&client) as _); // Make sure txs go through. client.miner().set_gas_floor_target(1_000_000.into()); @@ -178,27 +178,27 @@ mod tests { // Wrong signer for the first block. client.miner().set_engine_signer(v1, "".into()).unwrap(); client.transact_contract(Default::default(), Default::default()).unwrap(); - client.update_sealing(); + ::client::EngineClient::update_sealing(&*client); assert_eq!(client.chain_info().best_block_number, 0); // Right signer for the first block. client.miner().set_engine_signer(v0, "".into()).unwrap(); - client.update_sealing(); + ::client::EngineClient::update_sealing(&*client); assert_eq!(client.chain_info().best_block_number, 1); // This time v0 is wrong. client.transact_contract(Default::default(), Default::default()).unwrap(); - client.update_sealing(); + ::client::EngineClient::update_sealing(&*client); assert_eq!(client.chain_info().best_block_number, 1); client.miner().set_engine_signer(v1, "".into()).unwrap(); - client.update_sealing(); + ::client::EngineClient::update_sealing(&*client); assert_eq!(client.chain_info().best_block_number, 2); // v1 is still good. 
client.transact_contract(Default::default(), Default::default()).unwrap(); - client.update_sealing(); + ::client::EngineClient::update_sealing(&*client); assert_eq!(client.chain_info().best_block_number, 3); // Check syncing. let sync_client = generate_dummy_client_with_spec_and_data(Spec::new_validator_multi, 0, 0, &[]); - sync_client.engine().register_client(Arc::downgrade(&sync_client)); + sync_client.engine().register_client(Arc::downgrade(&sync_client) as _); for i in 1..4 { sync_client.import_block(client.block(BlockId::Number(i)).unwrap().into_inner()).unwrap(); } diff --git a/ethcore/src/engines/validator_set/safe_contract.rs b/ethcore/src/engines/validator_set/safe_contract.rs index 6d5f89182..1d4f9b0be 100644 --- a/ethcore/src/engines/validator_set/safe_contract.rs +++ b/ethcore/src/engines/validator_set/safe_contract.rs @@ -23,14 +23,15 @@ use hash::keccak; use bigint::prelude::U256; use bigint::hash::{H160, H256}; -use parking_lot::RwLock; +use parking_lot::{Mutex, RwLock}; + use util::*; use util::cache::MemoryLruCache; use unexpected::Mismatch; use rlp::{UntrustedRlp, RlpStream}; use basic_types::LogBloom; -use client::{Client, BlockChainClient}; +use client::EngineClient; use engines::{Call, Engine}; use header::Header; use ids::BlockId; @@ -49,12 +50,35 @@ lazy_static! { static ref EVENT_NAME_HASH: H256 = keccak(EVENT_NAME); } +// state-dependent proofs for the safe contract: +// only "first" proofs are such. +struct StateProof { + header: Mutex
, + provider: Provider, +} + +impl ::engines::StateDependentProof for StateProof { + fn generate_proof(&self, caller: &Call) -> Result, String> { + prove_initial(&self.provider, &*self.header.lock(), caller) + } + + fn check_proof(&self, engine: &Engine, proof: &[u8]) -> Result<(), String> { + let (header, state_items) = decode_first_proof(&UntrustedRlp::new(proof)) + .map_err(|e| format!("proof incorrectly encoded: {}", e))?; + if &header != &*self.header.lock(){ + return Err("wrong header in proof".into()); + } + + check_first_proof(engine, &self.provider, header, &state_items).map(|_| ()) + } +} + /// The validator contract should have the following interface: pub struct ValidatorSafeContract { pub address: Address, validators: RwLock>, provider: Provider, - client: RwLock>>, // TODO [keorn]: remove + client: RwLock>>, // TODO [keorn]: remove } // first proof is just a state proof call of `getValidators` at header's state. @@ -68,6 +92,59 @@ fn encode_first_proof(header: &Header, state_items: &[Vec]) -> Bytes { stream.out() } +// check a first proof: fetch the validator set at the given block. +fn check_first_proof(engine: &Engine, provider: &Provider, old_header: Header, state_items: &[DBValue]) + -> Result, String> +{ + use transaction::{Action, Transaction}; + + // TODO: match client contract_call_tx more cleanly without duplication. + const PROVIDED_GAS: u64 = 50_000_000; + + let env_info = ::vm::EnvInfo { + number: old_header.number(), + author: *old_header.author(), + difficulty: *old_header.difficulty(), + gas_limit: PROVIDED_GAS.into(), + timestamp: old_header.timestamp(), + last_hashes: { + // this will break if we don't inclue all 256 last hashes. + let mut last_hashes: Vec<_> = (0..256).map(|_| H256::default()).collect(); + last_hashes[255] = *old_header.parent_hash(); + Arc::new(last_hashes) + }, + gas_used: 0.into(), + }; + + // check state proof using given engine. + let number = old_header.number(); + provider.get_validators(move |a, d| { + let from = Address::default(); + let tx = Transaction { + nonce: engine.account_start_nonce(number), + action: Action::Call(a), + gas: PROVIDED_GAS.into(), + gas_price: U256::default(), + value: U256::default(), + data: d, + }.fake_sign(from); + + let res = ::state::check_proof( + state_items, + *old_header.state_root(), + &tx, + engine, + &env_info, + ); + + match res { + ::state::ProvedExecution::BadProof => Err("Bad proof".into()), + ::state::ProvedExecution::Failed(e) => Err(format!("Failed call: {}", e)), + ::state::ProvedExecution::Complete(e) => Ok(e.output), + } + }).wait() +} + fn decode_first_proof(rlp: &UntrustedRlp) -> Result<(Header, Vec), ::error::Error> { let header = rlp.val_at(0)?; let state_items = rlp.at(1)?.iter().map(|x| { @@ -105,8 +182,7 @@ fn prove_initial(provider: &Provider, header: &Header, caller: &Call) -> Result< Ok(result) }; - provider.get_validators(caller) - .wait() + provider.get_validators(caller).wait() }; res.map(|validators| { @@ -260,9 +336,11 @@ impl ValidatorSet for ValidatorSafeContract { // transition to the first block of a contract requires finality but has no log event. 
if first { debug!(target: "engine", "signalling transition to fresh contract."); - let (provider, header) = (self.provider.clone(), header.clone()); - let with_caller: Box _> = Box::new(move |caller| prove_initial(&provider, &header, caller)); - return ::engines::EpochChange::Yes(::engines::Proof::WithState(with_caller)) + let state_proof = Arc::new(StateProof { + header: Mutex::new(header.clone()), + provider: self.provider.clone(), + }); + return ::engines::EpochChange::Yes(::engines::Proof::WithState(state_proof as Arc<_>)); } // otherwise, we're checking for logs. @@ -291,61 +369,16 @@ impl ValidatorSet for ValidatorSafeContract { fn epoch_set(&self, first: bool, engine: &Engine, _number: ::header::BlockNumber, proof: &[u8]) -> Result<(SimpleList, Option), ::error::Error> { - use transaction::{Action, Transaction}; - let rlp = UntrustedRlp::new(proof); if first { trace!(target: "engine", "Recovering initial epoch set"); - // TODO: match client contract_call_tx more cleanly without duplication. - const PROVIDED_GAS: u64 = 50_000_000; - let (old_header, state_items) = decode_first_proof(&rlp)?; - let old_hash = old_header.hash(); - - let env_info = ::vm::EnvInfo { - number: old_header.number(), - author: *old_header.author(), - difficulty: *old_header.difficulty(), - gas_limit: PROVIDED_GAS.into(), - timestamp: old_header.timestamp(), - last_hashes: { - // this will break if we don't inclue all 256 last hashes. - let mut last_hashes: Vec<_> = (0..256).map(|_| H256::default()).collect(); - last_hashes[255] = *old_header.parent_hash(); - Arc::new(last_hashes) - }, - gas_used: 0.into(), - }; - - // check state proof using given engine. let number = old_header.number(); - let addresses = self.provider.get_validators(move |a, d| { - let from = Address::default(); - let tx = Transaction { - nonce: engine.account_start_nonce(number), - action: Action::Call(a), - gas: PROVIDED_GAS.into(), - gas_price: U256::default(), - value: U256::default(), - data: d, - }.fake_sign(from); - - let res = ::state::check_proof( - &state_items, - *old_header.state_root(), - &tx, - engine, - &env_info, - ); - - match res { - ::state::ProvedExecution::BadProof => Err("Bad proof".into()), - ::state::ProvedExecution::Failed(e) => Err(format!("Failed call: {}", e)), - ::state::ProvedExecution::Complete(e) => Ok(e.output), - } - }).wait().map_err(::engines::EngineError::InsufficientProof)?; + let old_hash = old_header.hash(); + let addresses = check_first_proof(engine, &self.provider, old_header, &state_items) + .map_err(::engines::EngineError::InsufficientProof)?; trace!(target: "engine", "extracted epoch set at #{}: {} addresses", number, addresses.len()); @@ -419,7 +452,7 @@ impl ValidatorSet for ValidatorSafeContract { })) } - fn register_contract(&self, client: Weak) { + fn register_client(&self, client: Weak) { trace!(target: "engine", "Setting up contract caller."); *self.client.write() = Some(client); } @@ -435,7 +468,7 @@ mod tests { use spec::Spec; use account_provider::AccountProvider; use transaction::{Transaction, Action}; - use client::{BlockChainClient, EngineClient}; + use client::BlockChainClient; use ethkey::Secret; use miner::MinerService; use tests::helpers::{generate_dummy_client_with_spec_and_accounts, generate_dummy_client_with_spec_and_data}; @@ -446,7 +479,7 @@ mod tests { fn fetches_validators() { let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_safe_contract, None); let vc = Arc::new(ValidatorSafeContract::new("0000000000000000000000000000000000000005".parse::
().unwrap())); - vc.register_contract(Arc::downgrade(&client)); + vc.register_client(Arc::downgrade(&client) as _); let last_hash = client.best_block_header().hash(); assert!(vc.contains(&last_hash, &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e".parse::
().unwrap())); assert!(vc.contains(&last_hash, &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1".parse::
().unwrap())); @@ -460,7 +493,7 @@ mod tests { let v1 = tap.insert_account(keccak("0").into(), "").unwrap(); let chain_id = Spec::new_validator_safe_contract().chain_id(); let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_safe_contract, Some(tap)); - client.engine().register_client(Arc::downgrade(&client)); + client.engine().register_client(Arc::downgrade(&client) as _); let validator_contract = "0000000000000000000000000000000000000005".parse::
().unwrap(); client.miner().set_engine_signer(v1, "".into()).unwrap(); @@ -474,7 +507,7 @@ mod tests { data: "bfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(), }.sign(&s0, Some(chain_id)); client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap(); - client.update_sealing(); + ::client::EngineClient::update_sealing(&*client); assert_eq!(client.chain_info().best_block_number, 1); // Add "1" validator back in. let tx = Transaction { @@ -486,13 +519,13 @@ mod tests { data: "4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(), }.sign(&s0, Some(chain_id)); client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap(); - client.update_sealing(); + ::client::EngineClient::update_sealing(&*client); // The transaction is not yet included so still unable to seal. assert_eq!(client.chain_info().best_block_number, 1); // Switch to the validator that is still there. client.miner().set_engine_signer(v0, "".into()).unwrap(); - client.update_sealing(); + ::client::EngineClient::update_sealing(&*client); assert_eq!(client.chain_info().best_block_number, 2); // Switch back to the added validator, since the state is updated. client.miner().set_engine_signer(v1, "".into()).unwrap(); @@ -505,13 +538,13 @@ mod tests { data: Vec::new(), }.sign(&s0, Some(chain_id)); client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap(); - client.update_sealing(); + ::client::EngineClient::update_sealing(&*client); // Able to seal again. assert_eq!(client.chain_info().best_block_number, 3); // Check syncing. let sync_client = generate_dummy_client_with_spec_and_data(Spec::new_validator_safe_contract, 0, 0, &[]); - sync_client.engine().register_client(Arc::downgrade(&sync_client)); + sync_client.engine().register_client(Arc::downgrade(&sync_client) as _); for i in 1..4 { sync_client.import_block(client.block(BlockId::Number(i)).unwrap().into_inner()).unwrap(); } diff --git a/ethcore/src/header.rs b/ethcore/src/header.rs index aa94db036..8dca493a7 100644 --- a/ethcore/src/header.rs +++ b/ethcore/src/header.rs @@ -261,8 +261,13 @@ impl Header { s.out() } - /// Get the KECCAK (Keccak) of this header, optionally `with_seal`. + /// Get the SHA3 (Keccak) of this header, optionally `with_seal`. pub fn rlp_keccak(&self, with_seal: Seal) -> H256 { keccak(self.rlp(with_seal)) } + + /// Encode the header, getting a type-safe wrapper around the RLP. 
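+	/// The returned encoding includes the seal fields.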
+ pub fn encoded(&self) -> ::encoded::Header { + ::encoded::Header::new(self.rlp(Seal::With)) + } } impl Decodable for Header { diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index b8f43f11c..4e7e04341 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -116,7 +116,7 @@ impl ClientService { }); io_service.register_handler(client_io)?; - spec.engine.register_client(Arc::downgrade(&client)); + spec.engine.register_client(Arc::downgrade(&client) as _); let stop_guard = ::devtools::StopGuard::new(); run_ipc(ipc_path, client.clone(), snapshot.clone(), stop_guard.share()); diff --git a/ethcore/src/snapshot/tests/proof_of_authority.rs b/ethcore/src/snapshot/tests/proof_of_authority.rs index 509ccb8fd..7c00c8197 100644 --- a/ethcore/src/snapshot/tests/proof_of_authority.rs +++ b/ethcore/src/snapshot/tests/proof_of_authority.rs @@ -93,7 +93,7 @@ fn make_chain(accounts: Arc, blocks_beyond: usize, transitions: let mut cur_signers = vec![*RICH_ADDR]; { let engine = client.engine(); - engine.register_client(Arc::downgrade(&client)); + engine.register_client(Arc::downgrade(&client) as _); } { diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index 202daba13..a807384be 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -36,7 +36,6 @@ use factory::Factories; use header::{BlockNumber, Header}; use pod_state::*; use rlp::{Rlp, RlpStream}; -use state_db::StateDB; use state::{Backend, State, Substate}; use state::backend::Basic as BasicBackend; use trace::{NoopTracer, NoopVMTracer}; @@ -465,7 +464,7 @@ impl Spec { } /// Ensure that the given state DB has the trie nodes in for the genesis state. - pub fn ensure_db_good(&self, db: StateDB, factories: &Factories) -> Result { + pub fn ensure_db_good(&self, db: T, factories: &Factories) -> Result { if db.as_hashdb().contains(&self.state_root()) { return Ok(db) } @@ -487,6 +486,63 @@ impl Spec { .and_then(|x| load_from(cache_dir, x).map_err(fmt)) } + /// initialize genesis epoch data, using in-memory database for + /// constructor. + pub fn genesis_epoch_data(&self) -> Result, String> { + use transaction::{Action, Transaction}; + use util::{journaldb, kvdb}; + + let genesis = self.genesis_header(); + + let factories = Default::default(); + let mut db = journaldb::new( + Arc::new(kvdb::in_memory(0)), + journaldb::Algorithm::Archive, + None, + ); + + self.ensure_db_good(BasicBackend(db.as_hashdb_mut()), &factories) + .map_err(|e| format!("Unable to initialize genesis state: {}", e))?; + + let call = |a, d| { + let mut db = db.boxed_clone(); + let env_info = ::evm::EnvInfo { + number: 0, + author: *genesis.author(), + timestamp: genesis.timestamp(), + difficulty: *genesis.difficulty(), + gas_limit: *genesis.gas_limit(), + last_hashes: Arc::new(Vec::new()), + gas_used: 0.into() + }; + + let from = Address::default(); + let tx = Transaction { + nonce: self.engine.account_start_nonce(0), + action: Action::Call(a), + gas: U256::from(50_000_000), // TODO: share with client. 
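+				// Zero gas price and value: this is a proving call against the genesis state, not a real transaction.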
+ gas_price: U256::default(), + value: U256::default(), + data: d, + }.fake_sign(from); + + let res = ::state::prove_transaction( + db.as_hashdb_mut(), + *genesis.state_root(), + &tx, + &*self.engine, + &env_info, + factories.clone(), + true, + ); + + res.map(|(out, proof)| (out, proof.into_iter().map(|x| x.into_vec()).collect())) + .ok_or_else(|| "Failed to prove call: insufficient state".into()) + }; + + self.engine.genesis_epoch_data(&genesis, &call) + } + /// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus. pub fn new_test() -> Spec { load_bundled!("null_morden") } diff --git a/ethcore/src/tx_filter.rs b/ethcore/src/tx_filter.rs index 0ba986608..f39b92451 100644 --- a/ethcore/src/tx_filter.rs +++ b/ethcore/src/tx_filter.rs @@ -21,8 +21,7 @@ use std::collections::HashMap; use std::collections::hash_map::Entry; use native_contracts::TransactAcl as Contract; use client::{BlockChainClient, BlockId, ChainNotify}; -use util::{Address, Bytes}; -use bigint::hash::H256; +use util::{Address, H256, Bytes}; use parking_lot::{Mutex, RwLock}; use futures::{self, Future}; use spec::CommonParams; diff --git a/parity/blockchain.rs b/parity/blockchain.rs index eee785102..f21364214 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -208,7 +208,9 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> { config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024; config.queue.verifier_settings = cmd.verifier_settings; - let service = LightClientService::start(config, &spec, &client_path, cache) + // TODO: could epoch signals be avilable at the end of the file? + let fetch = ::light::client::fetch::unavailable(); + let service = LightClientService::start(config, &spec, fetch, &client_path, cache) .map_err(|e| format!("Failed to start client: {}", e))?; // free up the spec in memory. diff --git a/parity/dapps.rs b/parity/dapps.rs index 4177644d3..98eca3459 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -25,7 +25,7 @@ use futures::{future, IntoFuture, Future, BoxFuture}; use hash_fetch::fetch::Client as FetchClient; use hash_fetch::urlhint::ContractClient; use helpers::replace_home; -use light::client::Client as LightClient; +use light::client::LightChainClient; use light::on_demand::{self, OnDemand}; use node_health::{SyncStatus, NodeHealth}; use rpc; @@ -87,16 +87,16 @@ impl ContractClient for FullRegistrar { } /// Registrar implementation for the light client. -pub struct LightRegistrar { +pub struct LightRegistrar { /// The light client. - pub client: Arc, + pub client: Arc, /// Handle to the on-demand service. pub on_demand: Arc, /// Handle to the light network service. 
pub sync: Arc, } -impl ContractClient for LightRegistrar { +impl ContractClient for LightRegistrar { fn registrar(&self) -> Result { self.client.engine().additional_params().get("registrar") .ok_or_else(|| "Registrar not defined.".into()) @@ -106,7 +106,14 @@ impl ContractClient for LightRegistrar { } fn call(&self, address: Address, data: Bytes) -> BoxFuture { - let (header, env_info) = (self.client.best_block_header(), self.client.latest_env_info()); + let header = self.client.best_block_header(); + let env_info = self.client.env_info(BlockId::Hash(header.hash())) + .ok_or_else(|| format!("Cannot fetch env info for header {}", header.hash())); + + let env_info = match env_info { + Ok(x) => x, + Err(e) => return future::err(e).boxed(), + }; let maybe_future = self.sync.with_context(move |ctx| { self.on_demand diff --git a/parity/informant.rs b/parity/informant.rs index 2c356a039..deb2190d1 100644 --- a/parity/informant.rs +++ b/parity/informant.rs @@ -22,7 +22,7 @@ use std::sync::{Arc}; use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering}; use std::time::{Instant, Duration}; -use ethcore::client::*; +use ethcore::client::{BlockId, BlockChainClient, BlockChainInfo, BlockQueueInfo, ChainNotify, ClientReport, Client}; use ethcore::header::BlockNumber; use ethcore::service::ClientIoMessage; use ethcore::snapshot::{RestorationStatus, SnapshotService as SS}; diff --git a/parity/light_helpers/epoch_fetch.rs b/parity/light_helpers/epoch_fetch.rs new file mode 100644 index 000000000..8fccf049c --- /dev/null +++ b/parity/light_helpers/epoch_fetch.rs @@ -0,0 +1,90 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::{Arc, Weak}; + +use ethcore::encoded; +use ethcore::engines::{Engine, StateDependentProof}; +use ethcore::header::Header; +use ethcore::receipt::Receipt; +use ethsync::LightSync; + +use futures::{future, Future, BoxFuture}; + +use light::client::fetch::ChainDataFetcher; +use light::on_demand::{request, OnDemand}; + +use parking_lot::RwLock; +use bigint::hash::H256; + +const ALL_VALID_BACKREFS: &str = "no back-references, therefore all back-references valid; qed"; + +/// Allows on-demand fetch of data useful for the light client. +pub struct EpochFetch { + /// A handle to the sync service. + pub sync: Arc>>, + /// The on-demand request service. 
+ pub on_demand: Arc, +} + +impl EpochFetch { + fn request(&self, req: T) -> BoxFuture + where T: Send + request::RequestAdapter + 'static, T::Out: Send + 'static + { + match self.sync.read().upgrade() { + Some(sync) => { + let on_demand = &self.on_demand; + let maybe_future = sync.with_context(move |ctx| { + on_demand.request(ctx, req).expect(ALL_VALID_BACKREFS) + }); + + match maybe_future { + Some(x) => x.map_err(|_| "Request canceled").boxed(), + None => future::err("Unable to access network.").boxed(), + } + } + None => future::err("Unable to access network").boxed(), + } + } +} + +impl ChainDataFetcher for EpochFetch { + type Error = &'static str; + + type Body = BoxFuture; + type Receipts = BoxFuture, &'static str>; + type Transition = BoxFuture, &'static str>; + + fn block_body(&self, header: &Header) -> Self::Body { + self.request(request::Body(header.encoded().into())) + } + + /// Fetch block receipts. + fn block_receipts(&self, header: &Header) -> Self::Receipts { + self.request(request::BlockReceipts(header.encoded().into())) + } + + /// Fetch epoch transition proof at given header. + fn epoch_transition(&self, hash: H256, engine: Arc, checker: Arc) + -> Self::Transition + { + self.request(request::Signal { + hash: hash, + engine: engine, + proof_check: checker, + }) + } +} diff --git a/parity/light_helpers/mod.rs b/parity/light_helpers/mod.rs index 488f970c2..5fc9c516b 100644 --- a/parity/light_helpers/mod.rs +++ b/parity/light_helpers/mod.rs @@ -16,6 +16,8 @@ //! Utilities and helpers for the light client. +mod epoch_fetch; mod queue_cull; +pub use self::epoch_fetch::EpochFetch; pub use self::queue_cull::QueueCull; diff --git a/parity/light_helpers/queue_cull.rs b/parity/light_helpers/queue_cull.rs index eaf0ca9c5..e024e70a5 100644 --- a/parity/light_helpers/queue_cull.rs +++ b/parity/light_helpers/queue_cull.rs @@ -23,7 +23,7 @@ use ethcore::service::ClientIoMessage; use ethsync::LightSync; use io::{IoContext, IoHandler, TimerToken}; -use light::client::Client; +use light::client::LightChainClient; use light::on_demand::{request, OnDemand}; use light::TransactionQueue; @@ -41,9 +41,9 @@ const TIMEOUT_MS: u64 = 1000 * 60 * 10; const PURGE_TIMEOUT_MS: u64 = 1000 * 60 * 9; /// Periodically culls the transaction queue of mined transactions. -pub struct QueueCull { +pub struct QueueCull { /// A handle to the client, for getting the latest block header. - pub client: Arc, + pub client: Arc, /// A handle to the sync service. pub sync: Arc, /// The on-demand request service. @@ -54,7 +54,7 @@ pub struct QueueCull { pub remote: Remote, } -impl IoHandler for QueueCull { +impl IoHandler for QueueCull { fn initialize(&self, io: &IoContext) { io.register_timer(TOKEN, TIMEOUT_MS).expect("Error registering timer"); } diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index d3171f381..1e32c8bf1 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -32,6 +32,7 @@ use ethsync::{ManageNetwork, SyncProvider, LightSync}; use hash_fetch::fetch::Client as FetchClient; use jsonrpc_core::{self as core, MetaIoHandler}; use light::{TransactionQueue as LightTransactionQueue, Cache as LightDataCache}; +use light::client::LightChainClient; use node_health::NodeHealth; use parity_reactor; use parity_rpc::dispatch::{FullDispatcher, LightDispatcher}; @@ -398,9 +399,9 @@ impl ActivityNotifier for LightClientNotifier { } /// RPC dependencies for a light client. 
-pub struct LightDependencies { +pub struct LightDependencies { pub signer_service: Arc, - pub client: Arc<::light::client::Client>, + pub client: Arc, pub sync: Arc, pub net: Arc, pub secret_store: Arc, @@ -419,7 +420,7 @@ pub struct LightDependencies { pub whisper_rpc: Option<::whisper::RpcFactory>, } -impl LightDependencies { +impl LightDependencies { fn extend_api>( &self, handler: &mut MetaIoHandler, @@ -568,7 +569,7 @@ impl LightDependencies { } } -impl Dependencies for LightDependencies { +impl Dependencies for LightDependencies { type Notifier = LightClientNotifier; fn activity_notifier(&self) -> Self::Notifier { LightClientNotifier } diff --git a/parity/run.rs b/parity/run.rs index c9c1283ca..ee8ce5638 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -223,7 +223,16 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024; config.queue.verifier_settings = cmd.verifier_settings; - let service = light_client::Service::start(config, &spec, &db_dirs.client_path(algorithm), cache.clone()) + // start on_demand service. + let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone())); + + let sync_handle = Arc::new(RwLock::new(Weak::new())); + let fetch = ::light_helpers::EpochFetch { + on_demand: on_demand.clone(), + sync: sync_handle.clone(), + }; + + let service = light_client::Service::start(config, &spec, fetch, &db_dirs.client_path(algorithm), cache.clone()) .map_err(|e| format!("Error starting light client: {}", e))?; let txq = Arc::new(RwLock::new(::light::transaction_queue::TransactionQueue::default())); let provider = ::light::provider::LightProvider::new(service.client().clone(), txq.clone()); @@ -235,9 +244,6 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> net_conf.boot_nodes = spec.nodes.clone(); } - // start on_demand service. - let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone())); - let mut attached_protos = Vec::new(); let whisper_factory = if cmd.whisper.enabled { let (whisper_net, whisper_factory) = ::whisper::setup(cmd.whisper.target_message_pool_size) @@ -261,6 +267,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> }; let light_sync = LightSync::new(sync_params).map_err(|e| format!("Error starting network: {}", e))?; let light_sync = Arc::new(light_sync); + *sync_handle.write() = Arc::downgrade(&light_sync); // spin up event loop let event_loop = EventLoop::spawn(); diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index 45c55346b..cb4550427 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -25,7 +25,7 @@ use jsonrpc_core::Error; use jsonrpc_macros::Trailing; use light::cache::Cache as LightDataCache; -use light::client::{Client as LightClient, LightChainClient}; +use light::client::LightChainClient; use light::{cht, TransactionQueue}; use light::on_demand::{request, OnDemand}; @@ -63,9 +63,9 @@ use util::Address; const NO_INVALID_BACK_REFS: &'static str = "Fails only on invalid back-references; back-references here known to be valid; qed"; /// Light client `ETH` (and filter) RPC. -pub struct EthClient { +pub struct EthClient { sync: Arc, - client: Arc, + client: Arc, on_demand: Arc, transaction_queue: Arc>, accounts: Arc, @@ -73,7 +73,7 @@ pub struct EthClient { polls: Mutex>, } -impl Clone for EthClient { +impl Clone for EthClient { fn clone(&self) -> Self { // each instance should have its own poll manager. 
EthClient { @@ -89,12 +89,12 @@ impl Clone for EthClient { } -impl EthClient { +impl EthClient { /// Create a new `EthClient` with a handle to the light sync instance, client, /// and on-demand request service, which is assumed to be attached as a handler. pub fn new( sync: Arc, - client: Arc, + client: Arc, on_demand: Arc, transaction_queue: Arc>, accounts: Arc, @@ -209,7 +209,7 @@ impl EthClient { } } -impl Eth for EthClient { +impl Eth for EthClient { type Metadata = Metadata; fn protocol_version(&self) -> Result { @@ -466,7 +466,7 @@ impl Eth for EthClient { } // This trait implementation triggers a blanked impl of `EthFilter`. -impl Filterable for EthClient { +impl Filterable for EthClient { fn best_block_number(&self) -> u64 { self.client.chain_info().best_block_number } fn block_hash(&self, id: BlockId) -> Option { diff --git a/sync/src/chain.rs b/sync/src/chain.rs index b3875fbcc..f00baf5a6 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -2244,7 +2244,7 @@ mod tests { use super::{PeerInfo, PeerAsking}; use ethkey; use ethcore::header::*; - use ethcore::client::*; + use ethcore::client::{BlockChainClient, EachBlockWith, TestBlockChainClient}; use ethcore::transaction::UnverifiedTransaction; use ethcore::miner::MinerService; diff --git a/sync/src/light_sync/tests/test_net.rs b/sync/src/light_sync/tests/test_net.rs index 65ddf92da..535650ce1 100644 --- a/sync/src/light_sync/tests/test_net.rs +++ b/sync/src/light_sync/tests/test_net.rs @@ -25,7 +25,7 @@ use tests::helpers::{TestNet, Peer as PeerLike, TestPacket}; use ethcore::client::TestBlockChainClient; use ethcore::spec::Spec; use io::IoChannel; -use light::client::Client as LightClient; +use light::client::fetch::{self, Unavailable}; use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams}; use light::provider::LightProvider; use network::{NodeId, PeerId}; @@ -36,6 +36,8 @@ use light::cache::Cache; const NETWORK_ID: u64 = 0xcafebabe; +pub type LightClient = ::light::client::Client; + struct TestIoContext<'a> { queue: &'a RwLock>, sender: Option, @@ -216,7 +218,14 @@ impl TestNet { // skip full verification because the blocks are bad. config.verify_full = false; let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); - let client = LightClient::in_memory(config, &Spec::new_test(), IoChannel::disconnected(), cache); + let client = LightClient::in_memory( + config, + &Spec::new_test(), + fetch::unavailable(), // TODO: allow fetch from full nodes. + IoChannel::disconnected(), + cache + ); + peers.push(Arc::new(Peer::new_light(Arc::new(client)))) } diff --git a/sync/src/tests/consensus.rs b/sync/src/tests/consensus.rs index a9c26712d..f45e614d7 100644 --- a/sync/src/tests/consensus.rs +++ b/sync/src/tests/consensus.rs @@ -71,8 +71,8 @@ fn authority_round() { // Push transaction to both clients. Only one of them gets lucky to produce a block. 
net.peer(0).chain.miner().set_engine_signer(s0.address(), "".to_owned()).unwrap(); net.peer(1).chain.miner().set_engine_signer(s1.address(), "".to_owned()).unwrap(); - net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain)); - net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain)); + net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain) as _); + net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain) as _); net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1))); net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0))); // exchange statuses @@ -160,8 +160,8 @@ fn tendermint() { trace!(target: "poa", "Peer 0 is {}.", s0.address()); net.peer(1).chain.miner().set_engine_signer(s1.address(), "".to_owned()).unwrap(); trace!(target: "poa", "Peer 1 is {}.", s1.address()); - net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain)); - net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain)); + net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain) as _); + net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain) as _); net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0))); net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1))); // Exhange statuses From 7d1c7a047471116af6870d6d62136fa554a7a552 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 5 Sep 2017 17:24:35 +0200 Subject: [PATCH 02/10] allow optional casting of engine client to full client --- ethcore/light/src/client/mod.rs | 18 ++++-------------- ethcore/src/client/client.rs | 8 +------- ethcore/src/client/test_client.rs | 8 +------- ethcore/src/client/traits.rs | 8 +++----- ethcore/src/engines/tendermint/mod.rs | 2 +- ethcore/src/engines/validator_set/contract.rs | 8 +++++++- .../src/engines/validator_set/safe_contract.rs | 7 ++++++- ethcore/src/ethereum/ethash.rs | 6 +++--- ethcore/src/tx_filter.rs | 15 +++++++++++---- 9 files changed, 37 insertions(+), 43 deletions(-) diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 2b77685bd..2b2514d0a 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -19,9 +19,9 @@ use std::sync::{Weak, Arc}; use ethcore::block_status::BlockStatus; -use ethcore::client::{TransactionImportResult, ClientReport, EnvInfo}; +use ethcore::client::{ClientReport, EnvInfo}; use ethcore::engines::{epoch, Engine, EpochChange, EpochTransition, Proof, Unsure}; -use ethcore::error::{TransactionError, BlockImportError, Error as EthcoreError}; +use ethcore::error::BlockImportError; use ethcore::ids::BlockId; use ethcore::header::{BlockNumber, Header}; use ethcore::verification::queue::{self, HeaderQueue}; @@ -35,7 +35,6 @@ use bigint::prelude::U256; use bigint::hash::H256; use futures::{IntoFuture, Future}; -use util::Address; use util::kvdb::{KeyValueDB, CompactionProfile}; use self::fetch::ChainDataFetcher; @@ -619,17 +618,8 @@ impl ::ethcore::client::EngineClient for Client { Client::chain_info(self) } - fn call_contract(&self, _id: BlockId, _address: Address, _data: Vec) -> Result, String> { - Err("Contract calling not supported by light client".into()) - } - - fn transact_contract(&self, _address: Address, _data: Vec) - -> Result - { - // TODO: these are only really used for misbehavior reporting. 
- // no relevant clients will be running light clients, but maybe - // they could be at some point? - Err(TransactionError::LimitReached.into()) + fn as_full_client(&self) -> Option<&::ethcore::client::BlockChainClient> { + None } fn block_number(&self, id: BlockId) -> Option { diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 0eda5048e..02682ece2 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -1960,13 +1960,7 @@ impl super::traits::EngineClient for Client { BlockChainClient::chain_info(self) } - fn call_contract(&self, id: BlockId, address: Address, data: Bytes) -> Result { - BlockChainClient::call_contract(self, id, address, data) - } - - fn transact_contract(&self, address: Address, data: Bytes) -> Result { - BlockChainClient::transact_contract(self, address, data) - } + fn as_full_client(&self) -> Option<&BlockChainClient> { Some(self) } fn block_number(&self, id: BlockId) -> Option { BlockChainClient::block_number(self, id) diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 0cd9fedc8..f39932f82 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -828,13 +828,7 @@ impl super::traits::EngineClient for TestBlockChainClient { BlockChainClient::chain_info(self) } - fn call_contract(&self, id: BlockId, address: Address, data: Bytes) -> Result { - BlockChainClient::call_contract(self, id, address, data) - } - - fn transact_contract(&self, address: Address, data: Bytes) -> Result { - BlockChainClient::transact_contract(self, address, data) - } + fn as_full_client(&self) -> Option<&BlockChainClient> { Some(self) } fn block_number(&self, id: BlockId) -> Option { BlockChainClient::block_number(self, id) diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index 45736e2c5..5a619a95e 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -337,12 +337,10 @@ pub trait EngineClient: Sync + Send { /// Get block chain info. fn chain_info(&self) -> BlockChainInfo; - /// Like `call`, but with various defaults. Designed to be used for calling contracts. - fn call_contract(&self, id: BlockId, address: Address, data: Bytes) -> Result; - - /// Import a transaction: used for misbehaviour reporting. - fn transact_contract(&self, address: Address, data: Bytes) -> Result; + /// Attempt to cast the engine client to a full client. + fn as_full_client(&self) -> Option<&BlockChainClient>; + /// Get a block number by ID. 
fn block_number(&self, id: BlockId) -> Option; } diff --git a/ethcore/src/engines/tendermint/mod.rs b/ethcore/src/engines/tendermint/mod.rs index dff678c92..ce0a0da24 100644 --- a/ethcore/src/engines/tendermint/mod.rs +++ b/ethcore/src/engines/tendermint/mod.rs @@ -35,7 +35,7 @@ use bigint::hash::{H256, H520}; use parking_lot::RwLock; use util::*; use unexpected::{OutOfBounds, Mismatch}; -use client::{Client, EngineClient}; +use client::EngineClient; use error::{Error, BlockError}; use header::{Header, BlockNumber}; use builtin::Builtin; diff --git a/ethcore/src/engines/validator_set/contract.rs b/ethcore/src/engines/validator_set/contract.rs index 9a3705b63..24e396c10 100644 --- a/ethcore/src/engines/validator_set/contract.rs +++ b/ethcore/src/engines/validator_set/contract.rs @@ -58,7 +58,13 @@ impl ValidatorContract { Box::new(move |a, d| client.as_ref() .and_then(Weak::upgrade) .ok_or("No client!".into()) - .and_then(|c| c.transact_contract(a, d).map_err(|e| format!("Transaction import error: {}", e))) + .and_then(|c| { + match c.as_full_client() { + Some(c) => c.transact_contract(a, d) + .map_err(|e| format!("Transaction import error: {}", e)), + None => Err("No full client!".into()), + } + }) .map(|_| Default::default())) } } diff --git a/ethcore/src/engines/validator_set/safe_contract.rs b/ethcore/src/engines/validator_set/safe_contract.rs index 1d4f9b0be..c3f82e181 100644 --- a/ethcore/src/engines/validator_set/safe_contract.rs +++ b/ethcore/src/engines/validator_set/safe_contract.rs @@ -311,7 +311,12 @@ impl ValidatorSet for ValidatorSafeContract { Box::new(move |addr, data| client.as_ref() .and_then(Weak::upgrade) .ok_or("No client!".into()) - .and_then(|c| c.call_contract(id, addr, data)) + .and_then(|c| { + match c.as_full_client() { + Some(c) => c.call_contract(id, addr, data), + None => Err("No full client!".into()), + } + }) .map(|out| (out, Vec::new()))) // generate no proofs in general } diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index b11700e09..11c04b628 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -40,7 +40,7 @@ use rlp::{self, UntrustedRlp}; use vm::LastHashes; use semantic_version::SemanticVersion; use tx_filter::{TransactionFilter}; -use client::{Client, BlockChainClient}; +use client::EngineClient; /// Parity tries to round block.gas_limit to multiple of this constant pub const PARITY_GAS_LIMIT_DETERMINANT: U256 = U256([37, 0, 0, 0]); @@ -460,9 +460,9 @@ impl Engine for Arc { Some(Box::new(::snapshot::PowSnapshot::new(SNAPSHOT_BLOCKS, MAX_SNAPSHOT_BLOCKS))) } - fn register_client(&self, client: Weak) { + fn register_client(&self, client: Weak) { if let Some(ref filter) = self.tx_filter { - filter.register_client(client as Weak); + filter.register_client(client); } } diff --git a/ethcore/src/tx_filter.rs b/ethcore/src/tx_filter.rs index f39b92451..bd6a0b6bd 100644 --- a/ethcore/src/tx_filter.rs +++ b/ethcore/src/tx_filter.rs @@ -19,9 +19,10 @@ use std::sync::Weak; use std::collections::HashMap; use std::collections::hash_map::Entry; +use bigint::hash::H256; use native_contracts::TransactAcl as Contract; -use client::{BlockChainClient, BlockId, ChainNotify}; -use util::{Address, H256, Bytes}; +use client::{EngineClient, BlockId, ChainNotify}; +use util::{Address, Bytes}; use parking_lot::{Mutex, RwLock}; use futures::{self, Future}; use spec::CommonParams; @@ -42,7 +43,7 @@ mod tx_permissions { /// Connection filter that uses a contract to manage permissions. 
pub struct TransactionFilter { contract: Mutex>, - client: RwLock>>, + client: RwLock>>, contract_address: Address, permission_cache: Mutex>, } @@ -66,7 +67,7 @@ impl TransactionFilter { } /// Set client reference to be used for contract call. - pub fn register_client(&self, client: Weak) { + pub fn register_client(&self, client: Weak) { *self.client.write() = Some(client); } @@ -78,6 +79,12 @@ impl TransactionFilter { Some(client) => client, _ => return false, }; + + let client = match client.as_full_client() { + Some(client) => client, + _ => return false, // TODO: how to handle verification for light clients? + }; + let tx_type = match transaction.action { Action::Create => tx_permissions::CREATE, Action::Call(address) => if client.code_hash(&address, BlockId::Hash(*parent_hash)).map_or(false, |c| c != KECCAK_EMPTY) { From ffced4e17f14e85fe5ac4e124cf678cec31af5b2 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 5 Sep 2017 21:24:15 +0200 Subject: [PATCH 03/10] fix test build --- ethcore/src/tx_filter.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/src/tx_filter.rs b/ethcore/src/tx_filter.rs index bd6a0b6bd..b2b7828f1 100644 --- a/ethcore/src/tx_filter.rs +++ b/ethcore/src/tx_filter.rs @@ -211,7 +211,7 @@ mod test { let key4 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000004")).unwrap(); let filter = TransactionFilter::from_params(spec.params()).unwrap(); - filter.register_client(Arc::downgrade(&client) as Weak); + filter.register_client(Arc::downgrade(&client) as Weak<_>); let mut basic_tx = Transaction::default(); basic_tx.action = Action::Call(Address::from("000000000000000000000000000000000000032")); let create_tx = Transaction::default(); From 375668bc4052c273587906c9882b72bf5489ba4a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 10 Sep 2017 18:02:14 +0200 Subject: [PATCH 04/10] Compatibility with whisper v6 (#6179) * compatibility with whisper v6 * separate subprotocol for parity extensions * kill version field --- Cargo.lock | 37 ++-- parity/rpc_apis.rs | 8 +- parity/run.rs | 7 +- parity/whisper.rs | 63 ++++-- rpc/Cargo.toml | 3 + rpc/src/v1/tests/mocked/manage_network.rs | 4 + sync/src/api.rs | 10 + whisper/src/message.rs | 110 +++++++---- whisper/src/net.rs | 231 +++++++++++++++------- whisper/src/rpc/filter.rs | 34 ++-- whisper/src/rpc/mod.rs | 12 +- whisper/src/rpc/types.rs | 2 +- 12 files changed, 349 insertions(+), 172 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 16046c480..1bf522580 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,7 +2,7 @@ name = "wasm" version = "0.1.0" dependencies = [ - "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-bigint 0.1.3", "ethcore-logger 1.8.0", "ethcore-util 1.8.0", @@ -129,7 +129,7 @@ name = "bigint" version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "crunchy 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -141,7 +141,7 @@ name = "bincode" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -210,7 +210,7 @@ name = "bn" version = "0.4.4" source = "git+https://github.com/paritytech/bn#b97e95a45f4484a41a515338c4f0e093bf6675e0" dependencies = [ - "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -222,7 +222,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "byteorder" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -230,7 +230,7 @@ name = "bytes" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -507,7 +507,7 @@ dependencies = [ "bloomable 0.1.0", "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "bn 0.4.4 (git+https://github.com/paritytech/bn)", - "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "common-types 0.1.0", "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -742,7 +742,7 @@ dependencies = [ name = "ethcore-secretstore" version = "1.0.0" dependencies = [ - "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.8.0", "ethcore-bigint 0.1.3", @@ -855,7 +855,7 @@ dependencies = [ name = "ethkey" version = "0.2.0" dependencies = [ - "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.6 (git+https://github.com/paritytech/rust-secp256k1)", "ethcore-bigint 0.1.3", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -948,7 +948,7 @@ name = "evm" version = "0.1.0" dependencies = [ "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "common-types 0.1.0", "ethcore-bigint 0.1.3", "ethcore-logger 1.8.0", @@ -1421,7 +1421,7 @@ name = "libflate" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1634,7 +1634,7 @@ dependencies = [ name = "native-contracts" version = "0.1.0" dependencies = [ - "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", "ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-bigint 0.1.3", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2062,6 +2062,7 @@ dependencies = [ "ethcore-ipc 1.8.0", "ethcore-light 1.8.0", "ethcore-logger 1.8.0", + "ethcore-network 1.8.0", "ethcore-util 1.8.0", "ethcrypto 0.1.0", "ethjson 0.1.0", @@ -2190,7 +2191,7 @@ name = "parity-wasm" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2200,7 +2201,7 @@ name = "parity-whisper" version = "0.1.0" dependencies = [ "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-bigint 0.1.3", "ethcore-network 1.8.0", "ethcrypto 0.1.0", @@ -2504,7 +2505,7 @@ dependencies = [ name = "rlp" version = "0.2.0" dependencies = [ - "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-bigint 0.1.3", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3257,7 +3258,7 @@ dependencies = [ name = "vm" version = "0.1.0" dependencies = [ - "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "common-types 0.1.0", "ethcore-bigint 0.1.3", "ethcore-util 1.8.0", @@ -3301,7 +3302,7 @@ name = "ws" version = "0.7.1" source = "git+https://github.com/tomusdrw/ws-rs#f8306a798b7541d64624299a83a2c934f173beed" dependencies = [ - "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3380,7 +3381,7 @@ dependencies = [ "checksum bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3f421095d2a76fc24cd3fb3f912b90df06be7689912b1bdb423caefae59c258d" "checksum bn 0.4.4 (git+https://github.com/paritytech/bn)" = "" "checksum byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855" -"checksum byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c40977b0ee6b9885c9013cd41d9feffdd22deb3bb4dc3a71d901cc7a77de18c8" +"checksum byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff81738b726f5d099632ceaffe7fb65b90212e8dce59d518729e7e8634032d3d" "checksum bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8b24f16593f445422331a5eed46b72f7f171f910fead4f2ea8f17e727e9c5c14" "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c" "checksum cid 0.2.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "34aa7da06f10541fbca6850719cdaa8fa03060a5d2fb33840f149cf8133a00c7" diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index 1e32c8bf1..0a2fd87f1 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -354,14 +354,14 @@ impl FullDependencies { }, Api::Whisper => { if let Some(ref whisper_rpc) = self.whisper_rpc { - let whisper = whisper_rpc.make_handler(); + let whisper = whisper_rpc.make_handler(self.net.clone()); handler.extend_with(::parity_whisper::rpc::Whisper::to_delegate(whisper)); } } Api::WhisperPubSub => { if !for_generic_pubsub { if let Some(ref whisper_rpc) = self.whisper_rpc { - let whisper = whisper_rpc.make_handler(); + let whisper = whisper_rpc.make_handler(self.net.clone()); handler.extend_with( ::parity_whisper::rpc::WhisperPubSub::to_delegate(whisper) ); @@ -554,13 +554,13 @@ impl LightDependencies { }, Api::Whisper => { if let Some(ref whisper_rpc) = self.whisper_rpc { - let whisper = whisper_rpc.make_handler(); + let whisper = whisper_rpc.make_handler(self.net.clone()); handler.extend_with(::parity_whisper::rpc::Whisper::to_delegate(whisper)); } } Api::WhisperPubSub => { if let Some(ref whisper_rpc) = self.whisper_rpc { - let whisper = whisper_rpc.make_handler(); + let whisper = whisper_rpc.make_handler(self.net.clone()); handler.extend_with(::parity_whisper::rpc::WhisperPubSub::to_delegate(whisper)); } } diff --git a/parity/run.rs b/parity/run.rs index ee8ce5638..d0f967410 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -246,10 +246,8 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> let mut attached_protos = Vec::new(); let whisper_factory = if cmd.whisper.enabled { - let (whisper_net, whisper_factory) = ::whisper::setup(cmd.whisper.target_message_pool_size) + let whisper_factory = ::whisper::setup(cmd.whisper.target_message_pool_size, &mut attached_protos) .map_err(|e| format!("Failed to initialize whisper: {}", e))?; - - attached_protos.push(whisper_net); whisper_factory } else { None @@ -638,10 +636,9 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R let mut attached_protos = Vec::new(); let whisper_factory = if cmd.whisper.enabled { - let (whisper_net, whisper_factory) = ::whisper::setup(cmd.whisper.target_message_pool_size) + let whisper_factory = ::whisper::setup(cmd.whisper.target_message_pool_size, &mut attached_protos) .map_err(|e| format!("Failed to initialize whisper: {}", e))?; - attached_protos.push(whisper_net); whisper_factory } else { None diff --git a/parity/whisper.rs b/parity/whisper.rs index cab117ae6..f8d33626b 100644 --- a/parity/whisper.rs +++ b/parity/whisper.rs @@ -17,10 +17,11 @@ use std::sync::Arc; use std::io; -use ethsync::AttachedProtocol; +use ethsync::{AttachedProtocol, ManageNetwork}; use parity_rpc::Metadata; -use parity_whisper::net::{self as whisper_net, PoolHandle, Network as WhisperNetwork}; -use parity_whisper::rpc::{WhisperClient, FilterManager}; +use parity_whisper::message::Message; +use parity_whisper::net::{self as whisper_net, Network as WhisperNetwork}; +use parity_whisper::rpc::{WhisperClient, PoolHandle, FilterManager}; /// Whisper config. #[derive(Debug, PartialEq, Eq)] @@ -38,6 +39,31 @@ impl Default for Config { } } +/// Standard pool handle. +pub struct NetPoolHandle { + /// Pool handle. + handle: Arc>>, + /// Network manager. 
+ net: Arc, +} + +impl PoolHandle for NetPoolHandle { + fn relay(&self, message: Message) -> bool { + let mut res = false; + let mut message = Some(message); + self.net.with_proto_context(whisper_net::PROTOCOL_ID, &mut move |ctx| { + if let Some(message) = message.take() { + res = self.handle.post_message(message, ctx); + } + }); + res + } + + fn pool_status(&self) -> whisper_net::PoolStatus { + self.handle.pool_status() + } +} + /// Factory for standard whisper RPC. pub struct RpcFactory { net: Arc>>, @@ -45,8 +71,9 @@ pub struct RpcFactory { } impl RpcFactory { - pub fn make_handler(&self) -> WhisperClient { - WhisperClient::new(self.net.handle(), self.manager.clone()) + pub fn make_handler(&self, net: Arc) -> WhisperClient { + let handle = NetPoolHandle { handle: self.net.clone(), net: net }; + WhisperClient::new(handle, self.manager.clone()) } } @@ -54,24 +81,36 @@ impl RpcFactory { /// /// Will target the given pool size. #[cfg(not(feature = "ipc"))] -pub fn setup(target_pool_size: usize) -> io::Result<(AttachedProtocol, Option)> { +pub fn setup(target_pool_size: usize, protos: &mut Vec) + -> io::Result> +{ let manager = Arc::new(FilterManager::new()?); let net = Arc::new(WhisperNetwork::new(target_pool_size, manager.clone())); - let proto = AttachedProtocol { + protos.push(AttachedProtocol { handler: net.clone() as Arc<_>, packet_count: whisper_net::PACKET_COUNT, versions: whisper_net::SUPPORTED_VERSIONS, - protocol_id: *b"shh", - }; + protocol_id: whisper_net::PROTOCOL_ID, + }); + + // parity-only extensions to whisper. + protos.push(AttachedProtocol { + handler: Arc::new(whisper_net::ParityExtensions), + packet_count: whisper_net::PACKET_COUNT, + versions: whisper_net::SUPPORTED_VERSIONS, + protocol_id: whisper_net::PARITY_PROTOCOL_ID, + }); let factory = RpcFactory { net: net, manager: manager }; - Ok((proto, Some(factory))) + Ok(Some(factory)) } // TODO: make it possible to attach generic protocols in IPC. #[cfg(feature = "ipc")] -pub fn setup(_pool: usize) -> (AttachedProtocol, Option) { - Ok((AttachedProtocol, None)) +pub fn setup(_target_pool_size: usize, _protos: &mut Vec) + -> io::Result> +{ + Ok(None) } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index c213af8df..3d234d2d6 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -62,5 +62,8 @@ hash = { path = "../util/hash" } clippy = { version = "0.0.103", optional = true} pretty_assertions = "0.1" +[dev-dependencies] +ethcore-network = { path = "../util/network" } + [features] dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"] diff --git a/rpc/src/v1/tests/mocked/manage_network.rs b/rpc/src/v1/tests/mocked/manage_network.rs index e5e3fd340..9438429cd 100644 --- a/rpc/src/v1/tests/mocked/manage_network.rs +++ b/rpc/src/v1/tests/mocked/manage_network.rs @@ -15,6 +15,9 @@ // along with Parity. If not, see . 
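A brief aside on the new `ManageNetwork::with_proto_context` hook added in this patch: it evaluates a closure against a `NetworkContext` scoped to the given subprotocol, which is what `NetPoolHandle::relay` above relies on to post messages from the RPC layer. A minimal usage sketch, not part of the patch itself (peer id, packet id and payload are illustrative):

    use ethsync::ManageNetwork;

    fn broadcast_on_whisper(net: &ManageNetwork) {
        // evaluate the closure with a context bound to the "shh" subprotocol
        net.with_proto_context(*b"shh", &mut |ctx| {
            // illustrative only: packet 0 with an empty payload to peer 0
            let _ = ctx.send(0, 0, Vec::new());
        });
    }
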
use ethsync::{ManageNetwork, NetworkConfiguration}; +use self::ethcore_network::{ProtocolId, NetworkContext}; + +extern crate ethcore_network; pub struct TestManageNetwork; @@ -27,4 +30,5 @@ impl ManageNetwork for TestManageNetwork { fn start_network(&self) {} fn stop_network(&self) {} fn network_config(&self) -> NetworkConfiguration { NetworkConfiguration::new_local() } + fn with_proto_context(&self, _: ProtocolId, _: &mut FnMut(&NetworkContext)) { } } diff --git a/sync/src/api.rs b/sync/src/api.rs index 6f4b14970..0dfe51efd 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -497,6 +497,8 @@ pub trait ManageNetwork : Send + Sync { fn stop_network(&self); /// Query the current configuration of the network fn network_config(&self) -> NetworkConfiguration; + /// Get network context for protocol. + fn with_proto_context(&self, proto: ProtocolId, f: &mut FnMut(&NetworkContext)); } @@ -538,6 +540,10 @@ impl ManageNetwork for EthSync { fn network_config(&self) -> NetworkConfiguration { NetworkConfiguration::from(self.network.config().clone()) } + + fn with_proto_context(&self, proto: ProtocolId, f: &mut FnMut(&NetworkContext)) { + self.network.with_context_eval(proto, f); + } } #[derive(Debug, Clone, PartialEq, Eq)] @@ -808,6 +814,10 @@ impl ManageNetwork for LightSync { fn network_config(&self) -> NetworkConfiguration { NetworkConfiguration::from(self.network.config().clone()) } + + fn with_proto_context(&self, proto: ProtocolId, f: &mut FnMut(&NetworkContext)) { + self.network.with_context_eval(proto, f); + } } impl LightSyncProvider for LightSync { diff --git a/whisper/src/message.rs b/whisper/src/message.rs index e5202c583..fe6997c01 100644 --- a/whisper/src/message.rs +++ b/whisper/src/message.rs @@ -56,23 +56,18 @@ impl Topic { /// this takes 3 sets of 9 bits, treating each as an index in the range /// 0..512 into the bloom and setting the corresponding bit in the bloom to 1. pub fn bloom_into(&self, bloom: &mut H512) { - let mut set_bit = |idx: usize| { - let idx = idx & 511; - bloom[idx / 8] |= 1 << idx % 8; - }; let data = &self.0; - let mut combined = ((data[0] as usize) << 24) | - ((data[1] as usize) << 16) | - ((data[2] as usize) << 8) | - data[3] as usize; + for i in 0..3 { + let mut idx = data[i] as usize; - // take off the last 5 bits as we only use 27. - combined >>= 5; + if data[3] & (1 << i) != 0 { + idx += 256; + } - set_bit(combined); - set_bit(combined >> 9); - set_bit(combined >> 18); + debug_assert!(idx <= 511); + bloom[idx / 8] |= 1 << (7 - idx % 8); + } } /// Get bloom for single topic. @@ -118,6 +113,7 @@ pub fn bloom_topics(topics: &[Topic]) -> H512 { #[derive(Debug)] pub enum Error { Decoder(DecoderError), + EmptyTopics, LivesTooLong, IssuedInFuture, ZeroTTL, @@ -136,10 +132,27 @@ impl fmt::Display for Error { Error::LivesTooLong => write!(f, "Message claims to be issued before the unix epoch."), Error::IssuedInFuture => write!(f, "Message issued in future."), Error::ZeroTTL => write!(f, "Message live for zero time."), + Error::EmptyTopics => write!(f, "Message has no topics."), } } } +fn append_topics<'a>(s: &'a mut RlpStream, topics: &[Topic]) -> &'a mut RlpStream { + if topics.len() == 1 { + s.append(&topics[0]) + } else { + s.append_list(&topics) + } +} + +fn decode_topics(rlp: UntrustedRlp) -> Result, DecoderError> { + if rlp.is_list() { + rlp.iter().map(|r| r.as_val::()).collect() + } else { + rlp.as_val().map(|t| SmallVec::from_slice(&[t])) + } +} + // Raw envelope struct. 
#[derive(Clone, Debug, PartialEq, Eq)] pub struct Envelope { @@ -156,15 +169,20 @@ pub struct Envelope { } impl Envelope { + /// Whether the message is multi-topic. Only relay these to Parity peers. + pub fn is_multitopic(&self) -> bool { + self.topics.len() != 1 + } + fn proving_hash(&self) -> H256 { use byteorder::{BigEndian, ByteOrder}; let mut buf = [0; 32]; let mut stream = RlpStream::new_list(4); - stream.append(&self.expiry) - .append(&self.ttl) - .append_list(&self.topics) + stream.append(&self.expiry).append(&self.ttl); + + append_topics(&mut stream, &self.topics) .append(&self.data); let mut digest = Keccak::new_keccak256(); @@ -185,8 +203,9 @@ impl rlp::Encodable for Envelope { fn rlp_append(&self, s: &mut RlpStream) { s.begin_list(5) .append(&self.expiry) - .append(&self.ttl) - .append_list(&self.topics) + .append(&self.ttl); + + append_topics(s, &self.topics) .append(&self.data) .append(&self.nonce); } @@ -199,13 +218,17 @@ impl rlp::Decodable for Envelope { Ok(Envelope { expiry: rlp.val_at(0)?, ttl: rlp.val_at(1)?, - topics: rlp.at(2)?.iter().map(|x| x.as_val()).collect::>()?, + topics: decode_topics(rlp.at(2)?)?, data: rlp.val_at(3)?, nonce: rlp.val_at(4)?, }) } } +/// Error indicating no topics. +#[derive(Debug, Copy, Clone)] +pub struct EmptyTopics; + /// Message creation parameters. /// Pass this to `Message::create` to make a message. pub struct CreateParams { @@ -213,7 +236,7 @@ pub struct CreateParams { pub ttl: u64, /// payload data. pub payload: Vec, - /// Topics. + /// Topics. May not be empty. pub topics: Vec, /// How many milliseconds to spend proving work. pub work: u64, @@ -231,10 +254,12 @@ pub struct Message { impl Message { /// Create a message from creation parameters. /// Panics if TTL is 0. - pub fn create(params: CreateParams) -> Self { + pub fn create(params: CreateParams) -> Result { use byteorder::{BigEndian, ByteOrder}; use rand::{Rng, SeedableRng, XorShiftRng}; + if params.topics.is_empty() { return Err(EmptyTopics) } + let mut rng = { let mut thread_rng = ::rand::thread_rng(); @@ -254,10 +279,8 @@ impl Message { let start_digest = { let mut stream = RlpStream::new_list(4); - stream.append(&expiry) - .append(¶ms.ttl) - .append_list(¶ms.topics) - .append(¶ms.payload); + stream.append(&expiry).append(¶ms.ttl); + append_topics(&mut stream, ¶ms.topics).append(¶ms.payload); let mut digest = Keccak::new_keccak256(); digest.update(&*stream.drain()); @@ -300,12 +323,12 @@ impl Message { let encoded = ::rlp::encode(&envelope); - Message::from_components( + Ok(Message::from_components( envelope, encoded.len(), H256(keccak256(&encoded)), SystemTime::now(), - ).expect("Message generated here known to be valid; qed") + ).expect("Message generated here known to be valid; qed")) } /// Decode message from RLP and check for validity against system time. 
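To make the new topic rules concrete, a minimal usage sketch from inside the whisper crate, mirroring the updated tests (topic values are illustrative): `Message::create` now returns a `Result` because empty topic lists are rejected, and envelopes carrying more than one topic are flagged via `is_multitopic()` so the relay logic only forwards them to Parity peers.

    use message::{CreateParams, Message, Topic};

    // topics may no longer be empty, so creation can fail with EmptyTopics
    let message = Message::create(CreateParams {
        ttl: 100,
        payload: vec![1, 2, 3, 4],
        topics: vec![Topic([0xde, 0xad, 0xbe, 0xef]), Topic([1, 2, 3, 4])],
        work: 50,
    }).expect("at least one topic supplied; qed");

    // multi-topic envelopes are a parity-only extension and are kept away
    // from standard whisper peers by the relay logic in net.rs
    assert!(message.envelope().is_multitopic());
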
@@ -327,6 +350,8 @@ impl Message { if envelope.expiry <= envelope.ttl { return Err(Error::LivesTooLong) } if envelope.ttl == 0 { return Err(Error::ZeroTTL) } + if envelope.topics.is_empty() { return Err(Error::EmptyTopics) } + let issue_time_adjusted = Duration::from_secs( (envelope.expiry - envelope.ttl).saturating_sub(LEEWAY_SECONDS) ); @@ -394,6 +419,7 @@ mod tests { use super::*; use std::time::{self, Duration, SystemTime}; use rlp::UntrustedRlp; + use smallvec::SmallVec; fn unix_time(x: u64) -> SystemTime { time::UNIX_EPOCH + Duration::from_secs(x) @@ -401,12 +427,12 @@ mod tests { #[test] fn create_message() { - let _ = Message::create(CreateParams { + assert!(Message::create(CreateParams { ttl: 100, payload: vec![1, 2, 3, 4], - topics: Vec::new(), + topics: vec![Topic([1, 2, 1, 2])], work: 50, - }); + }).is_ok()); } #[test] @@ -415,7 +441,23 @@ mod tests { expiry: 100_000, ttl: 30, data: vec![9; 256], - topics: Default::default(), + topics: SmallVec::from_slice(&[Default::default()]), + nonce: 1010101, + }; + + let encoded = ::rlp::encode(&envelope); + let decoded = ::rlp::decode(&encoded); + + assert_eq!(envelope, decoded) + } + + #[test] + fn round_trip_multitopic() { + let envelope = Envelope { + expiry: 100_000, + ttl: 30, + data: vec![9; 256], + topics: SmallVec::from_slice(&[Default::default(), Topic([1, 2, 3, 4])]), nonce: 1010101, }; @@ -431,7 +473,7 @@ mod tests { expiry: 100_000, ttl: 30, data: vec![9; 256], - topics: Default::default(), + topics: SmallVec::from_slice(&[Default::default()]), nonce: 1010101, }; @@ -450,7 +492,7 @@ mod tests { expiry: 100_000, ttl: 30, data: vec![9; 256], - topics: Default::default(), + topics: SmallVec::from_slice(&[Default::default()]), nonce: 1010101, }; @@ -467,7 +509,7 @@ mod tests { expiry: 100_000, ttl: 200_000, data: vec![9; 256], - topics: Default::default(), + topics: SmallVec::from_slice(&[Default::default()]), nonce: 1010101, }; diff --git a/whisper/src/net.rs b/whisper/src/net.rs index 5553f27e6..dab32ad2c 100644 --- a/whisper/src/net.rs +++ b/whisper/src/net.rs @@ -23,31 +23,45 @@ use std::time::{Duration, SystemTime}; use std::sync::Arc; use bigint::hash::{H256, H512}; -use network::{HostInfo, NetworkContext, NetworkError, NodeId, PeerId, TimerToken}; +use network::{HostInfo, NetworkContext, NetworkError, NodeId, PeerId, ProtocolId, TimerToken}; use ordered_float::OrderedFloat; use parking_lot::{Mutex, RwLock}; use rlp::{DecoderError, RlpStream, UntrustedRlp}; use message::{Message, Error as MessageError}; +// how often periodic relays are. when messages are imported +// we directly broadcast. const RALLY_TOKEN: TimerToken = 1; -const RALLY_TIMEOUT_MS: u64 = 750; // supposed to be at least once per second. +const RALLY_TIMEOUT_MS: u64 = 2500; -const PROTOCOL_VERSION: usize = 2; +/// Current protocol version. +pub const PROTOCOL_VERSION: usize = 6; /// Supported protocol versions. pub const SUPPORTED_VERSIONS: &'static [u8] = &[PROTOCOL_VERSION as u8]; // maximum tolerated delay between messages packets. -const MAX_TOLERATED_DELAY_MS: u64 = 2000; +const MAX_TOLERATED_DELAY_MS: u64 = 5000; -/// Number of packets. -pub const PACKET_COUNT: u8 = 3; +/// Number of packets. A bunch are reserved. +pub const PACKET_COUNT: u8 = 128; + +/// Whisper protocol ID +pub const PROTOCOL_ID: ::network::ProtocolId = *b"shh"; + +/// Parity-whisper protocol ID +/// Current parity-specific extensions: +/// - Multiple topics in packet. 
+pub const PARITY_PROTOCOL_ID: ::network::ProtocolId = *b"pwh"; mod packet { pub const STATUS: u8 = 0; pub const MESSAGES: u8 = 1; - pub const TOPIC_FILTER: u8 = 2; + pub const POW_REQUIREMENT: u8 = 2; + pub const TOPIC_FILTER: u8 = 3; + + // 126, 127 for mail server stuff we will never implement here. } /// Handles messages within a single packet. @@ -67,11 +81,9 @@ enum Error { Decoder(DecoderError), Network(NetworkError), Message(MessageError), - UnknownPacket(u8), UnknownPeer(PeerId), - ProtocolVersionMismatch(usize), - SameNodeKey, UnexpectedMessage, + InvalidPowReq, } impl From for Error { @@ -98,12 +110,9 @@ impl fmt::Display for Error { Error::Decoder(ref err) => write!(f, "Failed to decode packet: {}", err), Error::Network(ref err) => write!(f, "Network error: {}", err), Error::Message(ref err) => write!(f, "Error decoding message: {}", err), - Error::UnknownPacket(ref id) => write!(f, "Unknown packet kind: {}", id), Error::UnknownPeer(ref id) => write!(f, "Message received from unknown peer: {}", id), - Error::ProtocolVersionMismatch(ref proto) => - write!(f, "Unknown protocol version: {}", proto), Error::UnexpectedMessage => write!(f, "Unexpected message."), - Error::SameNodeKey => write!(f, "Peer and us have same node key."), + Error::InvalidPowReq => write!(f, "Peer sent invalid PoW requirement."), } } } @@ -298,15 +307,18 @@ impl Messages { enum State { Unconfirmed(SystemTime), // awaiting status packet. - TheirTurn(SystemTime), // it has been their turn to send since stored time. - OurTurn, + Confirmed, } +#[allow(dead_code)] // for node key. this will be useful for topic routing. struct Peer { node_key: NodeId, state: State, known_messages: HashSet, topic_filter: Option, + pow_requirement: f64, + is_parity: bool, + _protocol_version: usize, } impl Peer { @@ -319,12 +331,14 @@ impl Peer { // whether this peer will accept the message. fn will_accept(&self, message: &Message) -> bool { - let known = self.known_messages.contains(message.hash()); + if self.known_messages.contains(message.hash()) { return false } - let matches_bloom = self.topic_filter.as_ref() - .map_or(true, |topic| topic & message.bloom() == message.bloom().clone()); + // only parity peers will accept multitopic messages. + if message.envelope().is_multitopic() && !self.is_parity { return false } + if message.work_proved() < self.pow_requirement { return false } - !known && matches_bloom + self.topic_filter.as_ref() + .map_or(true, |filter| &(filter & message.bloom()) == message.bloom()) } // note a message as known. returns true if it was already @@ -337,10 +351,14 @@ impl Peer { self.topic_filter = Some(topic); } + fn set_pow_requirement(&mut self, pow_requirement: f64) { + self.pow_requirement = pow_requirement; + } + fn can_send_messages(&self) -> bool { match self.state { - State::Unconfirmed(_) | State::OurTurn => false, - State::TheirTurn(_) => true, + State::Unconfirmed(_) => false, + State::Confirmed => true, } } } @@ -357,21 +375,41 @@ pub struct PoolStatus { pub target_size: usize, } -/// Handle to the pool, for posting messages or getting info. -#[derive(Clone)] -pub struct PoolHandle { - messages: Arc>, +/// Generic network context. +pub trait Context { + /// Disconnect a peer. + fn disconnect_peer(&self, PeerId); + /// Disable a peer. + fn disable_peer(&self, PeerId); + /// Get a peer's node key. + fn node_key(&self, PeerId) -> Option; + /// Get a peer's protocol version for given protocol. + fn protocol_version(&self, ProtocolId, PeerId) -> Option; + /// Send message to peer. 
+ fn send(&self, PeerId, u8, Vec); } -impl PoolHandle { - /// Post a message to the whisper network to be relayed. - pub fn post_message(&self, message: Message) -> bool { - self.messages.write().insert(message) +impl<'a> Context for NetworkContext<'a> { + fn disconnect_peer(&self, peer: PeerId) { + NetworkContext::disconnect_peer(self, peer); + } + fn disable_peer(&self, peer: PeerId) { + NetworkContext::disable_peer(self, peer) + } + fn node_key(&self, peer: PeerId) -> Option { + self.session_info(peer).and_then(|info| info.id) + } + fn protocol_version(&self, proto_id: ProtocolId, peer: PeerId) -> Option { + NetworkContext::protocol_version(self, proto_id, peer) } - /// Get number of messages and amount of memory used by them. - pub fn pool_status(&self) -> PoolStatus { - self.messages.read().status() + fn send(&self, peer: PeerId, packet_id: u8, message: Vec) { + if let Err(e) = NetworkContext::send(self, peer, packet_id, message) { + debug!(target: "whisper", "Failed to send packet {} to peer {}: {}", + packet_id, peer, e); + + self.disconnect_peer(peer) + } } } @@ -395,15 +433,23 @@ impl Network { } } - /// Acquire a sender to asynchronously feed messages to the whisper - /// network. - pub fn handle(&self) -> PoolHandle { - PoolHandle { messages: self.messages.clone() } + /// Post a message to the whisper network to be relayed. + pub fn post_message(&self, message: Message, context: &C) -> bool + where T: MessageHandler + { + let ok = self.messages.write().insert(message); + if ok { self.rally(context) } + ok + } + + /// Get number of messages and amount of memory used by them. + pub fn pool_status(&self) -> PoolStatus { + self.messages.read().status() } } impl Network { - fn rally(&self, io: &NetworkContext) { + fn rally(&self, io: &C) { // cannot be greater than 16MB (protocol limitation) const MAX_MESSAGES_PACKET_SIZE: usize = 8 * 1024 * 1024; @@ -428,11 +474,11 @@ impl Network { // check timeouts and skip peers who we can't send a rally to. match peer_data.state { - State::Unconfirmed(ref time) | State::TheirTurn(ref time) => { + State::Unconfirmed(ref time) => { punish_timeout(time); continue; } - State::OurTurn => {} + State::Confirmed => {} } // construct packet, skipping messages the peer won't accept. @@ -452,39 +498,19 @@ impl Network { stream.complete_unbounded_list(); - peer_data.state = State::TheirTurn(SystemTime::now()); - if let Err(e) = io.send(*peer_id, packet::MESSAGES, stream.out()) { - debug!(target: "whisper", "Failed to send messages packet to peer {}: {}", peer_id, e); - io.disconnect_peer(*peer_id); - } + io.send(*peer_id, packet::MESSAGES, stream.out()); } } // handle status packet from peer. - fn on_status(&self, peer: &PeerId, status: UntrustedRlp) + fn on_status(&self, peer: &PeerId, _status: UntrustedRlp) -> Result<(), Error> { - let proto: usize = status.as_val()?; - if proto != PROTOCOL_VERSION { return Err(Error::ProtocolVersionMismatch(proto)) } - let peers = self.peers.read(); + match peers.get(peer) { Some(peer) => { - let mut peer = peer.lock(); - let our_node_key = self.node_key.read().clone(); - - // handle this basically impossible edge case gracefully. - if peer.node_key == our_node_key { - return Err(Error::SameNodeKey); - } - - // peer with lower node key begins the rally. 
- if peer.node_key > our_node_key { - peer.state = State::OurTurn; - } else { - peer.state = State::TheirTurn(SystemTime::now()); - } - + peer.lock().state = State::Confirmed; Ok(()) } None => { @@ -513,8 +539,6 @@ impl Network { return Err(Error::UnexpectedMessage); } - peer.state = State::OurTurn; - let now = SystemTime::now(); let mut messages_vec = message_packet.iter().map(|rlp| Message::decode(rlp, now)) .collect::, _>>()?; @@ -541,6 +565,42 @@ impl Network { Ok(()) } + fn on_pow_requirement(&self, peer: &PeerId, requirement: UntrustedRlp) + -> Result<(), Error> + { + use byteorder::{ByteOrder, BigEndian}; + + let peers = self.peers.read(); + match peers.get(peer) { + Some(peer) => { + let mut peer = peer.lock(); + + if let State::Unconfirmed(_) = peer.state { + return Err(Error::UnexpectedMessage); + } + let bytes: Vec = requirement.as_val()?; + if bytes.len() != ::std::mem::size_of::() { + return Err(Error::InvalidPowReq); + } + + // as of byteorder 1.1.0, this is always defined. + let req = BigEndian::read_f64(&bytes[..]); + + if !req.is_normal() { + return Err(Error::InvalidPowReq); + } + + peer.set_pow_requirement(req); + } + None => { + debug!(target: "whisper", "Received message from unknown peer."); + return Err(Error::UnknownPeer(*peer)); + } + } + + Ok(()) + } + fn on_topic_filter(&self, peer: &PeerId, filter: UntrustedRlp) -> Result<(), Error> { @@ -564,10 +624,10 @@ impl Network { Ok(()) } - fn on_connect(&self, io: &NetworkContext, peer: &PeerId) { + fn on_connect(&self, io: &C, peer: &PeerId) { trace!(target: "whisper", "Connecting peer {}", peer); - let node_key = match io.session_info(*peer).and_then(|info| info.id) { + let node_key = match io.node_key(*peer) { Some(node_key) => node_key, None => { debug!(target: "whisper", "Disconnecting peer {}, who has no node key.", peer); @@ -576,17 +636,25 @@ impl Network { } }; + let version = match io.protocol_version(PROTOCOL_ID, *peer) { + Some(version) => version as usize, + None => { + io.disable_peer(*peer); + return + } + }; + self.peers.write().insert(*peer, Mutex::new(Peer { node_key: node_key, state: State::Unconfirmed(SystemTime::now()), known_messages: HashSet::new(), topic_filter: None, + pow_requirement: 0f64, + is_parity: io.protocol_version(PARITY_PROTOCOL_ID, *peer).is_some(), + _protocol_version: version, })); - if let Err(e) = io.send(*peer, packet::STATUS, ::rlp::encode(&PROTOCOL_VERSION).to_vec()) { - debug!(target: "whisper", "Error sending status: {}", e); - io.disconnect_peer(*peer); - } + io.send(*peer, packet::STATUS, ::rlp::EMPTY_LIST_RLP.to_vec()); } fn on_disconnect(&self, peer: &PeerId) { @@ -609,8 +677,9 @@ impl ::network::NetworkProtocolHandler for Network { let res = match packet_id { packet::STATUS => self.on_status(peer, rlp), packet::MESSAGES => self.on_messages(peer, rlp), + packet::POW_REQUIREMENT => self.on_pow_requirement(peer, rlp), packet::TOPIC_FILTER => self.on_topic_filter(peer, rlp), - other => Err(Error::UnknownPacket(other)), + _ => Ok(()), // ignore unknown packets. }; if let Err(e) = res { @@ -636,3 +705,19 @@ impl ::network::NetworkProtocolHandler for Network { } } } + +/// Dummy subprotocol used for parity extensions. 
+#[derive(Debug, Copy, Clone)] +pub struct ParityExtensions; + +impl ::network::NetworkProtocolHandler for ParityExtensions { + fn initialize(&self, _io: &NetworkContext, _host_info: &HostInfo) { } + + fn read(&self, _io: &NetworkContext, _peer: &PeerId, _id: u8, _msg: &[u8]) { } + + fn connected(&self, _io: &NetworkContext, _peer: &PeerId) { } + + fn disconnected(&self, _io: &NetworkContext, _peer: &PeerId) { } + + fn timeout(&self, _io: &NetworkContext, _timer: TimerToken) { } +} diff --git a/whisper/src/rpc/filter.rs b/whisper/src/rpc/filter.rs index 2b0c7544d..f4dd7bb9a 100644 --- a/whisper/src/rpc/filter.rs +++ b/whisper/src/rpc/filter.rs @@ -307,7 +307,7 @@ impl Filter { #[cfg(test)] mod tests { - use message::{CreateParams, Message}; + use message::{CreateParams, Message, Topic}; use rpc::types::{FilterRequest, HexEncode}; use rpc::abridge_topic; use super::*; @@ -325,38 +325,40 @@ mod tests { #[test] fn basic_match() { - let topics = vec![vec![1, 2, 3], vec![4, 5, 6]]; + let topics = vec![vec![1, 2, 3, 4], vec![5, 6, 7, 8]]; + let abridged_topics: Vec<_> = topics.iter().map(|x| abridge_topic(&x)).collect(); + let req = FilterRequest { decrypt_with: Default::default(), from: None, - topics: topics.iter().cloned().map(HexEncode).collect(), + topics: topics.into_iter().map(HexEncode).collect(), }; let filter = Filter::new(req).unwrap(); let message = Message::create(CreateParams { ttl: 100, payload: vec![1, 3, 5, 7, 9], - topics: topics.iter().map(|x| abridge_topic(&x)).collect(), + topics: abridged_topics.clone(), work: 0, - }); + }).unwrap(); assert!(filter.basic_matches(&message)); let message = Message::create(CreateParams { ttl: 100, payload: vec![1, 3, 5, 7, 9], - topics: topics.iter().take(1).map(|x| abridge_topic(&x)).collect(), + topics: abridged_topics.clone(), work: 0, - }); + }).unwrap(); assert!(filter.basic_matches(&message)); let message = Message::create(CreateParams { ttl: 100, payload: vec![1, 3, 5, 7, 9], - topics: Vec::new(), + topics: vec![Topic([1, 8, 3, 99])], work: 0, - }); + }).unwrap(); assert!(!filter.basic_matches(&message)); } @@ -366,6 +368,9 @@ mod tests { use rpc::payload::{self, EncodeParams}; use rpc::key_store::{Key, KeyStore}; + let topics = vec![vec![1, 2, 3, 4], vec![5, 6, 7, 8]]; + let abridged_topics: Vec<_> = topics.iter().map(|x| abridge_topic(&x)).collect(); + let mut store = KeyStore::new().unwrap(); let signing_pair = Key::new_asymmetric(store.rng()); let encrypting_key = Key::new_symmetric(store.rng()); @@ -386,24 +391,25 @@ mod tests { let message = Message::create(CreateParams { ttl: 100, payload: encrypted, - topics: vec![abridge_topic(&[9; 32])], + topics: abridged_topics.clone(), work: 0, - }); + }).unwrap(); let message2 = Message::create(CreateParams { ttl: 100, payload: vec![3, 5, 7, 9], - topics: vec![abridge_topic(&[9; 32])], + topics: abridged_topics, work: 0, - }); + }).unwrap(); let filter = Filter::new(FilterRequest { decrypt_with: Some(HexEncode(decrypt_id)), from: Some(HexEncode(signing_pair.public().unwrap().clone())), - topics: vec![HexEncode(vec![9; 32])], + topics: topics.into_iter().map(HexEncode).collect(), }).unwrap(); assert!(filter.basic_matches(&message)); + assert!(filter.basic_matches(&message2)); let items = ::std::cell::Cell::new(0); let on_match = |_| { items.set(items.get() + 1); }; diff --git a/whisper/src/rpc/mod.rs b/whisper/src/rpc/mod.rs index 6cec20c69..ed47ada15 100644 --- a/whisper/src/rpc/mod.rs +++ b/whisper/src/rpc/mod.rs @@ -155,16 +155,6 @@ pub trait PoolHandle: Send + Sync { fn 
pool_status(&self) -> ::net::PoolStatus; } -impl PoolHandle for ::net::PoolHandle { - fn relay(&self, message: Message) -> bool { - self.post_message(message) - } - - fn pool_status(&self) -> ::net::PoolStatus { - ::net::PoolHandle::pool_status(self) - } -} - /// Default, simple metadata implementation. #[derive(Clone, Default)] pub struct Meta { @@ -339,7 +329,7 @@ impl Whisper for WhisperClien payload: encrypted, topics: req.topics.into_iter().map(|x| abridge_topic(&x.into_inner())).collect(), work: req.priority, - }); + }).map_err(|_| whisper_error("Empty topics"))?; if !self.pool.relay(message) { Err(whisper_error("PoW too low to compete with other messages")) diff --git a/whisper/src/rpc/types.rs b/whisper/src/rpc/types.rs index ac06d69d5..9b2d53218 100644 --- a/whisper/src/rpc/types.rs +++ b/whisper/src/rpc/types.rs @@ -221,7 +221,7 @@ pub struct FilterItem { /// Time to live in seconds. pub ttl: u64, - /// Abridged topics that matched the filter. + /// Topics that matched the filter. pub topics: Vec, /// Unix timestamp of the message generation. From ee14a3fb3108f51a0dd7f9830359dc5263b3ded9 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sun, 10 Sep 2017 18:02:31 +0200 Subject: [PATCH 05/10] WASM runtime update (#6467) * refactor to new parity-wasm * more errors refactoring * final test * update tests * fix merge bugs --- Cargo.lock | 12 +- ethcore/evm/Cargo.toml | 3 +- ethcore/evm/src/lib.rs | 1 - ethcore/res/wasm-tests | 2 +- ethcore/wasm/Cargo.toml | 2 +- ethcore/wasm/src/env.rs | 44 +++++- ethcore/wasm/src/lib.rs | 55 ++++--- ethcore/wasm/src/ptr.rs | 14 +- ethcore/wasm/src/result.rs | 8 +- ethcore/wasm/src/runtime.rs | 302 +++++++++++++++++++++++++----------- ethcore/wasm/src/tests.rs | 134 +++++++++++++--- 11 files changed, 427 insertions(+), 150 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1bf522580..33d49894d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7,7 +7,7 @@ dependencies = [ "ethcore-logger 1.8.0", "ethcore-util 1.8.0", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-wasm 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-wasm 0.14.3 (registry+https://github.com/rust-lang/crates.io-index)", "vm 0.1.0", "wasm-utils 0.1.0 (git+https://github.com/paritytech/wasm-utils)", ] @@ -959,7 +959,6 @@ dependencies = [ "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-wasm 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.0", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2188,7 +2187,7 @@ dependencies = [ [[package]] name = "parity-wasm" -version = "0.12.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3278,13 +3277,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "wasm-utils" version = "0.1.0" -source = "git+https://github.com/paritytech/wasm-utils#9462bcc0680f0ec2c876abdf75bae981dd4344a5" +source = "git+https://github.com/paritytech/wasm-utils#95f9f04d1036c39de5af1c811c6e5dc488fb73d9" dependencies = [ "clap 2.24.2 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-wasm 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-wasm 0.14.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3500,7 +3500,7 @@ dependencies = [ "checksum parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e1d06f6ee0fda786df3784a96ee3f0629f529b91cbfb7d142f6410e6bcd1ce2c" "checksum parity-tokio-ipc 0.1.5 (git+https://github.com/nikvolf/parity-tokio-ipc)" = "" "checksum parity-ui-precompiled 1.4.0 (git+https://github.com/paritytech/js-precompiled.git)" = "" -"checksum parity-wasm 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)" = "51104c8b8da5cd0ebe0ab765dfab37bc1927b4a01a3d870b0fe09d9ee65e35ea" +"checksum parity-wasm 0.14.3 (registry+https://github.com/rust-lang/crates.io-index)" = "466c01423614bbf89a37b0fc081e1ed3523dfd9064497308ad3f9c7c9f0092bb" "checksum parity-wordlist 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "52142d717754f7ff7ef0fc8da1bdce4f302dd576fb9bf8b727d6a5fdef33348d" "checksum parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aebb68eebde2c99f89592d925288600fde220177e46b5c9a91ca218d245aeedf" "checksum parking_lot_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fb1b97670a2ffadce7c397fb80a3d687c4f3060140b885621ef1653d0e5d5068" diff --git a/ethcore/evm/Cargo.toml b/ethcore/evm/Cargo.toml index 89baaadde..46260c08c 100644 --- a/ethcore/evm/Cargo.toml +++ b/ethcore/evm/Cargo.toml @@ -16,11 +16,10 @@ lazy_static = "0.2" log = "0.3" rlp = { path = "../../util/rlp" } vm = { path = "../vm" } -parity-wasm = "0.12" -parking_lot = "0.4" ethcore-logger = { path = "../../logger" } wasm-utils = { git = "https://github.com/paritytech/wasm-utils" } hash = { path = "../../util/hash" } +parking_lot = "0.4" [dev-dependencies] rustc-hex = "1.0" diff --git a/ethcore/evm/src/lib.rs b/ethcore/evm/src/lib.rs index 1acb57400..8c48ad86b 100644 --- a/ethcore/evm/src/lib.rs +++ b/ethcore/evm/src/lib.rs @@ -23,7 +23,6 @@ extern crate ethcore_util as util; extern crate ethcore_bigint as bigint; extern crate ethjson; extern crate rlp; -extern crate parity_wasm; extern crate parking_lot; extern crate wasm_utils; extern crate ethcore_logger; diff --git a/ethcore/res/wasm-tests b/ethcore/res/wasm-tests index 519b0b967..5fd27564f 160000 --- a/ethcore/res/wasm-tests +++ b/ethcore/res/wasm-tests @@ -1 +1 @@ -Subproject commit 519b0b967cffd7d1236ef21698b1e6e415a048e9 +Subproject commit 5fd27564f1ab49b25bb419bfc0cc68137e1f12f2 diff --git a/ethcore/wasm/Cargo.toml b/ethcore/wasm/Cargo.toml index 7f3c41917..1b18a5c2a 100644 --- a/ethcore/wasm/Cargo.toml +++ b/ethcore/wasm/Cargo.toml @@ -8,7 +8,7 @@ byteorder = "1.0" ethcore-util = { path = "../../util" } ethcore-bigint = { path = "../../util/bigint" } log = "0.3" -parity-wasm = "0.12" +parity-wasm = "0.14" wasm-utils = { git = "https://github.com/paritytech/wasm-utils" } vm = { path = "../vm" } ethcore-logger = { path = "../../logger" } diff --git a/ethcore/wasm/src/env.rs b/ethcore/wasm/src/env.rs index 777016a1b..c32e3ed84 100644 --- a/ethcore/wasm/src/env.rs +++ b/ethcore/wasm/src/env.rs @@ -19,7 +19,7 @@ use parity_wasm::elements::ValueType::*; use parity_wasm::interpreter::{self, UserFunctionDescriptor}; 
use parity_wasm::interpreter::UserFunctionDescriptor::*; -use super::runtime::Runtime; +use super::runtime::{Runtime, UserTrap}; pub const SIGNATURES: &'static [UserFunctionDescriptor] = &[ Static( @@ -87,6 +87,41 @@ pub const SIGNATURES: &'static [UserFunctionDescriptor] = &[ &[I32; 3], Some(I32), ), + Static( + "_panic", + &[I32; 2], + None, + ), + Static( + "_blockhash", + &[I32; 3], + Some(I32), + ), + Static( + "_coinbase", + &[I32], + None, + ), + Static( + "_timestamp", + &[], + Some(I32), + ), + Static( + "_blocknumber", + &[], + Some(I32), + ), + Static( + "_difficulty", + &[I32], + None, + ), + Static( + "_gaslimit", + &[I32], + None, + ), // TODO: Get rid of it also somehow? Static( @@ -102,9 +137,10 @@ pub const SIGNATURES: &'static [UserFunctionDescriptor] = &[ ), ]; -pub fn native_bindings<'a>(runtime: &'a mut Runtime) -> interpreter::UserFunctions<'a> { - interpreter::UserFunctions { - executor: runtime, +pub fn native_bindings<'a>(runtime: &'a mut Runtime) -> interpreter::UserDefinedElements<'a, UserTrap> { + interpreter::UserDefinedElements { + executor: Some(runtime), + globals: ::std::collections::HashMap::new(), functions: ::std::borrow::Cow::from(SIGNATURES), } } \ No newline at end of file diff --git a/ethcore/wasm/src/lib.rs b/ethcore/wasm/src/lib.rs index fec270be2..8eb14a91a 100644 --- a/ethcore/wasm/src/lib.rs +++ b/ethcore/wasm/src/lib.rs @@ -39,21 +39,41 @@ use parity_wasm::{interpreter, elements}; use parity_wasm::interpreter::ModuleInstanceInterface; use vm::{GasLeft, ReturnData, ActionParams}; -use self::runtime::{Runtime, RuntimeContext}; +use self::runtime::{Runtime, RuntimeContext, UserTrap}; -pub use self::runtime::Error as RuntimeError; +pub use self::runtime::InterpreterError; const DEFAULT_RESULT_BUFFER: usize = 1024; +/// Wrapped interpreter error +#[derive(Debug)] +pub struct Error(InterpreterError); + +impl From for Error { + fn from(e: InterpreterError) -> Self { + Error(e) + } +} + +impl From for vm::Error { + fn from(e: Error) -> Self { + vm::Error::Wasm(format!("Wasm runtime error: {:?}", e.0)) + } +} + +impl From for vm::Error { + fn from(e: UserTrap) -> Self { e.into() } +} + /// Wasm interpreter instance pub struct WasmInterpreter { - program: interpreter::ProgramInstance, + program: runtime::InterpreterProgramInstance, result: Vec, } impl WasmInterpreter { /// New wasm interpreter instance - pub fn new() -> Result { + pub fn new() -> Result { Ok(WasmInterpreter { program: interpreter::ProgramInstance::new()?, result: Vec::with_capacity(DEFAULT_RESULT_BUFFER), @@ -109,7 +129,7 @@ impl vm::Vm for WasmInterpreter { params.value.value(), params.data.unwrap_or(Vec::with_capacity(0)), ) - )?; + ).map_err(|e| Error(e))?; { let execution_params = runtime.execution_params() @@ -118,27 +138,30 @@ impl vm::Vm for WasmInterpreter { let module_instance = self.program.add_module("contract", contract_module, Some(&execution_params.externals)) .map_err(|err| { trace!(target: "wasm", "Error adding contract module: {:?}", err); - vm::Error::from(RuntimeError::Interpreter(err)) + vm::Error::from(Error(err)) })?; - module_instance.execute_export("_call", execution_params) - .map_err(|err| { + match module_instance.execute_export("_call", execution_params) { + Ok(_) => { }, + Err(interpreter::Error::User(UserTrap::Suicide)) => { }, + Err(err) => { trace!(target: "wasm", "Error executing contract: {:?}", err); - vm::Error::from(RuntimeError::Interpreter(err)) - })?; + return Err(vm::Error::from(Error(err))) + } + } } let result = result::WasmResult::new(d_ptr); - 
if result.peek_empty(&*runtime.memory())? { + if result.peek_empty(&*runtime.memory()).map_err(|e| Error(e))? { trace!(target: "wasm", "Contract execution result is empty."); Ok(GasLeft::Known(runtime.gas_left()?.into())) } else { self.result.clear(); // todo: use memory views to avoid copy - self.result.extend(result.pop(&*runtime.memory())?); + self.result.extend(result.pop(&*runtime.memory()).map_err(|e| Error(e.into()))?); let len = self.result.len(); Ok(GasLeft::NeedsReturn { - gas_left: runtime.gas_left()?.into(), + gas_left: runtime.gas_left().map_err(|e| Error(e.into()))?.into(), data: ReturnData::new( ::std::mem::replace(&mut self.result, Vec::with_capacity(DEFAULT_RESULT_BUFFER)), 0, @@ -149,9 +172,3 @@ impl vm::Vm for WasmInterpreter { } } } - -impl From for vm::Error { - fn from(err: runtime::Error) -> vm::Error { - vm::Error::Wasm(format!("WASM runtime-error: {:?}", err)) - } -} diff --git a/ethcore/wasm/src/ptr.rs b/ethcore/wasm/src/ptr.rs index 11edbad70..8f7c15490 100644 --- a/ethcore/wasm/src/ptr.rs +++ b/ethcore/wasm/src/ptr.rs @@ -16,9 +16,9 @@ //! Wasm bound-checked ptr -use parity_wasm::interpreter; +use super::runtime::{InterpreterMemoryInstance, InterpreterError, UserTrap}; -/// Bound-checked wrapper for webassembly memory +/// Bound-checked wrapper for webassembly memory pub struct WasmPtr(u32); /// Error in bound check @@ -28,15 +28,21 @@ pub enum Error { } impl From for WasmPtr { - fn from(raw: u32) -> Self { + fn from(raw: u32) -> Self { WasmPtr(raw) } } +impl From for InterpreterError { + fn from(_e: Error) -> Self { + UserTrap::MemoryAccessViolation.into() + } +} + impl WasmPtr { // todo: use memory view when they are on /// Check memory range and return data with given length starting from the current pointer value - pub fn slice(&self, len: u32, mem: &interpreter::MemoryInstance) -> Result, Error> { + pub fn slice(&self, len: u32, mem: &InterpreterMemoryInstance) -> Result, Error> { mem.get(self.0, len as usize).map_err(|_| Error::AccessViolation) } diff --git a/ethcore/wasm/src/result.rs b/ethcore/wasm/src/result.rs index 3d1e51f64..932bbafa6 100644 --- a/ethcore/wasm/src/result.rs +++ b/ethcore/wasm/src/result.rs @@ -18,10 +18,8 @@ use byteorder::{LittleEndian, ByteOrder}; -use parity_wasm::interpreter; - use super::ptr::WasmPtr; -use super::runtime::Error as RuntimeError; +use super::runtime::{InterpreterError, InterpreterMemoryInstance}; /// Wrapper for wasm contract call result pub struct WasmResult { @@ -35,13 +33,13 @@ impl WasmResult { } /// Check if the result contains any data - pub fn peek_empty(&self, mem: &interpreter::MemoryInstance) -> Result { + pub fn peek_empty(&self, mem: &InterpreterMemoryInstance) -> Result { let result_len = LittleEndian::read_u32(&self.ptr.slice(16, mem)?[12..16]); Ok(result_len == 0) } /// Consume the result ptr and return the actual data from wasm linear memory - pub fn pop(self, mem: &interpreter::MemoryInstance) -> Result, RuntimeError> { + pub fn pop(self, mem: &InterpreterMemoryInstance) -> Result, InterpreterError> { let result_ptr = LittleEndian::read_u32(&self.ptr.slice(16, mem)?[8..12]); let result_len = LittleEndian::read_u32(&self.ptr.slice(16, mem)?[12..16]); trace!(target: "wasm", "contract result: {} bytes at @{}", result_len, result_ptr); diff --git a/ethcore/wasm/src/runtime.rs b/ethcore/wasm/src/runtime.rs index f7fb07473..6ddaa7a10 100644 --- a/ethcore/wasm/src/runtime.rs +++ b/ethcore/wasm/src/runtime.rs @@ -30,31 +30,68 @@ use vm::CallType; use super::ptr::{WasmPtr, Error as PtrError}; use 
super::call_args::CallArgs; -/// Wasm runtime error -#[derive(Debug)] -pub enum Error { - /// Storage error - Storage, - /// Allocator error - Allocator, - /// Invalid gas state during the call - InvalidGasState, +/// User trap in native code +#[derive(Debug, Clone, PartialEq)] +pub enum UserTrap { + /// Storage read error + StorageReadError, + /// Storage update error + StorageUpdateError, /// Memory access violation - AccessViolation, - /// Interpreter runtime error - Interpreter(interpreter::Error), + MemoryAccessViolation, + /// Native code resulted in suicide + Suicide, + /// Suicide was requested but coudn't complete + SuicideAbort, + /// Invalid gas state inside interpreter + InvalidGasState, + /// Query of the balance resulted in an error + BalanceQueryError, + /// Failed allocation + AllocationFailed, + /// Gas limit reached + GasLimit, + /// Unknown runtime function + Unknown, + /// Passed string had invalid utf-8 encoding + BadUtf8, + /// Other error in native code + Other, + /// Panic with message + Panic(String), } -impl From for Error { - fn from(err: interpreter::Error) -> Self { - Error::Interpreter(err) +impl ::std::fmt::Display for UserTrap { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { + match *self { + UserTrap::StorageReadError => write!(f, "Storage read error"), + UserTrap::StorageUpdateError => write!(f, "Storage update error"), + UserTrap::MemoryAccessViolation => write!(f, "Memory access violation"), + UserTrap::SuicideAbort => write!(f, "Attempt to suicide resulted in an error"), + UserTrap::InvalidGasState => write!(f, "Invalid gas state"), + UserTrap::BalanceQueryError => write!(f, "Balance query resulted in an error"), + UserTrap::Suicide => write!(f, "Suicide result"), + UserTrap::Unknown => write!(f, "Unknown runtime function invoked"), + UserTrap::AllocationFailed => write!(f, "Memory allocation failed (OOM)"), + UserTrap::BadUtf8 => write!(f, "String encoding is bad utf-8 sequence"), + UserTrap::GasLimit => write!(f, "Invocation resulted in gas limit violated"), + UserTrap::Other => write!(f, "Other unspecified error"), + UserTrap::Panic(ref msg) => write!(f, "Panic: {}", msg), + } } } -impl From for Error { +impl interpreter::UserError for UserTrap { } + +pub type InterpreterError = interpreter::Error; +pub type InterpreterMemoryInstance = interpreter::MemoryInstance; +pub type InterpreterProgramInstance = interpreter::ProgramInstance; +pub type InterpreterCallerContext<'a> = interpreter::CallerContext<'a, UserTrap>; + +impl From for UserTrap { fn from(err: PtrError) -> Self { match err { - PtrError::AccessViolation => Error::AccessViolation, + PtrError::AccessViolation => UserTrap::MemoryAccessViolation, } } } @@ -79,20 +116,20 @@ pub struct Runtime<'a, 'b> { gas_limit: u64, dynamic_top: u32, ext: &'a mut vm::Ext, - memory: Arc, + memory: Arc, context: RuntimeContext, - instance: &'b interpreter::ProgramInstance, + instance: &'b InterpreterProgramInstance, } impl<'a, 'b> Runtime<'a, 'b> { /// New runtime for wasm contract with specified params pub fn with_params<'c, 'd>( ext: &'c mut vm::Ext, - memory: Arc, + memory: Arc, stack_space: u32, gas_limit: u64, context: RuntimeContext, - program_instance: &'d interpreter::ProgramInstance, + program_instance: &'d InterpreterProgramInstance, ) -> Runtime<'c, 'd> { Runtime { gas_counter: 0, @@ -106,30 +143,28 @@ impl<'a, 'b> Runtime<'a, 'b> { } /// Write to the storage from wasm memory - pub fn storage_write(&mut self, context: interpreter::CallerContext) - -> Result, 
interpreter::Error> + pub fn storage_write(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> { let mut context = context; let val = self.pop_h256(&mut context)?; let key = self.pop_h256(&mut context)?; trace!(target: "wasm", "storage_write: value {} at @{}", &val, &key); - self.ext.set_storage(key, val) - .map_err(|_| interpreter::Error::Trap("Storage update error".to_owned()))?; + self.ext.set_storage(key, val).map_err(|_| UserTrap::StorageUpdateError)?; Ok(Some(0i32.into())) } /// Read from the storage to wasm memory - pub fn storage_read(&mut self, context: interpreter::CallerContext) - -> Result, interpreter::Error> + pub fn storage_read(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> { let mut context = context; let val_ptr = context.value_stack.pop_as::()?; let key = self.pop_h256(&mut context)?; - let val = self.ext.storage_at(&key) - .map_err(|_| interpreter::Error::Trap("Storage read error".to_owned()))?; + let val = self.ext.storage_at(&key).map_err(|_| UserTrap::StorageReadError)?; self.memory.set(val_ptr as u32, &*val)?; @@ -137,21 +172,21 @@ impl<'a, 'b> Runtime<'a, 'b> { } /// Pass suicide to state runtime - pub fn suicide(&mut self, context: interpreter::CallerContext) - -> Result, interpreter::Error> + pub fn suicide(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> { let mut context = context; let refund_address = self.pop_address(&mut context)?; - self.ext.suicide(&refund_address) - .map_err(|_| interpreter::Error::Trap("Suicide error".to_owned()))?; + self.ext.suicide(&refund_address).map_err(|_| UserTrap::SuicideAbort)?; - Ok(None) + // We send trap to interpreter so it should abort further execution + Err(UserTrap::Suicide.into()) } /// Invoke create in the state runtime - pub fn create(&mut self, context: interpreter::CallerContext) - -> Result, interpreter::Error> + pub fn create(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> { // // method signature: @@ -172,7 +207,7 @@ impl<'a, 'b> Runtime<'a, 'b> { let code = self.memory.get(code_ptr, code_len as usize)?; let gas_left = self.gas_left() - .map_err(|_| interpreter::Error::Trap("Gas state error".to_owned()))? + .map_err(|_| UserTrap::InvalidGasState)? 
.into(); match self.ext.create(&gas_left, &endowment, &code, vm::CreateContractAddress::FromSenderAndCodeHash) { @@ -189,8 +224,8 @@ impl<'a, 'b> Runtime<'a, 'b> { } } - pub fn call(&mut self, context: interpreter::CallerContext) - -> Result, interpreter::Error> + pub fn call(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> { // // method signature: @@ -207,8 +242,8 @@ impl<'a, 'b> Runtime<'a, 'b> { } - fn call_code(&mut self, context: interpreter::CallerContext) - -> Result, interpreter::Error> + fn call_code(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> { // // signature (same as static call): @@ -227,9 +262,9 @@ impl<'a, 'b> Runtime<'a, 'b> { &mut self, use_val: bool, call_type: CallType, - context: interpreter::CallerContext, + context: InterpreterCallerContext, ) - -> Result, interpreter::Error> + -> Result, InterpreterError> { trace!(target: "wasm", "runtime: call code"); @@ -255,7 +290,7 @@ impl<'a, 'b> Runtime<'a, 'b> { if let Some(ref val) = val { let address_balance = self.ext.balance(&self.context.address) - .map_err(|_| interpreter::Error::Trap("Gas state error".to_owned()))?; + .map_err(|_| UserTrap::BalanceQueryError)?; if &address_balance < val { trace!(target: "wasm", "runtime: call failed due to balance check"); @@ -266,7 +301,7 @@ impl<'a, 'b> Runtime<'a, 'b> { let mut result = Vec::with_capacity(result_alloc_len as usize); result.resize(result_alloc_len as usize, 0); let gas = self.gas_left() - .map_err(|_| interpreter::Error::Trap("Gas state error".to_owned()))? + .map_err(|_| UserTrap::InvalidGasState)? .into(); // todo: optimize to use memory views once it's in let payload = self.memory.get(input_ptr, input_len as usize)?; @@ -294,8 +329,8 @@ impl<'a, 'b> Runtime<'a, 'b> { } } - pub fn static_call(&mut self, context: interpreter::CallerContext) - -> Result, interpreter::Error> + pub fn static_call(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> { // signature (same as code call): // fn ( @@ -311,8 +346,8 @@ impl<'a, 'b> Runtime<'a, 'b> { /// Allocate memory using the wasm stack params - pub fn malloc(&mut self, context: interpreter::CallerContext) - -> Result, interpreter::Error> + pub fn malloc(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> { let amount = context.value_stack.pop_as::()? as u32; let previous_top = self.dynamic_top; @@ -321,21 +356,21 @@ impl<'a, 'b> Runtime<'a, 'b> { } /// Allocate memory in wasm memory instance - pub fn alloc(&mut self, amount: u32) -> Result { + pub fn alloc(&mut self, amount: u32) -> Result { let previous_top = self.dynamic_top; self.dynamic_top = previous_top + amount; Ok(previous_top.into()) } /// Report gas cost with the params passed in wasm stack - fn gas(&mut self, context: interpreter::CallerContext) - -> Result, interpreter::Error> + fn gas(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> { let amount = context.value_stack.pop_as::()? as u64; if self.charge_gas(amount) { Ok(None) } else { - Err(interpreter::Error::Trap(format!("Gas exceeds limits of {}", self.gas_limit))) + Err(UserTrap::GasLimit.into()) } } @@ -350,50 +385,50 @@ impl<'a, 'b> Runtime<'a, 'b> { } } - fn h256_at(&self, ptr: WasmPtr) -> Result { + fn h256_at(&self, ptr: WasmPtr) -> Result { Ok(H256::from_slice(&ptr.slice(32, &*self.memory) - .map_err(|_| interpreter::Error::Trap("Memory access violation".to_owned()))? + .map_err(|_| UserTrap::MemoryAccessViolation)? 
)) } - fn pop_h256(&self, context: &mut interpreter::CallerContext) -> Result { + fn pop_h256(&self, context: &mut InterpreterCallerContext) -> Result { let ptr = WasmPtr::from_i32(context.value_stack.pop_as::()?) - .map_err(|_| interpreter::Error::Trap("Memory access violation".to_owned()))?; + .map_err(|_| UserTrap::MemoryAccessViolation)?; self.h256_at(ptr) } - fn pop_u256(&self, context: &mut interpreter::CallerContext) -> Result { + fn pop_u256(&self, context: &mut InterpreterCallerContext) -> Result { let ptr = WasmPtr::from_i32(context.value_stack.pop_as::()?) - .map_err(|_| interpreter::Error::Trap("Memory access violation".to_owned()))?; + .map_err(|_| UserTrap::MemoryAccessViolation)?; self.h256_at(ptr).map(Into::into) } - fn address_at(&self, ptr: WasmPtr) -> Result { + fn address_at(&self, ptr: WasmPtr) -> Result { Ok(Address::from_slice(&ptr.slice(20, &*self.memory) - .map_err(|_| interpreter::Error::Trap("Memory access violation".to_owned()))? + .map_err(|_| UserTrap::MemoryAccessViolation)? )) } - fn pop_address(&self, context: &mut interpreter::CallerContext) -> Result { + fn pop_address(&self, context: &mut InterpreterCallerContext) -> Result { let ptr = WasmPtr::from_i32(context.value_stack.pop_as::()?) - .map_err(|_| interpreter::Error::Trap("Memory access violation".to_owned()))?; + .map_err(|_| UserTrap::MemoryAccessViolation)?; self.address_at(ptr) } - fn user_trap(&mut self, _context: interpreter::CallerContext) - -> Result, interpreter::Error> + fn unknown_trap(&mut self, _context: InterpreterCallerContext) + -> Result, UserTrap> { - Err(interpreter::Error::Trap("unknown trap".to_owned())) + Err(UserTrap::Unknown) } fn user_noop(&mut self, - _context: interpreter::CallerContext - ) -> Result, interpreter::Error> { + _context: InterpreterCallerContext + ) -> Result, InterpreterError> { Ok(None) } /// Write call descriptor to wasm memory - pub fn write_descriptor(&mut self, call_args: CallArgs) -> Result { + pub fn write_descriptor(&mut self, call_args: CallArgs) -> Result { let d_ptr = self.alloc(16)?; let args_len = call_args.len(); @@ -417,14 +452,14 @@ impl<'a, 'b> Runtime<'a, 'b> { Ok(d_ptr.into()) } - fn debug_log(&mut self, context: interpreter::CallerContext) - -> Result, interpreter::Error> + fn debug_log(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> { let msg_len = context.value_stack.pop_as::()? as u32; let msg_ptr = context.value_stack.pop_as::()? as u32; let msg = String::from_utf8(self.memory.get(msg_ptr, msg_len as usize)?) - .map_err(|_| interpreter::Error::Trap("Debug log utf-8 decoding error".to_owned()))?; + .map_err(|_| UserTrap::BadUtf8)?; trace!(target: "wasm", "Contract debug message: {}", msg); @@ -432,18 +467,18 @@ impl<'a, 'b> Runtime<'a, 'b> { } /// Query current gas left for execution - pub fn gas_left(&self) -> Result { - if self.gas_counter > self.gas_limit { return Err(Error::InvalidGasState); } + pub fn gas_left(&self) -> Result { + if self.gas_counter > self.gas_limit { return Err(UserTrap::InvalidGasState); } Ok(self.gas_limit - self.gas_counter) } /// Shared memory reference - pub fn memory(&self) -> &interpreter::MemoryInstance { + pub fn memory(&self) -> &InterpreterMemoryInstance { &*self.memory } - fn mem_copy(&self, context: interpreter::CallerContext) - -> Result, interpreter::Error> + fn mem_copy(&self, context: InterpreterCallerContext) + -> Result, InterpreterError> { let len = context.value_stack.pop_as::()? as u32; let dst = context.value_stack.pop_as::()? 
as u32; @@ -459,8 +494,8 @@ impl<'a, 'b> Runtime<'a, 'b> { x >> 24 | x >> 8 & 0xff00 | x << 8 & 0xff0000 | x << 24 } - fn bitswap_i64(&mut self, context: interpreter::CallerContext) - -> Result, interpreter::Error> + fn bitswap_i64(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> { let x1 = context.value_stack.pop_as::()?; let x2 = context.value_stack.pop_as::()?; @@ -471,13 +506,83 @@ impl<'a, 'b> Runtime<'a, 'b> { self.return_i64(result) } - fn return_i64(&mut self, val: i64) -> Result, interpreter::Error> { + fn user_panic(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> + { + let msg_len = context.value_stack.pop_as::()? as u32; + let msg_ptr = context.value_stack.pop_as::()? as u32; + + let msg = String::from_utf8(self.memory.get(msg_ptr, msg_len as usize)?) + .map_err(|_| UserTrap::BadUtf8)?; + + trace!(target: "wasm", "Contract custom panic message: {}", msg); + + Err(UserTrap::Panic(msg).into()) + } + + fn block_hash(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> + { + let return_ptr = context.value_stack.pop_as::()? as u32; + let block_hi = context.value_stack.pop_as::()? as u32; + let block_lo = context.value_stack.pop_as::()? as u32; + + let block_num = (block_hi as u64) << 32 | block_lo as u64; + + trace!("Requesting block hash for block #{}", block_num); + let hash = self.ext.blockhash(&U256::from(block_num)); + + self.memory.set(return_ptr, &*hash)?; + + Ok(Some(0i32.into())) + } + + fn coinbase(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> + { + let return_ptr = context.value_stack.pop_as::()? as u32; + self.memory.set(return_ptr, &*self.ext.env_info().author)?; + Ok(None) + } + + fn timestamp(&mut self, _context: InterpreterCallerContext) + -> Result, InterpreterError> + { + let timestamp = self.ext.env_info().timestamp as i64; + self.return_i64(timestamp) + } + + fn block_number(&mut self, _context: InterpreterCallerContext) + -> Result, InterpreterError> + { + let block_number: u64 = self.ext.env_info().number.into(); + self.return_i64(block_number as i64) + } + + fn difficulty(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> + { + let return_ptr = context.value_stack.pop_as::()? as u32; + let difficulty: H256 = self.ext.env_info().difficulty.into(); + self.memory.set(return_ptr, &*difficulty)?; + Ok(None) + } + + fn ext_gas_limit(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> + { + let return_ptr = context.value_stack.pop_as::()? 
as u32; + let gas_limit: H256 = self.ext.env_info().gas_limit.into(); + self.memory.set(return_ptr, &*gas_limit)?; + Ok(None) + } + + fn return_i64(&mut self, val: i64) -> Result, InterpreterError> { let uval = val as u64; let hi = (uval >> 32) as i32; let lo = (uval << 32 >> 32) as i32; - let target = self.instance.module("contract") - .ok_or(interpreter::Error::Trap("Error locating main execution entry".to_owned()))?; + let target = self.instance.module("contract").ok_or(UserTrap::Other)?; target.execute_export( "setTempRet0", self.execution_params().add_argument( @@ -489,7 +594,7 @@ impl<'a, 'b> Runtime<'a, 'b> { )) } - pub fn execution_params(&mut self) -> interpreter::ExecutionParams { + pub fn execution_params(&mut self) -> interpreter::ExecutionParams { use super::env; let env_instance = self.instance.module("env") @@ -505,9 +610,9 @@ impl<'a, 'b> Runtime<'a, 'b> { } } -impl<'a, 'b> interpreter::UserFunctionExecutor for Runtime<'a, 'b> { - fn execute(&mut self, name: &str, context: interpreter::CallerContext) - -> Result, interpreter::Error> +impl<'a, 'b> interpreter::UserFunctionExecutor for Runtime<'a, 'b> { + fn execute(&mut self, name: &str, context: InterpreterCallerContext) + -> Result, InterpreterError> { match name { "_malloc" => { @@ -551,10 +656,31 @@ impl<'a, 'b> interpreter::UserFunctionExecutor for Runtime<'a, 'b> { "_llvm_bswap_i64" => { self.bitswap_i64(context) }, + "_panic" => { + self.user_panic(context) + }, + "_blockhash" => { + self.block_hash(context) + }, + "_coinbase" => { + self.coinbase(context) + }, + "_timestamp" => { + self.timestamp(context) + }, + "_blocknumber" => { + self.block_number(context) + }, + "_difficulty" => { + self.difficulty(context) + }, + "_gaslimit" => { + self.ext_gas_limit(context) + }, _ => { trace!(target: "wasm", "Trapped due to unhandled function: '{}'", name); - self.user_trap(context) - } + Ok(self.unknown_trap(context)?) + }, } } } diff --git a/ethcore/wasm/src/tests.rs b/ethcore/wasm/src/tests.rs index f0a828394..b79f38bdc 100644 --- a/ethcore/wasm/src/tests.rs +++ b/ethcore/wasm/src/tests.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . 
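// A minimal sketch (hypothetical helpers, for illustration only) of the 32-bit
// convention the new externs above rely on: `_blockhash` receives its block number
// as two i32 halves, and `return_i64` hands a 64-bit result back as a low word plus
// a high word published through the contract module's `setTempRet0` export.
fn join_u64(hi: u32, lo: u32) -> u64 {
    (hi as u64) << 32 | lo as u64
}

fn split_i64(v: i64) -> (i32, i32) {
    let u = v as u64;
    ((u >> 32) as i32, u as i32) // (hi, lo)
}

#[test]
fn block_number_round_trips() {
    let n: u64 = 0x9999999999; // same block number the `externs` test below feeds in
    let (hi, lo) = split_i64(n as i64);
    assert_eq!(join_u64(hi as u32, lo as u32), n);
}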
use std::sync::Arc; +use std::collections::HashMap; use byteorder::{LittleEndian, ByteOrder}; use bigint::prelude::U256; use bigint::hash::H256; @@ -87,7 +88,7 @@ fn logger() { }; println!("ext.store: {:?}", ext.store); - assert_eq!(gas_left, U256::from(99327)); + assert_eq!(gas_left, U256::from(99529)); let address_val: H256 = address.into(); assert_eq!( ext.store.get(&"0100000000000000000000000000000000000000000000000000000000000000".parse().unwrap()).expect("storage key to exist"), @@ -138,7 +139,7 @@ fn identity() { } }; - assert_eq!(gas_left, U256::from(99_672)); + assert_eq!(gas_left, U256::from(99_762)); assert_eq!( Address::from_slice(&result), @@ -172,7 +173,7 @@ fn dispersion() { } }; - assert_eq!(gas_left, U256::from(99_270)); + assert_eq!(gas_left, U256::from(99_360)); assert_eq!( result, @@ -201,7 +202,7 @@ fn suicide_not() { } }; - assert_eq!(gas_left, U256::from(99_578)); + assert_eq!(gas_left, U256::from(99_668)); assert_eq!( result, @@ -235,7 +236,7 @@ fn suicide() { } }; - assert_eq!(gas_left, U256::from(99_621)); + assert_eq!(gas_left, U256::from(99_699)); assert!(ext.suicides.contains(&refund)); } @@ -266,7 +267,7 @@ fn create() { assert!(ext.calls.contains( &FakeCall { call_type: FakeCallType::Create, - gas: U256::from(99_674), + gas: U256::from(99_734), sender_address: None, receive_address: None, value: Some(1_000_000_000.into()), @@ -274,7 +275,7 @@ fn create() { code_address: None, } )); - assert_eq!(gas_left, U256::from(99_596)); + assert_eq!(gas_left, U256::from(99_686)); } @@ -308,7 +309,7 @@ fn call_code() { assert!(ext.calls.contains( &FakeCall { call_type: FakeCallType::Call, - gas: U256::from(99_069), + gas: U256::from(99_129), sender_address: Some(sender), receive_address: Some(receiver), value: None, @@ -316,7 +317,7 @@ fn call_code() { code_address: Some("0d13710000000000000000000000000000000000".parse().unwrap()), } )); - assert_eq!(gas_left, U256::from(94144)); + assert_eq!(gas_left, U256::from(94262)); // siphash result let res = LittleEndian::read_u32(&result[..]); @@ -353,7 +354,7 @@ fn call_static() { assert!(ext.calls.contains( &FakeCall { call_type: FakeCallType::Call, - gas: U256::from(99_069), + gas: U256::from(99_129), sender_address: Some(sender), receive_address: Some(receiver), value: None, @@ -361,7 +362,7 @@ fn call_static() { code_address: Some("13077bfb00000000000000000000000000000000".parse().unwrap()), } )); - assert_eq!(gas_left, U256::from(94144)); + assert_eq!(gas_left, U256::from(94262)); // siphash result let res = LittleEndian::read_u32(&result[..]); @@ -387,7 +388,7 @@ fn realloc() { GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), } }; - assert_eq!(gas_left, U256::from(99432)); + assert_eq!(gas_left, U256::from(99522)); assert_eq!(result, vec![0u8; 2]); } @@ -413,12 +414,15 @@ fn storage_read() { } }; - assert_eq!(gas_left, U256::from(99682)); + assert_eq!(gas_left, U256::from(99800)); assert_eq!(Address::from(&result[12..32]), address); } macro_rules! reqrep_test { ($name: expr, $input: expr) => { + reqrep_test!($name, $input, vm::EnvInfo::default(), HashMap::new()) + }; + ($name: expr, $input: expr, $info: expr, $block_hashes: expr) => { { ::ethcore_logger::init_log(); let code = load_sample!($name); @@ -428,9 +432,13 @@ macro_rules! 
reqrep_test { params.code = Some(Arc::new(code)); params.data = Some($input); + let mut fake_ext = FakeExt::new(); + fake_ext.info = $info; + fake_ext.blockhashes = $block_hashes; + let (gas_left, result) = { let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut FakeExt::new()).expect("Interpreter to execute without any errors"); + let result = interpreter.exec(params, &mut fake_ext).expect("Interpreter to execute without any errors"); match result { GasLeft::Known(_) => { panic!("Test is expected to return payload to check"); }, GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), @@ -439,7 +447,7 @@ macro_rules! reqrep_test { (gas_left, result) } - } + }; } // math_* tests check the ability of wasm contract to perform big integer operations @@ -464,7 +472,7 @@ fn math_add() { } ); - assert_eq!(gas_left, U256::from(98087)); + assert_eq!(gas_left, U256::from(98177)); assert_eq!( U256::from_dec_str("1888888888888888888888888888887").unwrap(), (&result[..]).into() @@ -486,7 +494,7 @@ fn math_mul() { } ); - assert_eq!(gas_left, U256::from(97236)); + assert_eq!(gas_left, U256::from(97326)); assert_eq!( U256::from_dec_str("888888888888888888888888888887111111111111111111111111111112").unwrap(), (&result[..]).into() @@ -508,7 +516,7 @@ fn math_sub() { } ); - assert_eq!(gas_left, U256::from(98131)); + assert_eq!(gas_left, U256::from(98221)); assert_eq!( U256::from_dec_str("111111111111111111111111111111").unwrap(), (&result[..]).into() @@ -529,9 +537,97 @@ fn math_div() { } ); - assert_eq!(gas_left, U256::from(91420)); + assert_eq!(gas_left, U256::from(91510)); assert_eq!( U256::from_dec_str("1125000").unwrap(), (&result[..]).into() ); } + +// This test checks the ability of wasm contract to invoke +// varios blockchain runtime methods +#[test] +fn externs() { + let (gas_left, result) = reqrep_test!( + "externs.wasm", + Vec::new(), + vm::EnvInfo { + number: 0x9999999999u64.into(), + author: "efefefefefefefefefefefefefefefefefefefef".parse().unwrap(), + timestamp: 0x8888888888u64.into(), + difficulty: H256::from("0f1f2f3f4f5f6f7f8f9fafbfcfdfefff0d1d2d3d4d5d6d7d8d9dadbdcdddedfd").into(), + gas_limit: 0x777777777777u64.into(), + last_hashes: Default::default(), + gas_used: 0.into(), + }, + { + let mut hashes = HashMap::new(); + hashes.insert( + U256::from(0), + H256::from("9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d") + ); + hashes.insert( + U256::from(1), + H256::from("7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b") + ); + hashes + } + ); + + assert_eq!( + &result[0..64].to_vec(), + &vec![ + 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + ], + "Block hashes requested and returned do not match" + ); + + assert_eq!( + &result[64..84].to_vec(), + &vec![ + 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, + ], + "Coinbase requested and returned does not match" + ); + + assert_eq!( + &result[84..92].to_vec(), + &vec![ + 0x88, 0x88, 0x88, 0x88, 0x88, 0x00, 0x00, 0x00 + ], + "Timestamp requested and returned does not match" + ); + + assert_eq!( + &result[92..100].to_vec(), + &vec![ + 
0x99, 0x99, 0x99, 0x99, 0x99, 0x00, 0x00, 0x00 + ], + "Block number requested and returned does not match" + ); + + assert_eq!( + &result[100..132].to_vec(), + &vec![ + 0x0f, 0x1f, 0x2f, 0x3f, 0x4f, 0x5f, 0x6f, 0x7f, + 0x8f, 0x9f, 0xaf, 0xbf, 0xcf, 0xdf, 0xef, 0xff, + 0x0d, 0x1d, 0x2d, 0x3d, 0x4d, 0x5d, 0x6d, 0x7d, + 0x8d, 0x9d, 0xad, 0xbd, 0xcd, 0xdd, 0xed, 0xfd, + ], + "Difficulty requested and returned does not match" + ); + + assert_eq!( + &result[132..164].to_vec(), + &vec![ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + ], + "Gas limit requested and returned does not match" + ); + + assert_eq!(gas_left, U256::from(97588)); +} \ No newline at end of file From f1a050366f62804a5122ff7c975690ee0f9a3192 Mon Sep 17 00:00:00 2001 From: Nicolas Gotchac Date: Sun, 10 Sep 2017 18:03:35 +0200 Subject: [PATCH 06/10] Fix slow balances (#6471) * Update token updates * Update token info fetching * Update logger * Minor fixes to updates and notifications for balances * Use Pubsub * Fix timeout. * Use pubsub for status. * Fix signer subscription. * Process tokens in chunks. * Fix tokens loaded by chunks * Linting * Dispatch tokens asap * Fix chunks processing. * Better filter options * Parallel log fetching. * Fix signer polling. * Fix initial block query. * Token balances updates : the right(er) way * Better tokens info fetching * Fixes in token data fetching * Only fetch what's needed (tokens) * Fix linting issues * Revert "Transaction permissioning (#6441)" This reverts commit eed0e8b03a5ab328d0c4f6fcecd937a1f79f1135. * Revert "Revert "Transaction permissioning (#6441)"" This reverts commit 8f96415e58dde652e5828706eb2639d43416f448. * Update wasm-tests. * Fixing balances fetching * Fix requests tracking in UI * Fix request watching * Update the Logger * PR Grumbles Fixes * PR Grumbles fixes * Linting... 
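The "Parallel log fetching" item above maps to the `BlockChain::logs` change further down in this patch: candidate block numbers are processed in chunks of 128, each chunk is scanned in parallel with rayon 0.8, and the per-chunk results are flattened back into order. A minimal sketch of that shape, with hypothetical names and plain block numbers standing in for the real receipt/log walk, might look like this:

extern crate rayon; // rayon 0.8, as added to ethcore/Cargo.toml below

use rayon::prelude::*;

/// Keep the blocks accepted by `matches`, scanning them in parallel, 128 at a time.
fn matching_blocks<F>(blocks: &[u64], matches: F) -> Vec<u64>
    where F: Fn(u64) -> bool + Send + Sync
{
    blocks
        .chunks(128)                  // bound how much work is in flight at once
        .flat_map(|chunk| {
            chunk.into_par_iter()     // fan the chunk out across the rayon thread pool
                .filter(|n| matches(**n))
                .map(|&n| n)
                .collect::<Vec<_>>()  // gather this chunk's hits...
        })
        .collect()                    // ...then splice the chunks back together in order
}

fn main() {
    let blocks: Vec<u64> = (0..1_000).collect();
    assert_eq!(matching_blocks(&blocks, |n| n % 2 == 0).len(), 500);
}

The outer chunking keeps memory bounded and preserves ordering across chunks, while the inner `into_par_iter` provides the parallel speed-up; the real implementation additionally loads receipts and bodies per block and applies the log filter inside the parallel stage.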
--- Cargo.lock | 52 ++-- ethcore/Cargo.toml | 1 + ethcore/src/blockchain/blockchain.rs | 81 ++--- ethcore/src/client/client.rs | 37 ++- ethcore/src/client/test_client.rs | 6 +- ethcore/src/client/traits.rs | 5 +- ethcore/src/lib.rs | 1 + ethcore/src/verification/verification.rs | 7 +- js/src/api/api.js | 7 +- js/src/api/pubsub/eth/eth.js | 2 +- js/src/api/pubsub/parity/parity.js | 32 +- js/src/api/pubsub/pubsub.js | 29 ++ js/src/api/pubsub/pubsubBase.js | 11 +- js/src/api/pubsub/signer/index.js | 16 + js/src/api/pubsub/signer/signer.js | 37 +++ js/src/api/rpc/parity/parity.js | 9 + js/src/api/subscriptions/eth.js | 54 +++- js/src/api/subscriptions/eth.spec.js | 3 +- js/src/api/subscriptions/personal.js | 39 ++- js/src/api/subscriptions/signer.js | 37 ++- js/src/api/transport/jsonRpcBase.js | 25 +- js/src/api/transport/logger.js | 150 +++++++++ js/src/api/transport/ws/ws.js | 17 +- js/src/index.js | 4 - js/src/mobx/hardwareStore.js | 52 ++-- js/src/modals/Transfer/store.js | 3 +- js/src/modals/Transfer/transfer.js | 4 +- js/src/redux/providers/apiReducer.js | 2 +- js/src/redux/providers/balances.js | 288 ++++-------------- js/src/redux/providers/balancesActions.js | 256 ++++++++++------ .../providers/certifications/middleware.js | 2 +- js/src/redux/providers/index.js | 1 + js/src/redux/providers/personal.js | 114 +++++-- js/src/redux/providers/personalActions.js | 24 +- js/src/redux/providers/requestsActions.js | 13 +- js/src/redux/providers/status.js | 247 +++++++-------- js/src/redux/providers/tokens.js | 161 ++++++++++ js/src/redux/providers/tokensActions.js | 212 +++++++++++-- js/src/redux/providers/tokensReducer.js | 13 +- js/src/redux/store.js | 52 +++- js/src/ui/Balance/balance.js | 19 +- js/src/ui/TxList/TxRow/txRow.spec.js | 2 +- js/src/ui/TxList/txList.spec.js | 2 +- js/src/util/tokens.js | 133 -------- js/src/util/tokens/bytecodes.js | 23 ++ js/src/util/tokens/index.js | 283 +++++++++++++++++ js/src/views/Application/Requests/requests.js | 9 +- js/src/views/Home/Dapps/dapp.js | 9 + js/src/views/ParityBar/accountStore.js | 10 +- .../SignRequest/signRequest.spec.js | 31 +- rpc/src/v1/impls/eth_filter.rs | 49 +-- 51 files changed, 1819 insertions(+), 857 deletions(-) create mode 100644 js/src/api/pubsub/signer/index.js create mode 100644 js/src/api/pubsub/signer/signer.js create mode 100644 js/src/api/transport/logger.js create mode 100644 js/src/redux/providers/tokens.js delete mode 100644 js/src/util/tokens.js create mode 100644 js/src/util/tokens/bytecodes.js create mode 100644 js/src/util/tokens/index.js diff --git a/Cargo.lock b/Cargo.lock index 33d49894d..ed88fba72 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -295,6 +295,15 @@ dependencies = [ "unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "coco" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "either 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "common-types" version = "0.1.0" @@ -395,14 +404,6 @@ dependencies = [ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "deque" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "difference" version = "1.0.0" @@ -544,6 +545,7 @@ dependencies = [ 
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "price-info 1.7.0", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.0", "rlp_derive 0.1.0", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1048,7 +1050,7 @@ name = "gcc" version = "0.3.51" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rayon 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2433,18 +2435,27 @@ dependencies = [ [[package]] name = "rayon" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rayon-core 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon-core 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rayon" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rayon-core 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rayon-core" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2496,7 +2507,7 @@ dependencies = [ "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", - "rayon 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2647,6 +2658,11 @@ name = "scoped-tls" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "scopeguard" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "secur32-sys" version = "0.2.0" @@ -3388,6 +3404,7 @@ dependencies = [ "checksum clap 2.24.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6b8f69e518f967224e628896b54e41ff6acfb4dcfefc5076325c36525dac900f" "checksum clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "5b4fabf979ddf6419a313c1c0ada4a5b95cfd2049c56e8418d622d27b4b6ff32" "checksum clippy_lints 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "ce96ec05bfe018a0d5d43da115e54850ea2217981ff0f2e462780ab9d594651a" +"checksum coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c06169f5beb7e31c7c67ebf5540b8b472d23e3eade3b2ec7d1f5b504a85f91bd" "checksum conv 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "78ff10625fd0ac447827aa30ea8b861fead473bb60aeb73af6c1c58caf0d1299" "checksum cookie 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d53b80dde876f47f03cda35303e368a79b91c70b0d65ecba5fd5280944a08591" "checksum core-foundation 0.2.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "20a6d0448d3a99d977ae4a2aa5a98d886a923e863e81ad9ff814645b6feb3bbd" @@ -3399,7 +3416,6 @@ dependencies = [ "checksum custom_derive 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "ef8ae57c4978a2acd8b869ce6b9ca1dfe817bff704c220209fdef2c0b75a01b9" "checksum daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "271ec51b7e0bee92f0d04601422c73eb76ececf197026711c97ad25038a010cf" "checksum dbghelp-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "97590ba53bcb8ac28279161ca943a924d1fd4a8fb3fa63302591647c4fc5b850" -"checksum deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1614659040e711785ed8ea24219140654da1729f3ec8a47a9719d041112fe7bf" "checksum difference 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b3304d19798a8e067e48d8e69b2c37f0b5e9b4e462504ad9e27e9f3fce02bba8" "checksum docopt 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3b5b93718f8b3e5544fcc914c43de828ca6c6ace23e0332c6080a2977b49787a" "checksum dtoa 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5edd69c67b2f8e0911629b7e6b8a34cb3956613cd7c6e6414966dee349c2db4f" @@ -3524,8 +3540,9 @@ dependencies = [ "checksum quine-mc_cluskey 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a6683b0e23d80813b1a535841f0048c1537d3f86d63c999e8373b39a9b0eb74a" "checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" "checksum rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)" = "2791d88c6defac799c3f20d74f094ca33b9332612d9aef9078519c82e4fe04a5" -"checksum rayon 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8c83adcb08e5b922e804fe1918142b422602ef11f2fd670b0b52218cb5984a20" -"checksum rayon-core 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "767d91bacddf07d442fe39257bf04fd95897d1c47c545d009f6beb03efd038f8" +"checksum rayon 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a77c51c07654ddd93f6cb543c7a849863b03abc7e82591afda6dc8ad4ac3ac4a" +"checksum rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b614fe08b6665cb9a231d07ac1364b0ef3cb3698f1239ee0c4c3a88a524f54c8" +"checksum rayon-core 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7febc28567082c345f10cddc3612c6ea020fc3297a1977d472cf9fdb73e6e493" "checksum regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4278c17d0f6d62dfef0ab00028feb45bd7d2102843f80763474eeb1be8a10c01" "checksum regex-syntax 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "841591b1e05609a643e3b4d0045fce04f701daba7151ddcd3ad47b080693d5a9" "checksum regex-syntax 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9191b1f57603095f105d317e375d19b1c9c5c3185ea9633a99a6dcbed04457" @@ -3544,6 +3561,7 @@ dependencies = [ "checksum rustc_version 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1e114e275f7c9b5d50bb52b28f9aac1921209f02aa6077c8b255e21eefaf8ffa" "checksum schannel 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "4e45ac5e9e4698c1c138d2972bedcd90b81fe1efeba805449d2bdd54512de5f9" "checksum scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d" +"checksum scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"c79eb2c3ac4bc2507cda80e7f3ac5b88bd8eae4c0914d5663e6a8933994be918" "checksum secur32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3f412dfa83308d893101dd59c10d6fda8283465976c28c287c5c855bf8d216bc" "checksum security-framework 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "42ddf098d78d0b64564b23ee6345d07573e7d10e52ad86875d89ddf5f8378a02" "checksum security-framework-sys 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "5bacdada57ea62022500c457c8571c17dfb5e6240b7c8eac5916ffa8c7138a55" diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index d96cc2dea..5a2f7327a 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -49,6 +49,7 @@ num = "0.1" num_cpus = "1.2" parking_lot = "0.4" price-info = { path = "../price-info" } +rayon = "0.8" rand = "0.3" rlp = { path = "../util/rlp" } rlp_derive = { path = "../util/rlp_derive" } diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index cdd390693..d44d8ff9b 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -44,6 +44,7 @@ use db::{self, Writable, Readable, CacheUpdatePolicy}; use cache_manager::CacheManager; use encoded; use engines::epoch::{Transition as EpochTransition, PendingTransition as PendingEpochTransition}; +use rayon::prelude::*; use ansi_term::Colour; const LOG_BLOOMS_LEVELS: usize = 3; @@ -152,7 +153,7 @@ pub trait BlockProvider { /// Returns logs matching given filter. fn logs(&self, blocks: Vec, matches: F, limit: Option) -> Vec - where F: Fn(&LogEntry) -> bool, Self: Sized; + where F: Fn(&LogEntry) -> bool + Send + Sync, Self: Sized; } macro_rules! otry { @@ -363,50 +364,56 @@ impl BlockProvider for BlockChain { } fn logs(&self, mut blocks: Vec, matches: F, limit: Option) -> Vec - where F: Fn(&LogEntry) -> bool, Self: Sized { + where F: Fn(&LogEntry) -> bool + Send + Sync, Self: Sized { // sort in reverse order blocks.sort_by(|a, b| b.cmp(a)); - let mut log_index = 0; - let mut logs = blocks.into_iter() - .filter_map(|number| self.block_hash(number).map(|hash| (number, hash))) - .filter_map(|(number, hash)| self.block_receipts(&hash).map(|r| (number, hash, r.receipts))) - .filter_map(|(number, hash, receipts)| self.block_body(&hash).map(|ref b| (number, hash, receipts, b.transaction_hashes()))) - .flat_map(|(number, hash, mut receipts, mut hashes)| { - if receipts.len() != hashes.len() { - warn!("Block {} ({}) has different number of receipts ({}) to transactions ({}). Database corrupt?", number, hash, receipts.len(), hashes.len()); - assert!(false); - } - log_index = receipts.iter().fold(0, |sum, receipt| sum + receipt.logs.len()); + let mut logs = blocks + .chunks(128) + .flat_map(move |blocks_chunk| { + blocks_chunk.into_par_iter() + .filter_map(|number| self.block_hash(*number).map(|hash| (*number, hash))) + .filter_map(|(number, hash)| self.block_receipts(&hash).map(|r| (number, hash, r.receipts))) + .filter_map(|(number, hash, receipts)| self.block_body(&hash).map(|ref b| (number, hash, receipts, b.transaction_hashes()))) + .flat_map(|(number, hash, mut receipts, mut hashes)| { + if receipts.len() != hashes.len() { + warn!("Block {} ({}) has different number of receipts ({}) to transactions ({}). 
Database corrupt?", number, hash, receipts.len(), hashes.len()); + assert!(false); + } + let mut log_index = receipts.iter().fold(0, |sum, receipt| sum + receipt.logs.len()); - let receipts_len = receipts.len(); - hashes.reverse(); - receipts.reverse(); - receipts.into_iter() - .map(|receipt| receipt.logs) - .zip(hashes) - .enumerate() - .flat_map(move |(index, (mut logs, tx_hash))| { - let current_log_index = log_index; - let no_of_logs = logs.len(); - log_index -= no_of_logs; - - logs.reverse(); - logs.into_iter() + let receipts_len = receipts.len(); + hashes.reverse(); + receipts.reverse(); + receipts.into_iter() + .map(|receipt| receipt.logs) + .zip(hashes) .enumerate() - .map(move |(i, log)| LocalizedLogEntry { - entry: log, - block_hash: hash, - block_number: number, - transaction_hash: tx_hash, - // iterating in reverse order - transaction_index: receipts_len - index - 1, - transaction_log_index: no_of_logs - i - 1, - log_index: current_log_index - i - 1, + .flat_map(move |(index, (mut logs, tx_hash))| { + let current_log_index = log_index; + let no_of_logs = logs.len(); + log_index -= no_of_logs; + + logs.reverse(); + logs.into_iter() + .enumerate() + .map(move |(i, log)| LocalizedLogEntry { + entry: log, + block_hash: hash, + block_number: number, + transaction_hash: tx_hash, + // iterating in reverse order + transaction_index: receipts_len - index - 1, + transaction_log_index: no_of_logs - i - 1, + log_index: current_log_index - i - 1, + }) }) + .filter(|log_entry| matches(&log_entry.entry)) + .take(limit.unwrap_or(::std::usize::MAX)) + .collect::>() }) + .collect::>() }) - .filter(|log_entry| matches(&log_entry.entry)) .take(limit.unwrap_or(::std::usize::MAX)) .collect::>(); logs.reverse(); diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 02682ece2..c58f0e945 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -918,7 +918,7 @@ impl Client { _ => {}, } - let block_number = match self.block_number(id.clone()) { + let block_number = match self.block_number(id) { Some(num) => num, None => return None, }; @@ -1155,6 +1155,16 @@ impl Client { (false, false) => call(state, env_info, engine, state_diff, t, TransactOptions::with_no_tracing()), } } + + fn block_number_ref(&self, id: &BlockId) -> Option { + match *id { + BlockId::Number(number) => Some(number), + BlockId::Hash(ref hash) => self.chain.read().block_number(hash), + BlockId::Earliest => Some(0), + BlockId::Latest => Some(self.chain.read().best_block_number()), + BlockId::Pending => Some(self.chain.read().best_block_number() + 1), + } + } } impl snapshot::DatabaseRestore for Client { @@ -1364,13 +1374,7 @@ impl BlockChainClient for Client { } fn block_number(&self, id: BlockId) -> Option { - match id { - BlockId::Number(number) => Some(number), - BlockId::Hash(ref hash) => self.chain.read().block_number(hash), - BlockId::Earliest => Some(0), - BlockId::Latest => Some(self.chain.read().best_block_number()), - BlockId::Pending => Some(self.chain.read().best_block_number() + 1), - } + self.block_number_ref(&id) } fn block_body(&self, id: BlockId) -> Option { @@ -1651,16 +1655,17 @@ impl BlockChainClient for Client { self.engine.additional_params().into_iter().collect() } - fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option> { - match (self.block_number(from_block), self.block_number(to_block)) { - (Some(from), Some(to)) => Some(self.chain.read().blocks_with_bloom(bloom, from, to)), - _ => None - } - } - fn logs(&self, filter: 
Filter) -> Vec { + let (from, to) = match (self.block_number_ref(&filter.from_block), self.block_number_ref(&filter.to_block)) { + (Some(from), Some(to)) => (from, to), + _ => return Vec::new(), + }; + + let chain = self.chain.read(); let blocks = filter.bloom_possibilities().iter() - .filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone())) + .map(move |bloom| { + chain.blocks_with_bloom(bloom, from, to) + }) .flat_map(|m| m) // remove duplicate elements .collect::>() diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index f39932f82..b7245b25c 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -24,7 +24,7 @@ use itertools::Itertools; use rustc_hex::FromHex; use hash::keccak; use bigint::prelude::U256; -use bigint::hash::{H256, H2048}; +use bigint::hash::H256; use parking_lot::RwLock; use util::*; use rlp::*; @@ -508,10 +508,6 @@ impl BlockChainClient for TestBlockChainClient { self.receipts.read().get(&id).cloned() } - fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockId, _to_block: BlockId) -> Option> { - unimplemented!(); - } - fn logs(&self, filter: Filter) -> Vec { let mut logs = self.logs.read().clone(); let len = logs.len(); diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index 5a619a95e..b7616a478 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -35,7 +35,7 @@ use transaction::{LocalizedTransaction, PendingTransaction, SignedTransaction}; use verification::queue::QueueInfo as BlockQueueInfo; use bigint::prelude::U256; -use bigint::hash::{H256, H2048}; +use bigint::hash::H256; use util::{Address, Bytes}; use util::hashdb::DBValue; @@ -181,9 +181,6 @@ pub trait BlockChainClient : Sync + Send { /// Get the best block header. fn best_block_header(&self) -> encoded::Header; - /// Returns numbers of blocks containing given bloom. - fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option>; - /// Returns logs matching given filter. 
fn logs(&self, filter: Filter) -> Vec; diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 2f2329d64..82b3cfd96 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -102,6 +102,7 @@ extern crate num; extern crate parking_lot; extern crate price_info; extern crate rand; +extern crate rayon; extern crate rlp; extern crate hash; extern crate heapsize; diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index 62639e849..4ea0efe51 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -392,14 +392,13 @@ mod tests { self.numbers.get(&index).cloned() } - fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockNumber, _to_block: BlockNumber) -> Vec { - unimplemented!() - } - fn block_receipts(&self, _hash: &H256) -> Option { unimplemented!() } + fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockNumber, _to_block: BlockNumber) -> Vec { + unimplemented!() + } fn logs(&self, _blocks: Vec, _matches: F, _limit: Option) -> Vec where F: Fn(&LogEntry) -> bool, Self: Sized { diff --git a/js/src/api/api.js b/js/src/api/api.js index 220c3be29..a1a7dbbb5 100644 --- a/js/src/api/api.js +++ b/js/src/api/api.js @@ -71,10 +71,15 @@ export default class Api extends EventEmitter { } } + get isPubSub () { + return !!this._pubsub; + } + get pubsub () { - if (!this._pubsub) { + if (!this.isPubSub) { throw Error('Pubsub is only available with a subscribing-supported transport injected!'); } + return this._pubsub; } diff --git a/js/src/api/pubsub/eth/eth.js b/js/src/api/pubsub/eth/eth.js index 0bbc85bec..044473ec6 100644 --- a/js/src/api/pubsub/eth/eth.js +++ b/js/src/api/pubsub/eth/eth.js @@ -25,7 +25,7 @@ export default class Eth extends PubsubBase { } newHeads (callback) { - return this.addListener('eth', 'newHeads', callback); + return this.addListener('eth', 'newHeads', callback, null); } logs (callback) { diff --git a/js/src/api/pubsub/parity/parity.js b/js/src/api/pubsub/parity/parity.js index 6df4a9204..042d8dc8b 100644 --- a/js/src/api/pubsub/parity/parity.js +++ b/js/src/api/pubsub/parity/parity.js @@ -267,7 +267,7 @@ export default class Parity extends PubsubBase { // parity accounts API (only secure API or configured to be exposed) allAccountsInfo (callback) { - return this._addListener(this._api, 'parity_allAccountsInfo', (error, data) => { + return this.addListener(this._api, 'parity_allAccountsInfo', (error, data) => { error ? callback(error) : callback(null, outAccountInfo(data)); @@ -275,7 +275,7 @@ export default class Parity extends PubsubBase { } getDappAddresses (callback, dappId) { - return this._addListener(this._api, 'parity_getDappAddresses', (error, data) => { + return this.addListener(this._api, 'parity_getDappAddresses', (error, data) => { error ? callback(error) : callback(null, outAddresses(data)); @@ -283,7 +283,7 @@ export default class Parity extends PubsubBase { } getDappDefaultAddress (callback, dappId) { - return this._addListener(this._api, 'parity_getDappDefaultAddress', (error, data) => { + return this.addListener(this._api, 'parity_getDappDefaultAddress', (error, data) => { error ? callback(error) : callback(null, outAddress(data)); @@ -291,7 +291,7 @@ export default class Parity extends PubsubBase { } getNewDappsAddresses (callback) { - return this._addListener(this._api, 'parity_getDappDefaultAddress', (error, addresses) => { + return this.addListener(this._api, 'parity_getDappDefaultAddress', (error, addresses) => { error ? 
callback(error) : callback(null, addresses ? addresses.map(outAddress) : null); @@ -299,7 +299,7 @@ export default class Parity extends PubsubBase { } getNewDappsDefaultAddress (callback) { - return this._addListener(this._api, 'parity_getNewDappsDefaultAddress', (error, data) => { + return this.addListener(this._api, 'parity_getNewDappsDefaultAddress', (error, data) => { error ? callback(error) : callback(null, outAddress(data)); @@ -307,7 +307,7 @@ export default class Parity extends PubsubBase { } listRecentDapps (callback) { - return this._addListener(this._api, 'parity_listRecentDapps', (error, data) => { + return this.addListener(this._api, 'parity_listRecentDapps', (error, data) => { error ? callback(error) : callback(null, outRecentDapps(data)); @@ -315,7 +315,7 @@ export default class Parity extends PubsubBase { } listGethAccounts (callback) { - return this._addListener(this._api, 'parity_listGethAccounts', (error, data) => { + return this.addListener(this._api, 'parity_listGethAccounts', (error, data) => { error ? callback(error) : callback(null, outAddresses(data)); @@ -323,15 +323,15 @@ export default class Parity extends PubsubBase { } listVaults (callback) { - return this._addListener(this._api, 'parity_listVaults', callback); + return this.addListener(this._api, 'parity_listVaults', callback); } listOpenedVaults (callback) { - return this._addListener(this._api, 'parity_listOpenedVaults', callback); + return this.addListener(this._api, 'parity_listOpenedVaults', callback); } getVaultMeta (callback, vaultName) { - return this._addListener(this._api, 'parity_getVaultMeta', (error, data) => { + return this.addListener(this._api, 'parity_getVaultMeta', (error, data) => { error ? callback(error) : callback(null, outVaultMeta(data)); @@ -339,7 +339,7 @@ export default class Parity extends PubsubBase { } deriveAddressHash (callback, address, password, hash, shouldSave) { - return this._addListener(this._api, 'parity_deriveAddressHash', (error, data) => { + return this.addListener(this._api, 'parity_deriveAddressHash', (error, data) => { error ? callback(error) : callback(null, outAddress(data)); @@ -347,10 +347,18 @@ export default class Parity extends PubsubBase { } deriveAddressIndex (callback, address, password, index, shouldSave) { - return this._addListener(this._api, 'parity_deriveAddressIndex', (error, data) => { + return this.addListener(this._api, 'parity_deriveAddressIndex', (error, data) => { error ? callback(error) : callback(null, outAddress(data)); }, [inAddress(address), password, inDeriveIndex(index), !!shouldSave]); } + + nodeHealth (callback) { + return this.addListener(this._api, 'parity_nodeHealth', (error, data) => { + error + ? callback(error) + : callback(null, data); + }); + } } diff --git a/js/src/api/pubsub/pubsub.js b/js/src/api/pubsub/pubsub.js index edbc201ae..4420967ee 100644 --- a/js/src/api/pubsub/pubsub.js +++ b/js/src/api/pubsub/pubsub.js @@ -16,6 +16,7 @@ import Eth from './eth'; import Parity from './parity'; +import Signer from './signer'; import Net from './net'; import { isFunction } from '../util/types'; @@ -29,6 +30,7 @@ export default class Pubsub { this._eth = new Eth(transport); this._net = new Net(transport); this._parity = new Parity(transport); + this._signer = new Signer(transport); } get net () { @@ -43,8 +45,35 @@ export default class Pubsub { return this._parity; } + get signer () { + return this._signer; + } + unsubscribe (subscriptionIds) { // subscriptions are namespace independent. Thus we can simply removeListener from any. 
return this._parity.removeListener(subscriptionIds); } + + subscribeAndGetResult (f, callback) { + return new Promise((resolve, reject) => { + let isFirst = true; + let onSubscription = (error, data) => { + const p1 = error ? Promise.reject(error) : Promise.resolve(data); + const p2 = p1.then(callback); + + if (isFirst) { + isFirst = false; + p2 + .then(resolve) + .catch(reject); + } + }; + + try { + f.call(this, onSubscription).catch(reject); + } catch (err) { + reject(err); + } + }); + } } diff --git a/js/src/api/pubsub/pubsubBase.js b/js/src/api/pubsub/pubsubBase.js index fcc7525d5..c50f45775 100644 --- a/js/src/api/pubsub/pubsubBase.js +++ b/js/src/api/pubsub/pubsubBase.js @@ -20,11 +20,12 @@ export default class PubsubBase { this._transport = transport; } - addListener (module, eventName, callback, eventParams) { - return eventParams - ? this._transport.subscribe(module, callback, eventName, eventParams) - : this._transport.subscribe(module, callback, eventName, []); - // this._transport.subscribe(module, callback, eventName); After Patch from tomac is merged to master! => eth_subscribe does not support empty array as params + addListener (module, eventName, callback, eventParams = []) { + if (eventName) { + return this._transport.subscribe(module, callback, eventParams ? [eventName, eventParams] : [eventName]); + } + + return this._transport.subscribe(module, callback, eventParams); } removeListener (subscriptionIds) { diff --git a/js/src/api/pubsub/signer/index.js b/js/src/api/pubsub/signer/index.js new file mode 100644 index 000000000..caa410722 --- /dev/null +++ b/js/src/api/pubsub/signer/index.js @@ -0,0 +1,16 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . +export default from './signer'; diff --git a/js/src/api/pubsub/signer/signer.js b/js/src/api/pubsub/signer/signer.js new file mode 100644 index 000000000..266da6b8a --- /dev/null +++ b/js/src/api/pubsub/signer/signer.js @@ -0,0 +1,37 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+import PubsubBase from '../pubsubBase'; + +import { outSignerRequest } from '../../format/output'; + +export default class Net extends PubsubBase { + constructor (transport) { + super(transport); + this._api = { + subscribe: 'signer_subscribePending', + unsubscribe: 'signer_unsubscribePending', + subscription: 'signer_pending' + }; + } + + pendingRequests (callback) { + return this.addListener(this._api, null, (error, data) => { + error + ? callback(error) + : callback(null, data.map(outSignerRequest)); + }); + } +} diff --git a/js/src/api/rpc/parity/parity.js b/js/src/api/rpc/parity/parity.js index 59ccb5884..3a502afab 100644 --- a/js/src/api/rpc/parity/parity.js +++ b/js/src/api/rpc/parity/parity.js @@ -44,6 +44,15 @@ export default class Parity { .execute('parity_addReservedPeer', enode); } + call (requests, blockNumber = 'latest') { + return this._transport + .execute( + 'parity_call', + requests.map((options) => inOptions(options)), + inBlockNumber(blockNumber) + ); + } + chainStatus () { return this._transport .execute('parity_chainStatus') diff --git a/js/src/api/subscriptions/eth.js b/js/src/api/subscriptions/eth.js index 8e56f335f..bbab95672 100644 --- a/js/src/api/subscriptions/eth.js +++ b/js/src/api/subscriptions/eth.js @@ -24,6 +24,13 @@ export default class Eth { this._lastBlock = new BigNumber(-1); this._pollTimerId = null; + + // Try to restart subscription if transport is closed + this._api.transport.on('close', () => { + if (this.isStarted) { + this.start(); + } + }); } get isStarted () { @@ -33,31 +40,56 @@ export default class Eth { start () { this._started = true; - return this._blockNumber(); + if (this._api.isPubSub) { + return Promise.all([ + this._pollBlockNumber(false), + this._api.pubsub + .subscribeAndGetResult( + callback => this._api.pubsub.eth.newHeads(callback), + () => { + return this._api.eth + .blockNumber() + .then(blockNumber => { + this.updateBlock(blockNumber); + return blockNumber; + }); + } + ) + ]); + } + + // fallback to polling + return this._pollBlockNumber(true); } - _blockNumber = () => { - const nextTimeout = (timeout = 1000) => { - this._pollTimerId = setTimeout(() => { - this._blockNumber(); - }, timeout); + _pollBlockNumber = (doTimeout) => { + const nextTimeout = (timeout = 1000, forceTimeout = doTimeout) => { + if (forceTimeout) { + this._pollTimerId = setTimeout(() => { + this._pollBlockNumber(doTimeout); + }, timeout); + } }; if (!this._api.transport.isConnected) { - nextTimeout(500); + nextTimeout(500, true); return; } return this._api.eth .blockNumber() .then((blockNumber) => { - if (!blockNumber.eq(this._lastBlock)) { - this._lastBlock = blockNumber; - this._updateSubscriptions('eth_blockNumber', null, blockNumber); - } + this.updateBlock(blockNumber); nextTimeout(); }) .catch(() => nextTimeout()); } + + updateBlock (blockNumber) { + if (!blockNumber.eq(this._lastBlock)) { + this._lastBlock = blockNumber; + this._updateSubscriptions('eth_blockNumber', null, blockNumber); + } + } } diff --git a/js/src/api/subscriptions/eth.spec.js b/js/src/api/subscriptions/eth.spec.js index 3f5ee81d6..2893a14dc 100644 --- a/js/src/api/subscriptions/eth.spec.js +++ b/js/src/api/subscriptions/eth.spec.js @@ -29,7 +29,8 @@ function stubApi (blockNumber) { return { _calls, transport: { - isConnected: true + isConnected: true, + on: () => {} }, eth: { blockNumber: () => { diff --git a/js/src/api/subscriptions/personal.js b/js/src/api/subscriptions/personal.js index fa7ae823c..8b2b826d0 100644 --- a/js/src/api/subscriptions/personal.js +++ 
b/js/src/api/subscriptions/personal.js @@ -23,6 +23,13 @@ export default class Personal { this._lastDefaultAccount = '0x0'; this._pollTimerId = null; + + // Try to restart subscription if transport is closed + this._api.transport.on('close', () => { + if (this.isStarted) { + this.start(); + } + }); } get isStarted () { @@ -32,20 +39,42 @@ export default class Personal { start () { this._started = true; + let defaultAccount = null; + + if (this._api.isPubSub) { + defaultAccount = this._api.pubsub + .subscribeAndGetResult( + callback => this._api.pubsub.parity.defaultAccount(callback), + (defaultAccount) => { + this.updateDefaultAccount(defaultAccount); + return defaultAccount; + } + ); + } else { + defaultAccount = this._defaultAccount(); + } + return Promise.all([ - this._defaultAccount(), + defaultAccount, this._listAccounts(), this._accountsInfo(), this._loggingSubscribe() ]); } + updateDefaultAccount (defaultAccount) { + if (this._lastDefaultAccount !== defaultAccount) { + this._lastDefaultAccount = defaultAccount; + this._updateSubscriptions('parity_defaultAccount', null, defaultAccount); + } + } + // FIXME: Because of the different API instances, the "wait for valid changes" approach // doesn't work. Since the defaultAccount is critical to operation, we poll in exactly // same way we do in ../eth (ala eth_blockNumber) and update. This should be moved // to pub-sub as it becomes available _defaultAccount = (timerDisabled = false) => { - const nextTimeout = (timeout = 1000) => { + const nextTimeout = (timeout = 3000) => { if (!timerDisabled) { this._pollTimerId = setTimeout(() => { this._defaultAccount(); @@ -61,11 +90,7 @@ export default class Personal { return this._api.parity .defaultAccount() .then((defaultAccount) => { - if (this._lastDefaultAccount !== defaultAccount) { - this._lastDefaultAccount = defaultAccount; - this._updateSubscriptions('parity_defaultAccount', null, defaultAccount); - } - + this.updateDefaultAccount(defaultAccount); nextTimeout(); }) .catch(() => nextTimeout()); diff --git a/js/src/api/subscriptions/signer.js b/js/src/api/subscriptions/signer.js index 2215ed7f3..a0c202c1b 100644 --- a/js/src/api/subscriptions/signer.js +++ b/js/src/api/subscriptions/signer.js @@ -22,6 +22,13 @@ export default class Signer { this._api = api; this._updateSubscriptions = updateSubscriptions; this._started = false; + + // Try to restart subscription if transport is closed + this._api.transport.on('close', () => { + if (this.isStarted) { + this.start(); + } + }); } get isStarted () { @@ -31,30 +38,50 @@ export default class Signer { start () { this._started = true; + if (this._api.isPubSub) { + const subscription = this._api.pubsub + .subscribeAndGetResult( + callback => this._api.pubsub.signer.pendingRequests(callback), + requests => { + this.updateSubscriptions(requests); + return requests; + } + ); + + return Promise.all([ + this._listRequests(false), + subscription + ]); + } + return Promise.all([ this._listRequests(true), this._loggingSubscribe() ]); } + updateSubscriptions (requests) { + return this._updateSubscriptions('signer_requestsToConfirm', null, requests); + } + _listRequests = (doTimeout) => { - const nextTimeout = (timeout = 1000) => { - if (doTimeout) { + const nextTimeout = (timeout = 1000, forceTimeout = doTimeout) => { + if (forceTimeout) { setTimeout(() => { - this._listRequests(true); + this._listRequests(doTimeout); }, timeout); } }; if (!this._api.transport.isConnected) { - nextTimeout(500); + nextTimeout(500, true); return; } return this._api.signer 
.requestsToConfirm() .then((requests) => { - this._updateSubscriptions('signer_requestsToConfirm', null, requests); + this.updateSubscriptions(requests); nextTimeout(); }) .catch(() => nextTimeout()); diff --git a/js/src/api/transport/jsonRpcBase.js b/js/src/api/transport/jsonRpcBase.js index 819e1f496..1b96347d3 100644 --- a/js/src/api/transport/jsonRpcBase.js +++ b/js/src/api/transport/jsonRpcBase.js @@ -15,7 +15,11 @@ // along with Parity. If not, see . import EventEmitter from 'eventemitter3'; + import { Logging } from '../subscriptions'; +import logger from './logger'; + +const LOGGER_ENABLED = process.env.NODE_ENV !== 'production'; export default class JsonRpcBase extends EventEmitter { constructor () { @@ -75,6 +79,14 @@ export default class JsonRpcBase extends EventEmitter { } execute (method, ...params) { + let start; + let logId; + + if (LOGGER_ENABLED) { + start = Date.now(); + logId = logger.log({ method, params }); + } + return this._middlewareList.then((middlewareList) => { for (const middleware of middlewareList) { const res = middleware.handle(method, params); @@ -93,7 +105,18 @@ export default class JsonRpcBase extends EventEmitter { } } - return this._execute(method, params); + const result = this._execute(method, params); + + if (!LOGGER_ENABLED) { + return result; + } + + return result + .then((result) => { + logger.set(logId, { result, time: Date.now() - start }); + + return result; + }); }); } diff --git a/js/src/api/transport/logger.js b/js/src/api/transport/logger.js new file mode 100644 index 000000000..930c4a34e --- /dev/null +++ b/js/src/api/transport/logger.js @@ -0,0 +1,150 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
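The `execute` wrapper shown above records every RPC round trip through the request logger defined in this new `logger.js` module: `logger.log({ method, params })` opens an entry before the request is sent and `logger.set(logId, { result, time })` completes it once the response arrives. Because the instance is registered as `window._logger` in non-production builds, the collected data can be inspected straight from the browser console. A rough inspection sketch, not part of the patch, assuming a dev build that has already issued a few requests:

    // Aggregate throughput since the first logged request.
    const { stats, methods } = window._logger;

    console.log('overall requests/s:', stats.speed.toFixed(2));

    // Five most frequently called RPC methods with their individual rates.
    stats.methods.slice(0, 5).forEach(({ method, count, speed }) => {
      console.log(`${method}: ${count} calls (${speed.toFixed(2)}/s)`);
    });

    // Raw entries are grouped by method name, e.g. every eth_call made so far.
    console.log(methods['eth_call'] || []);
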
+ +import MethodDecodingStore from '~/ui/MethodDecoding/methodDecodingStore'; + +const LOGGER_ENABLED = process.env.NODE_ENV !== 'production'; + +let logger; + +if (LOGGER_ENABLED) { + class Logger { + _logs = {}; + _id = 0; + + log ({ method, params }) { + const logId = this._id++; + + this._logs[logId] = { method, params, date: Date.now() }; + return logId; + } + + set (logId, data) { + this._logs[logId] = { + ...this._logs[logId], + ...data + }; + } + + static sorter (logA, logB) { + return logA.date - logB.date; + } + + get calls () { + const calls = this.methods['eth_call'] || []; + const decoding = MethodDecodingStore.get(window.secureApi); + const contracts = {}; + + const progress = Math.round(calls.length / 20); + + return calls + .reduce((promise, call, index) => { + const { data, to } = call.params[0]; + + contracts[to] = contracts[to] || []; + + return promise + .then(() => decoding.lookup(null, { data, to })) + .then((lookup) => { + if (!lookup.name) { + contracts[to].push(data); + return; + } + + const inputs = lookup.inputs.map((input) => { + if (/bytes/.test(input.type)) { + return '0x' + input.value.map((v) => v.toString(16).padStart(2, 0)).join(''); + } + + return input.value; + }); + + const called = `${lookup.name}(${inputs.join(', ')})`; + + contracts[to].push(called); + + if (index % progress === 0) { + console.warn(`progress: ${Math.round(100 * index / calls.length)}%`); + } + }); + }, Promise.resolve()) + .then(() => { + return Object.keys(contracts) + .map((address) => { + const count = contracts[address].length; + + return { + count, + calls: contracts[address], + to: address + }; + }) + .sort((cA, cB) => cB.count - cA.count); + }); + } + + get logs () { + return Object.values(this._logs).sort(Logger.sorter); + } + + get methods () { + return this.logs.reduce((methods, log) => { + methods[log.method] = methods[log.method] || []; + methods[log.method].push(log); + return methods; + }, {}); + } + + get stats () { + const logs = this.logs; + const methods = this.methods; + + const start = logs[0].date; + const end = logs[logs.length - 1].date; + + // Duration in seconds + const duration = (end - start) / 1000; + const speed = logs.length / duration; + + const sortedMethods = Object.keys(methods) + .map((method) => { + const methodLogs = methods[method].sort(Logger.sorter); + const methodSpeed = methodLogs.length / duration; + + return { + speed: methodSpeed, + count: methodLogs.length, + logs: methodLogs, + method + }; + }) + .sort((mA, mB) => mB.count - mA.count); + + return { + methods: sortedMethods, + speed + }; + } + } + + logger = new Logger(); + + if (window) { + window._logger = logger; + } +} + +export default logger; diff --git a/js/src/api/transport/ws/ws.js b/js/src/api/transport/ws/ws.js index 9c276772d..63abecb83 100644 --- a/js/src/api/transport/ws/ws.js +++ b/js/src/api/transport/ws/ws.js @@ -29,7 +29,7 @@ export default class Ws extends JsonRpcBase { this._url = url; this._token = token; this._messages = {}; - this._subscriptions = { 'eth_subscription': [], 'parity_subscription': [], 'shh_subscription': [] }; + this._subscriptions = {}; this._sessionHash = null; this._connecting = false; @@ -209,6 +209,7 @@ export default class Ws extends JsonRpcBase { // initial pubsub ACK if (id && msg.subscription) { // save subscription to map subId -> messageId + this._subscriptions[msg.subscription] = this._subscriptions[msg.subscription] || {}; this._subscriptions[msg.subscription][res] = id; // resolve promise with messageId because subId's can collide 
(eth/parity) msg.resolve(id); @@ -223,7 +224,7 @@ export default class Ws extends JsonRpcBase { } // pubsub format - if (method.includes('subscription')) { + if (this._subscriptions[method]) { const messageId = this._messages[this._subscriptions[method][params.subscription]]; if (messageId) { @@ -302,6 +303,16 @@ export default class Ws extends JsonRpcBase { } _methodsFromApi (api) { + if (api.subscription) { + const { subscribe, unsubscribe, subscription } = api; + + return { + method: subscribe, + uMethod: unsubscribe, + subscription + }; + } + const method = `${api}_subscribe`; const uMethod = `${api}_unsubscribe`; const subscription = `${api}_subscription`; @@ -309,7 +320,7 @@ export default class Ws extends JsonRpcBase { return { method, uMethod, subscription }; } - subscribe (api, callback, ...params) { + subscribe (api, callback, params) { return new Promise((resolve, reject) => { const id = this.id; const { method, uMethod, subscription } = this._methodsFromApi(api); diff --git a/js/src/index.js b/js/src/index.js index 7e85dd51f..24cf38286 100644 --- a/js/src/index.js +++ b/js/src/index.js @@ -36,7 +36,6 @@ import muiTheme from '~/ui/Theme'; import MainApplication from './main'; import { loadSender, patchApi } from '~/util/tx'; -import { setApi } from '~/redux/providers/apiActions'; import './environment'; @@ -69,9 +68,6 @@ ContractInstances.create(api); const store = initStore(api, hashHistory); -store.dispatch({ type: 'initAll', api }); -store.dispatch(setApi(api)); - window.secureApi = api; ReactDOM.render( diff --git a/js/src/mobx/hardwareStore.js b/js/src/mobx/hardwareStore.js index 46bf3fa58..5a3960c2f 100644 --- a/js/src/mobx/hardwareStore.js +++ b/js/src/mobx/hardwareStore.js @@ -31,6 +31,10 @@ export default class HardwareStore { this._pollId = null; this._pollScan(); + this._subscribeParity(); + this._api.transport.on('close', () => { + this._subscribeParity(); + }); } isConnected (address) { @@ -78,26 +82,30 @@ export default class HardwareStore { }); } - scanParity () { - return this._api.parity - .hardwareAccountsInfo() - .then((hwInfo) => { - Object - .keys(hwInfo) - .forEach((address) => { - const info = hwInfo[address]; + _subscribeParity () { + const onError = error => { + console.warn('HardwareStore::scanParity', error); - info.address = address; - info.via = 'parity'; - }); + return {}; + }; - return hwInfo; - }) - .catch((error) => { - console.warn('HardwareStore::scanParity', error); + return this._api.pubsub + .subscribeAndGetResult( + callback => this._api.pubsub.parity.hardwareAccountsInfo(callback), + hwInfo => { + Object + .keys(hwInfo) + .forEach((address) => { + const info = hwInfo[address]; - return {}; - }); + info.address = address; + info.via = 'parity'; + }); + this.setWallets(hwInfo); + return hwInfo; + }, + onError + ).catch(onError); } scan () { @@ -107,14 +115,10 @@ export default class HardwareStore { // is done, different results will be retrieved via Parity vs. the browser APIs // (latter is Chrome-only, needs the browser app enabled on a Ledger, former is // not intended as a network call, i.e. 
hw wallet is with the user) - return Promise - .all([ - this.scanParity(), - this.scanLedger() - ]) - .then(([hwAccounts, ledgerAccounts]) => { + return this.scanLedger() + .then((ledgerAccounts) => { transaction(() => { - this.setWallets(Object.assign({}, hwAccounts, ledgerAccounts)); + this.setWallets(Object.assign({}, ledgerAccounts)); this.setScanning(false); }); }); diff --git a/js/src/modals/Transfer/store.js b/js/src/modals/Transfer/store.js index eaccf4f40..71458c85d 100644 --- a/js/src/modals/Transfer/store.js +++ b/js/src/modals/Transfer/store.js @@ -133,8 +133,8 @@ export default class TransferStore { } @action handleClose = () => { - this.stage = 0; this.onClose(); + this.stage = 0; } @action onUpdateDetails = (type, value) => { @@ -169,7 +169,6 @@ export default class TransferStore { } @action onSend = () => { - this.onNext(); this.sending = true; this diff --git a/js/src/modals/Transfer/transfer.js b/js/src/modals/Transfer/transfer.js index ab769ff02..fd2625ee7 100644 --- a/js/src/modals/Transfer/transfer.js +++ b/js/src/modals/Transfer/transfer.js @@ -192,7 +192,7 @@ class Transfer extends Component { renderDialogActions () { const { account } = this.props; - const { extras, sending, stage } = this.store; + const { extras, sending, stage, isValid } = this.store; const cancelBtn = (