From e004e05037f9cdb037576587e91004c6ef11a53c Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Wed, 5 Apr 2017 12:43:47 +0200 Subject: [PATCH 01/18] Spelling fixes and link addition about LRU cache in the docs. --- ethcore/light/src/cache.rs | 1 + ethcore/light/src/cht.rs | 2 +- ethcore/light/src/net/mod.rs | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ethcore/light/src/cache.rs b/ethcore/light/src/cache.rs index 185007a1b..67ba11ce0 100644 --- a/ethcore/light/src/cache.rs +++ b/ethcore/light/src/cache.rs @@ -61,6 +61,7 @@ impl Default for CacheSizes { /// /// Note that almost all getter methods take `&mut self` due to the necessity to update /// the underlying LRU-caches on read. +/// [LRU-cache](https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_Recently_Used_.28LRU.29) pub struct Cache { headers: MemoryLruCache, canon_hashes: MemoryLruCache, diff --git a/ethcore/light/src/cht.rs b/ethcore/light/src/cht.rs index 7c749b44f..91a5496f6 100644 --- a/ethcore/light/src/cht.rs +++ b/ethcore/light/src/cht.rs @@ -15,7 +15,7 @@ //! //! Each CHT is a trie mapping block numbers to canonical hashes and total difficulty. //! One is generated for every `SIZE` blocks, allowing us to discard those blocks in -//! favor the the trie root. When the "ancient" blocks need to be accessed, we simply +//! favor of the trie root. When the "ancient" blocks need to be accessed, we simply //! request an inclusion proof of a specific block number against the trie with the //! root has. A correct proof implies that the claimed block is identical to the one //! we discarded. diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index e32e92145..191972451 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! PIP Protocol Version 1 implementation. +//! PLP Protocol Version 1 implementation. //! 
//! This uses a "Provider" to answer requests. From 8ea25eeb3c3a2dbcd9b632bb36828573bffe96fc Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Wed, 5 Apr 2017 13:50:11 +0200 Subject: [PATCH 02/18] Add cache to HeaderChain struct. --- ethcore/light/src/client/header_chain.rs | 49 +++++++++++++++++++----- ethcore/light/src/client/mod.rs | 10 +++-- ethcore/light/src/client/service.rs | 7 ++++ 3 files changed, 52 insertions(+), 14 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index d4ea8d107..120df5c1f 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -39,6 +39,9 @@ use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp, UntrustedRlp}; use util::{H256, U256, HeapSizeOf, RwLock}; use util::kvdb::{DBTransaction, KeyValueDB}; +use cache::Cache; +use util::Mutex; + use smallvec::SmallVec; /// Store at least this many candidate headers at all times. @@ -138,11 +141,12 @@ pub struct HeaderChain { best_block: RwLock, db: Arc, col: Option, + cache: Arc>, } impl HeaderChain { /// Create a new header chain given this genesis block and database to read from. - pub fn new(db: Arc, col: Option, genesis: &[u8]) -> Result { + pub fn new(db: Arc, col: Option, genesis: &[u8], cache: Arc>) -> Result { use ethcore::views::HeaderView; let chain = if let Some(current) = db.get(col, CURRENT_KEY)? { @@ -186,6 +190,7 @@ impl HeaderChain { candidates: RwLock::new(candidates), db: db, col: col, + cache: cache, } } else { let g_view = HeaderView::new(genesis); @@ -199,6 +204,7 @@ impl HeaderChain { candidates: RwLock::new(BTreeMap::new()), db: db, col: col, + cache: cache, } }; @@ -408,7 +414,7 @@ impl HeaderChain { } } - /// Get the nth CHT root, if it's been computed. + /// Get the nth CHT root, if it has been computed. /// /// CHT root 0 is from block `1..2048`. 
/// CHT root 1 is from block `2049..4096` @@ -493,6 +499,22 @@ mod tests { use ethcore::ids::BlockId; use ethcore::header::Header; use ethcore::spec::Spec; + use cache::Cache; + + use time::Duration; + use util::Mutex; + + #[test] + fn basic_chain_with_cache() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = make_db(); + + let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); + + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache); + + } fn make_db() -> Arc<::util::KeyValueDB> { Arc::new(::util::kvdb::in_memory(0)) @@ -504,7 +526,9 @@ mod tests { let genesis_header = spec.genesis_header(); let db = make_db(); - let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); + + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache).unwrap(); let mut parent_hash = genesis_header.hash(); let mut rolling_timestamp = genesis_header.timestamp(); @@ -534,9 +558,10 @@ mod tests { fn reorganize() { let spec = Spec::new_test(); let genesis_header = spec.genesis_header(); - let db = make_db(); - let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); + + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache).unwrap(); let mut parent_hash = genesis_header.hash(); let mut rolling_timestamp = genesis_header.timestamp(); @@ -617,8 +642,10 @@ mod tests { let spec = Spec::new_test(); let genesis_header = spec.genesis_header(); let db = make_db(); + let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); + + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache).unwrap(); - let chain = HeaderChain::new(db.clone(), None, 
&::rlp::encode(&genesis_header)).unwrap(); assert!(chain.block_header(BlockId::Earliest).is_some()); assert!(chain.block_header(BlockId::Latest).is_some()); @@ -630,9 +657,10 @@ mod tests { let spec = Spec::new_test(); let genesis_header = spec.genesis_header(); let db = make_db(); + let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); { - let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap(); let mut parent_hash = genesis_header.hash(); let mut rolling_timestamp = genesis_header.timestamp(); for i in 1..10000 { @@ -652,7 +680,7 @@ mod tests { } } - let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap(); assert!(chain.block_header(BlockId::Number(10)).is_none()); assert!(chain.block_header(BlockId::Number(9000)).is_some()); assert!(chain.cht_root(2).is_some()); @@ -665,9 +693,10 @@ mod tests { let spec = Spec::new_test(); let genesis_header = spec.genesis_header(); let db = make_db(); + let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); { - let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap(); let mut parent_hash = genesis_header.hash(); let mut rolling_timestamp = genesis_header.timestamp(); @@ -709,7 +738,7 @@ mod tests { } // after restoration, non-canonical eras should still be loaded. 
- let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap(); assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10); assert!(chain.candidates.read().get(&100).is_some()) } diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index d294053e1..5424d5d1a 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -36,6 +36,8 @@ use util::kvdb::{KeyValueDB, CompactionProfile}; use self::header_chain::{AncestryIter, HeaderChain}; +use cache::Cache; + pub use self::service::Service; mod header_chain; @@ -120,13 +122,13 @@ pub struct Client { impl Client { /// Create a new `Client`. - pub fn new(config: Config, db: Arc, chain_col: Option, spec: &Spec, io_channel: IoChannel) -> Result { + pub fn new(config: Config, db: Arc, chain_col: Option, spec: &Spec, io_channel: IoChannel, cache: Arc>) -> Result { let gh = ::rlp::encode(&spec.genesis_header()); Ok(Client { queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, true), engine: spec.engine.clone(), - chain: HeaderChain::new(db.clone(), chain_col, &gh)?, + chain: HeaderChain::new(db.clone(), chain_col, &gh, cache)?, report: RwLock::new(ClientReport::default()), import_lock: Mutex::new(()), db: db, @@ -135,10 +137,10 @@ impl Client { /// Create a new `Client` backed purely in-memory. /// This will ignore all database options in the configuration. 
- pub fn in_memory(config: Config, spec: &Spec, io_channel: IoChannel) -> Self { + pub fn in_memory(config: Config, spec: &Spec, io_channel: IoChannel, cache: Arc>) -> Self { let db = ::util::kvdb::in_memory(0); - Client::new(config, Arc::new(db), None, spec, io_channel).expect("New DB creation infallible; qed") + Client::new(config, Arc::new(db), None, spec, io_channel, cache).expect("New DB creation infallible; qed") } /// Import a header to the queue for additional verification. diff --git a/ethcore/light/src/client/service.rs b/ethcore/light/src/client/service.rs index 55795d870..3d029b0c1 100644 --- a/ethcore/light/src/client/service.rs +++ b/ethcore/light/src/client/service.rs @@ -27,6 +27,10 @@ use ethcore::spec::Spec; use io::{IoContext, IoError, IoHandler, IoService}; use util::kvdb::{Database, DatabaseConfig}; +use cache::Cache; +use time::Duration; +use util::Mutex; + use super::{Client, Config as ClientConfig}; /// Errors on service initialization. @@ -56,6 +60,8 @@ pub struct Service { impl Service { /// Start the service: initialize I/O workers and client itself. pub fn start(config: ClientConfig, spec: &Spec, path: &Path) -> Result { + let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); + // initialize database. 
let mut db_config = DatabaseConfig::with_columns(db::NUM_COLUMNS); @@ -78,6 +84,7 @@ impl Service { db::COL_LIGHT_CHAIN, spec, io_service.channel(), + cache, ).map_err(Error::Database)?); io_service.register_handler(Arc::new(ImportBlocks(client.clone()))).map_err(Error::Io)?; Ok(Service { From 8a7ca6f0baf364a5a1081bad83f76f2856d58ad0 Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Thu, 6 Apr 2017 15:46:59 +0200 Subject: [PATCH 03/18] Add caching to block_header() --- ethcore/light/src/client/header_chain.rs | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 120df5c1f..340488297 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -365,11 +365,20 @@ impl HeaderChain { /// will be returned. pub fn block_header(&self, id: BlockId) -> Option { let load_from_db = |hash: H256| { - match self.db.get(self.col, &hash) { - Ok(val) => val.map(|x| x.to_vec()).map(encoded::Header::new), - Err(e) => { - warn!(target: "chain", "Failed to read from database: {}", e); - None + let cached = { + let mut cache = self.cache.lock(); + cache.block_header(&hash) + }; + + if cached.is_some() { + return cached + } else { + match self.db.get(self.col, &hash) { + Ok(val) => val.map(|x| x.to_vec()).map(encoded::Header::new), + Err(e) => { + warn!(target: "chain", "Failed to read from database: {}", e); + None + } } } }; From 23c76caafaa811c7825c433aeb2d523054db7ec5 Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Thu, 6 Apr 2017 20:50:52 +0200 Subject: [PATCH 04/18] Add header to cache --- ethcore/light/src/client/header_chain.rs | 31 ++++++++++++++---------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 340488297..71c0aa314 100644 --- a/ethcore/light/src/client/header_chain.rs +++ 
b/ethcore/light/src/client/header_chain.rs @@ -365,19 +365,24 @@ impl HeaderChain { /// will be returned. pub fn block_header(&self, id: BlockId) -> Option { let load_from_db = |hash: H256| { - let cached = { - let mut cache = self.cache.lock(); - cache.block_header(&hash) - }; + let mut cache = self.cache.lock(); + let header = cache.block_header(&hash); - if cached.is_some() { - return cached - } else { - match self.db.get(self.col, &hash) { - Ok(val) => val.map(|x| x.to_vec()).map(encoded::Header::new), - Err(e) => { - warn!(target: "chain", "Failed to read from database: {}", e); - None + match header { + Some(header) => Some(header), + None => { + match self.db.get(self.col, &hash) { + Ok(dbValue) => { + dbValue.map(|x| x.to_vec()).map(encoded::Header::new) + .and_then(|header| { + cache.insert_block_header(hash.clone(), header.clone()); + Some(header) + }) + }, + Err(e) => { + warn!(target: "chain", "Failed to read from database: {}", e); + None + } } } } @@ -423,7 +428,7 @@ impl HeaderChain { } } - /// Get the nth CHT root, if it has been computed. + /// Get the nth CHT root, if it's been computed. /// /// CHT root 0 is from block `1..2048`. 
/// CHT root 1 is from block `2049..4096` From 3632a980627a8a807da6aba64ae76bc874b5213c Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Thu, 6 Apr 2017 20:58:57 +0200 Subject: [PATCH 05/18] Clean up --- ethcore/light/src/client/header_chain.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 71c0aa314..ffefa0a92 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -366,9 +366,8 @@ impl HeaderChain { pub fn block_header(&self, id: BlockId) -> Option { let load_from_db = |hash: H256| { let mut cache = self.cache.lock(); - let header = cache.block_header(&hash); - match header { + match cache.block_header(&hash) { Some(header) => Some(header), None => { match self.db.get(self.col, &hash) { From 9552ca7bb32f17b957bd5919ce3e776371b1b5c6 Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Thu, 6 Apr 2017 22:31:03 +0200 Subject: [PATCH 06/18] Change to snakecase --- ethcore/light/src/client/header_chain.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index ffefa0a92..615ffd89b 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -371,8 +371,8 @@ impl HeaderChain { Some(header) => Some(header), None => { match self.db.get(self.col, &hash) { - Ok(dbValue) => { - dbValue.map(|x| x.to_vec()).map(encoded::Header::new) + Ok(db_value) => { + db_value.map(|x| x.to_vec()).map(encoded::Header::new) .and_then(|header| { cache.insert_block_header(hash.clone(), header.clone()); Some(header) From 5e33fe0aa7b9965319bdbbe21b9c1b70e99be01c Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Wed, 5 Apr 2017 13:50:11 +0200 Subject: [PATCH 07/18] Add cache to HeaderChain struct. 
--- ethcore/light/src/client/header_chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 615ffd89b..a6e5165d7 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -427,7 +427,7 @@ impl HeaderChain { } } - /// Get the nth CHT root, if it's been computed. + /// Get the nth CHT root, if it has been computed. /// /// CHT root 0 is from block `1..2048`. /// CHT root 1 is from block `2049..4096` From fa88ee148a3882402a828e16fcb3939d9b8e5220 Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Thu, 6 Apr 2017 20:50:52 +0200 Subject: [PATCH 08/18] Add header to cache --- ethcore/light/src/client/header_chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index a6e5165d7..615ffd89b 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -427,7 +427,7 @@ impl HeaderChain { } } - /// Get the nth CHT root, if it has been computed. + /// Get the nth CHT root, if it's been computed. /// /// CHT root 0 is from block `1..2048`. 
/// CHT root 1 is from block `2049..4096` From 2ce5a656e724b03c8597eabbeeeb8de62da20baf Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Fri, 7 Apr 2017 17:37:47 +0200 Subject: [PATCH 09/18] Ensure that OnDemand and HeaderChain share the same cache --- ethcore/light/src/client/service.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/ethcore/light/src/client/service.rs b/ethcore/light/src/client/service.rs index 3d029b0c1..83949a2f1 100644 --- a/ethcore/light/src/client/service.rs +++ b/ethcore/light/src/client/service.rs @@ -28,7 +28,6 @@ use io::{IoContext, IoError, IoHandler, IoService}; use util::kvdb::{Database, DatabaseConfig}; use cache::Cache; -use time::Duration; use util::Mutex; use super::{Client, Config as ClientConfig}; @@ -59,8 +58,7 @@ pub struct Service { impl Service { /// Start the service: initialize I/O workers and client itself. - pub fn start(config: ClientConfig, spec: &Spec, path: &Path) -> Result { - let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); + pub fn start(config: ClientConfig, spec: &Spec, path: &Path, cache: Arc>) -> Result { // initialize database. 
let mut db_config = DatabaseConfig::with_columns(db::NUM_COLUMNS); @@ -119,11 +117,18 @@ mod tests { use super::Service; use devtools::RandomTempPath; use ethcore::spec::Spec; + + use std::sync::Arc; + use cache::Cache; + use time::Duration; + use util::Mutex; #[test] fn it_works() { let spec = Spec::new_test(); let temp_path = RandomTempPath::new(); - Service::start(Default::default(), &spec, temp_path.as_path()).unwrap(); + let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); + + Service::start(Default::default(), &spec, temp_path.as_path(), cache).unwrap(); } } From d6bc60f9687b5f8ae7436b76423e2a47670e0ba3 Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Fri, 7 Apr 2017 17:38:03 +0200 Subject: [PATCH 10/18] Ensure that OnDemand and HeaderChain share the same cache --- parity/run.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/parity/run.rs b/parity/run.rs index 1ad124dbe..b7f473525 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -192,6 +192,10 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> info!("Starting {}", Colour::White.bold().paint(version())); info!("Running in experimental {} mode.", Colour::Blue.bold().paint("Light Client")); + // TODO: configurable cache size. + let cache = LightDataCache::new(Default::default(), ::time::Duration::minutes(GAS_CORPUS_EXPIRATION_MINUTES)); + let cache = Arc::new(::util::Mutex::new(cache)); + // start client and create transaction queue. 
let mut config = light_client::Config { queue: Default::default(), @@ -204,7 +208,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024; config.queue.verifier_settings = cmd.verifier_settings; - let service = light_client::Service::start(config, &spec, &db_dirs.client_path(algorithm)) + let service = light_client::Service::start(config, &spec, &db_dirs.client_path(algorithm), cache.clone()) .map_err(|e| format!("Error starting light client: {}", e))?; let txq = Arc::new(RwLock::new(::light::transaction_queue::TransactionQueue::default())); let provider = ::light::provider::LightProvider::new(service.client().clone(), txq.clone()); @@ -216,10 +220,6 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> net_conf.boot_nodes = spec.nodes.clone(); } - // TODO: configurable cache size. - let cache = LightDataCache::new(Default::default(), ::time::Duration::minutes(GAS_CORPUS_EXPIRATION_MINUTES)); - let cache = Arc::new(::util::Mutex::new(cache)); - // start on_demand service. 
let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone())); From 645b8e4b0b9c300b53c2b17c3248058531fe190e Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Fri, 7 Apr 2017 20:56:55 +0200 Subject: [PATCH 11/18] Fix failing tests --- ethcore/light/src/client/header_chain.rs | 12 ------------ sync/src/light_sync/tests/test_net.rs | 3 ++- 2 files changed, 2 insertions(+), 13 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 615ffd89b..fe3c86aad 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -517,18 +517,6 @@ mod tests { use time::Duration; use util::Mutex; - #[test] - fn basic_chain_with_cache() { - let spec = Spec::new_test(); - let genesis_header = spec.genesis_header(); - let db = make_db(); - - let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); - - let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache); - - } - fn make_db() -> Arc<::util::KeyValueDB> { Arc::new(::util::kvdb::in_memory(0)) } diff --git a/sync/src/light_sync/tests/test_net.rs b/sync/src/light_sync/tests/test_net.rs index 2319e8d35..f84a37018 100644 --- a/sync/src/light_sync/tests/test_net.rs +++ b/sync/src/light_sync/tests/test_net.rs @@ -207,7 +207,8 @@ impl TestNet { pub fn light(n_light: usize, n_full: usize) -> Self { let mut peers = Vec::with_capacity(n_light + n_full); for _ in 0..n_light { - let client = LightClient::in_memory(Default::default(), &Spec::new_test(), IoChannel::disconnected()); + let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); + let client = LightClient::in_memory(Default::default(), &Spec::new_test(), IoChannel::disconnected(), cache); peers.push(Arc::new(Peer::new_light(Arc::new(client)))) } From 144d6c2379567320dc57621490d771ddf6567b85 Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Fri, 7 Apr 2017 21:34:07 +0200 Subject: [PATCH 
12/18] Update light_sync test to include cache dependency --- sync/src/light_sync/tests/test_net.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sync/src/light_sync/tests/test_net.rs b/sync/src/light_sync/tests/test_net.rs index f84a37018..1da4d1659 100644 --- a/sync/src/light_sync/tests/test_net.rs +++ b/sync/src/light_sync/tests/test_net.rs @@ -32,6 +32,9 @@ use light::provider::LightProvider; use network::{NodeId, PeerId}; use util::RwLock; +use time::Duration; +use light::cache::Cache; + const NETWORK_ID: u64 = 0xcafebabe; struct TestIoContext<'a> { From d3b2bcdd79463bd9f4793ef14645d27347ecdb5c Mon Sep 17 00:00:00 2001 From: keorn Date: Mon, 10 Apr 2017 19:03:18 +0100 Subject: [PATCH 13/18] Tendermint fixes (#5415) * more resilience * refactor commit * fix proposal broadcast * list encoding * address grumbles * to into --- ethcore/src/engines/tendermint/message.rs | 40 ++--- ethcore/src/engines/tendermint/mod.rs | 189 +++++++++++----------- ethcore/src/engines/vote_collector.rs | 40 ++--- sync/src/chain.rs | 2 +- sync/src/tests/consensus.rs | 4 +- sync/src/tests/helpers.rs | 43 ++--- 6 files changed, 141 insertions(+), 177 deletions(-) diff --git a/ethcore/src/engines/tendermint/message.rs b/ethcore/src/engines/tendermint/message.rs index 0649ea050..304aa2671 100644 --- a/ethcore/src/engines/tendermint/message.rs +++ b/ethcore/src/engines/tendermint/message.rs @@ -61,6 +61,11 @@ pub fn consensus_view(header: &Header) -> Result { UntrustedRlp::new(view_rlp.as_slice()).as_val() } +/// Proposal signature. 
+pub fn proposal_signature(header: &Header) -> Result { + UntrustedRlp::new(header.seal().get(1).expect("seal passed basic verification; seal has 3 fields; qed").as_slice()).as_val() +} + impl Message for ConsensusMessage { type Round = VoteStep; @@ -84,34 +89,18 @@ impl ConsensusMessage { pub fn new_proposal(header: &Header) -> Result { Ok(ConsensusMessage { + signature: proposal_signature(header)?, vote_step: VoteStep::new(header.number() as Height, consensus_view(header)?, Step::Propose), - signature: UntrustedRlp::new(header.seal().get(1).expect("seal passed basic verification; seal has 3 fields; qed").as_slice()).as_val()?, block_hash: Some(header.bare_hash()), }) } - pub fn new_commit(proposal: &ConsensusMessage, signature: H520) -> Self { - let mut vote_step = proposal.vote_step.clone(); - vote_step.step = Step::Precommit; - ConsensusMessage { - vote_step: vote_step, - block_hash: proposal.block_hash, - signature: signature, - } - } - pub fn verify(&self) -> Result { let full_rlp = ::rlp::encode(self); let block_info = Rlp::new(&full_rlp).at(1); let public_key = recover(&self.signature.into(), &block_info.as_raw().sha3())?; Ok(public_to_address(&public_key)) } - - pub fn precommit_hash(&self) -> H256 { - let mut vote_step = self.vote_step.clone(); - vote_step.step = Step::Precommit; - message_info_rlp(&vote_step, self.block_hash).sha3() - } } impl Default for VoteStep { @@ -203,6 +192,10 @@ pub fn message_full_rlp(signature: &H520, vote_info: &Bytes) -> Bytes { s.out() } +pub fn message_hash(vote_step: VoteStep, block_hash: H256) -> H256 { + message_info_rlp(&vote_step, Some(block_hash)).sha3() +} + #[cfg(test)] mod tests { use util::*; @@ -294,19 +287,6 @@ mod tests { ); } - #[test] - fn message_info_from_header() { - let header = Header::default(); - let pro = ConsensusMessage { - signature: Default::default(), - vote_step: VoteStep::new(0, 0, Step::Propose), - block_hash: Some(header.bare_hash()) - }; - let pre = message_info_rlp(&VoteStep::new(0, 0, 
Step::Precommit), Some(header.bare_hash())); - - assert_eq!(pro.precommit_hash(), pre.sha3()); - } - #[test] fn step_ordering() { assert!(VoteStep::new(10, 123, Step::Precommit) < VoteStep::new(11, 123, Step::Precommit)); diff --git a/ethcore/src/engines/tendermint/mod.rs b/ethcore/src/engines/tendermint/mod.rs index 464e102de..8c8094117 100644 --- a/ethcore/src/engines/tendermint/mod.rs +++ b/ethcore/src/engines/tendermint/mod.rs @@ -97,6 +97,8 @@ pub struct Tendermint { proposal: RwLock>, /// Hash of the proposal parent block. proposal_parent: RwLock, + /// Last block proposed by this validator. + last_proposed: RwLock, /// Set used to determine the current validators. validators: Box, } @@ -122,6 +124,7 @@ impl Tendermint { last_lock: AtomicUsize::new(0), proposal: RwLock::new(None), proposal_parent: Default::default(), + last_proposed: Default::default(), validators: new_validator_set(our_params.validators), }); let handler = TransitionHandler::new(Arc::downgrade(&engine) as Weak, Box::new(our_params.timeouts)); @@ -196,6 +199,7 @@ impl Tendermint { self.height.store(new_height, AtomicOrdering::SeqCst); self.view.store(0, AtomicOrdering::SeqCst); *self.lock_change.write() = None; + *self.proposal.write() = None; } /// Use via step_service to transition steps. @@ -206,7 +210,6 @@ impl Tendermint { *self.step.write() = step; match step { Step::Propose => { - *self.proposal.write() = None; self.update_sealing() }, Step::Prevote => { @@ -230,28 +233,6 @@ impl Tendermint { }, Step::Commit => { trace!(target: "engine", "to_step: Commit."); - // Commit the block using a complete signature set. - let view = self.view.load(AtomicOrdering::SeqCst); - let height = self.height.load(AtomicOrdering::SeqCst); - if let Some(block_hash) = *self.proposal.read() { - // Generate seal and remove old votes. 
- if self.is_signer_proposer(&*self.proposal_parent.read()) { - let proposal_step = VoteStep::new(height, view, Step::Propose); - let precommit_step = VoteStep::new(proposal_step.height, proposal_step.view, Step::Precommit); - if let Some(seal) = self.votes.seal_signatures(proposal_step, precommit_step, &block_hash) { - trace!(target: "engine", "Collected seal: {:?}", seal); - let seal = vec![ - ::rlp::encode(&view).to_vec(), - ::rlp::encode(&seal.proposal).to_vec(), - ::rlp::encode_list(&seal.votes).to_vec() - ]; - self.submit_seal(block_hash, seal); - self.to_next_height(height); - } else { - warn!(target: "engine", "Not enough votes found!"); - } - } - } }, } } @@ -260,8 +241,17 @@ impl Tendermint { self.validators.contains(&*self.proposal_parent.read(), address) } - fn is_above_threshold(&self, n: usize) -> bool { - n > self.validators.count(&*self.proposal_parent.read()) * 2/3 + fn check_above_threshold(&self, n: usize) -> Result<(), EngineError> { + let threshold = self.validators.count(&*self.proposal_parent.read()) * 2/3; + if n > threshold { + Ok(()) + } else { + Err(EngineError::BadSealFieldSize(OutOfBounds { + min: Some(threshold), + max: None, + found: n + })) + } } /// Find the designated for the given view. @@ -272,7 +262,7 @@ impl Tendermint { } /// Check if address is a proposer for given view. 
- fn is_view_proposer(&self, bh: &H256, height: Height, view: View, address: &Address) -> Result<(), EngineError> { + fn check_view_proposer(&self, bh: &H256, height: Height, view: View, address: &Address) -> Result<(), EngineError> { let proposer = self.view_proposer(bh, height, view); if proposer == *address { Ok(()) @@ -308,13 +298,13 @@ impl Tendermint { fn has_enough_any_votes(&self) -> bool { let step_votes = self.votes.count_round_votes(&VoteStep::new(self.height.load(AtomicOrdering::SeqCst), self.view.load(AtomicOrdering::SeqCst), *self.step.read())); - self.is_above_threshold(step_votes) + self.check_above_threshold(step_votes).is_ok() } fn has_enough_future_step_votes(&self, vote_step: &VoteStep) -> bool { if vote_step.view > self.view.load(AtomicOrdering::SeqCst) { let step_votes = self.votes.count_round_votes(vote_step); - self.is_above_threshold(step_votes) + self.check_above_threshold(step_votes).is_ok() } else { false } @@ -322,7 +312,7 @@ impl Tendermint { fn has_enough_aligned_votes(&self, message: &ConsensusMessage) -> bool { let aligned_count = self.votes.count_aligned_votes(&message); - self.is_above_threshold(aligned_count) + self.check_above_threshold(aligned_count).is_ok() } fn handle_valid_message(&self, message: &ConsensusMessage) { @@ -337,18 +327,32 @@ impl Tendermint { && self.has_enough_aligned_votes(message); if lock_change { trace!(target: "engine", "handle_valid_message: Lock change."); - *self.lock_change.write() = Some(message.clone()); + *self.lock_change.write() = Some(message.clone()); } // Check if it can affect the step transition. 
if self.is_height(message) { let next_step = match *self.step.read() { + Step::Precommit if message.block_hash.is_none() && self.has_enough_aligned_votes(message) => { + self.increment_view(1); + Some(Step::Propose) + }, Step::Precommit if self.has_enough_aligned_votes(message) => { - if message.block_hash.is_none() { - self.increment_view(1); - Some(Step::Propose) - } else { - Some(Step::Commit) + let bh = message.block_hash.expect("previous guard ensures is_some; qed"); + if *self.last_proposed.read() == bh { + // Commit the block using a complete signature set. + // Generate seal and remove old votes. + let precommits = self.votes.round_signatures(vote_step, &bh); + trace!(target: "engine", "Collected seal: {:?}", precommits); + let seal = vec![ + ::rlp::encode(&vote_step.view).to_vec(), + ::rlp::NULL_RLP.to_vec(), + ::rlp::encode_list(&precommits).to_vec() + ]; + self.submit_seal(bh, seal); + self.votes.throw_out_old(&vote_step); } + self.to_next_height(self.height.load(AtomicOrdering::SeqCst)); + Some(Step::Commit) }, Step::Precommit if self.has_enough_future_step_votes(&vote_step) => { self.increment_view(vote_step.view - self.view.load(AtomicOrdering::SeqCst)); @@ -442,6 +446,8 @@ impl Engine for Tendermint { // Insert Propose vote. debug!(target: "engine", "Submitting proposal {} at height {} view {}.", header.bare_hash(), height, view); self.votes.vote(ConsensusMessage::new(signature, height, view, Step::Propose, bh), author); + // Remember the owned block. + *self.last_proposed.write() = header.bare_hash(); // Remember proposal for later seal submission. 
*self.proposal.write() = bh; *self.proposal_parent.write() = header.parent_hash().clone(); @@ -462,12 +468,12 @@ impl Engine for Tendermint { if !self.votes.is_old_or_known(&message) { let sender = public_to_address(&recover(&message.signature.into(), &rlp.at(1)?.as_raw().sha3())?); if !self.is_authority(&sender) { - Err(EngineError::NotAuthorized(sender))?; + return Err(EngineError::NotAuthorized(sender).into()); } self.broadcast_message(rlp.as_raw().to_vec()); if self.votes.vote(message.clone(), &sender).is_some() { self.validators.report_malicious(&sender); - Err(EngineError::DoubleVote(sender))? + return Err(EngineError::DoubleVote(sender).into()); } trace!(target: "engine", "Handling a valid {:?} from {}.", message, sender); self.handle_valid_message(&message); @@ -491,22 +497,19 @@ impl Engine for Tendermint { fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> { let seal_length = header.seal().len(); if seal_length == self.seal_fields() { - let signatures_len = header.seal()[2].len(); - if signatures_len >= 1 { + // Either proposal or commit. + if (header.seal()[1] == ::rlp::NULL_RLP.to_vec()) + != (header.seal()[2] == ::rlp::EMPTY_LIST_RLP.to_vec()) { Ok(()) } else { - Err(From::from(EngineError::BadSealFieldSize(OutOfBounds { - min: Some(1), - max: None, - found: signatures_len - }))) + warn!(target: "engine", "verify_block_basic: Block is neither a Commit nor Proposal."); + Err(BlockError::InvalidSeal.into()) } } else { - Err(From::from(BlockError::InvalidSealArity( + Err(BlockError::InvalidSealArity( Mismatch { expected: self.seal_fields(), found: seal_length } - ))) + ).into()) } - } fn verify_block_unordered(&self, _header: &Header, _block: Option<&[u8]>) -> Result<(), Error> { @@ -515,50 +518,42 @@ impl Engine for Tendermint { /// Verify validators and gas limit. 
fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> { - let proposal = ConsensusMessage::new_proposal(header)?; - let proposer = proposal.verify()?; - if !self.is_authority(&proposer) { - Err(EngineError::NotAuthorized(proposer))? - } - - let precommit_hash = proposal.precommit_hash(); - let ref signatures_field = header.seal()[2]; - let mut signature_count = 0; - let mut origins = HashSet::new(); - for rlp in UntrustedRlp::new(signatures_field).iter() { - let precommit: ConsensusMessage = ConsensusMessage::new_commit(&proposal, rlp.as_val()?); - let address = match self.votes.get(&precommit) { - Some(a) => a, - None => public_to_address(&recover(&precommit.signature.into(), &precommit_hash)?), - }; - if !self.validators.contains(header.parent_hash(), &address) { - Err(EngineError::NotAuthorized(address.to_owned()))? - } - - if origins.insert(address) { - signature_count += 1; - } else { - warn!(target: "engine", "verify_block_unordered: Duplicate signature from {} on the seal.", address); - Err(BlockError::InvalidSeal)?; - } - } - - // Check if its a proposal if there is not enough precommits. - if !self.is_above_threshold(signature_count) { - let signatures_len = signatures_field.len(); - // Proposal has to have an empty signature list. 
- if signatures_len != 1 { - Err(EngineError::BadSealFieldSize(OutOfBounds { - min: Some(1), - max: Some(1), - found: signatures_len - }))?; - } - self.is_view_proposer(header.parent_hash(), proposal.vote_step.height, proposal.vote_step.view, &proposer)?; - } - if header.number() == 0 { - Err(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() }))?; + return Err(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() }).into()); + } + + if let Ok(proposal) = ConsensusMessage::new_proposal(header) { + let proposer = proposal.verify()?; + if !self.is_authority(&proposer) { + return Err(EngineError::NotAuthorized(proposer).into()); + } + self.check_view_proposer(header.parent_hash(), proposal.vote_step.height, proposal.vote_step.view, &proposer)?; + } else { + let vote_step = VoteStep::new(header.number() as usize, consensus_view(header)?, Step::Precommit); + let precommit_hash = message_hash(vote_step.clone(), header.bare_hash()); + let ref signatures_field = header.seal().get(2).expect("block went through verify_block_basic; block has .seal_fields() fields; qed"); + let mut origins = HashSet::new(); + for rlp in UntrustedRlp::new(signatures_field).iter() { + let precommit = ConsensusMessage { + signature: rlp.as_val()?, + block_hash: Some(header.bare_hash()), + vote_step: vote_step.clone(), + }; + let address = match self.votes.get(&precommit) { + Some(a) => a, + None => public_to_address(&recover(&precommit.signature.into(), &precommit_hash)?), + }; + if !self.validators.contains(header.parent_hash(), &address) { + return Err(EngineError::NotAuthorized(address.to_owned()).into()); + } + + if !origins.insert(address) { + warn!(target: "engine", "verify_block_unordered: Duplicate signature from {} on the seal.", address); + return Err(BlockError::InvalidSeal.into()); + } + } + + self.check_above_threshold(origins.len())? 
} let gas_limit_divisor = self.gas_limit_bound_divisor; @@ -566,7 +561,7 @@ impl Engine for Tendermint { let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor; if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas { self.validators.report_malicious(header.author()); - Err(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() }))?; + return Err(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() }).into()); } Ok(()) @@ -590,13 +585,14 @@ impl Engine for Tendermint { fn is_proposal(&self, header: &Header) -> bool { let signatures_len = header.seal()[2].len(); // Signatures have to be an empty list rlp. - let proposal = ConsensusMessage::new_proposal(header).expect("block went through full verification; this Engine verifies new_proposal creation; qed"); if signatures_len != 1 { // New Commit received, skip to next height. 
- trace!(target: "engine", "Received a commit: {:?}.", proposal.vote_step); - self.to_next_height(proposal.vote_step.height); + trace!(target: "engine", "Received a commit: {:?}.", header.number()); + self.to_next_height(header.number() as usize); + self.to_step(Step::Commit); return false; } + let proposal = ConsensusMessage::new_proposal(header).expect("block went through full verification; this Engine verifies new_proposal creation; qed"); let proposer = proposal.verify().expect("block went through full verification; this Engine tries verify; qed"); debug!(target: "engine", "Received a new proposal {:?} from {}.", proposal.vote_step, proposer); if self.is_view(&proposal) { @@ -647,6 +643,10 @@ impl Engine for Tendermint { } fn register_client(&self, client: Weak) { + use client::BlockChainClient; + if let Some(c) = client.upgrade() { + self.height.store(c.chain_info().best_block_number as usize + 1, AtomicOrdering::SeqCst); + } *self.client.write() = Some(client.clone()); self.validators.register_contract(client); } @@ -825,6 +825,7 @@ mod tests { let vote_info = message_info_rlp(&VoteStep::new(2, 0, Step::Precommit), Some(header.bare_hash())); let signature1 = tap.sign(proposer, None, vote_info.sha3()).unwrap(); + seal[1] = ::rlp::NULL_RLP.to_vec(); seal[2] = ::rlp::encode_list(&vec![H520::from(signature1.clone())]).to_vec(); header.set_seal(seal.clone()); diff --git a/ethcore/src/engines/vote_collector.rs b/ethcore/src/engines/vote_collector.rs index a2969db3a..482dd1d4b 100644 --- a/ethcore/src/engines/vote_collector.rs +++ b/ethcore/src/engines/vote_collector.rs @@ -136,30 +136,14 @@ impl VoteCollector { *guard = new_collector; } - /// Collects the signatures used to seal a block. 
- pub fn seal_signatures(&self, proposal_round: M::Round, commit_round: M::Round, block_hash: &H256) -> Option { - let ref bh = Some(*block_hash); - let maybe_seal = { - let guard = self.votes.read(); - guard - .get(&proposal_round) - .and_then(|c| c.block_votes.get(bh)) - .and_then(|proposals| proposals.keys().next()) - .map(|proposal| SealSignatures { - proposal: proposal.clone(), - votes: guard - .get(&commit_round) - .and_then(|c| c.block_votes.get(bh)) - .map(|precommits| precommits.keys().cloned().collect()) - .unwrap_or_else(Vec::new), - }) - .and_then(|seal| if seal.votes.is_empty() { None } else { Some(seal) }) - }; - if maybe_seal.is_some() { - // Remove messages that are no longer relevant. - self.throw_out_old(&commit_round); - } - maybe_seal + /// Collects the signatures for a given round and hash. + pub fn round_signatures(&self, round: &M::Round, block_hash: &H256) -> Vec { + let guard = self.votes.read(); + guard + .get(round) + .and_then(|c| c.block_votes.get(&Some(*block_hash))) + .map(|votes| votes.keys().cloned().collect()) + .unwrap_or_else(Vec::new) } /// Count votes which agree with the given message. @@ -275,11 +259,9 @@ mod tests { random_vote(&collector, signatures[1].clone(), commit_round.clone(), bh.clone()); // Wrong round, same signature. 
random_vote(&collector, signatures[1].clone(), 7, bh.clone()); - let seal = SealSignatures { - proposal: signatures[0], - votes: signatures[1..3].to_vec() - }; - assert_eq!(seal, collector.seal_signatures(propose_round, commit_round, &bh.unwrap()).unwrap()); + + assert_eq!(signatures[0..1].to_vec(), collector.round_signatures(&propose_round, &bh.unwrap())); + assert_eq!(signatures[1..3].iter().collect::>(), collector.round_signatures(&commit_round, &bh.unwrap()).iter().collect::>()); } #[test] diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 3c1717620..be2aeada7 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -2132,7 +2132,7 @@ impl ChainSync { let queue_info = io.chain().queue_info(); let is_syncing = self.status().is_syncing(queue_info); - if !is_syncing || !sealed.is_empty() { + if !is_syncing || !sealed.is_empty() || !proposed.is_empty() { trace!(target: "sync", "Propagating blocks, state={:?}", self.state); self.propagate_latest_blocks(io, sealed); self.propagate_proposed_blocks(io, proposed); diff --git a/sync/src/tests/consensus.rs b/sync/src/tests/consensus.rs index 2c45bbd28..00a7452c4 100644 --- a/sync/src/tests/consensus.rs +++ b/sync/src/tests/consensus.rs @@ -196,8 +196,8 @@ fn tendermint() { // Propose net.peer(0).chain.engine().step(); net.peer(1).chain.engine().step(); -net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 2.into())).unwrap(); - net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 2.into())).unwrap(); + net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 2.into())).unwrap(); + net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 2.into())).unwrap(); // Send different prevotes net.sync(); // Prevote timeout diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index 328c0a24f..9d32d1951 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs 
@@ -277,31 +277,32 @@ impl TestNet> { started: false, disconnect_events: Vec::new(), }; - for _ in 0..n { - let spec = spec_factory(); - let client = EthcoreClient::new( - ClientConfig::default(), - &spec, - Arc::new(::util::kvdb::in_memory(::ethcore::db::NUM_COLUMNS.unwrap_or(0))), - Arc::new(Miner::with_spec_and_accounts(&spec, accounts.clone())), - IoChannel::disconnected(), - ).unwrap(); - - let ss = Arc::new(TestSnapshotService::new()); - let sync = ChainSync::new(config.clone(), &*client); - let peer = Arc::new(EthPeer { - sync: RwLock::new(sync), - snapshot_service: ss, - chain: client, - queue: RwLock::new(VecDeque::new()), - }); - peer.chain.add_notify(peer.clone()); - net.peers.push(peer); + net.add_peer(config.clone(), spec_factory(), accounts.clone()); } - net } + + pub fn add_peer(&mut self, config: SyncConfig, spec: Spec, accounts: Option>) { + let client = EthcoreClient::new( + ClientConfig::default(), + &spec, + Arc::new(::util::kvdb::in_memory(::ethcore::db::NUM_COLUMNS.unwrap_or(0))), + Arc::new(Miner::with_spec_and_accounts(&spec, accounts)), + IoChannel::disconnected(), + ).unwrap(); + + let ss = Arc::new(TestSnapshotService::new()); + let sync = ChainSync::new(config, &*client); + let peer = Arc::new(EthPeer { + sync: RwLock::new(sync), + snapshot_service: ss, + chain: client, + queue: RwLock::new(VecDeque::new()), + }); + peer.chain.add_notify(peer.clone()); + self.peers.push(peer); + } } impl

<P> TestNet<P>

where P: Peer { From 4f8e61dce99c8ba8c850764a617724d24e7932df Mon Sep 17 00:00:00 2001 From: Marek Kotewicz Date: Tue, 11 Apr 2017 16:24:56 +0800 Subject: [PATCH 14/18] easy to use conversion from and to string for ethstore::Crypto (#5437) * easy to use conversion from and to string for ethstore::Crypto * ethstore uses tempdir instead of devtools * ethstore does not depend on ethcore-util --- Cargo.lock | 4 ++-- ethcrypto/src/lib.rs | 4 ++-- ethstore/Cargo.toml | 4 ++-- ethstore/src/account/crypto.rs | 27 +++++++++++++++++++------ ethstore/src/dir/disk.rs | 18 +++++++++-------- ethstore/src/dir/vault.rs | 36 ++++++++++++++++++---------------- ethstore/src/ethstore.rs | 11 ++++++----- ethstore/src/json/crypto.rs | 17 +++++++++++++++- ethstore/src/lib.rs | 6 +++--- ethstore/src/secret_store.rs | 2 +- 10 files changed, 82 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e9edecc88..830e1e75a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -784,8 +784,7 @@ name = "ethstore" version = "0.1.0" dependencies = [ "docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore-devtools 1.7.0", - "ethcore-util 1.7.0", + "ethcore-bigint 0.1.2", "ethcrypto 0.1.0", "ethkey 0.2.0", "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", @@ -800,6 +799,7 @@ dependencies = [ "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/ethcrypto/src/lib.rs b/ethcrypto/src/lib.rs index 9c1352087..ea0ea9754 100644 --- a/ethcrypto/src/lib.rs +++ b/ethcrypto/src/lib.rs @@ -94,11 +94,11 @@ pub trait Keccak256 { fn keccak256(&self) -> 
T where T: Sized; } -impl Keccak256<[u8; 32]> for [u8] { +impl Keccak256<[u8; 32]> for T where T: AsRef<[u8]> { fn keccak256(&self) -> [u8; 32] { let mut keccak = Keccak::new_keccak256(); let mut result = [0u8; 32]; - keccak.update(self); + keccak.update(self.as_ref()); keccak.finalize(&mut result); result } diff --git a/ethstore/Cargo.toml b/ethstore/Cargo.toml index a6a0d1752..cd2a11db1 100755 --- a/ethstore/Cargo.toml +++ b/ethstore/Cargo.toml @@ -19,10 +19,10 @@ time = "0.1.34" itertools = "0.5" parking_lot = "0.4" ethcrypto = { path = "../ethcrypto" } -ethcore-util = { path = "../util" } +ethcore-bigint = { path = "../util/bigint" } smallvec = "0.3.1" -ethcore-devtools = { path = "../devtools" } parity-wordlist = "1.0" +tempdir = "0.3" [features] cli = ["docopt"] diff --git a/ethstore/src/account/crypto.rs b/ethstore/src/account/crypto.rs index adcae997d..8c4825a22 100755 --- a/ethstore/src/account/crypto.rs +++ b/ethstore/src/account/crypto.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . 
use std::iter::repeat; +use std::str; use ethkey::Secret; use {json, Error, crypto}; use crypto::Keccak256; @@ -46,17 +47,31 @@ impl From for Crypto { } } -impl Into for Crypto { - fn into(self) -> json::Crypto { +impl From for json::Crypto { + fn from(c: Crypto) -> Self { json::Crypto { - cipher: self.cipher.into(), - ciphertext: self.ciphertext.into(), - kdf: self.kdf.into(), - mac: self.mac.into(), + cipher: c.cipher.into(), + ciphertext: c.ciphertext.into(), + kdf: c.kdf.into(), + mac: c.mac.into(), } } } +impl str::FromStr for Crypto { + type Err = ::Err; + + fn from_str(s: &str) -> Result { + s.parse::().map(Into::into) + } +} + +impl From for String { + fn from(c: Crypto) -> Self { + json::Crypto::from(c).into() + } +} + impl Crypto { pub fn with_secret(secret: &Secret, password: &str, iterations: u32) -> Self { Crypto::with_plain(&*secret, password, iterations) diff --git a/ethstore/src/dir/disk.rs b/ethstore/src/dir/disk.rs index afca0955d..3f56ebe40 100755 --- a/ethstore/src/dir/disk.rs +++ b/ethstore/src/dir/disk.rs @@ -225,8 +225,8 @@ impl KeyDirectory for DiskDirectory where T: KeyFileManager { Some(self) } - fn unique_repr(&self) -> Result { - self.files_hash() + fn unique_repr(&self) -> Result { + self.files_hash() } } @@ -280,12 +280,14 @@ impl KeyFileManager for DiskKeyFileManager { #[cfg(test)] mod test { + extern crate tempdir; + use std::{env, fs}; use super::RootDiskDirectory; use dir::{KeyDirectory, VaultKey}; use account::SafeAccount; use ethkey::{Random, Generator}; - use devtools::RandomTempPath; + use self::tempdir::TempDir; #[test] fn should_create_new_account() { @@ -344,7 +346,7 @@ mod test { #[test] fn should_list_vaults() { // given - let temp_path = RandomTempPath::new(); + let temp_path = TempDir::new("").unwrap(); let directory = RootDiskDirectory::create(&temp_path).unwrap(); let vault_provider = directory.as_vault_provider().unwrap(); vault_provider.create("vault1", VaultKey::new("password1", 1)).unwrap(); @@ -359,11 +361,11 @@ 
mod test { #[test] fn hash_of_files() { - let temp_path = RandomTempPath::new(); + let temp_path = TempDir::new("").unwrap(); let directory = RootDiskDirectory::create(&temp_path).unwrap(); - let hash = directory.files_hash().expect("Files hash should be calculated ok"); - assert_eq!( + let hash = directory.files_hash().expect("Files hash should be calculated ok"); + assert_eq!( hash, 15130871412783076140 ); @@ -373,7 +375,7 @@ mod test { let account = SafeAccount::create(&keypair, [0u8; 16], password, 1024, "Test".to_owned(), "{}".to_owned()); directory.insert(account).expect("Account should be inserted ok"); - let new_hash = directory.files_hash().expect("New files hash should be calculated ok"); + let new_hash = directory.files_hash().expect("New files hash should be calculated ok"); assert!(new_hash != hash, "hash of the file list should change once directory content changed"); } diff --git a/ethstore/src/dir/vault.rs b/ethstore/src/dir/vault.rs index f321e3fbb..31c99fcc4 100755 --- a/ethstore/src/dir/vault.rs +++ b/ethstore/src/dir/vault.rs @@ -18,7 +18,7 @@ use std::{fs, io}; use std::path::{PathBuf, Path}; use parking_lot::Mutex; use {json, SafeAccount, Error}; -use util::sha3::Hashable; +use crypto::Keccak256; use super::super::account::Crypto; use super::{KeyDirectory, VaultKeyDirectory, VaultKey, SetKeyError}; use super::disk::{DiskDirectory, KeyFileManager}; @@ -234,7 +234,7 @@ fn check_vault_name(name: &str) -> bool { /// Vault can be empty, but still must be pluggable => we store vault password in separate file fn create_vault_file
<P>
(vault_dir_path: P, key: &VaultKey, meta: &str) -> Result<(), Error> where P: AsRef { - let password_hash = key.password.sha3(); + let password_hash = key.password.keccak256(); let crypto = Crypto::with_plain(&password_hash, &key.password, key.iterations); let mut vault_file_path: PathBuf = vault_dir_path.as_ref().into(); @@ -268,8 +268,8 @@ fn read_vault_file
<P>
(vault_dir_path: P, key: Option<&VaultKey>) -> Result(vault_dir_path: P, key: Option<&VaultKey>) -> Result KeyPair { Random.generate().unwrap() @@ -642,13 +643,13 @@ mod tests { struct RootDiskDirectoryGuard { pub key_dir: Option>, - _path: RandomTempPath, + _path: TempDir, } impl RootDiskDirectoryGuard { pub fn new() -> Self { - let temp_path = RandomTempPath::new(); - let disk_dir = Box::new(RootDiskDirectory::create(temp_path.as_path()).unwrap()); + let temp_path = TempDir::new("").unwrap(); + let disk_dir = Box::new(RootDiskDirectory::create(temp_path.path()).unwrap()); RootDiskDirectoryGuard { key_dir: Some(disk_dir), diff --git a/ethstore/src/json/crypto.rs b/ethstore/src/json/crypto.rs index 63d73845f..ee1f08502 100644 --- a/ethstore/src/json/crypto.rs +++ b/ethstore/src/json/crypto.rs @@ -14,10 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::fmt; +use std::{fmt, str}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde::ser::SerializeStruct; use serde::de::{Visitor, MapVisitor, Error}; +use serde_json; use super::{Cipher, CipherSer, CipherSerParams, Kdf, KdfSer, KdfSerParams, H256, Bytes}; pub type CipherText = Bytes; @@ -30,6 +31,20 @@ pub struct Crypto { pub mac: H256, } +impl str::FromStr for Crypto { + type Err = serde_json::error::Error; + + fn from_str(s: &str) -> Result { + serde_json::from_str(s) + } +} + +impl From for String { + fn from(c: Crypto) -> Self { + serde_json::to_string(&c).expect("serialization cannot fail, cause all crypto keys are strings") + } +} + enum CryptoField { Cipher, CipherParams, diff --git a/ethstore/src/lib.rs b/ethstore/src/lib.rs index 3661cf8f2..145a58023 100755 --- a/ethstore/src/lib.rs +++ b/ethstore/src/lib.rs @@ -29,9 +29,9 @@ extern crate serde_json; extern crate smallvec; extern crate time; extern crate tiny_keccak; +extern crate tempdir; -extern crate ethcore_devtools as devtools; -extern crate ethcore_util as util; 
+extern crate ethcore_bigint as bigint; extern crate ethcrypto as crypto; extern crate ethkey as _ethkey; extern crate parity_wordlist; @@ -54,7 +54,7 @@ mod presale; mod random; mod secret_store; -pub use self::account::SafeAccount; +pub use self::account::{SafeAccount, Crypto}; pub use self::error::Error; pub use self::ethstore::{EthStore, EthMultiStore}; pub use self::import::{import_accounts, read_geth_accounts}; diff --git a/ethstore/src/secret_store.rs b/ethstore/src/secret_store.rs index fd7eea50d..b292f0ef4 100755 --- a/ethstore/src/secret_store.rs +++ b/ethstore/src/secret_store.rs @@ -19,7 +19,7 @@ use std::path::PathBuf; use ethkey::{Address, Message, Signature, Secret, Public}; use Error; use json::{Uuid, OpaqueKeyFile}; -use util::H256; +use bigint::hash::H256; /// Key directory reference #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] From bb8adcce9255770a3523b907ffbe8682b9232f31 Mon Sep 17 00:00:00 2001 From: Jaco Greeff Date: Tue, 11 Apr 2017 16:43:15 +0200 Subject: [PATCH 15/18] Work around mismatch for QR checksum (#5374) * Work around current native-signer encoding * Avoid trying to use non-existant util function. 
--- js/src/i18n/zh/writeContract.js | 2 +- js/src/modals/CreateAccount/store.js | 6 +++++- js/src/views/WriteContract/writeContract.js | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/js/src/i18n/zh/writeContract.js b/js/src/i18n/zh/writeContract.js index fc1100b77..62f83dd4d 100644 --- a/js/src/i18n/zh/writeContract.js +++ b/js/src/i18n/zh/writeContract.js @@ -37,7 +37,7 @@ export default { params: `An error occurred with the following description` }, input: { - abi: `ABI Interface`, + abi: `ABI Definition`, code: `Bytecode`, metadata: `Metadata`, swarm: `Swarm Metadata Hash` diff --git a/js/src/modals/CreateAccount/store.js b/js/src/modals/CreateAccount/store.js index 9bc60d9af..9f78360fa 100644 --- a/js/src/modals/CreateAccount/store.js +++ b/js/src/modals/CreateAccount/store.js @@ -96,6 +96,7 @@ export default class Store { } @computed get qrAddressValid () { + console.log('qrValid', this.qrAddress, this._api.util.isAddressValid(this.qrAddress)); return this._api.util.isAddressValid(this.qrAddress); } @@ -155,7 +156,10 @@ export default class Store { qrAddress = `0x${qrAddress}`; } - this.qrAddress = qrAddress; + // FIXME: Current native signer encoding is not 100% for EIP-55, lowercase for now + this.qrAddress = this._api.util + ? 
this._api.util.toChecksumAddress(qrAddress.toLowerCase()) + : qrAddress; } @action setVaultName = (vaultName) => { diff --git a/js/src/views/WriteContract/writeContract.js b/js/src/views/WriteContract/writeContract.js index 86262820d..170c0d7c1 100644 --- a/js/src/views/WriteContract/writeContract.js +++ b/js/src/views/WriteContract/writeContract.js @@ -608,7 +608,7 @@ class WriteContract extends Component { label={ } readOnly From e84d03f31d80a5b6b674371a852c56b6a1343c1c Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Tue, 11 Apr 2017 15:08:42 +0000 Subject: [PATCH 16/18] [ci skip] js-precompiled 20170411-150514 --- Cargo.lock | 2 +- js/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 830e1e75a..8f710cc18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1761,7 +1761,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#b4c41885c6e02c64fb773546b2f135f56ea7022f" +source = "git+https://github.com/paritytech/js-precompiled.git#f4fa3048dcb0e202c53a61b9b9e7dd446fa1c088" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package.json b/js/package.json index 56806ef41..e4bf27430 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.7.49", + "version": "1.7.50", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", From daf1495c4efe9b4aecc9adab1494f4f8b5acd1e5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 12 Apr 2017 12:07:54 +0200 Subject: [PATCH 17/18] Filters and block RPCs for the light client (#5320) * block_hash method for LightChainClient * abstraction and futures-based eth_filter * log fetching for light client * add eth-filter delegate * eth_block fetching RPCs * return default accounts from on_demand * fix early exit * BlockNumber -> BlockId * 
early exit for no known block number. --- ethcore/light/src/client/header_chain.rs | 38 ++++ ethcore/light/src/client/mod.rs | 26 ++- ethcore/light/src/on_demand/mod.rs | 27 ++- ethcore/src/types/encoded.rs | 6 + parity/light_helpers/queue_cull.rs | 4 +- parity/rpc_apis.rs | 7 +- parity/run.rs | 3 +- rpc/src/v1/helpers/dispatch.rs | 2 +- rpc/src/v1/impls/eth.rs | 10 +- rpc/src/v1/impls/eth_filter.rs | 174 +++++++++++------ rpc/src/v1/impls/light/eth.rs | 239 +++++++++++++++++++++-- rpc/src/v1/tests/mocked/eth.rs | 2 +- rpc/src/v1/traits/eth.rs | 12 +- 13 files changed, 442 insertions(+), 108 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index fe3c86aad..35e6d996c 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -361,6 +361,22 @@ impl HeaderChain { } } + /// Get a block's hash by ID. In the case of query by number, only canonical results + /// will be returned. + pub fn block_hash(&self, id: BlockId) -> Option { + match id { + BlockId::Earliest => Some(self.genesis_hash()), + BlockId::Hash(hash) => Some(hash), + BlockId::Number(num) => { + if self.best_block.read().number < num { return None } + self.candidates.read().get(&num).map(|entry| entry.canonical_hash) + } + BlockId::Latest | BlockId::Pending => { + Some(self.best_block.read().hash) + } + } + } + /// Get a block header. In the case of query by number, only canonical blocks /// will be returned. pub fn block_header(&self, id: BlockId) -> Option { @@ -414,6 +430,28 @@ impl HeaderChain { } } + /// Get a block's chain score. + /// Returns nothing for non-canonical blocks. 
+ pub fn score(&self, id: BlockId) -> Option { + let genesis_hash = self.genesis_hash(); + match id { + BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_header.difficulty()), + BlockId::Hash(hash) if hash == genesis_hash => Some(self.genesis_header.difficulty()), + BlockId::Hash(hash) => match self.block_header(BlockId::Hash(hash)) { + Some(header) => self.candidates.read().get(&header.number()) + .and_then(|era| era.candidates.iter().find(|e| e.hash == hash)) + .map(|c| c.total_difficulty), + None => None, + }, + BlockId::Number(num) => { + let candidates = self.candidates.read(); + if self.best_block.read().number < num { return None } + candidates.get(&num).map(|era| era.candidates[0].total_difficulty) + } + BlockId::Latest | BlockId::Pending => Some(self.best_block.read().total_difficulty) + } + } + /// Get the best block's header. pub fn best_header(&self) -> encoded::Header { self.block_header(BlockId::Latest).expect("Header for best block always stored; qed") diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 5424d5d1a..9e2257930 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -31,7 +31,7 @@ use ethcore::service::ClientIoMessage; use ethcore::encoded; use io::IoChannel; -use util::{H256, Mutex, RwLock}; +use util::{H256, U256, Mutex, RwLock}; use util::kvdb::{KeyValueDB, CompactionProfile}; use self::header_chain::{AncestryIter, HeaderChain}; @@ -67,12 +67,18 @@ pub trait LightChainClient: Send + Sync { /// parent queued prior. fn queue_header(&self, header: Header) -> Result; + /// Attempt to get a block hash by block id. + fn block_hash(&self, id: BlockId) -> Option; + /// Attempt to get block header by block id. fn block_header(&self, id: BlockId) -> Option; /// Get the best block header. fn best_block_header(&self) -> encoded::Header; + /// Get a block's chain score by ID. + fn score(&self, id: BlockId) -> Option; + /// Get an iterator over a block and its ancestry. 
fn ancestry_iter<'a>(&'a self, start: BlockId) -> Box + 'a>; @@ -183,6 +189,11 @@ impl Client { self.queue.queue_info() } + /// Attempt to get a block hash by block id. + pub fn block_hash(&self, id: BlockId) -> Option { + self.chain.block_hash(id) + } + /// Get a block header by Id. pub fn block_header(&self, id: BlockId) -> Option { self.chain.block_header(id) @@ -193,6 +204,11 @@ impl Client { self.chain.best_header() } + /// Get a block's chain score. + pub fn score(&self, id: BlockId) -> Option { + self.chain.score(id) + } + /// Get an iterator over a block and its ancestry. pub fn ancestry_iter(&self, start: BlockId) -> AncestryIter { self.chain.ancestry_iter(start) @@ -310,6 +326,10 @@ impl LightChainClient for Client { self.import_header(header) } + fn block_hash(&self, id: BlockId) -> Option { + Client::block_hash(self, id) + } + fn block_header(&self, id: BlockId) -> Option { Client::block_header(self, id) } @@ -318,6 +338,10 @@ impl LightChainClient for Client { Client::best_block_header(self) } + fn score(&self, id: BlockId) -> Option { + Client::score(self, id) + } + fn ancestry_iter<'a>(&'a self, start: BlockId) -> Box + 'a> { Box::new(Client::ancestry_iter(self, start)) } diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index a7c1ba2c4..c756844c9 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -35,7 +35,7 @@ use futures::sync::oneshot::{self, Sender, Receiver}; use network::PeerId; use rlp::RlpStream; use util::{Bytes, RwLock, Mutex, U256, H256}; -use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; +use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY, SHA3_EMPTY_LIST_RLP}; use net::{self, Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; use cache::Cache; @@ -83,7 +83,7 @@ enum Pending { HeaderByHash(request::HeaderByHash, Sender), Block(request::Body, Sender), BlockReceipts(request::BlockReceipts, Sender>), - Account(request::Account, Sender>), + 
Account(request::Account, Sender), Code(request::Code, Sender), TxProof(request::TransactionProof, Sender>), } @@ -136,18 +136,20 @@ pub struct OnDemand { pending_requests: RwLock>, cache: Arc>, orphaned_requests: RwLock>, + start_nonce: U256, } const RECEIVER_IN_SCOPE: &'static str = "Receiver is still in scope, so it's not dropped; qed"; impl OnDemand { /// Create a new `OnDemand` service with the given cache. - pub fn new(cache: Arc>) -> Self { + pub fn new(cache: Arc>, account_start_nonce: U256) -> Self { OnDemand { peers: RwLock::new(HashMap::new()), pending_requests: RwLock::new(HashMap::new()), cache: cache, orphaned_requests: RwLock::new(Vec::new()), + start_nonce: account_start_nonce, } } @@ -268,7 +270,7 @@ impl OnDemand { /// Request an account by address and block header -- which gives a hash to query and a state root /// to verify against. - pub fn account(&self, ctx: &BasicContext, req: request::Account) -> Receiver> { + pub fn account(&self, ctx: &BasicContext, req: request::Account) -> Receiver { let (sender, receiver) = oneshot::channel(); self.dispatch(ctx, Pending::Account(req, sender)); receiver @@ -279,7 +281,7 @@ impl OnDemand { let (sender, receiver) = oneshot::channel(); // fast path for no code. - if req.code_hash == ::util::sha3::SHA3_EMPTY { + if req.code_hash == SHA3_EMPTY { sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE) } else { self.dispatch(ctx, Pending::Code(req, sender)); @@ -497,10 +499,19 @@ impl Handler for OnDemand { Pending::Account(req, sender) => { if let NetworkResponse::Account(ref response) = *response { match req.check_response(&response.proof) { - Ok(maybe_account) => { + Ok(account) => { + let account = account.unwrap_or_else(|| { + BasicAccount { + balance: 0.into(), + nonce: self.start_nonce, + code_hash: SHA3_EMPTY, + storage_root: SHA3_NULL_RLP + } + }); + // TODO: validate against request outputs. // needs engine + env info as part of request. 
- let _ = sender.send(maybe_account); + let _ = sender.send(account); return } Err(e) => warn!(target: "on_demand", "Error handling response for state request: {:?}", e), @@ -572,7 +583,7 @@ mod tests { #[test] fn detects_hangup() { let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); - let on_demand = OnDemand::new(cache); + let on_demand = OnDemand::new(cache, 0.into()); let result = on_demand.header_by_hash(&FakeContext, request::HeaderByHash(H256::default())); assert!(on_demand.orphaned_requests.read().len() == 1); diff --git a/ethcore/src/types/encoded.rs b/ethcore/src/types/encoded.rs index 0a4164044..125a00fd0 100644 --- a/ethcore/src/types/encoded.rs +++ b/ethcore/src/types/encoded.rs @@ -199,6 +199,12 @@ impl Block { /// Decode to a full block. pub fn decode(&self) -> FullBlock { ::rlp::decode(&self.0) } + /// Decode the header. + pub fn decode_header(&self) -> FullHeader { self.rlp().val_at(0) } + + /// Clone the encoded header. + pub fn header(&self) -> Header { Header(self.rlp().at(0).as_raw().to_vec()) } + /// Get the rlp of this block. #[inline] pub fn rlp(&self) -> Rlp { diff --git a/parity/light_helpers/queue_cull.rs b/parity/light_helpers/queue_cull.rs index 10865d485..548ee33cd 100644 --- a/parity/light_helpers/queue_cull.rs +++ b/parity/light_helpers/queue_cull.rs @@ -67,7 +67,6 @@ impl IoHandler for QueueCull { let (sync, on_demand, txq) = (self.sync.clone(), self.on_demand.clone(), self.txq.clone()); let best_header = self.client.best_block_header(); - let start_nonce = self.client.engine().account_start_nonce(); info!(target: "cull", "Attempting to cull queued transactions from {} senders.", senders.len()); self.remote.spawn_with_timeout(move || { @@ -75,8 +74,7 @@ impl IoHandler for QueueCull { // fetch the nonce of each sender in the queue. 
let nonce_futures = senders.iter() .map(|&address| request::Account { header: best_header.clone(), address: address }) - .map(|request| on_demand.account(ctx, request)) - .map(move |fut| fut.map(move |x| x.map(|acc| acc.nonce).unwrap_or(start_nonce))) + .map(|request| on_demand.account(ctx, request).map(|acc| acc.nonce)) .zip(senders.iter()) .map(|(fut, &addr)| fut.map(move |nonce| (addr, nonce))); diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index cf3c0b7c9..538808909 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -244,7 +244,7 @@ impl Dependencies for FullDependencies { ); handler.extend_with(client.to_delegate()); - let filter_client = EthFilterClient::new(&self.client, &self.miner); + let filter_client = EthFilterClient::new(self.client.clone(), self.miner.clone()); handler.extend_with(filter_client.to_delegate()); add_signing_methods!(EthSigning, handler, self); @@ -377,9 +377,8 @@ impl Dependencies for LightDependencies { self.secret_store.clone(), self.cache.clone(), ); - handler.extend_with(client.to_delegate()); - - // TODO: filters. + handler.extend_with(Eth::to_delegate(client.clone())); + handler.extend_with(EthFilter::to_delegate(client)); add_signing_methods!(EthSigning, handler, self); }, Api::Personal => { diff --git a/parity/run.rs b/parity/run.rs index c6086d6b8..70a23f934 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -221,7 +221,8 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> } // start on_demand service. - let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone())); + let account_start_nonce = service.client().engine().account_start_nonce(); + let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone(), account_start_nonce)); // set network path. 
net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned()); diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index e1b298b9f..d58a211ed 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -268,7 +268,7 @@ impl LightDispatcher { match nonce_future { Some(x) => - x.map(|acc| acc.map_or_else(Default::default, |acc| acc.nonce)) + x.map(|acc| acc.nonce) .map_err(|_| errors::no_light_peers()) .boxed(), None => future::err(errors::network_disabled()).boxed() diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 73d4d9f9b..01d624e97 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -544,23 +544,23 @@ impl Eth for EthClient where Err(errors::deprecated("Compilation functionality is deprecated.".to_string())) } - fn logs(&self, filter: Filter) -> Result, Error> { + fn logs(&self, filter: Filter) -> BoxFuture, Error> { let include_pending = filter.to_block == Some(BlockNumber::Pending); let filter: EthcoreFilter = filter.into(); - let mut logs = take_weak!(self.client).logs(filter.clone()) + let mut logs = take_weakf!(self.client).logs(filter.clone()) .into_iter() .map(From::from) .collect::>(); if include_pending { - let best_block = take_weak!(self.client).chain_info().best_block_number; - let pending = pending_logs(&*take_weak!(self.miner), best_block, &filter); + let best_block = take_weakf!(self.client).chain_info().best_block_number; + let pending = pending_logs(&*take_weakf!(self.miner), best_block, &filter); logs.extend(pending); } let logs = limit_logs(logs, filter.limit); - Ok(logs) + future::ok(logs).boxed() } fn work(&self, no_new_work_timeout: Trailing) -> Result { diff --git a/rpc/src/v1/impls/eth_filter.rs b/rpc/src/v1/impls/eth_filter.rs index cf3398498..8f448feb5 100644 --- a/rpc/src/v1/impls/eth_filter.rs +++ b/rpc/src/v1/impls/eth_filter.rs @@ -16,89 +16,133 @@ //! 
Eth Filter RPC implementation -use std::sync::{Arc, Weak}; +use std::sync::Arc; use std::collections::HashSet; + use jsonrpc_core::*; use ethcore::miner::MinerService; use ethcore::filter::Filter as EthcoreFilter; use ethcore::client::{BlockChainClient, BlockId}; -use util::Mutex; +use util::{H256, Mutex}; + +use futures::{future, Future, BoxFuture}; + use v1::traits::EthFilter; use v1::types::{BlockNumber, Index, Filter, FilterChanges, Log, H256 as RpcH256, U256 as RpcU256}; use v1::helpers::{PollFilter, PollManager, limit_logs}; use v1::impls::eth::pending_logs; -/// Eth filter rpc implementation. +/// Something which provides data that can be filtered over. +pub trait Filterable { + /// Current best block number. + fn best_block_number(&self) -> u64; + + /// Get a block hash by block id. + fn block_hash(&self, id: BlockId) -> Option; + + /// pending transaction hashes at the given block. + fn pending_transactions_hashes(&self, block_number: u64) -> Vec; + + /// Get logs that match the given filter. + fn logs(&self, filter: EthcoreFilter) -> BoxFuture, Error>; + + /// Get logs from the pending block. + fn pending_logs(&self, block_number: u64, filter: &EthcoreFilter) -> Vec; + + /// Get a reference to the poll manager. + fn polls(&self) -> &Mutex>; +} + +/// Eth filter rpc implementation for a full node. pub struct EthFilterClient where C: BlockChainClient, M: MinerService { - client: Weak, - miner: Weak, + client: Arc, + miner: Arc, polls: Mutex>, } -impl EthFilterClient where - C: BlockChainClient, - M: MinerService { - +impl EthFilterClient where C: BlockChainClient, M: MinerService { /// Creates new Eth filter client. 
- pub fn new(client: &Arc, miner: &Arc) -> Self { + pub fn new(client: Arc, miner: Arc) -> Self { EthFilterClient { - client: Arc::downgrade(client), - miner: Arc::downgrade(miner), + client: client, + miner: miner, polls: Mutex::new(PollManager::new()), } } } -impl EthFilter for EthFilterClient - where C: BlockChainClient + 'static, M: MinerService + 'static -{ +impl Filterable for EthFilterClient where C: BlockChainClient, M: MinerService { + fn best_block_number(&self) -> u64 { + self.client.chain_info().best_block_number + } + + fn block_hash(&self, id: BlockId) -> Option { + self.client.block_hash(id).map(Into::into) + } + + fn pending_transactions_hashes(&self, best: u64) -> Vec { + self.miner.pending_transactions_hashes(best) + } + + fn logs(&self, filter: EthcoreFilter) -> BoxFuture, Error> { + future::ok(self.client.logs(filter).into_iter().map(Into::into).collect()).boxed() + } + + fn pending_logs(&self, block_number: u64, filter: &EthcoreFilter) -> Vec { + pending_logs(&*self.miner, block_number, filter) + } + + fn polls(&self) -> &Mutex> { &self.polls } +} + + + +impl EthFilter for T { fn new_filter(&self, filter: Filter) -> Result { - let mut polls = self.polls.lock(); - let block_number = take_weak!(self.client).chain_info().best_block_number; + let mut polls = self.polls().lock(); + let block_number = self.best_block_number(); let id = polls.create_poll(PollFilter::Logs(block_number, Default::default(), filter)); Ok(id.into()) } fn new_block_filter(&self) -> Result { - let mut polls = self.polls.lock(); - let id = polls.create_poll(PollFilter::Block(take_weak!(self.client).chain_info().best_block_number)); + let mut polls = self.polls().lock(); + let id = polls.create_poll(PollFilter::Block(self.best_block_number())); Ok(id.into()) } fn new_pending_transaction_filter(&self) -> Result { - let mut polls = self.polls.lock(); - let best_block = take_weak!(self.client).chain_info().best_block_number; - let pending_transactions = 
take_weak!(self.miner).pending_transactions_hashes(best_block); + let mut polls = self.polls().lock(); + let best_block = self.best_block_number(); + let pending_transactions = self.pending_transactions_hashes(best_block); let id = polls.create_poll(PollFilter::PendingTransaction(pending_transactions)); Ok(id.into()) } - fn filter_changes(&self, index: Index) -> Result { - let client = take_weak!(self.client); - let mut polls = self.polls.lock(); + fn filter_changes(&self, index: Index) -> BoxFuture { + let mut polls = self.polls().lock(); match polls.poll_mut(&index.value()) { - None => Ok(FilterChanges::Empty), + None => future::ok(FilterChanges::Empty).boxed(), Some(filter) => match *filter { PollFilter::Block(ref mut block_number) => { // + 1, cause we want to return hashes including current block hash. - let current_number = client.chain_info().best_block_number + 1; + let current_number = self.best_block_number() + 1; let hashes = (*block_number..current_number).into_iter() .map(BlockId::Number) - .filter_map(|id| client.block_hash(id)) - .map(Into::into) + .filter_map(|id| self.block_hash(id)) .collect::>(); *block_number = current_number; - Ok(FilterChanges::Hashes(hashes)) + future::ok(FilterChanges::Hashes(hashes)).boxed() }, PollFilter::PendingTransaction(ref mut previous_hashes) => { // get hashes of pending transactions - let best_block = take_weak!(self.client).chain_info().best_block_number; - let current_hashes = take_weak!(self.miner).pending_transactions_hashes(best_block); + let best_block = self.best_block_number(); + let current_hashes = self.pending_transactions_hashes(best_block); let new_hashes = { @@ -117,11 +161,11 @@ impl EthFilter for EthFilterClient *previous_hashes = current_hashes; // return new hashes - Ok(FilterChanges::Hashes(new_hashes)) + future::ok(FilterChanges::Hashes(new_hashes)).boxed() }, PollFilter::Logs(ref mut block_number, ref mut previous_logs, ref filter) => { // retrive the current block number - let current_number = 
client.chain_info().best_block_number; + let current_number = self.best_block_number(); // check if we need to check pending hashes let include_pending = filter.to_block == Some(BlockNumber::Pending); @@ -131,16 +175,9 @@ impl EthFilter for EthFilterClient filter.from_block = BlockId::Number(*block_number); filter.to_block = BlockId::Latest; - // retrieve logs in range from_block..min(BlockId::Latest..to_block) - let mut logs = client.logs(filter.clone()) - .into_iter() - .map(From::from) - .collect::>(); - - // additionally retrieve pending logs - if include_pending { - let best_block = take_weak!(self.client).chain_info().best_block_number; - let pending_logs = pending_logs(&*take_weak!(self.miner), best_block, &filter); + // retrieve pending logs + let pending = if include_pending { + let pending_logs = self.pending_logs(current_number, &filter); // remove logs about which client was already notified about let new_pending_logs: Vec<_> = pending_logs.iter() @@ -151,49 +188,56 @@ impl EthFilter for EthFilterClient // save all logs retrieved by client *previous_logs = pending_logs.into_iter().collect(); - // append logs array with new pending logs - logs.extend(new_pending_logs); - } - - let logs = limit_logs(logs, filter.limit); + new_pending_logs + } else { + Vec::new() + }; // save the number of the next block as a first block from which // we want to get logs *block_number = current_number + 1; - Ok(FilterChanges::Logs(logs)) + // retrieve logs in range from_block..min(BlockId::Latest..to_block) + let limit = filter.limit; + self.logs(filter) + .map(move |mut logs| { logs.extend(pending); logs }) // append fetched pending logs + .map(move |logs| limit_logs(logs, limit)) // limit the logs + .map(FilterChanges::Logs) + .boxed() } } } } - fn filter_logs(&self, index: Index) -> Result, Error> { - let mut polls = self.polls.lock(); + fn filter_logs(&self, index: Index) -> BoxFuture, Error> { + let mut polls = self.polls().lock(); match polls.poll(&index.value()) { 
Some(&PollFilter::Logs(ref _block_number, ref _previous_log, ref filter)) => { let include_pending = filter.to_block == Some(BlockNumber::Pending); let filter: EthcoreFilter = filter.clone().into(); - let mut logs = take_weak!(self.client).logs(filter.clone()) - .into_iter() - .map(From::from) - .collect::>(); - if include_pending { - let best_block = take_weak!(self.client).chain_info().best_block_number; - logs.extend(pending_logs(&*take_weak!(self.miner), best_block, &filter)); - } + // fetch pending logs. + let pending = if include_pending { + let best_block = self.best_block_number(); + self.pending_logs(best_block, &filter) + } else { + Vec::new() + }; - let logs = limit_logs(logs, filter.limit); - - Ok(logs) + // retrieve logs asynchronously, appending pending logs. + let limit = filter.limit; + self.logs(filter) + .map(move |mut logs| { logs.extend(pending); logs }) + .map(move |logs| limit_logs(logs, limit)) + .boxed() }, // just empty array - _ => Ok(Vec::new()), + _ => future::ok(Vec::new()).boxed() } } fn uninstall_filter(&self, index: Index) -> Result { - self.polls.lock().remove_poll(&index.value()); + self.polls().lock().remove_poll(&index.value()); Ok(true) } } diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index 1851f479e..d388e922a 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -34,6 +34,7 @@ use ethcore::basic_account::BasicAccount; use ethcore::encoded; use ethcore::executed::{Executed, ExecutionError}; use ethcore::ids::BlockId; +use ethcore::filter::Filter as EthcoreFilter; use ethcore::transaction::{Action, SignedTransaction, Transaction as EthTransaction}; use ethsync::LightSync; use rlp::UntrustedRlp; @@ -43,7 +44,9 @@ use util::{RwLock, Mutex, Uint, U256}; use futures::{future, Future, BoxFuture, IntoFuture}; use futures::sync::oneshot; +use v1::impls::eth_filter::Filterable; use v1::helpers::{CallRequest as CRequest, errors, limit_logs, dispatch}; +use 
v1::helpers::{PollFilter, PollManager}; use v1::helpers::block_import::is_major_importing; use v1::traits::Eth; use v1::types::{ @@ -55,7 +58,7 @@ use v1::metadata::Metadata; use util::Address; -/// Light client `ETH` RPC. +/// Light client `ETH` (and filter) RPC. pub struct EthClient { sync: Arc, client: Arc, @@ -63,6 +66,22 @@ pub struct EthClient { transaction_queue: Arc>, accounts: Arc, cache: Arc>, + polls: Mutex>, +} + +impl Clone for EthClient { + fn clone(&self) -> Self { + // each instance should have its own poll manager. + EthClient { + sync: self.sync.clone(), + client: self.client.clone(), + on_demand: self.on_demand.clone(), + transaction_queue: self.transaction_queue.clone(), + accounts: self.accounts.clone(), + cache: self.cache.clone(), + polls: Mutex::new(PollManager::new()), + } + } } // helper for internal error: on demand sender cancelled. @@ -90,6 +109,7 @@ impl EthClient { transaction_queue: transaction_queue, accounts: accounts, cache: cache, + polls: Mutex::new(PollManager::new()), } } @@ -153,12 +173,15 @@ impl EthClient { Some(hdr) => hdr, }; - sync.with_context(|ctx| on_demand.account(ctx, request::Account { + let maybe_fut = sync.with_context(|ctx| on_demand.account(ctx, request::Account { header: header, address: address, - })) - .map(|x| x.map_err(err_premature_cancel).boxed()) - .unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) + })); + + match maybe_fut { + Some(fut) => fut.map(Some).map_err(err_premature_cancel).boxed(), + None => future::err(errors::network_disabled()).boxed(), + } }).boxed() } @@ -234,6 +257,111 @@ impl EthClient { } }).boxed() } + + fn block(&self, id: BlockId) -> BoxFuture, Error> { + let (on_demand, sync) = (self.on_demand.clone(), self.sync.clone()); + + self.header(id).and_then(move |hdr| { + let req = match hdr { + Some(hdr) => request::Body::new(hdr), + None => return future::ok(None).boxed(), + }; + + match sync.with_context(move |ctx| on_demand.block(ctx, req)) { + Some(fut) => 
fut.map_err(err_premature_cancel).map(Some).boxed(), + None => future::err(errors::network_disabled()).boxed(), + } + }).boxed() + } + + // get a "rich" block structure + fn rich_block(&self, id: BlockId, include_txs: bool) -> BoxFuture, Error> { + let (on_demand, sync) = (self.on_demand.clone(), self.sync.clone()); + let (client, engine) = (self.client.clone(), self.client.engine().clone()); + + // helper for filling out a rich block once we've got a block and a score. + let fill_rich = move |block: encoded::Block, score: Option| { + let header = block.decode_header(); + let extra_info = engine.extra_info(&header); + RichBlock { + block: Block { + hash: Some(header.hash().into()), + size: Some(block.rlp().as_raw().len().into()), + parent_hash: header.parent_hash().clone().into(), + uncles_hash: header.uncles_hash().clone().into(), + author: header.author().clone().into(), + miner: header.author().clone().into(), + state_root: header.state_root().clone().into(), + transactions_root: header.transactions_root().clone().into(), + receipts_root: header.receipts_root().clone().into(), + number: Some(header.number().into()), + gas_used: header.gas_used().clone().into(), + gas_limit: header.gas_limit().clone().into(), + logs_bloom: header.log_bloom().clone().into(), + timestamp: header.timestamp().into(), + difficulty: header.difficulty().clone().into(), + total_difficulty: score.map(Into::into), + seal_fields: header.seal().into_iter().cloned().map(Into::into).collect(), + uncles: block.uncle_hashes().into_iter().map(Into::into).collect(), + transactions: match include_txs { + true => BlockTransactions::Full(block.view().localized_transactions().into_iter().map(Into::into).collect()), + false => BlockTransactions::Hashes(block.transaction_hashes().into_iter().map(Into::into).collect()), + }, + extra_data: Bytes::new(header.extra_data().to_vec()), + }, + extra_info: extra_info + } + }; + + // get the block itself. 
+ self.block(id).and_then(move |block| match block { + None => return future::ok(None).boxed(), + Some(block) => { + // then fetch the total difficulty (this is much easier after getting the block). + match client.score(id) { + Some(score) => future::ok(fill_rich(block, Some(score))).map(Some).boxed(), + None => { + // make a CHT request to fetch the chain score. + let req = cht::block_to_cht_number(block.number()) + .and_then(|num| client.cht_root(num as usize)) + .and_then(|root| request::HeaderProof::new(block.number(), root)); + + + let req = match req { + Some(req) => req, + None => { + // somehow the genesis block slipped past other checks. + // return it now. + let score = client.block_header(BlockId::Number(0)) + .expect("genesis always stored; qed") + .difficulty(); + + return future::ok(fill_rich(block, Some(score))).map(Some).boxed() + } + }; + + // three possible outcomes: + // - network is down. + // - we get a score, but our hash is non-canonical. + // - we get a score, and our hash is canonical. 
+ let maybe_fut = sync.with_context(move |ctx| on_demand.hash_and_score_by_number(ctx, req)); + match maybe_fut { + Some(fut) => fut.map(move |(hash, score)| { + let score = if hash == block.hash() { + Some(score) + } else { + None + }; + + Some(fill_rich(block, score)) + }).map_err(err_premature_cancel).boxed(), + None => return future::err(errors::network_disabled()).boxed(), + } + } + } + } + }).boxed() + } } impl Eth for EthClient { @@ -275,7 +403,10 @@ impl Eth for EthClient { } fn gas_price(&self) -> Result { - Ok(Default::default()) + Ok(self.cache.lock().gas_price_corpus() + .and_then(|c| c.median().cloned()) + .map(RpcU256::from) + .unwrap_or_else(Default::default)) } fn accounts(&self, meta: Metadata) -> BoxFuture, Error> { @@ -304,11 +435,11 @@ impl Eth for EthClient { } fn block_by_hash(&self, hash: RpcH256, include_txs: bool) -> BoxFuture, Error> { - future::err(errors::unimplemented(None)).boxed() + self.rich_block(BlockId::Hash(hash.into()), include_txs) } fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture, Error> { - future::err(errors::unimplemented(None)).boxed() + self.rich_block(num.into(), include_txs) } fn transaction_count(&self, address: RpcH160, num: Trailing) -> BoxFuture { @@ -484,19 +615,101 @@ impl Eth for EthClient { Err(errors::deprecated("Compilation of Solidity via RPC is deprecated".to_string())) } - fn logs(&self, _filter: Filter) -> Result, Error> { - Err(errors::unimplemented(None)) + fn logs(&self, filter: Filter) -> BoxFuture, Error> { + let limit = filter.limit; + + Filterable::logs(self, filter.into()) + .map(move|logs| limit_logs(logs, limit)) + .boxed() } fn work(&self, _timeout: Trailing) -> Result { - Err(errors::unimplemented(None)) + Err(errors::light_unimplemented(None)) } fn submit_work(&self, _nonce: RpcH64, _pow_hash: RpcH256, _mix_hash: RpcH256) -> Result { - Err(errors::unimplemented(None)) + Err(errors::light_unimplemented(None)) } fn submit_hashrate(&self, _rate: RpcU256, _id: RpcH256) 
-> Result { - Err(errors::unimplemented(None) + Err(errors::light_unimplemented(None) + } +} + +// This trait implementation triggers a blanket impl of `EthFilter`. +impl Filterable for EthClient { + fn best_block_number(&self) -> u64 { self.client.chain_info().best_block_number } + + fn block_hash(&self, id: BlockId) -> Option { + self.client.block_hash(id).map(Into::into) + } + + fn pending_transactions_hashes(&self, _block_number: u64) -> Vec<::util::H256> { + Vec::new() + } + + fn logs(&self, filter: EthcoreFilter) -> BoxFuture, Error> { + use std::collections::BTreeMap; + + use futures::stream::{self, Stream}; + use util::H2048; + + // early exit for "to" block before "from" block. + let best_number = self.client.chain_info().best_block_number; + let block_number = |id| match id { + BlockId::Earliest => Some(0), + BlockId::Latest | BlockId::Pending => Some(best_number), + BlockId::Hash(h) => self.client.block_header(BlockId::Hash(h)).map(|hdr| hdr.number()), + BlockId::Number(x) => Some(x), + }; + + match (block_number(filter.to_block), block_number(filter.from_block)) { + (Some(to), Some(from)) if to < from => return future::ok(Vec::new()).boxed(), + (Some(_), Some(_)) => {}, + _ => return future::err(errors::unknown_block()).boxed(), + } + + let maybe_future = self.sync.with_context(move |ctx| { + // find all headers which match the filter, and fetch the receipts for each one. + // match them with their numbers for easy sorting later. 
+ let bit_combos = filter.bloom_possibilities(); + let receipts_futures: Vec<_> = self.client.ancestry_iter(filter.to_block) + .take_while(|ref hdr| BlockId::Number(hdr.number()) != filter.from_block) + .take_while(|ref hdr| BlockId::Hash(hdr.hash()) != filter.from_block) + .filter(|ref hdr| { + let hdr_bloom = hdr.log_bloom(); + bit_combos.iter().find(|&bloom| hdr_bloom & *bloom == *bloom).is_some() + }) + .map(|hdr| (hdr.number(), request::BlockReceipts(hdr))) + .map(|(num, req)| self.on_demand.block_receipts(ctx, req).map(move |x| (num, x))) + .collect(); + + // as the receipts come in, find logs within them which match the filter. + // insert them into a BTreeMap to maintain order by number and block index. + stream::futures_unordered(receipts_futures) + .fold(BTreeMap::new(), move |mut matches, (num, receipts)| { + for (block_index, log) in receipts.into_iter().flat_map(|r| r.logs).enumerate() { + if filter.matches(&log) { + matches.insert((num, block_index), log.into()); + } + } + future::ok(matches) + }) // and then collect them into a vector. + .map(|matches| matches.into_iter().map(|(_, v)| v).collect()) + .map_err(err_premature_cancel) + }); + + match maybe_future { + Some(fut) => fut.boxed(), + None => future::err(errors::network_disabled()).boxed(), + } + } + + fn pending_logs(&self, _block_number: u64, _filter: &EthcoreFilter) -> Vec { + Vec::new() // light clients don't mine. 
+ } + + fn polls(&self) -> &Mutex> { + &self.polls } } diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index ed312c363..e8cec8eab 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -90,7 +90,7 @@ impl EthTester { let hashrates = Arc::new(Mutex::new(HashMap::new())); let external_miner = Arc::new(ExternalMiner::new(hashrates.clone())); let eth = EthClient::new(&client, &snapshot, &sync, &opt_ap, &miner, &external_miner, options).to_delegate(); - let filter = EthFilterClient::new(&client, &miner).to_delegate(); + let filter = EthFilterClient::new(client.clone(), miner.clone()).to_delegate(); let dispatcher = FullDispatcher::new(Arc::downgrade(&client), Arc::downgrade(&miner)); let sign = SigningUnsafeClient::new(&opt_ap, dispatcher).to_delegate(); diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 365ad9320..941263335 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -162,8 +162,8 @@ build_rpc_trait! { fn compile_serpent(&self, String) -> Result; /// Returns logs matching given filter object. - #[rpc(name = "eth_getLogs")] - fn logs(&self, Filter) -> Result, Error>; + #[rpc(async, name = "eth_getLogs")] + fn logs(&self, Filter) -> BoxFuture, Error>; /// Returns the hash of the current block, the seedHash, and the boundary condition to be met. #[rpc(name = "eth_getWork")] @@ -196,12 +196,12 @@ build_rpc_trait! { fn new_pending_transaction_filter(&self) -> Result; /// Returns filter changes since last poll. - #[rpc(name = "eth_getFilterChanges")] - fn filter_changes(&self, Index) -> Result; + #[rpc(async, name = "eth_getFilterChanges")] + fn filter_changes(&self, Index) -> BoxFuture; /// Returns all logs matching given filter (in a range 'from' - 'to'). - #[rpc(name = "eth_getFilterLogs")] - fn filter_logs(&self, Index) -> Result, Error>; + #[rpc(async, name = "eth_getFilterLogs")] + fn filter_logs(&self, Index) -> BoxFuture, Error>; /// Uninstalls filter. 
#[rpc(name = "eth_uninstallFilter")] From 52eae66c72433bbfb359ce19a2935dff7b071ff1 Mon Sep 17 00:00:00 2001 From: keorn Date: Wed, 12 Apr 2017 11:15:13 +0100 Subject: [PATCH 18/18] Add raw hash signing (#5423) * add sign any * Add RPC signMessage call to JS API * Add signMessage to JSON RPC docs * PostSignTransaction -> EthSignMessage * fix doc typo * revert incorrect naming --- js/src/api/rpc/parity/parity.js | 5 +++++ js/src/jsonrpc/interfaces/parity.js | 26 ++++++++++++++++++++++ rpc/src/v1/helpers/dispatch.rs | 6 ++--- rpc/src/v1/helpers/requests.rs | 6 ++--- rpc/src/v1/impls/parity_accounts.rs | 15 +++++++++++-- rpc/src/v1/impls/signing.rs | 4 ++-- rpc/src/v1/impls/signing_unsafe.rs | 2 +- rpc/src/v1/tests/mocked/parity_accounts.rs | 17 ++++++++++++++ rpc/src/v1/tests/mocked/signer.rs | 4 ++-- rpc/src/v1/traits/parity_accounts.rs | 6 ++++- rpc/src/v1/types/confirmations.rs | 8 +++---- 11 files changed, 81 insertions(+), 18 deletions(-) diff --git a/js/src/api/rpc/parity/parity.js b/js/src/api/rpc/parity/parity.js index f38e29e7b..16b14eaef 100644 --- a/js/src/api/rpc/parity/parity.js +++ b/js/src/api/rpc/parity/parity.js @@ -522,6 +522,11 @@ export default class Parity { .then(outNumber); } + signMessage (address, password, messageHash) { + return this._transport + .execute('parity_signMessage', inAddress(address), password, inHex(messageHash)); + } + testPassword (account, password) { return this._transport .execute('parity_testPassword', inAddress(account), password); diff --git a/js/src/jsonrpc/interfaces/parity.js b/js/src/jsonrpc/interfaces/parity.js index 3e6fa8475..e7a5f46c9 100644 --- a/js/src/jsonrpc/interfaces/parity.js +++ b/js/src/jsonrpc/interfaces/parity.js @@ -1881,5 +1881,31 @@ export default { desc: 'Decrypted message.', example: withComment('0x68656c6c6f20776f726c64', 'hello world') } + }, + + signMessage: { + desc: 'Sign the hashed message bytes with the given account.', + params: [ + { + type: Address, + desc: 'Account which signs the 
message.', + example: '0xc171033d5cbff7175f29dfd3a63dda3d6f8f385e' + }, + { + type: String, + desc: 'Passphrase to unlock the account.', + example: 'password1' + }, + { + type: Data, + desc: 'Hashed message.', + example: '0xbc36789e7a1e281436464229828f817d6612f7b477d66591ff96a9e064bcc98a' + } + ], + returns: { + type: Data, + desc: 'Message signature.', + example: '0x1d9e33a8cf8bfc089a172bca01da462f9e359c6cb1b0f29398bc884e4d18df4f78588aee4fb5cc067ca62d2abab995e0bba29527be6ac98105b0320020a2efaf00' + } } }; diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index d58a211ed..c6af8d7e7 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -474,7 +474,7 @@ pub fn execute( .map(ConfirmationResponse::SignTransaction) ).boxed() }, - ConfirmationPayload::Signature(address, mut data) => { + ConfirmationPayload::EthSignMessage(address, mut data) => { let mut message_data = format!("\x19Ethereum Signed Message:\n{}", data.len()) .into_bytes(); @@ -574,8 +574,8 @@ pub fn from_rpc(payload: RpcConfirmationPayload, default_account: Address, di RpcConfirmationPayload::Decrypt(RpcDecryptRequest { address, msg }) => { future::ok(ConfirmationPayload::Decrypt(address.into(), msg.into())).boxed() }, - RpcConfirmationPayload::Signature(RpcSignRequest { address, data }) => { - future::ok(ConfirmationPayload::Signature(address.into(), data.into())).boxed() + RpcConfirmationPayload::EthSignMessage(RpcSignRequest { address, data }) => { + future::ok(ConfirmationPayload::EthSignMessage(address.into(), data.into())).boxed() }, } } diff --git a/rpc/src/v1/helpers/requests.rs b/rpc/src/v1/helpers/requests.rs index 4a3a3704d..aa3a4c3d4 100644 --- a/rpc/src/v1/helpers/requests.rs +++ b/rpc/src/v1/helpers/requests.rs @@ -113,8 +113,8 @@ pub enum ConfirmationPayload { SendTransaction(FilledTransactionRequest), /// Sign Transaction SignTransaction(FilledTransactionRequest), - /// Sign request - Signature(Address, Bytes), + /// Sign a message 
with an Ethereum specific security prefix. + EthSignMessage(Address, Bytes), /// Decrypt request Decrypt(Address, Bytes), } @@ -124,7 +124,7 @@ impl ConfirmationPayload { match *self { ConfirmationPayload::SendTransaction(ref request) => request.from, ConfirmationPayload::SignTransaction(ref request) => request.from, - ConfirmationPayload::Signature(ref address, _) => *address, + ConfirmationPayload::EthSignMessage(ref address, _) => *address, ConfirmationPayload::Decrypt(ref address, _) => *address, } } diff --git a/rpc/src/v1/impls/parity_accounts.rs b/rpc/src/v1/impls/parity_accounts.rs index c22285cc9..cc206696f 100644 --- a/rpc/src/v1/impls/parity_accounts.rs +++ b/rpc/src/v1/impls/parity_accounts.rs @@ -17,7 +17,7 @@ //! Account management (personal) rpc implementation use std::sync::{Arc, Weak}; use std::collections::BTreeMap; -use util::{Address}; +use util::Address; use ethkey::{Brain, Generator, Secret}; use ethstore::KeyFile; @@ -27,7 +27,7 @@ use jsonrpc_core::Error; use v1::helpers::errors; use v1::helpers::accounts::unwrap_provider; use v1::traits::ParityAccounts; -use v1::types::{H160 as RpcH160, H256 as RpcH256, DappId, Derive, DeriveHierarchical, DeriveHash}; +use v1::types::{H160 as RpcH160, H256 as RpcH256, H520 as RpcH520, DappId, Derive, DeriveHierarchical, DeriveHash}; /// Account management (personal) rpc implementation. pub struct ParityAccountsClient { @@ -334,6 +334,17 @@ impl ParityAccounts for ParityAccountsClient { .map(Into::into) .map_err(|e| errors::account("Could not export account.", e)) } + + fn sign_message(&self, addr: RpcH160, password: String, message: RpcH256) -> Result { + self.account_provider()? 
+ .sign( + addr.into(), + Some(password), + message.into() + ) + .map(Into::into) + .map_err(|e| errors::account("Could not sign message.", e)) + } } fn into_vec(a: Vec) -> Vec where diff --git a/rpc/src/v1/impls/signing.rs b/rpc/src/v1/impls/signing.rs index dfb0c4f80..376418ead 100644 --- a/rpc/src/v1/impls/signing.rs +++ b/rpc/src/v1/impls/signing.rs @@ -140,7 +140,7 @@ impl ParitySigning for SigningQueueClient { fn post_sign(&self, meta: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture, Error> { let pending = self.pending.clone(); self.dispatch( - RpcConfirmationPayload::Signature((address.clone(), data).into()), + RpcConfirmationPayload::EthSignMessage((address.clone(), data).into()), DefaultAccount::Provided(address.into()), meta.origin ).map(move |result| match result { @@ -216,7 +216,7 @@ impl EthSigning for SigningQueueClient { fn sign(&self, meta: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture { let res = self.dispatch( - RpcConfirmationPayload::Signature((address.clone(), data).into()), + RpcConfirmationPayload::EthSignMessage((address.clone(), data).into()), address.into(), meta.origin, ); diff --git a/rpc/src/v1/impls/signing_unsafe.rs b/rpc/src/v1/impls/signing_unsafe.rs index 8fbb6595a..6fb483c5e 100644 --- a/rpc/src/v1/impls/signing_unsafe.rs +++ b/rpc/src/v1/impls/signing_unsafe.rs @@ -78,7 +78,7 @@ impl EthSigning for SigningUnsafeClient type Metadata = Metadata; fn sign(&self, _: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture { - self.handle(RpcConfirmationPayload::Signature((address.clone(), data).into()), address.into()) + self.handle(RpcConfirmationPayload::EthSignMessage((address.clone(), data).into()), address.into()) .then(|res| match res { Ok(RpcConfirmationResponse::Signature(signature)) => Ok(signature), Err(e) => Err(e), diff --git a/rpc/src/v1/tests/mocked/parity_accounts.rs b/rpc/src/v1/tests/mocked/parity_accounts.rs index 949498c61..b3ea644fa 100644 --- a/rpc/src/v1/tests/mocked/parity_accounts.rs +++ 
b/rpc/src/v1/tests/mocked/parity_accounts.rs @@ -500,3 +500,20 @@ fn should_export_account() { println!("Response: {:?}", response); assert_eq!(result, Some(response.into())); } + +#[test] +fn should_sign_message() { + let tester = setup(); + let hash = tester.accounts + .insert_account( + "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a".parse().unwrap(), + "password1") + .expect("account should be inserted ok"); + + assert_eq!(hash, "c171033d5cbff7175f29dfd3a63dda3d6f8f385e".parse().unwrap()); + + let request = r#"{"jsonrpc": "2.0", "method": "parity_signMessage", "params": ["0xc171033d5cbff7175f29dfd3a63dda3d6f8f385e", "password1", "0xbc36789e7a1e281436464229828f817d6612f7b477d66591ff96a9e064bcc98a"], "id": 3}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x1d9e33a8cf8bfc089a172bca01da462f9e359c6cb1b0f29398bc884e4d18df4f78588aee4fb5cc067ca62d2abab995e0bba29527be6ac98105b0320020a2efaf00","id":3}"#; + let res = tester.io.handle_request_sync(&request); + assert_eq!(res, Some(response.into())); +} diff --git a/rpc/src/v1/tests/mocked/signer.rs b/rpc/src/v1/tests/mocked/signer.rs index a20e94bcc..973e33528 100644 --- a/rpc/src/v1/tests/mocked/signer.rs +++ b/rpc/src/v1/tests/mocked/signer.rs @@ -90,7 +90,7 @@ fn should_return_list_of_items_to_confirm() { nonce: None, condition: None, }), Origin::Dapps("http://parity.io".into())).unwrap(); - tester.signer.add_request(ConfirmationPayload::Signature(1.into(), vec![5].into()), Origin::Unknown).unwrap(); + tester.signer.add_request(ConfirmationPayload::EthSignMessage(1.into(), vec![5].into()), Origin::Unknown).unwrap(); // when let request = r#"{"jsonrpc":"2.0","method":"signer_requestsToConfirm","params":[],"id":1}"#; @@ -163,7 +163,7 @@ fn should_not_remove_transaction_if_password_is_invalid() { fn should_not_remove_sign_if_password_is_invalid() { // given let tester = signer_tester(); - tester.signer.add_request(ConfirmationPayload::Signature(0.into(), vec![5].into()), Origin::Unknown).unwrap(); + 
tester.signer.add_request(ConfirmationPayload::EthSignMessage(0.into(), vec![5].into()), Origin::Unknown).unwrap(); assert_eq!(tester.signer.requests().len(), 1); // when diff --git a/rpc/src/v1/traits/parity_accounts.rs b/rpc/src/v1/traits/parity_accounts.rs index 46372560c..7d49148b1 100644 --- a/rpc/src/v1/traits/parity_accounts.rs +++ b/rpc/src/v1/traits/parity_accounts.rs @@ -19,7 +19,7 @@ use std::collections::BTreeMap; use jsonrpc_core::Error; use ethstore::KeyFile; -use v1::types::{H160, H256, DappId, DeriveHash, DeriveHierarchical}; +use v1::types::{H160, H256, H520, DappId, DeriveHash, DeriveHierarchical}; build_rpc_trait! { /// Personal Parity rpc interface. @@ -180,5 +180,9 @@ build_rpc_trait! { /// Exports an account with given address if provided password matches. #[rpc(name = "parity_exportAccount")] fn export_account(&self, H160, String) -> Result<KeyFile, Error>; + + /// Sign raw hash with the key corresponding to address and password. + #[rpc(name = "parity_signMessage")] + fn sign_message(&self, H160, String, H256) -> Result<H520, Error>; } } diff --git a/rpc/src/v1/types/confirmations.rs b/rpc/src/v1/types/confirmations.rs index f749df449..ac1fba4fe 100644 --- a/rpc/src/v1/types/confirmations.rs +++ b/rpc/src/v1/types/confirmations.rs @@ -57,7 +57,7 @@ impl fmt::Display for ConfirmationPayload { match *self { ConfirmationPayload::SendTransaction(ref transaction) => write!(f, "{}", transaction), ConfirmationPayload::SignTransaction(ref transaction) => write!(f, "(Sign only) {}", transaction), - ConfirmationPayload::Signature(ref sign) => write!(f, "{}", sign), + ConfirmationPayload::EthSignMessage(ref sign) => write!(f, "{}", sign), ConfirmationPayload::Decrypt(ref decrypt) => write!(f, "{}", decrypt), } } @@ -169,7 +169,7 @@ pub enum ConfirmationPayload { SignTransaction(TransactionRequest), /// Signature #[serde(rename="sign")] - Signature(SignRequest), + EthSignMessage(SignRequest), /// Decryption #[serde(rename="decrypt")] Decrypt(DecryptRequest), @@ -180,7 +180,7 @@ 
impl From<helpers::ConfirmationPayload> for ConfirmationPayload { match c { helpers::ConfirmationPayload::SendTransaction(t) => ConfirmationPayload::SendTransaction(t.into()), helpers::ConfirmationPayload::SignTransaction(t) => ConfirmationPayload::SignTransaction(t.into()), - helpers::ConfirmationPayload::Signature(address, data) => ConfirmationPayload::Signature(SignRequest { + helpers::ConfirmationPayload::EthSignMessage(address, data) => ConfirmationPayload::EthSignMessage(SignRequest { address: address.into(), data: data.into(), }), @@ -255,7 +255,7 @@ mod tests { // given let request = helpers::ConfirmationRequest { id: 15.into(), - payload: helpers::ConfirmationPayload::Signature(1.into(), vec![5].into()), + payload: helpers::ConfirmationPayload::EthSignMessage(1.into(), vec![5].into()), origin: Origin::Rpc("test service".into()), };