From 4173ecf2a58a2bb40949651b05c2787638558211 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 12 Dec 2016 22:59:05 +0100 Subject: [PATCH 01/42] light: begin header chain --- ethcore/light/src/client/header_chain.rs | 185 ++++++++++++++++++ .../light/src/{client.rs => client/mod.rs} | 18 +- 2 files changed, 200 insertions(+), 3 deletions(-) create mode 100644 ethcore/light/src/client/header_chain.rs rename ethcore/light/src/{client.rs => client/mod.rs} (93%) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs new file mode 100644 index 000000000..11e7ea79e --- /dev/null +++ b/ethcore/light/src/client/header_chain.rs @@ -0,0 +1,185 @@ +// Copyright 2015, 2016 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Light client header chain. +//! +//! Unlike a full node's `BlockChain` this doesn't store much in the database. +//! It stores candidates for the last 2048-4096 blocks as well as CHT roots for +//! historical blocks all the way to the genesis. +//! +//! This is separate from the `BlockChain` for two reasons: +//! - It stores only headers (and a pruned subset of them) +//! - To allow for flexibility in the database layout once that's incorporated. +// TODO: use DB instead of memory. 
+ +use std::collections::{BTreeMap, HashMap}; + +use ethcore::header::Header; +use ethcore::error::BlockError; +use ethcore::ids::BlockId; +use ethcore::views::HeaderView; +use util::{Bytes, H256, U256, Mutex, RwLock}; + +/// Delay this many blocks before producing a CHT. +const CHT_DELAY: u64 = 2048; + +/// Generate CHT roots of this size. +// TODO: move into more generic module. +const CHT_SIZE: u64 = 2048; + +#[derive(Debug, Clone)] +struct BestBlock { + hash: H256, + number: u64, + total_difficulty: U256, +} + +// candidate block description. +struct Candidate { + hash: H256, + parent_hash: H256, + total_difficulty: U256, +} + +struct Entry { + candidates: Vec, + canonical_hash: H256, +} + +/// Header chain. See module docs for more details. +pub struct HeaderChain { + genesis_header: Bytes, // special-case the genesis. + candidates: RwLock>, + headers: RwLock>, + best_block: RwLock, + cht_roots: Mutex>, +} + +impl HeaderChain { + /// Create a new header chain given this genesis block. + pub fn new(genesis: &[u8]) -> Self { + let g_view = HeaderView::new(genesis); + + HeaderChain { + genesis_header: genesis.to_owned(), + best_block: RwLock::new(BestBlock { + hash: g_view.hash(), + number: 0, + total_difficulty: g_view.difficulty(), + }), + candidates: RwLock::new(BTreeMap::new()), + headers: RwLock::new(HashMap::new()), + cht_roots: Mutex::new(Vec::new()), + } + } + + /// Insert a pre-verified header. + pub fn insert(&self, header: Bytes) -> Result<(), BlockError> { + let view = HeaderView::new(&header); + let hash = view.hash(); + let number = view.number(); + let parent_hash = view.parent_hash(); + + // find parent details. 
+ let parent_td = { + if number == 1 { + let g_view = HeaderView::new(&self.genesis_header); + g_view.difficulty() + } else { + let maybe_td = self.candidates.read().get(&(number - 1)) + .and_then(|entry| entry.candidates.iter().find(|c| c.hash == parent_hash)) + .map(|c| c.total_difficulty); + + match maybe_td { + Some(td) => td, + None => return Err(BlockError::UnknownParent(parent_hash)), + } + } + }; + + let total_difficulty = parent_td + view.difficulty(); + + // insert headers and candidates entries. + let mut candidates = self.candidates.write(); + candidates.entry(number).or_insert_with(|| Entry { candidates: Vec::new(), canonical_hash: hash}) + .candidates.push(Candidate { + hash: hash, + parent_hash: parent_hash, + total_difficulty: total_difficulty, + }); + + self.headers.write().insert(hash, header.clone()); + + // reorganize ancestors so canonical entries are first in their + // respective candidates vectors. + if self.best_block.read().total_difficulty < total_difficulty { + let mut canon_hash = hash; + for (_, entry) in candidates.iter_mut().rev().skip_while(|&(height, _)| *height > number) { + if entry.canonical_hash == canon_hash { break; } + + let canon = entry.candidates.iter().find(|x| x.hash == canon_hash) + .expect("blocks are only inserted if parent is present; or this is the block we just added; qed"); + + // what about reorgs > CHT_SIZE + CHT_DELAY? + canon_hash = canon.parent_hash; + } + + *self.best_block.write() = BestBlock { + hash: hash, + number: number, + total_difficulty: total_difficulty, + }; + + // produce next CHT root if it's time. 
+ let earliest_era = *candidates.keys().next().expect("at least one era just created; qed"); + if earliest_era + CHT_DELAY + CHT_SIZE < number { + let values: Vec<_> = (0..CHT_SIZE).map(|x| x + earliest_era) + .map(|x| candidates.remove(&x).map(|entry| (x, entry))) + .map(|x| x.expect("all eras stored are sequential with no gaps; qed")) + .map(|(x, entry)| (::rlp::encode(&x), ::rlp::encode(&entry.canonical_hash))) + .map(|(k, v)| (k.to_vec(), v.to_vec())) + .collect(); + + let cht_root = ::util::triehash::trie_root(values); + debug!(target: "chain", "Produced CHT {} root: {:?}", (earliest_era - 1) % CHT_SIZE, cht_root); + + self.cht_roots.lock().push(cht_root); + } + } + + Ok(()) + } + + /// Get a block header. In the case of query by number, only canonical blocks + /// will be returned. + pub fn block_header(&self, id: BlockId) -> Option { + match id { + BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_header.clone()), + BlockId::Hash(hash) => self.headers.read().get(&hash).map(|x| x.to_vec()), + BlockId::Number(num) => { + if self.best_block.read().number < num { return None } + + self.candidates.read().get(&num).map(|entry| entry.canonical_hash) + .and_then(|hash| self.headers.read().get(&hash).map(|x| x.to_vec())) + } + BlockId::Latest | BlockId::Pending => { + let hash = self.best_block.read().hash; + self.headers.read().get(&hash).map(|x| x.to_vec()) + } + } + } + +} diff --git a/ethcore/light/src/client.rs b/ethcore/light/src/client/mod.rs similarity index 93% rename from ethcore/light/src/client.rs rename to ethcore/light/src/client/mod.rs index 73e85b31c..206e7652a 100644 --- a/ethcore/light/src/client.rs +++ b/ethcore/light/src/client/mod.rs @@ -34,6 +34,8 @@ use util::{Bytes, Mutex}; use provider::Provider; use request; +mod header_chain; + /// Light client implementation. pub struct Client { _engine: Arc, @@ -58,7 +60,7 @@ impl Client { /// Import a local transaction. 
pub fn import_own_transaction(&self, tx: SignedTransaction) { self.tx_pool.lock().insert(tx.hash(), tx); - } + } /// Fetch a vector of all pending transactions. pub fn pending_transactions(&self) -> Vec { @@ -74,6 +76,16 @@ impl Client { pub fn queue_info(&self) -> QueueInfo { self.header_queue.queue_info() } + + /// Best block number. + pub fn best_block_number(&self) -> u64 { + 0 + } + + /// Best block hash. + pub fn best_block_hash(&self) -> u64 { + unimplemented!() + } } // dummy implementation -- may draw from canonical cache further on. @@ -115,6 +127,6 @@ impl Provider for Client { } fn pending_transactions(&self) -> Vec { - Vec::new() + Client::pending_transactions(self) } -} \ No newline at end of file +} From c2264bed274f0d7109c97121c1dbfe8619c6d7d1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 13 Dec 2016 00:08:39 +0100 Subject: [PATCH 02/42] light: skeleton for sync service --- sync/src/light/mod.rs | 111 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 sync/src/light/mod.rs diff --git a/sync/src/light/mod.rs b/sync/src/light/mod.rs new file mode 100644 index 000000000..5806dd6d2 --- /dev/null +++ b/sync/src/light/mod.rs @@ -0,0 +1,111 @@ +// Copyright 2015, 2016 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Light client synchronization. +//! +//! 
This will synchronize the header chain using LES messages. +//! Dataflow is largely one-directional as headers are pushed into +//! the light client queue for import. Where possible, they are batched +//! in groups. +//! +//! This is written assuming that the client and sync service are running +//! in the same binary; unlike a full node + +use std::collections::{BinaryHeap, HashMap}; +use std::sync::Arc; + +use light::Client; +use light::net::{Handler, EventContext, Capabilities}; +use light::request; +use network: PeerId; +use util::{U256, H256}; + +struct Peer { + head_td: U256, + head_hash: H256, + head_num: u64, +} + +// The block downloader. +// This is instantiated with a starting and a target block +// and produces a priority queue of requests for headers which should be +// fulfilled. +struct Downloader { + start: u64, + target: (H256, u64), + requests: BinaryHeap, +} + +impl Downloader { + // create a new downloader. + fn new(start: u64, target: (H256, u64)) -> Self { + Downloader { + start: start, + target: target, + requests: BinaryHeap::new(), + } + } +} + +/// Light client synchronization manager. See module docs for more details. +pub struct LightSync { + best_seen: Mutex>, // best seen block on the network. + peers: RwLock>, // peers which are relevant to synchronization. 
+ client: Arc, + downloader: Downloader, + assigned_requests: HashMap, +} + +impl Handler for LightSync { + fn on_connect(&self, ctx: &EventContext, status: &Status, capabilities: &Capabilities) { + if !capabilities.serve_headers { + trace!(target: "sync", "Ignoring irrelevant peer: {}", ctx.peer()); + return; + } + + { + let mut best = self.best_seen.lock(); + if best_seen.as_ref().map_or(true, |ref best| status.head_td > best.1) { + *best_seen = Some(status.head_hash, status.head_td) + } + } + + self.peers.write().insert(ctx.peer(), Peer { + head_td: status.head_td, + head_hash: status.head_hash, + head_num: status.head_num, + }); + } +} + +impl LightSync { + fn assign_request(&self, p-eer: PeerId); +} + +// public API +impl LightSync { + /// Create a new instance of `LightSync`. + /// + /// This won't do anything until registered as a handler + /// so it can receive + pub fn new(client: Arc) -> Self { + LightSync { + best_seen: Mutex::new(None), + peers: HashMap::new(), + client: client, + } + } +} From 45ef986c0474b3ce16f990e7340caa1df4c34a00 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 13 Dec 2016 14:48:03 +0100 Subject: [PATCH 03/42] light: finish basic header chain and add tests --- Cargo.lock | 7 + ethcore/light/Cargo.toml | 3 +- ethcore/light/src/client/header_chain.rs | 197 ++++++++++++++++++++--- ethcore/light/src/client/mod.rs | 66 +++++--- ethcore/light/src/lib.rs | 3 +- ethcore/light/src/net/context.rs | 10 +- ethcore/light/src/net/mod.rs | 62 +++---- ethcore/light/src/net/status.rs | 2 +- 8 files changed, 270 insertions(+), 80 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eb86cd47b..d6b90f602 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -469,6 +469,7 @@ dependencies = [ "ethcore-util 1.5.0", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", + "smallvec 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ 
-1681,6 +1682,11 @@ name = "smallvec" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "smallvec" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "solicit" version = "0.4.4" @@ -2152,6 +2158,7 @@ dependencies = [ "checksum slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6dbdd334bd28d328dad1c41b0ea662517883d8880d8533895ef96c8003dec9c4" "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23" "checksum smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "fcc8d19212aacecf95e4a7a2179b26f7aeb9732a915cf01f05b0d3e044865410" +"checksum smallvec 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3a3c84984c278afe61a46e19868e8b23e2ee3be5b3cc6dea6edad4893bc6c841" "checksum solicit 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "172382bac9424588d7840732b250faeeef88942e37b6e35317dce98cafdd75b2" "checksum spmc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "93bdab61c1a413e591c4d17388ffa859eaff2df27f1e13a5ec8b716700605adf" "checksum stable-heap 0.1.0 (git+https://github.com/carllerche/stable-heap?rev=3c5cd1ca47)" = "" diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml index 37d7034d2..67ae166dc 100644 --- a/ethcore/light/Cargo.toml +++ b/ethcore/light/Cargo.toml @@ -19,7 +19,8 @@ ethcore-io = { path = "../../util/io" } ethcore-ipc = { path = "../../ipc/rpc", optional = true } rlp = { path = "../../util/rlp" } time = "0.1" +smallvec = "0.3.1" [features] default = [] -ipc = ["ethcore-ipc", "ethcore-ipc-codegen"] \ No newline at end of file +ipc = ["ethcore-ipc", "ethcore-ipc-codegen"] diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 11e7ea79e..299df04f3 100644 --- a/ethcore/light/src/client/header_chain.rs +++ 
b/ethcore/light/src/client/header_chain.rs @@ -23,28 +23,35 @@ //! This is separate from the `BlockChain` for two reasons: //! - It stores only headers (and a pruned subset of them) //! - To allow for flexibility in the database layout once that's incorporated. -// TODO: use DB instead of memory. +// TODO: use DB instead of memory. DB Layout: just the contents of `candidates`/`headers` +// use std::collections::{BTreeMap, HashMap}; -use ethcore::header::Header; use ethcore::error::BlockError; use ethcore::ids::BlockId; use ethcore::views::HeaderView; use util::{Bytes, H256, U256, Mutex, RwLock}; -/// Delay this many blocks before producing a CHT. +use smallvec::SmallVec; + +/// Delay this many blocks before producing a CHT. required to be at +/// least 1 but should be more in order to be resilient against reorgs. const CHT_DELAY: u64 = 2048; /// Generate CHT roots of this size. -// TODO: move into more generic module. +// TODO: move CHT definition/creation into more generic module. const CHT_SIZE: u64 = 2048; +/// Information about a block. #[derive(Debug, Clone)] -struct BestBlock { - hash: H256, - number: u64, - total_difficulty: U256, +pub struct BlockDescriptor { + /// The block's hash + pub hash: H256, + /// The block's number + pub number: u64, + /// The block's total difficulty. + pub total_difficulty: U256, } // candidate block description. @@ -55,7 +62,7 @@ struct Candidate { } struct Entry { - candidates: Vec, + candidates: SmallVec<[Candidate; 3]>, // 3 arbitrarily chosen canonical_hash: H256, } @@ -64,7 +71,7 @@ pub struct HeaderChain { genesis_header: Bytes, // special-case the genesis. 
candidates: RwLock>, headers: RwLock>, - best_block: RwLock, + best_block: RwLock, cht_roots: Mutex>, } @@ -75,7 +82,7 @@ impl HeaderChain { HeaderChain { genesis_header: genesis.to_owned(), - best_block: RwLock::new(BestBlock { + best_block: RwLock::new(BlockDescriptor { hash: g_view.hash(), number: 0, total_difficulty: g_view.difficulty(), @@ -114,7 +121,7 @@ impl HeaderChain { // insert headers and candidates entries. let mut candidates = self.candidates.write(); - candidates.entry(number).or_insert_with(|| Entry { candidates: Vec::new(), canonical_hash: hash}) + candidates.entry(number).or_insert_with(|| Entry { candidates: SmallVec::new(), canonical_hash: hash}) .candidates.push(Candidate { hash: hash, parent_hash: parent_hash, @@ -137,7 +144,7 @@ impl HeaderChain { canon_hash = canon.parent_hash; } - *self.best_block.write() = BestBlock { + *self.best_block.write() = BlockDescriptor { hash: hash, number: number, total_difficulty: total_difficulty, @@ -145,13 +152,24 @@ impl HeaderChain { // produce next CHT root if it's time. 
let earliest_era = *candidates.keys().next().expect("at least one era just created; qed"); - if earliest_era + CHT_DELAY + CHT_SIZE < number { - let values: Vec<_> = (0..CHT_SIZE).map(|x| x + earliest_era) - .map(|x| candidates.remove(&x).map(|entry| (x, entry))) - .map(|x| x.expect("all eras stored are sequential with no gaps; qed")) - .map(|(x, entry)| (::rlp::encode(&x), ::rlp::encode(&entry.canonical_hash))) - .map(|(k, v)| (k.to_vec(), v.to_vec())) - .collect(); + if earliest_era + CHT_DELAY + CHT_SIZE <= number { + let mut values = Vec::with_capacity(CHT_SIZE as usize); + { + let mut headers = self.headers.write(); + for i in (0..CHT_SIZE).map(|x| x + earliest_era) { + let era_entry = candidates.remove(&i) + .expect("all eras are sequential with no gaps; qed"); + + for ancient in &era_entry.candidates { + headers.remove(&ancient.hash); + } + + values.push(( + ::rlp::encode(&i).to_vec(), + ::rlp::encode(&era_entry.canonical_hash).to_vec(), + )); + } + } let cht_root = ::util::triehash::trie_root(values); debug!(target: "chain", "Produced CHT {} root: {:?}", (earliest_era - 1) % CHT_SIZE, cht_root); @@ -165,7 +183,7 @@ impl HeaderChain { /// Get a block header. In the case of query by number, only canonical blocks /// will be returned. - pub fn block_header(&self, id: BlockId) -> Option { + pub fn get_header(&self, id: BlockId) -> Option { match id { BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_header.clone()), BlockId::Hash(hash) => self.headers.read().get(&hash).map(|x| x.to_vec()), @@ -182,4 +200,141 @@ impl HeaderChain { } } + /// Get the nth CHT root, if it's been computed. + /// + /// CHT root 0 is from block `1..2048`. + /// CHT root 1 is from block `2049..4096` + /// and so on. + /// + /// This is because it's assumed that the genesis hash is known, + /// so including it within a CHT would be redundant. + pub fn cht_root(&self, n: usize) -> Option { + self.cht_roots.lock().get(n).map(|h| h.clone()) + } + + /// Get the genesis hash. 
+ pub fn genesis_hash(&self) -> H256 { + use util::Hashable; + + self.genesis_header.sha3() + } + + /// Get the best block's data. + pub fn best_block(&self) -> BlockDescriptor { + self.best_block.read().clone() + } + + /// If there is a gap between the genesis and the rest + /// of the stored blocks, return the first post-gap block. + pub fn first_block(&self) -> Option { + let candidates = self.candidates.read(); + match candidates.iter().next() { + None | Some((&1, _)) => None, + Some((&height, entry)) => Some(BlockDescriptor { + number: height, + hash: entry.canonical_hash, + total_difficulty: entry.candidates.iter().find(|x| x.hash == entry.canonical_hash) + .expect("entry always stores canonical candidate; qed").total_difficulty, + }) + } + } +} + +#[cfg(test)] +mod tests { + use super::HeaderChain; + use ethcore::ids::BlockId; + use ethcore::header::Header; + use ethcore::spec::Spec; + + #[test] + fn it_works() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + + let chain = HeaderChain::new(&::rlp::encode(&genesis_header)); + + let mut parent_hash = genesis_header.hash(); + let mut rolling_timestamp = genesis_header.timestamp(); + for i in 1..10000 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i.into()); + + chain.insert(::rlp::encode(&header).to_vec()); + + parent_hash = header.hash(); + rolling_timestamp += 10; + } + + assert!(chain.get_header(BlockId::Number(10)).is_none()); + assert!(chain.get_header(BlockId::Number(9000)).is_some()); + assert!(chain.cht_root(2).is_some()); + assert!(chain.cht_root(3).is_none()); + } + + #[test] + fn reorganize() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + + let chain = HeaderChain::new(&::rlp::encode(&genesis_header)); + + let mut parent_hash = genesis_header.hash(); + let mut rolling_timestamp = 
genesis_header.timestamp(); + for i in 1..6 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i.into()); + + chain.insert(::rlp::encode(&header).to_vec()).unwrap(); + + parent_hash = header.hash(); + rolling_timestamp += 10; + } + + { + let mut rolling_timestamp = rolling_timestamp; + let mut parent_hash = parent_hash; + for i in 6..16 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i.into()); + + chain.insert(::rlp::encode(&header).to_vec()).unwrap(); + + parent_hash = header.hash(); + rolling_timestamp += 10; + } + } + + assert_eq!(chain.best_block().number, 15); + + { + let mut rolling_timestamp = rolling_timestamp; + let mut parent_hash = parent_hash; + + // import a shorter chain which has better TD. 
+ for i in 6..13 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * (i * i).into()); + + chain.insert(::rlp::encode(&header).to_vec()).unwrap(); + + parent_hash = header.hash(); + rolling_timestamp += 11; + } + } + + assert_eq!(chain.best_block().number, 12); + } } diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 206e7652a..e56fc115b 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -20,41 +20,53 @@ use std::sync::Arc; use ethcore::engines::Engine; use ethcore::ids::BlockId; -use ethcore::service::ClientIoMessage; use ethcore::block_import_error::BlockImportError; use ethcore::block_status::BlockStatus; -use ethcore::verification::queue::{HeaderQueue, QueueInfo}; +use ethcore::verification::queue::{HeaderQueue, QueueInfo, Config as QueueConfig}; use ethcore::transaction::SignedTransaction; use ethcore::blockchain_info::BlockChainInfo; - +use ethcore::spec::Spec; +use ethcore::service::ClientIoMessage; use io::IoChannel; + use util::hash::{H256, H256FastMap}; use util::{Bytes, Mutex}; use provider::Provider; use request; +use self::header_chain::HeaderChain; + mod header_chain; +/// Configuration for the light client. +#[derive(Debug, Default, Clone)] +pub struct Config { + queue: QueueConfig, +} + /// Light client implementation. pub struct Client { - _engine: Arc, - header_queue: HeaderQueue, - _message_channel: Mutex>, + queue: HeaderQueue, + chain: HeaderChain, tx_pool: Mutex>, } impl Client { + /// Create a new `Client`. + pub fn new(config: Config, spec: &Spec, io_channel: IoChannel) -> Self { + Client { + queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, true), + chain: HeaderChain::new(&::rlp::encode(&spec.genesis_header())), + tx_pool: Mutex::new(Default::default()), + } + } + /// Import a header as rlp-encoded bytes. 
pub fn import_header(&self, bytes: Bytes) -> Result { let header = ::rlp::decode(&bytes); - self.header_queue.import(header).map_err(Into::into) - } - - /// Whether the block is already known (but not necessarily part of the canonical chain) - pub fn is_known(&self, _id: BlockId) -> bool { - false + self.queue.import(header).map_err(Into::into) } /// Import a local transaction. @@ -68,30 +80,34 @@ impl Client { } /// Inquire about the status of a given block (or header). - pub fn status(&self, _id: BlockId) -> BlockStatus { + pub fn status(&self, id: BlockId) -> BlockStatus { BlockStatus::Unknown } /// Get the header queue info. pub fn queue_info(&self) -> QueueInfo { - self.header_queue.queue_info() - } - - /// Best block number. - pub fn best_block_number(&self) -> u64 { - 0 - } - - /// Best block hash. - pub fn best_block_hash(&self) -> u64 { - unimplemented!() + self.queue.queue_info() } } // dummy implementation -- may draw from canonical cache further on. impl Provider for Client { fn chain_info(&self) -> BlockChainInfo { - unimplemented!() + let best_block = self.chain.best_block(); + let first_block = self.chain.first_block(); + let genesis_hash = self.chain.genesis_hash(); + + BlockChainInfo { + total_difficulty: best_block.total_difficulty, + pending_total_difficulty: best_block.total_difficulty, + genesis_hash: genesis_hash, + best_block_hash: best_block.hash, + best_block_number: best_block.number, + ancient_block_hash: if first_block.is_some() { Some(genesis_hash) } else { None }, + ancient_block_number: if first_block.is_some() { Some(0) } else { None }, + first_block_hash: first_block.as_ref().map(|first| first.hash), + first_block_number: first_block.as_ref().map(|first| first.number), + } } fn reorg_depth(&self, _a: &H256, _b: &H256) -> Option { diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index d59066b82..ddd012075 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -60,7 +60,8 @@ extern crate ethcore_util 
as util; extern crate ethcore_network as network; extern crate ethcore_io as io; extern crate rlp; +extern crate smallvec; extern crate time; #[cfg(feature = "ipc")] -extern crate ethcore_ipc as ipc; \ No newline at end of file +extern crate ethcore_ipc as ipc; diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index d9910f958..a7676cdf0 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -20,7 +20,7 @@ use network::{NetworkContext, PeerId, NodeId}; use super::{Announcement, LightProtocol, ReqId}; use super::error::Error; -use request::Request; +use request::{self, Request}; /// An I/O context which allows sending and receiving packets as well as /// disconnecting peers. This is used as a generalization of the portions @@ -93,6 +93,10 @@ pub trait EventContext { // TODO: maybe just put this on a timer in LightProtocol? fn make_announcement(&self, announcement: Announcement); + /// Find the maximum number of requests of a specific type which can be made from + /// supplied peer. + fn max_requests(&self, peer: PeerId, kind: request::Kind) -> Option; + /// Disconnect a peer. fn disconnect_peer(&self, peer: PeerId); @@ -128,6 +132,10 @@ impl<'a> EventContext for Ctx<'a> { self.proto.make_announcement(self.io, announcement); } + fn max_requests(&self, peer: PeerId, kind: request::Kind) -> Option { + self.proto.max_requests(peer, kind) + } + fn disconnect_peer(&self, peer: PeerId) { self.io.disconnect_peer(peer); } diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 491e1d0ac..02f08a7a9 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -157,7 +157,7 @@ impl Peer { /// An LES event handler. 
 ///
-/// Each handler function takes a context which describes the relevant peer 
+/// Each handler function takes a context which describes the relevant peer
 /// and gives references to the IO layer and protocol structure so new messages
 /// can be dispatched immediately.
 ///
@@ -185,10 +185,12 @@ pub trait Handler: Send + Sync {
 	fn on_state_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[Vec]) { }
 	/// Called when a peer responds with contract code.
 	fn on_code(&self, _ctx: &EventContext, _req_id: ReqId, _codes: &[Bytes]) { }
-	/// Called when a peer responds with header proofs. Each proof is a block header coupled 
+	/// Called when a peer responds with header proofs. Each proof is a block header coupled
 	/// with a series of trie nodes is ascending order by distance from the root.
 	fn on_header_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[(Bytes, Vec)]) { }
-	/// Called on abort. 
+	/// Called on abort. This signals to handlers that they should clean up
+	/// and ignore peers.
+	// TODO: corresponding `on_activate`?
 	fn on_abort(&self) { }
 }
 
@@ -215,9 +217,9 @@ pub struct Params {
 /// This is simply designed for request-response purposes. Higher level uses
 /// of the protocol, such as synchronization, will function as wrappers around
 /// this system.
-// 
+//
 // LOCK ORDER:
-// Locks must be acquired in the order declared, and when holding a read lock 
+// Locks must be acquired in the order declared, and when holding a read lock
 // on the peers, only one peer may be held at a time.
 pub struct LightProtocol {
 	provider: Arc,
@@ -252,7 +254,7 @@ impl LightProtocol {
 		}
 	}
 
-	/// Check the maximum amount of requests of a specific type 
+	/// Check the maximum amount of requests of a specific type
 	/// which a peer would be able to serve.
 	pub fn max_requests(&self, peer: PeerId, kind: request::Kind) -> Option {
 		self.peers.read().get(&peer).and_then(|peer| {
@@ -267,11 +269,11 @@ impl LightProtocol {
 		})
 	}
 
-	/// Make a request to a peer. 
+ /// Make a request to a peer. /// /// Fails on: nonexistent peer, network error, peer not server, /// insufficient buffer. Does not check capabilities before sending. - /// On success, returns a request id which can later be coordinated + /// On success, returns a request id which can later be coordinated /// with an event. pub fn request_from(&self, io: &IoContext, peer_id: &PeerId, request: Request) -> Result { let peers = self.peers.read(); @@ -325,10 +327,10 @@ impl LightProtocol { // TODO: "urgent" announcements like new blocks? // the timer approach will skip 1 (possibly 2) in rare occasions. - if peer_info.sent_head == announcement.head_hash || + if peer_info.sent_head == announcement.head_hash || peer_info.status.head_num >= announcement.head_num || now - peer_info.last_update < Duration::milliseconds(UPDATE_INTERVAL_MS) { - continue + continue } peer_info.last_update = now; @@ -357,7 +359,7 @@ impl LightProtocol { /// Add an event handler. /// Ownership will be transferred to the protocol structure, /// and the handler will be kept alive as long as it is. - /// These are intended to be added when the protocol structure + /// These are intended to be added when the protocol structure /// is initialized as a means of customizing its behavior. 
pub fn add_handler(&mut self, handler: Box) { self.handlers.push(handler); @@ -380,7 +382,7 @@ impl LightProtocol { pending_requests.clear(); } - // Does the common pre-verification of responses before the response itself + // Does the common pre-verification of responses before the response itself // is actually decoded: // - check whether peer exists // - check whether request was made @@ -406,7 +408,7 @@ impl LightProtocol { let mut peer_info = peer_info.lock(); match peer_info.remote_flow.as_mut() { Some(&mut (ref mut buf, ref mut flow)) => { - let actual_buffer = ::std::cmp::min(cur_buffer, *flow.limit()); + let actual_buffer = ::std::cmp::min(cur_buffer, *flow.limit()); buf.update_to(actual_buffer) } None => return Err(Error::NotServer), // this really should be impossible. @@ -488,17 +490,17 @@ impl LightProtocol { request::Kind::Receipts => timeout::RECEIPTS, request::Kind::StateProofs => timeout::PROOFS, request::Kind::Codes => timeout::CONTRACT_CODES, - request::Kind::HeaderProofs => timeout::HEADER_PROOFS, + request::Kind::HeaderProofs => timeout::HEADER_PROOFS, }; if r.timestamp + Duration::milliseconds(kind_timeout) <= now { - debug!(target: "les", "Request for {:?} from peer {} timed out", + debug!(target: "les", "Request for {:?} from peer {} timed out", r.request.kind(), r.peer_id); - + // keep the request in the `pending` set for now so // on_disconnect will pass unfulfilled ReqIds to handlers. // in the case that a response is received after this, the - // disconnect won't be cancelled but the ReqId won't be + // disconnect won't be cancelled but the ReqId won't be // marked as abandoned. 
io.disconnect_peer(r.peer_id); } @@ -519,7 +521,7 @@ impl LightProtocol { punish(*peer, io, Error::UnsupportedProtocolVersion(proto_version)); return; } - + let chain_info = self.provider.chain_info(); let status = Status { @@ -540,7 +542,7 @@ impl LightProtocol { last_update: SteadyTime::now(), }); - io.send(*peer, packet::STATUS, status_packet); + io.send(*peer, packet::STATUS, status_packet); } // called when a peer disconnects. @@ -569,7 +571,7 @@ impl LightProtocol { io: io, proto: self, }, &unfulfilled) - } + } } } @@ -608,7 +610,7 @@ impl LightProtocol { for handler in &self.handlers { handler.on_connect(&Ctx { peer: *peer, - io: io, + io: io, proto: self, }, &status, &capabilities) } @@ -662,7 +664,7 @@ impl LightProtocol { } // Handle a request for block headers. - fn get_block_headers(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { + fn get_block_headers(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { const MAX_HEADERS: usize = 512; let peers = self.peers.read(); @@ -914,7 +916,7 @@ impl LightProtocol { .map(|x| x.iter().map(|node| node.as_raw().to_owned()).collect()) .collect(); - for handler in &self.handlers { + for handler in &self.handlers { handler.on_state_proofs(&Ctx { peer: *peer, io: io, @@ -983,7 +985,7 @@ impl LightProtocol { let raw_code: Vec = try!(try!(raw.at(2)).iter().map(|x| x.as_val()).collect()); - for handler in &self.handlers { + for handler in &self.handlers { handler.on_code(&Ctx { peer: *peer, io: io, @@ -1055,11 +1057,11 @@ impl LightProtocol { try!(raw.at(1)).iter().map(|x| x.as_raw().to_owned()).collect(), )) } - + let req_id = try!(self.pre_verify_response(peer, request::Kind::HeaderProofs, &raw)); let raw_proofs: Vec<_> = try!(try!(raw.at(2)).iter().map(decode_res).collect()); - for handler in &self.handlers { + for handler in &self.handlers { handler.on_header_proofs(&Ctx { peer: *peer, io: io, @@ -1082,7 +1084,7 @@ impl LightProtocol { 
handler.on_transactions(&Ctx { peer: *peer, io: io, - proto: self, + proto: self, }, &txs); } @@ -1136,12 +1138,12 @@ fn encode_request(req: &Request, req_id: usize) -> Vec { Request::Headers(ref headers) => { let mut stream = RlpStream::new_list(2); stream.append(&req_id).begin_list(4); - + match headers.start { HashOrNumber::Hash(ref hash) => stream.append(hash), HashOrNumber::Number(ref num) => stream.append(num), }; - + stream .append(&headers.max) .append(&headers.skip) @@ -1214,4 +1216,4 @@ fn encode_request(req: &Request, req_id: usize) -> Vec { stream.out() } } -} \ No newline at end of file +} diff --git a/ethcore/light/src/net/status.rs b/ethcore/light/src/net/status.rs index 90b8640cd..194125aff 100644 --- a/ethcore/light/src/net/status.rs +++ b/ethcore/light/src/net/status.rs @@ -562,4 +562,4 @@ mod tests { assert_eq!(read_capabilities, capabilities); assert!(read_flow.is_none()); } -} \ No newline at end of file +} From 2a01b43bd1733a6a66be17b591c430de7e8c371c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 13 Dec 2016 20:13:16 +0100 Subject: [PATCH 04/42] light: block status and CHT module --- ethcore/light/src/client/cht.rs | 22 +++++++++++++ ethcore/light/src/client/header_chain.rs | 39 ++++++++++++++++-------- ethcore/light/src/lib.rs | 6 ++-- 3 files changed, 52 insertions(+), 15 deletions(-) create mode 100644 ethcore/light/src/client/cht.rs diff --git a/ethcore/light/src/client/cht.rs b/ethcore/light/src/client/cht.rs new file mode 100644 index 000000000..8424e0c31 --- /dev/null +++ b/ethcore/light/src/client/cht.rs @@ -0,0 +1,22 @@ +// Copyright 2015, 2016 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +//! Canonical hash trie definitions and helper functions. + +/// The size of each CHT. +pub const SIZE: u64 = 2048; + +/// Convert a block number to a CHT number. +pub fn block_to_cht_number(block_num: u64) -> u64 { + (block_num + 1) / SIZE +} diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 299df04f3..f350f19ca 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -28,6 +28,9 @@ use std::collections::{BTreeMap, HashMap}; +use client::cht; + +use ethcore::block_status::BlockStatus; use ethcore::error::BlockError; use ethcore::ids::BlockId; use ethcore::views::HeaderView; @@ -35,13 +38,10 @@ use util::{Bytes, H256, U256, Mutex, RwLock}; use smallvec::SmallVec; -/// Delay this many blocks before producing a CHT. required to be at -/// least 1 but should be more in order to be resilient against reorgs. -const CHT_DELAY: u64 = 2048; - -/// Generate CHT roots of this size. -// TODO: move CHT definition/creation into more generic module. -const CHT_SIZE: u64 = 2048; +/// Store at least this many candidate headers at all times. +/// Also functions as the delay for computing CHTs as they aren't +/// relevant to any blocks we've got in memory. +const HISTORY: u64 = 2048; /// Information about a block. #[derive(Debug, Clone)] @@ -94,6 +94,10 @@ impl HeaderChain { } /// Insert a pre-verified header. + /// + /// This blindly trusts that the data given to it is + /// a) valid RLP encoding of a header and + /// b) has sensible data contained within it. 
pub fn insert(&self, header: Bytes) -> Result<(), BlockError> { let view = HeaderView::new(&header); let hash = view.hash(); @@ -140,7 +144,8 @@ impl HeaderChain { let canon = entry.candidates.iter().find(|x| x.hash == canon_hash) .expect("blocks are only inserted if parent is present; or this is the block we just added; qed"); - // what about reorgs > CHT_SIZE + CHT_DELAY? + // what about reorgs > cht::SIZE + HISTORY? + // resetting to the last block of a given CHT should be possible. canon_hash = canon.parent_hash; } @@ -152,11 +157,11 @@ impl HeaderChain { // produce next CHT root if it's time. let earliest_era = *candidates.keys().next().expect("at least one era just created; qed"); - if earliest_era + CHT_DELAY + CHT_SIZE <= number { - let mut values = Vec::with_capacity(CHT_SIZE as usize); + if earliest_era + HISTORY + cht::SIZE <= number { + let mut values = Vec::with_capacity(cht::SIZE as usize); { let mut headers = self.headers.write(); - for i in (0..CHT_SIZE).map(|x| x + earliest_era) { + for i in (0..cht::SIZE).map(|x| x + earliest_era) { let era_entry = candidates.remove(&i) .expect("all eras are sequential with no gaps; qed"); @@ -172,7 +177,7 @@ impl HeaderChain { } let cht_root = ::util::triehash::trie_root(values); - debug!(target: "chain", "Produced CHT {} root: {:?}", (earliest_era - 1) % CHT_SIZE, cht_root); + debug!(target: "chain", "Produced CHT {} root: {:?}", (earliest_era - 1) % cht::SIZE, cht_root); self.cht_roots.lock().push(cht_root); } @@ -238,6 +243,14 @@ impl HeaderChain { }) } } + + /// Get block status. 
+ pub fn status(&self, hash: &H256) -> BlockStatus { + match self.headers.read().contains_key(hash) { + true => BlockStatus::InChain, + false => BlockStatus::Unknown, + } + } } #[cfg(test)] @@ -248,7 +261,7 @@ mod tests { use ethcore::spec::Spec; #[test] - fn it_works() { + fn basic_chain() { let spec = Spec::new_test(); let genesis_header = spec.genesis_header(); diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index ddd012075..5cdc3addc 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -25,8 +25,10 @@ //! low-latency applications, but perfectly suitable for simple everyday //! use-cases like sending transactions from a personal account. //! -//! It starts by performing a header-only sync, verifying random samples -//! of members of the chain to varying degrees. +//! The light client performs a header-only sync, doing verification and pruning +//! historical blocks. Upon pruning, batches of 2048 blocks have a number => hash +//! mapping sealed into "canonical hash tries" which can later be used to verify +//! historical block queries from peers. #![deny(missing_docs)] From 484023b1716051ac6604bd16798cf40defd4f3d4 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 13 Dec 2016 20:13:55 +0100 Subject: [PATCH 05/42] light: max requests as 0 on unknown peer --- ethcore/light/src/client/mod.rs | 21 +++++++++++---------- ethcore/light/src/net/context.rs | 4 ++-- ethcore/light/src/net/mod.rs | 7 ++++--- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index e56fc115b..32725cc1a 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -16,13 +16,9 @@ //! Light client implementation. 
Stores data from light sync -use std::sync::Arc; - -use ethcore::engines::Engine; -use ethcore::ids::BlockId; use ethcore::block_import_error::BlockImportError; use ethcore::block_status::BlockStatus; -use ethcore::verification::queue::{HeaderQueue, QueueInfo, Config as QueueConfig}; +use ethcore::verification::queue::{self, HeaderQueue}; use ethcore::transaction::SignedTransaction; use ethcore::blockchain_info::BlockChainInfo; use ethcore::spec::Spec; @@ -37,12 +33,13 @@ use request; use self::header_chain::HeaderChain; +mod cht; mod header_chain; /// Configuration for the light client. #[derive(Debug, Default, Clone)] pub struct Config { - queue: QueueConfig, + queue: queue::Config, } /// Light client implementation. @@ -79,15 +76,19 @@ impl Client { self.tx_pool.lock().values().cloned().collect() } - /// Inquire about the status of a given block (or header). - pub fn status(&self, id: BlockId) -> BlockStatus { - BlockStatus::Unknown + /// Inquire about the status of a given header. + pub fn status(&self, hash: &H256) -> BlockStatus { + match self.queue.status(hash) { + queue::Status::Unknown => self.chain.status(hash), + other => other.into(), + } } /// Get the header queue info. - pub fn queue_info(&self) -> QueueInfo { + pub fn queue_info(&self) -> queue::QueueInfo { self.queue.queue_info() } + } // dummy implementation -- may draw from canonical cache further on. diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index a7676cdf0..7f55dd229 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -95,7 +95,7 @@ pub trait EventContext { /// Find the maximum number of requests of a specific type which can be made from /// supplied peer. - fn max_requests(&self, peer: PeerId, kind: request::Kind) -> Option; + fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize; /// Disconnect a peer. 
fn disconnect_peer(&self, peer: PeerId); @@ -132,7 +132,7 @@ impl<'a> EventContext for Ctx<'a> { self.proto.make_announcement(self.io, announcement); } - fn max_requests(&self, peer: PeerId, kind: request::Kind) -> Option { + fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize { self.proto.max_requests(peer, kind) } diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 02f08a7a9..326c234ca 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -255,8 +255,9 @@ impl LightProtocol { } /// Check the maximum amount of requests of a specific type - /// which a peer would be able to serve. - pub fn max_requests(&self, peer: PeerId, kind: request::Kind) -> Option { + /// which a peer would be able to serve. Returns zero if the + /// peer is unknown or has no buffer flow parameters. + pub fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize { self.peers.read().get(&peer).and_then(|peer| { let mut peer = peer.lock(); match peer.remote_flow.as_mut() { @@ -266,7 +267,7 @@ impl LightProtocol { } None => None, } - }) + }).unwrap_or(0) } /// Make a request to a peer. From 8c64400654d6657696ec9ea28510ceec21a140d9 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 13 Dec 2016 21:09:43 +0100 Subject: [PATCH 06/42] light: change sync module name --- sync/src/light/mod.rs | 111 ------------------- sync/src/light_sync/mod.rs | 213 +++++++++++++++++++++++++++++++++++++ 2 files changed, 213 insertions(+), 111 deletions(-) delete mode 100644 sync/src/light/mod.rs create mode 100644 sync/src/light_sync/mod.rs diff --git a/sync/src/light/mod.rs b/sync/src/light/mod.rs deleted file mode 100644 index 5806dd6d2..000000000 --- a/sync/src/light/mod.rs +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2015, 2016 Parity Technologies (UK) Ltd. -// This file is part of Parity. 
- -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Light client synchronization. -//! -//! This will synchronize the header chain using LES messages. -//! Dataflow is largely one-directional as headers are pushed into -//! the light client queue for import. Where possible, they are batched -//! in groups. -//! -//! This is written assuming that the client and sync service are running -//! in the same binary; unlike a full node - -use std::collections::{BinaryHeap, HashMap}; -use std::sync::Arc; - -use light::Client; -use light::net::{Handler, EventContext, Capabilities}; -use light::request; -use network: PeerId; -use util::{U256, H256}; - -struct Peer { - head_td: U256, - head_hash: H256, - head_num: u64, -} - -// The block downloader. -// This is instantiated with a starting and a target block -// and produces a priority queue of requests for headers which should be -// fulfilled. -struct Downloader { - start: u64, - target: (H256, u64), - requests: BinaryHeap, -} - -impl Downloader { - // create a new downloader. - fn new(start: u64, target: (H256, u64)) -> Self { - Downloader { - start: start, - target: target, - requests: BinaryHeap::new(), - } - } -} - -/// Light client synchronization manager. See module docs for more details. -pub struct LightSync { - best_seen: Mutex>, // best seen block on the network. - peers: RwLock>, // peers which are relevant to synchronization. 
- client: Arc, - downloader: Downloader, - assigned_requests: HashMap, -} - -impl Handler for LightSync { - fn on_connect(&self, ctx: &EventContext, status: &Status, capabilities: &Capabilities) { - if !capabilities.serve_headers { - trace!(target: "sync", "Ignoring irrelevant peer: {}", ctx.peer()); - return; - } - - { - let mut best = self.best_seen.lock(); - if best_seen.as_ref().map_or(true, |ref best| status.head_td > best.1) { - *best_seen = Some(status.head_hash, status.head_td) - } - } - - self.peers.write().insert(ctx.peer(), Peer { - head_td: status.head_td, - head_hash: status.head_hash, - head_num: status.head_num, - }); - } -} - -impl LightSync { - fn assign_request(&self, p-eer: PeerId); -} - -// public API -impl LightSync { - /// Create a new instance of `LightSync`. - /// - /// This won't do anything until registered as a handler - /// so it can receive - pub fn new(client: Arc) -> Self { - LightSync { - best_seen: Mutex::new(None), - peers: HashMap::new(), - client: client, - } - } -} diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs new file mode 100644 index 000000000..d49b0dc17 --- /dev/null +++ b/sync/src/light_sync/mod.rs @@ -0,0 +1,213 @@ +// Copyright 2015, 2016 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Light client synchronization. +//! +//! This will synchronize the header chain using LES messages. 
+//! Dataflow is largely one-directional as headers are pushed into +//! the light client queue for import. Where possible, they are batched +//! in groups. +//! +//! This is written assuming that the client and sync service are running +//! in the same binary; unlike a full node + +use std::collections::{BinaryHeap, HashMap}; +use std::fmt; +use std::sync::Arc; + +use light::client::{Client, BlockDescriptor}; +use light::net::{Error as NetError, Handler, EventContext, Capabilities, ReqId}; +use light::request; +use network::PeerId; +use rlp::{UntrustedRlp, View}; +use util::{U256, H256}; + +// How many headers we request at a time when searching for best +// common ancestor with peer. +const UNCONFIRMED_SEARCH_SIZE: u64 = 128; + +#[derive(Debug)] +enum Error { + // Peer is useless for now. + UselessPeer, + // Peer returned a malformed response. + MalformedResponse, + // Peer returned known bad block. + BadBlock, + // Peer had a prehistoric common ancestor. + PrehistoricAncestor, + // Protocol-level error. + ProtocolLevel(NetError), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::UselessPeer => write!(f, "Peer is useless"), + Error::MalformedResponse => write!(f, "Response malformed"), + Error::BadBlock => write!(f, "Block known to be bad"), + Error::PrehistoricAncestor => write!(f, "Common ancestor is prehistoric"), + Error::ProtocolLevel(ref err) => write!(f, "Protocol level error: {}", err), + } + } +} + +/// Peer chain info. +#[derive(Clone)] +struct ChainInfo { + head_td: U256, + head_hash: H256, + head_num: u64, +} + +/// A peer we haven't found a common ancestor for yet. +struct UnconfirmedPeer { + chain_info: ChainInfo, + last_batched: u64, + req_id: ReqId, +} + +impl UnconfirmedPeer { + /// Create an unconfirmed peer. Returns `None` if we cannot make a + /// common ancestors request for some reason. The event context provided + /// should be associated with this peer. 
+ fn create(ctx: &EventContext, chain_info: ChainInfo, best_num: u64) -> Result { + let this = ctx.peer(); + + if ctx.max_requests(this, request::Kind::Headers) < UNCONFIRMED_SEARCH_SIZE { + return Err(Error::UselessPeer); // a peer which allows this few header reqs isn't useful anyway. + } + + let req_id = try!(ctx.request_from(this, request::Request::Headers(request::Headers { + start: best_num.into(), + max: ::std::cmp::min(best_num, UNCONFIRMED_SEARCH_SIZE), + skip: 0, + reverse: true, + }))); + + Ok(UnconfirmedPeer { + chain_info: chain_info, + last_batched: best_num, + req_id: ReqId, + }) + } + + /// Feed in the result of the headers query. If an error occurs, the request + /// is malformed. If a common (hash, number) pair is returned then this is + /// the common ancestor. If not, then another request for headers has been + /// dispatched. + fn check_batch(&mut self, ctx: &EventContext, client: &Client, headers: &[Bytes]) -> Result, Error> { + use ethcore::block_status::BlockStatus; + + let mut cur_num = self.last_batched; + let chain_info = client.chain_info(); + for raw_header in headers { + let header: Header = try!(UntrustedRlp::new(&raw_header).as_val().map_err(|_| Error::MalformedResponse)); + if header.number() != cur_num { return Err(Error::MalformedResponse) } + + if chain_info.first_block_number.map_or(false, |f| header.number() < f) { + return Err(Error::PrehistoricAncestor); + } + + let hash = header.hash(); + + match client.status(&hash) { + BlockStatus::InChain => return Ok(Some(hash)), + BlockStatus::Bad => return Err(Error::BadBlock), + BlockStatus::Unknown | BlockStatus::Queued => {}, + } + + cur_num -= 1; + } + let this = ctx.peer(); + + if cur_num == 0 { + trace!(target: "sync", "Peer {}: genesis as common ancestor", this); + return Ok(Some(chain_info.genesis_hash)); + } + + // nothing found, nothing prehistoric. + // send the next request. 
+ let req_id = try!(ctx.request_from(this, request::Request::Headers(request::Headers { + start: cur_num, + max: ::std::cmp::min(cur_num, UNCONFIRMED_SEARCH_SIZE), + skip: 0, + reverse: true, + }))); + + self.req_id = req_id; + } +} + +/// Connected peers as state machines. +/// +/// On connection, we'll search for a common ancestor to their chain. +/// Once that's found, we can sync to this peer. +enum Peer { + // Searching for a common ancestor. + SearchCommon(Mutex), + // A peer we can sync to. + SyncTo(ChainInfo), +} + +/// Light client synchronization manager. See module docs for more details. +pub struct LightSync { + best_seen: Mutex>, // best seen block on the network. + peers: RwLock>, // peers which are relevant to synchronization. + client: Arc, + downloader: Downloader, + assigned_requests: HashMap, +} + +impl Handler for LightSync { + fn on_connect(&self, ctx: &EventContext, status: &Status, capabilities: &Capabilities) { + if !capabilities.serve_headers || status.head_num <= self.client.best_block().number { + trace!(target: "sync", "Ignoring irrelevant peer: {}", ctx.peer()); + return; + } + + let chain_info = ChainInfo { + head_td: status.head_td, + head_hash: status.head_hash, + head_num: status.head_num, + }; + let our_best = self.client.chain_info().best_block_number; + let unconfirmed = match UnconfirmedPeer::create(ctx, chain_info, our_best) { + Ok(unconfirmed) => unconfirmed, + Err(e) => { + trace!(target: "sync", "Failed to create unconfirmed peer: {}", e); + return; + } + }; + + self.peers.write().insert(ctx.peer(), Mutex::new(unconfirmed)); + } +} + +// public API +impl LightSync { + /// Create a new instance of `LightSync`. 
+ /// + /// This won't do anything until registered as a handler + /// so it can receive + pub fn new(client: Arc) -> Self { + LightSync { + best_seen: Mutex::new(None), + peers: HashMap::new(), + client: client, + } + } +} From 6fb71527e48605325070f7df2d1a6640ce98eebb Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 13 Dec 2016 21:09:57 +0100 Subject: [PATCH 07/42] light: search for common ancestor with peers --- ethcore/light/src/client/header_chain.rs | 2 +- ethcore/light/src/client/mod.rs | 37 +++++++++++++++++------- ethcore/light/src/net/mod.rs | 3 +- sync/src/lib.rs | 1 + sync/src/light_sync/mod.rs | 37 +++++++++++++++--------- 5 files changed, 53 insertions(+), 27 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index f350f19ca..847b0251f 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -276,7 +276,7 @@ mod tests { header.set_timestamp(rolling_timestamp); header.set_difficulty(*genesis_header.difficulty() * i.into()); - chain.insert(::rlp::encode(&header).to_vec()); + chain.insert(::rlp::encode(&header).to_vec()).unwrap(); parent_hash = header.hash(); rolling_timestamp += 10; diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 32725cc1a..1ddcb0515 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -18,6 +18,7 @@ use ethcore::block_import_error::BlockImportError; use ethcore::block_status::BlockStatus; +use ethcore::ids::BlockId; use ethcore::verification::queue::{self, HeaderQueue}; use ethcore::transaction::SignedTransaction; use ethcore::blockchain_info::BlockChainInfo; @@ -33,7 +34,7 @@ use request; use self::header_chain::HeaderChain; -mod cht; +pub mod cht; mod header_chain; /// Configuration for the light client. @@ -84,16 +85,8 @@ impl Client { } } - /// Get the header queue info. 
- pub fn queue_info(&self) -> queue::QueueInfo { - self.queue.queue_info() - } - -} - -// dummy implementation -- may draw from canonical cache further on. -impl Provider for Client { - fn chain_info(&self) -> BlockChainInfo { + /// Get the chain info. + pub fn chain_info(&self) -> BlockChainInfo { let best_block = self.chain.best_block(); let first_block = self.chain.first_block(); let genesis_hash = self.chain.genesis_hash(); @@ -111,6 +104,28 @@ impl Provider for Client { } } + /// Get the header queue info. + pub fn queue_info(&self) -> queue::QueueInfo { + self.queue.queue_info() + } + + /// Get a block header by Id. + pub fn get_header(&self, id: BlockId) -> Option { + self.chain.get_header(id) + } + + /// Get the `i`th CHT root. + pub fn cht_root(&self, i: usize) -> Option { + self.chain.cht_root(i) + } +} + +// dummy implementation -- may draw from canonical cache further on. +impl Provider for Client { + fn chain_info(&self) -> BlockChainInfo { + Client::chain_info(self) + } + fn reorg_depth(&self, _a: &H256, _b: &H256) -> Option { None } diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 326c234ca..6da964f99 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -38,7 +38,7 @@ use request::{self, HashOrNumber, Request}; use self::buffer_flow::{Buffer, FlowParams}; use self::context::Ctx; -use self::error::{Error, Punishment}; +use self::error::Punishment; mod buffer_flow; mod context; @@ -48,6 +48,7 @@ mod status; #[cfg(test)] mod tests; +pub use self::error::Error; pub use self::context::{EventContext, IoContext}; pub use self::status::{Status, Capabilities, Announcement}; diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 0ff3bfe66..fc9e5de74 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -54,6 +54,7 @@ extern crate ethcore_ipc as ipc; mod chain; mod blocks; mod block_sync; +mod light_sync; mod sync_io; mod snapshot; mod transactions_stats; diff --git a/sync/src/light_sync/mod.rs 
b/sync/src/light_sync/mod.rs index d49b0dc17..15e4d72d4 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -28,12 +28,14 @@ use std::collections::{BinaryHeap, HashMap}; use std::fmt; use std::sync::Arc; -use light::client::{Client, BlockDescriptor}; -use light::net::{Error as NetError, Handler, EventContext, Capabilities, ReqId}; +use ethcore::header::Header; + +use light::client::Client; +use light::net::{Announcement, Error as NetError, Handler, EventContext, Capabilities, ReqId, Status}; use light::request; use network::PeerId; use rlp::{UntrustedRlp, View}; -use util::{U256, H256}; +use util::{Bytes, U256, H256, Mutex, RwLock}; // How many headers we request at a time when searching for best // common ancestor with peer. @@ -53,6 +55,12 @@ enum Error { ProtocolLevel(NetError), } +impl From for Error { + fn from(net_error: NetError) -> Self { + Error::ProtocolLevel(net_error) + } +} + impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { @@ -87,13 +95,13 @@ impl UnconfirmedPeer { fn create(ctx: &EventContext, chain_info: ChainInfo, best_num: u64) -> Result { let this = ctx.peer(); - if ctx.max_requests(this, request::Kind::Headers) < UNCONFIRMED_SEARCH_SIZE { + if ctx.max_requests(this, request::Kind::Headers) < UNCONFIRMED_SEARCH_SIZE as usize { return Err(Error::UselessPeer); // a peer which allows this few header reqs isn't useful anyway. } let req_id = try!(ctx.request_from(this, request::Request::Headers(request::Headers { start: best_num.into(), - max: ::std::cmp::min(best_num, UNCONFIRMED_SEARCH_SIZE), + max: ::std::cmp::min(best_num, UNCONFIRMED_SEARCH_SIZE) as usize, skip: 0, reverse: true, }))); @@ -101,7 +109,7 @@ impl UnconfirmedPeer { Ok(UnconfirmedPeer { chain_info: chain_info, last_batched: best_num, - req_id: ReqId, + req_id: req_id, }) } @@ -142,13 +150,15 @@ impl UnconfirmedPeer { // nothing found, nothing prehistoric. // send the next request. 
let req_id = try!(ctx.request_from(this, request::Request::Headers(request::Headers { - start: cur_num, - max: ::std::cmp::min(cur_num, UNCONFIRMED_SEARCH_SIZE), + start: cur_num.into(), + max: ::std::cmp::min(cur_num, UNCONFIRMED_SEARCH_SIZE) as usize, skip: 0, reverse: true, }))); self.req_id = req_id; + + Ok(None) } } @@ -168,13 +178,13 @@ pub struct LightSync { best_seen: Mutex>, // best seen block on the network. peers: RwLock>, // peers which are relevant to synchronization. client: Arc, - downloader: Downloader, - assigned_requests: HashMap, } impl Handler for LightSync { fn on_connect(&self, ctx: &EventContext, status: &Status, capabilities: &Capabilities) { - if !capabilities.serve_headers || status.head_num <= self.client.best_block().number { + let our_best = self.client.chain_info().best_block_number; + + if !capabilities.serve_headers || status.head_num <= our_best { trace!(target: "sync", "Ignoring irrelevant peer: {}", ctx.peer()); return; } @@ -184,7 +194,6 @@ impl Handler for LightSync { head_hash: status.head_hash, head_num: status.head_num, }; - let our_best = self.client.chain_info().best_block_number; let unconfirmed = match UnconfirmedPeer::create(ctx, chain_info, our_best) { Ok(unconfirmed) => unconfirmed, Err(e) => { @@ -193,7 +202,7 @@ impl Handler for LightSync { } }; - self.peers.write().insert(ctx.peer(), Mutex::new(unconfirmed)); + self.peers.write().insert(ctx.peer(), Peer::SearchCommon(Mutex::new(unconfirmed))); } } @@ -206,7 +215,7 @@ impl LightSync { pub fn new(client: Arc) -> Self { LightSync { best_seen: Mutex::new(None), - peers: HashMap::new(), + peers: RwLock::new(HashMap::new()), client: client, } } From 359d433292815862165c4d1be63b9be574b9f184 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 13 Dec 2016 22:26:06 +0100 Subject: [PATCH 08/42] light: pass incoming responses to peer state machine --- sync/src/light_sync/mod.rs | 80 +++++++++++++++++++++++++++++++++++--- 1 file changed, 75 insertions(+), 5 deletions(-) 
diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 15e4d72d4..faae5830f 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -24,7 +24,7 @@ //! This is written assuming that the client and sync service are running //! in the same binary; unlike a full node -use std::collections::{BinaryHeap, HashMap}; +use std::collections::HashMap; use std::fmt; use std::sync::Arc; @@ -168,7 +168,7 @@ impl UnconfirmedPeer { /// Once that's found, we can sync to this peer. enum Peer { // Searching for a common ancestor. - SearchCommon(Mutex), + SearchCommon(UnconfirmedPeer), // A peer we can sync to. SyncTo(ChainInfo), } @@ -176,7 +176,7 @@ enum Peer { /// Light client synchronization manager. See module docs for more details. pub struct LightSync { best_seen: Mutex>, // best seen block on the network. - peers: RwLock>, // peers which are relevant to synchronization. + peers: RwLock>>, // peers which are relevant to synchronization. client: Arc, } @@ -194,6 +194,8 @@ impl Handler for LightSync { head_hash: status.head_hash, head_num: status.head_num, }; + + trace!(target: "sync", "Beginning search for common ancestor with peer {}", ctx.peer()); let unconfirmed = match UnconfirmedPeer::create(ctx, chain_info, our_best) { Ok(unconfirmed) => unconfirmed, Err(e) => { @@ -202,7 +204,75 @@ impl Handler for LightSync { } }; - self.peers.write().insert(ctx.peer(), Peer::SearchCommon(Mutex::new(unconfirmed))); + self.peers.write().insert(ctx.peer(), Mutex::new(Peer::SearchCommon(unconfirmed))); + } + + fn on_disconnect(&self, ctx: &EventContext, _unfulfilled: &[ReqId]) { + let peer = ctx.peer(); + + match self.peers.write().remove(&peer).map(|peer_data| peer_data.into_inner()) { + None => {} + Some(Peer::SearchCommon(_)) => { + // unfulfilled requests are unimportant since they are only + // relevant to searching for a common ancestor. 
+ trace!(target: "sync", "Unconfirmed peer {} disconnect", ctx.peer()); + } + Some(Peer::SyncTo(_)) => { + trace!(target: "sync", "") + // in this case we may want to reasssign all unfulfilled requests. + // (probably just by pushing them back into the current downloader's priority queue.) + } + } + } + + fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) { + // restart search for common ancestor if necessary. + // restart download if necessary. + // if this is a peer we found irrelevant earlier, we may want to + // re-evaluate their usefulness. + if !self.peers.read().contains_key(&ctx.peer()) { return } + + trace!(target: "sync", "Announcement from peer {}: new chain head {:?}, reorg depth {}", + ctx.peer(), (announcement.head_hash, announcement.head_num), announcement.reorg_depth); + + } + + fn on_block_headers(&self, ctx: &EventContext, req_id: ReqId, headers: &[Bytes]) { + let peer = ctx.peer(); + match self.peers.read().get(&peer) { + None => {}, + Some(peer_data) => { + let mut peer_data = peer_data.lock(); + let new_peer = match *peer_data { + Peer::SearchCommon(ref mut unconfirmed) => { + if unconfirmed.req_id != req_id { + trace!(target: "sync", "Ignoring irrelevant response from peer {}", peer); + return; + } + match unconfirmed.check_batch(ctx, &self.client, headers) { + Ok(None) => { + trace!(target: "sync", "Continuing to search for common ancestor with peer {}", peer); + return; + } + Ok(Some(common)) => { + trace!(target: "sync", "Found common ancestor {} with peer {}", peer, common); + let chain_info = unconfirmed.chain_info.clone(); + Peer::SyncTo(chain_info) + } + Err(e) => { + trace!(target: "sync", "Failed to find common ancestor with peer {}: {}", peer, e); + return; + } + } + } + Peer::SyncTo(_) => { + trace!(target: "sync", "Incoming response from peer being synced to."); + }, + }; + + *peer_data = new_peer; + } + } } } @@ -211,7 +281,7 @@ impl LightSync { /// Create a new instance of `LightSync`. 
/// /// This won't do anything until registered as a handler - /// so it can receive + /// so it can act on events. pub fn new(client: Arc) -> Self { LightSync { best_seen: Mutex::new(None), From 887bcfb88aa71d3f23e819297ad3dd20398382df Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 14 Dec 2016 19:36:48 +0100 Subject: [PATCH 09/42] light: response decoding and verification module --- sync/src/light_sync/response.rs | 152 ++++++++++++++++++++++++++++++++ 1 file changed, 152 insertions(+) create mode 100644 sync/src/light_sync/response.rs diff --git a/sync/src/light_sync/response.rs b/sync/src/light_sync/response.rs new file mode 100644 index 000000000..2d64bb36b --- /dev/null +++ b/sync/src/light_sync/response.rs @@ -0,0 +1,152 @@ +// Copyright 2015, 2016 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Helpers for decoding and verifying responses for headers. + +use std::fmt; + +use ethcore::header::Header; +use light::request::{HashOrNumber, Headers as HeadersRequest}; +use rlp::{DecoderError, UntrustedRlp, View}; +use util::H256; + +/// Errors found when decoding headers and verifying with basic constraints. +#[derive(Debug, Clone)] +pub enum BasicError { + /// Wrong skip value: expected, found (if any). + WrongSkip(u64, Option), + /// Wrong start number. + WrongStartNumber(u64, u64), + /// Wrong start hash. 
+ WrongStartHash(H256, H256), + /// Too many headers. + TooManyHeaders(usize, usize), + /// Decoder error. + Decoder(DecoderError), +} + +impl From for BasicError { + fn from(err: DecoderError) -> Self { + BasicError::Decoder(err) + } +} + +impl fmt::Display for BasicError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Header response verification error: "); + + match *self { + BasicError::WrongSkip(ref exp, ref got) + => write!(f, "wrong skip (expected {}, got {:?})", exp, got), + BasicError::WrongStartNumber(ref exp, ref got) + => write!(f, "wrong start number (expected {}, got {})", exp, got), + BasicError::WrongStartHash(ref exp, ref got) + => write!(f, "wrong start hash (expected {}, got {})", exp, got), + BasicError::TooManyHeaders(ref max, ref got) + => write!(f, "too many headers (max {}, got {})", max, got), + BasicError::Decoder(ref err) + => write!(f, "invalid encoding ({})", err), + } + } +} + +/// Request verification constraint. +pub trait Constraint { + type Error; + + /// Verify headers against this. + fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), Self::Error>; +} + +/// Decode a response and do basic verification against a request. 
+pub fn decode_and_verify(headers: &[Bytes], request: &HeadersRequest) -> Result, BasicError> { + let headers: Vec<_> = try!!(headers.iter().map(|x| UntrustedRlp::new(&x).as_val()).collect()); + + let reverse = request.reverse; + + try!(Max(request.max).verify(&headers, reverse)); + match request.start { + HashOrNumber::Number(ref num) => try!(StartsAtNumber(*num).verify(&headers, reverse)), + HashOrNumber::Hash(ref hash) => try!(StartsAtHash(*hash).verify(&headers, reverse)), + } + + try!(SkipsBetween(request.skip).verify(&headers, reverse)); +} + +struct StartsAtNumber(u64); +struct StartsAtHash(H256); +struct SkipsBetween(u64); +struct Max(usize); + +impl Constraint for StartsAtNumber { + type Error = BasicError; + + fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), BasicError> { + let earliest = if reverse { headers.last() } else { headers.first() }; + + earliest.map_or(Ok(()), |h| { + if h.number() == self.0 { + Ok(()) + } else { + Err(BasicError::WrongStartNumber(self.0, h.number())) + } + }) + } +} + +impl Constraint for StartsAtHash { + type Error = BasicError; + + fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), BasicError> { + let earliest = if reverse { headers.last() } else { headers.first() }; + + earliest.map_or(Ok(()), |h| { + if h.hash() == self.0 { + Ok(()) + } else { + Err(BasicError::WrongStartHash(self.0, h.hash())) + } + }) + } +} + +impl Constraint for SkipsBetween { + type Error = BasicError; + + fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), BasicError> { + for pair in headers.windows(2) { + let (low, high) = if reverse { (&pair[1], &pair[0]) } else { (&pair[0], &pair[1]) }; + if low.number() >= high.number() { return Err(BasicError::WrongSkip(self.0, None)) } + + let skip = (high.number() - low.number()) - 1; + if skip != self.0 { return Err(BasicError::WrongSkip(self.0, Some(skip))) } + } + + Ok(()) + } +} + +impl Constraint for Max { + type Error = BasicError; + + fn verify(&self, 
headers: &[Header], _reverse: bool) -> Result<(), BasicError> { + match headers.len() > self.0 { + true => Err(BasicError::TooManyHeaders(self.0, headers.len())), + false => Ok(()) + } + } +} + From 0d466fa8d00da2c9686961282a0e4813ec77b6f2 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 14 Dec 2016 20:27:03 +0100 Subject: [PATCH 10/42] light: tests for response module --- sync/src/light_sync/response.rs | 116 ++++++++++++++++++++++++++++++-- 1 file changed, 111 insertions(+), 5 deletions(-) diff --git a/sync/src/light_sync/response.rs b/sync/src/light_sync/response.rs index 2d64bb36b..131f7e4e2 100644 --- a/sync/src/light_sync/response.rs +++ b/sync/src/light_sync/response.rs @@ -21,10 +21,10 @@ use std::fmt; use ethcore::header::Header; use light::request::{HashOrNumber, Headers as HeadersRequest}; use rlp::{DecoderError, UntrustedRlp, View}; -use util::H256; +use util::{Bytes, H256}; /// Errors found when decoding headers and verifying with basic constraints. -#[derive(Debug, Clone)] +#[derive(Debug, PartialEq)] pub enum BasicError { /// Wrong skip value: expected, found (if any). WrongSkip(u64, Option), @@ -46,7 +46,7 @@ impl From for BasicError { impl fmt::Display for BasicError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Header response verification error: "); + try!(write!(f, "Header response verification error: ")); match *self { BasicError::WrongSkip(ref exp, ref got) @@ -58,7 +58,7 @@ impl fmt::Display for BasicError { BasicError::TooManyHeaders(ref max, ref got) => write!(f, "too many headers (max {}, got {})", max, got), BasicError::Decoder(ref err) - => write!(f, "invalid encoding ({})", err), + => write!(f, "{}", err), } } } @@ -73,7 +73,7 @@ pub trait Constraint { /// Decode a response and do basic verification against a request. 
pub fn decode_and_verify(headers: &[Bytes], request: &HeadersRequest) -> Result, BasicError> { - let headers: Vec<_> = try!!(headers.iter().map(|x| UntrustedRlp::new(&x).as_val()).collect()); + let headers: Vec<_> = try!(headers.iter().map(|x| UntrustedRlp::new(&x).as_val()).collect()); let reverse = request.reverse; @@ -84,6 +84,8 @@ pub fn decode_and_verify(headers: &[Bytes], request: &HeadersRequest) -> Result< } try!(SkipsBetween(request.skip).verify(&headers, reverse)); + + Ok(headers) } struct StartsAtNumber(u64); @@ -150,3 +152,107 @@ impl Constraint for Max { } } +#[cfg(test)] +mod tests { + use ethcore::header::Header; + use light::request::Headers as HeadersRequest; + + use super::*; + + #[test] + fn sequential_forward() { + let request = HeadersRequest { + start: 10.into(), + max: 30, + skip: 0, + reverse: false, + }; + + let mut parent_hash = None; + let headers: Vec<_> = (0..25).map(|x| x + 10).map(|x| { + let mut header = Header::default(); + header.set_number(x); + + if let Some(parent_hash) = parent_hash { + header.set_parent_hash(parent_hash); + } + + parent_hash = Some(header.hash()); + + ::rlp::encode(&header).to_vec() + }).collect(); + + assert!(decode_and_verify(&headers, &request).is_ok()); + } + + #[test] + fn sequential_backward() { + let request = HeadersRequest { + start: 10.into(), + max: 30, + skip: 0, + reverse: true, + }; + + let mut parent_hash = None; + let headers: Vec<_> = (0..25).map(|x| x + 10).rev().map(|x| { + let mut header = Header::default(); + header.set_number(x); + + if let Some(parent_hash) = parent_hash { + header.set_parent_hash(parent_hash); + } + + parent_hash = Some(header.hash()); + + ::rlp::encode(&header).to_vec() + }).collect(); + + assert!(decode_and_verify(&headers, &request).is_ok()); + } + + #[test] + fn too_many() { + let request = HeadersRequest { + start: 10.into(), + max: 20, + skip: 0, + reverse: false, + }; + + let mut parent_hash = None; + let headers: Vec<_> = (0..25).map(|x| x + 10).map(|x| { + let 
mut header = Header::default(); + header.set_number(x); + + if let Some(parent_hash) = parent_hash { + header.set_parent_hash(parent_hash); + } + + parent_hash = Some(header.hash()); + + ::rlp::encode(&header).to_vec() + }).collect(); + + assert_eq!(decode_and_verify(&headers, &request), Err(BasicError::TooManyHeaders(20, 25))); + } + + #[test] + fn wrong_skip() { + let request = HeadersRequest { + start: 10.into(), + max: 30, + skip: 5, + reverse: false, + }; + + let headers: Vec<_> = (0..25).map(|x| x * 3).map(|x| x + 10).map(|x| { + let mut header = Header::default(); + header.set_number(x); + + ::rlp::encode(&header).to_vec() + }).collect(); + + assert_eq!(decode_and_verify(&headers, &request), Err(BasicError::WrongSkip(5, Some(2)))); + } +} From 91b8fa70399dabd071ec8607dfdcaa81f459b06c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 14 Dec 2016 22:57:30 +0100 Subject: [PATCH 11/42] light: downloader state machine stub --- sync/src/light_sync/downloader.rs | 173 ++++++++++++++++++++++++++++++ 1 file changed, 173 insertions(+) create mode 100644 sync/src/light_sync/downloader.rs diff --git a/sync/src/light_sync/downloader.rs b/sync/src/light_sync/downloader.rs new file mode 100644 index 000000000..dd78a3144 --- /dev/null +++ b/sync/src/light_sync/downloader.rs @@ -0,0 +1,173 @@ +// Copyright 2015, 2016 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. 
If not, see . + +//! Header download state machine. + +use std::collections::{HashMap, VecDeque}; +use std::mem; + +use ethcore::header::Header; + +use light::net::{EventContext, ReqId}; +use light::request::Headers as HeadersRequest; + +use network::PeerId; +use rlp::{UntrustedRlp, View}; +use util::{Bytes, H256, Mutex}; + +use super::{Error, Peer}; +use super::response::{self, Constraint}; + +// amount of blocks between each scaffold entry. +// TODO: move these into paraeters for `RoundStart::new`? +const ROUND_SKIP: usize = 255; + +// amount of scaffold frames: these are the blank spaces in "X___X___X" +const ROUND_FRAMES: u64 = 255; + +// number of attempts to make to get a full scaffold for a sync round. +const SCAFFOLD_ATTEMPTS: usize = 3; + +// A request for headers with a known starting header +// and a known parent hash for the last block. +struct Request { + headers: HeadersRequest, + end_parent: H256, +} + +pub struct Fetcher { + sparse: Vec
, // sparse header chain. + requests: VecDeque, + pending: HashMap, +} + +impl Fetcher { + // Produce a new fetcher given a sparse headerchain, in ascending order. + // The headers must be valid RLP at this point. + fn new(sparse_headers: Vec
) -> Self { + let mut requests = VecDeque::with_capacity(sparse_headers.len() - 1); + for pair in sparse_headers.windows(2) { + let low_rung = &pair[0]; + let high_rung = &pair[1]; + + let diff = high_rung.number() - low_rung.number(); + if diff < 2 { continue } // these headers are already adjacent. + + let needed_headers = HeadersRequest { + start: high_rung.parent_hash().clone().into(), + max: diff as usize - 1, + skip: 0, + reverse: true, + }; + + requests.push_back(Request { + headers: needed_headers, + end_parent: low_rung.hash(), + }); + } + + Fetcher { + sparse: sparse_headers, + requests: requests, + pending: HashMap::new(), + } + } + + fn process_response(self, req_id: ReqId, headers: &[Bytes]) -> (Downloader, Result<(), Error>) { + unimplemented!() + } +} + +// Round started: get stepped header chain. +// from a start block with number X we request 256 headers stepped by 256 from +// block X + 1. +struct RoundStart { + start_block: (u64, H256), + pending_req: Option<(ReqId, HeadersRequest)>, + sparse_headers: Vec
, + attempt: 0, +} + +impl RoundStart { + fn new(start: (u64, H256)) -> Self { + RoundStart { + start_block: start.clone(), + pending_req: None, + sparse_headers: Vec::new(), + } + } + + fn process_response(mut self, req_id: ReqId, headers: &[Bytes]) -> (Downloader, Result<(), Error>) { + let req = match self.pending_req.take() { + Some((id, req)) if req_id == id { req.clone() } + other => { + self.pending_req = other; + return (self, Ok(())) + } + }; + + self.attempt += 1; + let headers = match response::decode_and_verify(headers, &req) { + Ok(headers) => { + self.sparse_headers.extend(headers); + + if self.sparse_headers.len() == ROUND_FRAMES + 1 + || self.attempt >= SCAFFOLD_ATTEMPTS + { + let fetcher = Fetcher::new(self.sparse_headers); + (Downloader::Fetch(fetcher), Ok(())) + } + } + } + } +} + +/// Downloader state machine. +pub enum Downloader { + /// Waiting for peers. + Nothing, + /// Searching for common block with best chain. + SearchCommon, + /// Beginning a sync round. + RoundStart(RoundStart), + /// Fetching intermediate blocks during a sync round. + Fetch(Fetcher), +} + +impl Downloader { + // Process an answer to a request. Unknown requests will be ignored. + fn process_response(self, req_id: ReqId, headers: &[Bytes]) -> (Self, Result<(), Error>) { + match self { + Downloader::RoundStart(round_start) => round_start.process_response(req_id, headers), + Downloader::Fetch(fetcher) => fetcher.process_response(req_id, headers), + other => (other, Ok(())), + } + } + + // Return unfulfilled requests from disconnected peer. Unknown requests will be ignored. + fn requests_abandoned(self, abandoned: &[ReqId]) -> (Self, Result<(), Error>) { + + } + + // Dispatch pending requests. The dispatcher provided will attempt to + // find a suitable peer to serve the request. + // TODO: have dispatcher take capabilities argument? 
+ fn dispatch_requests(self, dispatcher: D) -> (Self, Result<(), Error>) + where D: Fn(HeadersRequest) -> Option + { + unimplemented!() + } +} From 1bcfc9348de4a4f0ee25bd39cb4951fe52dba390 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 14 Dec 2016 23:25:51 +0100 Subject: [PATCH 12/42] light: specialize Downloader to SyncRound --- .../{downloader.rs => sync_round.rs} | 83 ++++++++++++------- 1 file changed, 54 insertions(+), 29 deletions(-) rename sync/src/light_sync/{downloader.rs => sync_round.rs} (65%) diff --git a/sync/src/light_sync/downloader.rs b/sync/src/light_sync/sync_round.rs similarity index 65% rename from sync/src/light_sync/downloader.rs rename to sync/src/light_sync/sync_round.rs index dd78a3144..905e8354f 100644 --- a/sync/src/light_sync/downloader.rs +++ b/sync/src/light_sync/sync_round.rs @@ -21,6 +21,7 @@ use std::mem; use ethcore::header::Header; +use light::client::LightChainClient; use light::net::{EventContext, ReqId}; use light::request::Headers as HeadersRequest; @@ -36,11 +37,20 @@ use super::response::{self, Constraint}; const ROUND_SKIP: usize = 255; // amount of scaffold frames: these are the blank spaces in "X___X___X" -const ROUND_FRAMES: u64 = 255; +const ROUND_FRAMES: usize = 255; // number of attempts to make to get a full scaffold for a sync round. const SCAFFOLD_ATTEMPTS: usize = 3; +/// Reasons for sync round abort. +#[derive(Debug, Clone, Copy)] +pub enum AbortReason { + /// Bad chain downloaded. + BadChain, + /// No incoming data. + NoResponses, +} + // A request for headers with a known starting header // and a known parent hash for the last block. 
struct Request { @@ -86,7 +96,7 @@ impl Fetcher { } } - fn process_response(self, req_id: ReqId, headers: &[Bytes]) -> (Downloader, Result<(), Error>) { + fn process_response(self, req_id: ReqId, headers: &[Bytes]) -> (SyncRound, Result<(), Error>) { unimplemented!() } } @@ -98,7 +108,7 @@ struct RoundStart { start_block: (u64, H256), pending_req: Option<(ReqId, HeadersRequest)>, sparse_headers: Vec
, - attempt: 0, + attempt: usize, } impl RoundStart { @@ -107,65 +117,80 @@ impl RoundStart { start_block: start.clone(), pending_req: None, sparse_headers: Vec::new(), + attempt: 0, } } - fn process_response(mut self, req_id: ReqId, headers: &[Bytes]) -> (Downloader, Result<(), Error>) { + fn process_response(mut self, req_id: ReqId, headers: &[Bytes]) -> (SyncRound, Result<(), Error>) { let req = match self.pending_req.take() { - Some((id, req)) if req_id == id { req.clone() } + Some((id, ref req)) if req_id == id => { req.clone() } other => { self.pending_req = other; - return (self, Ok(())) + return (SyncRound::Start(self), Ok(())) } }; self.attempt += 1; - let headers = match response::decode_and_verify(headers, &req) { + let res = match response::decode_and_verify(headers, &req) { Ok(headers) => { self.sparse_headers.extend(headers); - if self.sparse_headers.len() == ROUND_FRAMES + 1 - || self.attempt >= SCAFFOLD_ATTEMPTS - { + if self.sparse_headers.len() == ROUND_FRAMES + 1 { + trace!(target: "sync", "Beginning fetch of blocks between {} sparse headers", + self.sparse_headers.len()); + let fetcher = Fetcher::new(self.sparse_headers); - (Downloader::Fetch(fetcher), Ok(())) + return (SyncRound::Fetch(fetcher), Ok(())); } + + Ok(()) } + Err(e) => Err(e), + }; + + if self.attempt >= SCAFFOLD_ATTEMPTS { + (SyncRound::Abort(AbortReason::NoResponses), res.map_err(Into::into)) + } else { + (SyncRound::Start(self), res.map_err(Into::into)) } } } -/// Downloader state machine. -pub enum Downloader { - /// Waiting for peers. - Nothing, - /// Searching for common block with best chain. - SearchCommon, +/// Sync round state machine. +pub enum SyncRound { /// Beginning a sync round. - RoundStart(RoundStart), + Start(RoundStart), /// Fetching intermediate blocks during a sync round. Fetch(Fetcher), + /// Aborted. + Abort(AbortReason), } -impl Downloader { - // Process an answer to a request. Unknown requests will be ignored. 
- fn process_response(self, req_id: ReqId, headers: &[Bytes]) -> (Self, Result<(), Error>) { +impl SyncRound { + fn abort(reason: AbortReason) -> Self { + trace!(target: "sync", "Aborting sync round: {:?}", reason); + + SyncRound::Abort(reason) + } + + /// Process an answer to a request. Unknown requests will be ignored. + pub fn process_response(self, req_id: ReqId, headers: &[Bytes]) -> (Self, Result<(), Error>) { match self { - Downloader::RoundStart(round_start) => round_start.process_response(req_id, headers), - Downloader::Fetch(fetcher) => fetcher.process_response(req_id, headers), + SyncRound::Start(round_start) => round_start.process_response(req_id, headers), + SyncRound::Fetch(fetcher) => fetcher.process_response(req_id, headers), other => (other, Ok(())), } } - // Return unfulfilled requests from disconnected peer. Unknown requests will be ignored. - fn requests_abandoned(self, abandoned: &[ReqId]) -> (Self, Result<(), Error>) { - + /// Return unfulfilled requests from disconnected peer. Unknown requests will be ignored. + pub fn requests_abandoned(self, abandoned: &[ReqId]) -> (Self, Result<(), Error>) { + unimplemented!() } - // Dispatch pending requests. The dispatcher provided will attempt to - // find a suitable peer to serve the request. + /// Dispatch pending requests. The dispatcher provided will attempt to + /// find a suitable peer to serve the request. // TODO: have dispatcher take capabilities argument? 
- fn dispatch_requests(self, dispatcher: D) -> (Self, Result<(), Error>) + pub fn dispatch_requests(self, dispatcher: D) -> (Self, Result<(), Error>) where D: Fn(HeadersRequest) -> Option { unimplemented!() From 0768a61944481059d7137b2a227cc8c5b9f61673 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 14 Dec 2016 23:26:15 +0100 Subject: [PATCH 13/42] light: add LightChainClient trait --- ethcore/light/src/client/mod.rs | 53 ++++++++- sync/src/light_sync/mod.rs | 195 +++++--------------------------- 2 files changed, 79 insertions(+), 169 deletions(-) diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 1ddcb0515..4b8a37366 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -19,6 +19,7 @@ use ethcore::block_import_error::BlockImportError; use ethcore::block_status::BlockStatus; use ethcore::ids::BlockId; +use ethcore::header::Header; use ethcore::verification::queue::{self, HeaderQueue}; use ethcore::transaction::SignedTransaction; use ethcore::blockchain_info::BlockChainInfo; @@ -43,6 +44,28 @@ pub struct Config { queue: queue::Config, } +/// Trait for interacting with the header chain abstractly. +pub trait LightChainClient: Send + Sync { + /// Get chain info. + fn chain_info(&self) -> BlockChainInfo; + + /// Queue header to be verified. Required that all headers queued have their + /// parent queued prior. + fn queue_header(&self, header: Header) -> Result; + + /// Query whether a block is known. + fn is_known(&self, hash: &H256) -> bool; + + /// Clear the queue. + fn clear_queue(&self); + + /// Get queue info. + fn queue_info(&self) -> queue::QueueInfo; + + /// Get the `i`th CHT root. + fn cht_root(&self, i: usize) -> Option; +} + /// Light client implementation. pub struct Client { queue: HeaderQueue, @@ -60,10 +83,8 @@ impl Client { } } - /// Import a header as rlp-encoded bytes. 
- pub fn import_header(&self, bytes: Bytes) -> Result { - let header = ::rlp::decode(&bytes); - + /// Import a header to the queue for additional verification. + pub fn import_header(&self, header: Header) -> Result { self.queue.import(header).map_err(Into::into) } @@ -120,6 +141,30 @@ impl Client { } } +impl LightChainClient for Client { + fn chain_info(&self) -> BlockChainInfo { Client::chain_info(self) } + + fn queue_header(&self, header: Header) -> Result { + self.import_header(header) + } + + fn is_known(&self, hash: &H256) -> bool { + self.status(hash) == BlockStatus::InChain + } + + fn clear_queue(&self) { + self.queue.clear() + } + + fn queue_info(&self) -> queue::QueueInfo { + self.queue.queue_info() + } + + fn cht_root(&self, i: usize) -> Option { + Client::cht_root(self, i) + } +} + // dummy implementation -- may draw from canonical cache further on. impl Provider for Client { fn chain_info(&self) -> BlockChainInfo { diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index faae5830f..6526aacd5 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -22,7 +22,7 @@ //! in groups. //! //! This is written assuming that the client and sync service are running -//! in the same binary; unlike a full node +//! in the same binary; unlike a full node which might communicate via IPC. use std::collections::HashMap; use std::fmt; @@ -30,27 +30,22 @@ use std::sync::Arc; use ethcore::header::Header; -use light::client::Client; +use light::client::LightChainClient; use light::net::{Announcement, Error as NetError, Handler, EventContext, Capabilities, ReqId, Status}; use light::request; use network::PeerId; -use rlp::{UntrustedRlp, View}; +use rlp::{DecoderError, UntrustedRlp, View}; use util::{Bytes, U256, H256, Mutex, RwLock}; -// How many headers we request at a time when searching for best -// common ancestor with peer. 
-const UNCONFIRMED_SEARCH_SIZE: u64 = 128; +mod response; +mod sync_round; #[derive(Debug)] enum Error { - // Peer is useless for now. - UselessPeer, // Peer returned a malformed response. - MalformedResponse, + MalformedResponse(response::BasicError), // Peer returned known bad block. BadBlock, - // Peer had a prehistoric common ancestor. - PrehistoricAncestor, // Protocol-level error. ProtocolLevel(NetError), } @@ -61,13 +56,17 @@ impl From for Error { } } +impl From for Error { + fn from(err: response::BasicError) -> Self { + Error::MalformedResponse(err) + } +} + impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - Error::UselessPeer => write!(f, "Peer is useless"), - Error::MalformedResponse => write!(f, "Response malformed"), + Error::MalformedResponse(ref err) => write!(f, "{}", err), Error::BadBlock => write!(f, "Block known to be bad"), - Error::PrehistoricAncestor => write!(f, "Common ancestor is prehistoric"), Error::ProtocolLevel(ref err) => write!(f, "Protocol level error: {}", err), } } @@ -81,106 +80,29 @@ struct ChainInfo { head_num: u64, } -/// A peer we haven't found a common ancestor for yet. -struct UnconfirmedPeer { - chain_info: ChainInfo, - last_batched: u64, - req_id: ReqId, +struct Peer { + first_status: ChainInfo, + status: ChainInfo, } -impl UnconfirmedPeer { - /// Create an unconfirmed peer. Returns `None` if we cannot make a - /// common ancestors request for some reason. The event context provided - /// should be associated with this peer. - fn create(ctx: &EventContext, chain_info: ChainInfo, best_num: u64) -> Result { - let this = ctx.peer(); - - if ctx.max_requests(this, request::Kind::Headers) < UNCONFIRMED_SEARCH_SIZE as usize { - return Err(Error::UselessPeer); // a peer which allows this few header reqs isn't useful anyway. +impl Peer { + /// Create a peer object. 
+ fn new(chain_info: ChainInfo) -> Self { + Peer { + first_status: chain_info.clone(), + status: chain_info.clone(), } - - let req_id = try!(ctx.request_from(this, request::Request::Headers(request::Headers { - start: best_num.into(), - max: ::std::cmp::min(best_num, UNCONFIRMED_SEARCH_SIZE) as usize, - skip: 0, - reverse: true, - }))); - - Ok(UnconfirmedPeer { - chain_info: chain_info, - last_batched: best_num, - req_id: req_id, - }) } - - /// Feed in the result of the headers query. If an error occurs, the request - /// is malformed. If a common (hash, number) pair is returned then this is - /// the common ancestor. If not, then another request for headers has been - /// dispatched. - fn check_batch(&mut self, ctx: &EventContext, client: &Client, headers: &[Bytes]) -> Result, Error> { - use ethcore::block_status::BlockStatus; - - let mut cur_num = self.last_batched; - let chain_info = client.chain_info(); - for raw_header in headers { - let header: Header = try!(UntrustedRlp::new(&raw_header).as_val().map_err(|_| Error::MalformedResponse)); - if header.number() != cur_num { return Err(Error::MalformedResponse) } - - if chain_info.first_block_number.map_or(false, |f| header.number() < f) { - return Err(Error::PrehistoricAncestor); - } - - let hash = header.hash(); - - match client.status(&hash) { - BlockStatus::InChain => return Ok(Some(hash)), - BlockStatus::Bad => return Err(Error::BadBlock), - BlockStatus::Unknown | BlockStatus::Queued => {}, - } - - cur_num -= 1; - } - let this = ctx.peer(); - - if cur_num == 0 { - trace!(target: "sync", "Peer {}: genesis as common ancestor", this); - return Ok(Some(chain_info.genesis_hash)); - } - - // nothing found, nothing prehistoric. - // send the next request. 
- let req_id = try!(ctx.request_from(this, request::Request::Headers(request::Headers { - start: cur_num.into(), - max: ::std::cmp::min(cur_num, UNCONFIRMED_SEARCH_SIZE) as usize, - skip: 0, - reverse: true, - }))); - - self.req_id = req_id; - - Ok(None) - } -} - -/// Connected peers as state machines. -/// -/// On connection, we'll search for a common ancestor to their chain. -/// Once that's found, we can sync to this peer. -enum Peer { - // Searching for a common ancestor. - SearchCommon(UnconfirmedPeer), - // A peer we can sync to. - SyncTo(ChainInfo), } /// Light client synchronization manager. See module docs for more details. -pub struct LightSync { +pub struct LightSync { best_seen: Mutex>, // best seen block on the network. peers: RwLock>>, // peers which are relevant to synchronization. - client: Arc, + client: Arc, } -impl Handler for LightSync { +impl Handler for LightSync { fn on_connect(&self, ctx: &EventContext, status: &Status, capabilities: &Capabilities) { let our_best = self.client.chain_info().best_block_number; @@ -195,34 +117,12 @@ impl Handler for LightSync { head_num: status.head_num, }; - trace!(target: "sync", "Beginning search for common ancestor with peer {}", ctx.peer()); - let unconfirmed = match UnconfirmedPeer::create(ctx, chain_info, our_best) { - Ok(unconfirmed) => unconfirmed, - Err(e) => { - trace!(target: "sync", "Failed to create unconfirmed peer: {}", e); - return; - } - }; - - self.peers.write().insert(ctx.peer(), Mutex::new(Peer::SearchCommon(unconfirmed))); + self.peers.write().insert(ctx.peer(), Mutex::new(Peer::new(chain_info))); } fn on_disconnect(&self, ctx: &EventContext, _unfulfilled: &[ReqId]) { - let peer = ctx.peer(); + let peer_id = ctx.peer(); - match self.peers.write().remove(&peer).map(|peer_data| peer_data.into_inner()) { - None => {} - Some(Peer::SearchCommon(_)) => { - // unfulfilled requests are unimportant since they are only - // relevant to searching for a common ancestor. 
- trace!(target: "sync", "Unconfirmed peer {} disconnect", ctx.peer()); - } - Some(Peer::SyncTo(_)) => { - trace!(target: "sync", "") - // in this case we may want to reasssign all unfulfilled requests. - // (probably just by pushing them back into the current downloader's priority queue.) - } - } } fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) { @@ -234,55 +134,20 @@ impl Handler for LightSync { trace!(target: "sync", "Announcement from peer {}: new chain head {:?}, reorg depth {}", ctx.peer(), (announcement.head_hash, announcement.head_num), announcement.reorg_depth); - } fn on_block_headers(&self, ctx: &EventContext, req_id: ReqId, headers: &[Bytes]) { - let peer = ctx.peer(); - match self.peers.read().get(&peer) { - None => {}, - Some(peer_data) => { - let mut peer_data = peer_data.lock(); - let new_peer = match *peer_data { - Peer::SearchCommon(ref mut unconfirmed) => { - if unconfirmed.req_id != req_id { - trace!(target: "sync", "Ignoring irrelevant response from peer {}", peer); - return; - } - match unconfirmed.check_batch(ctx, &self.client, headers) { - Ok(None) => { - trace!(target: "sync", "Continuing to search for common ancestor with peer {}", peer); - return; - } - Ok(Some(common)) => { - trace!(target: "sync", "Found common ancestor {} with peer {}", peer, common); - let chain_info = unconfirmed.chain_info.clone(); - Peer::SyncTo(chain_info) - } - Err(e) => { - trace!(target: "sync", "Failed to find common ancestor with peer {}: {}", peer, e); - return; - } - } - } - Peer::SyncTo(_) => { - trace!(target: "sync", "Incoming response from peer being synced to."); - }, - }; - - *peer_data = new_peer; - } - } + let peer_id = ctx.peer(); } } // public API -impl LightSync { +impl LightSync { /// Create a new instance of `LightSync`. /// /// This won't do anything until registered as a handler /// so it can act on events. 
- pub fn new(client: Arc) -> Self { + pub fn new(client: Arc) -> Self { LightSync { best_seen: Mutex::new(None), peers: RwLock::new(HashMap::new()), From 5d8bfd875842e241b0d50469f6ad9ad502113016 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 15 Dec 2016 13:05:38 +0100 Subject: [PATCH 14/42] handle responses for round scaffold and frames --- sync/src/light_sync/mod.rs | 15 ++-- sync/src/light_sync/sync_round.rs | 113 ++++++++++++++++++++++++------ 2 files changed, 104 insertions(+), 24 deletions(-) diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 6526aacd5..ff7fd149b 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -40,13 +40,18 @@ use util::{Bytes, U256, H256, Mutex, RwLock}; mod response; mod sync_round; +/// Light synchronization errors. #[derive(Debug)] -enum Error { - // Peer returned a malformed response. +pub enum Error { + /// Peer returned a malformed response. MalformedResponse(response::BasicError), - // Peer returned known bad block. + /// Peer returned known bad block. BadBlock, - // Protocol-level error. + /// Peer returned empty response. + EmptyResponse, + /// Peer returned a subchain with a broken parent connection. + ParentMismatch, + /// Protocol-level error. ProtocolLevel(NetError), } @@ -67,6 +72,8 @@ impl fmt::Display for Error { match *self { Error::MalformedResponse(ref err) => write!(f, "{}", err), Error::BadBlock => write!(f, "Block known to be bad"), + Error::EmptyResponse => write!(f, "Peer returned empty response."), + Error::ParentMismatch => write!(f, "Peer returned unknown block in place of parent."), Error::ProtocolLevel(ref err) => write!(f, "Protocol level error: {}", err), } } diff --git a/sync/src/light_sync/sync_round.rs b/sync/src/light_sync/sync_round.rs index 905e8354f..42a543ba3 100644 --- a/sync/src/light_sync/sync_round.rs +++ b/sync/src/light_sync/sync_round.rs @@ -16,7 +16,8 @@ //! Header download state machine. 
-use std::collections::{HashMap, VecDeque}; +use std::cmp::Ordering; +use std::collections::{BinaryHeap, HashMap, VecDeque}; use std::mem; use ethcore::header::Header; @@ -30,7 +31,7 @@ use rlp::{UntrustedRlp, View}; use util::{Bytes, H256, Mutex}; use super::{Error, Peer}; -use super::response::{self, Constraint}; +use super::response; // amount of blocks between each scaffold entry. // TODO: move these into paraeters for `RoundStart::new`? @@ -51,30 +52,53 @@ pub enum AbortReason { NoResponses, } -// A request for headers with a known starting header +// A request for headers with a known starting header hash. // and a known parent hash for the last block. -struct Request { - headers: HeadersRequest, - end_parent: H256, +#[derive(PartialEq, Eq)] +struct SubchainRequest { + subchain_parent: (u64, H256), + headers_request: HeadersRequest, + subchain_end: (u64, H256), + downloaded: VecDeque
, } +// ordered by subchain parent number so pending requests towards the +// front of the round are dispatched first. +impl PartialOrd for SubchainRequest { + fn partial_cmp(&self, other: &Self) -> Option { + self.subchain_parent.0.partial_cmp(&other.subchain_parent.0) + } +} + +impl Ord for SubchainRequest { + fn cmp(&self, other: &Self) -> Ordering { + self.subchain_parent.0.cmp(&other.subchain_parent.0) + } +} + +/// Manages downloading of interior blocks of a sparse header chain. pub struct Fetcher { - sparse: Vec
, // sparse header chain. - requests: VecDeque, - pending: HashMap, + sparse: VecDeque
, // sparse header chain. + requests: BinaryHeap, + complete_requests: HashMap, + pending: HashMap, } impl Fetcher { // Produce a new fetcher given a sparse headerchain, in ascending order. // The headers must be valid RLP at this point. fn new(sparse_headers: Vec
) -> Self { - let mut requests = VecDeque::with_capacity(sparse_headers.len() - 1); + let mut requests = BinaryHeap::with_capacity(sparse_headers.len() - 1); + for pair in sparse_headers.windows(2) { let low_rung = &pair[0]; let high_rung = &pair[1]; let diff = high_rung.number() - low_rung.number(); - if diff < 2 { continue } // these headers are already adjacent. + + // should never happen as long as we verify the gaps + // gotten from SyncRound::Start + if diff < 2 { continue } let needed_headers = HeadersRequest { start: high_rung.parent_hash().clone().into(), @@ -83,21 +107,67 @@ impl Fetcher { reverse: true, }; - requests.push_back(Request { - headers: needed_headers, - end_parent: low_rung.hash(), + requests.push(SubchainRequest { + headers_request: needed_headers, + subchain_end: (high_rung.number() - 1, *high_rung.parent_hash()), + downloaded: VecDeque::new(), + subchain_parent: (low_rung.number(), low_rung.hash()), }); } Fetcher { - sparse: sparse_headers, + sparse: sparse_headers.into(), requests: requests, + complete_requests: HashMap::new(), pending: HashMap::new(), } } - fn process_response(self, req_id: ReqId, headers: &[Bytes]) -> (SyncRound, Result<(), Error>) { - unimplemented!() + fn process_response(mut self, req_id: ReqId, headers: &[Bytes]) -> (SyncRound, Result<(), Error>) { + let mut request = match self.pending.remove(&req_id) { + Some(request) => request, + None => return (SyncRound::Fetch(self), Ok(())), + }; + + if headers.len() == 0 { + return (SyncRound::Fetch(self), Err(Error::EmptyResponse)); + } + + match response::decode_and_verify(headers, &request.headers_request) { + Err(e) => { + // TODO: track number of attempts per request. 
+ self.requests.push(request); + (SyncRound::Fetch(self), Err(e).map_err(Into::into)) + } + Ok(headers) => { + let mut parent_hash = None; + for header in headers { + if parent_hash.as_ref().map_or(false, |h| h != &header.hash()) { + self.requests.push(request); + return (SyncRound::Fetch(self), Err(Error::ParentMismatch)); + } + + // incrementally update the frame request as we go so we can + // return at any time in the loop. + parent_hash = Some(header.parent_hash().clone()); + request.headers_request.start = header.parent_hash().clone().into(); + request.headers_request.max -= 1; + + request.downloaded.push_front(header); + } + + let subchain_parent = request.subchain_parent.1; + + // TODO: check subchain parent and punish peers who did framing + // if it's inaccurate. + if request.headers_request.max == 0 { + self.complete_requests.insert(subchain_parent, request); + } + + // state transition not triggered until drain is finished. + (SyncRound::Fetch(self), Ok(())) + } + } } } @@ -139,8 +209,7 @@ impl RoundStart { trace!(target: "sync", "Beginning fetch of blocks between {} sparse headers", self.sparse_headers.len()); - let fetcher = Fetcher::new(self.sparse_headers); - return (SyncRound::Fetch(fetcher), Ok(())); + return (SyncRound::Fetch(Fetcher::new(self.sparse_headers)), Ok(())); } Ok(()) @@ -149,7 +218,11 @@ impl RoundStart { }; if self.attempt >= SCAFFOLD_ATTEMPTS { - (SyncRound::Abort(AbortReason::NoResponses), res.map_err(Into::into)) + if self.sparse_headers.len() > 1 { + (SyncRound::Fetch(Fetcher::new(self.sparse_headers)), res.map_err(Into::into)) + } else { + (SyncRound::Abort(AbortReason::NoResponses), res.map_err(Into::into)) + } } else { (SyncRound::Start(self), res.map_err(Into::into)) } From ec88a992e373fc8f77f37173aff4d1cc11115abe Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 15 Dec 2016 15:50:36 +0100 Subject: [PATCH 15/42] provide response context to response handler --- sync/src/light_sync/mod.rs | 39 ----------- 
sync/src/light_sync/sync_round.rs | 108 ++++++++++++++++++++---------- 2 files changed, 72 insertions(+), 75 deletions(-) diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index ff7fd149b..243afa6f7 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -40,45 +40,6 @@ use util::{Bytes, U256, H256, Mutex, RwLock}; mod response; mod sync_round; -/// Light synchronization errors. -#[derive(Debug)] -pub enum Error { - /// Peer returned a malformed response. - MalformedResponse(response::BasicError), - /// Peer returned known bad block. - BadBlock, - /// Peer returned empty response. - EmptyResponse, - /// Peer returned a subchain with a broken parent connection. - ParentMismatch, - /// Protocol-level error. - ProtocolLevel(NetError), -} - -impl From for Error { - fn from(net_error: NetError) -> Self { - Error::ProtocolLevel(net_error) - } -} - -impl From for Error { - fn from(err: response::BasicError) -> Self { - Error::MalformedResponse(err) - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Error::MalformedResponse(ref err) => write!(f, "{}", err), - Error::BadBlock => write!(f, "Block known to be bad"), - Error::EmptyResponse => write!(f, "Peer returned empty response."), - Error::ParentMismatch => write!(f, "Peer returned unknown block in place of parent."), - Error::ProtocolLevel(ref err) => write!(f, "Protocol level error: {}", err), - } - } -} - /// Peer chain info. #[derive(Clone)] struct ChainInfo { diff --git a/sync/src/light_sync/sync_round.rs b/sync/src/light_sync/sync_round.rs index 42a543ba3..a3584c34c 100644 --- a/sync/src/light_sync/sync_round.rs +++ b/sync/src/light_sync/sync_round.rs @@ -17,7 +17,7 @@ //! Header download state machine. 
use std::cmp::Ordering; -use std::collections::{BinaryHeap, HashMap, VecDeque}; +use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque}; use std::mem; use ethcore::header::Header; @@ -30,7 +30,6 @@ use network::PeerId; use rlp::{UntrustedRlp, View}; use util::{Bytes, H256, Mutex}; -use super::{Error, Peer}; use super::response; // amount of blocks between each scaffold entry. @@ -43,11 +42,23 @@ const ROUND_FRAMES: usize = 255; // number of attempts to make to get a full scaffold for a sync round. const SCAFFOLD_ATTEMPTS: usize = 3; +/// Context for a headers response. +pub trait ResponseContext { + /// Get the peer who sent this response. + fn responder(&self) -> PeerId; + /// Get the request ID this response corresponds to. + fn req_id(&self) -> ReqId; + /// Get the (unverified) response data. + fn data(&self) -> &[Bytes]; + /// Punish the responder. + fn punish_responder(&self); +} + /// Reasons for sync round abort. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone)] pub enum AbortReason { - /// Bad chain downloaded. - BadChain, + /// Bad sparse header chain along with a list of peers who contributed to it. + BadScaffold(Vec), /// No incoming data. NoResponses, } @@ -82,12 +93,14 @@ pub struct Fetcher { requests: BinaryHeap, complete_requests: HashMap, pending: HashMap, + scaffold_contributors: Vec, } impl Fetcher { - // Produce a new fetcher given a sparse headerchain, in ascending order. + // Produce a new fetcher given a sparse headerchain, in ascending order along + // with a list of peers who helped produce the chain. // The headers must be valid RLP at this point. - fn new(sparse_headers: Vec
) -> Self { + fn new(sparse_headers: Vec
, contributors: Vec) -> Self { let mut requests = BinaryHeap::with_capacity(sparse_headers.len() - 1); for pair in sparse_headers.windows(2) { @@ -120,31 +133,43 @@ impl Fetcher { requests: requests, complete_requests: HashMap::new(), pending: HashMap::new(), + scaffold_contributors: contributors, } } - fn process_response(mut self, req_id: ReqId, headers: &[Bytes]) -> (SyncRound, Result<(), Error>) { - let mut request = match self.pending.remove(&req_id) { + fn process_response(mut self, ctx: &R) -> SyncRound { + let mut request = match self.pending.remove(&ctx.req_id()) { Some(request) => request, - None => return (SyncRound::Fetch(self), Ok(())), + None => return SyncRound::Fetch(self), }; + let headers = ctx.data(); + if headers.len() == 0 { - return (SyncRound::Fetch(self), Err(Error::EmptyResponse)); + trace!(target: "sync", "Punishing peer {} for empty response", ctx.responder()); + ctx.punish_responder(); + return SyncRound::Fetch(self); } match response::decode_and_verify(headers, &request.headers_request) { Err(e) => { - // TODO: track number of attempts per request. + trace!(target: "sync", "Punishing peer {} for invalid response ({})", ctx.responder(), e); + ctx.punish_responder(); + + // TODO: track number of attempts per request, + // abort if failure rate too high. self.requests.push(request); - (SyncRound::Fetch(self), Err(e).map_err(Into::into)) + SyncRound::Fetch(self) } Ok(headers) => { let mut parent_hash = None; for header in headers { if parent_hash.as_ref().map_or(false, |h| h != &header.hash()) { + trace!(target: "sync", "Punishing peer {} for parent mismatch", ctx.responder()); + ctx.punish_responder(); + self.requests.push(request); - return (SyncRound::Fetch(self), Err(Error::ParentMismatch)); + return SyncRound::Fetch(self); } // incrementally update the frame request as we go so we can @@ -161,23 +186,29 @@ impl Fetcher { // TODO: check subchain parent and punish peers who did framing // if it's inaccurate. 
if request.headers_request.max == 0 { + if parent_hash.map_or(true, |hash| hash != subchain_parent) { + let abort = AbortReason::BadScaffold(self.scaffold_contributors); + return SyncRound::Abort(abort); + } + self.complete_requests.insert(subchain_parent, request); } // state transition not triggered until drain is finished. - (SyncRound::Fetch(self), Ok(())) + (SyncRound::Fetch(self)) } } } } -// Round started: get stepped header chain. -// from a start block with number X we request 256 headers stepped by 256 from -// block X + 1. -struct RoundStart { +/// Round started: get stepped header chain. +/// from a start block with number X we request 256 headers stepped by 256 from +/// block X + 1. +pub struct RoundStart { start_block: (u64, H256), pending_req: Option<(ReqId, HeadersRequest)>, sparse_headers: Vec
, + contributors: HashSet, attempt: usize, } @@ -187,44 +218,49 @@ impl RoundStart { start_block: start.clone(), pending_req: None, sparse_headers: Vec::new(), + contributors: HashSet::new(), attempt: 0, } } - fn process_response(mut self, req_id: ReqId, headers: &[Bytes]) -> (SyncRound, Result<(), Error>) { + fn process_response(mut self, ctx: &R) -> SyncRound { let req = match self.pending_req.take() { - Some((id, ref req)) if req_id == id => { req.clone() } + Some((id, ref req)) if ctx.req_id() == id => { req.clone() } other => { self.pending_req = other; - return (SyncRound::Start(self), Ok(())) + return SyncRound::Start(self); } }; self.attempt += 1; - let res = match response::decode_and_verify(headers, &req) { + match response::decode_and_verify(ctx.data(), &req) { Ok(headers) => { + self.contributors.insert(ctx.responder()); self.sparse_headers.extend(headers); if self.sparse_headers.len() == ROUND_FRAMES + 1 { trace!(target: "sync", "Beginning fetch of blocks between {} sparse headers", self.sparse_headers.len()); - return (SyncRound::Fetch(Fetcher::new(self.sparse_headers)), Ok(())); + let fetcher = Fetcher::new(self.sparse_headers, self.contributors.into_iter().collect()); + return SyncRound::Fetch(fetcher); } - - Ok(()) } - Err(e) => Err(e), + Err(e) => { + trace!(target: "sync", "Punishing peer {} for malformed response ({})", ctx.responder(), e); + ctx.punish_responder(); + } }; if self.attempt >= SCAFFOLD_ATTEMPTS { if self.sparse_headers.len() > 1 { - (SyncRound::Fetch(Fetcher::new(self.sparse_headers)), res.map_err(Into::into)) + let fetcher = Fetcher::new(self.sparse_headers, self.contributors.into_iter().collect()); + SyncRound::Fetch(fetcher) } else { - (SyncRound::Abort(AbortReason::NoResponses), res.map_err(Into::into)) + SyncRound::Abort(AbortReason::NoResponses) } } else { - (SyncRound::Start(self), res.map_err(Into::into)) + SyncRound::Start(self) } } } @@ -247,23 +283,23 @@ impl SyncRound { } /// Process an answer to a request. 
Unknown requests will be ignored. - pub fn process_response(self, req_id: ReqId, headers: &[Bytes]) -> (Self, Result<(), Error>) { + pub fn process_response(self, ctx: &R) -> Self { match self { - SyncRound::Start(round_start) => round_start.process_response(req_id, headers), - SyncRound::Fetch(fetcher) => fetcher.process_response(req_id, headers), - other => (other, Ok(())), + SyncRound::Start(round_start) => round_start.process_response(ctx), + SyncRound::Fetch(fetcher) => fetcher.process_response(ctx), + other => other, } } /// Return unfulfilled requests from disconnected peer. Unknown requests will be ignored. - pub fn requests_abandoned(self, abandoned: &[ReqId]) -> (Self, Result<(), Error>) { + pub fn requests_abandoned(self, abandoned: &[ReqId]) -> Self { unimplemented!() } /// Dispatch pending requests. The dispatcher provided will attempt to /// find a suitable peer to serve the request. // TODO: have dispatcher take capabilities argument? - pub fn dispatch_requests(self, dispatcher: D) -> (Self, Result<(), Error>) + pub fn dispatch_requests(self, dispatcher: D) -> Self where D: Fn(HeadersRequest) -> Option { unimplemented!() From 71e96aca10343d5a328b0a17b3265fe848786991 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 15 Dec 2016 16:19:28 +0100 Subject: [PATCH 16/42] handle abandoned requests --- sync/src/light_sync/sync_round.rs | 59 ++++++++++++++++++++++++------- 1 file changed, 47 insertions(+), 12 deletions(-) diff --git a/sync/src/light_sync/sync_round.rs b/sync/src/light_sync/sync_round.rs index a3584c34c..83b27b046 100644 --- a/sync/src/light_sync/sync_round.rs +++ b/sync/src/light_sync/sync_round.rs @@ -183,8 +183,6 @@ impl Fetcher { let subchain_parent = request.subchain_parent.1; - // TODO: check subchain parent and punish peers who did framing - // if it's inaccurate. 
if request.headers_request.max == 0 { if parent_hash.map_or(true, |hash| hash != subchain_parent) { let abort = AbortReason::BadScaffold(self.scaffold_contributors); @@ -199,6 +197,18 @@ impl Fetcher { } } } + + fn requests_abandoned(mut self, abandoned: &[ReqId]) -> SyncRound { + for abandoned in abandoned { + match self.pending.remove(abandoned) { + None => {}, + Some(req) => self.requests.push(req), + } + } + + // TODO: track failure rate and potentially abort. + SyncRound::Fetch(self) + } } /// Round started: get stepped header chain. @@ -223,6 +233,22 @@ impl RoundStart { } } + // called on failed attempt. may trigger a transition. + fn failed_attempt(mut self) -> SyncRound { + self.attempt += 1; + + if self.attempt >= SCAFFOLD_ATTEMPTS { + if self.sparse_headers.len() > 1 { + let fetcher = Fetcher::new(self.sparse_headers, self.contributors.into_iter().collect()); + SyncRound::Fetch(fetcher) + } else { + SyncRound::Abort(AbortReason::NoResponses) + } + } else { + SyncRound::Start(self) + } + } + fn process_response(mut self, ctx: &R) -> SyncRound { let req = match self.pending_req.take() { Some((id, ref req)) if ctx.req_id() == id => { req.clone() } @@ -232,7 +258,6 @@ impl RoundStart { } }; - self.attempt += 1; match response::decode_and_verify(ctx.data(), &req) { Ok(headers) => { self.contributors.insert(ctx.responder()); @@ -252,15 +277,21 @@ impl RoundStart { } }; - if self.attempt >= SCAFFOLD_ATTEMPTS { - if self.sparse_headers.len() > 1 { - let fetcher = Fetcher::new(self.sparse_headers, self.contributors.into_iter().collect()); - SyncRound::Fetch(fetcher) - } else { - SyncRound::Abort(AbortReason::NoResponses) + self.failed_attempt() + } + + fn requests_abandoned(mut self, abandoned: &[ReqId]) -> SyncRound { + match self.pending_req.take() { + Some((id, req)) => { + if abandoned.iter().any(|r| r == &id) { + self.pending_req = None; + self.failed_attempt() + } else { + self.pending_req = Some((id, req)); + SyncRound::Start(self) + } } - } else { - 
SyncRound::Start(self) + None => SyncRound::Start(self), } } } @@ -293,7 +324,11 @@ impl SyncRound { /// Return unfulfilled requests from disconnected peer. Unknown requests will be ignored. pub fn requests_abandoned(self, abandoned: &[ReqId]) -> Self { - unimplemented!() + match self { + SyncRound::Start(round_start) => round_start.requests_abandoned(abandoned), + SyncRound::Fetch(fetcher) => fetcher.requests_abandoned(abandoned), + other => other, + } } /// Dispatch pending requests. The dispatcher provided will attempt to From f776f480238ef92f7813e03b0b064c0427df18d2 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 15 Dec 2016 17:33:25 +0100 Subject: [PATCH 17/42] drain prepared headers from sync round --- sync/src/light_sync/sync_round.rs | 139 +++++++++++++++++++++++++++--- 1 file changed, 127 insertions(+), 12 deletions(-) diff --git a/sync/src/light_sync/sync_round.rs b/sync/src/light_sync/sync_round.rs index 83b27b046..0442cc5ce 100644 --- a/sync/src/light_sync/sync_round.rs +++ b/sync/src/light_sync/sync_round.rs @@ -33,8 +33,8 @@ use util::{Bytes, H256, Mutex}; use super::response; // amount of blocks between each scaffold entry. -// TODO: move these into paraeters for `RoundStart::new`? -const ROUND_SKIP: usize = 255; +// TODO: move these into parameters for `RoundStart::new`? +const ROUND_SKIP: u64 = 255; // amount of scaffold frames: these are the blank spaces in "X___X___X" const ROUND_FRAMES: usize = 255; @@ -94,13 +94,16 @@ pub struct Fetcher { complete_requests: HashMap, pending: HashMap, scaffold_contributors: Vec, + ready: VecDeque
, + end: (u64, H256), } impl Fetcher { // Produce a new fetcher given a sparse headerchain, in ascending order along // with a list of peers who helped produce the chain. - // The headers must be valid RLP at this point. - fn new(sparse_headers: Vec
, contributors: Vec) -> Self { + // The headers must be valid RLP at this point and must have a consistent + // non-zero gap between them. Will abort the round if found wrong. + fn new(sparse_headers: Vec
, contributors: Vec) -> SyncRound { let mut requests = BinaryHeap::with_capacity(sparse_headers.len() - 1); for pair in sparse_headers.windows(2) { @@ -128,12 +131,44 @@ impl Fetcher { }); } - Fetcher { + let end = match sparse_headers.last().map(|h| (h.number(), h.hash())) { + Some(end) => end, + None => return SyncRound::abort(AbortReason::BadScaffold(contributors)), + }; + + SyncRound::Fetch(Fetcher { sparse: sparse_headers.into(), requests: requests, complete_requests: HashMap::new(), pending: HashMap::new(), scaffold_contributors: contributors, + ready: VecDeque::new(), + end: end, + }) + } + + // collect complete requests and their subchain from the sparse header chain + // into the ready set in order. + fn collect_ready(&mut self) { + loop { + let start_hash = match self.sparse.front() { + Some(first) => first.hash(), + None => break, + }; + + match self.complete_requests.remove(&start_hash) { + None => break, + Some(complete_req) => { + self.ready.push_back(self.sparse.pop_front().expect("first known to exist; qed")); + self.ready.extend(complete_req.downloaded); + } + } + } + + // frames are between two sparse headers and keyed by subchain parent, so the last + // remaining will be the last header. + if self.sparse.len() == 1 { + self.ready.push_back(self.sparse.pop_back().expect("sparse known to have one entry; qed")) } } @@ -209,6 +244,40 @@ impl Fetcher { // TODO: track failure rate and potentially abort. 
SyncRound::Fetch(self) } + + fn dispatch_requests(mut self, dispatcher: D) -> SyncRound + where D: Fn(HeadersRequest) -> Option + { + while let Some(pending_req) = self.requests.pop() { + match dispatcher(pending_req.headers_request.clone()) { + Some(req_id) => { + trace!(target: "sync", "Assigned request for subchain ({} -> {})", + pending_req.subchain_parent.0 + 1, pending_req.subchain_end.0); + + self.pending.insert(req_id, pending_req); + } + None => { + self.requests.push(pending_req); + break; + } + } + } + + SyncRound::Fetch(self) + } + + fn drain(mut self, headers: &mut Vec
, max: usize) -> SyncRound { + self.collect_ready(); + + let max = ::std::cmp::min(max, self.ready.len()); + headers.extend(self.ready.drain(0..max)); + + if self.sparse.is_empty() && self.ready.is_empty() { + SyncRound::Start(RoundStart::new(self.end)) + } else { + SyncRound::Fetch(self) + } + } } /// Round started: get stepped header chain. @@ -233,14 +302,16 @@ impl RoundStart { } } - // called on failed attempt. may trigger a transition. + // called on failed attempt. may trigger a transition after a number of attempts. + // a failed attempt is defined as: + // - any time we try to make a request to a peer and fail + // - any time a peer returns invalid or incomplete response fn failed_attempt(mut self) -> SyncRound { self.attempt += 1; if self.attempt >= SCAFFOLD_ATTEMPTS { if self.sparse_headers.len() > 1 { - let fetcher = Fetcher::new(self.sparse_headers, self.contributors.into_iter().collect()); - SyncRound::Fetch(fetcher) + Fetcher::new(self.sparse_headers, self.contributors.into_iter().collect()) } else { SyncRound::Abort(AbortReason::NoResponses) } @@ -260,6 +331,12 @@ impl RoundStart { match response::decode_and_verify(ctx.data(), &req) { Ok(headers) => { + if self.sparse_headers.len() == 0 + && headers.get(0).map_or(false, |x| x.parent_hash() != &self.start_block.1) { + trace!(target: "sync", "Wrong parent for first header in round"); + ctx.punish_responder(); // or should we reset? 
+ } + self.contributors.insert(ctx.responder()); self.sparse_headers.extend(headers); @@ -267,8 +344,7 @@ impl RoundStart { trace!(target: "sync", "Beginning fetch of blocks between {} sparse headers", self.sparse_headers.len()); - let fetcher = Fetcher::new(self.sparse_headers, self.contributors.into_iter().collect()); - return SyncRound::Fetch(fetcher); + return Fetcher::new(self.sparse_headers, self.contributors.into_iter().collect()); } } Err(e) => { @@ -294,6 +370,31 @@ impl RoundStart { None => SyncRound::Start(self), } } + + fn dispatch_requests(mut self, dispatcher: D) -> SyncRound + where D: Fn(HeadersRequest) -> Option + { + if self.pending_req.is_none() { + + // beginning offset + first block expected after last header we have. + let start = (self.start_block.0 + 1) + + self.sparse_headers.len() as u64 * (ROUND_SKIP + 1); + + let headers_request = HeadersRequest { + start: start.into(), + max: (ROUND_FRAMES - 1) - self.sparse_headers.len(), + skip: ROUND_SKIP, + reverse: false, + }; + + match dispatcher(headers_request.clone()) { + Some(req_id) => self.pending_req = Some((req_id, headers_request)), + None => return self.failed_attempt(), + } + } + + SyncRound::Start(self) + } } /// Sync round state machine. @@ -333,10 +434,24 @@ impl SyncRound { /// Dispatch pending requests. The dispatcher provided will attempt to /// find a suitable peer to serve the request. - // TODO: have dispatcher take capabilities argument? + // TODO: have dispatcher take capabilities argument? and return an error as + // to why no suitable peer can be found? 
(no buffer, no chain heads that high, etc) pub fn dispatch_requests(self, dispatcher: D) -> Self where D: Fn(HeadersRequest) -> Option { - unimplemented!() + match self { + SyncRound::Start(round_start) => round_start.dispatch_requests(dispatcher), + SyncRound::Fetch(fetcher) => fetcher.dispatch_requests(dispatcher), + other => other, + } + } + + /// Drain up to a maximum number of headers (continuous, starting with a child of + /// the round start block) from the round, starting a new one once finished. + pub fn drain(self, v: &mut Vec
, max: usize) -> Self { + match self { + SyncRound::Fetch(fetcher) => fetcher.drain(v, max), + other => other, + } } } From 5346539ef83cfa2b3720265c0fcdddae9c6dc761 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 15 Dec 2016 18:45:11 +0100 Subject: [PATCH 18/42] minimal header import and client service --- ethcore/light/src/client/mod.rs | 29 +++++++++++- ethcore/light/src/client/service.rs | 73 +++++++++++++++++++++++++++++ sync/src/light_sync/sync_round.rs | 3 +- 3 files changed, 102 insertions(+), 3 deletions(-) create mode 100644 ethcore/light/src/client/service.rs diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 4b8a37366..d51f2c484 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -35,13 +35,18 @@ use request; use self::header_chain::HeaderChain; +pub use self::service::Service; + pub mod cht; + mod header_chain; +mod service; /// Configuration for the light client. #[derive(Debug, Default, Clone)] pub struct Config { - queue: queue::Config, + /// Verification queue config. + pub queue: queue::Config, } /// Trait for interacting with the header chain abstractly. @@ -139,6 +144,28 @@ impl Client { pub fn cht_root(&self, i: usize) -> Option { self.chain.cht_root(i) } + + /// Import a set of pre-verified headers from the queue. 
+ pub fn import_verified(&self) { + const MAX: usize = 256; + + let mut bad = Vec::new(); + let mut good = Vec::new(); + for verified_header in self.queue.drain(MAX) { + let hash = verified_header.hash(); + + match self.chain.insert(::rlp::encode(&verified_header).to_vec()) { + Ok(()) => good.push(hash), + Err(e) => { + debug!(target: "client", "Error importing header {}: {}", hash, e); + bad.push(hash); + } + } + } + + self.queue.mark_as_bad(&bad); + self.queue.mark_as_good(&good); + } } impl LightChainClient for Client { diff --git a/ethcore/light/src/client/service.rs b/ethcore/light/src/client/service.rs new file mode 100644 index 000000000..79c53bac6 --- /dev/null +++ b/ethcore/light/src/client/service.rs @@ -0,0 +1,73 @@ +// Copyright 2015, 2016 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Minimal IO service for light client. +//! Just handles block import messages and passes them to the client. + +use std::sync::Arc; + +use ethcore::service::ClientIoMessage; +use ethcore::spec::Spec; +use io::{IoContext, IoError, IoHandler, IoService}; + +use super::{Client, Config as ClientConfig}; + +/// Light client service. +pub struct Service { + client: Arc, + _io_service: IoService, +} + +impl Service { + /// Start the service: initialize I/O workers and client itself. 
+ pub fn start(config: ClientConfig, spec: &Spec) -> Result { + let io_service = try!(IoService::::start()); + let client = Arc::new(Client::new(config, spec, io_service.channel())); + try!(io_service.register_handler(Arc::new(ImportBlocks(client.clone())))); + + Ok(Service { + client: client, + _io_service: io_service, + }) + } + + /// Get a handle to the client. + pub fn client(&self) -> &Arc { + &self.client + } +} + +struct ImportBlocks(Arc); + +impl IoHandler for ImportBlocks { + fn message(&self, _io: &IoContext, message: &ClientIoMessage) { + if let ClientIoMessage::BlockVerified = *message { + self.0.import_verified(); + } + } +} + +#[cfg(test)] +mod tests { + use super::Service; + use ethcore::spec::Spec; + + #[test] + fn it_works() { + let spec = Spec::new_test(); + Service::start(Default::default(), &spec).unwrap(); + } +} diff --git a/sync/src/light_sync/sync_round.rs b/sync/src/light_sync/sync_round.rs index 0442cc5ce..5f121d00c 100644 --- a/sync/src/light_sync/sync_round.rs +++ b/sync/src/light_sync/sync_round.rs @@ -225,6 +225,7 @@ impl Fetcher { } self.complete_requests.insert(subchain_parent, request); + self.collect_ready(); } // state transition not triggered until drain is finished. @@ -267,8 +268,6 @@ impl Fetcher { } fn drain(mut self, headers: &mut Vec
, max: usize) -> SyncRound { - self.collect_ready(); - let max = ::std::cmp::min(max, self.ready.len()); headers.extend(self.ready.drain(0..max)); From 72f7391551f1615d2f721f3f144ff4cc7cbbaa97 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 15 Dec 2016 19:25:52 +0100 Subject: [PATCH 19/42] add BasicContext trait for handler ticking --- ethcore/light/src/net/context.rs | 73 ++++++++++++++++++++++++-------- ethcore/light/src/net/mod.rs | 20 +++++++-- 2 files changed, 73 insertions(+), 20 deletions(-) diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index 7f55dd229..74d8ad811 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -77,12 +77,8 @@ impl<'a> IoContext for NetworkContext<'a> { } } -/// Context for a protocol event. -pub trait EventContext { - /// Get the peer relevant to the event e.g. message sender, - /// disconnected/connected peer. - fn peer(&self) -> PeerId; - +/// Basic context for a the protocol. +pub trait BasicContext { /// Returns the relevant's peer persistent Id (aka NodeId). fn persistent_peer_id(&self, peer: PeerId) -> Option; @@ -104,22 +100,22 @@ pub trait EventContext { fn disable_peer(&self, peer: PeerId); } -/// Concrete implementation of `EventContext` over the light protocol struct and -/// an io context. -pub struct Ctx<'a> { - /// Io context to enable immediate response to events. +/// Context for a protocol event which has a peer ID attached. +pub trait EventContext: BasicContext { + /// Get the peer relevant to the event e.g. message sender, + /// disconnected/connected peer. + fn peer(&self) -> PeerId; +} + +/// Basic context. +pub struct TickCtx<'a> { + /// Io context to enable dispatch. pub io: &'a IoContext, /// Protocol implementation. pub proto: &'a LightProtocol, - /// Relevant peer for event. 
- pub peer: PeerId, } -impl<'a> EventContext for Ctx<'a> { - fn peer(&self) -> PeerId { - self.peer - } - +impl<'a> BasicContext for TickCtx<'a> { fn persistent_peer_id(&self, id: PeerId) -> Option { self.io.persistent_peer_id(id) } @@ -144,3 +140,46 @@ impl<'a> EventContext for Ctx<'a> { self.io.disable_peer(peer); } } + +/// Concrete implementation of `EventContext` over the light protocol struct and +/// an io context. +pub struct Ctx<'a> { + /// Io context to enable immediate response to events. + pub io: &'a IoContext, + /// Protocol implementation. + pub proto: &'a LightProtocol, + /// Relevant peer for event. + pub peer: PeerId, +} + +impl<'a> BasicContext for Ctx<'a> { + fn persistent_peer_id(&self, id: PeerId) -> Option { + self.io.persistent_peer_id(id) + } + + fn request_from(&self, peer: PeerId, request: Request) -> Result { + self.proto.request_from(self.io, &peer, request) + } + + fn make_announcement(&self, announcement: Announcement) { + self.proto.make_announcement(self.io, announcement); + } + + fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize { + self.proto.max_requests(peer, kind) + } + + fn disconnect_peer(&self, peer: PeerId) { + self.io.disconnect_peer(peer); + } + + fn disable_peer(&self, peer: PeerId) { + self.io.disable_peer(peer); + } +} + +impl<'a> EventContext for Ctx<'a> { + fn peer(&self) -> PeerId { + self.peer + } +} diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 6da964f99..08bd3347d 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -37,7 +37,7 @@ use provider::Provider; use request::{self, HashOrNumber, Request}; use self::buffer_flow::{Buffer, FlowParams}; -use self::context::Ctx; +use self::context::{Ctx, TickCtx}; use self::error::Punishment; mod buffer_flow; @@ -49,7 +49,7 @@ mod status; mod tests; pub use self::error::Error; -pub use self::context::{EventContext, IoContext}; +pub use self::context::{BasicContext, EventContext, IoContext}; pub use 
self::status::{Status, Capabilities, Announcement}; const TIMEOUT: TimerToken = 0; @@ -189,6 +189,8 @@ pub trait Handler: Send + Sync { /// Called when a peer responds with header proofs. Each proof is a block header coupled /// with a series of trie nodes is ascending order by distance from the root. fn on_header_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[(Bytes, Vec)]) { } + /// Called to "tick" the handler periodically. + fn tick(&self, _ctx: &BasicContext) { } /// Called on abort. This signals to handlers that they should clean up /// and ignore peers. // TODO: coreresponding `on_activate`? @@ -509,6 +511,15 @@ impl LightProtocol { } } } + + fn tick_handlers(&self, io: &IoContext) { + for handler in &self.handlers { + handler.tick(&TickCtx { + io: io, + proto: self, + }) + } + } } impl LightProtocol { @@ -1128,7 +1139,10 @@ impl NetworkProtocolHandler for LightProtocol { fn timeout(&self, io: &NetworkContext, timer: TimerToken) { match timer { - TIMEOUT => self.timeout_check(io), + TIMEOUT => { + self.timeout_check(io); + self.tick_handlers(io); + }, _ => warn!(target: "les", "received timeout on unknown token {}", timer), } } From 9c7340307e9789f4ece19f6c22398dbec75ca466 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 15 Dec 2016 21:51:08 +0100 Subject: [PATCH 20/42] handle events, minimal state machine --- ethcore/light/src/net/context.rs | 7 ++ sync/src/lib.rs | 3 +- sync/src/light_sync/mod.rs | 165 +++++++++++++++++++++++++++--- sync/src/light_sync/sync_round.rs | 21 ++-- 4 files changed, 170 insertions(+), 26 deletions(-) diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index 74d8ad811..e95434a3b 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -105,6 +105,9 @@ pub trait EventContext: BasicContext { /// Get the peer relevant to the event e.g. message sender, /// disconnected/connected peer. 
fn peer(&self) -> PeerId; + + /// Treat the event context as a basic context. + fn as_basic(&self) -> &BasicContext; } /// Basic context. @@ -182,4 +185,8 @@ impl<'a> EventContext for Ctx<'a> { fn peer(&self) -> PeerId { self.peer } + + fn as_basic(&self) -> &BasicContext { + &*self + } } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index fc9e5de74..504f4e97c 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -54,11 +54,12 @@ extern crate ethcore_ipc as ipc; mod chain; mod blocks; mod block_sync; -mod light_sync; mod sync_io; mod snapshot; mod transactions_stats; +pub mod light_sync; + #[cfg(test)] mod tests; diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 243afa6f7..12bb6a11e 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -25,18 +25,22 @@ //! in the same binary; unlike a full node which might communicate via IPC. use std::collections::HashMap; -use std::fmt; +use std::mem; use std::sync::Arc; use ethcore::header::Header; use light::client::LightChainClient; -use light::net::{Announcement, Error as NetError, Handler, EventContext, Capabilities, ReqId, Status}; +use light::net::{ + Announcement, Handler, BasicContext, EventContext, + Capabilities, ReqId, Status +}; use light::request; use network::PeerId; -use rlp::{DecoderError, UntrustedRlp, View}; use util::{Bytes, U256, H256, Mutex, RwLock}; +use self::sync_round::{SyncRound, ResponseContext}; + mod response; mod sync_round; @@ -49,7 +53,6 @@ struct ChainInfo { } struct Peer { - first_status: ChainInfo, status: ChainInfo, } @@ -57,17 +60,48 @@ impl Peer { /// Create a peer object. fn new(chain_info: ChainInfo) -> Self { Peer { - first_status: chain_info.clone(), status: chain_info.clone(), } } } +// Search for a common ancestor. +struct AncestorSearch { + last_batched: u64, + req_id: ReqId, +} + +// synchronization state machine. +enum SyncState { + // Idle (waiting for peers) + Idle, + // searching for common ancestor with best chain. 
+ // queue should be cleared at this phase. + AncestorSearch(AncestorSearch), + // Doing sync rounds. + Rounds(SyncRound), +} + +struct ResponseCtx<'a> { + peer: PeerId, + req_id: ReqId, + ctx: &'a BasicContext, + data: &'a [Bytes], +} + +impl<'a> ResponseContext for ResponseCtx<'a> { + fn responder(&self) -> PeerId { self.peer } + fn req_id(&self) -> &ReqId { &self.req_id } + fn data(&self) -> &[Bytes] { self.data } + fn punish_responder(&self) { self.ctx.disable_peer(self.peer) } +} + /// Light client synchronization manager. See module docs for more details. pub struct LightSync { best_seen: Mutex>, // best seen block on the network. peers: RwLock>>, // peers which are relevant to synchronization. client: Arc, + state: Mutex, } impl Handler for LightSync { @@ -75,7 +109,8 @@ impl Handler for LightSync { let our_best = self.client.chain_info().best_block_number; if !capabilities.serve_headers || status.head_num <= our_best { - trace!(target: "sync", "Ignoring irrelevant peer: {}", ctx.peer()); + trace!(target: "sync", "Disconnecting irrelevant peer: {}", ctx.peer()); + ctx.disconnect_peer(ctx.peer()); return; } @@ -85,27 +120,130 @@ impl Handler for LightSync { head_num: status.head_num, }; + let mut best = self.best_seen.lock(); + if best.as_ref().map_or(true, |b| status.head_td > b.1) { + *best = Some((status.head_hash, status.head_td)); + } + self.peers.write().insert(ctx.peer(), Mutex::new(Peer::new(chain_info))); + self.maintain_sync(ctx.as_basic()); } - fn on_disconnect(&self, ctx: &EventContext, _unfulfilled: &[ReqId]) { + fn on_disconnect(&self, ctx: &EventContext, unfulfilled: &[ReqId]) { let peer_id = ctx.peer(); + let peer = match self.peers.write().remove(&peer_id).map(|p| p.into_inner()) { + Some(peer) => peer, + None => return, + }; + + let new_best = { + let mut best = self.best_seen.lock(); + let peer_best = (peer.status.head_hash, peer.status.head_td); + + if best.as_ref().map_or(false, |b| b == &peer_best) { + // search for next-best block. 
+ let next_best: Option<(H256, U256)> = self.peers.read().values() + .map(|p| p.lock()) + .map(|p| (p.status.head_hash, p.status.head_td)) + .fold(None, |acc, x| match acc { + Some(acc) => if x.1 > acc.1 { Some(x) } else { Some(acc) }, + None => Some(x), + }); + + *best = next_best; + } + + best.clone() + }; + + if new_best.is_none() { + debug!(target: "sync", "No peers remain. Reverting to idle"); + *self.state.lock() = SyncState::Idle; + } else { + let mut state = self.state.lock(); + + *state = match mem::replace(&mut *state, SyncState::Idle) { + SyncState::Idle => SyncState::Idle, + SyncState::AncestorSearch(search) => SyncState::AncestorSearch(search), + SyncState::Rounds(round) => SyncState::Rounds(round.requests_abandoned(unfulfilled)), + }; + } + + self.maintain_sync(ctx.as_basic()); } fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) { - // restart search for common ancestor if necessary. - // restart download if necessary. - // if this is a peer we found irrelevant earlier, we may want to - // re-evaluate their usefulness. 
- if !self.peers.read().contains_key(&ctx.peer()) { return } + let last_td = { + let peers = self.peers.read(); + match peers.get(&ctx.peer()){ + None => return, + Some(peer) => { + let mut peer = peer.lock(); + let last_td = peer.status.head_td; + peer.status = ChainInfo { + head_td: announcement.head_td, + head_hash: announcement.head_hash, + head_num: announcement.head_num, + }; + last_td + } + } + }; trace!(target: "sync", "Announcement from peer {}: new chain head {:?}, reorg depth {}", ctx.peer(), (announcement.head_hash, announcement.head_num), announcement.reorg_depth); + + if last_td < announcement.head_td { + trace!(target: "sync", "Peer {} moved backwards.", ctx.peer()); + self.peers.write().remove(&ctx.peer()); + ctx.disconnect_peer(ctx.peer()); + } + + let mut best = self.best_seen.lock(); + if best.as_ref().map_or(true, |b| announcement.head_td > b.1) { + *best = Some((announcement.head_hash, announcement.head_td)); + } + + self.maintain_sync(ctx.as_basic()); } fn on_block_headers(&self, ctx: &EventContext, req_id: ReqId, headers: &[Bytes]) { - let peer_id = ctx.peer(); + if !self.peers.read().contains_key(&ctx.peer()) { + return; + } + + { + let mut state = self.state.lock(); + + *state = match mem::replace(&mut *state, SyncState::Idle) { + SyncState::Idle => SyncState::Idle, + SyncState::AncestorSearch(search) => SyncState::AncestorSearch(search), + SyncState::Rounds(round) => { + SyncState::Rounds(round.process_response(&ResponseCtx { + peer: ctx.peer(), + req_id: req_id, + ctx: ctx.as_basic(), + data: headers, + })) + } + }; + } + + self.maintain_sync(ctx.as_basic()); + } + + fn tick(&self, ctx: &BasicContext) { + self.maintain_sync(ctx); + } +} + +// private helpers +impl LightSync { + fn maintain_sync(&self, ctx: &BasicContext) { + const DRAIN_AMOUNT: usize = 256; + + unimplemented!() } } @@ -120,6 +258,7 @@ impl LightSync { best_seen: Mutex::new(None), peers: RwLock::new(HashMap::new()), client: client, + state: Mutex::new(SyncState::Idle), } } 
} diff --git a/sync/src/light_sync/sync_round.rs b/sync/src/light_sync/sync_round.rs index 5f121d00c..fb4f5c4ef 100644 --- a/sync/src/light_sync/sync_round.rs +++ b/sync/src/light_sync/sync_round.rs @@ -18,17 +18,14 @@ use std::cmp::Ordering; use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque}; -use std::mem; use ethcore::header::Header; -use light::client::LightChainClient; -use light::net::{EventContext, ReqId}; +use light::net::ReqId; use light::request::Headers as HeadersRequest; use network::PeerId; -use rlp::{UntrustedRlp, View}; -use util::{Bytes, H256, Mutex}; +use util::{Bytes, H256}; use super::response; @@ -47,7 +44,7 @@ pub trait ResponseContext { /// Get the peer who sent this response. fn responder(&self) -> PeerId; /// Get the request ID this response corresponds to. - fn req_id(&self) -> ReqId; + fn req_id(&self) -> &ReqId; /// Get the (unverified) response data. fn data(&self) -> &[Bytes]; /// Punish the responder. @@ -173,7 +170,7 @@ impl Fetcher { } fn process_response(mut self, ctx: &R) -> SyncRound { - let mut request = match self.pending.remove(&ctx.req_id()) { + let mut request = match self.pending.remove(ctx.req_id()) { Some(request) => request, None => return SyncRound::Fetch(self), }; @@ -267,8 +264,8 @@ impl Fetcher { SyncRound::Fetch(self) } - fn drain(mut self, headers: &mut Vec
, max: usize) -> SyncRound { - let max = ::std::cmp::min(max, self.ready.len()); + fn drain(mut self, headers: &mut Vec
, max: Option) -> SyncRound { + let max = ::std::cmp::min(max.unwrap_or(usize::max_value()), self.ready.len()); headers.extend(self.ready.drain(0..max)); if self.sparse.is_empty() && self.ready.is_empty() { @@ -321,7 +318,7 @@ impl RoundStart { fn process_response(mut self, ctx: &R) -> SyncRound { let req = match self.pending_req.take() { - Some((id, ref req)) if ctx.req_id() == id => { req.clone() } + Some((id, ref req)) if ctx.req_id() == &id => { req.clone() } other => { self.pending_req = other; return SyncRound::Start(self); @@ -445,9 +442,9 @@ impl SyncRound { } } - /// Drain up to a maximum number of headers (continuous, starting with a child of + /// Drain up to a maximum number (None -> all) of headers (continuous, starting with a child of /// the round start block) from the round, starting a new one once finished. - pub fn drain(self, v: &mut Vec
, max: usize) -> Self { + pub fn drain(self, v: &mut Vec
, max: Option) -> Self { match self { SyncRound::Fetch(fetcher) => fetcher.drain(v, max), other => other, From 8622ab66dc12cff13311acea32de5082ea14ad65 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 15 Dec 2016 22:42:24 +0100 Subject: [PATCH 21/42] skeleton for maintain_sync: all but dispatch --- sync/src/light_sync/mod.rs | 87 ++++++++++++++++++++++++++++++++++---- 1 file changed, 78 insertions(+), 9 deletions(-) diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 12bb6a11e..bd62b4c70 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -38,8 +38,9 @@ use light::net::{ use light::request; use network::PeerId; use util::{Bytes, U256, H256, Mutex, RwLock}; +use rand::{Rng, OsRng}; -use self::sync_round::{SyncRound, ResponseContext}; +use self::sync_round::{AbortReason, SyncRound, ResponseContext}; mod response; mod sync_round; @@ -65,10 +66,10 @@ impl Peer { } } -// Search for a common ancestor. +// Search for a common ancestor with the best chain. struct AncestorSearch { last_batched: u64, - req_id: ReqId, + req_id: Option, } // synchronization state machine. @@ -101,6 +102,7 @@ pub struct LightSync { best_seen: Mutex>, // best seen block on the network. peers: RwLock>>, // peers which are relevant to synchronization. client: Arc, + rng: OsRng, state: Mutex, } @@ -240,10 +242,76 @@ impl Handler for LightSync { // private helpers impl LightSync { - fn maintain_sync(&self, ctx: &BasicContext) { - const DRAIN_AMOUNT: usize = 256; + // Begins a search for the common ancestor and our best block. + // does not lock state, instead has a mutable reference to it passed. + fn begin_search(&self, _state: &mut SyncState) { + self.client.clear_queue(); - unimplemented!() + unimplemented!(); + } + + fn maintain_sync(&self, ctx: &BasicContext) { + const DRAIN_AMOUNT: usize = 128; + + let mut state = self.state.lock(); + + // drain any pending blocks into the queue. 
+ { + let mut sink = Vec::with_capacity(DRAIN_AMOUNT); + + 'a: + loop { + let queue_info = self.client.queue_info(); + if queue_info.is_full() { break } + + *state = match mem::replace(&mut *state, SyncState::Idle) { + SyncState::Rounds(round) + => SyncState::Rounds(round.drain(&mut sink, Some(DRAIN_AMOUNT))), + other => other, + }; + + if sink.is_empty() { break } + + for header in sink.drain(..) { + if let Err(e) = self.client.queue_header(header) { + debug!(target: "sync", "Found bad header ({:?}). Reset to search state.", e); + + self.begin_search(&mut state); + break 'a; + } + } + } + } + + // check for aborted sync round. + { + match mem::replace(&mut *state, SyncState::Idle) { + SyncState::Rounds(SyncRound::Abort(reason)) => { + match reason { + AbortReason::BadScaffold(bad_peers) => { + debug!(target: "sync", "Disabling peers responsible for bad scaffold"); + for peer in bad_peers { + ctx.disable_peer(peer); + } + } + AbortReason::NoResponses => {} + } + + debug!(target: "sync", "Beginning search after aborted sync round"); + self.begin_search(&mut state); + } + other => *state = other, // restore displaced state. + } + } + + // allow dispatching of requests. + { + *state = match mem::replace(&mut *state, SyncState::Idle) { + SyncState::Rounds(round) + => SyncState::Rounds(round.dispatch_requests(|_| unimplemented!())), + other => other, + }; + } } } @@ -253,12 +321,13 @@ impl LightSync { /// /// This won't do anything until registered as a handler /// so it can act on events. 
- pub fn new(client: Arc) -> Self { - LightSync { + pub fn new(client: Arc) -> Result { + Ok(LightSync { best_seen: Mutex::new(None), peers: RwLock::new(HashMap::new()), client: client, + rng: try!(OsRng::new()), state: Mutex::new(SyncState::Idle), - } + }) } } From 0d7b638a2d91bd18aa638690557a63eac5080e4d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 16 Dec 2016 14:53:36 +0100 Subject: [PATCH 22/42] ancestor search response handler --- sync/src/light_sync/mod.rs | 105 +++++++++++++++++++++++++----- sync/src/light_sync/sync_round.rs | 15 +++-- 2 files changed, 96 insertions(+), 24 deletions(-) diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index bd62b4c70..3ab2ffe82 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -33,7 +33,7 @@ use ethcore::header::Header; use light::client::LightChainClient; use light::net::{ Announcement, Handler, BasicContext, EventContext, - Capabilities, ReqId, Status + Capabilities, ReqId, Status, }; use light::request; use network::PeerId; @@ -66,10 +66,60 @@ impl Peer { } } -// Search for a common ancestor with the best chain. -struct AncestorSearch { - last_batched: u64, - req_id: Option, +// search for a common ancestor with the best chain. +enum AncestorSearch { + Queued(u64), // queued to search for blocks starting from here. + Awaiting(ReqId, u64, request::Headers), // awaiting response for this request. + Prehistoric, // prehistoric block found. TODO: start to roll back CHTs. + FoundCommon(u64, H256), // common block found. + Genesis, // common ancestor is the genesis. 
+} + +impl AncestorSearch { + fn begin(best_num: u64) -> Self { + match best_num { + 0 => AncestorSearch::Genesis, + x => AncestorSearch::Queued(best_num), + } + } + + fn process_response(mut self, ctx: &ResponseContext, client: &L) -> AncestorSearch + where L: LightChainClient + { + let first_num = client.chain_info().first_block_number.unwrap_or(0); + match self { + AncestorSearch::Awaiting(id, start, req) => { + if &id == ctx.req_id() { + match response::decode_and_verify(ctx.data(), &req) { + Ok(headers) => { + for header in &headers { + if client.is_known(&header.hash()) { + debug!(target: "sync", "Found common ancestor with best chain"); + return AncestorSearch::FoundCommon(header.number(), header.hash()); + } + + if header.number() <= first_num { + debug!(target: "sync", "Prehistoric common ancestor with best chain."); + return AncestorSearch::Prehistoric; + } + } + + AncestorSearch::Queued(start - headers.len() as u64) + } + Err(e) => { + trace!(target: "sync", "Bad headers response from {}: {}", ctx.responder(), e); + + ctx.punish_responder(); + AncestorSearch::Queued(start) + } + } + } else { + AncestorSearch::Awaiting(id, start, req) + } + } + other => other, + } + } } // synchronization state machine. 
@@ -218,17 +268,18 @@ impl Handler for LightSync { { let mut state = self.state.lock(); + let ctx = ResponseCtx { + peer: ctx.peer(), + req_id: req_id, + ctx: ctx.as_basic(), + data: headers, + }; + *state = match mem::replace(&mut *state, SyncState::Idle) { SyncState::Idle => SyncState::Idle, - SyncState::AncestorSearch(search) => SyncState::AncestorSearch(search), - SyncState::Rounds(round) => { - SyncState::Rounds(round.process_response(&ResponseCtx { - peer: ctx.peer(), - req_id: req_id, - ctx: ctx.as_basic(), - data: headers, - })) - } + SyncState::AncestorSearch(search) => + SyncState::AncestorSearch(search.process_response(&ctx, &*self.client)), + SyncState::Rounds(round) => SyncState::Rounds(round.process_response(&ctx)), }; } @@ -244,10 +295,17 @@ impl Handler for LightSync { impl LightSync { // Begins a search for the common ancestor and our best block. // does not lock state, instead has a mutable reference to it passed. - fn begin_search(&self, _state: &mut SyncState) { + fn begin_search(&self, state: &mut SyncState) { self.client.clear_queue(); - unimplemented!(); + let chain_info = self.client.chain_info(); + if let None = *self.best_seen.lock() { + // no peers. + *state = SyncState::Idle; + return; + } + + *state = SyncState::AncestorSearch(AncestorSearch::begin(chain_info.best_block_number)); } fn maintain_sync(&self, ctx: &BasicContext) { @@ -283,7 +341,7 @@ impl LightSync { } } - // check for aborted sync round. + // handle state transitions. { match mem::replace(&mut *state, SyncState::Idle) { SyncState::Rounds(SyncRound::Abort(reason)) => { @@ -300,11 +358,24 @@ impl LightSync { debug!(target: "sync", "Beginning search after aborted sync round"); self.begin_search(&mut state); } + SyncState::AncestorSearch(AncestorSearch::FoundCommon(num, hash)) => { + // TODO: compare to best block and switch to another downloading + // method when close. 
+ *state = SyncState::Rounds(SyncRound::begin(num, hash)); + } + SyncState::AncestorSearch(AncestorSearch::Genesis) => { + // Same here. + let g_hash = self.client.chain_info().genesis_hash; + *state = SyncState::Rounds(SyncRound::begin(0, g_hash)); + } + SyncState::Idle => self.begin_search(&mut state), other => *state = other, // restore displaced state. } } // allow dispatching of requests. + // TODO: maybe wait until the amount of cumulative requests remaining is high enough + // to avoid pumping the failure rate. { *state = match mem::replace(&mut *state, SyncState::Idle) { SyncState::Rounds(round) diff --git a/sync/src/light_sync/sync_round.rs b/sync/src/light_sync/sync_round.rs index fb4f5c4ef..7f86faf2d 100644 --- a/sync/src/light_sync/sync_round.rs +++ b/sync/src/light_sync/sync_round.rs @@ -299,9 +299,7 @@ impl RoundStart { } // called on failed attempt. may trigger a transition after a number of attempts. - // a failed attempt is defined as: - // - any time we try to make a request to a peer and fail - // - any time a peer returns invalid or incomplete response + // a failed attempt is defined as any time a peer returns invalid or incomplete response fn failed_attempt(mut self) -> SyncRound { self.attempt += 1; @@ -371,7 +369,6 @@ impl RoundStart { where D: Fn(HeadersRequest) -> Option { if self.pending_req.is_none() { - // beginning offset + first block expected after last header we have. let start = (self.start_block.0 + 1) + self.sparse_headers.len() as u64 * (ROUND_SKIP + 1); @@ -383,9 +380,8 @@ impl RoundStart { reverse: false, }; - match dispatcher(headers_request.clone()) { - Some(req_id) => self.pending_req = Some((req_id, headers_request)), - None => return self.failed_attempt(), + if let Some(req_id) = dispatcher(headers_request.clone()) { + self.pending_req = Some((req_id, headers_request)); } } @@ -410,6 +406,11 @@ impl SyncRound { SyncRound::Abort(reason) } + /// Begin sync rounds from a starting block. 
+ pub fn begin(num: u64, hash: H256) -> Self { + SyncRound::Start(RoundStart::new((num, hash))) + } + /// Process an answer to a request. Unknown requests will be ignored. pub fn process_response(self, ctx: &R) -> Self { match self { From ce84215d93330b9dc448b698471c1c4633e3ae47 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 16 Dec 2016 15:26:39 +0100 Subject: [PATCH 23/42] naive and bad request dispatcher --- sync/src/light_sync/mod.rs | 65 ++++++++++++++++++++++++++----- sync/src/light_sync/sync_round.rs | 10 ++--- 2 files changed, 60 insertions(+), 15 deletions(-) diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 3ab2ffe82..5ca441949 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -28,8 +28,6 @@ use std::collections::HashMap; use std::mem; use std::sync::Arc; -use ethcore::header::Header; - use light::client::LightChainClient; use light::net::{ Announcement, Handler, BasicContext, EventContext, @@ -58,10 +56,10 @@ struct Peer { } impl Peer { - /// Create a peer object. + // Create a new peer. 
fn new(chain_info: ChainInfo) -> Self { Peer { - status: chain_info.clone(), + status: chain_info, } } } @@ -79,11 +77,11 @@ impl AncestorSearch { fn begin(best_num: u64) -> Self { match best_num { 0 => AncestorSearch::Genesis, - x => AncestorSearch::Queued(best_num), + _ => AncestorSearch::Queued(best_num), } } - fn process_response(mut self, ctx: &ResponseContext, client: &L) -> AncestorSearch + fn process_response(self, ctx: &ResponseContext, client: &L) -> AncestorSearch where L: LightChainClient { let first_num = client.chain_info().first_block_number.unwrap_or(0); @@ -120,6 +118,29 @@ impl AncestorSearch { other => other, } } + + fn dispatch_request(self, mut dispatcher: F) -> AncestorSearch + where F: FnMut(request::Headers) -> Option + { + const BATCH_SIZE: usize = 64; + + match self { + AncestorSearch::Queued(start) => { + let req = request::Headers { + start: start.into(), + max: ::std::cmp::min(start as usize, BATCH_SIZE), + skip: 0, + reverse: true, + }; + + match dispatcher(req.clone()) { + Some(req_id) => AncestorSearch::Awaiting(req_id, start, req), + None => AncestorSearch::Queued(start), + } + } + other => other, + } + } } // synchronization state machine. @@ -152,7 +173,7 @@ pub struct LightSync { best_seen: Mutex>, // best seen block on the network. peers: RwLock>>, // peers which are relevant to synchronization. client: Arc, - rng: OsRng, + rng: Mutex, state: Mutex, } @@ -377,9 +398,33 @@ impl LightSync { // TODO: maybe wait until the amount of cumulative requests remaining is high enough // to avoid pumping the failure rate. { + let peers = self.peers.read(); + let mut peer_ids: Vec<_> = peers.keys().cloned().collect(); + let mut rng = self.rng.lock(); + + // naive request dispatcher: just give to any peer which says it will + // give us responses. 
+ let dispatcher = move |req: request::Headers| { + rng.shuffle(&mut peer_ids); + + for peer in &peer_ids { + if ctx.max_requests(*peer, request::Kind::Headers) >= req.max { + match ctx.request_from(*peer, request::Request::Headers(req.clone())) { + Ok(id) => return Some(id), + Err(e) => + trace!(target: "sync", "Error requesting headers from viable peer: {}", e), + } + } + } + + None + }; + *state = match mem::replace(&mut *state, SyncState::Idle) { - SyncState::Rounds(round) - => SyncState::Rounds(round.dispatch_requests(|_| unimplemented!())), + SyncState::Rounds(round) => + SyncState::Rounds(round.dispatch_requests(dispatcher)), + SyncState::AncestorSearch(search) => + SyncState::AncestorSearch(search.dispatch_request(dispatcher)), other => other, }; } @@ -397,7 +442,7 @@ impl LightSync { best_seen: Mutex::new(None), peers: RwLock::new(HashMap::new()), client: client, - rng: try!(OsRng::new()), + rng: Mutex::new(try!(OsRng::new())), state: Mutex::new(SyncState::Idle), }) } diff --git a/sync/src/light_sync/sync_round.rs b/sync/src/light_sync/sync_round.rs index 7f86faf2d..03ca1dea4 100644 --- a/sync/src/light_sync/sync_round.rs +++ b/sync/src/light_sync/sync_round.rs @@ -243,8 +243,8 @@ impl Fetcher { SyncRound::Fetch(self) } - fn dispatch_requests(mut self, dispatcher: D) -> SyncRound - where D: Fn(HeadersRequest) -> Option + fn dispatch_requests(mut self, mut dispatcher: D) -> SyncRound + where D: FnMut(HeadersRequest) -> Option { while let Some(pending_req) = self.requests.pop() { match dispatcher(pending_req.headers_request.clone()) { @@ -365,8 +365,8 @@ impl RoundStart { } } - fn dispatch_requests(mut self, dispatcher: D) -> SyncRound - where D: Fn(HeadersRequest) -> Option + fn dispatch_requests(mut self, mut dispatcher: D) -> SyncRound + where D: FnMut(HeadersRequest) -> Option { if self.pending_req.is_none() { // beginning offset + first block expected after last header we have. 
@@ -434,7 +434,7 @@ impl SyncRound { // TODO: have dispatcher take capabilities argument? and return an error as // to why no suitable peer can be found? (no buffer, no chain heads that high, etc) pub fn dispatch_requests(self, dispatcher: D) -> Self - where D: Fn(HeadersRequest) -> Option + where D: FnMut(HeadersRequest) -> Option { match self { SyncRound::Start(round_start) => round_start.dispatch_requests(dispatcher), From 8b88ef18448ecac293bdc17134fa1d74412e82dd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 16 Dec 2016 17:38:16 +0100 Subject: [PATCH 24/42] add light sync service to ethsync --- sync/src/api.rs | 102 +++++++++++++++++++++++++++++++++++++++++++++++- sync/src/lib.rs | 7 +++- 2 files changed, 106 insertions(+), 3 deletions(-) diff --git a/sync/src/api.rs b/sync/src/api.rs index 45a9c1eb7..be35f2d6f 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -33,6 +33,8 @@ use ipc::{BinaryConvertable, BinaryConvertError, IpcConfig}; use std::str::FromStr; use parking_lot::RwLock; use chain::{ETH_PACKET_COUNT, SNAPSHOT_SYNC_PACKET_COUNT}; +use light::client::LightChainClient; +use light::Provider; use light::net::{LightProtocol, Params as LightParams, Capabilities, Handler as LightHandler, EventContext}; /// Parity sync protocol @@ -304,7 +306,7 @@ impl ChainNotify for EthSync { Some(lp) => lp, None => return, }; - + let chain_info = self.eth_handler.chain.chain_info(); light_proto.make_announcement(context, Announcement { head_hash: chain_info.best_block_hash, @@ -567,3 +569,101 @@ pub struct ServiceConfiguration { /// IPC path. pub io_path: String, } + +/// Configuration for the light sync. +pub struct LightSyncParams { + /// Network configuration. + pub network_config: BasicNetworkConfiguration, + /// Light client to sync to. + pub client: Arc, + /// Network ID. + pub network_id: u64, + /// Subprotocol name. + pub subprotocol_name: [u8; 3], +} + +/// Service for light synchronization. 
+pub struct LightSync { + proto: Arc, + network: NetworkService, + subprotocol_name: [u8; 3], +} + +impl LightSync { + /// Create a new light sync service. + pub fn new(params: LightSyncParams) -> Result + where L: LightChainClient + Provider + 'static + { + use light_sync::LightSync as SyncHandler; + + // initialize light protocol handler and attach sync module. + let light_proto = { + let light_params = LightParams { + network_id: params.network_id, + flow_params: Default::default(), // or `None`? + capabilities: Capabilities { + serve_headers: false, + serve_chain_since: None, + serve_state_since: None, + tx_relay: false, + }, + }; + + let mut light_proto = LightProtocol::new(params.client.clone(), light_params); + let sync_handler = try!(SyncHandler::new(params.client.clone())); + light_proto.add_handler(Box::new(sync_handler)); + + Arc::new(light_proto) + }; + + let service = try!(NetworkService::new(params.network_config)); + + Ok(LightSync { + proto: light_proto, + network: service, + subprotocol_name: params.subprotocol_name, + }) + } +} + +impl ManageNetwork for LightSync { + fn accept_unreserved_peers(&self) { + self.network.set_non_reserved_mode(NonReservedPeerMode::Accept); + } + + fn deny_unreserved_peers(&self) { + self.network.set_non_reserved_mode(NonReservedPeerMode::Deny); + } + + fn remove_reserved_peer(&self, peer: String) -> Result<(), String> { + self.network.remove_reserved_peer(&peer).map_err(|e| format!("{:?}", e)) + } + + fn add_reserved_peer(&self, peer: String) -> Result<(), String> { + self.network.add_reserved_peer(&peer).map_err(|e| format!("{:?}", e)) + } + + fn start_network(&self) { + match self.network.start() { + Err(NetworkError::StdIo(ref e)) if e.kind() == io::ErrorKind::AddrInUse => warn!("Network port {:?} is already in use, make sure that another instance of an Ethereum client is not running or change the port using the --port option.", self.network.config().listen_address.expect("Listen address is not set.")), + Err(err) 
=> warn!("Error starting network: {}", err), + _ => {}, + } + + let light_proto = self.proto.clone(); + + self.network.register_protocol(light_proto, self.subprotocol_name, ::light::net::PACKET_COUNT, ::light::net::PROTOCOL_VERSIONS) + .unwrap_or_else(|e| warn!("Error registering light client protocol: {:?}", e)); + } + + fn stop_network(&self) { + self.proto.abort(); + if let Err(e) = self.network.stop() { + warn!("Error stopping network: {}", e); + } + } + + fn network_config(&self) -> NetworkConfiguration { + NetworkConfiguration::from(self.network.config().clone()) + } +} diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 504f4e97c..9b3cdab5e 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -72,8 +72,11 @@ mod api { #[cfg(not(feature = "ipc"))] mod api; -pub use api::{EthSync, Params, SyncProvider, ManageNetwork, SyncConfig, - ServiceConfiguration, NetworkConfiguration, PeerInfo, AllowIP, TransactionStats}; +pub use api::{ + EthSync, Params, SyncProvider, ManageNetwork, SyncConfig, + ServiceConfiguration, NetworkConfiguration, PeerInfo, AllowIP, TransactionStats, + LightSync, LightSyncParams, +}; pub use chain::{SyncStatus, SyncState}; pub use network::{is_valid_node_url, NonReservedPeerMode, NetworkError}; From 8970946b746130aaa5d9272fafa740a93cd225d1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 16 Dec 2016 21:45:51 +0100 Subject: [PATCH 25/42] basic --light parameter --- parity/cli/config.full.toml | 1 + parity/cli/mod.rs | 5 ++- parity/cli/usage.txt | 14 +++--- parity/configuration.rs | 6 ++- parity/run.rs | 89 +++++++++++++++++++++++++++++-------- 5 files changed, 88 insertions(+), 27 deletions(-) diff --git a/parity/cli/config.full.toml b/parity/cli/config.full.toml index fc55e5294..01bec556c 100644 --- a/parity/cli/config.full.toml +++ b/parity/cli/config.full.toml @@ -12,6 +12,7 @@ base_path = "$HOME/.parity" db_path = "$HOME/.parity/chains" keys_path = "$HOME/.parity/keys" identity = "" +light = false [account] unlock = 
["0xdeadbeefcafe0000000000000000000000000000"] diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index cac137316..29445056b 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -94,6 +94,7 @@ usage! { flag_db_path: String = "$BASE/chains", or |c: &Config| otry!(c.parity).db_path.clone(), flag_keys_path: String = "$BASE/keys", or |c: &Config| otry!(c.parity).keys_path.clone(), flag_identity: String = "", or |c: &Config| otry!(c.parity).identity.clone(), + flag_light: bool = false, or |c: &Config| otry!(c.parity).light.clone(), // -- Account Options flag_unlock: Option = None, @@ -259,7 +260,7 @@ usage! { or |c: &Config| otry!(c.footprint).fat_db.clone(), flag_scale_verifiers: bool = false, or |c: &Config| otry!(c.footprint).scale_verifiers.clone(), - flag_num_verifiers: Option = None, + flag_num_verifiers: Option = None, or |c: &Config| otry!(c.footprint).num_verifiers.clone().map(Some), // -- Import/Export Options @@ -323,6 +324,7 @@ struct Operating { db_path: Option, keys_path: Option, identity: Option, + light: Option, } #[derive(Default, Debug, PartialEq, RustcDecodable)] @@ -552,6 +554,7 @@ mod tests { flag_db_path: "$HOME/.parity/chains".into(), flag_keys_path: "$HOME/.parity/keys".into(), flag_identity: "".into(), + flag_light: false, // -- Account Options flag_unlock: Some("0xdeadbeefcafe0000000000000000000000000000".into()), diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 1e4032bf5..b78941564 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -36,15 +36,15 @@ Operating Options: (default: {flag_mode_alarm}). --auto-update SET Set a releases set to automatically update and install. - all - All updates in the our release track. + all - All updates in the our release track. critical - Only consensus/security updates. - none - No updates will be auto-installed. + none - No updates will be auto-installed. (default: {flag_auto_update}). --release-track TRACK Set which release track we should use for updates. 
- stable - Stable releases. - beta - Beta releases. + stable - Stable releases. + beta - Beta releases. nightly - Nightly releases (unstable). - testing - Testing releases (do not use). + testing - Testing releases (do not use). current - Whatever track this executable was released on (default: {flag_release_track}). --no-download Normally new releases will be downloaded ready for @@ -66,6 +66,8 @@ Operating Options: --keys-path PATH Specify the path for JSON key files to be found (default: {flag_keys_path}). --identity NAME Specify your node's name. (default: {flag_identity}) + --light Run in light client mode. Very experimental. + (default: {flag_light}) Account Options: --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution. @@ -363,7 +365,7 @@ Legacy Options: --cache MB Equivalent to --cache-size MB. Internal Options: - --can-restart Executable will auto-restart if exiting with 69. + --can-restart Executable will auto-restart if exiting with 69. Miscellaneous Options: -c --config CONFIG Specify a filename containing a configuration file. diff --git a/parity/configuration.rs b/parity/configuration.rs index ad8c8e30c..4bed93420 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -342,6 +342,7 @@ impl Configuration { check_seal: !self.args.flag_no_seal_check, download_old_blocks: !self.args.flag_no_ancient_blocks, serve_light: self.args.flag_serve_light, + light: self.args.flag_light, verifier_settings: verifier_settings, }; Cmd::Run(run_cmd) @@ -691,7 +692,7 @@ impl Configuration { "none" => UpdateFilter::None, "critical" => UpdateFilter::Critical, "all" => UpdateFilter::All, - _ => return Err("Invalid value for `--auto-update`. See `--help` for more information.".into()), + _ => return Err("Invalid value for `--auto-update`. 
See `--help` for more information.".into()), }, track: match self.args.flag_release_track.as_ref() { "stable" => ReleaseTrack::Stable, @@ -699,7 +700,7 @@ impl Configuration { "nightly" => ReleaseTrack::Nightly, "testing" => ReleaseTrack::Testing, "current" => ReleaseTrack::Unknown, - _ => return Err("Invalid value for `--releases-track`. See `--help` for more information.".into()), + _ => return Err("Invalid value for `--releases-track`. See `--help` for more information.".into()), }, path: default_hypervisor_path(), }) @@ -1034,6 +1035,7 @@ mod tests { download_old_blocks: true, serve_light: false, verifier_settings: Default::default(), + light: false, })); } diff --git a/parity/run.rs b/parity/run.rs index da534ee3b..4e88fe458 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -97,6 +97,7 @@ pub struct RunCmd { pub download_old_blocks: bool, pub serve_light: bool, pub verifier_settings: VerifierSettings, + pub light: bool, } pub fn open_ui(dapps_conf: &dapps::Configuration, signer_conf: &signer::Configuration) -> Result<(), String> { @@ -116,6 +117,56 @@ pub fn open_ui(dapps_conf: &dapps::Configuration, signer_conf: &signer::Configur Ok(()) } +// Execute in light client mode. +pub fn execute_light(cmd: RunCmd, logger: Arc) -> Result { + use light::client::{Config as ClientConfig, Service as LightClientService}; + use ethsync::{LightSync, LightSyncParams, ManageNetwork}; + + let panic_handler = PanicHandler::new_in_arc(); + + info!( + "Configured in {} mode. 
Note that this feature is {}.", + Colour::White.bold().paint("Light Client"), + Colour::Red.bold().paint("experimental"), + ); + + let mut client_config = ClientConfig::default(); + let queue_size = cmd.cache_config.queue(); + + client_config.queue.max_queue_size = queue_size as usize; + client_config.queue.verifier_settings = cmd.verifier_settings; + + let spec = try!(cmd.spec.spec()); + let service = try!(LightClientService::start(client_config, &spec) + .map_err(|e| format!("Error starting light client service: {}", e))); + + let net_conf = try!(cmd.net_conf.into_basic() + .map_err(|e| format!("Failed to create network config: {}", e))); + + let sync_params = LightSyncParams { + network_config: net_conf, + client: service.client().clone(), + network_id: cmd.network_id.unwrap_or(spec.network_id()), + subprotocol_name: *b"les", + }; + + let sync = try!(LightSync::new(sync_params) + .map_err(|e| format!("Failed to initialize sync service: {}", e))); + + sync.start_network(); + + let log_client = service.client().clone(); + ::std::thread::spawn(move || { + loop { + ::std::thread::sleep(::std::time::Duration::from_secs(5)); + println!("Best block: #{}", log_client.chain_info().best_block_number); + } + }); + + wait_for_exit(panic_handler, None, false); + Ok(false) +} + pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> Result { if cmd.ui && cmd.dapps_conf.enabled { // Check if Parity is already running @@ -125,6 +176,10 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R } } + if cmd.light { + return execute_light(cmd, logger); + } + // set up panic handler let panic_handler = PanicHandler::new_in_arc(); @@ -295,12 +350,12 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // create sync object let (sync_provider, manage_network, chain_notify) = try!(modules::sync( - &mut hypervisor, - sync_config, - net_conf.into(), - client.clone(), - snapshot_service.clone(), - client.clone(), + &mut hypervisor, + sync_config, + 
net_conf.into(), + client.clone(), + snapshot_service.clone(), + client.clone(), &cmd.logger_config, ).map_err(|e| format!("Sync error: {}", e))); @@ -416,7 +471,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R } // Handle exit - let restart = wait_for_exit(panic_handler, http_server, ipc_server, dapps_server, signer_server, updater, can_restart); + let restart = wait_for_exit(panic_handler, Some(updater), can_restart); info!("Finishing work, please wait..."); @@ -471,11 +526,7 @@ fn prepare_account_provider(dirs: &Directories, data_dir: &str, cfg: AccountsCon fn wait_for_exit( panic_handler: Arc, - _http_server: Option, - _ipc_server: Option, - _dapps_server: Option, - _signer_server: Option, - updater: Arc, + updater: Option>, can_restart: bool ) -> bool { let exit = Arc::new((Mutex::new(false), Condvar::new())); @@ -488,12 +539,14 @@ fn wait_for_exit( let e = exit.clone(); panic_handler.on_panic(move |_reason| { e.1.notify_all(); }); - // Handle updater wanting to restart us - if can_restart { - let e = exit.clone(); - updater.set_exit_handler(move || { *e.0.lock() = true; e.1.notify_all(); }); - } else { - updater.set_exit_handler(|| info!("Update installed; ready for restart.")); + if let Some(updater) = updater { + // Handle updater wanting to restart us + if can_restart { + let e = exit.clone(); + updater.set_exit_handler(move || { *e.0.lock() = true; e.1.notify_all(); }); + } else { + updater.set_exit_handler(|| info!("Update installed; ready for restart.")); + } } // Wait for signal From a7505be627fdc6ac25d32133b6e7dd2a49dd891b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 16 Dec 2016 22:09:29 +0100 Subject: [PATCH 26/42] fix deadlocks --- ethcore/light/src/net/mod.rs | 6 ++++-- sync/src/light_sync/mod.rs | 24 +++++++++++++++++------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 08bd3347d..464c71fbe 100644 --- 
a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -284,8 +284,8 @@ impl LightProtocol { let peer = try!(peers.get(peer_id).ok_or_else(|| Error::UnknownPeer)); let mut peer = peer.lock(); - match peer.remote_flow.as_mut() { - Some(&mut (ref mut buf, ref flow)) => { + match peer.remote_flow { + Some((ref mut buf, ref flow)) => { flow.recharge(buf); let max = flow.compute_cost(request.kind(), request.amount()); try!(buf.deduct_cost(max)); @@ -296,6 +296,8 @@ impl LightProtocol { let req_id = self.req_id.fetch_add(1, Ordering::SeqCst); let packet_data = encode_request(&request, req_id); + trace!(target: "les", "Dispatching request {} to peer {}", req_id, peer_id); + let packet_id = match request.kind() { request::Kind::Headers => packet::GET_BLOCK_HEADERS, request::Kind::Bodies => packet::GET_BLOCK_BODIES, diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 5ca441949..821d0a4b2 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -193,9 +193,11 @@ impl Handler for LightSync { head_num: status.head_num, }; - let mut best = self.best_seen.lock(); - if best.as_ref().map_or(true, |b| status.head_td > b.1) { - *best = Some((status.head_hash, status.head_td)); + { + let mut best = self.best_seen.lock(); + if best.as_ref().map_or(true, |b| status.head_td > b.1) { + *best = Some((status.head_hash, status.head_td)); + } } self.peers.write().insert(ctx.peer(), Mutex::new(Peer::new(chain_info))); @@ -210,6 +212,8 @@ impl Handler for LightSync { None => return, }; + trace!(target: "sync", "peer {} disconnecting", peer_id); + let new_best = { let mut best = self.best_seen.lock(); let peer_best = (peer.status.head_hash, peer.status.head_td); @@ -249,7 +253,7 @@ impl Handler for LightSync { fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) { let last_td = { let peers = self.peers.read(); - match peers.get(&ctx.peer()){ + match peers.get(&ctx.peer()) { None => return, Some(peer) => { let mut peer 
= peer.lock(); @@ -273,9 +277,11 @@ impl Handler for LightSync { ctx.disconnect_peer(ctx.peer()); } - let mut best = self.best_seen.lock(); - if best.as_ref().map_or(true, |b| announcement.head_td > b.1) { - *best = Some((announcement.head_hash, announcement.head_td)); + { + let mut best = self.best_seen.lock(); + if best.as_ref().map_or(true, |b| announcement.head_td > b.1) { + *best = Some((announcement.head_hash, announcement.head_td)); + } } self.maintain_sync(ctx.as_basic()); @@ -326,12 +332,16 @@ impl LightSync { return; } + trace!(target: "sync", "Beginning search for common ancestor"); + *state = SyncState::AncestorSearch(AncestorSearch::begin(chain_info.best_block_number)); } fn maintain_sync(&self, ctx: &BasicContext) { const DRAIN_AMOUNT: usize = 128; + debug!(target: "sync", "Maintaining sync."); + let mut state = self.state.lock(); // drain any pending blocks into the queue. From 653363c5720452056eaad8ad1f07e1759e14bc2b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 16 Dec 2016 23:21:17 +0100 Subject: [PATCH 27/42] limit to one pending request per peer --- sync/src/light_sync/mod.rs | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 821d0a4b2..8baeeb0ab 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -53,6 +53,7 @@ struct ChainInfo { struct Peer { status: ChainInfo, + working: bool, } impl Peer { @@ -60,8 +61,18 @@ impl Peer { fn new(chain_info: ChainInfo) -> Self { Peer { status: chain_info, + working: false, } } + + // whether the peer is fully loaded with requests. + fn is_fully_loaded(&self) -> bool { self.working } + + // signal that the peer's load has been lightened. + fn load_lightened(&mut self) { self.working = false } + + // signal that the peer's load has been increased. + fn load_increased(&mut self) { self.working = true } } // search for a common ancestor with the best chain. 
@@ -288,8 +299,9 @@ impl Handler for LightSync { } fn on_block_headers(&self, ctx: &EventContext, req_id: ReqId, headers: &[Bytes]) { - if !self.peers.read().contains_key(&ctx.peer()) { - return; + match self.peers.read().get(&ctx.peer()) { + Some(peer) => peer.lock().load_lightened(), + None => return, } { @@ -418,9 +430,15 @@ impl LightSync { rng.shuffle(&mut peer_ids); for peer in &peer_ids { + let peer_info = peers.get(peer).expect("key known to be present; qed"); + let mut peer_info = peer_info.lock(); + if peer_info.is_fully_loaded() { continue } if ctx.max_requests(*peer, request::Kind::Headers) >= req.max { match ctx.request_from(*peer, request::Request::Headers(req.clone())) { - Ok(id) => return Some(id), + Ok(id) => { + peer_info.load_increased(); + return Some(id) + } Err(e) => trace!(target: "sync", "Error requesting headers from viable peer: {}", e), } From 08587a1f56327365e30e008314bdc675a60a81d5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 16 Dec 2016 23:21:51 +0100 Subject: [PATCH 28/42] fix subchain request ordering --- sync/src/light_sync/sync_round.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sync/src/light_sync/sync_round.rs b/sync/src/light_sync/sync_round.rs index 03ca1dea4..dc1927aae 100644 --- a/sync/src/light_sync/sync_round.rs +++ b/sync/src/light_sync/sync_round.rs @@ -74,13 +74,15 @@ struct SubchainRequest { // front of the round are dispatched first. 
impl PartialOrd for SubchainRequest { fn partial_cmp(&self, other: &Self) -> Option { - self.subchain_parent.0.partial_cmp(&other.subchain_parent.0) + self.subchain_parent.0 + .partial_cmp(&other.subchain_parent.0) + .map(Ordering::reverse) } } impl Ord for SubchainRequest { fn cmp(&self, other: &Self) -> Ordering { - self.subchain_parent.0.cmp(&other.subchain_parent.0) + self.subchain_parent.0.cmp(&other.subchain_parent.0).reverse() } } From e57ab967311d545623e2b59573f68e26a8948211 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 16 Dec 2016 23:21:59 +0100 Subject: [PATCH 29/42] fix reverse start block check --- sync/src/light_sync/response.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/sync/src/light_sync/response.rs b/sync/src/light_sync/response.rs index 131f7e4e2..907db763e 100644 --- a/sync/src/light_sync/response.rs +++ b/sync/src/light_sync/response.rs @@ -96,10 +96,8 @@ struct Max(usize); impl Constraint for StartsAtNumber { type Error = BasicError; - fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), BasicError> { - let earliest = if reverse { headers.last() } else { headers.first() }; - - earliest.map_or(Ok(()), |h| { + fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> { + headers.first().map_or(Ok(()), |h| { if h.number() == self.0 { Ok(()) } else { @@ -112,10 +110,8 @@ impl Constraint for StartsAtNumber { impl Constraint for StartsAtHash { type Error = BasicError; - fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), BasicError> { - let earliest = if reverse { headers.last() } else { headers.first() }; - - earliest.map_or(Ok(()), |h| { + fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> { + headers.first().map_or(Ok(()), |h| { if h.hash() == self.0 { Ok(()) } else { @@ -188,7 +184,7 @@ mod tests { #[test] fn sequential_backward() { let request = HeadersRequest { - start: 10.into(), + start: 34.into(), max: 30, skip: 0, 
reverse: true, From 2c0f456e4dbac03108dbedd6bcc9f9b785d47404 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 16 Dec 2016 23:53:20 +0100 Subject: [PATCH 30/42] guard import order --- ethcore/light/src/client/header_chain.rs | 6 ++++-- ethcore/light/src/client/mod.rs | 8 ++++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 847b0251f..a093dd170 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -104,13 +104,16 @@ impl HeaderChain { let number = view.number(); let parent_hash = view.parent_hash(); + // hold candidates the whole time to guard import order. + let mut candidates = self.candidates.write(); + // find parent details. let parent_td = { if number == 1 { let g_view = HeaderView::new(&self.genesis_header); g_view.difficulty() } else { - let maybe_td = self.candidates.read().get(&(number - 1)) + let maybe_td = candidates.get(&(number - 1)) .and_then(|entry| entry.candidates.iter().find(|c| c.hash == parent_hash)) .map(|c| c.total_difficulty); @@ -124,7 +127,6 @@ impl HeaderChain { let total_difficulty = parent_td + view.difficulty(); // insert headers and candidates entries. 
- let mut candidates = self.candidates.write(); candidates.entry(number).or_insert_with(|| Entry { candidates: SmallVec::new(), canonical_hash: hash}) .candidates.push(Candidate { hash: hash, diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index d51f2c484..d9ce036c7 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -76,6 +76,7 @@ pub struct Client { queue: HeaderQueue, chain: HeaderChain, tx_pool: Mutex>, + import_lock: Mutex<()>, } impl Client { @@ -85,6 +86,7 @@ impl Client { queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, true), chain: HeaderChain::new(&::rlp::encode(&spec.genesis_header())), tx_pool: Mutex::new(Default::default()), + import_lock: Mutex::new(()), } } @@ -149,15 +151,17 @@ impl Client { pub fn import_verified(&self) { const MAX: usize = 256; + let _lock = self.import_lock.lock(); + let mut bad = Vec::new(); let mut good = Vec::new(); for verified_header in self.queue.drain(MAX) { - let hash = verified_header.hash(); + let (num, hash) = (verified_header.number(), verified_header.hash()); match self.chain.insert(::rlp::encode(&verified_header).to_vec()) { Ok(()) => good.push(hash), Err(e) => { - debug!(target: "client", "Error importing header {}: {}", hash, e); + debug!(target: "client", "Error importing header {:?}: {}", (num, hash), e); bad.push(hash); } } From be914ddfc7ccfe8660d7b5ea93472ca306f1858d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 16 Dec 2016 23:53:36 +0100 Subject: [PATCH 31/42] fix check for moving backwards --- sync/src/light_sync/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 8baeeb0ab..3090af08c 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -282,7 +282,7 @@ impl Handler for LightSync { trace!(target: "sync", "Announcement from peer {}: new chain head {:?}, reorg depth {}", ctx.peer(), 
(announcement.head_hash, announcement.head_num), announcement.reorg_depth); - if last_td < announcement.head_td { + if last_td > announcement.head_td { trace!(target: "sync", "Peer {} moved backwards.", ctx.peer()); self.peers.write().remove(&ctx.peer()); ctx.disconnect_peer(ctx.peer()); From 893892ca121e4100135e649f9258ef7d4814e95a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 17 Dec 2016 00:31:21 +0100 Subject: [PATCH 32/42] remove request limiting from sync service --- sync/src/light_sync/mod.rs | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 3090af08c..97ddfad46 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -53,7 +53,6 @@ struct ChainInfo { struct Peer { status: ChainInfo, - working: bool, } impl Peer { @@ -61,20 +60,9 @@ impl Peer { fn new(chain_info: ChainInfo) -> Self { Peer { status: chain_info, - working: false, } } - - // whether the peer is fully loaded with requests. - fn is_fully_loaded(&self) -> bool { self.working } - - // signal that the peer's load has been lightened. - fn load_lightened(&mut self) { self.working = false } - - // signal that the peer's load has been increased. - fn load_increased(&mut self) { self.working = true } } - // search for a common ancestor with the best chain. enum AncestorSearch { Queued(u64), // queued to search for blocks starting from here. 
@@ -299,9 +287,8 @@ impl Handler for LightSync { } fn on_block_headers(&self, ctx: &EventContext, req_id: ReqId, headers: &[Bytes]) { - match self.peers.read().get(&ctx.peer()) { - Some(peer) => peer.lock().load_lightened(), - None => return, + if !self.peers.read().contains_key(&ctx.peer()) { + return } { @@ -432,11 +419,9 @@ impl LightSync { for peer in &peer_ids { let peer_info = peers.get(peer).expect("key known to be present; qed"); let mut peer_info = peer_info.lock(); - if peer_info.is_fully_loaded() { continue } if ctx.max_requests(*peer, request::Kind::Headers) >= req.max { match ctx.request_from(*peer, request::Request::Headers(req.clone())) { Ok(id) => { - peer_info.load_increased(); return Some(id) } Err(e) => From bd59ef05acc50f3bf3b2bcbb7ebd0485423e8239 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 17 Dec 2016 00:40:41 +0100 Subject: [PATCH 33/42] prettier informant --- parity/run.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/parity/run.rs b/parity/run.rs index 4e88fe458..cda3618fb 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -126,7 +126,7 @@ pub fn execute_light(cmd: RunCmd, logger: Arc) -> Result) -> Result Date: Sat, 17 Dec 2016 00:44:18 +0100 Subject: [PATCH 34/42] fix compiler warnings --- parity/run.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/parity/run.rs b/parity/run.rs index cda3618fb..6647bd315 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -33,9 +33,7 @@ use ethsync::SyncConfig; use informant::Informant; use updater::{UpdatePolicy, Updater}; -use rpc::{HttpServer, IpcServer, HttpConfiguration, IpcConfiguration}; -use signer::SignerServer; -use dapps::WebappServer; +use rpc::{HttpConfiguration, IpcConfiguration}; use params::{ SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool @@ -118,7 +116,7 @@ pub fn open_ui(dapps_conf: &dapps::Configuration, signer_conf: 
&signer::Configur } // Execute in light client mode. -pub fn execute_light(cmd: RunCmd, logger: Arc) -> Result { +pub fn execute_light(cmd: RunCmd) -> Result { use light::client::{Config as ClientConfig, Service as LightClientService}; use ethsync::{LightSync, LightSyncParams, ManageNetwork}; @@ -157,6 +155,7 @@ pub fn execute_light(cmd: RunCmd, logger: Arc) -> Result) -> R } if cmd.light { - return execute_light(cmd, logger); + return execute_light(cmd); } // set up panic handler @@ -480,6 +479,9 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // Handle exit let restart = wait_for_exit(panic_handler, Some(updater), can_restart); + // drop this stuff as soon as exit detected. + drop((http_server, ipc_server, dapps_server, signer_server)); + info!("Finishing work, please wait..."); // to make sure timer does not spawn requests while shutdown is in progress From ccdf5d587372f23d2ad9a4d6f05d5b01a8a67c3a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 19 Dec 2016 12:28:42 +0100 Subject: [PATCH 35/42] increase tick timer and limit peers to one req --- ethcore/light/src/net/error.rs | 8 ++++++-- ethcore/light/src/net/mod.rs | 35 +++++++++++++++++++++++++--------- sync/src/light_sync/mod.rs | 2 -- 3 files changed, 32 insertions(+), 13 deletions(-) diff --git a/ethcore/light/src/net/error.rs b/ethcore/light/src/net/error.rs index 42d038679..6a7746543 100644 --- a/ethcore/light/src/net/error.rs +++ b/ethcore/light/src/net/error.rs @@ -62,6 +62,8 @@ pub enum Error { UnsupportedProtocolVersion(u8), /// Bad protocol version. BadProtocolVersion, + /// Peer is overburdened. 
+ Overburdened, } impl Error { @@ -79,6 +81,7 @@ impl Error { Error::NotServer => Punishment::Disable, Error::UnsupportedProtocolVersion(_) => Punishment::Disable, Error::BadProtocolVersion => Punishment::Disable, + Error::Overburdened => Punishment::None, } } } @@ -107,8 +110,9 @@ impl fmt::Display for Error { Error::UnknownPeer => write!(f, "Unknown peer"), Error::UnsolicitedResponse => write!(f, "Peer provided unsolicited data"), Error::NotServer => write!(f, "Peer not a server."), - Error::UnsupportedProtocolVersion(pv) => write!(f, "Unsupported protocol version: {}", pv), + Error::UnsupportedProtocolVersion(pv) => write!(f, "Unsupported protocol version: {}", pv), Error::BadProtocolVersion => write!(f, "Bad protocol version in handshake"), + Error::Overburdened => write!(f, "Peer overburdened"), } } -} \ No newline at end of file +} diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 464c71fbe..766cd9c4b 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -55,6 +55,9 @@ pub use self::status::{Status, Capabilities, Announcement}; const TIMEOUT: TimerToken = 0; const TIMEOUT_INTERVAL_MS: u64 = 1000; +const TICK_TIMEOUT: TimerToken = 1; +const TICK_TIMEOUT_INTERVAL_MS: u64 = 5000; + // minimum interval between updates. const UPDATE_INTERVAL_MS: i64 = 5000; @@ -132,8 +135,9 @@ struct Peer { status: Status, capabilities: Capabilities, remote_flow: Option<(Buffer, FlowParams)>, - sent_head: H256, // last head we've given them. + sent_head: H256, // last chain head we've given them. last_update: SteadyTime, + idle: bool, // make into a current percentage of max buffer being requested? 
} impl Peer { @@ -263,10 +267,16 @@ impl LightProtocol { pub fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize { self.peers.read().get(&peer).and_then(|peer| { let mut peer = peer.lock(); - match peer.remote_flow.as_mut() { - Some(&mut (ref mut buf, ref flow)) => { + let idle = peer.idle; + match peer.remote_flow { + Some((ref mut buf, ref flow)) => { flow.recharge(buf); - Some(flow.max_amount(&*buf, kind)) + + if !idle { + Some(0) + } else { + Some(flow.max_amount(&*buf, kind)) + } } None => None, } @@ -284,6 +294,8 @@ impl LightProtocol { let peer = try!(peers.get(peer_id).ok_or_else(|| Error::UnknownPeer)); let mut peer = peer.lock(); + if !peer.idle { return Err(Error::Overburdened) } + match peer.remote_flow { Some((ref mut buf, ref flow)) => { flow.recharge(buf); @@ -309,6 +321,7 @@ impl LightProtocol { io.send(*peer_id, packet_id, packet_data); + peer.idle = false; self.pending_requests.write().insert(req_id, Requested { request: request, timestamp: SteadyTime::now(), @@ -412,6 +425,8 @@ impl LightProtocol { match peers.get(peer) { Some(peer_info) => { let mut peer_info = peer_info.lock(); + peer_info.idle = true; + match peer_info.remote_flow.as_mut() { Some(&mut (ref mut buf, ref mut flow)) => { let actual_buffer = ::std::cmp::min(cur_buffer, *flow.limit()); @@ -620,6 +635,7 @@ impl LightProtocol { remote_flow: remote_flow, sent_head: pending.sent_head, last_update: pending.last_update, + idle: true, })); for handler in &self.handlers { @@ -1124,7 +1140,10 @@ fn punish(peer: PeerId, io: &IoContext, e: Error) { impl NetworkProtocolHandler for LightProtocol { fn initialize(&self, io: &NetworkContext) { - io.register_timer(TIMEOUT, TIMEOUT_INTERVAL_MS).expect("Error registering sync timer."); + io.register_timer(TIMEOUT, TIMEOUT_INTERVAL_MS) + .expect("Error registering sync timer."); + io.register_timer(TICK_TIMEOUT, TICK_TIMEOUT_INTERVAL_MS) + .expect("Error registering sync timer."); } fn read(&self, io: &NetworkContext, peer: &PeerId, 
packet_id: u8, data: &[u8]) { @@ -1141,10 +1160,8 @@ impl NetworkProtocolHandler for LightProtocol { fn timeout(&self, io: &NetworkContext, timer: TimerToken) { match timer { - TIMEOUT => { - self.timeout_check(io); - self.tick_handlers(io); - }, + TIMEOUT => self.timeout_check(io), + TICK_TIMEOUT => self.tick_handlers(io), _ => warn!(target: "les", "received timeout on unknown token {}", timer), } } diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 97ddfad46..6f5a53c5f 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -417,8 +417,6 @@ impl LightSync { rng.shuffle(&mut peer_ids); for peer in &peer_ids { - let peer_info = peers.get(peer).expect("key known to be present; qed"); - let mut peer_info = peer_info.lock(); if ctx.max_requests(*peer, request::Kind::Headers) >= req.max { match ctx.request_from(*peer, request::Request::Headers(req.clone())) { Ok(id) => { From d76561105014f44ff97d238387ca5d4c58c76490 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 19 Dec 2016 12:41:00 +0100 Subject: [PATCH 36/42] allow queue to drain fully when idle --- sync/src/light_sync/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 6f5a53c5f..226b1fdff 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -322,9 +322,6 @@ impl LightSync { // Begins a search for the common ancestor and our best block. // does not lock state, instead has a mutable reference to it passed. fn begin_search(&self, state: &mut SyncState) { - self.client.clear_queue(); - - let chain_info = self.client.chain_info(); if let None = *self.best_seen.lock() { // no peers. 
*state = SyncState::Idle; @@ -332,6 +329,8 @@ impl LightSync { } trace!(target: "sync", "Beginning search for common ancestor"); + self.client.clear_queue(); + let chain_info = self.client.chain_info(); *state = SyncState::AncestorSearch(AncestorSearch::begin(chain_info.best_block_number)); } From 2b1f51694b3ecf0fcc293f597a37eb127d09244c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 19 Dec 2016 14:01:19 +0100 Subject: [PATCH 37/42] fix cli test --- parity/cli/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 29445056b..d8a757c5c 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -734,6 +734,7 @@ mod tests { db_path: None, keys_path: None, identity: None, + light: None, }), account: Some(Account { unlock: Some(vec!["0x1".into(), "0x2".into(), "0x3".into()]), From 10c56acff60338602d0546506d68944605173a03 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 23 Dec 2016 13:21:29 +0100 Subject: [PATCH 38/42] revert tests submodule --- ethcore/res/ethereum/tests | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/res/ethereum/tests b/ethcore/res/ethereum/tests index e8f4624b7..9028c4801 160000 --- a/ethcore/res/ethereum/tests +++ b/ethcore/res/ethereum/tests @@ -1 +1 @@ -Subproject commit e8f4624b7f1a15c63674eecf577c7ab76c3b16be +Subproject commit 9028c4801fd39fbb71a9796979182549a24e81c8 From bdf90df56f1d5d9f2c99bcca001d5a959db448dd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 23 Dec 2016 14:50:42 +0100 Subject: [PATCH 39/42] client report and heap size for header chain --- ethcore/light/src/client/header_chain.rs | 19 ++++++++++++++++++- ethcore/light/src/client/mod.rs | 15 +++++++++++++-- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index a093dd170..388f54d5a 100644 --- a/ethcore/light/src/client/header_chain.rs +++ 
b/ethcore/light/src/client/header_chain.rs @@ -34,7 +34,7 @@ use ethcore::block_status::BlockStatus; use ethcore::error::BlockError; use ethcore::ids::BlockId; use ethcore::views::HeaderView; -use util::{Bytes, H256, U256, Mutex, RwLock}; +use util::{Bytes, H256, U256, HeapSizeOf, Mutex, RwLock}; use smallvec::SmallVec; @@ -66,6 +66,15 @@ struct Entry { canonical_hash: H256, } +impl HeapSizeOf for Entry { + fn heap_size_of_children(&self) -> usize { + match self.candidates.spilled() { + false => 0, + true => self.candidates.capacity() * ::std::mem::size_of::(), + } + } +} + /// Header chain. See module docs for more details. pub struct HeaderChain { genesis_header: Bytes, // special-case the genesis. @@ -255,6 +264,14 @@ impl HeaderChain { } } +impl HeapSizeOf for HeaderChain { + fn heap_size_of_children(&self) -> usize { + self.candidates.read().heap_size_of_children() + + self.headers.read().heap_size_of_children() + + self.cht_roots.lock().heap_size_of_children() + } +} + #[cfg(test)] mod tests { use super::HeaderChain; diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 16b4547df..65a83c7e3 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -18,6 +18,7 @@ use ethcore::block_import_error::BlockImportError; use ethcore::block_status::BlockStatus; +use ethcore::client::ClientReport; use ethcore::ids::BlockId; use ethcore::header::Header; use ethcore::verification::queue::{self, HeaderQueue}; @@ -28,7 +29,7 @@ use ethcore::service::ClientIoMessage; use io::IoChannel; use util::hash::{H256, H256FastMap}; -use util::{Bytes, Mutex}; +use util::{Bytes, Mutex, RwLock}; use provider::Provider; use request; @@ -76,6 +77,7 @@ pub struct Client { queue: HeaderQueue, chain: HeaderChain, tx_pool: Mutex>, + report: RwLock, import_lock: Mutex<()>, } @@ -86,6 +88,7 @@ impl Client { queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, true), chain: 
HeaderChain::new(&::rlp::encode(&spec.genesis_header())), tx_pool: Mutex::new(Default::default()), + report: RwLock::new(ClientReport::default()), import_lock: Mutex::new(()), } } @@ -164,7 +167,10 @@ impl Client { let (num, hash) = (verified_header.number(), verified_header.hash()); match self.chain.insert(::rlp::encode(&verified_header).to_vec()) { - Ok(()) => good.push(hash), + Ok(()) => { + good.push(hash); + self.report.write().blocks_imported += 1; + } Err(e) => { debug!(target: "client", "Error importing header {:?}: {}", (num, hash), e); bad.push(hash); @@ -175,6 +181,11 @@ impl Client { self.queue.mark_as_bad(&bad); self.queue.mark_as_good(&good); } + + /// Get a report about blocks imported. + pub fn report(&self) -> ClientReport { + ::std::mem::replace(&mut *self.report.write(), ClientReport::default()) + } } impl LightChainClient for Client { From 0688ccb003894546f7d81f4cf936d8f384f09dfc Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 23 Dec 2016 14:54:31 +0100 Subject: [PATCH 40/42] chain_mem_used function on client --- ethcore/light/src/client/mod.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 65a83c7e3..7b1702804 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -186,6 +186,13 @@ impl Client { pub fn report(&self) -> ClientReport { ::std::mem::replace(&mut *self.report.write(), ClientReport::default()) } + + /// Get blockchain mem usage in bytes. 
+ pub fn chain_mem_used(&self) -> usize { + use util::HeapSizeOf; + + self.chain.heap_size_of_children() + } } impl LightChainClient for Client { From 55dbfbf35bfaab78e761f6021ee6203ce7448214 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 27 Dec 2016 12:24:18 +0100 Subject: [PATCH 41/42] address grumbles --- ethcore/light/src/client/header_chain.rs | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 388f54d5a..fdb075e04 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -117,21 +117,16 @@ impl HeaderChain { let mut candidates = self.candidates.write(); // find parent details. - let parent_td = { + let parent_td = if number == 1 { let g_view = HeaderView::new(&self.genesis_header); g_view.difficulty() } else { - let maybe_td = candidates.get(&(number - 1)) + candidates.get(&(number - 1)) .and_then(|entry| entry.candidates.iter().find(|c| c.hash == parent_hash)) - .map(|c| c.total_difficulty); - - match maybe_td { - Some(td) => td, - None => return Err(BlockError::UnknownParent(parent_hash)), - } - } - }; + .map(|c| c.total_difficulty) + .ok_or_else(|| BlockError::UnknownParent(parent_hash))? + }; let total_difficulty = parent_td + view.difficulty(); @@ -230,9 +225,7 @@ impl HeaderChain { /// Get the genesis hash. pub fn genesis_hash(&self) -> H256 { - use util::Hashable; - - self.genesis_header.sha3() + ::util::Hashable::sha3(&self.genesis_header) } /// Get the best block's data. 
From 0b9a0b138b0affa27a965561641e4e3e2621c5f1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 4 Jan 2017 15:07:49 +0100 Subject: [PATCH 42/42] remove --light CLI --- parity/cli/mod.rs | 4 --- parity/cli/usage.txt | 2 -- parity/configuration.rs | 2 -- parity/run.rs | 63 ----------------------------------------- 4 files changed, 71 deletions(-) diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 36bc1412b..47a0af0bb 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -94,7 +94,6 @@ usage! { flag_db_path: String = "$BASE/chains", or |c: &Config| otry!(c.parity).db_path.clone(), flag_keys_path: String = "$BASE/keys", or |c: &Config| otry!(c.parity).keys_path.clone(), flag_identity: String = "", or |c: &Config| otry!(c.parity).identity.clone(), - flag_light: bool = false, or |c: &Config| otry!(c.parity).light.clone(), // -- Account Options flag_unlock: Option = None, @@ -322,7 +321,6 @@ struct Operating { db_path: Option, keys_path: Option, identity: Option, - light: Option, } #[derive(Default, Debug, PartialEq, RustcDecodable)] @@ -551,7 +549,6 @@ mod tests { flag_db_path: "$HOME/.parity/chains".into(), flag_keys_path: "$HOME/.parity/keys".into(), flag_identity: "".into(), - flag_light: false, // -- Account Options flag_unlock: Some("0xdeadbeefcafe0000000000000000000000000000".into()), @@ -730,7 +727,6 @@ mod tests { db_path: None, keys_path: None, identity: None, - light: None, }), account: Some(Account { unlock: Some(vec!["0x1".into(), "0x2".into(), "0x3".into()]), diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 884dc4d12..7c3e5cc7d 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -66,8 +66,6 @@ Operating Options: --keys-path PATH Specify the path for JSON key files to be found (default: {flag_keys_path}). --identity NAME Specify your node's name. (default: {flag_identity}) - --light Run in light client mode. Very experimental. 
- (default: {flag_light}) Account Options: --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution. diff --git a/parity/configuration.rs b/parity/configuration.rs index 6968a71ec..85ff61d5c 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -341,7 +341,6 @@ impl Configuration { no_periodic_snapshot: self.args.flag_no_periodic_snapshot, check_seal: !self.args.flag_no_seal_check, download_old_blocks: !self.args.flag_no_ancient_blocks, - light: self.args.flag_light, verifier_settings: verifier_settings, }; Cmd::Run(run_cmd) @@ -1033,7 +1032,6 @@ mod tests { check_seal: true, download_old_blocks: true, verifier_settings: Default::default(), - light: false, })); } diff --git a/parity/run.rs b/parity/run.rs index 5c28d5ad9..8ac0669d8 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -96,7 +96,6 @@ pub struct RunCmd { pub check_seal: bool, pub download_old_blocks: bool, pub verifier_settings: VerifierSettings, - pub light: bool, } pub fn open_ui(dapps_conf: &dapps::Configuration, signer_conf: &signer::Configuration) -> Result<(), String> { @@ -116,64 +115,6 @@ pub fn open_ui(dapps_conf: &dapps::Configuration, signer_conf: &signer::Configur Ok(()) } -// Execute in light client mode. -pub fn execute_light(cmd: RunCmd) -> Result { - use light::client::{Config as ClientConfig, Service as LightClientService}; - use ethsync::{LightSync, LightSyncParams, ManageNetwork}; - - let panic_handler = PanicHandler::new_in_arc(); - - info!( - "Configured in {} mode. 
Note that this feature is {}.", - Colour::Blue.bold().paint("Light Client"), - Colour::Red.bold().paint("experimental"), - ); - - let mut client_config = ClientConfig::default(); - let queue_size = cmd.cache_config.queue(); - - client_config.queue.max_queue_size = queue_size as usize; - client_config.queue.verifier_settings = cmd.verifier_settings; - - let spec = try!(cmd.spec.spec()); - let service = try!(LightClientService::start(client_config, &spec) - .map_err(|e| format!("Error starting light client service: {}", e))); - - let net_conf = try!(cmd.net_conf.into_basic() - .map_err(|e| format!("Failed to create network config: {}", e))); - - let sync_params = LightSyncParams { - network_config: net_conf, - client: service.client().clone(), - network_id: cmd.network_id.unwrap_or(spec.network_id()), - subprotocol_name: *b"les", - }; - - let sync = try!(LightSync::new(sync_params) - .map_err(|e| format!("Failed to initialize sync service: {}", e))); - - sync.start_network(); - - let log_client = service.client().clone(); - ::std::thread::spawn(move || { - // TODO: proper informant. - loop { - ::std::thread::sleep(::std::time::Duration::from_secs(5)); - let chain_info = log_client.chain_info(); - let queue_info = log_client.queue_info(); - println!( - "#{} {:5}+{:5} Qed", - chain_info.best_block_number, - queue_info.unverified_queue_size, - queue_info.verified_queue_size - ); - } - }); - - wait_for_exit(panic_handler, None, false); - Ok(false) -} - pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> Result { if cmd.ui && cmd.dapps_conf.enabled { // Check if Parity is already running @@ -183,10 +124,6 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R } } - if cmd.light { - return execute_light(cmd); - } - // set up panic handler let panic_handler = PanicHandler::new_in_arc();