2017-01-25 18:51:41 +01:00
|
|
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
2016-12-13 21:09:43 +01:00
|
|
|
// This file is part of Parity.
|
|
|
|
|
|
|
|
// Parity is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
|
|
|
|
// Parity is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU General Public License for more details.
|
|
|
|
|
|
|
|
// You should have received a copy of the GNU General Public License
|
|
|
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
//! Light client synchronization.
|
|
|
|
//!
|
2017-03-16 23:51:47 +01:00
|
|
|
//! This will synchronize the header chain using PIP messages.
|
2016-12-13 21:09:43 +01:00
|
|
|
//! Dataflow is largely one-directional as headers are pushed into
|
|
|
|
//! the light client queue for import. Where possible, they are batched
|
|
|
|
//! in groups.
|
|
|
|
//!
|
|
|
|
//! This is written assuming that the client and sync service are running
|
2016-12-14 23:26:15 +01:00
|
|
|
//! in the same binary; unlike a full node which might communicate via IPC.
|
2017-01-11 14:39:03 +01:00
|
|
|
//!
|
|
|
|
//!
|
|
|
|
//! Sync strategy:
|
|
|
|
//! - Find a common ancestor with peers.
|
|
|
|
//! - Split the chain up into subchains, which are downloaded in parallel from various peers in rounds.
|
|
|
|
//! - When within a certain distance of the head of the chain, aggressively download all
|
|
|
|
//! announced blocks.
|
|
|
|
//! - On bad block/response, punish peer and reset.
|
2016-12-13 21:09:43 +01:00
|
|
|
|
2017-03-23 15:44:16 +01:00
|
|
|
use std::collections::{HashMap, HashSet};
|
2016-12-15 21:51:08 +01:00
|
|
|
use std::mem;
|
2016-12-13 21:09:43 +01:00
|
|
|
use std::sync::Arc;
|
|
|
|
|
2017-03-16 23:51:47 +01:00
|
|
|
use ethcore::encoded;
|
2017-02-09 18:42:18 +01:00
|
|
|
use light::client::{AsLightClient, LightChainClient};
|
2016-12-15 21:51:08 +01:00
|
|
|
use light::net::{
|
2017-06-30 10:58:48 +02:00
|
|
|
PeerStatus, Announcement, Handler, BasicContext,
|
|
|
|
EventContext, Capabilities, ReqId, Status,
|
|
|
|
Error as NetError,
|
2016-12-15 21:51:08 +01:00
|
|
|
};
|
2017-03-16 23:51:47 +01:00
|
|
|
use light::request::{self, CompleteHeadersRequest as HeadersRequest};
|
2016-12-13 21:09:43 +01:00
|
|
|
use network::PeerId;
|
2017-09-04 16:36:49 +02:00
|
|
|
use bigint::prelude::U256;
|
|
|
|
use bigint::hash::H256;
|
2017-09-02 20:09:13 +02:00
|
|
|
use parking_lot::{Mutex, RwLock};
|
2016-12-15 22:42:24 +01:00
|
|
|
use rand::{Rng, OsRng};
|
2016-12-13 21:09:43 +01:00
|
|
|
|
2016-12-15 22:42:24 +01:00
|
|
|
use self::sync_round::{AbortReason, SyncRound, ResponseContext};
|
2016-12-15 21:51:08 +01:00
|
|
|
|
2016-12-14 23:26:15 +01:00
|
|
|
mod response;
|
|
|
|
mod sync_round;
|
2016-12-13 21:09:43 +01:00
|
|
|
|
2017-01-11 14:39:03 +01:00
|
|
|
#[cfg(test)]
|
|
|
|
mod tests;
|
|
|
|
|
2016-12-13 21:09:43 +01:00
|
|
|
/// Peer chain info.
///
/// A snapshot of a peer's best chain head, used both to track individual
/// peers and to pick the best sync target on the network.
#[derive(Debug, Clone, PartialEq, Eq)]
struct ChainInfo {
	// Total difficulty of the peer's head block. This is the quantity
	// chains are compared by (see the `Ord` impl below).
	head_td: U256,
	// Hash of the head block.
	head_hash: H256,
	// Number of the head block.
	head_num: u64,
}
|
|
|
|
|
2017-01-20 12:41:59 +01:00
|
|
|
impl PartialOrd for ChainInfo {
|
|
|
|
fn partial_cmp(&self, other: &Self) -> Option<::std::cmp::Ordering> {
|
|
|
|
self.head_td.partial_cmp(&other.head_td)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Ord for ChainInfo {
|
|
|
|
fn cmp(&self, other: &Self) -> ::std::cmp::Ordering {
|
|
|
|
self.head_td.cmp(&other.head_td)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-14 23:26:15 +01:00
|
|
|
// Records kept about a connected peer relevant to synchronization.
struct Peer {
	// Last known chain head of this peer, updated on announcements.
	status: ChainInfo,
}
|
|
|
|
|
2016-12-14 23:26:15 +01:00
|
|
|
impl Peer {
|
2016-12-16 15:26:39 +01:00
|
|
|
// Create a new peer.
|
2016-12-14 23:26:15 +01:00
|
|
|
fn new(chain_info: ChainInfo) -> Self {
|
|
|
|
Peer {
|
2016-12-16 15:26:39 +01:00
|
|
|
status: chain_info,
|
2016-12-13 21:09:43 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-01-20 12:41:59 +01:00
|
|
|
|
2016-12-16 14:53:36 +01:00
|
|
|
// search for a common ancestor with the best chain.
//
// State machine walked backwards from our best block in batches of
// reverse-header requests until a block known to the local client is found
// (or we hit the genesis / fall off the front of a pruned chain).
#[derive(Debug)]
enum AncestorSearch {
	Queued(u64), // queued to search for blocks starting from here.
	Awaiting(ReqId, u64, HeadersRequest), // awaiting response for this request.
	Prehistoric, // prehistoric block found. TODO: start to roll back CHTs.
	FoundCommon(u64, H256), // common block found.
	Genesis, // common ancestor is the genesis.
}
|
|
|
|
|
|
|
|
impl AncestorSearch {
|
|
|
|
fn begin(best_num: u64) -> Self {
|
|
|
|
match best_num {
|
|
|
|
0 => AncestorSearch::Genesis,
|
2016-12-16 15:26:39 +01:00
|
|
|
_ => AncestorSearch::Queued(best_num),
|
2016-12-16 14:53:36 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-16 15:26:39 +01:00
|
|
|
fn process_response<L>(self, ctx: &ResponseContext, client: &L) -> AncestorSearch
|
2017-02-09 18:42:18 +01:00
|
|
|
where L: AsLightClient
|
2016-12-16 14:53:36 +01:00
|
|
|
{
|
2017-02-09 18:42:18 +01:00
|
|
|
let client = client.as_light_client();
|
2016-12-16 14:53:36 +01:00
|
|
|
let first_num = client.chain_info().first_block_number.unwrap_or(0);
|
|
|
|
match self {
|
|
|
|
AncestorSearch::Awaiting(id, start, req) => {
|
|
|
|
if &id == ctx.req_id() {
|
2017-03-16 23:51:47 +01:00
|
|
|
match response::verify(ctx.data(), &req) {
|
2016-12-16 14:53:36 +01:00
|
|
|
Ok(headers) => {
|
|
|
|
for header in &headers {
|
|
|
|
if client.is_known(&header.hash()) {
|
|
|
|
debug!(target: "sync", "Found common ancestor with best chain");
|
|
|
|
return AncestorSearch::FoundCommon(header.number(), header.hash());
|
|
|
|
}
|
|
|
|
|
2017-01-20 12:41:59 +01:00
|
|
|
if header.number() < first_num {
|
2016-12-16 14:53:36 +01:00
|
|
|
debug!(target: "sync", "Prehistoric common ancestor with best chain.");
|
|
|
|
return AncestorSearch::Prehistoric;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-20 12:41:59 +01:00
|
|
|
let probe = start - headers.len() as u64;
|
|
|
|
if probe == 0 {
|
|
|
|
AncestorSearch::Genesis
|
|
|
|
} else {
|
|
|
|
AncestorSearch::Queued(probe)
|
|
|
|
}
|
2016-12-16 14:53:36 +01:00
|
|
|
}
|
|
|
|
Err(e) => {
|
|
|
|
trace!(target: "sync", "Bad headers response from {}: {}", ctx.responder(), e);
|
|
|
|
|
|
|
|
ctx.punish_responder();
|
|
|
|
AncestorSearch::Queued(start)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
AncestorSearch::Awaiting(id, start, req)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
other => other,
|
|
|
|
}
|
|
|
|
}
|
2016-12-16 15:26:39 +01:00
|
|
|
|
2017-03-23 15:44:16 +01:00
|
|
|
fn requests_abandoned(self, req_ids: &[ReqId]) -> AncestorSearch {
|
|
|
|
match self {
|
|
|
|
AncestorSearch::Awaiting(id, start, req) => {
|
|
|
|
if req_ids.iter().find(|&x| x == &id).is_some() {
|
|
|
|
AncestorSearch::Queued(start)
|
|
|
|
} else {
|
|
|
|
AncestorSearch::Awaiting(id, start, req)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
other => other,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-16 15:26:39 +01:00
|
|
|
fn dispatch_request<F>(self, mut dispatcher: F) -> AncestorSearch
|
2017-03-16 23:51:47 +01:00
|
|
|
where F: FnMut(HeadersRequest) -> Option<ReqId>
|
2016-12-16 15:26:39 +01:00
|
|
|
{
|
2017-03-16 23:51:47 +01:00
|
|
|
const BATCH_SIZE: u64 = 64;
|
2016-12-16 15:26:39 +01:00
|
|
|
|
|
|
|
match self {
|
|
|
|
AncestorSearch::Queued(start) => {
|
2017-03-16 23:51:47 +01:00
|
|
|
let batch_size = ::std::cmp::min(start, BATCH_SIZE);
|
2017-01-11 14:39:03 +01:00
|
|
|
trace!(target: "sync", "Requesting {} reverse headers from {} to find common ancestor",
|
2017-01-20 12:41:59 +01:00
|
|
|
batch_size, start);
|
2017-01-11 14:39:03 +01:00
|
|
|
|
2017-03-16 23:51:47 +01:00
|
|
|
let req = HeadersRequest {
|
2016-12-16 15:26:39 +01:00
|
|
|
start: start.into(),
|
2017-01-20 12:41:59 +01:00
|
|
|
max: batch_size,
|
2016-12-16 15:26:39 +01:00
|
|
|
skip: 0,
|
|
|
|
reverse: true,
|
|
|
|
};
|
|
|
|
|
|
|
|
match dispatcher(req.clone()) {
|
|
|
|
Some(req_id) => AncestorSearch::Awaiting(req_id, start, req),
|
|
|
|
None => AncestorSearch::Queued(start),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
other => other,
|
|
|
|
}
|
|
|
|
}
|
2016-12-15 21:51:08 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// synchronization state machine.
//
// Transitions: Idle -> AncestorSearch -> Rounds -> Idle, with resets back
// to AncestorSearch on bad headers or aborted rounds (see `maintain_sync`).
#[derive(Debug)]
enum SyncState {
	// Idle (waiting for peers) or at chain head.
	Idle,
	// searching for common ancestor with best chain.
	// queue should be cleared at this phase.
	AncestorSearch(AncestorSearch),
	// Doing sync rounds.
	Rounds(SyncRound),
}
|
|
|
|
|
|
|
|
// Adapter bundling a single headers response with the network context it
// arrived in, so the sync state machines can consume it through the
// `ResponseContext` trait.
struct ResponseCtx<'a> {
	// peer the response came from.
	peer: PeerId,
	// id of the request this response answers.
	req_id: ReqId,
	// network context, used to punish misbehaving responders.
	ctx: &'a BasicContext,
	// the decoded headers of the response.
	data: &'a [encoded::Header],
}
|
|
|
|
|
|
|
|
impl<'a> ResponseContext for ResponseCtx<'a> {
|
|
|
|
fn responder(&self) -> PeerId { self.peer }
|
|
|
|
fn req_id(&self) -> &ReqId { &self.req_id }
|
2017-03-16 23:51:47 +01:00
|
|
|
fn data(&self) -> &[encoded::Header] { self.data }
|
2016-12-15 21:51:08 +01:00
|
|
|
fn punish_responder(&self) { self.ctx.disable_peer(self.peer) }
|
|
|
|
}
|
|
|
|
|
2016-12-13 21:09:43 +01:00
|
|
|
/// Light client synchronization manager. See module docs for more details.
pub struct LightSync<L: AsLightClient> {
	// our best block number when sync started; exposed via `SyncInfo`.
	start_block_number: u64,
	best_seen: Mutex<Option<ChainInfo>>, // best seen block on the network.
	peers: RwLock<HashMap<PeerId, Mutex<Peer>>>, // peers which are relevant to synchronization.
	pending_reqs: Mutex<HashSet<ReqId>>, // requests from this handler.
	// the light client headers are pushed into.
	client: Arc<L>,
	// randomness source for shuffling peers during request dispatch.
	rng: Mutex<OsRng>,
	// current synchronization state; guards all state-machine transitions.
	state: Mutex<SyncState>,
}
|
|
|
|
|
2017-02-09 18:42:18 +01:00
|
|
|
impl<L: AsLightClient + Send + Sync> Handler for LightSync<L> {
|
2017-06-30 10:58:48 +02:00
|
|
|
fn on_connect(
|
|
|
|
&self,
|
|
|
|
ctx: &EventContext,
|
|
|
|
status: &Status,
|
|
|
|
capabilities: &Capabilities
|
|
|
|
) -> PeerStatus {
|
2017-06-30 12:10:12 +02:00
|
|
|
use std::cmp;
|
|
|
|
|
2017-06-30 10:58:48 +02:00
|
|
|
if capabilities.serve_headers {
|
|
|
|
let chain_info = ChainInfo {
|
|
|
|
head_td: status.head_td,
|
|
|
|
head_hash: status.head_hash,
|
|
|
|
head_num: status.head_num,
|
|
|
|
};
|
|
|
|
|
|
|
|
{
|
|
|
|
let mut best = self.best_seen.lock();
|
2017-06-30 12:10:12 +02:00
|
|
|
*best = cmp::max(best.clone(), Some(chain_info.clone()));
|
2017-06-30 10:58:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
self.peers.write().insert(ctx.peer(), Mutex::new(Peer::new(chain_info)));
|
|
|
|
self.maintain_sync(ctx.as_basic());
|
|
|
|
|
|
|
|
PeerStatus::Kept
|
|
|
|
} else {
|
|
|
|
PeerStatus::Unkept
|
2016-12-15 21:51:08 +01:00
|
|
|
}
|
2016-12-13 22:26:06 +01:00
|
|
|
}
|
|
|
|
|
2016-12-15 21:51:08 +01:00
|
|
|
fn on_disconnect(&self, ctx: &EventContext, unfulfilled: &[ReqId]) {
|
2016-12-14 23:26:15 +01:00
|
|
|
let peer_id = ctx.peer();
|
2016-12-13 22:26:06 +01:00
|
|
|
|
2016-12-15 21:51:08 +01:00
|
|
|
let peer = match self.peers.write().remove(&peer_id).map(|p| p.into_inner()) {
|
|
|
|
Some(peer) => peer,
|
|
|
|
None => return,
|
|
|
|
};
|
|
|
|
|
2016-12-16 22:09:29 +01:00
|
|
|
trace!(target: "sync", "peer {} disconnecting", peer_id);
|
|
|
|
|
2016-12-15 21:51:08 +01:00
|
|
|
let new_best = {
|
|
|
|
let mut best = self.best_seen.lock();
|
|
|
|
|
2017-01-20 12:41:59 +01:00
|
|
|
if best.as_ref().map_or(false, |b| b == &peer.status) {
|
2016-12-15 21:51:08 +01:00
|
|
|
// search for next-best block.
|
2017-01-20 12:41:59 +01:00
|
|
|
let next_best: Option<ChainInfo> = self.peers.read().values()
|
|
|
|
.map(|p| p.lock().status.clone())
|
|
|
|
.map(Some)
|
|
|
|
.fold(None, ::std::cmp::max);
|
2016-12-15 21:51:08 +01:00
|
|
|
|
|
|
|
*best = next_best;
|
|
|
|
}
|
|
|
|
|
|
|
|
best.clone()
|
|
|
|
};
|
|
|
|
|
2017-07-26 15:48:00 +02:00
|
|
|
{
|
|
|
|
let mut pending_reqs = self.pending_reqs.lock();
|
|
|
|
for unfulfilled in unfulfilled {
|
|
|
|
pending_reqs.remove(&unfulfilled);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-15 21:51:08 +01:00
|
|
|
if new_best.is_none() {
|
|
|
|
debug!(target: "sync", "No peers remain. Reverting to idle");
|
|
|
|
*self.state.lock() = SyncState::Idle;
|
|
|
|
} else {
|
|
|
|
let mut state = self.state.lock();
|
|
|
|
|
|
|
|
*state = match mem::replace(&mut *state, SyncState::Idle) {
|
|
|
|
SyncState::Idle => SyncState::Idle,
|
2017-03-23 15:44:16 +01:00
|
|
|
SyncState::AncestorSearch(search) =>
|
|
|
|
SyncState::AncestorSearch(search.requests_abandoned(unfulfilled)),
|
2016-12-15 21:51:08 +01:00
|
|
|
SyncState::Rounds(round) => SyncState::Rounds(round.requests_abandoned(unfulfilled)),
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
self.maintain_sync(ctx.as_basic());
|
2016-12-13 22:26:06 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) {
|
2017-01-20 12:41:59 +01:00
|
|
|
let (last_td, chain_info) = {
|
2016-12-15 21:51:08 +01:00
|
|
|
let peers = self.peers.read();
|
2016-12-16 22:09:29 +01:00
|
|
|
match peers.get(&ctx.peer()) {
|
2016-12-15 21:51:08 +01:00
|
|
|
None => return,
|
|
|
|
Some(peer) => {
|
|
|
|
let mut peer = peer.lock();
|
|
|
|
let last_td = peer.status.head_td;
|
|
|
|
peer.status = ChainInfo {
|
|
|
|
head_td: announcement.head_td,
|
|
|
|
head_hash: announcement.head_hash,
|
|
|
|
head_num: announcement.head_num,
|
|
|
|
};
|
2017-01-20 12:41:59 +01:00
|
|
|
(last_td, peer.status.clone())
|
2016-12-15 21:51:08 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
2016-12-13 22:26:06 +01:00
|
|
|
|
|
|
|
trace!(target: "sync", "Announcement from peer {}: new chain head {:?}, reorg depth {}",
|
|
|
|
ctx.peer(), (announcement.head_hash, announcement.head_num), announcement.reorg_depth);
|
2016-12-15 21:51:08 +01:00
|
|
|
|
2016-12-16 23:53:36 +01:00
|
|
|
if last_td > announcement.head_td {
|
2016-12-15 21:51:08 +01:00
|
|
|
trace!(target: "sync", "Peer {} moved backwards.", ctx.peer());
|
|
|
|
self.peers.write().remove(&ctx.peer());
|
|
|
|
ctx.disconnect_peer(ctx.peer());
|
2017-01-20 12:41:59 +01:00
|
|
|
return
|
2016-12-15 21:51:08 +01:00
|
|
|
}
|
|
|
|
|
2016-12-16 22:09:29 +01:00
|
|
|
{
|
|
|
|
let mut best = self.best_seen.lock();
|
2017-01-20 12:41:59 +01:00
|
|
|
*best = ::std::cmp::max(best.clone(), Some(chain_info));
|
2016-12-15 21:51:08 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
self.maintain_sync(ctx.as_basic());
|
2016-12-13 22:26:06 +01:00
|
|
|
}
|
|
|
|
|
2017-03-16 23:51:47 +01:00
|
|
|
fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[request::Response]) {
|
|
|
|
let peer = ctx.peer();
|
|
|
|
if !self.peers.read().contains_key(&peer) {
|
2016-12-17 00:31:21 +01:00
|
|
|
return
|
2016-12-15 21:51:08 +01:00
|
|
|
}
|
|
|
|
|
2017-03-23 15:44:16 +01:00
|
|
|
if !self.pending_reqs.lock().remove(&req_id) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-03-16 23:51:47 +01:00
|
|
|
let headers = match responses.get(0) {
|
|
|
|
Some(&request::Response::Headers(ref response)) => &response.headers[..],
|
|
|
|
Some(_) => {
|
|
|
|
trace!("Disabling peer {} for wrong response type.", peer);
|
|
|
|
ctx.disable_peer(peer);
|
|
|
|
&[]
|
|
|
|
}
|
|
|
|
None => &[],
|
|
|
|
};
|
|
|
|
|
2016-12-15 21:51:08 +01:00
|
|
|
{
|
|
|
|
let mut state = self.state.lock();
|
|
|
|
|
2016-12-16 14:53:36 +01:00
|
|
|
let ctx = ResponseCtx {
|
|
|
|
peer: ctx.peer(),
|
|
|
|
req_id: req_id,
|
|
|
|
ctx: ctx.as_basic(),
|
|
|
|
data: headers,
|
|
|
|
};
|
|
|
|
|
2016-12-15 21:51:08 +01:00
|
|
|
*state = match mem::replace(&mut *state, SyncState::Idle) {
|
|
|
|
SyncState::Idle => SyncState::Idle,
|
2016-12-16 14:53:36 +01:00
|
|
|
SyncState::AncestorSearch(search) =>
|
|
|
|
SyncState::AncestorSearch(search.process_response(&ctx, &*self.client)),
|
|
|
|
SyncState::Rounds(round) => SyncState::Rounds(round.process_response(&ctx)),
|
2016-12-15 21:51:08 +01:00
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
self.maintain_sync(ctx.as_basic());
|
|
|
|
}
|
|
|
|
|
|
|
|
fn tick(&self, ctx: &BasicContext) {
|
|
|
|
self.maintain_sync(ctx);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// private helpers
|
2017-02-09 18:42:18 +01:00
|
|
|
impl<L: AsLightClient> LightSync<L> {
|
2016-12-15 22:42:24 +01:00
|
|
|
// Begins a search for the common ancestor and our best block.
|
|
|
|
// does not lock state, instead has a mutable reference to it passed.
|
2016-12-16 14:53:36 +01:00
|
|
|
fn begin_search(&self, state: &mut SyncState) {
|
|
|
|
if let None = *self.best_seen.lock() {
|
|
|
|
// no peers.
|
|
|
|
*state = SyncState::Idle;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-02-09 18:42:18 +01:00
|
|
|
self.client.as_light_client().flush_queue();
|
|
|
|
let chain_info = self.client.as_light_client().chain_info();
|
2016-12-16 22:09:29 +01:00
|
|
|
|
2017-01-11 14:39:03 +01:00
|
|
|
trace!(target: "sync", "Beginning search for common ancestor from {:?}",
|
|
|
|
(chain_info.best_block_number, chain_info.best_block_hash));
|
2016-12-16 14:53:36 +01:00
|
|
|
*state = SyncState::AncestorSearch(AncestorSearch::begin(chain_info.best_block_number));
|
2016-12-15 22:42:24 +01:00
|
|
|
}
|
|
|
|
|
2017-01-20 12:41:59 +01:00
|
|
|
// handles request dispatch, block import, and state machine transitions.
|
2016-12-15 21:51:08 +01:00
|
|
|
fn maintain_sync(&self, ctx: &BasicContext) {
|
2016-12-15 22:42:24 +01:00
|
|
|
const DRAIN_AMOUNT: usize = 128;
|
|
|
|
|
2017-02-09 18:42:18 +01:00
|
|
|
let client = self.client.as_light_client();
|
|
|
|
let chain_info = client.chain_info();
|
|
|
|
|
2016-12-15 22:42:24 +01:00
|
|
|
let mut state = self.state.lock();
|
2017-01-11 14:39:03 +01:00
|
|
|
debug!(target: "sync", "Maintaining sync ({:?})", &*state);
|
2016-12-15 22:42:24 +01:00
|
|
|
|
|
|
|
// drain any pending blocks into the queue.
|
|
|
|
{
|
|
|
|
let mut sink = Vec::with_capacity(DRAIN_AMOUNT);
|
2016-12-15 21:51:08 +01:00
|
|
|
|
2016-12-15 22:42:24 +01:00
|
|
|
'a:
|
|
|
|
loop {
|
2017-02-09 18:42:18 +01:00
|
|
|
if client.queue_info().is_full() { break }
|
2016-12-15 22:42:24 +01:00
|
|
|
|
|
|
|
*state = match mem::replace(&mut *state, SyncState::Idle) {
|
|
|
|
SyncState::Rounds(round)
|
|
|
|
=> SyncState::Rounds(round.drain(&mut sink, Some(DRAIN_AMOUNT))),
|
|
|
|
other => other,
|
|
|
|
};
|
|
|
|
|
|
|
|
if sink.is_empty() { break }
|
2017-01-11 14:39:03 +01:00
|
|
|
trace!(target: "sync", "Drained {} headers to import", sink.len());
|
2016-12-15 22:42:24 +01:00
|
|
|
|
|
|
|
for header in sink.drain(..) {
|
2017-02-09 18:42:18 +01:00
|
|
|
if let Err(e) = client.queue_header(header) {
|
2016-12-15 22:42:24 +01:00
|
|
|
debug!(target: "sync", "Found bad header ({:?}). Reset to search state.", e);
|
|
|
|
|
|
|
|
self.begin_search(&mut state);
|
|
|
|
break 'a;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-16 14:53:36 +01:00
|
|
|
// handle state transitions.
|
2016-12-15 22:42:24 +01:00
|
|
|
{
|
2017-01-20 12:41:59 +01:00
|
|
|
let best_td = chain_info.pending_total_difficulty;
|
|
|
|
let sync_target = match *self.best_seen.lock() {
|
|
|
|
Some(ref target) if target.head_td > best_td => (target.head_num, target.head_hash),
|
2017-03-22 19:26:51 +01:00
|
|
|
ref other => {
|
|
|
|
let network_score = other.as_ref().map(|target| target.head_td);
|
|
|
|
trace!(target: "sync", "No target to sync to. Network score: {:?}, Local score: {:?}",
|
|
|
|
network_score, best_td);
|
2017-01-20 12:41:59 +01:00
|
|
|
*state = SyncState::Idle;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2016-12-15 22:42:24 +01:00
|
|
|
match mem::replace(&mut *state, SyncState::Idle) {
|
2017-01-20 12:41:59 +01:00
|
|
|
SyncState::Rounds(SyncRound::Abort(reason, remaining)) => {
|
|
|
|
if remaining.len() > 0 {
|
|
|
|
*state = SyncState::Rounds(SyncRound::Abort(reason, remaining));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-12-15 22:42:24 +01:00
|
|
|
match reason {
|
|
|
|
AbortReason::BadScaffold(bad_peers) => {
|
|
|
|
debug!(target: "sync", "Disabling peers responsible for bad scaffold");
|
|
|
|
for peer in bad_peers {
|
|
|
|
ctx.disable_peer(peer);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
AbortReason::NoResponses => {}
|
2017-01-20 12:41:59 +01:00
|
|
|
AbortReason::TargetReached => {
|
|
|
|
debug!(target: "sync", "Sync target reached. Going idle");
|
|
|
|
*state = SyncState::Idle;
|
|
|
|
return;
|
|
|
|
}
|
2016-12-15 22:42:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
debug!(target: "sync", "Beginning search after aborted sync round");
|
|
|
|
self.begin_search(&mut state);
|
|
|
|
}
|
2016-12-16 14:53:36 +01:00
|
|
|
SyncState::AncestorSearch(AncestorSearch::FoundCommon(num, hash)) => {
|
2017-01-20 12:41:59 +01:00
|
|
|
*state = SyncState::Rounds(SyncRound::begin((num, hash), sync_target));
|
2016-12-16 14:53:36 +01:00
|
|
|
}
|
|
|
|
SyncState::AncestorSearch(AncestorSearch::Genesis) => {
|
|
|
|
// Same here.
|
2017-01-11 14:39:03 +01:00
|
|
|
let g_hash = chain_info.genesis_hash;
|
2017-01-20 12:41:59 +01:00
|
|
|
*state = SyncState::Rounds(SyncRound::begin((0, g_hash), sync_target));
|
2016-12-16 14:53:36 +01:00
|
|
|
}
|
|
|
|
SyncState::Idle => self.begin_search(&mut state),
|
2016-12-15 22:42:24 +01:00
|
|
|
other => *state = other, // restore displaced state.
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// allow dispatching of requests.
|
|
|
|
{
|
2016-12-16 15:26:39 +01:00
|
|
|
let peers = self.peers.read();
|
2017-01-20 12:41:59 +01:00
|
|
|
let mut peer_ids: Vec<_> = peers.iter().filter_map(|(id, p)| {
|
|
|
|
if p.lock().status.head_td > chain_info.pending_total_difficulty {
|
|
|
|
Some(*id)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
}).collect();
|
2017-07-26 15:48:00 +02:00
|
|
|
|
2016-12-16 15:26:39 +01:00
|
|
|
let mut rng = self.rng.lock();
|
2017-07-26 15:48:00 +02:00
|
|
|
let mut requested_from = HashSet::new();
|
2016-12-16 15:26:39 +01:00
|
|
|
|
|
|
|
// naive request dispatcher: just give to any peer which says it will
|
2017-07-26 15:48:00 +02:00
|
|
|
// give us responses. but only one request per peer per state transition.
|
2017-03-16 23:51:47 +01:00
|
|
|
let dispatcher = move |req: HeadersRequest| {
|
2016-12-16 15:26:39 +01:00
|
|
|
rng.shuffle(&mut peer_ids);
|
|
|
|
|
2017-03-16 23:51:47 +01:00
|
|
|
let request = {
|
2017-09-24 19:18:17 +02:00
|
|
|
let mut builder = request::Builder::default();
|
2017-03-16 23:51:47 +01:00
|
|
|
builder.push(request::Request::Headers(request::IncompleteHeadersRequest {
|
|
|
|
start: req.start.into(),
|
|
|
|
skip: req.skip,
|
|
|
|
max: req.max,
|
|
|
|
reverse: req.reverse,
|
|
|
|
})).expect("request provided fully complete with no unresolved back-references; qed");
|
|
|
|
builder.build()
|
|
|
|
};
|
2016-12-16 15:26:39 +01:00
|
|
|
for peer in &peer_ids {
|
2017-07-26 15:48:00 +02:00
|
|
|
if requested_from.contains(peer) { continue }
|
2017-03-16 23:51:47 +01:00
|
|
|
match ctx.request_from(*peer, request.clone()) {
|
|
|
|
Ok(id) => {
|
2017-03-23 15:44:16 +01:00
|
|
|
self.pending_reqs.lock().insert(id.clone());
|
2017-07-26 15:48:00 +02:00
|
|
|
requested_from.insert(peer.clone());
|
|
|
|
|
2017-03-16 23:51:47 +01:00
|
|
|
return Some(id)
|
2016-12-16 15:26:39 +01:00
|
|
|
}
|
2017-03-16 23:51:47 +01:00
|
|
|
Err(NetError::NoCredits) => {}
|
|
|
|
Err(e) =>
|
|
|
|
trace!(target: "sync", "Error requesting headers from viable peer: {}", e),
|
2016-12-16 15:26:39 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
None
|
|
|
|
};
|
|
|
|
|
2016-12-15 22:42:24 +01:00
|
|
|
*state = match mem::replace(&mut *state, SyncState::Idle) {
|
2016-12-16 15:26:39 +01:00
|
|
|
SyncState::Rounds(round) =>
|
|
|
|
SyncState::Rounds(round.dispatch_requests(dispatcher)),
|
|
|
|
SyncState::AncestorSearch(search) =>
|
|
|
|
SyncState::AncestorSearch(search.dispatch_request(dispatcher)),
|
2016-12-15 22:42:24 +01:00
|
|
|
other => other,
|
|
|
|
};
|
|
|
|
}
|
2016-12-13 21:09:43 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// public API
|
2017-02-09 18:42:18 +01:00
|
|
|
impl<L: AsLightClient> LightSync<L> {
|
2016-12-13 21:09:43 +01:00
|
|
|
/// Create a new instance of `LightSync`.
|
|
|
|
///
|
|
|
|
/// This won't do anything until registered as a handler
|
2016-12-13 22:26:06 +01:00
|
|
|
/// so it can act on events.
|
2016-12-15 22:42:24 +01:00
|
|
|
pub fn new(client: Arc<L>) -> Result<Self, ::std::io::Error> {
|
|
|
|
Ok(LightSync {
|
2017-03-23 03:23:53 +01:00
|
|
|
start_block_number: client.as_light_client().chain_info().best_block_number,
|
2016-12-13 21:09:43 +01:00
|
|
|
best_seen: Mutex::new(None),
|
2016-12-13 21:09:57 +01:00
|
|
|
peers: RwLock::new(HashMap::new()),
|
2017-03-23 15:44:16 +01:00
|
|
|
pending_reqs: Mutex::new(HashSet::new()),
|
2016-12-13 21:09:43 +01:00
|
|
|
client: client,
|
2017-06-18 16:15:44 +02:00
|
|
|
rng: Mutex::new(OsRng::new()?),
|
2016-12-15 21:51:08 +01:00
|
|
|
state: Mutex::new(SyncState::Idle),
|
2016-12-15 22:42:24 +01:00
|
|
|
})
|
2016-12-13 21:09:43 +01:00
|
|
|
}
|
|
|
|
}
|
2017-03-23 03:23:53 +01:00
|
|
|
|
|
|
|
/// Trait for erasing the type of a light sync object and exposing read-only methods.
pub trait SyncInfo {
	/// Get the highest block advertised on the network.
	///
	/// Returns `None` when no peer has been seen yet.
	fn highest_block(&self) -> Option<u64>;

	/// Get the block number at the time of sync start.
	fn start_block(&self) -> u64;

	/// Whether major sync is underway.
	fn is_major_importing(&self) -> bool;
}
|
|
|
|
|
|
|
|
impl<L: AsLightClient> SyncInfo for LightSync<L> {
|
|
|
|
fn highest_block(&self) -> Option<u64> {
|
|
|
|
self.best_seen.lock().as_ref().map(|x| x.head_num)
|
|
|
|
}
|
|
|
|
|
|
|
|
fn start_block(&self) -> u64 {
|
|
|
|
self.start_block_number
|
|
|
|
}
|
|
|
|
|
|
|
|
fn is_major_importing(&self) -> bool {
|
|
|
|
const EMPTY_QUEUE: usize = 3;
|
|
|
|
|
|
|
|
if self.client.as_light_client().queue_info().unverified_queue_size > EMPTY_QUEUE {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
match *self.state.lock() {
|
|
|
|
SyncState::Idle => false,
|
|
|
|
_ => true,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|