// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

///
/// Blockchain downloader
///
use std::collections::{HashSet, VecDeque};
use std::cmp;

use crate::{
	blocks::{BlockCollection, SyncBody, SyncHeader},
	chain::BlockSet,
	sync_io::SyncIo
};

use ethereum_types::H256;
use log::{debug, trace};
use network::{client_version::ClientCapabilities, PeerId};
use rlp::Rlp;
use parity_util_mem::MallocSizeOf;
use common_types::{
	BlockNumber,
	block_status::BlockStatus,
	ids::BlockId,
	errors::{EthcoreError, BlockError, ImportError},
};

const MAX_HEADERS_TO_REQUEST: usize = 128;
const MAX_BODIES_TO_REQUEST_LARGE: usize = 128;
const MAX_BODIES_TO_REQUEST_SMALL: usize = 32; // Request size for Parity clients prior to 2.4.0
const MAX_RECEIPTS_TO_REQUEST: usize = 256;
const SUBCHAIN_SIZE: u64 = 256;
const MAX_ROUND_PARENTS: usize = 16;
const MAX_PARALLEL_SUBCHAIN_DOWNLOAD: usize = 5;
const MAX_USELESS_HEADERS_PER_ROUND: usize = 3;
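
// Worked example of the spacing these constants imply (illustrative): in
// `ChainHead` state, headers are requested with `skip = MAX_HEADERS_TO_REQUEST - 2`
// (126), so returned subchain heads sit 127 blocks apart (e.g. 0, 127, 254). A
// follow-up request of 128 consecutive headers starting at one head then ends
// exactly at the next head, stitching neighbouring subchains together.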

// logging macros prepend BlockSet context for log filtering
macro_rules! trace_sync {
	($self:ident, $fmt:expr, $($arg:tt)+) => {
		trace!(target: "sync", concat!("{:?}: ", $fmt), $self.block_set, $($arg)+);
	};
	($self:ident, $fmt:expr) => {
		trace!(target: "sync", concat!("{:?}: ", $fmt), $self.block_set);
	};
}

macro_rules! debug_sync {
	($self:ident, $fmt:expr, $($arg:tt)+) => {
		debug!(target: "sync", concat!("{:?}: ", $fmt), $self.block_set, $($arg)+);
	};
	($self:ident, $fmt:expr) => {
		debug!(target: "sync", concat!("{:?}: ", $fmt), $self.block_set);
	};
}
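
// Usage sketch (illustrative, not from the original source): inside a
// `BlockDownloader` method, `trace_sync!(self, "Inserted {} headers", count)`
// expands to
// `trace!(target: "sync", "{:?}: Inserted {} headers", self.block_set, count)`,
// so every log line carries its `NewBlocks`/`OldBlocks` context for filtering.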

#[derive(Copy, Clone, Eq, PartialEq, Debug, MallocSizeOf)]
/// Downloader state
pub enum State {
	/// No active downloads.
	Idle,
	/// Downloading subchain heads
	ChainHead,
	/// Downloading blocks
	Blocks,
	/// Download is complete
	Complete,
}

/// Data that needs to be requested from a peer.
pub enum BlockRequest {
	Headers {
		start: H256,
		count: u64,
		skip: u64,
	},
	Bodies {
		hashes: Vec<H256>,
	},
	Receipts {
		hashes: Vec<H256>,
	},
}

/// Indicates sync action
#[derive(Eq, PartialEq, Debug)]
pub enum DownloadAction {
	/// Do nothing
	None,
	/// Reset downloads for all peers
	Reset
}

#[derive(Eq, PartialEq, Debug)]
pub enum BlockDownloaderImportError {
	/// Imported data is rejected as invalid. Peer should be dropped.
	Invalid,
	/// Imported data is valid but rejected because the downloader does not need it.
	Useless,
}

impl From<rlp::DecoderError> for BlockDownloaderImportError {
	fn from(_: rlp::DecoderError) -> BlockDownloaderImportError {
		BlockDownloaderImportError::Invalid
	}
}

/// Block downloader strategy.
/// Manages state and block data for a block download process.
#[derive(MallocSizeOf)]
pub struct BlockDownloader {
	/// Which set of blocks to download
	block_set: BlockSet,
	/// Downloader state
	state: State,
	/// Highest block number seen
	highest_block: Option<BlockNumber>,
	/// Downloaded blocks collection, holding headers, bodies and receipts.
	blocks: BlockCollection,
	/// Last imported block number
	last_imported_block: BlockNumber,
	/// Last imported block hash
	last_imported_hash: H256,
	/// Number of blocks imported this round
	imported_this_round: Option<usize>,
	/// Block number the last round started with.
	last_round_start: BlockNumber,
	last_round_start_hash: H256,
	/// Block parents imported this round (hash, parent)
	round_parents: VecDeque<(H256, H256)>,
	/// Do we need to download block receipts.
	download_receipts: bool,
	/// Sync up to the block with this hash.
	target_hash: Option<H256>,
	/// Probing range for seeking the common best block.
	retract_step: u64,
	/// Number of consecutive useless header responses this round.
	useless_headers_count: usize,
}

impl BlockDownloader {
	/// Create a new instance of syncing strategy.
	/// For BlockSet::NewBlocks this won't reorganize to before the last kept state.
	pub fn new(block_set: BlockSet, start_hash: &H256, start_number: BlockNumber) -> Self {
		let sync_receipts = match block_set {
			BlockSet::NewBlocks => false,
			BlockSet::OldBlocks => true
		};
		BlockDownloader {
			block_set,
			state: State::Idle,
			highest_block: None,
			last_imported_block: start_number,
			last_imported_hash: start_hash.clone(),
			last_round_start: start_number,
			last_round_start_hash: start_hash.clone(),
			blocks: BlockCollection::new(sync_receipts),
			imported_this_round: None,
			round_parents: VecDeque::new(),
			download_receipts: sync_receipts,
			target_hash: None,
			retract_step: 1,
			useless_headers_count: 0,
		}
	}
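
	// Usage sketch (illustrative): `BlockDownloader::new(BlockSet::OldBlocks, &hash, num)`
	// enables receipt download, and `collect_blocks` below then routes such blocks
	// through `queue_ancient_block` together with their receipts, while
	// `BlockSet::NewBlocks` blocks go through the regular `import_block` path.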

	/// Reset sync. Clear all local downloaded data.
	pub fn reset(&mut self) {
		self.blocks.clear();
		self.useless_headers_count = 0;
		self.state = State::Idle;
	}

	/// Mark a block as known in the chain
	pub fn mark_as_known(&mut self, hash: &H256, number: BlockNumber) {
		if number >= self.last_imported_block + 1 {
			self.last_imported_block = number;
			self.last_imported_hash = hash.clone();
			self.imported_this_round = Some(self.imported_this_round.unwrap_or(0) + 1);
			self.last_round_start = number;
			self.last_round_start_hash = hash.clone();
		}
	}

	/// Check if download is complete
	pub fn is_complete(&self) -> bool {
		self.state == State::Complete
	}

	/// Check if a particular block hash is being downloaded
	pub fn is_downloading(&self, hash: &H256) -> bool {
		self.blocks.is_downloading(hash)
	}

	/// Set the sync target block hash.
	pub fn set_target(&mut self, hash: &H256) {
		self.target_hash = Some(hash.clone());
	}

	/// Unmark header as being downloaded.
	pub fn clear_header_download(&mut self, hash: &H256) {
		self.blocks.clear_header_download(hash)
	}

	/// Unmark block body as being downloaded.
	pub fn clear_body_download(&mut self, hashes: &[H256]) {
		self.blocks.clear_body_download(hashes)
	}

	/// Unmark block receipt as being downloaded.
	pub fn clear_receipt_download(&mut self, hashes: &[H256]) {
		self.blocks.clear_receipt_download(hashes)
	}

	/// Reset collection for a new sync round with given subchain block hashes.
	pub fn reset_to(&mut self, hashes: Vec<H256>) {
		self.reset();
		self.blocks.reset_to(hashes);
		self.state = State::Blocks;
	}

	/// Returns best imported block number.
	pub fn last_imported_block_number(&self) -> BlockNumber {
		self.last_imported_block
	}

	/// Add new block headers.
	pub fn import_headers(&mut self, io: &mut dyn SyncIo, r: &Rlp, expected_hash: H256) -> Result<DownloadAction, BlockDownloaderImportError> {
		let item_count = r.item_count().unwrap_or(0);
		if self.state == State::Idle {
			trace_sync!(self, "Ignored unexpected block headers");
			return Ok(DownloadAction::None)
		}
		if item_count == 0 && (self.state == State::Blocks) {
			return Err(BlockDownloaderImportError::Invalid);
		}

		// The request is generated in ::request_blocks.
		let (max_count, skip) = if self.state == State::ChainHead {
			(SUBCHAIN_SIZE as usize, (MAX_HEADERS_TO_REQUEST - 2) as u64)
		} else {
			(MAX_HEADERS_TO_REQUEST, 0)
		};

		if item_count > max_count {
			debug!(target: "sync", "Headers response is larger than expected");
			return Err(BlockDownloaderImportError::Invalid);
		}

		let mut headers = Vec::new();
		let mut hashes = Vec::new();
		let mut last_header = None;
		for i in 0..item_count {
			let info = SyncHeader::from_rlp(r.at(i)?.as_raw().to_vec())?;
			let number = BlockNumber::from(info.header.number());
			let hash = info.header.hash();

			let valid_response = match last_header {
				// First header must match expected hash.
				None => expected_hash == hash,
				Some((last_number, last_hash)) => {
					// Subsequent headers must be spaced by skip interval.
					let skip_valid = number == last_number + skip + 1;
					// Consecutive headers must be linked by parent hash.
					let parent_valid = (number != last_number + 1) || *info.header.parent_hash() == last_hash;
					skip_valid && parent_valid
				}
			};
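
			// Illustrative check of the rules above: with `skip = 126`, a response
			// numbered 0, 127, 254 passes `skip_valid`; with `skip = 0`, consecutive
			// headers must also chain by parent hash, so header 6 is accepted only
			// if its parent hash equals header 5's hash.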

			// Disable the peer for this syncing round if it gives invalid chain
			if !valid_response {
				debug!(target: "sync", "Invalid headers response");
				return Err(BlockDownloaderImportError::Invalid);
			}

			last_header = Some((number, hash));
			if self.blocks.contains(&hash) {
				trace_sync!(self, "Skipping existing block header {} ({:?})", number, hash);
				continue;
			}

			match io.chain().block_status(BlockId::Hash(hash.clone())) {
				BlockStatus::InChain | BlockStatus::Queued => {
					match self.state {
						State::Blocks => trace_sync!(self, "Header already in chain {} ({})", number, hash),
						_ => trace_sync!(self, "Header already in chain {} ({}), state = {:?}", number, hash, self.state),
					}
					headers.push(info);
					hashes.push(hash);
				},
				BlockStatus::Bad => {
					return Err(BlockDownloaderImportError::Invalid);
				},
				BlockStatus::Unknown => {
					headers.push(info);
					hashes.push(hash);
				}
			}
		}

		// Update the highest block number seen on the network from the header.
		if let Some((number, _)) = last_header {
			if self.highest_block.as_ref().map_or(true, |n| number > *n) {
				self.highest_block = Some(number);
			}
		}

		match self.state {
			State::ChainHead => {
				if !headers.is_empty() {
					trace_sync!(self, "Received {} subchain heads, proceeding to download", headers.len());
					self.blocks.reset_to(hashes);
					self.state = State::Blocks;
					return Ok(DownloadAction::Reset);
				} else {
					trace_sync!(self, "No useful subchain heads received, expected hash {:?}", expected_hash);
					let best = io.chain().chain_info().best_block_number;
					let oldest_reorg = io.chain().pruning_info().earliest_state;
					let last = self.last_imported_block;
					match self.block_set {
						BlockSet::NewBlocks if best > last && (last == 0 || last < oldest_reorg) => {
							trace_sync!(self, "No common block, disabling peer");
							return Err(BlockDownloaderImportError::Invalid)
						},
						BlockSet::OldBlocks => {
							trace_sync!(self, "Expected some useful headers for downloading OldBlocks. Try a different peer");
							return Err(BlockDownloaderImportError::Useless)
						},
						_ => (),
					}
				}
			},
			State::Blocks => {
				let count = headers.len();
				// At least one of the headers must advance the subchain. Otherwise they are all useless.
				if count == 0 {
					self.useless_headers_count += 1;
					trace_sync!(self, "No useful headers ({:?} this round), expected hash {:?}", self.useless_headers_count, expected_hash);
					// Only reset the download if we have multiple subchain heads, to avoid
					// unnecessary resets when we are at the head of the chain, where we may
					// legitimately receive no useful headers.
					if self.blocks.heads_len() > 1 && self.useless_headers_count >= MAX_USELESS_HEADERS_PER_ROUND {
						trace_sync!(self, "Received {:?} useless responses this round. Resetting sync", MAX_USELESS_HEADERS_PER_ROUND);
						self.reset();
					}
					return Err(BlockDownloaderImportError::Useless);
				}
				self.blocks.insert_headers(headers);
				trace_sync!(self, "Inserted {} headers", count);
			},
			_ => trace_sync!(self, "Unexpected headers({})", headers.len()),
		}

		Ok(DownloadAction::None)
	}

	/// Called when a peer delivers new block bodies.
	pub fn import_bodies(&mut self, r: &Rlp, expected_hashes: &[H256]) -> Result<(), BlockDownloaderImportError> {
		let item_count = r.item_count().unwrap_or(0);
		if item_count == 0 {
			return Err(BlockDownloaderImportError::Useless);
		} else if self.state != State::Blocks {
			trace_sync!(self, "Ignored unexpected block bodies");
		} else {
			let mut bodies = Vec::with_capacity(item_count);
			for i in 0..item_count {
				let body = SyncBody::from_rlp(r.at(i)?.as_raw())?;
				bodies.push(body);
			}

			let hashes = self.blocks.insert_bodies(bodies);
			if hashes.len() != item_count {
				trace_sync!(self, "Deactivating peer for giving invalid block bodies");
				return Err(BlockDownloaderImportError::Invalid);
			}
			if !all_expected(hashes.as_slice(), expected_hashes, |&a, &b| a == b) {
				trace_sync!(self, "Deactivating peer for giving unexpected block bodies");
				return Err(BlockDownloaderImportError::Invalid);
			}
		}
		Ok(())
	}

	/// Called when a peer delivers new block receipts.
	pub fn import_receipts(&mut self, r: &Rlp, expected_hashes: &[H256]) -> Result<(), BlockDownloaderImportError> {
		let item_count = r.item_count().unwrap_or(0);
		if item_count == 0 {
			return Err(BlockDownloaderImportError::Useless);
		}
		else if self.state != State::Blocks {
			trace_sync!(self, "Ignored unexpected block receipts");
		}
		else {
			let mut receipts = Vec::with_capacity(item_count);
			for i in 0..item_count {
				let receipt = r.at(i).map_err(|e| {
					trace_sync!(self, "Error decoding block receipts RLP: {:?}", e);
					BlockDownloaderImportError::Invalid
				})?;
				receipts.push(receipt.as_raw().to_vec());
			}
			let hashes = self.blocks.insert_receipts(receipts);
			if hashes.len() != item_count {
				trace_sync!(self, "Deactivating peer for giving invalid block receipts");
				return Err(BlockDownloaderImportError::Invalid);
			}
			if !all_expected(hashes.as_slice(), expected_hashes, |a, b| a.contains(b)) {
				trace_sync!(self, "Deactivating peer for giving unexpected block receipts");
				return Err(BlockDownloaderImportError::Invalid);
			}
		}
		Ok(())
	}

	fn start_sync_round(&mut self, io: &mut dyn SyncIo) {
		self.state = State::ChainHead;
		trace_sync!(self, "Starting round (last imported count = {:?}, last started = {}, block = {:?})", self.imported_this_round, self.last_round_start, self.last_imported_block);
		// Check if we need to retract to find the common block. The problem is that the peers
		// still return headers by hash even from the non-canonical part of the tree. So we
		// also retract if nothing was imported last round.
		let start = self.last_round_start;
		let start_hash = self.last_round_start_hash;
		match self.imported_this_round {
			Some(n) if n == 0 && start > 0 => {
				// Nothing was imported last round, step back to a previous block.
				// Search for the parent among the last round's known parents first.
				if let Some(&(_, p)) = self.round_parents.iter().find(|&&(h, _)| h == start_hash) {
					self.last_imported_block = start - 1;
					self.last_imported_hash = p.clone();
					trace_sync!(self, "Searching common header from the last round {} ({})", self.last_imported_block, self.last_imported_hash);
				} else {
					let best = io.chain().chain_info().best_block_number;
					let oldest_reorg = io.chain().pruning_info().earliest_state;
					if self.block_set == BlockSet::NewBlocks && best > start && start < oldest_reorg {
						debug_sync!(self, "Could not revert to previous ancient block, last: {} ({})", start, start_hash);
						self.reset();
					} else {
						let n = start - cmp::min(self.retract_step, start);
						self.retract_step *= 2;
						match io.chain().block_hash(BlockId::Number(n)) {
							Some(h) => {
								self.last_imported_block = n;
								self.last_imported_hash = h;
								trace_sync!(self, "Searching common header in the blockchain {} ({})", start, self.last_imported_hash);
							}
							None => {
								debug_sync!(self, "Could not revert to previous block, last: {} ({})", start, self.last_imported_hash);
								self.reset();
							}
						}
					}
				}
			},
			_ => {
				self.retract_step = 1;
			},
		}
		self.last_round_start = self.last_imported_block;
		self.last_round_start_hash = self.last_imported_hash;
		self.imported_this_round = None;
	}
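
	// Retraction sketch (illustrative): because `retract_step` doubles after every
	// empty round, the probe point above moves back 1, 2, 4, ... blocks per round,
	// so a common ancestor k blocks behind the round start is reached after roughly
	// log2(k) empty rounds rather than k of them.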

	/// Find some headers or blocks to download for a peer.
	pub fn request_blocks(&mut self, peer_id: PeerId, io: &mut dyn SyncIo, num_active_peers: usize) -> Option<BlockRequest> {
		match self.state {
			State::Idle => {
				self.start_sync_round(io);
				if self.state == State::ChainHead {
					return self.request_blocks(peer_id, io, num_active_peers);
				}
			},
			State::ChainHead => {
				if num_active_peers < MAX_PARALLEL_SUBCHAIN_DOWNLOAD {
					// Request subchain headers
					trace_sync!(self, "Starting sync with better chain");
					// Request headers MAX_HEADERS_TO_REQUEST - 2 apart so that
					// MAX_HEADERS_TO_REQUEST would include headers for neighbouring subchains.
					return Some(BlockRequest::Headers {
						start: self.last_imported_hash.clone(),
						count: SUBCHAIN_SIZE,
						skip: (MAX_HEADERS_TO_REQUEST - 2) as u64,
					});
				}
			},
			State::Blocks => {
				// Check to see if we need to download any block bodies first.
				let client_version = io.peer_version(peer_id);

				let number_of_bodies_to_request = if client_version.can_handle_large_requests() {
					MAX_BODIES_TO_REQUEST_LARGE
				} else {
					MAX_BODIES_TO_REQUEST_SMALL
				};

				let needed_bodies = self.blocks.needed_bodies(number_of_bodies_to_request, false);
				if !needed_bodies.is_empty() {
					return Some(BlockRequest::Bodies {
						hashes: needed_bodies,
					});
				}

				if self.download_receipts {
					let needed_receipts = self.blocks.needed_receipts(MAX_RECEIPTS_TO_REQUEST, false);
					if !needed_receipts.is_empty() {
						return Some(BlockRequest::Receipts {
							hashes: needed_receipts,
						});
					}
				}

				// Find a subchain to download.
				if let Some((h, count)) = self.blocks.needed_headers(MAX_HEADERS_TO_REQUEST, false) {
					return Some(BlockRequest::Headers {
						start: h,
						count: count as u64,
						skip: 0,
					});
				}
			},
			State::Complete => (),
		}
		None
	}
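
	// Caller-side sketch (illustrative; the real dispatch lives in the sync chain
	// module, and `request_bodies` here is a hypothetical helper name): a returned
	// request is matched and turned into a network call, e.g.
	// `Some(BlockRequest::Bodies { hashes }) => request_bodies(io, peer_id, hashes)`.
	// `None` means this peer has nothing useful to do in the current state.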

	/// Checks if there are blocks fully downloaded that can be imported into the blockchain and does the import.
	/// Returns DownloadAction::Reset if it has imported all the blocks it can and all downloading peers should be reset.
	pub fn collect_blocks(&mut self, io: &mut dyn SyncIo, allow_out_of_order: bool) -> DownloadAction {
		let mut download_action = DownloadAction::None;
		let mut imported = HashSet::new();
		let blocks = self.blocks.drain();
		let count = blocks.len();
		for block_and_receipts in blocks {
			let block = block_and_receipts.block;
			let receipts = block_and_receipts.receipts;

			let h = block.header.hash();
			let number = block.header.number();
			let parent = *block.header.parent_hash();

			if self.target_hash.as_ref().map_or(false, |t| t == &h) {
				self.state = State::Complete;
				trace_sync!(self, "Sync target reached");
				return download_action;
			}

			let result = if let Some(receipts) = receipts {
				io.chain().queue_ancient_block(block, receipts)
			} else {
				trace_sync!(self, "Importing block #{}/{}", number, h);
				io.chain().import_block(block)
			};

			match result {
				Err(EthcoreError::Import(ImportError::AlreadyInChain)) => {
					let is_canonical = if io.chain().block_hash(BlockId::Number(number)).is_some() {
						"canonical"
					} else {
						"not canonical"
					};
					trace_sync!(self, "Block #{} is already in chain {:?} – {}", number, h, is_canonical);
					self.block_imported(&h, number, &parent);
				},
				Err(EthcoreError::Import(ImportError::AlreadyQueued)) => {
					trace_sync!(self, "Block already queued {:?}", h);
					self.block_imported(&h, number, &parent);
				},
				Ok(_) => {
					trace_sync!(self, "Block queued {:?}", h);
					imported.insert(h.clone());
					self.block_imported(&h, number, &parent);
				},
				Err(EthcoreError::Block(BlockError::UnknownParent(_))) if allow_out_of_order => {
					break;
				},
				Err(EthcoreError::Block(BlockError::UnknownParent(_))) => {
					trace_sync!(self, "Unknown new block parent, restarting sync");
					break;
				},
				Err(EthcoreError::Block(BlockError::TemporarilyInvalid(_))) => {
					debug_sync!(self, "Block temporarily invalid: {:?}, restarting sync", h);
					break;
				},
				Err(EthcoreError::FullQueue(limit)) => {
					debug_sync!(self, "Block import queue full ({}), restarting sync", limit);
					download_action = DownloadAction::Reset;
					break;
				},
				Err(e) => {
					debug_sync!(self, "Bad block {:?} : {:?}", h, e);
					download_action = DownloadAction::Reset;
					break;
				}
			}
		}
		trace_sync!(self, "Imported {} of {}", imported.len(), count);
		self.imported_this_round = Some(self.imported_this_round.unwrap_or(0) + imported.len());

		if self.blocks.is_empty() {
			// Complete the sync round.
			trace_sync!(self, "Sync round complete");
			download_action = DownloadAction::Reset;
		}
		download_action
	}

	fn block_imported(&mut self, hash: &H256, number: BlockNumber, parent: &H256) {
		self.last_imported_block = number;
		self.last_imported_hash = hash.clone();
		self.round_parents.push_back((hash.clone(), parent.clone()));
		if self.round_parents.len() > MAX_ROUND_PARENTS {
			self.round_parents.pop_front();
		}
	}
}

// Determines if the first argument matches an ordered subset of the second, according to some predicate.
fn all_expected<A, B, F>(values: &[A], expected_values: &[B], is_expected: F) -> bool
	where F: Fn(&A, &B) -> bool
{
	let mut expected_iter = expected_values.iter();
	values.iter().all(|val1| {
		while let Some(val2) = expected_iter.next() {
			if is_expected(val1, val2) {
				return true;
			}
		}
		false
	})
}
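
// Behaviour sketch (illustrative): `all_expected(&[2, 4], &[1, 2, 3, 4], |a, b| a == b)`
// is true because 2 and 4 occur in order, while `all_expected(&[4, 2], ..)` is false:
// the shared iterator has already advanced past 2 by the time 2 is looked for.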

#[cfg(test)]
mod tests {
	use super::{
		BlockSet, BlockDownloader, BlockDownloaderImportError, DownloadAction, SyncIo, H256,
		MAX_HEADERS_TO_REQUEST, MAX_USELESS_HEADERS_PER_ROUND, SUBCHAIN_SIZE, State, Rlp, VecDeque
	};

	use crate::tests::{helpers::TestIo, snapshot::TestSnapshotService};

	use ethcore::test_helpers::TestBlockChainClient;
	use parity_crypto::publickey::{Random, Generator};
	use keccak_hash::keccak;
	use parking_lot::RwLock;
	use rlp::{encode_list, RlpStream};
	use triehash_ethereum::ordered_trie_root;
	use common_types::{
		transaction::{Transaction, SignedTransaction},
		header::Header as BlockHeader,
	};

	fn dummy_header(number: u64, parent_hash: H256) -> BlockHeader {
		let mut header = BlockHeader::new();
		header.set_gas_limit(0.into());
		header.set_difficulty((number * 100).into());
		header.set_timestamp(number * 10);
		header.set_number(number);
		header.set_parent_hash(parent_hash);
		header.set_state_root(H256::zero());
		header
	}

	fn dummy_signed_tx() -> SignedTransaction {
		let keypair = Random.generate().unwrap();
		Transaction::default().sign(keypair.secret(), None)
	}

	fn import_headers(headers: &[BlockHeader], downloader: &mut BlockDownloader, io: &mut dyn SyncIo) -> Result<DownloadAction, BlockDownloaderImportError> {
		let mut stream = RlpStream::new();
		stream.append_list(headers);
		let bytes = stream.out();
		let rlp = Rlp::new(&bytes);
		let expected_hash = headers.first().unwrap().hash();
		downloader.import_headers(io, &rlp, expected_hash)
	}

	fn import_headers_ok(headers: &[BlockHeader], downloader: &mut BlockDownloader, io: &mut dyn SyncIo) {
		let res = import_headers(headers, downloader, io);
		assert!(res.is_ok());
	}
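
	// A small added sanity test for `all_expected` (not in the original suite),
	// checking that only in-order subsets are accepted.
	#[test]
	fn all_expected_accepts_only_ordered_subsets() {
		let expected = [1u32, 2, 3, 4];
		assert!(super::all_expected(&[2u32, 4], &expected, |a, b| a == b));
		// Out of order: the expected iterator is consumed past 4 before 2 is sought.
		assert!(!super::all_expected(&[4u32, 2], &expected, |a, b| a == b));
	}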

	#[test]
	fn import_headers_in_chain_head_state() {
		env_logger::try_init().ok();

		let spec = spec::new_test();
		let genesis_hash = spec.genesis_header().hash();

		let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &genesis_hash, 0);
		downloader.state = State::ChainHead;

		let mut chain = TestBlockChainClient::new();
		let snapshot_service = TestSnapshotService::new();
		let queue = RwLock::new(VecDeque::new());
		let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None, None);

		// Valid headers sequence.
		let valid_headers = [
			spec.genesis_header(),
			dummy_header(127, H256::random()),
			dummy_header(254, H256::random()),
		];
		let rlp_data = encode_list(&valid_headers);
		let valid_rlp = Rlp::new(&rlp_data);

		match downloader.import_headers(&mut io, &valid_rlp, genesis_hash) {
			Ok(DownloadAction::Reset) => assert_eq!(downloader.state, State::Blocks),
			_ => panic!("expected transition to Blocks state"),
		};

		// Headers are rejected because the expected hash does not match.
		let invalid_start_block_headers = [
			dummy_header(0, H256::random()),
			dummy_header(127, H256::random()),
			dummy_header(254, H256::random()),
		];
		let rlp_data = encode_list(&invalid_start_block_headers);
		let invalid_start_block_rlp = Rlp::new(&rlp_data);

		match downloader.import_headers(&mut io, &invalid_start_block_rlp, genesis_hash) {
			Err(BlockDownloaderImportError::Invalid) => (),
			_ => panic!("expected BlockDownloaderImportError"),
		};

		// Headers are rejected because they are not spaced as expected.
		let invalid_skip_headers = [
			spec.genesis_header(),
			dummy_header(128, H256::random()),
			dummy_header(256, H256::random()),
		];
		let rlp_data = encode_list(&invalid_skip_headers);
		let invalid_skip_rlp = Rlp::new(&rlp_data);

		match downloader.import_headers(&mut io, &invalid_skip_rlp, genesis_hash) {
			Err(BlockDownloaderImportError::Invalid) => (),
			_ => panic!("expected BlockDownloaderImportError"),
		};

		// Invalid because the packet size is too large.
		let mut too_many_headers = Vec::with_capacity((SUBCHAIN_SIZE + 1) as usize);
		too_many_headers.push(spec.genesis_header());
		for i in 1..(SUBCHAIN_SIZE + 1) {
			too_many_headers.push(dummy_header((MAX_HEADERS_TO_REQUEST as u64 - 1) * i, H256::random()));
		}
		let rlp_data = encode_list(&too_many_headers);

		let too_many_rlp = Rlp::new(&rlp_data);
		match downloader.import_headers(&mut io, &too_many_rlp, genesis_hash) {
			Err(BlockDownloaderImportError::Invalid) => (),
			_ => panic!("expected BlockDownloaderImportError"),
		};
	}

	#[test]
	fn import_headers_in_blocks_state() {
		env_logger::try_init().ok();

		let mut chain = TestBlockChainClient::new();
		let snapshot_service = TestSnapshotService::new();
		let queue = RwLock::new(VecDeque::new());
		let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None, None);

		let mut headers = Vec::with_capacity(3);
		let parent_hash = H256::random();
		headers.push(dummy_header(127, parent_hash));
		let parent_hash = headers[0].hash();
		headers.push(dummy_header(128, parent_hash));
		let parent_hash = headers[1].hash();
		headers.push(dummy_header(129, parent_hash));

		let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &H256::random(), 0);
		downloader.state = State::Blocks;
		downloader.blocks.reset_to(vec![headers[0].hash()]);

		let rlp_data = encode_list(&headers);
		let headers_rlp = Rlp::new(&rlp_data);

		match downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()) {
			Ok(DownloadAction::None) => (),
			_ => panic!("expected successful import"),
		};

		// Invalidate parent_hash link.
		headers[2] = dummy_header(129, H256::random());
		let rlp_data = encode_list(&headers);
		let headers_rlp = Rlp::new(&rlp_data);

		match downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()) {
			Err(BlockDownloaderImportError::Invalid) => (),
			_ => panic!("expected BlockDownloaderImportError"),
		};

		// Invalidate header sequence by skipping a header.
		headers[2] = dummy_header(130, headers[1].hash());
		let rlp_data = encode_list(&headers);
		let headers_rlp = Rlp::new(&rlp_data);

		match downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()) {
			Err(BlockDownloaderImportError::Invalid) => (),
			_ => panic!("expected BlockDownloaderImportError"),
		};
	}

	#[test]
	fn import_bodies() {
		env_logger::try_init().ok();

		let mut chain = TestBlockChainClient::new();
		let snapshot_service = TestSnapshotService::new();
		let queue = RwLock::new(VecDeque::new());
		let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None, None);

		// Import block headers.
		let mut headers = Vec::with_capacity(4);
		let mut bodies = Vec::with_capacity(4);
		let mut parent_hash = H256::zero();
		for i in 0..4 {
			// Construct the block body.
			let uncles = if i > 0 {
				encode_list(&[dummy_header(i - 1, H256::random())])
			} else {
				::rlp::EMPTY_LIST_RLP.to_vec()
			};

			let txs = encode_list(&[dummy_signed_tx()]);
			let tx_root = ordered_trie_root(Rlp::new(&txs).iter().map(|r| r.as_raw()));

			let mut rlp = RlpStream::new_list(2);
			rlp.append_raw(&txs, 1);
			rlp.append_raw(&uncles, 1);
			bodies.push(rlp.out());

			// Construct the block header.
			let mut header = dummy_header(i, parent_hash);
			header.set_transactions_root(tx_root);
			header.set_uncles_hash(keccak(&uncles));
			parent_hash = header.hash();
			headers.push(header);
		}

		let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &headers[0].hash(), 0);
		downloader.state = State::Blocks;
		downloader.blocks.reset_to(vec![headers[0].hash()]);

		// Only import the first three block headers.
		let rlp_data = encode_list(&headers[0..3]);
		let headers_rlp = Rlp::new(&rlp_data);
		assert!(downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()).is_ok());

		// Import first body successfully.
		let mut rlp_data = RlpStream::new_list(1);
		rlp_data.append_raw(&bodies[0], 1);
		let bodies_rlp = Rlp::new(rlp_data.as_raw());
		assert!(downloader.import_bodies(&bodies_rlp, &[headers[0].hash(), headers[1].hash()]).is_ok());

		// Import second body successfully.
		let mut rlp_data = RlpStream::new_list(1);
		rlp_data.append_raw(&bodies[1], 1);
		let bodies_rlp = Rlp::new(rlp_data.as_raw());
		assert!(downloader.import_bodies(&bodies_rlp, &[headers[0].hash(), headers[1].hash()]).is_ok());

		// Import unexpected third body.
		let mut rlp_data = RlpStream::new_list(1);
		rlp_data.append_raw(&bodies[2], 1);
		let bodies_rlp = Rlp::new(rlp_data.as_raw());
		match downloader.import_bodies(&bodies_rlp, &[headers[0].hash(), headers[1].hash()]) {
			Err(BlockDownloaderImportError::Invalid) => (),
			_ => panic!("expected BlockDownloaderImportError"),
		};
	}

	#[test]
	fn import_receipts() {
		env_logger::try_init().ok();

		let mut chain = TestBlockChainClient::new();
		let snapshot_service = TestSnapshotService::new();
		let queue = RwLock::new(VecDeque::new());
		let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None, None);

		// Import block headers.
		let mut headers = Vec::with_capacity(4);
		let mut receipts = Vec::with_capacity(4);
		let mut parent_hash = H256::zero();
		for i in 0..4 {
			// Construct the receipts. The receipt root for the first two blocks is the same.
			//
			// The RLP-encoded integers are clearly not receipts, but the BlockDownloader treats
			// all receipts as byte blobs, so it does not matter.
			let receipts_rlp = if i < 2 {
				encode_list(&[0u32])
			} else {
				encode_list(&[i as u32])
			};
			let receipts_root = ordered_trie_root(Rlp::new(&receipts_rlp).iter().map(|r| r.as_raw()));
			receipts.push(receipts_rlp);

			// Construct the block header.
			let mut header = dummy_header(i, parent_hash);
			header.set_receipts_root(receipts_root);
			parent_hash = header.hash();
			headers.push(header);
		}

		let mut downloader = BlockDownloader::new(BlockSet::OldBlocks, &headers[0].hash(), 0);
		downloader.state = State::Blocks;
		downloader.blocks.reset_to(vec![headers[0].hash()]);

		// Only import the first three block headers.
		let rlp_data = encode_list(&headers[0..3]);
		let headers_rlp = Rlp::new(&rlp_data);
		assert!(downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()).is_ok());

		// Import second and third receipts successfully.
		let mut rlp_data = RlpStream::new_list(2);
		rlp_data.append_raw(&receipts[1], 1);
		rlp_data.append_raw(&receipts[2], 1);
		let receipts_rlp = Rlp::new(rlp_data.as_raw());
		assert!(downloader.import_receipts(&receipts_rlp, &[headers[1].hash(), headers[2].hash()]).is_ok());

		// Import an unexpected fourth receipt.
		let mut rlp_data = RlpStream::new_list(1);
		rlp_data.append_raw(&receipts[3], 1);
		let bodies_rlp = Rlp::new(rlp_data.as_raw());
		match downloader.import_bodies(&bodies_rlp, &[headers[1].hash(), headers[2].hash()]) {
			Err(BlockDownloaderImportError::Invalid) => (),
			_ => panic!("expected BlockDownloaderImportError"),
		};
	}

	#[test]
	fn reset_after_multiple_sets_of_useless_headers() {
		env_logger::try_init().ok();

		let spec = spec::new_test();
		let genesis_hash = spec.genesis_header().hash();

		let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &genesis_hash, 0);
		downloader.state = State::ChainHead;

		let mut chain = TestBlockChainClient::new();
		let snapshot_service = TestSnapshotService::new();
		let queue = RwLock::new(VecDeque::new());
		let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None, None);

		let heads = [
			spec.genesis_header(),
			dummy_header(127, H256::random()),
			dummy_header(254, H256::random()),
		];

		let short_subchain = [dummy_header(1, genesis_hash)];

		import_headers_ok(&heads, &mut downloader, &mut io);
		import_headers_ok(&short_subchain, &mut downloader, &mut io);

		assert_eq!(downloader.state, State::Blocks);
		assert!(!downloader.blocks.is_empty());

		// Simulate receiving useless headers.
		let head = vec![short_subchain.last().unwrap().clone()];
		for _ in 0..MAX_USELESS_HEADERS_PER_ROUND {
			let res = import_headers(&head, &mut downloader, &mut io);
			assert!(res.is_err());
		}

		assert_eq!(downloader.state, State::Idle);
		assert!(downloader.blocks.is_empty());
	}

	#[test]
	fn dont_reset_after_multiple_sets_of_useless_headers_for_chain_head() {
		env_logger::try_init().ok();

		let spec = spec::new_test();
		let genesis_hash = spec.genesis_header().hash();

		let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &genesis_hash, 0);
		downloader.state = State::ChainHead;

		let mut chain = TestBlockChainClient::new();
		let snapshot_service = TestSnapshotService::new();
		let queue = RwLock::new(VecDeque::new());
		let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None, None);

		let heads = [
			spec.genesis_header()
		];

		let short_subchain = [dummy_header(1, genesis_hash)];

		import_headers_ok(&heads, &mut downloader, &mut io);
		import_headers_ok(&short_subchain, &mut downloader, &mut io);

		assert_eq!(downloader.state, State::Blocks);
		assert!(!downloader.blocks.is_empty());

		// Simulate receiving useless headers.
		let head = vec![short_subchain.last().unwrap().clone()];
		for _ in 0..MAX_USELESS_HEADERS_PER_ROUND {
			let res = import_headers(&head, &mut downloader, &mut io);
			assert!(res.is_err());
		}

		// The download shouldn't be reset, since this is the chain head for a single subchain.
		// This state usually occurs for NewBlocks once it has reached the chain head.
		assert_eq!(downloader.state, State::Blocks);
		assert!(!downloader.blocks.is_empty());
	}
}