// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use blocks::{BlockCollection, SyncBody, SyncHeader};
use chain::BlockSet;
use ethcore::{
    client::{BlockId, BlockStatus},
    error::{
        BlockError, Error as EthcoreError, ErrorKind as EthcoreErrorKind, ImportErrorKind,
        QueueErrorKind,
    },
};
use ethereum_types::H256;
use network::{client_version::ClientCapabilities, PeerId};
use rlp::{self, Rlp};
use std::cmp;
///
/// Blockchain downloader
///
use std::collections::{BTreeMap, HashSet, VecDeque};
use sync_io::SyncIo;
use types::BlockNumber;

const MAX_HEADERS_TO_REQUEST: usize = 128;
const MAX_BODIES_TO_REQUEST_LARGE: usize = 128;
const MAX_BODIES_TO_REQUEST_SMALL: usize = 32; // Request size for Parity clients prior to 2.4.0
const MAX_RECEIPTS_TO_REQUEST: usize = 256;
const SUBCHAIN_SIZE: u64 = 256;
const MAX_ROUND_PARENTS: usize = 16;
const MAX_PARALLEL_SUBCHAIN_DOWNLOAD: usize = 5;
const MAX_USELESS_HEADERS_PER_ROUND: usize = 3;
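
// A sync round has two phases: in `State::ChainHead`, up to SUBCHAIN_SIZE subchain
// head headers are requested with a skip of MAX_HEADERS_TO_REQUEST - 2 between them;
// in `State::Blocks`, the headers, bodies and (for OldBlocks) receipts filling the
// gaps between those heads are downloaded.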

// logging macros prepend BlockSet context for log filtering
macro_rules! trace_sync {
    ($self:ident, $fmt:expr, $($arg:tt)+) => {
        trace!(target: "sync", concat!("{:?}: ", $fmt), $self.block_set, $($arg)+);
    };
    ($self:ident, $fmt:expr) => {
        trace!(target: "sync", concat!("{:?}: ", $fmt), $self.block_set);
    };
}

macro_rules! debug_sync {
    ($self:ident, $fmt:expr, $($arg:tt)+) => {
        debug!(target: "sync", concat!("{:?}: ", $fmt), $self.block_set, $($arg)+);
    };
    ($self:ident, $fmt:expr) => {
        debug!(target: "sync", concat!("{:?}: ", $fmt), $self.block_set);
    };
}

#[derive(Copy, Clone, Eq, PartialEq, Debug)]
/// Downloader state
pub enum State {
    /// No active downloads.
    Idle,
    /// Downloading subchain heads
    ChainHead,
    /// Downloading blocks
    Blocks,
    /// Download is complete
    Complete,
}

/// Data that needs to be requested from a peer.
pub enum BlockRequest {
    Headers { start: H256, count: u64, skip: u64 },
    Bodies { hashes: Vec<H256> },
    Receipts { hashes: Vec<H256> },
}

/// Indicates sync action
#[derive(Eq, PartialEq, Debug)]
pub enum DownloadAction {
    /// Do nothing
    None,
    /// Reset downloads for all peers
    Reset,
}

#[derive(Eq, PartialEq, Debug)]
pub enum BlockDownloaderImportError {
    /// Imported data is rejected as invalid. The peer should be dropped.
    Invalid,
    /// Imported data is valid but rejected because the downloader does not need it.
    Useless,
}

impl From<rlp::DecoderError> for BlockDownloaderImportError {
    fn from(_: rlp::DecoderError) -> BlockDownloaderImportError {
        BlockDownloaderImportError::Invalid
    }
}

/// Block downloader strategy.
/// Manages state and block data for a block download process.
pub struct BlockDownloader {
    /// Which set of blocks to download
    block_set: BlockSet,
    /// Downloader state
    state: State,
    /// Highest block number seen
    highest_block: Option<BlockNumber>,
    /// Downloaded blocks, holding headers, bodies and receipts
    blocks: BlockCollection,
    /// Last imported block number
    last_imported_block: BlockNumber,
    /// Last imported block hash
    last_imported_hash: H256,
    /// Number of blocks imported this round
    imported_this_round: Option<usize>,
    /// Block number the last round started with.
    last_round_start: BlockNumber,
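    /// Block hash the last round started with.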
    last_round_start_hash: H256,
    /// Block parents imported this round (hash, parent)
    round_parents: VecDeque<(H256, H256)>,
    /// Do we need to download block receipts.
    download_receipts: bool,
    /// Sync up to the block with this hash.
    target_hash: Option<H256>,
    /// Probing range for seeking common best block.
    retract_step: u64,
    /// Number of consecutive useless header responses this round
    useless_headers_count: usize,
}

impl BlockDownloader {
    /// Create a new instance of the syncing strategy.
    /// For `BlockSet::NewBlocks` this won't reorganize to before the last kept state.
    pub fn new(block_set: BlockSet, start_hash: &H256, start_number: BlockNumber) -> Self {
        let sync_receipts = match block_set {
            BlockSet::NewBlocks => false,
            BlockSet::OldBlocks => true,
        };
        BlockDownloader {
            block_set,
            state: State::Idle,
            highest_block: None,
            last_imported_block: start_number,
            last_imported_hash: start_hash.clone(),
            last_round_start: start_number,
            last_round_start_hash: start_hash.clone(),
            blocks: BlockCollection::new(sync_receipts),
            imported_this_round: None,
            round_parents: VecDeque::new(),
            download_receipts: sync_receipts,
            target_hash: None,
            retract_step: 1,
            useless_headers_count: 0,
        }
    }

    /// Reset sync. Clear all local downloaded data.
    pub fn reset(&mut self) {
        self.blocks.clear();
        self.useless_headers_count = 0;
        self.state = State::Idle;
    }

    /// Mark a block as known in the chain
    pub fn mark_as_known(&mut self, hash: &H256, number: BlockNumber) {
        if number >= self.last_imported_block + 1 {
            self.last_imported_block = number;
            self.last_imported_hash = hash.clone();
            self.imported_this_round = Some(self.imported_this_round.unwrap_or(0) + 1);
            self.last_round_start = number;
            self.last_round_start_hash = hash.clone();
        }
    }

    /// Check if download is complete
    pub fn is_complete(&self) -> bool {
        self.state == State::Complete
    }

    /// Check if a particular block hash is being downloaded
    pub fn is_downloading(&self, hash: &H256) -> bool {
        self.blocks.is_downloading(hash)
    }

    /// Set the target block to sync up to.
    pub fn set_target(&mut self, hash: &H256) {
        self.target_hash = Some(hash.clone());
    }

    /// Unmark header as being downloaded.
    pub fn clear_header_download(&mut self, hash: &H256) {
        self.blocks.clear_header_download(hash)
    }

    /// Unmark block body as being downloaded.
    pub fn clear_body_download(&mut self, hashes: &[H256]) {
        self.blocks.clear_body_download(hashes)
    }

    /// Unmark block receipt as being downloaded.
    pub fn clear_receipt_download(&mut self, hashes: &[H256]) {
        self.blocks.clear_receipt_download(hashes)
    }

    /// Reset collection for a new sync round with given subchain block hashes.
    pub fn reset_to(&mut self, hashes: Vec<H256>) {
        self.reset();
        self.blocks.reset_to(hashes);
        self.state = State::Blocks;
    }

    /// Returns the number of items in the internal structures.
    pub fn get_sizes(&self, sizes: &mut BTreeMap<String, usize>) {
        let prefix = format!("{}_", self.block_set.to_string());
        self.blocks.get_sizes(sizes, &prefix);
        sizes.insert(
            format!("{}{}", prefix, "round_parents"),
            self.round_parents.len(),
        );
    }

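    /// Reset the download to the given block, clearing all per-round state.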
    fn reset_to_block(&mut self, start_hash: &H256, start_number: BlockNumber) {
        self.reset();
        self.last_imported_block = start_number;
        self.last_imported_hash = start_hash.clone();
        self.last_round_start = start_number;
        self.last_round_start_hash = start_hash.clone();
        self.imported_this_round = None;
        self.round_parents = VecDeque::new();
        self.target_hash = None;
        self.retract_step = 1;
    }

    /// Returns best imported block number.
    pub fn last_imported_block_number(&self) -> BlockNumber {
        self.last_imported_block
    }

    /// Add new block headers.
    pub fn import_headers(
        &mut self,
        io: &mut dyn SyncIo,
        r: &Rlp,
        expected_hash: H256,
    ) -> Result<DownloadAction, BlockDownloaderImportError> {
        let item_count = r.item_count().unwrap_or(0);
        if self.state == State::Idle {
            trace_sync!(self, "Ignored unexpected block headers");
            return Ok(DownloadAction::None);
        }
        if item_count == 0 && (self.state == State::Blocks) {
            return Err(BlockDownloaderImportError::Invalid);
        }

        // The request is generated in ::request_blocks.
        let (max_count, skip) = if self.state == State::ChainHead {
            (SUBCHAIN_SIZE as usize, (MAX_HEADERS_TO_REQUEST - 2) as u64)
        } else {
            (MAX_HEADERS_TO_REQUEST, 0)
        };

        if item_count > max_count {
            debug!(target: "sync", "Headers response is larger than expected");
            return Err(BlockDownloaderImportError::Invalid);
        }

        let mut headers = Vec::new();
        let mut hashes = Vec::new();
        let mut last_header = None;
        for i in 0..item_count {
            let info = SyncHeader::from_rlp(r.at(i)?.as_raw().to_vec())?;
            let number = BlockNumber::from(info.header.number());
            let hash = info.header.hash();

            let valid_response = match last_header {
                // First header must match expected hash.
                None => expected_hash == hash,
                Some((last_number, last_hash)) => {
                    // Subsequent headers must be spaced by skip interval.
                    let skip_valid = number == last_number + skip + 1;
                    // Consecutive headers must be linked by parent hash.
                    let parent_valid =
                        (number != last_number + 1) || *info.header.parent_hash() == last_hash;
                    skip_valid && parent_valid
                }
            };
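
            // Example: a ChainHead request uses skip = MAX_HEADERS_TO_REQUEST - 2 = 126,
            // so valid subchain heads are spaced 127 blocks apart (0, 127, 254, ...);
            // a Blocks request uses skip = 0, so headers must be consecutive and each
            // must name its predecessor as parent.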

            // Disable the peer for this syncing round if it gives an invalid chain
            if !valid_response {
                debug!(target: "sync", "Invalid headers response");
                return Err(BlockDownloaderImportError::Invalid);
            }

            last_header = Some((number, hash));
            if self.blocks.contains(&hash) {
                trace_sync!(
                    self,
                    "Skipping existing block header {} ({:?})",
                    number,
                    hash
                );
                continue;
            }

            match io.chain().block_status(BlockId::Hash(hash.clone())) {
                BlockStatus::InChain | BlockStatus::Queued => {
                    match self.state {
                        State::Blocks => {
                            trace_sync!(self, "Header already in chain {} ({})", number, hash)
                        }
                        _ => trace_sync!(
                            self,
                            "Header already in chain {} ({}), state = {:?}",
                            number,
                            hash,
                            self.state
                        ),
                    }
                    headers.push(info);
                    hashes.push(hash);
                }
                BlockStatus::Bad => {
                    return Err(BlockDownloaderImportError::Invalid);
                }
                BlockStatus::Unknown => {
                    headers.push(info);
                    hashes.push(hash);
                }
            }
        }

        if let Some((number, _)) = last_header {
            if self.highest_block.as_ref().map_or(true, |n| number > *n) {
                self.highest_block = Some(number);
            }
        }

        match self.state {
            State::ChainHead => {
                if !headers.is_empty() {
                    trace_sync!(
                        self,
                        "Received {} subchain heads, proceeding to download",
                        headers.len()
                    );
                    self.blocks.reset_to(hashes);
                    self.state = State::Blocks;
                    return Ok(DownloadAction::Reset);
                } else {
                    trace_sync!(
                        self,
                        "No useful subchain heads received, expected hash {:?}",
                        expected_hash
                    );
                    let best = io.chain().chain_info().best_block_number;
                    let oldest_reorg = io.chain().pruning_info().earliest_state;
                    let last = self.last_imported_block;
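                    // A NewBlocks peer that cannot produce a common block above the
                    // pruning horizon is on a fork we can no longer reorganize to,
                    // so it is disabled.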
                    match self.block_set {
                        BlockSet::NewBlocks
                            if best > last && (last == 0 || last < oldest_reorg) =>
                        {
                            trace_sync!(self, "No common block, disabling peer");
                            return Err(BlockDownloaderImportError::Invalid);
                        }
                        BlockSet::OldBlocks => {
                            trace_sync!(self, "Expected some useful headers for downloading OldBlocks. Try a different peer");
                            return Err(BlockDownloaderImportError::Useless);
                        }
                        _ => (),
                    }
                }
            }
            State::Blocks => {
                let count = headers.len();
                // At least one of the headers must advance the subchain. Otherwise they are all useless.
                if count == 0 {
                    self.useless_headers_count += 1;
                    trace_sync!(
                        self,
                        "No useful headers ({:?} this round), expected hash {:?}",
                        self.useless_headers_count,
                        expected_hash
                    );
                    // Only reset the download if we have multiple subchain heads, to avoid
                    // unnecessary resets at the head of the chain, where we may legitimately
                    // receive no useful headers.
                    if self.blocks.heads_len() > 1
                        && self.useless_headers_count >= MAX_USELESS_HEADERS_PER_ROUND
                    {
                        trace_sync!(
                            self,
                            "Received {:?} useless responses this round. Resetting sync",
                            MAX_USELESS_HEADERS_PER_ROUND
                        );
                        self.reset();
                    }
                    return Err(BlockDownloaderImportError::Useless);
                }
                self.blocks.insert_headers(headers);
                trace_sync!(self, "Inserted {} headers", count);
            }
            _ => trace_sync!(self, "Unexpected headers ({})", headers.len()),
        }

        Ok(DownloadAction::None)
    }

    /// Called by peer once it has new block bodies
    pub fn import_bodies(
        &mut self,
        r: &Rlp,
        expected_hashes: &[H256],
    ) -> Result<(), BlockDownloaderImportError> {
        let item_count = r.item_count().unwrap_or(0);
        if item_count == 0 {
            return Err(BlockDownloaderImportError::Useless);
        } else if self.state != State::Blocks {
            trace_sync!(self, "Ignored unexpected block bodies");
        } else {
            let mut bodies = Vec::with_capacity(item_count);
            for i in 0..item_count {
                let body = SyncBody::from_rlp(r.at(i)?.as_raw())?;
                bodies.push(body);
            }

            let hashes = self.blocks.insert_bodies(bodies);
            if hashes.len() != item_count {
                trace_sync!(self, "Deactivating peer for giving invalid block bodies");
                return Err(BlockDownloaderImportError::Invalid);
            }
            if !all_expected(hashes.as_slice(), expected_hashes, |&a, &b| a == b) {
                trace_sync!(self, "Deactivating peer for giving unexpected block bodies");
                return Err(BlockDownloaderImportError::Invalid);
            }
        }
        Ok(())
    }

    /// Called by peer once it has new block receipts
    pub fn import_receipts(
        &mut self,
        r: &Rlp,
        expected_hashes: &[H256],
    ) -> Result<(), BlockDownloaderImportError> {
        let item_count = r.item_count().unwrap_or(0);
        if item_count == 0 {
            return Err(BlockDownloaderImportError::Useless);
        } else if self.state != State::Blocks {
            trace_sync!(self, "Ignored unexpected block receipts");
        } else {
            let mut receipts = Vec::with_capacity(item_count);
            for i in 0..item_count {
                let receipt = r.at(i).map_err(|e| {
                    trace_sync!(self, "Error decoding block receipts RLP: {:?}", e);
                    BlockDownloaderImportError::Invalid
                })?;
                receipts.push(receipt.as_raw().to_vec());
            }
            let hashes = self.blocks.insert_receipts(receipts);
            if hashes.len() != item_count {
                trace_sync!(self, "Deactivating peer for giving invalid block receipts");
                return Err(BlockDownloaderImportError::Invalid);
            }
            if !all_expected(hashes.as_slice(), expected_hashes, |a, b| a.contains(b)) {
                trace_sync!(
                    self,
                    "Deactivating peer for giving unexpected block receipts"
                );
                return Err(BlockDownloaderImportError::Invalid);
            }
        }
        Ok(())
    }

    fn start_sync_round(&mut self, io: &mut dyn SyncIo) {
        self.state = State::ChainHead;
        trace_sync!(
            self,
            "Starting round (last imported count = {:?}, last started = {}, block = {:?})",
            self.imported_this_round,
            self.last_round_start,
            self.last_imported_block
        );
        // Check if we need to retract to find the common block. The problem is that
        // the peers still return headers by hash even from the non-canonical part of
        // the tree. So we also retract if nothing has been imported last round.
        let start = self.last_round_start;
        let start_hash = self.last_round_start_hash;
        match self.imported_this_round {
            Some(n) if n == 0 && start > 0 => {
                // Nothing was imported last round, step back to a previous block.
                // Search for the parent among the parents known from the last round first.
                if let Some(&(_, p)) = self.round_parents.iter().find(|&&(h, _)| h == start_hash) {
                    self.last_imported_block = start - 1;
                    self.last_imported_hash = p.clone();
                    trace_sync!(
                        self,
                        "Searching common header from the last round {} ({})",
                        self.last_imported_block,
                        self.last_imported_hash
                    );
                } else {
                    let best = io.chain().chain_info().best_block_number;
                    let best_hash = io.chain().chain_info().best_block_hash;
                    let oldest_reorg = io.chain().pruning_info().earliest_state;
                    if self.block_set == BlockSet::NewBlocks && best > start && start < oldest_reorg
                    {
                        debug_sync!(
                            self,
                            "Could not revert to previous ancient block, last: {} ({})",
                            start,
                            start_hash
                        );
                        self.reset_to_block(&best_hash, best);
                    } else {
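                        // Retract exponentially further each unproductive round
                        // (1, 2, 4, ... blocks), probing backwards for the fork
                        // point, similar to an exponential search.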
                        let n = start - cmp::min(self.retract_step, start);
                        if n == 0 {
                            debug_sync!(self, "Header not found, bottom line reached, resetting, last imported: {}", self.last_imported_hash);
                            self.reset_to_block(&best_hash, best);
                        } else {
                            self.retract_step *= 2;
                            match io.chain().block_hash(BlockId::Number(n)) {
                                Some(h) => {
                                    self.last_imported_block = n;
                                    self.last_imported_hash = h;
                                    trace_sync!(
                                        self,
                                        "Searching common header in the blockchain {} ({})",
                                        start,
                                        self.last_imported_hash
                                    );
                                }
                                None => {
                                    debug_sync!(
                                        self,
                                        "Could not revert to previous block, last: {} ({})",
                                        start,
                                        self.last_imported_hash
                                    );
                                    self.reset_to_block(&best_hash, best);
                                }
                            }
                        }
                    }
                }
            }
            _ => {
                self.retract_step = 1;
            }
        }
        self.last_round_start = self.last_imported_block;
        self.last_round_start_hash = self.last_imported_hash;
        self.imported_this_round = None;
    }

    /// Find some headers or blocks to download for a peer.
    pub fn request_blocks(
        &mut self,
        peer_id: PeerId,
        io: &mut dyn SyncIo,
        num_active_peers: usize,
    ) -> Option<BlockRequest> {
        match self.state {
            State::Idle => {
                self.start_sync_round(io);
                if self.state == State::ChainHead {
                    return self.request_blocks(peer_id, io, num_active_peers);
                }
            }
            State::ChainHead => {
                if num_active_peers < MAX_PARALLEL_SUBCHAIN_DOWNLOAD {
                    // Request subchain headers
                    trace_sync!(self, "Starting sync with better chain");
                    // Request headers spaced MAX_HEADERS_TO_REQUEST - 2 apart, so that a
                    // full MAX_HEADERS_TO_REQUEST response also covers the headers
                    // adjoining the neighbouring subchains.
                    return Some(BlockRequest::Headers {
                        start: self.last_imported_hash.clone(),
                        count: SUBCHAIN_SIZE,
                        skip: (MAX_HEADERS_TO_REQUEST - 2) as u64,
                    });
                }
            }
            State::Blocks => {
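                // Within a round, prefer completing known blocks: request missing
                // bodies first, then receipts (OldBlocks only), and only then more
                // subchain headers.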
                // Check to see if we need to download any block bodies first.
                let client_version = io.peer_version(peer_id);

                let number_of_bodies_to_request = if client_version.can_handle_large_requests() {
                    MAX_BODIES_TO_REQUEST_LARGE
                } else {
                    MAX_BODIES_TO_REQUEST_SMALL
                };

                let needed_bodies = self
                    .blocks
                    .needed_bodies(number_of_bodies_to_request, false);
                if !needed_bodies.is_empty() {
                    return Some(BlockRequest::Bodies {
                        hashes: needed_bodies,
                    });
                }

                if self.download_receipts {
                    let needed_receipts =
                        self.blocks.needed_receipts(MAX_RECEIPTS_TO_REQUEST, false);
                    if !needed_receipts.is_empty() {
                        return Some(BlockRequest::Receipts {
                            hashes: needed_receipts,
                        });
                    }
                }

                // Find a subchain to download.
                if let Some((h, count)) = self.blocks.needed_headers(MAX_HEADERS_TO_REQUEST, false)
                {
                    return Some(BlockRequest::Headers {
                        start: h,
                        count: count as u64,
                        skip: 0,
                    });
                }
            }
            State::Complete => (),
        }
        None
    }

    /// Checks if there are fully downloaded blocks that can be imported into the blockchain, and imports them.
    /// Returns `DownloadAction::Reset` if it has imported all the blocks it could and all downloading peers should be reset.
    pub fn collect_blocks(
        &mut self,
        io: &mut dyn SyncIo,
        allow_out_of_order: bool,
    ) -> DownloadAction {
        let mut download_action = DownloadAction::None;
        let mut imported = HashSet::new();
        let blocks = self.blocks.drain();
        let count = blocks.len();
        for block_and_receipts in blocks {
            let block = block_and_receipts.block;
            let receipts = block_and_receipts.receipts;

            let h = block.header.hash();
            let number = block.header.number();
            let parent = *block.header.parent_hash();

            if self.target_hash.as_ref().map_or(false, |t| t == &h) {
                self.state = State::Complete;
                trace_sync!(self, "Sync target reached");
                return download_action;
            }
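
            // Blocks that come with receipts (the OldBlocks set) are queued through
            // the ancient block import path; everything else goes through the regular
            // block import.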
            let result = if let Some(receipts) = receipts {
                io.chain().queue_ancient_block(block, receipts)
            } else {
                io.chain().import_block(block)
            };

            match result {
                Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain), _)) => {
                    trace_sync!(self, "Block already in chain {:?}", h);
                    self.block_imported(&h, number, &parent);
                }
                Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyQueued), _)) => {
                    trace_sync!(self, "Block already queued {:?}", h);
                    self.block_imported(&h, number, &parent);
                }
                Ok(_) => {
                    trace_sync!(self, "Block queued {:?}", h);
                    imported.insert(h.clone());
                    self.block_imported(&h, number, &parent);
                }
                Err(EthcoreError(EthcoreErrorKind::Block(BlockError::UnknownParent(_)), _))
                    if allow_out_of_order =>
                {
                    break;
                }
                Err(EthcoreError(EthcoreErrorKind::Block(BlockError::UnknownParent(_)), _)) => {
                    trace_sync!(self, "Unknown new block parent, restarting sync");
                    break;
                }
                Err(EthcoreError(
                    EthcoreErrorKind::Block(BlockError::TemporarilyInvalid(_)),
                    _,
                )) => {
                    debug_sync!(self, "Block temporarily invalid: {:?}, restarting sync", h);
                    break;
                }
                Err(EthcoreError(EthcoreErrorKind::Queue(QueueErrorKind::Full(limit)), _)) => {
                    debug_sync!(self, "Block import queue full ({}), restarting sync", limit);
                    download_action = DownloadAction::Reset;
                    break;
                }
                Err(e) => {
                    debug_sync!(self, "Bad block {:?} : {:?}", h, e);
                    download_action = DownloadAction::Reset;
                    break;
                }
            }
        }
        trace_sync!(self, "Imported {} of {}", imported.len(), count);
        self.imported_this_round = Some(self.imported_this_round.unwrap_or(0) + imported.len());

        if self.blocks.is_empty() {
            // complete sync round
            trace_sync!(self, "Sync round complete");
            download_action = DownloadAction::Reset;
        }
        download_action
    }
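
    /// Record an imported block and remember its parent so a later round can
    /// retract through it while searching for a common ancestor.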
    fn block_imported(&mut self, hash: &H256, number: BlockNumber, parent: &H256) {
        self.last_imported_block = number;
        self.last_imported_hash = hash.clone();
        self.round_parents.push_back((hash.clone(), parent.clone()));
        if self.round_parents.len() > MAX_ROUND_PARENTS {
            self.round_parents.pop_front();
        }
    }
}

// Determines if the first argument matches an ordered subset of the second, according to some predicate.
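// For example, with equality as the predicate:
//   all_expected(&[1, 3], &[1, 2, 3], |a, b| a == b) == true
//   all_expected(&[3, 1], &[1, 2, 3], |a, b| a == b) == false (order matters)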
fn all_expected<A, B, F>(values: &[A], expected_values: &[B], is_expected: F) -> bool
where
    F: Fn(&A, &B) -> bool,
{
    let mut expected_iter = expected_values.iter();
    values.iter().all(|val1| {
        while let Some(val2) = expected_iter.next() {
            if is_expected(val1, val2) {
                return true;
            }
        }
        false
    })
}

#[cfg(test)]
mod tests {
    use super::*;
    use ethcore::{client::TestBlockChainClient, spec::Spec};
    use ethkey::{Generator, Random};
    use hash::keccak;
    use parking_lot::RwLock;
    use rlp::{encode_list, RlpStream};
    use tests::{helpers::TestIo, snapshot::TestSnapshotService};
    use triehash_ethereum::ordered_trie_root;
    use types::{
        header::Header as BlockHeader,
        transaction::{SignedTransaction, Transaction},
    };

    fn dummy_header(number: u64, parent_hash: H256) -> BlockHeader {
        let mut header = BlockHeader::new();
        header.set_gas_limit(0.into());
        header.set_difficulty((number * 100).into());
        header.set_timestamp(number * 10);
        header.set_number(number);
        header.set_parent_hash(parent_hash);
        header.set_state_root(H256::zero());
        header
    }

    fn dummy_signed_tx() -> SignedTransaction {
        let keypair = Random.generate().unwrap();
        Transaction::default().sign(keypair.secret(), None)
    }

    fn import_headers(
        headers: &[BlockHeader],
        downloader: &mut BlockDownloader,
        io: &mut dyn SyncIo,
    ) -> Result<DownloadAction, BlockDownloaderImportError> {
        let mut stream = RlpStream::new();
        stream.append_list(headers);
        let bytes = stream.out();
        let rlp = Rlp::new(&bytes);
        let expected_hash = headers.first().unwrap().hash();
        downloader.import_headers(io, &rlp, expected_hash)
    }

    fn import_headers_ok(
        headers: &[BlockHeader],
        downloader: &mut BlockDownloader,
        io: &mut dyn SyncIo,
    ) {
        let res = import_headers(headers, downloader, io);
        assert!(res.is_ok());
    }

    #[test]
    fn import_headers_in_chain_head_state() {
        ::env_logger::try_init().ok();

        let spec = Spec::new_test();
        let genesis_hash = spec.genesis_header().hash();

        let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &genesis_hash, 0);
        downloader.state = State::ChainHead;

        let mut chain = TestBlockChainClient::new();
        let snapshot_service = TestSnapshotService::new();
        let queue = RwLock::new(VecDeque::new());
        let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None);

        // Valid headers sequence.
        let valid_headers = [
            spec.genesis_header(),
            dummy_header(127, H256::random()),
            dummy_header(254, H256::random()),
        ];
        let rlp_data = encode_list(&valid_headers);
        let valid_rlp = Rlp::new(&rlp_data);

        match downloader.import_headers(&mut io, &valid_rlp, genesis_hash) {
            Ok(DownloadAction::Reset) => assert_eq!(downloader.state, State::Blocks),
            _ => panic!("expected transition to Blocks state"),
        };

        // Headers are rejected because the expected hash does not match.
        let invalid_start_block_headers = [
            dummy_header(0, H256::random()),
            dummy_header(127, H256::random()),
            dummy_header(254, H256::random()),
        ];
        let rlp_data = encode_list(&invalid_start_block_headers);
        let invalid_start_block_rlp = Rlp::new(&rlp_data);

        match downloader.import_headers(&mut io, &invalid_start_block_rlp, genesis_hash) {
            Err(BlockDownloaderImportError::Invalid) => (),
            _ => panic!("expected BlockDownloaderImportError"),
        };

        // Headers are rejected because they are not spaced as expected.
        let invalid_skip_headers = [
            spec.genesis_header(),
            dummy_header(128, H256::random()),
            dummy_header(256, H256::random()),
        ];
        let rlp_data = encode_list(&invalid_skip_headers);
        let invalid_skip_rlp = Rlp::new(&rlp_data);

        match downloader.import_headers(&mut io, &invalid_skip_rlp, genesis_hash) {
            Err(BlockDownloaderImportError::Invalid) => (),
            _ => panic!("expected BlockDownloaderImportError"),
        };

        // Invalid because the packet size is too large.
        let mut too_many_headers = Vec::with_capacity((SUBCHAIN_SIZE + 1) as usize);
        too_many_headers.push(spec.genesis_header());
        for i in 1..(SUBCHAIN_SIZE + 1) {
            too_many_headers.push(dummy_header(
                (MAX_HEADERS_TO_REQUEST as u64 - 1) * i,
                H256::random(),
            ));
        }
        let rlp_data = encode_list(&too_many_headers);

        let too_many_rlp = Rlp::new(&rlp_data);
        match downloader.import_headers(&mut io, &too_many_rlp, genesis_hash) {
            Err(BlockDownloaderImportError::Invalid) => (),
            _ => panic!("expected BlockDownloaderImportError"),
        };
    }

    #[test]
    fn import_headers_in_blocks_state() {
        ::env_logger::try_init().ok();

        let mut chain = TestBlockChainClient::new();
        let snapshot_service = TestSnapshotService::new();
        let queue = RwLock::new(VecDeque::new());
        let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None);

        let mut headers = Vec::with_capacity(3);
        let parent_hash = H256::random();
        headers.push(dummy_header(127, parent_hash));
        let parent_hash = headers[0].hash();
        headers.push(dummy_header(128, parent_hash));
        let parent_hash = headers[1].hash();
        headers.push(dummy_header(129, parent_hash));

        let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &H256::random(), 0);
        downloader.state = State::Blocks;
        downloader.blocks.reset_to(vec![headers[0].hash()]);

        let rlp_data = encode_list(&headers);
        let headers_rlp = Rlp::new(&rlp_data);

        match downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()) {
            Ok(DownloadAction::None) => (),
            _ => panic!("expected successful import"),
        };

        // Invalidate parent_hash link.
        headers[2] = dummy_header(129, H256::random());
        let rlp_data = encode_list(&headers);
        let headers_rlp = Rlp::new(&rlp_data);

        match downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()) {
            Err(BlockDownloaderImportError::Invalid) => (),
            _ => panic!("expected BlockDownloaderImportError"),
        };

        // Invalidate header sequence by skipping a header.
        headers[2] = dummy_header(130, headers[1].hash());
        let rlp_data = encode_list(&headers);
        let headers_rlp = Rlp::new(&rlp_data);

        match downloader.import_headers(&mut io, &headers_rlp, headers[0].hash()) {
            Err(BlockDownloaderImportError::Invalid) => (),
            _ => panic!("expected BlockDownloaderImportError"),
        };
    }

    #[test]
    fn import_bodies() {
        ::env_logger::try_init().ok();

        let mut chain = TestBlockChainClient::new();
        let snapshot_service = TestSnapshotService::new();
        let queue = RwLock::new(VecDeque::new());
        let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None);

        // Import block headers.
        let mut headers = Vec::with_capacity(4);
        let mut bodies = Vec::with_capacity(4);
        let mut parent_hash = H256::zero();
        for i in 0..4 {
            // Construct the block body
            let uncles = if i > 0 {
                encode_list(&[dummy_header(i - 1, H256::random())])
            } else {
                ::rlp::EMPTY_LIST_RLP.to_vec()
            };

            let txs = encode_list(&[dummy_signed_tx()]);
            let tx_root = ordered_trie_root(Rlp::new(&txs).iter().map(|r| r.as_raw()));

            let mut rlp = RlpStream::new_list(2);
            rlp.append_raw(&txs, 1);
            rlp.append_raw(&uncles, 1);
            bodies.push(rlp.out());

            // Construct the block header
            let mut header = dummy_header(i, parent_hash);
            header.set_transactions_root(tx_root);
            header.set_uncles_hash(keccak(&uncles));
            parent_hash = header.hash();
            headers.push(header);
        }

        let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &headers[0].hash(), 0);
        downloader.state = State::Blocks;
        downloader.blocks.reset_to(vec![headers[0].hash()]);

        // Only import the first three block headers.
        let rlp_data = encode_list(&headers[0..3]);
        let headers_rlp = Rlp::new(&rlp_data);
        assert!(downloader
            .import_headers(&mut io, &headers_rlp, headers[0].hash())
            .is_ok());

        // Import first body successfully.
        let mut rlp_data = RlpStream::new_list(1);
        rlp_data.append_raw(&bodies[0], 1);
        let bodies_rlp = Rlp::new(rlp_data.as_raw());
        assert!(downloader
            .import_bodies(&bodies_rlp, &[headers[0].hash(), headers[1].hash()])
            .is_ok());

        // Import second body successfully.
        let mut rlp_data = RlpStream::new_list(1);
        rlp_data.append_raw(&bodies[1], 1);
        let bodies_rlp = Rlp::new(rlp_data.as_raw());
        assert!(downloader
            .import_bodies(&bodies_rlp, &[headers[0].hash(), headers[1].hash()])
            .is_ok());

        // Import unexpected third body.
        let mut rlp_data = RlpStream::new_list(1);
        rlp_data.append_raw(&bodies[2], 1);
        let bodies_rlp = Rlp::new(rlp_data.as_raw());
        match downloader.import_bodies(&bodies_rlp, &[headers[0].hash(), headers[1].hash()]) {
            Err(BlockDownloaderImportError::Invalid) => (),
            _ => panic!("expected BlockDownloaderImportError"),
        };
    }

    #[test]
    fn import_receipts() {
        ::env_logger::try_init().ok();

        let mut chain = TestBlockChainClient::new();
        let snapshot_service = TestSnapshotService::new();
        let queue = RwLock::new(VecDeque::new());
        let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None);

        // Import block headers.
        let mut headers = Vec::with_capacity(4);
        let mut receipts = Vec::with_capacity(4);
        let mut parent_hash = H256::zero();
        for i in 0..4 {
            // Construct the receipts. Receipt root for the first two blocks is the same.
            //
            // The RLP-encoded integers are clearly not receipts, but the BlockDownloader treats
            // all receipts as byte blobs, so it does not matter.
            let receipts_rlp = if i < 2 {
                encode_list(&[0u32])
            } else {
                encode_list(&[i as u32])
            };
            let receipts_root =
                ordered_trie_root(Rlp::new(&receipts_rlp).iter().map(|r| r.as_raw()));
            receipts.push(receipts_rlp);

            // Construct the block header.
            let mut header = dummy_header(i, parent_hash);
            header.set_receipts_root(receipts_root);
            parent_hash = header.hash();
            headers.push(header);
        }

        let mut downloader = BlockDownloader::new(BlockSet::OldBlocks, &headers[0].hash(), 0);
        downloader.state = State::Blocks;
        downloader.blocks.reset_to(vec![headers[0].hash()]);

        // Only import the first three block headers.
        let rlp_data = encode_list(&headers[0..3]);
        let headers_rlp = Rlp::new(&rlp_data);
        assert!(downloader
            .import_headers(&mut io, &headers_rlp, headers[0].hash())
            .is_ok());

        // Import second and third receipts successfully.
        let mut rlp_data = RlpStream::new_list(2);
        rlp_data.append_raw(&receipts[1], 1);
        rlp_data.append_raw(&receipts[2], 1);
        let receipts_rlp = Rlp::new(rlp_data.as_raw());
        assert!(downloader
            .import_receipts(&receipts_rlp, &[headers[1].hash(), headers[2].hash()])
            .is_ok());

        // Import unexpected fourth receipt.
        let mut rlp_data = RlpStream::new_list(1);
        rlp_data.append_raw(&receipts[3], 1);
        let receipts_rlp = Rlp::new(rlp_data.as_raw());
        match downloader.import_receipts(&receipts_rlp, &[headers[1].hash(), headers[2].hash()]) {
            Err(BlockDownloaderImportError::Invalid) => (),
            _ => panic!("expected BlockDownloaderImportError"),
        };
    }

    #[test]
    fn reset_after_multiple_sets_of_useless_headers() {
        ::env_logger::try_init().ok();

        let spec = Spec::new_test();
        let genesis_hash = spec.genesis_header().hash();

        let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &genesis_hash, 0);
        downloader.state = State::ChainHead;

        let mut chain = TestBlockChainClient::new();
        let snapshot_service = TestSnapshotService::new();
        let queue = RwLock::new(VecDeque::new());
        let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None);

        let heads = [
            spec.genesis_header(),
            dummy_header(127, H256::random()),
            dummy_header(254, H256::random()),
        ];

        let short_subchain = [dummy_header(1, genesis_hash)];

        import_headers_ok(&heads, &mut downloader, &mut io);
        import_headers_ok(&short_subchain, &mut downloader, &mut io);

        assert_eq!(downloader.state, State::Blocks);
        assert!(!downloader.blocks.is_empty());

        // simulate receiving useless headers
        let head = vec![short_subchain.last().unwrap().clone()];
        for _ in 0..MAX_USELESS_HEADERS_PER_ROUND {
            let res = import_headers(&head, &mut downloader, &mut io);
            assert!(res.is_err());
        }

        assert_eq!(downloader.state, State::Idle);
        assert!(downloader.blocks.is_empty());
    }

    #[test]
    fn dont_reset_after_multiple_sets_of_useless_headers_for_chain_head() {
        ::env_logger::try_init().ok();

        let spec = Spec::new_test();
        let genesis_hash = spec.genesis_header().hash();

        let mut downloader = BlockDownloader::new(BlockSet::NewBlocks, &genesis_hash, 0);
        downloader.state = State::ChainHead;

        let mut chain = TestBlockChainClient::new();
        let snapshot_service = TestSnapshotService::new();
        let queue = RwLock::new(VecDeque::new());
        let mut io = TestIo::new(&mut chain, &snapshot_service, &queue, None);

        let heads = [spec.genesis_header()];

        let short_subchain = [dummy_header(1, genesis_hash)];

        import_headers_ok(&heads, &mut downloader, &mut io);
        import_headers_ok(&short_subchain, &mut downloader, &mut io);

        assert_eq!(downloader.state, State::Blocks);
        assert!(!downloader.blocks.is_empty());

        // simulate receiving useless headers
        let head = vec![short_subchain.last().unwrap().clone()];
        for _ in 0..MAX_USELESS_HEADERS_PER_ROUND {
            let res = import_headers(&head, &mut downloader, &mut io);
            assert!(res.is_err());
        }

        // The download shouldn't be reset, since this is the chain head for a single subchain.
        // This state usually occurs for NewBlocks once it has reached the chain head.
        assert_eq!(downloader.state, State::Blocks);
        assert!(!downloader.blocks.is_empty());
    }
}