2019-01-07 11:33:07 +01:00
|
|
|
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
|
|
|
// This file is part of Parity Ethereum.
|
2016-05-16 14:36:35 +02:00
|
|
|
|
2019-01-07 11:33:07 +01:00
|
|
|
// Parity Ethereum is free software: you can redistribute it and/or modify
|
2016-05-16 14:36:35 +02:00
|
|
|
// it under the terms of the GNU General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
|
2019-01-07 11:33:07 +01:00
|
|
|
// Parity Ethereum is distributed in the hope that it will be useful,
|
2016-05-16 14:36:35 +02:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU General Public License for more details.
|
|
|
|
|
|
|
|
// You should have received a copy of the GNU General Public License
|
2019-01-07 11:33:07 +01:00
|
|
|
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
2016-05-16 14:36:35 +02:00
|
|
|
|
2018-07-19 12:46:33 +02:00
|
|
|
use std::collections::{HashSet, HashMap, hash_map};
|
2017-08-31 11:35:41 +02:00
|
|
|
use hash::{keccak, KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP};
|
2019-06-19 13:54:05 +02:00
|
|
|
use parity_util_mem::MallocSizeOf;
|
2018-01-10 13:35:18 +01:00
|
|
|
use ethereum_types::H256;
|
Delete crates from parity-ethereum and fetch them from parity-common instead (#9083)
Use crates from parity-common: hashdb, keccak-hash, kvdb, kvdb-memorydb, kvdb-rocksdb, memorydb, parity-bytes, parity-crypto, path, patricia_trie, plain_hasher, rlp, target, test-support, trie-standardmap, triehash
2018-07-10 14:59:19 +02:00
|
|
|
use triehash_ethereum::ordered_trie_root;
|
2017-09-06 20:47:45 +02:00
|
|
|
use bytes::Bytes;
|
2018-04-16 15:52:12 +02:00
|
|
|
use rlp::{Rlp, RlpStream, DecoderError};
|
2017-11-13 14:37:08 +01:00
|
|
|
use network;
|
2018-08-08 10:56:54 +02:00
|
|
|
use ethcore::verification::queue::kind::blocks::Unverified;
|
2019-01-04 14:05:46 +01:00
|
|
|
use types::transaction::UnverifiedTransaction;
|
|
|
|
use types::header::Header as BlockHeader;
|
2016-05-16 14:36:35 +02:00
|
|
|
|
2019-06-19 13:54:05 +02:00
|
|
|
// `HeaderId` is two fixed-size hashes and owns no heap memory, so its
// malloc-reported size is zero.
malloc_size_of_is_0!(HeaderId);
|
2016-05-16 14:36:35 +02:00
|
|
|
|
2018-08-24 11:53:31 +02:00
|
|
|
/// A downloaded block header, kept together with the raw RLP it was decoded from
/// so the exact original bytes can be re-assembled into a full block later.
#[derive(PartialEq, Debug, Clone)]
#[derive(MallocSizeOf)]
pub struct SyncHeader {
	/// Raw header RLP bytes as received from the network.
	pub bytes: Bytes,
	/// Header decoded from `bytes`.
	pub header: BlockHeader,
}
|
|
|
|
|
|
|
|
impl SyncHeader {
|
|
|
|
pub fn from_rlp(bytes: Bytes) -> Result<Self, DecoderError> {
|
|
|
|
let result = SyncHeader {
|
|
|
|
header: ::rlp::decode(&bytes)?,
|
|
|
|
bytes,
|
|
|
|
};
|
|
|
|
|
|
|
|
Ok(result)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-19 13:54:05 +02:00
|
|
|
/// A downloaded block body: the decoded transactions and uncles, plus the raw
/// RLP of each list so the body can be re-encoded without loss.
#[derive(MallocSizeOf)]
pub struct SyncBody {
	/// Raw RLP of the transactions list.
	pub transactions_bytes: Bytes,
	/// Transactions decoded from `transactions_bytes`.
	pub transactions: Vec<UnverifiedTransaction>,
	/// Raw RLP of the uncles list.
	pub uncles_bytes: Bytes,
	/// Uncle headers decoded from `uncles_bytes`.
	pub uncles: Vec<BlockHeader>,
}
|
|
|
|
|
|
|
|
impl SyncBody {
|
|
|
|
pub fn from_rlp(bytes: &[u8]) -> Result<Self, DecoderError> {
|
|
|
|
let rlp = Rlp::new(bytes);
|
|
|
|
let transactions_rlp = rlp.at(0)?;
|
|
|
|
let uncles_rlp = rlp.at(1)?;
|
|
|
|
|
|
|
|
let result = SyncBody {
|
|
|
|
transactions_bytes: transactions_rlp.as_raw().to_vec(),
|
|
|
|
transactions: transactions_rlp.as_list()?,
|
|
|
|
uncles_bytes: uncles_rlp.as_raw().to_vec(),
|
|
|
|
uncles: uncles_rlp.as_list()?,
|
|
|
|
};
|
|
|
|
|
|
|
|
Ok(result)
|
|
|
|
}
|
|
|
|
|
|
|
|
fn empty_body() -> Self {
|
|
|
|
SyncBody {
|
|
|
|
transactions_bytes: ::rlp::EMPTY_LIST_RLP.to_vec(),
|
|
|
|
transactions: Vec::with_capacity(0),
|
|
|
|
uncles_bytes: ::rlp::EMPTY_LIST_RLP.to_vec(),
|
|
|
|
uncles: Vec::with_capacity(0),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-16 14:36:35 +02:00
|
|
|
/// Block data with optional body.
#[derive(MallocSizeOf)]
struct SyncBlock {
	/// Downloaded header (always present once the block is tracked).
	header: SyncHeader,
	/// Downloaded body; `None` until the matching body arrives.
	body: Option<SyncBody>,
	/// Raw RLP list of block receipts; `None` until downloaded (or not needed).
	receipts: Option<Bytes>,
	/// Receipts root taken from the header; used to match receipt downloads
	/// back to this block.
	receipts_root: H256,
}
|
|
|
|
|
2018-08-08 10:56:54 +02:00
|
|
|
fn unverified_from_sync(header: SyncHeader, body: Option<SyncBody>) -> Unverified {
|
|
|
|
let mut stream = RlpStream::new_list(3);
|
|
|
|
stream.append_raw(&header.bytes, 1);
|
|
|
|
let body = body.unwrap_or_else(SyncBody::empty_body);
|
|
|
|
stream.append_raw(&body.transactions_bytes, 1);
|
|
|
|
stream.append_raw(&body.uncles_bytes, 1);
|
|
|
|
|
|
|
|
Unverified {
|
|
|
|
header: header.header,
|
|
|
|
transactions: body.transactions,
|
|
|
|
uncles: body.uncles,
|
|
|
|
bytes: stream.out().to_vec(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-18 18:16:00 +02:00
|
|
|
/// A drained block paired with its (optional) receipts, ready for import.
pub struct BlockAndReceipts {
	/// Block data.
	pub block: Unverified,
	/// Block receipts RLP list.
	pub receipts: Option<Bytes>,
}
|
|
|
|
|
|
|
|
/// Used to identify header by transactions and uncles hashes
#[derive(Eq, PartialEq, Hash)]
struct HeaderId {
	/// Transactions trie root from the header.
	transactions_root: H256,
	/// Hash of the RLP-encoded uncles list.
	uncles: H256
}
|
|
|
|
|
|
|
|
/// A collection of blocks and subchain pointers being downloaded. This keeps track of
/// which headers/bodies need to be downloaded, which are being downloaded and also holds
/// the downloaded blocks.
#[derive(Default, MallocSizeOf)]
pub struct BlockCollection {
	/// Does this collection need block receipts.
	need_receipts: bool,
	/// Heads of subchains to download
	heads: Vec<H256>,
	/// Downloaded blocks, keyed by block hash.
	blocks: HashMap<H256, SyncBlock>,
	/// Downloaded blocks by parent: maps parent hash -> child hash.
	parents: HashMap<H256, H256>,
	/// Used to map body to header: (tx root, uncles hash) -> block hash,
	/// for headers whose body has not arrived yet.
	header_ids: HashMap<HeaderId, H256>,
	/// Used to map receipts root to headers. Multiple blocks may share the
	/// same receipts root, hence the Vec of hashes.
	receipt_ids: HashMap<H256, Vec<H256>>,
	/// First block in `blocks` (parent of the first block to drain).
	head: Option<H256>,
	/// Set of block header hashes being downloaded
	downloading_headers: HashSet<H256>,
	/// Set of block bodies being downloaded identified by block hash.
	downloading_bodies: HashSet<H256>,
	/// Set of block receipts being downloaded identified by receipt root.
	downloading_receipts: HashSet<H256>,
}
|
|
|
|
|
|
|
|
impl BlockCollection {
	/// Create a new instance.
	pub fn new(download_receipts: bool) -> BlockCollection {
		BlockCollection {
			need_receipts: download_receipts,
			blocks: HashMap::new(),
			header_ids: HashMap::new(),
			receipt_ids: HashMap::new(),
			heads: Vec::new(),
			parents: HashMap::new(),
			head: None,
			downloading_headers: HashSet::new(),
			downloading_bodies: HashSet::new(),
			downloading_receipts: HashSet::new(),
		}
	}

	/// Clear everything.
	pub fn clear(&mut self) {
		self.blocks.clear();
		self.parents.clear();
		self.header_ids.clear();
		self.receipt_ids.clear();
		self.heads.clear();
		self.head = None;
		self.downloading_headers.clear();
		self.downloading_bodies.clear();
		self.downloading_receipts.clear();
	}

	/// Reset collection for a new sync round with given subchain block hashes.
	pub fn reset_to(&mut self, hashes: Vec<H256>) {
		self.clear();
		self.heads = hashes;
	}

	/// Insert a set of headers into collection and advance subchain head pointers.
	/// Invalid headers are logged and skipped; the rest are inserted.
	pub fn insert_headers(&mut self, headers: Vec<SyncHeader>) {
		for h in headers {
			if let Err(e) = self.insert_header(h) {
				trace!(target: "sync", "Ignored invalid header: {:?}", e);
			}
		}
		self.update_heads();
	}

	/// Insert a collection of block bodies for previously downloaded headers.
	/// Returns the hashes of the blocks the bodies were matched to; bodies that
	/// do not match any pending header are logged and dropped.
	pub fn insert_bodies(&mut self, bodies: Vec<SyncBody>) -> Vec<H256> {
		bodies.into_iter()
			.filter_map(|b| {
				self.insert_body(b)
					.map_err(|e| trace!(target: "sync", "Ignored invalid body: {:?}", e))
					.ok()
			})
			.collect()
	}

	/// Insert a collection of block receipts for previously downloaded headers.
	/// No-op (returns empty) when this collection was created without receipt
	/// downloading. Each inserted receipt may satisfy several blocks.
	pub fn insert_receipts(&mut self, receipts: Vec<Bytes>) -> Vec<Vec<H256>> {
		if !self.need_receipts {
			return Vec::new();
		}
		receipts.into_iter()
			.filter_map(|r| {
				self.insert_receipt(r)
					.map_err(|e| trace!(target: "sync", "Ignored invalid receipt: {:?}", e))
					.ok()
			})
			.collect()
	}

	/// Returns a set of block hashes that require a body download. The returned set is marked as being downloaded.
	// NOTE(review): `_ignore_downloading` is accepted but never used here — confirm callers expect that.
	pub fn needed_bodies(&mut self, count: usize, _ignore_downloading: bool) -> Vec<H256> {
		if self.head.is_none() {
			return Vec::new();
		}
		let mut needed_bodies: Vec<H256> = Vec::new();
		// First walk the chain from `head` via `parents`, picking blocks with no body
		// that are not already being downloaded.
		let mut head = self.head;
		while head.is_some() && needed_bodies.len() < count {
			head = self.parents.get(&head.unwrap()).cloned();
			if let Some(head) = head {
				match self.blocks.get(&head) {
					Some(block) if block.body.is_none() && !self.downloading_bodies.contains(&head) => {
						self.downloading_bodies.insert(head.clone());
						needed_bodies.push(head.clone());
					}
					_ => (),
				}
			}
		}
		// Then top up from any other headers still awaiting a body.
		for h in self.header_ids.values() {
			if needed_bodies.len() >= count {
				break;
			}
			if !self.downloading_bodies.contains(h) {
				needed_bodies.push(h.clone());
				self.downloading_bodies.insert(h.clone());
			}
		}
		needed_bodies
	}

	/// Returns a set of block hashes that require a receipt download. The returned set is marked as being downloaded.
	// NOTE(review): `_ignore_downloading` is accepted but never used here — confirm callers expect that.
	pub fn needed_receipts(&mut self, count: usize, _ignore_downloading: bool) -> Vec<H256> {
		if self.head.is_none() || !self.need_receipts {
			return Vec::new();
		}
		let mut needed_receipts: Vec<H256> = Vec::new();
		// Walk the chain from `head`, marking receipt roots (not block hashes)
		// as downloading, since several blocks can share one receipts root.
		let mut head = self.head;
		while head.is_some() && needed_receipts.len() < count {
			head = self.parents.get(&head.unwrap()).cloned();
			if let Some(head) = head {
				match self.blocks.get(&head) {
					Some(block) => {
						if block.receipts.is_none() && !self.downloading_receipts.contains(&block.receipts_root) {
							self.downloading_receipts.insert(block.receipts_root);
							needed_receipts.push(head.clone());
						}
					}
					_ => (),
				}
			}
		}
		// If there are multiple blocks per receipt, only request one of them.
		for (root, h) in self.receipt_ids.iter().map(|(root, hashes)| (root, hashes[0])) {
			if needed_receipts.len() >= count {
				break;
			}
			if !self.downloading_receipts.contains(root) {
				needed_receipts.push(h.clone());
				self.downloading_receipts.insert(*root);
			}
		}
		needed_receipts
	}

	/// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded.
	/// Picks the first subchain head not already being downloaded (or any head
	/// when `ignore_downloading` is set) and returns it with the requested count.
	pub fn needed_headers(&mut self, count: usize, ignore_downloading: bool) -> Option<(H256, usize)> {
		// find subchain to download
		let mut download = None;
		{
			for h in &self.heads {
				if ignore_downloading || !self.downloading_headers.contains(h) {
					self.downloading_headers.insert(h.clone());
					download = Some(h.clone());
					break;
				}
			}
		}
		download.map(|h| (h, count))
	}

	/// Unmark header as being downloaded.
	pub fn clear_header_download(&mut self, hash: &H256) {
		self.downloading_headers.remove(hash);
	}

	/// Unmark block body as being downloaded.
	pub fn clear_body_download(&mut self, hashes: &[H256]) {
		for h in hashes {
			self.downloading_bodies.remove(h);
		}
	}

	/// Unmark block receipt as being downloaded.
	/// Takes block hashes; the corresponding receipts roots are looked up from
	/// the stored blocks (unknown hashes are silently ignored).
	pub fn clear_receipt_download(&mut self, hashes: &[H256]) {
		for h in hashes {
			if let Some(ref block) = self.blocks.get(h) {
				self.downloading_receipts.remove(&block.receipts_root);
			}
		}
	}

	/// Get a valid chain of blocks ordered in ascending order and ready for importing into blockchain.
	/// Drains consecutive blocks starting after `head` that have a body (and
	/// receipts, when required), advancing `head` past each drained block.
	pub fn drain(&mut self) -> Vec<BlockAndReceipts> {
		if self.blocks.is_empty() || self.head.is_none() {
			return Vec::new();
		}

		let mut drained = Vec::new();
		// NOTE(review): `hashes` is filled below but never read within this
		// function in the visible code — looks vestigial; confirm before removing.
		let mut hashes = Vec::new();
		{
			let mut blocks = Vec::new();
			let mut head = self.head;
			while let Some(h) = head {
				head = self.parents.get(&h).cloned();
				if let Some(head) = head {
					match self.blocks.remove(&head) {
						Some(block) => {
							if block.body.is_some() && (!self.need_receipts || block.receipts.is_some()) {
								blocks.push(block);
								hashes.push(head);
								self.head = Some(head);
							} else {
								// Incomplete block: put it back and stop — the chain
								// must be drained without gaps.
								self.blocks.insert(head, block);
								break;
							}
						},
						_ => {
							break;
						},
					}
				}
			}

			for block in blocks.into_iter() {
				let unverified = unverified_from_sync(block.header, block.body);
				drained.push(BlockAndReceipts {
					block: unverified,
					// NOTE(review): `block` is consumed here, so this clone looks
					// avoidable (a plain move of the field) — confirm.
					receipts: block.receipts.clone(),
				});
			}
		}

		trace!(target: "sync", "Drained {} blocks, new head :{:?}", drained.len(), self.head);
		drained
	}

	/// Check if the collection is empty. We consider the syncing round complete once
	/// there is no block data left and only a single or none head pointer remains.
	pub fn is_empty(&self) -> bool {
		self.heads.len() == 0 || (self.heads.len() == 1 && self.head.map_or(false, |h| h == self.heads[0]))
	}

	/// Check if collection contains a block header.
	pub fn contains(&self, hash: &H256) -> bool {
		self.blocks.contains_key(hash)
	}

	/// Check the number of heads
	pub fn heads_len(&self) -> usize {
		self.heads.len()
	}

	/// Check if given block hash is marked as being downloaded.
	pub fn is_downloading(&self, hash: &H256) -> bool {
		self.downloading_headers.contains(hash) || self.downloading_bodies.contains(hash)
	}

	/// Match a downloaded body to a pending header by (tx root, uncles hash)
	/// and attach it. Errors with `BadProtocol` when no pending header matches.
	fn insert_body(&mut self, body: SyncBody) -> Result<H256, network::Error> {
		// Identify the body by recomputing the header id from its contents.
		let header_id = {
			let tx_root = ordered_trie_root(Rlp::new(&body.transactions_bytes).iter().map(|r| r.as_raw()));
			let uncles = keccak(&body.uncles_bytes);
			HeaderId {
				transactions_root: tx_root,
				uncles: uncles
			}
		};

		match self.header_ids.remove(&header_id) {
			Some(h) => {
				self.downloading_bodies.remove(&h);
				match self.blocks.get_mut(&h) {
					Some(ref mut block) => {
						trace!(target: "sync", "Got body {}", h);
						block.body = Some(body);
						Ok(h)
					},
					None => {
						// header_ids referenced a block that is no longer stored;
						// this indicates inconsistent internal state.
						warn!("Got body with no header {}", h);
						Err(network::Error::BadProtocol)
					}
				}
			}
			None => {
				trace!(target: "sync", "Ignored unknown/stale block body. tx_root = {:?}, uncles = {:?}", header_id.transactions_root, header_id.uncles);
				Err(network::Error::BadProtocol)
			}
		}
	}

	/// Match a downloaded receipts list to pending headers by its trie root and
	/// attach it to every block sharing that root. Returns the satisfied block
	/// hashes, or `BadProtocol` when the root is unknown/stale.
	fn insert_receipt(&mut self, r: Bytes) -> Result<Vec<H256>, network::Error> {
		let receipt_root = {
			let receipts = Rlp::new(&r);
			ordered_trie_root(receipts.iter().map(|r| r.as_raw()))
		};
		self.downloading_receipts.remove(&receipt_root);
		match self.receipt_ids.entry(receipt_root) {
			hash_map::Entry::Occupied(entry) => {
				let block_hashes = entry.remove();
				for h in block_hashes.iter() {
					match self.blocks.get_mut(&h) {
						Some(ref mut block) => {
							trace!(target: "sync", "Got receipt {}", h);
							block.receipts = Some(r.clone());
						},
						None => {
							warn!("Got receipt with no header {}", h);
							return Err(network::Error::BadProtocol)
						}
					}
				}
				Ok(block_hashes)
			},
			hash_map::Entry::Vacant(_) => {
				trace!(target: "sync", "Ignored unknown/stale block receipt {:?}", receipt_root);
				Err(network::Error::BadProtocol)
			}
		}
	}

	/// Insert a single header, registering the lookups needed to match its
	/// body/receipts later. Re-inserting a known header is a no-op.
	// NOTE(review): `self.heads[0]` below assumes `heads` is non-empty (i.e.
	// `reset_to` was called with at least one hash) — confirm callers uphold this.
	fn insert_header(&mut self, info: SyncHeader) -> Result<H256, DecoderError> {
		let hash = info.header.hash();
		if self.blocks.contains_key(&hash) {
			return Ok(hash);
		}

		// First header of the first subchain establishes the drain head
		// (pointing at its parent).
		match self.head {
			None if hash == self.heads[0] => {
				trace!(target: "sync", "New head {}", hash);
				self.head = Some(info.header.parent_hash().clone());
			},
			_ => ()
		}

		let header_id = HeaderId {
			transactions_root: *info.header.transactions_root(),
			uncles: *info.header.uncles_hash(),
		};

		let body = if header_id.transactions_root == KECCAK_NULL_RLP && header_id.uncles == KECCAK_EMPTY_LIST_RLP {
			// empty body, just mark as downloaded
			Some(SyncBody::empty_body())
		} else {
			trace!(
				"Queueing body tx_root = {:?}, uncles = {:?}, block = {:?}, number = {}",
				header_id.transactions_root,
				header_id.uncles,
				hash,
				info.header.number()
			);
			self.header_ids.insert(header_id, hash);
			None
		};

		let (receipts, receipts_root) = if self.need_receipts {
			let receipt_root = *info.header.receipts_root();
			if receipt_root == KECCAK_NULL_RLP {
				// No receipts for this block: synthesize an empty list.
				let receipts_stream = RlpStream::new_list(0);
				(Some(receipts_stream.out()), receipt_root)
			} else {
				self.receipt_ids.entry(receipt_root).or_insert_with(Vec::new).push(hash);
				(None, receipt_root)
			}
		} else {
			(None, H256::zero())
		};

		self.parents.insert(*info.header.parent_hash(), hash);

		let block = SyncBlock {
			header: info,
			body,
			receipts,
			receipts_root,
		};

		self.blocks.insert(hash, block);
		trace!(target: "sync", "New header: {:x}", hash);
		Ok(hash)
	}

	// Update subchain heads: follow each head through `parents`; a head whose
	// chain reaches another (old) head is merged away, otherwise the furthest
	// known descendant becomes the new head.
	fn update_heads(&mut self) {
		let mut new_heads = Vec::new();
		let old_subchains: HashSet<_> = { self.heads.iter().cloned().collect() };
		for s in self.heads.drain(..) {
			let mut h = s.clone();
			if !self.blocks.contains_key(&h) {
				// Nothing downloaded for this head yet; keep it as-is.
				new_heads.push(h);
				continue;
			}
			loop {
				match self.parents.get(&h) {
					Some(next) => {
						h = next.clone();
						if old_subchains.contains(&h) {
							trace!(target: "sync", "Completed subchain {:?}", s);
							break; // reached head of the other subchain, merge by not adding
						}
					},
					_ => {
						new_heads.push(h);
						break;
					}
				}
			}
		}
		self.heads = new_heads;
	}
}
|
|
|
|
|
|
|
|
#[cfg(test)]
mod test {
	use super::{BlockCollection, SyncHeader};
	use ethcore::client::{TestBlockChainClient, EachBlockWith, BlockChainClient};
	use types::{
		ids::BlockId,
		BlockNumber
	};
	use ethcore::verification::queue::kind::blocks::Unverified;
	use rlp::*;

	// True when the collection holds no data at all (freshly created or fully cleared).
	fn is_empty(bc: &BlockCollection) -> bool {
		bc.heads.is_empty() &&
		bc.blocks.is_empty() &&
		bc.parents.is_empty() &&
		bc.header_ids.is_empty() &&
		bc.head.is_none() &&
		bc.downloading_headers.is_empty() &&
		bc.downloading_bodies.is_empty()
	}

	// `reset_to` populates the collection; `clear` returns it to the empty state.
	#[test]
	fn create_clear() {
		let mut bc = BlockCollection::new(false);
		assert!(is_empty(&bc));
		let client = TestBlockChainClient::new();
		client.add_blocks(100, EachBlockWith::Nothing);
		let hashes = (0 .. 100).map(|i| (&client as &dyn BlockChainClient).block_hash(BlockId::Number(i)).unwrap()).collect();
		bc.reset_to(hashes);
		assert!(!is_empty(&bc));
		bc.clear();
		assert!(is_empty(&bc));
	}

	// End-to-end header insertion: heads advance, downloads are tracked, and
	// `drain` yields consecutive complete blocks in order.
	#[test]
	fn insert_headers() {
		let mut bc = BlockCollection::new(false);
		assert!(is_empty(&bc));
		let client = TestBlockChainClient::new();
		let nblocks = 200;
		client.add_blocks(nblocks, EachBlockWith::Nothing);
		let blocks: Vec<_> = (0..nblocks)
			.map(|i| (&client as &dyn BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner())
			.collect();
		let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect();
		let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect();
		// Subchain heads: every 20th block hash.
		let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(*h) } else { None }).collect();
		bc.reset_to(heads);
		assert!(!bc.is_empty());
		assert_eq!(hashes[0], bc.heads[0]);
		assert!(bc.needed_bodies(1, false).is_empty());
		assert!(!bc.contains(&hashes[0]));
		assert!(!bc.is_downloading(&hashes[0]));

		// Requesting headers marks the first head as downloading.
		let (h, n) = bc.needed_headers(6, false).unwrap();
		assert!(bc.is_downloading(&hashes[0]));
		assert_eq!(hashes[0], h);
		assert_eq!(n, 6);
		assert_eq!(bc.downloading_headers.len(), 1);
		assert!(bc.drain().is_empty());

		bc.insert_headers(headers[0..6].into_iter().map(Clone::clone).collect());
		assert_eq!(hashes[5], bc.heads[0]);
		for h in &hashes[0..6] {
			bc.clear_header_download(h)
		}
		assert_eq!(bc.downloading_headers.len(), 0);
		assert!(!bc.is_downloading(&hashes[0]));
		assert!(bc.contains(&hashes[0]));

		// Draining yields the inserted blocks in chain order.
		assert_eq!(
			bc.drain().into_iter().map(|b| b.block).collect::<Vec<_>>(),
			blocks[0..6].iter().map(|b| Unverified::from_rlp(b.to_vec()).unwrap()).collect::<Vec<_>>()
		);
		assert!(!bc.contains(&hashes[0]));
		assert_eq!(hashes[5], bc.head.unwrap());

		let (h, _) = bc.needed_headers(6, false).unwrap();
		assert_eq!(hashes[5], h);
		let (h, _) = bc.needed_headers(6, false).unwrap();
		assert_eq!(hashes[20], h);
		// A gap (10..16 without 6..10) prevents draining…
		bc.insert_headers(headers[10..16].into_iter().map(Clone::clone).collect());
		assert!(bc.drain().is_empty());
		// …until the gap is filled.
		bc.insert_headers(headers[5..10].into_iter().map(Clone::clone).collect());
		assert_eq!(
			bc.drain().into_iter().map(|b| b.block).collect::<Vec<_>>(),
			blocks[6..16].iter().map(|b| Unverified::from_rlp(b.to_vec()).unwrap()).collect::<Vec<_>>()
		);

		assert_eq!(hashes[15], bc.heads[0]);

		bc.insert_headers(headers[15..].into_iter().map(Clone::clone).collect());
		bc.drain();
		assert!(bc.is_empty());
	}

	// Inserting headers with a leading gap keeps two heads; filling the gap
	// establishes the drain head and merges the subchains.
	#[test]
	fn insert_headers_with_gap() {
		let mut bc = BlockCollection::new(false);
		assert!(is_empty(&bc));
		let client = TestBlockChainClient::new();
		let nblocks = 200;
		client.add_blocks(nblocks, EachBlockWith::Nothing);
		let blocks: Vec<_> = (0..nblocks)
			.map(|i| (&client as &dyn BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner())
			.collect();
		let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect();
		let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect();
		let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(*h) } else { None }).collect();
		bc.reset_to(heads);

		bc.insert_headers(headers[2..22].into_iter().map(Clone::clone).collect());
		assert_eq!(hashes[0], bc.heads[0]);
		assert_eq!(hashes[21], bc.heads[1]);
		assert!(bc.head.is_none());
		bc.insert_headers(headers[0..2].into_iter().map(Clone::clone).collect());
		assert!(bc.head.is_some());
		assert_eq!(hashes[21], bc.heads[0]);
	}

	// Out-of-order but gapless insertion drains everything once complete.
	#[test]
	fn insert_headers_no_gap() {
		let mut bc = BlockCollection::new(false);
		assert!(is_empty(&bc));
		let client = TestBlockChainClient::new();
		let nblocks = 200;
		client.add_blocks(nblocks, EachBlockWith::Nothing);
		let blocks: Vec<_> = (0..nblocks)
			.map(|i| (&client as &dyn BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner())
			.collect();
		let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect();
		let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect();
		let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(*h) } else { None }).collect();
		bc.reset_to(heads);

		bc.insert_headers(headers[1..2].into_iter().map(Clone::clone).collect());
		assert!(bc.drain().is_empty());
		bc.insert_headers(headers[0..1].into_iter().map(Clone::clone).collect());
		assert_eq!(bc.drain().len(), 2);
	}
}
|