Code cleanup in the sync module (#11552)
* Code cleanup `flush_queue()` is mostly used by tests remove some unused params avoid clones in a few places * Consistent params naming Consistent use of log calls * Fix todo It's actually not trivial to find out the size of these collections, likely we keep blocks from all kinds of forks in the `self.blocks`/`self.parents` collections and there's no good way to anticipate how many blocks we're going to drain. Most of the time we end up draining 0 blocks and then we drain a whole bunch of them, up to 30 000. * Revert making flush_queue() test-only Address review grumbles * More review grumbles * fix build
This commit is contained in:
parent
10d82ef119
commit
9e77e7e193
@ -197,8 +197,8 @@ pub trait IoClient: Sync + Send {
|
|||||||
/// Queue transactions for importing.
|
/// Queue transactions for importing.
|
||||||
fn queue_transactions(&self, transactions: Vec<Bytes>, peer_id: usize);
|
fn queue_transactions(&self, transactions: Vec<Bytes>, peer_id: usize);
|
||||||
|
|
||||||
/// Queue block import with transaction receipts. Does no sealing and transaction validation.
|
/// Queue block import with transaction receipts. Does no sealing or transaction validation.
|
||||||
fn queue_ancient_block(&self, block_bytes: Unverified, receipts_bytes: Bytes) -> EthcoreResult<H256>;
|
fn queue_ancient_block(&self, unverified: Unverified, receipts_bytes: Bytes) -> EthcoreResult<H256>;
|
||||||
|
|
||||||
/// Queue consensus engine message.
|
/// Queue consensus engine message.
|
||||||
fn queue_consensus_message(&self, message: Bytes);
|
fn queue_consensus_message(&self, message: Bytes);
|
||||||
|
@ -178,7 +178,7 @@ struct Importer {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
|
/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
|
||||||
/// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue.
|
/// Call `import_block()` to import a block asynchronously.
|
||||||
pub struct Client {
|
pub struct Client {
|
||||||
/// Flag used to disable the client forever. Not to be confused with `liveness`.
|
/// Flag used to disable the client forever. Not to be confused with `liveness`.
|
||||||
///
|
///
|
||||||
@ -870,7 +870,7 @@ impl Client {
|
|||||||
*self.on_user_defaults_change.lock() = Some(Box::new(f));
|
*self.on_user_defaults_change.lock() = Some(Box::new(f));
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Flush the block import queue.
|
/// Flush the block import queue. Used mostly for tests.
|
||||||
pub fn flush_queue(&self) {
|
pub fn flush_queue(&self) {
|
||||||
self.importer.block_queue.flush();
|
self.importer.block_queue.flush();
|
||||||
while !self.importer.block_queue.is_empty() {
|
while !self.importer.block_queue.is_empty() {
|
||||||
@ -1444,6 +1444,7 @@ impl ImportBlock for Client {
|
|||||||
return Err(EthcoreError::Block(BlockError::UnknownParent(unverified.parent_hash())));
|
return Err(EthcoreError::Block(BlockError::UnknownParent(unverified.parent_hash())));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If the queue is empty we propagate the block in a `PriorityTask`.
|
||||||
let raw = if self.importer.block_queue.is_empty() {
|
let raw = if self.importer.block_queue.is_empty() {
|
||||||
Some((unverified.bytes.clone(), *unverified.header.difficulty()))
|
Some((unverified.bytes.clone(), *unverified.header.difficulty()))
|
||||||
} else {
|
} else {
|
||||||
@ -2729,6 +2730,7 @@ impl ImportExportBlocks for Client {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
self.flush_queue();
|
self.flush_queue();
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -407,9 +407,9 @@ impl BlockDownloader {
|
|||||||
trace_sync!(self, "Error decoding block receipts RLP: {:?}", e);
|
trace_sync!(self, "Error decoding block receipts RLP: {:?}", e);
|
||||||
BlockDownloaderImportError::Invalid
|
BlockDownloaderImportError::Invalid
|
||||||
})?;
|
})?;
|
||||||
receipts.push(receipt.as_raw().to_vec());
|
receipts.push(receipt.as_raw());
|
||||||
}
|
}
|
||||||
let hashes = self.blocks.insert_receipts(receipts);
|
let hashes = self.blocks.insert_receipts(&receipts);
|
||||||
if hashes.len() != item_count {
|
if hashes.len() != item_count {
|
||||||
trace_sync!(self, "Deactivating peer for giving invalid block receipts");
|
trace_sync!(self, "Deactivating peer for giving invalid block receipts");
|
||||||
return Err(BlockDownloaderImportError::Invalid);
|
return Err(BlockDownloaderImportError::Invalid);
|
||||||
@ -501,7 +501,7 @@ impl BlockDownloader {
|
|||||||
MAX_BODIES_TO_REQUEST_SMALL
|
MAX_BODIES_TO_REQUEST_SMALL
|
||||||
};
|
};
|
||||||
|
|
||||||
let needed_bodies = self.blocks.needed_bodies(number_of_bodies_to_request, false);
|
let needed_bodies = self.blocks.needed_bodies(number_of_bodies_to_request);
|
||||||
if !needed_bodies.is_empty() {
|
if !needed_bodies.is_empty() {
|
||||||
return Some(BlockRequest::Bodies {
|
return Some(BlockRequest::Bodies {
|
||||||
hashes: needed_bodies,
|
hashes: needed_bodies,
|
||||||
@ -509,7 +509,7 @@ impl BlockDownloader {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if self.download_receipts {
|
if self.download_receipts {
|
||||||
let needed_receipts = self.blocks.needed_receipts(MAX_RECEPITS_TO_REQUEST, false);
|
let needed_receipts = self.blocks.needed_receipts(MAX_RECEPITS_TO_REQUEST);
|
||||||
if !needed_receipts.is_empty() {
|
if !needed_receipts.is_empty() {
|
||||||
return Some(BlockRequest::Receipts {
|
return Some(BlockRequest::Receipts {
|
||||||
hashes: needed_receipts,
|
hashes: needed_receipts,
|
||||||
@ -518,7 +518,7 @@ impl BlockDownloader {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// find subchain to download
|
// find subchain to download
|
||||||
if let Some((h, count)) = self.blocks.needed_headers(MAX_HEADERS_TO_REQUEST, false) {
|
if let Some((h, count)) = self.blocks.needed_headers(MAX_HEADERS_TO_REQUEST) {
|
||||||
return Some(BlockRequest::Headers {
|
return Some(BlockRequest::Headers {
|
||||||
start: h,
|
start: h,
|
||||||
count: count as u64,
|
count: count as u64,
|
||||||
|
@ -19,7 +19,7 @@ use std::collections::{HashSet, HashMap, hash_map};
|
|||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
use ethereum_types::H256;
|
use ethereum_types::H256;
|
||||||
use keccak_hash::{keccak, KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP};
|
use keccak_hash::{keccak, KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP};
|
||||||
use log::{trace, warn};
|
use log::{debug, trace, warn};
|
||||||
use parity_util_mem::MallocSizeOf;
|
use parity_util_mem::MallocSizeOf;
|
||||||
use rlp::{Rlp, RlpStream, DecoderError};
|
use rlp::{Rlp, RlpStream, DecoderError};
|
||||||
use triehash_ethereum::ordered_trie_root;
|
use triehash_ethereum::ordered_trie_root;
|
||||||
@ -103,7 +103,7 @@ fn unverified_from_sync(header: SyncHeader, body: Option<SyncBody>) -> Unverifie
|
|||||||
header: header.header,
|
header: header.header,
|
||||||
transactions: body.transactions,
|
transactions: body.transactions,
|
||||||
uncles: body.uncles,
|
uncles: body.uncles,
|
||||||
bytes: stream.out().to_vec(),
|
bytes: stream.out(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -196,11 +196,11 @@ impl BlockCollection {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Insert a collection of block receipts for previously downloaded headers.
|
/// Insert a collection of block receipts for previously downloaded headers.
|
||||||
pub fn insert_receipts(&mut self, receipts: Vec<Bytes>) -> Vec<Vec<H256>> {
|
pub fn insert_receipts(&mut self, receipts: &[&[u8]]) -> Vec<Vec<H256>> {
|
||||||
if !self.need_receipts {
|
if !self.need_receipts {
|
||||||
return Vec::new();
|
return Vec::new();
|
||||||
}
|
}
|
||||||
receipts.into_iter()
|
receipts.iter()
|
||||||
.filter_map(|r| {
|
.filter_map(|r| {
|
||||||
self.insert_receipt(r)
|
self.insert_receipt(r)
|
||||||
.map_err(|e| trace!(target: "sync", "Ignored invalid receipt: {:?}", e))
|
.map_err(|e| trace!(target: "sync", "Ignored invalid receipt: {:?}", e))
|
||||||
@ -210,24 +210,28 @@ impl BlockCollection {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a set of block hashes that require a body download. The returned set is marked as being downloaded.
|
/// Returns a set of block hashes that require a body download. The returned set is marked as being downloaded.
|
||||||
pub fn needed_bodies(&mut self, count: usize, _ignore_downloading: bool) -> Vec<H256> {
|
pub fn needed_bodies(&mut self, count: usize) -> Vec<H256> {
|
||||||
if self.head.is_none() {
|
if self.head.is_none() {
|
||||||
return Vec::new();
|
return Vec::new();
|
||||||
}
|
}
|
||||||
let mut needed_bodies: Vec<H256> = Vec::new();
|
let mut needed_bodies: Vec<H256> = Vec::with_capacity(count);
|
||||||
let mut head = self.head;
|
let mut head = self.head;
|
||||||
while head.is_some() && needed_bodies.len() < count {
|
while needed_bodies.len() < count {
|
||||||
head = self.parents.get(&head.unwrap()).cloned();
|
head = match head {
|
||||||
if let Some(head) = head {
|
Some(head) => {
|
||||||
match self.blocks.get(&head) {
|
match self.blocks.get(&head) {
|
||||||
Some(block) if block.body.is_none() && !self.downloading_bodies.contains(&head) => {
|
Some(block) if block.body.is_none() && !self.downloading_bodies.contains(&head) => {
|
||||||
self.downloading_bodies.insert(head.clone());
|
self.downloading_bodies.insert(head.clone());
|
||||||
needed_bodies.push(head.clone());
|
needed_bodies.push(head.clone());
|
||||||
|
}
|
||||||
|
_ => (),
|
||||||
}
|
}
|
||||||
_ => (),
|
self.parents.get(&head).copied()
|
||||||
}
|
},
|
||||||
}
|
None => break
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
for h in self.header_ids.values() {
|
for h in self.header_ids.values() {
|
||||||
if needed_bodies.len() >= count {
|
if needed_bodies.len() >= count {
|
||||||
break;
|
break;
|
||||||
@ -241,25 +245,28 @@ impl BlockCollection {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a set of block hashes that require a receipt download. The returned set is marked as being downloaded.
|
/// Returns a set of block hashes that require a receipt download. The returned set is marked as being downloaded.
|
||||||
pub fn needed_receipts(&mut self, count: usize, _ignore_downloading: bool) -> Vec<H256> {
|
pub fn needed_receipts(&mut self, count: usize) -> Vec<H256> {
|
||||||
if self.head.is_none() || !self.need_receipts {
|
if self.head.is_none() || !self.need_receipts {
|
||||||
return Vec::new();
|
return Vec::new();
|
||||||
}
|
}
|
||||||
let mut needed_receipts: Vec<H256> = Vec::new();
|
let mut needed_receipts: Vec<H256> = Vec::with_capacity(count);
|
||||||
let mut head = self.head;
|
let mut head = self.head;
|
||||||
while head.is_some() && needed_receipts.len() < count {
|
while needed_receipts.len() < count {
|
||||||
head = self.parents.get(&head.unwrap()).cloned();
|
head = match head {
|
||||||
if let Some(head) = head {
|
Some(head) => {
|
||||||
match self.blocks.get(&head) {
|
match self.blocks.get(&head) {
|
||||||
Some(block) => {
|
Some(block) => {
|
||||||
if block.receipts.is_none() && !self.downloading_receipts.contains(&block.receipts_root) {
|
if block.receipts.is_none() && !self.downloading_receipts.contains(&block.receipts_root) {
|
||||||
self.downloading_receipts.insert(block.receipts_root);
|
self.downloading_receipts.insert(block.receipts_root);
|
||||||
needed_receipts.push(head.clone());
|
needed_receipts.push(head);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
_ => (),
|
||||||
}
|
}
|
||||||
_ => (),
|
self.parents.get(&head).copied()
|
||||||
}
|
},
|
||||||
}
|
None => break
|
||||||
|
};
|
||||||
}
|
}
|
||||||
// If there are multiple blocks per receipt, only request one of them.
|
// If there are multiple blocks per receipt, only request one of them.
|
||||||
for (root, h) in self.receipt_ids.iter().map(|(root, hashes)| (root, hashes[0])) {
|
for (root, h) in self.receipt_ids.iter().map(|(root, hashes)| (root, hashes[0])) {
|
||||||
@ -275,12 +282,12 @@ impl BlockCollection {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded.
|
/// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded.
|
||||||
pub fn needed_headers(&mut self, count: usize, ignore_downloading: bool) -> Option<(H256, usize)> {
|
pub fn needed_headers(&mut self, count: usize) -> Option<(H256, usize)> {
|
||||||
// find subchain to download
|
// find subchain to download
|
||||||
let mut download = None;
|
let mut download = None;
|
||||||
{
|
{
|
||||||
for h in &self.heads {
|
for h in &self.heads {
|
||||||
if ignore_downloading || !self.downloading_headers.contains(h) {
|
if !self.downloading_headers.contains(h) {
|
||||||
self.downloading_headers.insert(h.clone());
|
self.downloading_headers.insert(h.clone());
|
||||||
download = Some(h.clone());
|
download = Some(h.clone());
|
||||||
break;
|
break;
|
||||||
@ -317,42 +324,40 @@ impl BlockCollection {
|
|||||||
return Vec::new();
|
return Vec::new();
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut drained = Vec::new();
|
|
||||||
let mut hashes = Vec::new();
|
let mut hashes = Vec::new();
|
||||||
{
|
let mut blocks = Vec::new();
|
||||||
let mut blocks = Vec::new();
|
let mut head = self.head;
|
||||||
let mut head = self.head;
|
while let Some(h) = head {
|
||||||
while let Some(h) = head {
|
head = self.parents.get(&h).copied();
|
||||||
head = self.parents.get(&h).cloned();
|
if let Some(head) = head {
|
||||||
if let Some(head) = head {
|
match self.blocks.remove(&head) {
|
||||||
match self.blocks.remove(&head) {
|
Some(block) => {
|
||||||
Some(block) => {
|
if block.body.is_some() && (!self.need_receipts || block.receipts.is_some()) {
|
||||||
if block.body.is_some() && (!self.need_receipts || block.receipts.is_some()) {
|
blocks.push(block);
|
||||||
blocks.push(block);
|
hashes.push(head);
|
||||||
hashes.push(head);
|
self.head = Some(head);
|
||||||
self.head = Some(head);
|
} else {
|
||||||
} else {
|
self.blocks.insert(head, block);
|
||||||
self.blocks.insert(head, block);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
},
|
|
||||||
_ => {
|
|
||||||
break;
|
break;
|
||||||
},
|
}
|
||||||
}
|
},
|
||||||
|
_ => {
|
||||||
|
break;
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for block in blocks.into_iter() {
|
|
||||||
let unverified = unverified_from_sync(block.header, block.body);
|
|
||||||
drained.push(BlockAndReceipts {
|
|
||||||
block: unverified,
|
|
||||||
receipts: block.receipts.clone(),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
trace!(target: "sync", "Drained {} blocks, new head :{:?}", drained.len(), self.head);
|
let mut drained = Vec::with_capacity(blocks.len());
|
||||||
|
for block in blocks {
|
||||||
|
let unverified = unverified_from_sync(block.header, block.body);
|
||||||
|
drained.push(BlockAndReceipts {
|
||||||
|
block: unverified,
|
||||||
|
receipts: block.receipts,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
debug!(target: "sync", "Drained {} blocks, new head :{:?}", drained.len(), self.head);
|
||||||
drained
|
drained
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -409,7 +414,7 @@ impl BlockCollection {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn insert_receipt(&mut self, r: Bytes) -> Result<Vec<H256>, network::Error> {
|
fn insert_receipt(&mut self, r: &[u8]) -> Result<Vec<H256>, network::Error> {
|
||||||
let receipt_root = {
|
let receipt_root = {
|
||||||
let receipts = Rlp::new(&r);
|
let receipts = Rlp::new(&r);
|
||||||
ordered_trie_root(receipts.iter().map(|r| r.as_raw()))
|
ordered_trie_root(receipts.iter().map(|r| r.as_raw()))
|
||||||
@ -422,7 +427,7 @@ impl BlockCollection {
|
|||||||
match self.blocks.get_mut(&h) {
|
match self.blocks.get_mut(&h) {
|
||||||
Some(ref mut block) => {
|
Some(ref mut block) => {
|
||||||
trace!(target: "sync", "Got receipt {}", h);
|
trace!(target: "sync", "Got receipt {}", h);
|
||||||
block.receipts = Some(r.clone());
|
block.receipts = Some(r.to_vec());
|
||||||
},
|
},
|
||||||
None => {
|
None => {
|
||||||
warn!("Got receipt with no header {}", h);
|
warn!("Got receipt with no header {}", h);
|
||||||
@ -581,11 +586,11 @@ mod test {
|
|||||||
bc.reset_to(heads);
|
bc.reset_to(heads);
|
||||||
assert!(!bc.is_empty());
|
assert!(!bc.is_empty());
|
||||||
assert_eq!(hashes[0], bc.heads[0]);
|
assert_eq!(hashes[0], bc.heads[0]);
|
||||||
assert!(bc.needed_bodies(1, false).is_empty());
|
assert!(bc.needed_bodies(1).is_empty());
|
||||||
assert!(!bc.contains(&hashes[0]));
|
assert!(!bc.contains(&hashes[0]));
|
||||||
assert!(!bc.is_downloading(&hashes[0]));
|
assert!(!bc.is_downloading(&hashes[0]));
|
||||||
|
|
||||||
let (h, n) = bc.needed_headers(6, false).unwrap();
|
let (h, n) = bc.needed_headers(6).unwrap();
|
||||||
assert!(bc.is_downloading(&hashes[0]));
|
assert!(bc.is_downloading(&hashes[0]));
|
||||||
assert_eq!(hashes[0], h);
|
assert_eq!(hashes[0], h);
|
||||||
assert_eq!(n, 6);
|
assert_eq!(n, 6);
|
||||||
@ -608,9 +613,9 @@ mod test {
|
|||||||
assert!(!bc.contains(&hashes[0]));
|
assert!(!bc.contains(&hashes[0]));
|
||||||
assert_eq!(hashes[5], bc.head.unwrap());
|
assert_eq!(hashes[5], bc.head.unwrap());
|
||||||
|
|
||||||
let (h, _) = bc.needed_headers(6, false).unwrap();
|
let (h, _) = bc.needed_headers(6).unwrap();
|
||||||
assert_eq!(hashes[5], h);
|
assert_eq!(hashes[5], h);
|
||||||
let (h, _) = bc.needed_headers(6, false).unwrap();
|
let (h, _) = bc.needed_headers(6).unwrap();
|
||||||
assert_eq!(hashes[20], h);
|
assert_eq!(hashes[20], h);
|
||||||
bc.insert_headers(headers[10..16].into_iter().map(Clone::clone).collect());
|
bc.insert_headers(headers[10..16].into_iter().map(Clone::clone).collect());
|
||||||
assert!(bc.drain().is_empty());
|
assert!(bc.drain().is_empty());
|
||||||
|
Loading…
Reference in New Issue
Block a user