openethereum/ethcore/src/block_queue.rs

// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! A queue of blocks. Sits between network or other I/O and the BlockChain.
//! Sorts them ready for blockchain insertion.
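//!
//! Typical usage, as exercised by the tests at the bottom of this file (illustrative
//! sketch only; `engine`, `io_channel`, `block_bytes` and `hashes` are placeholders for
//! values the caller already has):
//!
//! ```ignore
//! let mut queue = BlockQueue::new(engine, io_channel);
//! queue.import_block(block_bytes).unwrap(); // stage 1 verification, then queued for stage 2
//! queue.flush();                            // wait until the verifier threads are done
//! let blocks = queue.drain(128);            // take verified blocks out for import
//! queue.mark_as_good(&hashes);              // release their hashes once imported
//! ```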
use std::thread::{JoinHandle, self};
use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};
use util::*;
use verification::*;
use error::*;
use engine::Engine;
use views::*;
use header::*;
use service::*;
use client::BlockStatus;
use util::panics::*;

/// Block queue status
#[derive(Debug)]
pub struct BlockQueueInfo {
	/// Number of queued blocks pending verification
	pub unverified_queue_size: usize,
	/// Number of verified queued blocks pending import
	pub verified_queue_size: usize,
	/// Number of blocks being verified
	pub verifying_queue_size: usize,
}

impl BlockQueueInfo {
	/// The total size of the queues.
	pub fn total_queue_size(&self) -> usize { self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size }

	/// The size of the unverified and verifying queues.
	pub fn incomplete_queue_size(&self) -> usize { self.unverified_queue_size + self.verifying_queue_size }

	/// Indicates that the queue is full.
	pub fn is_full(&self) -> bool {
		self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size > MAX_UNVERIFIED_QUEUE_SIZE
	}

	/// Indicates that the queue is empty.
	pub fn is_empty(&self) -> bool {
		self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size == 0
	}
}

/// A queue of blocks. Sits between network or other I/O and the BlockChain.
/// Sorts them ready for blockchain insertion.
pub struct BlockQueue {
	panic_handler: Arc<PanicHandler>,
	engine: Arc<Box<Engine>>,
	more_to_verify: Arc<Condvar>,
	verification: Arc<Mutex<Verification>>,
	verifiers: Vec<JoinHandle<()>>,
	deleting: Arc<AtomicBool>,
	ready_signal: Arc<QueueSignal>,
	empty: Arc<Condvar>,
	processing: RwLock<HashSet<H256>>,
}
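
/// A block that has passed stage-1 (basic) verification and is waiting for a verifier thread.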
struct UnVerifiedBlock {
	header: Header,
	bytes: Bytes,
}
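
/// An entry in the `verifying` queue; `block` is filled in once stage-2 verification of
/// the block with this `hash` has completed.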
struct VerifyingBlock {
	hash: H256,
	block: Option<PreVerifiedBlock>,
}
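
/// De-duplicates `BlockVerified` notifications: `set` posts at most one message to the
/// sync IO channel until `reset` is called.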
struct QueueSignal {
	signalled: AtomicBool,
	message_channel: IoChannel<NetSyncMessage>,
}

impl QueueSignal {
	#[cfg_attr(feature="dev", allow(bool_comparison))]
	fn set(&self) {
		if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false {
			self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message");
		}
	}

	fn reset(&self) {
		self.signalled.store(false, AtomicOrdering::Relaxed);
	}
}
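
/// Verification state shared with the verifier threads, guarded by a single mutex:
/// the three block queues plus the set of hashes known to be bad.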
#[derive(Default)]
struct Verification {
	unverified: VecDeque<UnVerifiedBlock>,
	verified: VecDeque<PreVerifiedBlock>,
	verifying: VecDeque<VerifyingBlock>,
	bad: HashSet<H256>,
}
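
/// Queue size threshold used by `BlockQueueInfo::is_full`.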
const MAX_UNVERIFIED_QUEUE_SIZE: usize = 50000;

impl BlockQueue {
	/// Creates a new queue instance.
	pub fn new(engine: Arc<Box<Engine>>, message_channel: IoChannel<NetSyncMessage>) -> BlockQueue {
		let verification = Arc::new(Mutex::new(Verification::default()));
		let more_to_verify = Arc::new(Condvar::new());
		let ready_signal = Arc::new(QueueSignal { signalled: AtomicBool::new(false), message_channel: message_channel });
		let deleting = Arc::new(AtomicBool::new(false));
		let empty = Arc::new(Condvar::new());
		let panic_handler = PanicHandler::new_in_arc();

		let mut verifiers: Vec<JoinHandle<()>> = Vec::new();
		let thread_count = max(::num_cpus::get(), 3) - 2;
		for i in 0..thread_count {
			let verification = verification.clone();
			let engine = engine.clone();
			let more_to_verify = more_to_verify.clone();
			let ready_signal = ready_signal.clone();
			let empty = empty.clone();
			let deleting = deleting.clone();
			let panic_handler = panic_handler.clone();
			verifiers.push(
				thread::Builder::new()
					.name(format!("Verifier #{}", i))
					.spawn(move || {
						panic_handler.catch_panic(move || {
							BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting, empty)
						}).unwrap()
					})
					.expect("Error starting block verification thread")
			);
		}
		BlockQueue {
			engine: engine,
			panic_handler: panic_handler,
			ready_signal: ready_signal.clone(),
			more_to_verify: more_to_verify.clone(),
			verification: verification.clone(),
			verifiers: verifiers,
			deleting: deleting.clone(),
			processing: RwLock::new(HashSet::new()),
			empty: empty.clone(),
		}
	}
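
	// A verifier thread: waits on `wait` for work, pops a block from `unverified` into
	// `verifying`, runs stage-2 verification, then promotes completed blocks to `verified`
	// in arrival order via `drain_verifying`, signalling `ready` when new verified blocks
	// become available. Exits when `deleting` is set (see `Drop`).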
	fn verify(verification: Arc<Mutex<Verification>>, engine: Arc<Box<Engine>>, wait: Arc<Condvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<Condvar>) {
		while !deleting.load(AtomicOrdering::Acquire) {
			{
				let mut lock = verification.lock().unwrap();
				if lock.unverified.is_empty() && lock.verifying.is_empty() {
					empty.notify_all();
				}

				while lock.unverified.is_empty() && !deleting.load(AtomicOrdering::Acquire) {
					lock = wait.wait(lock).unwrap();
				}

				if deleting.load(AtomicOrdering::Acquire) {
					return;
				}
			}

			let block = {
				let mut v = verification.lock().unwrap();
				if v.unverified.is_empty() {
					continue;
				}
				let block = v.unverified.pop_front().unwrap();
				v.verifying.push_back(VerifyingBlock{ hash: block.header.hash(), block: None });
				block
			};

			let block_hash = block.header.hash();
			match verify_block_unordered(block.header, block.bytes, engine.deref().deref()) {
				Ok(verified) => {
					let mut v = verification.lock().unwrap();
					for e in &mut v.verifying {
						if e.hash == block_hash {
							e.block = Some(verified);
							break;
						}
					}
					if !v.verifying.is_empty() && v.verifying.front().unwrap().hash == block_hash {
						// we're next!
						let mut vref = v.deref_mut();
						BlockQueue::drain_verifying(&mut vref.verifying, &mut vref.verified, &mut vref.bad);
						ready.set();
					}
				},
				Err(err) => {
					let mut v = verification.lock().unwrap();
					warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", block_hash, err);
					v.bad.insert(block_hash.clone());
					v.verifying.retain(|e| e.hash != block_hash);
					let mut vref = v.deref_mut();
					BlockQueue::drain_verifying(&mut vref.verifying, &mut vref.verified, &mut vref.bad);
					ready.set();
				}
			}
		}
	}
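
	// Promote blocks from the front of `verifying` to `verified` for as long as their
	// stage-2 result is ready; a block whose parent is bad is marked bad instead of promoted.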
	fn drain_verifying(verifying: &mut VecDeque<VerifyingBlock>, verified: &mut VecDeque<PreVerifiedBlock>, bad: &mut HashSet<H256>) {
		while !verifying.is_empty() && verifying.front().unwrap().block.is_some() {
			let block = verifying.pop_front().unwrap().block.unwrap();
			if bad.contains(&block.header.parent_hash) {
				bad.insert(block.header.hash());
			}
			else {
				verified.push_back(block);
			}
		}
	}

	/// Clear the queue and stop verification activity.
	pub fn clear(&mut self) {
		let mut verification = self.verification.lock().unwrap();
		verification.unverified.clear();
		verification.verifying.clear();
		verification.verified.clear();
		self.processing.write().unwrap().clear();
	}

	/// Wait for the queue to be empty.
	pub fn flush(&mut self) {
		let mut verification = self.verification.lock().unwrap();
		while !verification.unverified.is_empty() || !verification.verifying.is_empty() {
			verification = self.empty.wait(verification).unwrap();
		}
	}

	/// Check if the block is currently in the queue
	pub fn block_status(&self, hash: &H256) -> BlockStatus {
		if self.processing.read().unwrap().contains(&hash) {
			return BlockStatus::Queued;
		}
		if self.verification.lock().unwrap().bad.contains(&hash) {
			return BlockStatus::Bad;
		}
		BlockStatus::Unknown
	}

	/// Add a block to the queue.
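	/// Runs stage-1 (basic) verification on the caller's thread; on success the block is
	/// queued for stage-2 verification by the verifier threads and its hash is returned.
	/// Duplicates and descendants of known-bad blocks are rejected.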
	pub fn import_block(&mut self, bytes: Bytes) -> ImportResult {
		let header = BlockView::new(&bytes).header();
		let h = header.hash();
		if self.processing.read().unwrap().contains(&h) {
			return Err(ImportError::AlreadyQueued);
		}

		{
			let mut verification = self.verification.lock().unwrap();
			if verification.bad.contains(&h) {
				return Err(ImportError::Bad(None));
			}

			if verification.bad.contains(&header.parent_hash) {
				verification.bad.insert(h.clone());
				return Err(ImportError::Bad(None));
			}
		}

		match verify_block_basic(&header, &bytes, self.engine.deref().deref()) {
			Ok(()) => {
				self.processing.write().unwrap().insert(h.clone());
				self.verification.lock().unwrap().unverified.push_back(UnVerifiedBlock { header: header, bytes: bytes });
				self.more_to_verify.notify_all();
				Ok(h)
			},
			Err(err) => {
				warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err);
				self.verification.lock().unwrap().bad.insert(h.clone());
				Err(From::from(err))
			}
		}
	}

	/// Mark given blocks and all their children as bad. Stops verification.
	pub fn mark_as_bad(&mut self, hashes: &[H256]) {
		let mut verification_lock = self.verification.lock().unwrap();
		let mut verification = verification_lock.deref_mut();
		let mut new_verified = VecDeque::new();
		for hash in hashes {
			verification.bad.insert(hash.clone());
			self.processing.write().unwrap().remove(&hash);
			for block in verification.verified.drain(..) {
				if verification.bad.contains(&block.header.parent_hash) {
					verification.bad.insert(block.header.hash());
					self.processing.write().unwrap().remove(&block.header.hash());
				}
				else {
					new_verified.push_back(block);
				}
			}
		}
		verification.verified = new_verified;
	}

	/// Mark given blocks as processed.
	pub fn mark_as_good(&mut self, hashes: &[H256]) {
		let mut processing = self.processing.write().unwrap();
		for h in hashes {
			processing.remove(&h);
		}
	}

	/// Removes up to `max` verified blocks from the queue
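	/// Callers are expected to pass the drained blocks' hashes to `mark_as_good` once they
	/// have been imported, so the hashes are released from the `processing` set (see the
	/// `returns_ok_for_drained_duplicates` test below).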
	pub fn drain(&mut self, max: usize) -> Vec<PreVerifiedBlock> {
		let mut verification = self.verification.lock().unwrap();
		let count = min(max, verification.verified.len());
		let mut result = Vec::with_capacity(count);
		for _ in 0..count {
			let block = verification.verified.pop_front().unwrap();
			result.push(block);
		}
		self.ready_signal.reset();
		if !verification.verified.is_empty() {
			self.ready_signal.set();
		}
		result
	}

	/// Get queue status.
	pub fn queue_info(&self) -> BlockQueueInfo {
		let verification = self.verification.lock().unwrap();
		BlockQueueInfo {
			verified_queue_size: verification.verified.len(),
			unverified_queue_size: verification.unverified.len(),
			verifying_queue_size: verification.verifying.len(),
		}
	}
}

impl MayPanic for BlockQueue {
	fn on_panic<F>(&self, closure: F) where F: OnPanicListener {
		self.panic_handler.on_panic(closure);
	}
}
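
// Dropping the queue shuts the verifier threads down: pending work is cleared, the
// `deleting` flag is set, and every thread is woken and joined.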
impl Drop for BlockQueue {
	fn drop(&mut self) {
		self.clear();
		self.deleting.store(true, AtomicOrdering::Release);
		self.more_to_verify.notify_all();
		for t in self.verifiers.drain(..) {
			t.join().unwrap();
		}
	}
}

#[cfg(test)]
mod tests {
	use util::*;
	use spec::*;
	use block_queue::*;
	use tests::helpers::*;
	use error::*;
	use views::*;

	fn get_test_queue() -> BlockQueue {
		let spec = get_test_spec();
		let engine = spec.to_engine().unwrap();
		BlockQueue::new(Arc::new(engine), IoChannel::disconnected())
	}

	#[test]
	fn can_be_created() {
		// TODO better test
		let spec = Spec::new_test();
		let engine = spec.to_engine().unwrap();
		let _ = BlockQueue::new(Arc::new(engine), IoChannel::disconnected());
	}

	#[test]
	fn can_import_blocks() {
		let mut queue = get_test_queue();
		if let Err(e) = queue.import_block(get_good_dummy_block()) {
			panic!("error importing block that is valid by definition ({:?})", e);
		}
	}

	#[test]
	fn returns_error_for_duplicates() {
		let mut queue = get_test_queue();
		if let Err(e) = queue.import_block(get_good_dummy_block()) {
			panic!("error importing block that is valid by definition ({:?})", e);
		}

		let duplicate_import = queue.import_block(get_good_dummy_block());
		match duplicate_import {
			Err(e) => {
				match e {
					ImportError::AlreadyQueued => {},
					_ => { panic!("must return AlreadyQueued error"); }
				}
			}
			Ok(_) => { panic!("must produce error"); }
		}
	}

	#[test]
	fn returns_ok_for_drained_duplicates() {
		let mut queue = get_test_queue();
		let block = get_good_dummy_block();
		let hash = BlockView::new(&block).header().hash().clone();
		if let Err(e) = queue.import_block(block) {
			panic!("error importing block that is valid by definition ({:?})", e);
		}
		queue.flush();
		queue.drain(10);
		queue.mark_as_good(&[ hash ]);

		if let Err(e) = queue.import_block(get_good_dummy_block()) {
			panic!("error importing block that has already been drained ({:?})", e);
		}
	}

	#[test]
	fn returns_empty_once_finished() {
		let mut queue = get_test_queue();
		queue.import_block(get_good_dummy_block()).expect("error importing block that is valid by definition");
		queue.flush();
		queue.drain(1);

		assert!(queue.queue_info().is_empty());
	}
}