// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! A queue of blocks. Sits between network or other I/O and the BlockChain.
//! Sorts them ready for blockchain insertion.
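//!
//! A rough usage sketch (the `Spec` and `IoChannel` setup comes from the surrounding
//! crates and is elided; `spec`, `message_channel`, `block_bytes` and `imported_hashes`
//! are placeholder names, and the example is not compiled):
//!
//! ```ignore
//! let engine = spec.to_engine().unwrap();
//! let queue = BlockQueue::new(Arc::new(engine), message_channel);
//! queue.import_block(block_bytes).unwrap();
//! queue.flush();
//! let blocks = queue.drain(128);
//! // ... insert `blocks` into the chain, then:
//! queue.mark_as_good(&imported_hashes);
//! ```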

use std::thread::{JoinHandle, self};
use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};

use util::*;
use verification::*;
use error::*;
use engine::Engine;
use views::*;
use header::*;
use service::*;
use client::BlockStatus;
use util::panics::*;

/// Block queue status
#[derive(Debug)]
pub struct BlockQueueInfo {
	/// Number of queued blocks pending verification
	pub unverified_queue_size: usize,
	/// Number of verified queued blocks pending import
	pub verified_queue_size: usize,
	/// Number of blocks being verified
	pub verifying_queue_size: usize,
}

impl BlockQueueInfo {
	/// The total size of the queues.
	pub fn total_queue_size(&self) -> usize { self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size }

	/// The size of the unverified and verifying queues.
	pub fn incomplete_queue_size(&self) -> usize { self.unverified_queue_size + self.verifying_queue_size }

	/// Indicates that the queue is full.
	pub fn is_full(&self) -> bool {
		self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size > MAX_UNVERIFIED_QUEUE_SIZE
	}

	/// Indicates that the queue is empty.
	pub fn is_empty(&self) -> bool {
		self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size == 0
	}
}
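
// A minimal back-pressure sketch (hypothetical caller; `queue` is assumed to be
// a `BlockQueue` and `bytes` an RLP-encoded block):
//
//     if !queue.queue_info().is_full() {
//         let _ = queue.import_block(bytes);
//     }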

/// A queue of blocks. Sits between network or other I/O and the BlockChain.
/// Sorts them ready for blockchain insertion.
pub struct BlockQueue {
	panic_handler: Arc<PanicHandler>,
	engine: Arc<Box<Engine>>,
	more_to_verify: Arc<Condvar>,
	verification: Arc<Verification>,
	verifiers: Vec<JoinHandle<()>>,
	deleting: Arc<AtomicBool>,
	ready_signal: Arc<QueueSignal>,
	empty: Arc<Condvar>,
	processing: RwLock<HashSet<H256>>
}

struct UnVerifiedBlock {
	header: Header,
	bytes: Bytes,
}

struct VerifyingBlock {
	hash: H256,
	block: Option<PreVerifiedBlock>,
}

struct QueueSignal {
	signalled: AtomicBool,
	message_channel: IoChannel<NetSyncMessage>,
}

impl QueueSignal {
	#[cfg_attr(feature="dev", allow(bool_comparison))]
	fn set(&self) {
		if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false {
			self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message");
		}
	}

	fn reset(&self) {
		self.signalled.store(false, AtomicOrdering::Relaxed);
	}
}

struct Verification {
	// All locks must be captured in the order declared here.
	unverified: Mutex<VecDeque<UnVerifiedBlock>>,
	verified: Mutex<VecDeque<PreVerifiedBlock>>,
	verifying: Mutex<VecDeque<VerifyingBlock>>,
	bad: Mutex<HashSet<H256>>,
}

const MAX_UNVERIFIED_QUEUE_SIZE: usize = 50000;

impl BlockQueue {
	/// Creates a new queue instance.
	pub fn new(engine: Arc<Box<Engine>>, message_channel: IoChannel<NetSyncMessage>) -> BlockQueue {
		let verification = Arc::new(Verification {
			unverified: Mutex::new(VecDeque::new()),
			verified: Mutex::new(VecDeque::new()),
			verifying: Mutex::new(VecDeque::new()),
			bad: Mutex::new(HashSet::new()),
		});
		let more_to_verify = Arc::new(Condvar::new());
		let ready_signal = Arc::new(QueueSignal { signalled: AtomicBool::new(false), message_channel: message_channel });
		let deleting = Arc::new(AtomicBool::new(false));
		let empty = Arc::new(Condvar::new());
		let panic_handler = PanicHandler::new_in_arc();

		let mut verifiers: Vec<JoinHandle<()>> = Vec::new();
		let thread_count = max(::num_cpus::get(), 3) - 2;
		for i in 0..thread_count {
			let verification = verification.clone();
			let engine = engine.clone();
			let more_to_verify = more_to_verify.clone();
			let ready_signal = ready_signal.clone();
			let empty = empty.clone();
			let deleting = deleting.clone();
			let panic_handler = panic_handler.clone();
			verifiers.push(
				thread::Builder::new()
					.name(format!("Verifier #{}", i))
					.spawn(move || {
						panic_handler.catch_panic(move || {
							BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting, empty)
						}).unwrap()
					})
					.expect("Error starting block verification thread")
			);
		}
		BlockQueue {
			engine: engine,
			panic_handler: panic_handler,
			ready_signal: ready_signal.clone(),
			more_to_verify: more_to_verify.clone(),
			verification: verification.clone(),
			verifiers: verifiers,
			deleting: deleting.clone(),
			processing: RwLock::new(HashSet::new()),
			empty: empty.clone(),
		}
	}

	fn verify(verification: Arc<Verification>, engine: Arc<Box<Engine>>, wait: Arc<Condvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<Condvar>) {
		while !deleting.load(AtomicOrdering::Acquire) {
			{
				let mut unverified = verification.unverified.lock().unwrap();

				if unverified.is_empty() && verification.verifying.lock().unwrap().is_empty() {
					empty.notify_all();
				}

				while unverified.is_empty() && !deleting.load(AtomicOrdering::Acquire) {
					unverified = wait.wait(unverified).unwrap();
				}

				if deleting.load(AtomicOrdering::Acquire) {
					return;
				}
			}

			let block = {
				let mut unverified = verification.unverified.lock().unwrap();
				if unverified.is_empty() {
					continue;
				}
				let mut verifying = verification.verifying.lock().unwrap();
				let block = unverified.pop_front().unwrap();
				verifying.push_back(VerifyingBlock { hash: block.header.hash(), block: None });
				block
			};

			let block_hash = block.header.hash();
			match verify_block_unordered(block.header, block.bytes, engine.deref().deref()) {
				Ok(verified) => {
					let mut verifying = verification.verifying.lock().unwrap();
					for e in verifying.iter_mut() {
						if e.hash == block_hash {
							e.block = Some(verified);
							break;
						}
					}
					if !verifying.is_empty() && verifying.front().unwrap().hash == block_hash {
						// we're next!
						let mut verified = verification.verified.lock().unwrap();
						let mut bad = verification.bad.lock().unwrap();
						BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad);
						ready.set();
					}
				},
				Err(err) => {
					let mut verifying = verification.verifying.lock().unwrap();
					let mut verified = verification.verified.lock().unwrap();
					let mut bad = verification.bad.lock().unwrap();
					warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", block_hash, err);
					bad.insert(block_hash.clone());
					verifying.retain(|e| e.hash != block_hash);
					BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad);
					ready.set();
				}
			}
		}
	}

	fn drain_verifying(verifying: &mut VecDeque<VerifyingBlock>, verified: &mut VecDeque<PreVerifiedBlock>, bad: &mut HashSet<H256>) {
		while !verifying.is_empty() && verifying.front().unwrap().block.is_some() {
			let block = verifying.pop_front().unwrap().block.unwrap();
			if bad.contains(&block.header.parent_hash) {
				bad.insert(block.header.hash());
			} else {
				verified.push_back(block);
			}
		}
	}

	/// Clear the queue and stop verification activity.
	pub fn clear(&self) {
		let mut unverified = self.verification.unverified.lock().unwrap();
		let mut verifying = self.verification.verifying.lock().unwrap();
		let mut verified = self.verification.verified.lock().unwrap();
		unverified.clear();
		verifying.clear();
		verified.clear();
		self.processing.write().unwrap().clear();
	}

	/// Wait for the unverified and verifying queues to be empty.
	pub fn flush(&self) {
		let mut unverified = self.verification.unverified.lock().unwrap();
		while !unverified.is_empty() || !self.verification.verifying.lock().unwrap().is_empty() {
			unverified = self.empty.wait(unverified).unwrap();
		}
	}

	/// Check if the block is currently in the queue.
	pub fn block_status(&self, hash: &H256) -> BlockStatus {
		if self.processing.read().unwrap().contains(&hash) {
			return BlockStatus::Queued;
		}
		if self.verification.bad.lock().unwrap().contains(&hash) {
			return BlockStatus::Bad;
		}
		BlockStatus::Unknown
	}

	/// Add a block to the queue.
	pub fn import_block(&self, bytes: Bytes) -> ImportResult {
		let header = BlockView::new(&bytes).header();
		let h = header.hash();
		{
			if self.processing.read().unwrap().contains(&h) {
				return Err(ImportError::AlreadyQueued);
			}
		}
		{
			let mut bad = self.verification.bad.lock().unwrap();
			if bad.contains(&h) {
				return Err(ImportError::Bad(None));
			}

			if bad.contains(&header.parent_hash) {
				bad.insert(h.clone());
				return Err(ImportError::Bad(None));
			}
		}
		match verify_block_basic(&header, &bytes, self.engine.deref().deref()) {
			Ok(()) => {
				self.processing.write().unwrap().insert(h.clone());
				self.verification.unverified.lock().unwrap().push_back(UnVerifiedBlock { header: header, bytes: bytes });
				self.more_to_verify.notify_all();
				Ok(h)
			},
			Err(err) => {
				warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err);
				self.verification.bad.lock().unwrap().insert(h.clone());
				Err(From::from(err))
			}
		}
	}

	/// Mark given block and all its children as bad. Stops verification.
	pub fn mark_as_bad(&self, hash: &H256) {
		let mut verified_lock = self.verification.verified.lock().unwrap();
		let mut verified = verified_lock.deref_mut();
		let mut bad = self.verification.bad.lock().unwrap();
		bad.insert(hash.clone());
		self.processing.write().unwrap().remove(&hash);
		let mut new_verified = VecDeque::new();
		for block in verified.drain(..) {
			if bad.contains(&block.header.parent_hash) {
				bad.insert(block.header.hash());
				self.processing.write().unwrap().remove(&block.header.hash());
			} else {
				new_verified.push_back(block);
			}
		}
		*verified = new_verified;
	}

	/// Mark given blocks as processed.
	pub fn mark_as_good(&self, hashes: &[H256]) {
		let mut processing = self.processing.write().unwrap();
		for h in hashes {
			processing.remove(&h);
		}
	}

	/// Removes up to `max` verified blocks from the queue.
	pub fn drain(&self, max: usize) -> Vec<PreVerifiedBlock> {
		let mut verified = self.verification.verified.lock().unwrap();
		let count = min(max, verified.len());
		let mut result = Vec::with_capacity(count);
		for _ in 0..count {
			let block = verified.pop_front().unwrap();
			result.push(block);
		}
		self.ready_signal.reset();
		if !verified.is_empty() {
			self.ready_signal.set();
		}
		result
	}
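
	// A rough consumer sketch (hypothetical caller; assumes drained blocks are
	// written to the chain before their hashes are passed to `mark_as_good`, and
	// reads the header field the way `drain_verifying` above does):
	//
	//     let blocks = queue.drain(128);
	//     let hashes: Vec<H256> = blocks.iter().map(|b| b.header.hash()).collect();
	//     // ... insert `blocks` into the BlockChain ...
	//     queue.mark_as_good(&hashes);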

	/// Get queue status.
	pub fn queue_info(&self) -> BlockQueueInfo {
		BlockQueueInfo {
			unverified_queue_size: self.verification.unverified.lock().unwrap().len(),
			verifying_queue_size: self.verification.verifying.lock().unwrap().len(),
			verified_queue_size: self.verification.verified.lock().unwrap().len(),
		}
	}
}

impl MayPanic for BlockQueue {
	fn on_panic<F>(&self, closure: F) where F: OnPanicListener {
		self.panic_handler.on_panic(closure);
	}
}

impl Drop for BlockQueue {
	fn drop(&mut self) {
		self.clear();
		self.deleting.store(true, AtomicOrdering::Release);
		self.more_to_verify.notify_all();
		for t in self.verifiers.drain(..) {
			t.join().unwrap();
		}
	}
}

#[cfg(test)]
mod tests {
	use util::*;
	use spec::*;
	use block_queue::*;
	use tests::helpers::*;
	use error::*;
	use views::*;

	fn get_test_queue() -> BlockQueue {
		let spec = get_test_spec();
		let engine = spec.to_engine().unwrap();
		BlockQueue::new(Arc::new(engine), IoChannel::disconnected())
	}

	#[test]
	fn can_be_created() {
		// TODO better test
		let spec = Spec::new_test();
		let engine = spec.to_engine().unwrap();
		let _ = BlockQueue::new(Arc::new(engine), IoChannel::disconnected());
	}

	#[test]
	fn can_import_blocks() {
		let queue = get_test_queue();
		if let Err(e) = queue.import_block(get_good_dummy_block()) {
			panic!("error importing block that is valid by definition({:?})", e);
		}
	}

	#[test]
	fn returns_error_for_duplicates() {
		let queue = get_test_queue();
		if let Err(e) = queue.import_block(get_good_dummy_block()) {
			panic!("error importing block that is valid by definition({:?})", e);
		}

		let duplicate_import = queue.import_block(get_good_dummy_block());
		match duplicate_import {
			Err(e) => {
				match e {
					ImportError::AlreadyQueued => {},
					_ => { panic!("must return AlreadyQueued error"); }
				}
			}
			Ok(_) => { panic!("must produce error"); }
		}
	}

	#[test]
	fn returns_ok_for_drained_duplicates() {
		let queue = get_test_queue();
		let block = get_good_dummy_block();
		let hash = BlockView::new(&block).header().hash().clone();
		if let Err(e) = queue.import_block(block) {
			panic!("error importing block that is valid by definition({:?})", e);
		}
		queue.flush();
		queue.drain(10);
		queue.mark_as_good(&[ hash ]);

		if let Err(e) = queue.import_block(get_good_dummy_block()) {
			panic!("error importing block that has already been drained ({:?})", e);
		}
	}

	#[test]
	fn returns_empty_once_finished() {
		let queue = get_test_queue();
		queue.import_block(get_good_dummy_block()).expect("error importing block that is valid by definition");
		queue.flush();
		queue.drain(1);

		assert!(queue.queue_info().is_empty());
	}
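
	#[test]
	fn reports_queued_status_for_pending_block() {
		// Rough sketch of the expected `block_status` behaviour; reuses the helpers
		// from the tests above and assumes `client::BlockStatus` is reachable from
		// the crate root by this path.
		let queue = get_test_queue();
		let block = get_good_dummy_block();
		let hash = BlockView::new(&block).header().hash().clone();
		if let Err(e) = queue.import_block(block) {
			panic!("error importing block that is valid by definition({:?})", e);
		}
		match queue.block_status(&hash) {
			::client::BlockStatus::Queued => {},
			_ => panic!("imported block should be reported as queued"),
		}
	}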
}