openethereum/ethcore/src/verification/queue/mod.rs

// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! A queue of blocks. Sits between network or other I/O and the `BlockChain`.
//! Sorts them ready for blockchain insertion.
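//!
//! A minimal usage sketch (not taken from this file's tests; `engine`, `message_channel`
//! and `unverified_block` are assumed to be constructed elsewhere):
//!
//! ```ignore
//! let queue = BlockQueue::new(Config::default(), engine, message_channel, true);
//! // Hand the queue an unverified block; background verifier threads pick it up.
//! let hash = queue.import(unverified_block)?;
//! // Later, take up to 128 verified blocks, ready for insertion into the chain.
//! let ready = queue.drain(128);
//! ```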
use std::thread::{JoinHandle, self};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrdering};
use std::sync::{Condvar as SCondvar, Mutex as SMutex};
use util::*;
use io::*;
use error::*;
use engines::Engine;
use service::*;
use self::kind::{HasHash, Kind};
pub use types::verification_queue_info::VerificationQueueInfo as QueueInfo;
pub mod kind;
const MIN_MEM_LIMIT: usize = 16384;
const MIN_QUEUE_LIMIT: usize = 512;
/// Type alias for block queue convenience.
pub type BlockQueue = VerificationQueue<self::kind::Blocks>;
/// Type alias for header queue convenience.
pub type HeaderQueue = VerificationQueue<self::kind::Headers>;
/// Verification queue configuration
#[derive(Debug, PartialEq, Clone)]
pub struct Config {
/// Maximum number of items to keep in unverified queue.
/// When the limit is reached, is_full returns true.
pub max_queue_size: usize,
/// Maximum heap memory to use.
/// When the limit is reached, is_full returns true.
pub max_mem_use: usize,
}
impl Default for Config {
fn default() -> Self {
Config {
max_queue_size: 30000,
max_mem_use: 50 * 1024 * 1024,
}
}
}
struct VerifierHandle {
deleting: Arc<AtomicBool>,
thread: JoinHandle<()>,
}
impl VerifierHandle {
// signal to the verifier thread that it should conclude its
// operations.
fn conclude(&self) {
self.deleting.store(true, AtomicOrdering::Release);
}
// join the verifier thread.
fn join(self) {
self.thread.join().unwrap();
}
}
/// An item which is in the process of being verified.
pub struct Verifying<K: Kind> {
hash: H256,
output: Option<K::Verified>,
}
impl<K: Kind> HeapSizeOf for Verifying<K> {
fn heap_size_of_children(&self) -> usize {
self.output.heap_size_of_children()
}
}
/// Status of items in the queue.
pub enum Status {
/// Currently queued.
Queued,
/// Known to be bad.
Bad,
/// Unknown.
Unknown,
}
// the internal queue sizes.
struct Sizes {
unverified: AtomicUsize,
verifying: AtomicUsize,
verified: AtomicUsize,
}
/// A queue of items to be verified. Sits between network or other I/O and the `BlockChain`.
/// Keeps them in the same order as inserted, minus invalid items.
pub struct VerificationQueue<K: Kind> {
panic_handler: Arc<PanicHandler>,
engine: Arc<Engine>,
more_to_verify: Arc<SCondvar>,
verification: Arc<Verification<K>>,
verifiers: Mutex<Vec<VerifierHandle>>,
deleting: Arc<AtomicBool>,
ready_signal: Arc<QueueSignal>,
empty: Arc<SCondvar>,
processing: RwLock<HashSet<H256>>,
ticks_since_adjustment: AtomicUsize,
max_queue_size: usize,
max_mem_use: usize,
}
struct QueueSignal {
deleting: Arc<AtomicBool>,
signalled: AtomicBool,
message_channel: Mutex<IoChannel<ClientIoMessage>>,
}
impl QueueSignal {
#[cfg_attr(feature="dev", allow(bool_comparison))]
fn set_sync(&self) {
// Do not signal when we are about to close
if self.deleting.load(AtomicOrdering::Relaxed) {
return;
}
if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false {
let channel = self.message_channel.lock().clone();
if let Err(e) = channel.send_sync(ClientIoMessage::BlockVerified) {
debug!("Error sending BlockVerified message: {:?}", e);
}
}
}
#[cfg_attr(feature="dev", allow(bool_comparison))]
fn set_async(&self) {
// Do not signal when we are about to close
if self.deleting.load(AtomicOrdering::Relaxed) {
return;
}
if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false {
let channel = self.message_channel.lock().clone();
if let Err(e) = channel.send(ClientIoMessage::BlockVerified) {
debug!("Error sending BlockVerified message: {:?}", e);
}
}
}
fn reset(&self) {
self.signalled.store(false, AtomicOrdering::Relaxed);
}
}
struct Verification<K: Kind> {
// All locks must be captured in the order declared here.
unverified: Mutex<VecDeque<K::Unverified>>,
verifying: Mutex<VecDeque<Verifying<K>>>,
verified: Mutex<VecDeque<K::Verified>>,
bad: Mutex<HashSet<H256>>,
more_to_verify: SMutex<()>,
empty: SMutex<()>,
verified_count: AtomicUsize,
drained: AtomicUsize,
imported: AtomicUsize,
sizes: Sizes,
check_seal: bool,
}
impl<K: Kind> VerificationQueue<K> {
/// Creates a new queue instance.
pub fn new(config: Config, engine: Arc<Engine>, message_channel: IoChannel<ClientIoMessage>, check_seal: bool) -> Self {
let verification = Arc::new(Verification {
unverified: Mutex::new(VecDeque::new()),
verifying: Mutex::new(VecDeque::new()),
verified: Mutex::new(VecDeque::new()),
bad: Mutex::new(HashSet::new()),
more_to_verify: SMutex::new(()),
empty: SMutex::new(()),
verified_count: AtomicUsize::new(0),
drained: AtomicUsize::new(0),
imported: AtomicUsize::new(0),
sizes: Sizes {
unverified: AtomicUsize::new(0),
verifying: AtomicUsize::new(0),
verified: AtomicUsize::new(0),
},
check_seal: check_seal,
});
let more_to_verify = Arc::new(SCondvar::new());
let deleting = Arc::new(AtomicBool::new(false));
let ready_signal = Arc::new(QueueSignal {
deleting: deleting.clone(),
signalled: AtomicBool::new(false),
message_channel: Mutex::new(message_channel),
});
let empty = Arc::new(SCondvar::new());
let panic_handler = PanicHandler::new_in_arc();
let queue = VerificationQueue {
engine: engine,
panic_handler: panic_handler,
ready_signal: ready_signal,
more_to_verify: more_to_verify,
verification: verification,
verifiers: Mutex::new(Vec::with_capacity(::num_cpus::get())),
deleting: deleting,
processing: RwLock::new(HashSet::new()),
empty: empty,
ticks_since_adjustment: AtomicUsize::new(0),
max_queue_size: max(config.max_queue_size, MIN_QUEUE_LIMIT),
max_mem_use: max(config.max_mem_use, MIN_MEM_LIMIT),
};
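// Spawn `num_cpus - 2` verifier threads, but always at least one.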
let thread_count = max(::num_cpus::get(), 3) - 2;
for _ in 0..thread_count {
queue.add_verifier();
}
queue
}
fn verify(verification: Arc<Verification<K>>, engine: Arc<Engine>, wait: Arc<SCondvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<SCondvar>) {
while !deleting.load(AtomicOrdering::Acquire) {
{
let mut more_to_verify = verification.more_to_verify.lock().unwrap();
if verification.unverified.lock().is_empty() && verification.verifying.lock().is_empty() {
empty.notify_all();
}
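// Block until new unverified items arrive or shutdown is requested.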
while verification.unverified.lock().is_empty() && !deleting.load(AtomicOrdering::Acquire) {
more_to_verify = wait.wait(more_to_verify).unwrap();
}
if deleting.load(AtomicOrdering::Acquire) {
return;
}
}
let item = {
// acquire these locks before getting the item to verify.
let mut unverified = verification.unverified.lock();
let mut verifying = verification.verifying.lock();
let item = match unverified.pop_front() {
Some(item) => item,
None => continue,
};
verification.sizes.unverified.fetch_sub(item.heap_size_of_children(), AtomicOrdering::SeqCst);
verifying.push_back(Verifying { hash: item.hash(), output: None });
item
};
let hash = item.hash();
let is_ready = match K::verify(item, &*engine, verification.check_seal) {
Ok(verified) => {
let mut verifying = verification.verifying.lock();
let mut idx = None;
for (i, e) in verifying.iter_mut().enumerate() {
if e.hash == hash {
idx = Some(i);
verification.sizes.verifying.fetch_add(verified.heap_size_of_children(), AtomicOrdering::SeqCst);
e.output = Some(verified);
break;
}
}
if idx == Some(0) {
// we're next!
let mut verified = verification.verified.lock();
let mut bad = verification.bad.lock();
VerificationQueue::drain_verifying(&mut verifying, &mut verified, &mut bad, &verification.verified_count, &verification.sizes);
true
} else {
false
}
},
Err(_) => {
let mut verifying = verification.verifying.lock();
let mut verified = verification.verified.lock();
let mut bad = verification.bad.lock();
bad.insert(hash.clone());
verifying.retain(|e| e.hash != hash);
if verifying.front().map_or(false, |x| x.output.is_some()) {
VerificationQueue::drain_verifying(&mut verifying, &mut verified, &mut bad, &verification.verified_count, &verification.sizes);
true
} else {
false
}
}
};
if is_ready {
// Import the block immediately
ready.set_sync();
}
}
}
fn drain_verifying(
verifying: &mut VecDeque<Verifying<K>>,
verified: &mut VecDeque<K::Verified>,
bad: &mut HashSet<H256>,
v_count: &AtomicUsize,
sizes: &Sizes,
) {
let start_len = verified.len();
let mut removed_size = 0;
let mut inserted_size = 0;
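// Pop the contiguous run of completed items at the front of `verifying`,
// marking children of bad items as bad and moving the rest into `verified`.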
while let Some(output) = verifying.front_mut().and_then(|x| x.output.take()) {
assert!(verifying.pop_front().is_some());
let size = output.heap_size_of_children();
removed_size += size;
if bad.contains(&output.parent_hash()) {
bad.insert(output.hash());
} else {
inserted_size += size;
verified.push_back(output);
}
}
v_count.fetch_add(verified.len() - start_len, AtomicOrdering::AcqRel);
sizes.verifying.fetch_sub(removed_size, AtomicOrdering::SeqCst);
sizes.verified.fetch_add(inserted_size, AtomicOrdering::SeqCst);
}
/// Clear the queue and stop verification activity.
pub fn clear(&self) {
let mut unverified = self.verification.unverified.lock();
let mut verifying = self.verification.verifying.lock();
let mut verified = self.verification.verified.lock();
unverified.clear();
verifying.clear();
verified.clear();
let sizes = &self.verification.sizes;
sizes.unverified.store(0, AtomicOrdering::Release);
sizes.verifying.store(0, AtomicOrdering::Release);
sizes.verified.store(0, AtomicOrdering::Release);
self.processing.write().clear();
}
/// Wait for the unverified and verifying queues to be empty
pub fn flush(&self) {
let mut lock = self.verification.empty.lock().unwrap();
while !self.verification.unverified.lock().is_empty() || !self.verification.verifying.lock().is_empty() {
lock = self.empty.wait(lock).unwrap();
}
}
/// Check if the item is currently in the queue
pub fn status(&self, hash: &H256) -> Status {
if self.processing.read().contains(hash) {
return Status::Queued;
}
if self.verification.bad.lock().contains(hash) {
return Status::Bad;
}
Status::Unknown
}
/// Add a block to the queue.
pub fn import(&self, input: K::Input) -> ImportResult {
let h = input.hash();
{
if self.processing.read().contains(&h) {
return Err(ImportError::AlreadyQueued.into());
}
let mut bad = self.verification.bad.lock();
if bad.contains(&h) {
return Err(ImportError::KnownBad.into());
}
if bad.contains(&input.parent_hash()) {
bad.insert(h.clone());
return Err(ImportError::KnownBad.into());
}
}
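// Run the kind-specific creation step: on failure the hash is marked bad
// immediately, on success the item joins the unverified queue and the
// verifier threads are woken.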
match K::create(input, &*self.engine) {
Ok(item) => {
self.verification.sizes.unverified.fetch_add(item.heap_size_of_children(), AtomicOrdering::SeqCst);
self.processing.write().insert(h.clone());
self.verification.unverified.lock().push_back(item);
self.verification.imported.fetch_add(1, AtomicOrdering::AcqRel);
self.more_to_verify.notify_all();
Ok(h)
},
Err(err) => {
self.verification.bad.lock().insert(h.clone());
Err(err)
}
}
}
/// Mark given item and all its children as bad. Pauses verification
/// until complete.
pub fn mark_as_bad(&self, hashes: &[H256]) {
if hashes.is_empty() {
return;
}
let mut verified_lock = self.verification.verified.lock();
let mut verified = &mut *verified_lock;
let mut bad = self.verification.bad.lock();
let mut processing = self.processing.write();
bad.reserve(hashes.len());
for hash in hashes {
bad.insert(hash.clone());
processing.remove(hash);
}
let mut new_verified = VecDeque::new();
let mut removed_size = 0;
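// Rebuild the verified queue, discarding (and marking bad) anything whose
// parent is now known to be bad.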
for output in verified.drain(..) {
if bad.contains(&output.parent_hash()) {
removed_size += output.heap_size_of_children();
bad.insert(output.hash());
processing.remove(&output.hash());
} else {
new_verified.push_back(output);
}
}
self.verification.sizes.verified.fetch_sub(removed_size, AtomicOrdering::SeqCst);
*verified = new_verified;
}
/// Mark given items as processed.
/// Returns true if the queue becomes empty.
pub fn mark_as_good(&self, hashes: &[H256]) -> bool {
if hashes.is_empty() {
return self.processing.read().is_empty();
}
let mut processing = self.processing.write();
for hash in hashes {
processing.remove(hash);
}
processing.is_empty()
}
/// Removes up to `max` verified items from the queue
pub fn drain(&self, max: usize) -> Vec<K::Verified> {
let mut verified = self.verification.verified.lock();
let count = min(max, verified.len());
let result = verified.drain(..count).collect::<Vec<_>>();
self.verification.drained.fetch_add(result.len(), AtomicOrdering::AcqRel);
let drained_size = result.iter().map(HeapSizeOf::heap_size_of_children).fold(0, |a, c| a + c);
self.verification.sizes.verified.fetch_sub(drained_size, AtomicOrdering::SeqCst);
self.ready_signal.reset();
if !verified.is_empty() {
self.ready_signal.set_async();
}
result
}
/// Get queue status.
pub fn queue_info(&self) -> QueueInfo {
use std::mem::size_of;
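// Reported memory use is the tracked heap size plus the fixed per-item
// footprint of each queue entry.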
let (unverified_len, unverified_bytes) = {
let len = self.verification.unverified.lock().len();
let size = self.verification.sizes.unverified.load(AtomicOrdering::Acquire);
(len, size + len * size_of::<K::Unverified>())
};
let (verifying_len, verifying_bytes) = {
let len = self.verification.verifying.lock().len();
let size = self.verification.sizes.verifying.load(AtomicOrdering::Acquire);
(len, size + len * size_of::<Verifying<K>>())
};
let (verified_len, verified_bytes) = {
let len = self.verification.verified.lock().len();
let size = self.verification.sizes.verified.load(AtomicOrdering::Acquire);
(len, size + len * size_of::<K::Verified>())
};
QueueInfo {
unverified_queue_size: unverified_len,
verifying_queue_size: verifying_len,
verified_queue_size: verified_len,
max_queue_size: self.max_queue_size,
max_mem_use: self.max_mem_use,
mem_used: unverified_bytes
+ verifying_bytes
+ verified_bytes
}
}
/// Optimise memory footprint of the heap fields, and adjust the number of threads
/// to better suit the workload.
pub fn collect_garbage(&self) {
// number of ticks to average queue stats over
// when deciding whether to change the number of verifiers.
const READJUSTMENT_PERIOD: usize = 12;
{
self.verification.unverified.lock().shrink_to_fit();
self.verification.verifying.lock().shrink_to_fit();
self.verification.verified.lock().shrink_to_fit();
}
self.processing.write().shrink_to_fit();
if self.ticks_since_adjustment.load(AtomicOrdering::SeqCst) == READJUSTMENT_PERIOD {
self.ticks_since_adjustment.store(0, AtomicOrdering::SeqCst);
} else {
self.ticks_since_adjustment.fetch_add(1, AtomicOrdering::SeqCst);
return;
}
let v_count = self.verification.verified_count.load(AtomicOrdering::Acquire);
let drained = self.verification.drained.load(AtomicOrdering::Acquire);
let imported = self.verification.imported.load(AtomicOrdering::Acquire);
self.verification.verified_count.store(0, AtomicOrdering::Release);
self.verification.drained.store(0, AtomicOrdering::Release);
self.verification.imported.store(0, AtomicOrdering::Release);
// select which side of the queue is the bottleneck.
let target = min(drained, imported);
// compute the average rate of verification per thread and determine
// how many are necessary to match the rate of draining.
let num_verifiers = self.verifiers.lock().len();
let v_count_per = v_count as f64 / num_verifiers as f64;
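// With fewer than 20 verifications this period the sample is too noisy,
// so fall back to a single verifier.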
let needed = if v_count < 20 {
1
} else {
(target as f64 / v_count_per as f64).ceil() as usize
};
trace!(target: "verification", "v_rate_per={}, target={}, scaling to {} verifiers",
v_count_per, target, needed);
for _ in num_verifiers..needed {
self.add_verifier();
}
for _ in needed..num_verifiers {
self.remove_verifier();
}
}
// add a verifier thread if possible.
fn add_verifier(&self) {
let mut verifiers = self.verifiers.lock();
let len = verifiers.len();
if len == ::num_cpus::get() {
return;
}
debug!(target: "verification", "Adding verification thread #{}", len);
let deleting = Arc::new(AtomicBool::new(false));
let panic_handler = self.panic_handler.clone();
let verification = self.verification.clone();
let engine = self.engine.clone();
let wait = self.more_to_verify.clone();
let ready = self.ready_signal.clone();
let empty = self.empty.clone();
verifiers.push(VerifierHandle {
deleting: deleting.clone(),
thread: thread::Builder::new()
.name(format!("Verifier #{}", len))
.spawn(move || {
panic_handler.catch_panic(move || {
VerificationQueue::verify(verification, engine, wait, ready, deleting, empty)
}).unwrap()
})
.expect("Failed to create verifier thread.")
});
}
// remove a verifier thread if possible.
fn remove_verifier(&self) {
let mut verifiers = self.verifiers.lock();
let len = verifiers.len();
// never remove the last thread.
if len == 1 {
return;
}
debug!(target: "verification", "Removing verification thread #{}", len - 1);
if let Some(handle) = verifiers.pop() {
handle.conclude();
self.more_to_verify.notify_all(); // to ensure it's joinable immediately.
handle.join();
}
}
}
impl<K: Kind> MayPanic for VerificationQueue<K> {
fn on_panic<F>(&self, closure: F) where F: OnPanicListener {
self.panic_handler.on_panic(closure);
}
}
impl<K: Kind> Drop for VerificationQueue<K> {
fn drop(&mut self) {
trace!(target: "shutdown", "[VerificationQueue] Closing...");
self.clear();
self.deleting.store(true, AtomicOrdering::Release);
let mut verifiers = self.verifiers.lock();
// first pass to signal conclusion. must be done before
// notify or deadlock possible.
for handle in verifiers.iter() {
handle.conclude();
}
self.more_to_verify.notify_all();
// second pass to join.
for handle in verifiers.drain(..) {
handle.join();
}
trace!(target: "shutdown", "[VerificationQueue] Closed.");
}
}
#[cfg(test)]
mod tests {
use util::*;
use io::*;
use spec::*;
use super::{BlockQueue, Config};
use super::kind::blocks::Unverified;
use tests::helpers::*;
use error::*;
use views::*;
fn get_test_queue() -> BlockQueue {
let spec = get_test_spec();
let engine = spec.engine;
BlockQueue::new(Config::default(), engine, IoChannel::disconnected(), true)
}
#[test]
fn can_be_created() {
// TODO better test
let spec = Spec::new_test();
let engine = spec.engine;
let _ = BlockQueue::new(Config::default(), engine, IoChannel::disconnected(), true);
}
#[test]
fn can_import_blocks() {
let queue = get_test_queue();
if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) {
panic!("error importing block that is valid by definition({:?})", e);
}
}
#[test]
fn returns_error_for_duplicates() {
let queue = get_test_queue();
if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) {
panic!("error importing block that is valid by definition({:?})", e);
}
let duplicate_import = queue.import(Unverified::new(get_good_dummy_block()));
match duplicate_import {
Err(e) => {
match e {
Error::Import(ImportError::AlreadyQueued) => {},
_ => { panic!("must return AlreadyQueued error"); }
}
}
Ok(_) => { panic!("must produce error"); }
}
}
#[test]
fn returns_ok_for_drained_duplicates() {
let queue = get_test_queue();
let block = get_good_dummy_block();
let hash = BlockView::new(&block).header().hash().clone();
if let Err(e) = queue.import(Unverified::new(block)) {
panic!("error importing block that is valid by definition({:?})", e);
}
queue.flush();
queue.drain(10);
queue.mark_as_good(&[ hash ]);
if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) {
panic!("error importing block that has already been drained ({:?})", e);
}
}
#[test]
fn returns_empty_once_finished() {
let queue = get_test_queue();
queue.import(Unverified::new(get_good_dummy_block()))
.expect("error importing block that is valid by definition");
queue.flush();
queue.drain(1);
assert!(queue.queue_info().is_empty());
}
#[test]
fn test_mem_limit() {
let spec = get_test_spec();
let engine = spec.engine;
let mut config = Config::default();
config.max_mem_use = super::MIN_MEM_LIMIT; // empty queue uses about 15000
let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true);
assert!(!queue.queue_info().is_full());
let mut blocks = get_good_dummy_block_seq(50);
for b in blocks.drain(..) {
queue.import(Unverified::new(b)).unwrap();
}
assert!(queue.queue_info().is_full());
}
}