openethereum/ethcore/src/verification/queue/mod.rs

Ignoring revisions in .git-blame-ignore-revs. Click here to bypass and see the normal blame view.

1049 lines
34 KiB
Rust
Raw Normal View History

// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.
2016-02-05 13:40:41 +01:00
// Parity Ethereum is free software: you can redistribute it and/or modify
2016-02-05 13:40:41 +01:00
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Ethereum is distributed in the hope that it will be useful,
2016-02-05 13:40:41 +01:00
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
2016-02-05 13:40:41 +01:00
2016-04-06 10:07:24 +02:00
//! A queue of blocks. Sits between network or other I/O and the `BlockChain`.
2016-01-21 23:33:52 +01:00
//! Sorts them ready for blockchain insertion.
use client::ClientIoMessage;
use engines::EthEngine;
2016-10-04 20:09:54 +02:00
use error::{BlockError, Error, ErrorKind, ImportErrorKind};
use ethereum_types::{H256, U256};
use heapsize::HeapSizeOf;
use io::*;
use len_caching_lock::LenCachingMutex;
use parking_lot::{Condvar, Mutex, RwLock};
2017-07-29 17:12:07 +02:00
use std::{
2020-08-05 06:08:03 +02:00
cmp,
2017-07-29 17:12:07 +02:00
collections::{HashMap, HashSet, VecDeque},
2020-08-05 06:08:03 +02:00
sync::{
2016-10-04 20:09:54 +02:00
atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrdering},
2020-08-05 06:08:03 +02:00
Arc,
},
thread::{self, JoinHandle},
2020-08-05 06:08:03 +02:00
};
2016-01-09 10:16:35 +01:00
use self::kind::{BlockLike, Kind};
pub use types::verification_queue_info::VerificationQueueInfo as QueueInfo;
pub mod kind;
2016-02-25 14:09:39 +01:00
2016-02-25 17:14:45 +01:00
const MIN_MEM_LIMIT: usize = 16384;
const MIN_QUEUE_LIMIT: usize = 512;
/// Type alias for block queue convenience.
pub type BlockQueue = VerificationQueue<self::kind::Blocks>;
/// Type alias for header queue convenience.
pub type HeaderQueue = VerificationQueue<self::kind::Headers>;
/// Verification queue configuration
#[derive(Debug, PartialEq, Clone)]
pub struct Config {
/// Maximum number of items to keep in unverified queue.
2016-02-25 14:09:39 +01:00
/// When the limit is reached, is_full returns true.
pub max_queue_size: usize,
/// Maximum heap memory to use.
/// When the limit is reached, is_full returns true.
pub max_mem_use: usize,
/// Settings for the number of verifiers and adaptation strategy.
pub verifier_settings: VerifierSettings,
2016-02-25 14:09:39 +01:00
}
impl Default for Config {
2016-02-25 14:09:39 +01:00
fn default() -> Self {
Config {
2016-02-25 14:09:39 +01:00
max_queue_size: 30000,
max_mem_use: 50 * 1024 * 1024,
verifier_settings: VerifierSettings::default(),
}
}
}
/// Verifier settings.
#[derive(Debug, PartialEq, Clone)]
pub struct VerifierSettings {
/// Whether to scale amount of verifiers according to load.
// Todo: replace w/ strategy enum?
pub scale_verifiers: bool,
/// Beginning amount of verifiers.
pub num_verifiers: usize,
}
impl Default for VerifierSettings {
fn default() -> Self {
VerifierSettings {
scale_verifiers: false,
num_verifiers: ::num_cpus::get(),
2016-02-25 14:09:39 +01:00
}
}
}
// pool states
enum State {
// all threads with id < inner value are to work.
Work(usize),
Exit,
}
/// An item which is in the process of being verified.
pub struct Verifying<K: Kind> {
hash: H256,
output: Option<K::Verified>,
}
2016-01-22 04:54:38 +01:00
impl<K: Kind> HeapSizeOf for Verifying<K> {
fn heap_size_of_children(&self) -> usize {
self.output.heap_size_of_children()
}
}
/// Status of items in the queue.
pub enum Status {
/// Currently queued.
Queued,
/// Known to be bad.
Bad,
/// Unknown.
Unknown,
2016-01-25 18:56:36 +01:00
}
impl Into<::types::block_status::BlockStatus> for Status {
fn into(self) -> ::types::block_status::BlockStatus {
use types::block_status::BlockStatus;
match self {
Status::Queued => BlockStatus::Queued,
Status::Bad => BlockStatus::Bad,
Status::Unknown => BlockStatus::Unknown,
}
}
}
// the internal queue sizes.
struct Sizes {
unverified: AtomicUsize,
verifying: AtomicUsize,
verified: AtomicUsize,
}
/// A queue of items to be verified. Sits between network or other I/O and the `BlockChain`.
/// Keeps them in the same order as inserted, minus invalid items.
pub struct VerificationQueue<K: Kind> {
2020-07-29 10:36:15 +02:00
engine: Arc<dyn EthEngine>,
more_to_verify: Arc<Condvar>,
verification: Arc<Verification<K>>,
2016-01-17 23:07:58 +01:00
deleting: Arc<AtomicBool>,
ready_signal: Arc<QueueSignal>,
empty: Arc<Condvar>,
processing: RwLock<HashMap<H256, U256>>, // hash to difficulty
2016-10-04 20:09:54 +02:00
ticks_since_adjustment: AtomicUsize,
2016-02-25 14:09:39 +01:00
max_queue_size: usize,
max_mem_use: usize,
scale_verifiers: bool,
2016-12-19 17:15:54 +01:00
verifier_handles: Vec<JoinHandle<()>>,
state: Arc<(Mutex<State>, Condvar)>,
total_difficulty: RwLock<U256>,
2016-01-17 23:07:58 +01:00
}
struct QueueSignal {
deleting: Arc<AtomicBool>,
2016-01-17 23:07:58 +01:00
signalled: AtomicBool,
2016-10-30 09:56:34 +01:00
message_channel: Mutex<IoChannel<ClientIoMessage>>,
2016-01-17 23:07:58 +01:00
}
impl QueueSignal {
fn set_sync(&self) {
// Do not signal when we are about to close
if self.deleting.load(AtomicOrdering::Relaxed) {
return;
}
2020-08-05 06:08:03 +02:00
2016-01-17 23:07:58 +01:00
if self
.signalled
.compare_and_swap(false, true, AtomicOrdering::Relaxed)
== false
{
2016-10-30 09:56:34 +01:00
let channel = self.message_channel.lock().clone();
if let Err(e) = channel.send_sync(ClientIoMessage::BlockVerified) {
debug!("Error sending BlockVerified message: {:?}", e);
}
}
2020-08-05 06:08:03 +02:00
}
fn set_async(&self) {
// Do not signal when we are about to close
if self.deleting.load(AtomicOrdering::Relaxed) {
return;
}
2020-08-05 06:08:03 +02:00
2016-01-17 23:07:58 +01:00
if self
.signalled
.compare_and_swap(false, true, AtomicOrdering::Relaxed)
== false
{
2016-10-30 09:56:34 +01:00
let channel = self.message_channel.lock().clone();
if let Err(e) = channel.send(ClientIoMessage::BlockVerified) {
debug!("Error sending BlockVerified message: {:?}", e);
2016-01-17 23:07:58 +01:00
}
}
2020-08-05 06:08:03 +02:00
}
2016-01-17 23:07:58 +01:00
fn reset(&self) {
self.signalled.store(false, AtomicOrdering::Relaxed);
}
}
struct Verification<K: Kind> {
2016-02-22 00:36:59 +01:00
// All locks must be captured in the order declared here.
unverified: LenCachingMutex<VecDeque<K::Unverified>>,
verifying: LenCachingMutex<VecDeque<Verifying<K>>>,
verified: LenCachingMutex<VecDeque<K::Verified>>,
2016-02-21 19:46:29 +01:00
bad: Mutex<HashSet<H256>>,
sizes: Sizes,
check_seal: bool,
}
2016-01-09 10:16:35 +01:00
impl<K: Kind> VerificationQueue<K> {
2016-01-10 23:37:09 +01:00
/// Creates a new queue instance.
pub fn new(
config: Config,
2020-07-29 10:36:15 +02:00
engine: Arc<dyn EthEngine>,
message_channel: IoChannel<ClientIoMessage>,
check_seal: bool,
) -> Self {
2016-02-21 19:46:29 +01:00
let verification = Arc::new(Verification {
unverified: LenCachingMutex::new(VecDeque::new()),
verifying: LenCachingMutex::new(VecDeque::new()),
verified: LenCachingMutex::new(VecDeque::new()),
2016-02-21 19:46:29 +01:00
bad: Mutex::new(HashSet::new()),
sizes: Sizes {
unverified: AtomicUsize::new(0),
verifying: AtomicUsize::new(0),
verified: AtomicUsize::new(0),
},
check_seal: check_seal,
2016-02-21 19:46:29 +01:00
});
let more_to_verify = Arc::new(Condvar::new());
2016-01-17 23:07:58 +01:00
let deleting = Arc::new(AtomicBool::new(false));
let ready_signal = Arc::new(QueueSignal {
deleting: deleting.clone(),
signalled: AtomicBool::new(false),
2016-10-30 09:56:34 +01:00
message_channel: Mutex::new(message_channel),
});
let empty = Arc::new(Condvar::new());
let scale_verifiers = config.verifier_settings.scale_verifiers;
2020-08-05 06:08:03 +02:00
let max_verifiers = ::num_cpus::get();
2017-07-29 17:12:07 +02:00
let default_amount = cmp::max(
1,
cmp::min(max_verifiers, config.verifier_settings.num_verifiers),
);
2020-08-05 06:08:03 +02:00
// if `auto-scaling` is enabled spawn up extra threads as they might be needed
// otherwise just spawn the number of threads specified by the config
let number_of_threads = if scale_verifiers {
max_verifiers
} else {
cmp::min(default_amount, max_verifiers)
};
2020-08-05 06:08:03 +02:00
2016-12-19 17:15:54 +01:00
let state = Arc::new((Mutex::new(State::Work(default_amount)), Condvar::new()));
let mut verifier_handles = Vec::with_capacity(number_of_threads);
2020-08-05 06:08:03 +02:00
debug!(target: "verification", "Allocating {} verifiers, {} initially active", number_of_threads, default_amount);
debug!(target: "verification", "Verifier auto-scaling {}", if scale_verifiers { "enabled" } else { "disabled" });
2020-08-05 06:08:03 +02:00
for i in 0..number_of_threads {
debug!(target: "verification", "Adding verification thread #{}", i);
2020-08-05 06:08:03 +02:00
let verification = verification.clone();
let engine = engine.clone();
let wait = more_to_verify.clone();
let ready = ready_signal.clone();
let empty = empty.clone();
let state = state.clone();
2020-08-05 06:08:03 +02:00
let handle = thread::Builder::new()
.name(format!("Verifier #{}", i))
.spawn(move || {
2017-06-22 19:00:53 +02:00
VerificationQueue::verify(verification, engine, wait, ready, empty, state, i)
})
.expect("Failed to create verifier thread.");
verifier_handles.push(handle);
}
2020-08-05 06:08:03 +02:00
VerificationQueue {
engine: engine,
ready_signal: ready_signal,
more_to_verify: more_to_verify,
verification: verification,
deleting: deleting,
processing: RwLock::new(HashMap::new()),
empty: empty,
2016-10-04 20:09:54 +02:00
ticks_since_adjustment: AtomicUsize::new(0),
2017-07-29 17:12:07 +02:00
max_queue_size: cmp::max(config.max_queue_size, MIN_QUEUE_LIMIT),
max_mem_use: cmp::max(config.max_mem_use, MIN_MEM_LIMIT),
scale_verifiers: scale_verifiers,
verifier_handles: verifier_handles,
state: state,
total_difficulty: RwLock::new(0.into()),
2020-08-05 06:08:03 +02:00
}
2016-01-17 23:07:58 +01:00
}
2020-08-05 06:08:03 +02:00
fn verify(
verification: Arc<Verification<K>>,
2020-07-29 10:36:15 +02:00
engine: Arc<dyn EthEngine>,
wait: Arc<Condvar>,
ready: Arc<QueueSignal>,
empty: Arc<Condvar>,
state: Arc<(Mutex<State>, Condvar)>,
id: usize,
) {
loop {
// check current state.
{
let mut cur_state = state.0.lock();
while let State::Work(x) = *cur_state {
// sleep until this thread is required.
if id < x {
break;
}
2020-08-05 06:08:03 +02:00
debug!(target: "verification", "verifier {} sleeping", id);
state.1.wait(&mut cur_state);
2016-12-19 17:15:54 +01:00
debug!(target: "verification", "verifier {} waking up", id);
}
2020-08-05 06:08:03 +02:00
2016-12-19 17:15:54 +01:00
if let State::Exit = *cur_state {
debug!(target: "verification", "verifier {} exiting", id);
break;
2020-08-05 06:08:03 +02:00
}
}
2020-08-05 06:08:03 +02:00
// wait for work if empty.
2016-01-17 23:07:58 +01:00
{
let mut unverified = verification.unverified.lock();
2020-08-05 06:08:03 +02:00
if unverified.is_empty() && verification.verifying.lock().is_empty() {
2016-01-25 19:20:34 +01:00
empty.notify_all();
}
2020-08-05 06:08:03 +02:00
while unverified.is_empty() {
if let State::Exit = *state.0.lock() {
debug!(target: "verification", "verifier {} exiting", id);
return;
}
2020-08-05 06:08:03 +02:00
wait.wait(unverified.inner_mut());
2016-01-17 23:07:58 +01:00
}
2020-08-05 06:08:03 +02:00
if let State::Exit = *state.0.lock() {
2016-12-19 17:15:54 +01:00
debug!(target: "verification", "verifier {} exiting", id);
2020-08-05 06:08:03 +02:00
return;
}
2016-12-19 17:15:54 +01:00
}
2020-08-05 06:08:03 +02:00
2016-12-19 17:15:54 +01:00
// do work.
let item = {
// acquire these locks before getting the item to verify.
let mut unverified = verification.unverified.lock();
let mut verifying = verification.verifying.lock();
2020-08-05 06:08:03 +02:00
let item = match unverified.pop_front() {
Some(item) => item,
None => continue,
};
2020-08-05 06:08:03 +02:00
verification
2020-08-05 06:08:03 +02:00
.sizes
.unverified
.fetch_sub(item.heap_size_of_children(), AtomicOrdering::SeqCst);
verifying.push_back(Verifying {
hash: item.hash(),
output: None,
});
item
2016-01-17 23:07:58 +01:00
};
2020-08-05 06:08:03 +02:00
let hash = item.hash();
let is_ready = match K::verify(item, &*engine, verification.check_seal) {
2016-01-17 23:07:58 +01:00
Ok(verified) => {
let mut verifying = verification.verifying.lock();
let mut idx = None;
for (i, e) in verifying.iter_mut().enumerate() {
if e.hash == hash {
idx = Some(i);
2020-08-05 06:08:03 +02:00
verification.sizes.verifying.fetch_add(
verified.heap_size_of_children(),
AtomicOrdering::SeqCst,
);
e.output = Some(verified);
2016-01-17 23:07:58 +01:00
break;
}
}
2020-08-05 06:08:03 +02:00
if idx == Some(0) {
2016-01-17 23:07:58 +01:00
// we're next!
let mut verified = verification.verified.lock();
let mut bad = verification.bad.lock();
2016-11-17 18:10:09 +01:00
VerificationQueue::drain_verifying(
&mut verifying,
&mut verified,
&mut bad,
&verification.sizes,
);
true
} else {
false
2020-08-05 06:08:03 +02:00
}
2016-01-17 23:07:58 +01:00
}
Err(_) => {
let mut verifying = verification.verifying.lock();
let mut verified = verification.verified.lock();
let mut bad = verification.bad.lock();
2020-08-05 06:08:03 +02:00
bad.insert(hash.clone());
verifying.retain(|e| e.hash != hash);
2020-08-05 06:08:03 +02:00
if verifying.front().map_or(false, |x| x.output.is_some()) {
2016-11-17 18:10:09 +01:00
VerificationQueue::drain_verifying(
&mut verifying,
&mut verified,
&mut bad,
&verification.sizes,
);
true
} else {
false
2016-01-17 23:07:58 +01:00
}
2020-08-05 06:08:03 +02:00
}
};
if is_ready {
// Import the block immediately
ready.set_sync();
2016-01-17 23:07:58 +01:00
}
2020-08-05 06:08:03 +02:00
}
2016-01-17 23:07:58 +01:00
}
2020-08-05 06:08:03 +02:00
2016-10-04 20:09:54 +02:00
fn drain_verifying(
verifying: &mut VecDeque<Verifying<K>>,
verified: &mut VecDeque<K::Verified>,
bad: &mut HashSet<H256>,
sizes: &Sizes,
2016-10-04 20:09:54 +02:00
) {
let mut removed_size = 0;
let mut inserted_size = 0;
2020-08-05 06:08:03 +02:00
while let Some(output) = verifying.front_mut().and_then(|x| x.output.take()) {
assert!(verifying.pop_front().is_some());
let size = output.heap_size_of_children();
removed_size += size;
2020-08-05 06:08:03 +02:00
if bad.contains(&output.parent_hash()) {
bad.insert(output.hash());
} else {
inserted_size += size;
verified.push_back(output);
2020-08-05 06:08:03 +02:00
}
2016-01-17 23:07:58 +01:00
}
2020-08-05 06:08:03 +02:00
sizes
.verifying
.fetch_sub(removed_size, AtomicOrdering::SeqCst);
sizes
.verified
.fetch_add(inserted_size, AtomicOrdering::SeqCst);
2016-01-09 10:16:35 +01:00
}
2020-08-05 06:08:03 +02:00
2016-01-10 23:37:09 +01:00
/// Clear the queue and stop verification activity.
2016-02-21 19:46:29 +01:00
pub fn clear(&self) {
let mut unverified = self.verification.unverified.lock();
let mut verifying = self.verification.verifying.lock();
let mut verified = self.verification.verified.lock();
2016-02-21 19:46:29 +01:00
unverified.clear();
verifying.clear();
verified.clear();
2020-08-05 06:08:03 +02:00
let sizes = &self.verification.sizes;
sizes.unverified.store(0, AtomicOrdering::Release);
sizes.verifying.store(0, AtomicOrdering::Release);
sizes.verified.store(0, AtomicOrdering::Release);
*self.total_difficulty.write() = 0.into();
2020-08-05 06:08:03 +02:00
self.processing.write().clear();
2016-01-09 10:16:35 +01:00
}
2020-08-05 06:08:03 +02:00
2016-02-21 19:46:29 +01:00
/// Wait for unverified queue to be empty
pub fn flush(&self) {
let mut unverified = self.verification.unverified.lock();
while !unverified.is_empty() || !self.verification.verifying.lock().is_empty() {
self.empty.wait(unverified.inner_mut());
2020-08-05 06:08:03 +02:00
}
2016-01-25 23:24:51 +01:00
}
2020-08-05 06:08:03 +02:00
/// Check if the item is currently in the queue
pub fn status(&self, hash: &H256) -> Status {
if self.processing.read().contains_key(hash) {
return Status::Queued;
2016-02-02 12:12:32 +01:00
}
if self.verification.bad.lock().contains(hash) {
return Status::Bad;
2016-02-02 12:12:32 +01:00
}
Status::Unknown
2016-02-02 12:12:32 +01:00
}
2020-08-05 06:08:03 +02:00
2016-01-10 23:37:09 +01:00
/// Add a block to the queue.
pub fn import(&self, input: K::Input) -> Result<H256, (Option<K::Input>, Error)> {
let hash = input.hash();
let raw_hash = input.raw_hash();
2016-01-17 23:07:58 +01:00
{
if self.processing.read().contains_key(&hash) {
bail!((
Some(input),
ErrorKind::Import(ImportErrorKind::AlreadyQueued).into()
));
2016-02-21 19:46:29 +01:00
}
2020-08-05 06:08:03 +02:00
let mut bad = self.verification.bad.lock();
if bad.contains(&hash) || bad.contains(&raw_hash) {
bail!((
Some(input),
ErrorKind::Import(ImportErrorKind::KnownBad).into()
));
2016-01-17 23:07:58 +01:00
}
2020-08-05 06:08:03 +02:00
if bad.contains(&input.parent_hash()) {
bad.insert(hash);
bail!((
Some(input),
ErrorKind::Import(ImportErrorKind::KnownBad).into()
));
2020-08-05 06:08:03 +02:00
}
2016-01-17 23:07:58 +01:00
}
2020-08-05 06:08:03 +02:00
match K::create(input, &*self.engine, self.verification.check_seal) {
Ok(item) => {
if self
.processing
.write()
.insert(hash, item.difficulty())
.is_some()
{
bail!((
None,
ErrorKind::Import(ImportErrorKind::AlreadyQueued).into()
));
}
self.verification
.sizes
.unverified
.fetch_add(item.heap_size_of_children(), AtomicOrdering::SeqCst);
2020-08-05 06:08:03 +02:00
{
let mut td = self.total_difficulty.write();
*td = *td + item.difficulty();
}
self.verification.unverified.lock().push_back(item);
2016-01-17 23:07:58 +01:00
self.more_to_verify.notify_all();
Ok(hash)
2016-01-17 23:07:58 +01:00
}
Err((input, err)) => {
2016-12-19 14:47:48 +01:00
match err {
// Don't mark future blocks as bad.
Error(ErrorKind::Block(BlockError::TemporarilyInvalid(_)), _) => {}
// If the transaction root or uncles hash is invalid, it doesn't necessarily mean
// that the header is invalid. We might have just received a malformed block body,
// so we shouldn't put the header hash to `bad`.
//
// We still put the entire `Item` hash to bad, so that we can early reject
// the items that are malformed.
Error(ErrorKind::Block(BlockError::InvalidTransactionsRoot(_)), _)
| Error(ErrorKind::Block(BlockError::InvalidUnclesHash(_)), _) => {
self.verification.bad.lock().insert(raw_hash);
}
2016-12-19 14:47:48 +01:00
_ => {
self.verification.bad.lock().insert(hash);
2020-08-05 06:08:03 +02:00
}
2016-12-19 14:47:48 +01:00
}
Err((Some(input), err))
2020-08-05 06:08:03 +02:00
}
}
2016-01-17 23:07:58 +01:00
}
2020-08-05 06:08:03 +02:00
/// Mark given item and all its children as bad. pauses verification
/// until complete.
pub fn mark_as_bad(&self, hashes: &[H256]) {
if hashes.is_empty() {
2016-03-10 00:21:07 +01:00
return;
}
let mut verified_lock = self.verification.verified.lock();
2017-10-15 15:10:20 +02:00
let verified = &mut *verified_lock;
let mut bad = self.verification.bad.lock();
let mut processing = self.processing.write();
bad.reserve(hashes.len());
for hash in hashes {
bad.insert(hash.clone());
if let Some(difficulty) = processing.remove(hash) {
let mut td = self.total_difficulty.write();
*td = *td - difficulty;
2020-08-05 06:08:03 +02:00
}
}
2020-08-05 06:08:03 +02:00
2016-01-17 23:07:58 +01:00
let mut new_verified = VecDeque::new();
let mut removed_size = 0;
for output in verified.drain(..) {
if bad.contains(&output.parent_hash()) {
removed_size += output.heap_size_of_children();
bad.insert(output.hash());
if let Some(difficulty) = processing.remove(&output.hash()) {
let mut td = self.total_difficulty.write();
*td = *td - difficulty;
}
2016-02-24 17:01:29 +01:00
} else {
new_verified.push_back(output);
2020-08-05 06:08:03 +02:00
}
2016-01-17 23:07:58 +01:00
}
2020-08-05 06:08:03 +02:00
self.verification
.sizes
.verified
.fetch_sub(removed_size, AtomicOrdering::SeqCst);
2016-02-21 19:46:29 +01:00
*verified = new_verified;
2016-01-17 23:07:58 +01:00
}
2020-08-05 06:08:03 +02:00
/// Mark given item as processed.
/// Returns true if the queue becomes empty.
pub fn mark_as_good(&self, hashes: &[H256]) -> bool {
if hashes.is_empty() {
return self.processing.read().is_empty();
2016-03-10 00:21:07 +01:00
}
let mut processing = self.processing.write();
for hash in hashes {
if let Some(difficulty) = processing.remove(hash) {
let mut td = self.total_difficulty.write();
*td = *td - difficulty;
2020-08-05 06:08:03 +02:00
}
}
processing.is_empty()
2016-02-02 12:12:32 +01:00
}
2020-08-05 06:08:03 +02:00
/// Removes up to `max` verified items from the queue
pub fn drain(&self, max: usize) -> Vec<K::Verified> {
let mut verified = self.verification.verified.lock();
2017-07-29 17:12:07 +02:00
let count = cmp::min(max, verified.len());
let result = verified.drain(..count).collect::<Vec<_>>();
2020-08-05 06:08:03 +02:00
let drained_size = result
.iter()
.map(HeapSizeOf::heap_size_of_children)
.fold(0, |a, c| a + c);
self.verification
.sizes
.verified
.fetch_sub(drained_size, AtomicOrdering::SeqCst);
2020-08-05 06:08:03 +02:00
2016-01-17 23:07:58 +01:00
self.ready_signal.reset();
2016-02-21 19:46:29 +01:00
if !verified.is_empty() {
self.ready_signal.set_async();
2016-01-22 04:54:38 +01:00
}
2016-01-17 23:07:58 +01:00
result
}
2020-08-05 06:08:03 +02:00
/// Returns true if there is nothing currently in the queue.
pub fn is_empty(&self) -> bool {
let v = &self.verification;
2020-08-05 06:08:03 +02:00
v.unverified.load_len() == 0 && v.verifying.load_len() == 0 && v.verified.load_len() == 0
}
2020-08-05 06:08:03 +02:00
2016-01-22 04:54:38 +01:00
/// Get queue status.
pub fn queue_info(&self) -> QueueInfo {
use std::mem::size_of;
2020-08-05 06:08:03 +02:00
let (unverified_len, unverified_bytes) = {
let len = self.verification.unverified.load_len();
let size = self
.verification
.sizes
.unverified
.load(AtomicOrdering::Acquire);
2020-08-05 06:08:03 +02:00
(len, size + len * size_of::<K::Unverified>())
};
let (verifying_len, verifying_bytes) = {
let len = self.verification.verifying.load_len();
let size = self
.verification
.sizes
.verifying
.load(AtomicOrdering::Acquire);
(len, size + len * size_of::<Verifying<K>>())
};
let (verified_len, verified_bytes) = {
let len = self.verification.verified.load_len();
let size = self
.verification
.sizes
.verified
.load(AtomicOrdering::Acquire);
(len, size + len * size_of::<K::Verified>())
};
2020-08-05 06:08:03 +02:00
QueueInfo {
unverified_queue_size: unverified_len,
verifying_queue_size: verifying_len,
verified_queue_size: verified_len,
2016-02-25 14:09:39 +01:00
max_queue_size: self.max_queue_size,
max_mem_use: self.max_mem_use,
mem_used: unverified_bytes + verifying_bytes + verified_bytes,
2016-01-22 04:54:38 +01:00
}
2020-08-05 06:08:03 +02:00
}
/// Get the total difficulty of all the blocks in the queue.
pub fn total_difficulty(&self) -> U256 {
self.total_difficulty.read().clone()
}
2020-08-05 06:08:03 +02:00
/// Get the current number of working verifiers.
pub fn num_verifiers(&self) -> usize {
match *self.state.0.lock() {
State::Work(x) => x,
State::Exit => panic!("state only set to exit on drop; queue live now; qed"),
}
2020-08-05 06:08:03 +02:00
}
/// Optimise memory footprint of the heap fields, and adjust the number of threads
/// to better suit the workload.
2016-03-09 11:38:53 +01:00
pub fn collect_garbage(&self) {
// number of ticks to average queue stats over
// when deciding whether to change the number of verifiers.
2016-11-21 14:23:34 +01:00
#[cfg(not(test))]
2016-11-17 13:10:33 +01:00
const READJUSTMENT_PERIOD: usize = 12;
2020-08-05 06:08:03 +02:00
2016-11-21 14:23:34 +01:00
#[cfg(test)]
const READJUSTMENT_PERIOD: usize = 1;
2020-08-05 06:08:03 +02:00
2016-11-17 18:10:09 +01:00
let (u_len, v_len) = {
let u_len = {
let mut q = self.verification.unverified.lock();
q.shrink_to_fit();
q.len()
};
self.verification.verifying.lock().shrink_to_fit();
2020-08-05 06:08:03 +02:00
2016-11-17 18:10:09 +01:00
let v_len = {
let mut q = self.verification.verified.lock();
q.shrink_to_fit();
q.len()
};
2020-08-05 06:08:03 +02:00
2016-11-17 18:10:09 +01:00
(u_len as isize, v_len as isize)
};
2020-08-05 06:08:03 +02:00
self.processing.write().shrink_to_fit();
2020-08-05 06:08:03 +02:00
if !self.scale_verifiers {
return;
}
2020-08-05 06:08:03 +02:00
2016-11-21 14:23:34 +01:00
if self
.ticks_since_adjustment
.fetch_add(1, AtomicOrdering::SeqCst)
+ 1
>= READJUSTMENT_PERIOD
{
2016-10-04 20:09:54 +02:00
self.ticks_since_adjustment.store(0, AtomicOrdering::SeqCst);
} else {
return;
}
2020-08-05 06:08:03 +02:00
let current = self.num_verifiers();
2020-08-05 06:08:03 +02:00
2016-11-17 18:10:09 +01:00
let diff = (v_len - u_len).abs();
let total = v_len + u_len;
2020-08-05 06:08:03 +02:00
2016-11-17 18:10:09 +01:00
self.scale_verifiers(if u_len < 20 {
1
} else if diff <= total / 10 {
current
} else if v_len > u_len {
current - 1
} else {
current + 1
});
}
2020-08-05 06:08:03 +02:00
// wake up or sleep verifiers to get as close to the target as
// possible, never going over the amount of initially allocated threads
// or below 1.
fn scale_verifiers(&self, target: usize) {
let current = self.num_verifiers();
2017-07-29 17:12:07 +02:00
let target = cmp::min(self.verifier_handles.len(), target);
let target = cmp::max(1, target);
2020-08-05 06:08:03 +02:00
debug!(target: "verification", "Scaling from {} to {} verifiers", current, target);
2020-08-05 06:08:03 +02:00
*self.state.0.lock() = State::Work(target);
self.state.1.notify_all();
2016-01-22 04:54:38 +01:00
}
2016-01-17 23:07:58 +01:00
}
impl<K: Kind> Drop for VerificationQueue<K> {
2016-01-17 23:07:58 +01:00
fn drop(&mut self) {
trace!(target: "shutdown", "[VerificationQueue] Closing...");
2016-01-17 23:07:58 +01:00
self.clear();
self.deleting.store(true, AtomicOrdering::SeqCst);
2020-08-05 06:08:03 +02:00
// set exit state; should be done before `more_to_verify` notification.
*self.state.0.lock() = State::Exit;
self.state.1.notify_all();
2020-08-05 06:08:03 +02:00
2016-12-19 17:15:54 +01:00
// acquire this lock to force threads to reach the waiting point
// if they're in-between the exit check and the more_to_verify wait.
{
let _unverified = self.verification.unverified.lock();
2016-12-19 17:15:54 +01:00
self.more_to_verify.notify_all();
}
2020-08-05 06:08:03 +02:00
// wait for all verifier threads to join.
for thread in self.verifier_handles.drain(..) {
thread
.join()
.expect("Propagating verifier thread panic on shutdown");
2016-01-17 23:07:58 +01:00
}
2020-08-05 06:08:03 +02:00
trace!(target: "shutdown", "[VerificationQueue] Closed.");
2016-01-15 12:26:04 +01:00
}
2016-01-09 10:16:35 +01:00
}
2016-01-18 00:24:20 +01:00
#[cfg(test)]
mod tests {
use super::{kind::blocks::Unverified, BlockQueue, Config, State};
use bytes::Bytes;
use error::*;
use io::*;
use spec::Spec;
Private transactions integration pr (#6422) * Private transaction message added * Empty line removed * Private transactions logic removed from client into the separate module * Fixed compilation after merge with head * Signed private transaction message added as well * Comments after the review fixed * Private tx execution * Test update * Renamed some methods * Fixed some tests * Reverted submodules * Fixed build * Private transaction message added * Empty line removed * Private transactions logic removed from client into the separate module * Fixed compilation after merge with head * Signed private transaction message added as well * Comments after the review fixed * Encrypted private transaction message and signed reply added * Private tx execution * Test update * Main scenario completed * Merged with the latest head * Private transactions API * Comments after review fixed * Parameters for private transactions added to parity arguments * New files added * New API methods added * Do not process packets from unconfirmed peers * Merge with ptm_ss branch * Encryption and permissioning with key server added * Fixed compilation after merge * Version of Parity protocol incremented in order to support private transactions * Doc strings for constants added * Proper format for doc string added * fixed some encryptor.rs grumbles * Private transactions functionality moved to the separate crate * Refactoring in order to remove late initialisation * Tests fixed after moving to the separate crate * Fetch method removed * Sync test helpers refactored * Interaction with encryptor refactored * Contract address retrieving via substate removed * Sensible gas limit for private transactions implemented * New private contract with nonces added * Parsing of the response from key server fixed * Build fixed after the merge, native contracts removed * Crate renamed * Tests moved to the separate directory * Handling of errors reworked in order to use error chain * Encodable macro added, new constructor replaced with default * Native ethabi usage removed * Couple conversions optimized * Interactions with client reworked * Errors omitting removed * Fix after merge * Fix after the merge * private transactions improvements in progress * private_transactions -> ethcore/private-tx * making private transactions more idiomatic * private-tx encryptor uses shared FetchClient and is more idiomatic * removed redundant tests, moved integration tests to tests/ dir * fixed failing service test * reenable add_notify on private tx provider * removed private_tx tests from sync module * removed commented out code * Use plain password instead of unlocking account manager * remove dead code * Link to the contract changed * Transaction signature chain replay protection module created * Redundant type conversion removed * Contract address returned by private provider * Test fixed * Addressing grumbles in PrivateTransactions (#8249) * Tiny fixes part 1. * A bunch of additional comments and todos. * Fix ethsync tests. * resolved merge conflicts * final private tx pr (#8318) * added cli option that enables private transactions * fixed failing test * fixed failing test * fixed failing test * fixed failing test
2018-04-09 16:14:33 +02:00
use test_helpers::{get_good_dummy_block, get_good_dummy_block_seq};
use types::{view, views::BlockView};
2020-08-05 06:08:03 +02:00
// create a test block queue.
// auto_scaling enables verifier adjustment.
fn get_test_queue(auto_scale: bool) -> BlockQueue {
2018-03-12 18:05:52 +01:00
let spec = Spec::new_test();
let engine = spec.engine;
2020-08-05 06:08:03 +02:00
let mut config = Config::default();
config.verifier_settings.scale_verifiers = auto_scale;
BlockQueue::new(config, engine, IoChannel::disconnected(), true)
2016-01-28 19:14:07 +01:00
}
2020-08-05 06:08:03 +02:00
fn get_test_config(num_verifiers: usize, is_auto_scale: bool) -> Config {
let mut config = Config::default();
config.verifier_settings.num_verifiers = num_verifiers;
config.verifier_settings.scale_verifiers = is_auto_scale;
config
}
2020-08-05 06:08:03 +02:00
fn new_unverified(bytes: Bytes) -> Unverified {
Unverified::from_rlp(bytes).expect("Should be valid rlp")
}
2020-08-05 06:08:03 +02:00
2016-01-18 00:24:20 +01:00
#[test]
2016-01-28 19:14:07 +01:00
fn can_be_created() {
2016-01-18 00:24:20 +01:00
// TODO better test
let spec = Spec::new_test();
let engine = spec.engine;
2016-10-24 16:31:37 +02:00
let _ = BlockQueue::new(Config::default(), engine, IoChannel::disconnected(), true);
2016-01-18 00:24:20 +01:00
}
2020-08-05 06:08:03 +02:00
2016-01-28 19:14:07 +01:00
#[test]
2016-01-28 19:43:57 +01:00
fn can_import_blocks() {
let queue = get_test_queue(false);
if let Err(e) = queue.import(new_unverified(get_good_dummy_block())) {
2016-01-28 19:14:07 +01:00
panic!("error importing block that is valid by definition({:?})", e);
2020-08-05 06:08:03 +02:00
}
2016-01-28 19:14:07 +01:00
}
2020-08-05 06:08:03 +02:00
2016-01-28 19:14:07 +01:00
#[test]
fn returns_error_for_duplicates() {
let queue = get_test_queue(false);
if let Err(e) = queue.import(new_unverified(get_good_dummy_block())) {
2016-01-28 19:14:07 +01:00
panic!("error importing block that is valid by definition({:?})", e);
}
2020-08-05 06:08:03 +02:00
let duplicate_import = queue.import(new_unverified(get_good_dummy_block()));
2016-01-28 19:43:57 +01:00
match duplicate_import {
Err((_, e)) => match e {
Error(ErrorKind::Import(ImportErrorKind::AlreadyQueued), _) => {}
2016-01-28 19:43:57 +01:00
_ => {
panic!("must return AlreadyQueued error");
}
},
Ok(_) => {
panic!("must produce error");
2020-08-05 06:08:03 +02:00
}
}
2016-01-28 19:43:57 +01:00
}
2020-08-05 06:08:03 +02:00
#[test]
fn returns_total_difficulty() {
let queue = get_test_queue(false);
let block = get_good_dummy_block();
let hash = view!(BlockView, &block).header().hash().clone();
if let Err(e) = queue.import(new_unverified(block)) {
panic!("error importing block that is valid by definition({:?})", e);
}
queue.flush();
assert_eq!(queue.total_difficulty(), 131072.into());
queue.drain(10);
assert_eq!(queue.total_difficulty(), 131072.into());
queue.mark_as_good(&[hash]);
assert_eq!(queue.total_difficulty(), 0.into());
}
2020-08-05 06:08:03 +02:00
2016-01-28 19:43:57 +01:00
#[test]
2016-02-01 16:18:32 +01:00
fn returns_ok_for_drained_duplicates() {
let queue = get_test_queue(false);
2016-02-02 21:06:21 +01:00
let block = get_good_dummy_block();
let hash = view!(BlockView, &block).header().hash().clone();
if let Err(e) = queue.import(new_unverified(block)) {
2016-01-28 19:43:57 +01:00
panic!("error importing block that is valid by definition({:?})", e);
}
queue.flush();
2016-02-01 16:18:32 +01:00
queue.drain(10);
2016-02-02 21:06:21 +01:00
queue.mark_as_good(&[hash]);
2020-08-05 06:08:03 +02:00
if let Err(e) = queue.import(new_unverified(get_good_dummy_block())) {
2016-02-01 16:18:32 +01:00
panic!(
"error importing block that has already been drained ({:?})",
e
);
2016-01-28 19:14:07 +01:00
}
}
2020-08-05 06:08:03 +02:00
2016-02-06 23:15:53 +01:00
#[test]
fn returns_empty_once_finished() {
let queue = get_test_queue(false);
queue
.import(new_unverified(get_good_dummy_block()))
.expect("error importing block that is valid by definition");
2016-02-06 23:15:53 +01:00
queue.flush();
queue.drain(1);
2020-08-05 06:08:03 +02:00
2016-02-08 12:35:51 +01:00
assert!(queue.queue_info().is_empty());
2016-02-06 23:15:53 +01:00
}
2020-08-05 06:08:03 +02:00
2016-02-25 17:14:45 +01:00
#[test]
fn test_mem_limit() {
2018-03-12 18:05:52 +01:00
let spec = Spec::new_test();
let engine = spec.engine;
let mut config = Config::default();
2016-02-25 17:14:45 +01:00
config.max_mem_use = super::MIN_MEM_LIMIT; // empty queue uses about 15000
2016-10-24 16:31:37 +02:00
let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true);
2016-02-25 17:14:45 +01:00
assert!(!queue.queue_info().is_full());
let mut blocks = get_good_dummy_block_seq(50);
for b in blocks.drain(..) {
queue.import(new_unverified(b)).unwrap();
2016-02-25 17:14:45 +01:00
}
assert!(queue.queue_info().is_full());
}
2020-08-05 06:08:03 +02:00
2016-11-21 14:23:34 +01:00
#[test]
fn scaling_limits() {
let max_verifiers = ::num_cpus::get();
let queue = get_test_queue(true);
queue.scale_verifiers(max_verifiers + 1);
2020-08-05 06:08:03 +02:00
assert!(queue.num_verifiers() < max_verifiers + 1);
2020-08-05 06:08:03 +02:00
2016-11-21 14:23:34 +01:00
queue.scale_verifiers(0);
2020-08-05 06:08:03 +02:00
assert!(queue.num_verifiers() == 1);
2016-11-21 14:23:34 +01:00
}
2020-08-05 06:08:03 +02:00
2016-11-21 14:23:34 +01:00
#[test]
fn readjust_verifiers() {
let queue = get_test_queue(true);
2020-08-05 06:08:03 +02:00
2016-12-19 17:15:54 +01:00
// put all the verifiers to sleep to ensure
2016-11-21 14:23:34 +01:00
// the test isn't timing sensitive.
*queue.state.0.lock() = State::Work(0);
2020-08-05 06:08:03 +02:00
2016-11-21 14:23:34 +01:00
for block in get_good_dummy_block_seq(5000) {
queue
.import(new_unverified(block))
.expect("Block good by definition; qed");
2016-11-21 14:23:34 +01:00
}
2020-08-05 06:08:03 +02:00
2016-11-21 14:23:34 +01:00
// almost all unverified == bump verifier count.
queue.collect_garbage();
assert_eq!(queue.num_verifiers(), 1);
2020-08-05 06:08:03 +02:00
2016-11-21 14:23:34 +01:00
queue.flush();
2020-08-05 06:08:03 +02:00
2016-11-21 14:23:34 +01:00
// nothing to verify == use minimum number of verifiers.
queue.collect_garbage();
assert_eq!(queue.num_verifiers(), 1);
2016-11-21 14:23:34 +01:00
}
2020-08-05 06:08:03 +02:00
#[test]
fn worker_threads_honor_specified_number_without_scaling() {
let spec = Spec::new_test();
let engine = spec.engine;
let config = get_test_config(1, false);
let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true);
2020-08-05 06:08:03 +02:00
assert_eq!(queue.num_verifiers(), 1);
}
2020-08-05 06:08:03 +02:00
#[test]
fn worker_threads_specified_to_zero_should_set_to_one() {
let spec = Spec::new_test();
let engine = spec.engine;
let config = get_test_config(0, false);
let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true);
2020-08-05 06:08:03 +02:00
assert_eq!(queue.num_verifiers(), 1);
}
2020-08-05 06:08:03 +02:00
#[test]
fn worker_threads_should_only_accept_max_number_cpus() {
let spec = Spec::new_test();
let engine = spec.engine;
let config = get_test_config(10_000, false);
let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true);
let num_cpus = ::num_cpus::get();
2020-08-05 06:08:03 +02:00
assert_eq!(queue.num_verifiers(), num_cpus);
}
2020-08-05 06:08:03 +02:00
#[test]
fn worker_threads_scaling_with_specifed_num_of_workers() {
let num_cpus = ::num_cpus::get();
// only run the test with at least 2 CPUs
if num_cpus > 1 {
let spec = Spec::new_test();
let engine = spec.engine;
let config = get_test_config(num_cpus - 1, true);
let queue = BlockQueue::new(config, engine, IoChannel::disconnected(), true);
queue.scale_verifiers(num_cpus);
2020-08-05 06:08:03 +02:00
assert_eq!(queue.num_verifiers(), num_cpus);
}
}
2016-01-18 00:24:20 +01:00
}