Strict memory order (#306)

* Make MemoryOrdering more strict

* fmt

* Strict mem order for priority_tasks_gate
Author: rakita, 2021-03-10 12:36:23 +01:00 (committed by GitHub)
Parent: e2024c4b81
Commit: eca8fb74ae
14 changed files with 56 additions and 66 deletions
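
The diffs below replace Relaxed, Acquire, and Release orderings with SeqCst throughout. As a rough illustration of what the stricter ordering buys (a standalone sketch, not code from this repository): SeqCst operations all participate in one global total order, which rules out outcomes that Relaxed, and even Release/Acquire, still allow, such as the classic store-buffering case:

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    let x = Arc::new(AtomicBool::new(false));
    let y = Arc::new(AtomicBool::new(false));

    let (x1, y1) = (x.clone(), y.clone());
    let t1 = thread::spawn(move || {
        x1.store(true, Ordering::SeqCst);
        y1.load(Ordering::SeqCst) // did we see thread 2's store?
    });

    let (x2, y2) = (x.clone(), y.clone());
    let t2 = thread::spawn(move || {
        y2.store(true, Ordering::SeqCst);
        x2.load(Ordering::SeqCst) // did we see thread 1's store?
    });

    let (saw_y, saw_x) = (t1.join().unwrap(), t2.join().unwrap());
    // Under SeqCst at least one thread must observe the other's store, so
    // (false, false) is impossible; with Relaxed (or Release/Acquire)
    // orderings both loads may legitimately return false.
    assert!(saw_x || saw_y);
}
```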

View File

@@ -39,7 +39,7 @@ struct StratumControlService {
 impl ControlService for StratumControlService {
     fn shutdown(&self) -> bool {
         trace!(target: "hypervisor", "Received shutdown from control service");
-        self.stop.store(true, ::std::sync::atomic::Ordering::Relaxed);
+        self.stop.store(true, ::std::sync::atomic::Ordering::SeqCst);
         true
     }
 }

View File

@@ -207,11 +207,11 @@ mod test {
         // when
         let bb = b.clone();
         price_info.get(move |_| {
-            bb.store(true, Ordering::Relaxed);
+            bb.store(true, Ordering::SeqCst);
         });
         // then
-        assert_eq!(b.load(Ordering::Relaxed), false);
+        assert_eq!(b.load(Ordering::SeqCst), false);
     }
     #[test]
@@ -225,10 +225,10 @@ mod test {
         // when
         let bb = b.clone();
         price_info.get(move |_| {
-            bb.store(true, Ordering::Relaxed);
+            bb.store(true, Ordering::SeqCst);
         });
         // then
-        assert_eq!(b.load(Ordering::Relaxed), false);
+        assert_eq!(b.load(Ordering::SeqCst), false);
     }
 }

View File

@@ -480,7 +480,7 @@ impl TransactionQueue {
         // We want to clear stale transactions from the queue as well.
         // (Transactions that are occuping the queue for a long time without being included)
         let stale_id = {
-            let current_id = self.insertion_id.load(atomic::Ordering::Relaxed);
+            let current_id = self.insertion_id.load(atomic::Ordering::SeqCst);
             // wait at least for half of the queue to be replaced
             let gap = self.pool.read().options().max_count / 2;
             // but never less than 100 transactions

View File

@@ -34,6 +34,6 @@ impl StopGuard {
 impl Drop for StopGuard {
     fn drop(&mut self) {
-        self.flag.store(true, Ordering::Relaxed)
+        self.flag.store(true, Ordering::SeqCst)
     }
 }

View File

@@ -284,7 +284,7 @@ impl Importer {
     // t_nb 6.0 This is triggered by a message coming from a block queue when the block is ready for insertion
     pub fn import_verified_blocks(&self, client: &Client) -> usize {
         // Shortcut out if we know we're incapable of syncing the chain.
-        if !client.enabled.load(AtomicOrdering::Relaxed) {
+        if !client.enabled.load(AtomicOrdering::SeqCst) {
             return 0;
         }
@@ -1445,18 +1445,18 @@ impl Client {
     }
     fn wake_up(&self) {
-        if !self.liveness.load(AtomicOrdering::Relaxed) {
-            self.liveness.store(true, AtomicOrdering::Relaxed);
+        if !self.liveness.load(AtomicOrdering::SeqCst) {
+            self.liveness.store(true, AtomicOrdering::SeqCst);
             self.notify(|n| n.start());
             info!(target: "mode", "wake_up: Waking.");
         }
     }
     fn sleep(&self, force: bool) {
-        if self.liveness.load(AtomicOrdering::Relaxed) {
+        if self.liveness.load(AtomicOrdering::SeqCst) {
             // only sleep if the import queue is mostly empty.
             if force || (self.queue_info().total_queue_size() <= MAX_QUEUE_SIZE_TO_SLEEP_ON) {
-                self.liveness.store(false, AtomicOrdering::Relaxed);
+                self.liveness.store(false, AtomicOrdering::SeqCst);
                 self.notify(|n| n.stop());
                 info!(target: "mode", "sleep: Sleeping.");
             } else {
@@ -2058,13 +2058,13 @@ impl BlockChainClient for Client {
     fn disable(&self) {
         self.set_mode(Mode::Off);
-        self.enabled.store(false, AtomicOrdering::Relaxed);
+        self.enabled.store(false, AtomicOrdering::SeqCst);
         self.clear_queue();
     }
     fn set_mode(&self, new_mode: Mode) {
         trace!(target: "mode", "Client::set_mode({:?})", new_mode);
-        if !self.enabled.load(AtomicOrdering::Relaxed) {
+        if !self.enabled.load(AtomicOrdering::SeqCst) {
             return;
         }
         {
@@ -2095,7 +2095,7 @@ impl BlockChainClient for Client {
     fn set_spec_name(&self, new_spec_name: String) -> Result<(), ()> {
         trace!(target: "mode", "Client::set_spec_name({:?})", new_spec_name);
-        if !self.enabled.load(AtomicOrdering::Relaxed) {
+        if !self.enabled.load(AtomicOrdering::SeqCst) {
             return Err(());
         }
         if let Some(ref h) = *self.exit_handler.lock() {
@@ -3241,7 +3241,7 @@ impl IoChannelQueue {
     where
         F: Fn(&Client) + Send + Sync + 'static,
     {
-        let queue_size = self.currently_queued.load(AtomicOrdering::Relaxed);
+        let queue_size = self.currently_queued.load(AtomicOrdering::SeqCst);
         if queue_size >= self.limit {
             let err_limit = usize::try_from(self.limit).unwrap_or(usize::max_value());
             bail!("The queue is full ({})", err_limit);

View File

@@ -237,7 +237,7 @@ impl TestBlockChainClient {
     /// Set block queue size for testing
     pub fn set_queue_size(&self, size: usize) {
-        self.queue_size.store(size, AtomicOrder::Relaxed);
+        self.queue_size.store(size, AtomicOrder::SeqCst);
     }
     /// Set timestamp assigned to latest sealed block
@@ -402,7 +402,7 @@ impl TestBlockChainClient {
     /// Returns true if the client has been disabled.
     pub fn is_disabled(&self) -> bool {
-        self.disabled.load(AtomicOrder::Relaxed)
+        self.disabled.load(AtomicOrder::SeqCst)
     }
 }
@@ -959,7 +959,7 @@ impl BlockChainClient for TestBlockChainClient {
     fn queue_info(&self) -> QueueInfo {
         QueueInfo {
-            verified_queue_size: self.queue_size.load(AtomicOrder::Relaxed),
+            verified_queue_size: self.queue_size.load(AtomicOrder::SeqCst),
             unverified_queue_size: 0,
             verifying_queue_size: 0,
             max_queue_size: 0,
@@ -1019,7 +1019,7 @@ impl BlockChainClient for TestBlockChainClient {
     }
     fn disable(&self) {
-        self.disabled.store(true, AtomicOrder::Relaxed);
+        self.disabled.store(true, AtomicOrder::SeqCst);
     }
     fn pruning_info(&self) -> PruningInfo {

View File

@@ -127,34 +127,34 @@ pub struct Progress {
 impl Progress {
     /// Reset the progress.
     pub fn reset(&self) {
-        self.accounts.store(0, Ordering::Release);
-        self.blocks.store(0, Ordering::Release);
-        self.size.store(0, Ordering::Release);
-        self.abort.store(false, Ordering::Release);
+        self.accounts.store(0, Ordering::SeqCst);
+        self.blocks.store(0, Ordering::SeqCst);
+        self.size.store(0, Ordering::SeqCst);
+        self.abort.store(false, Ordering::SeqCst);
         // atomic fence here to ensure the others are written first?
         // logs might very rarely get polluted if not.
-        self.done.store(false, Ordering::Release);
+        self.done.store(false, Ordering::SeqCst);
     }
     /// Get the number of accounts snapshotted thus far.
     pub fn accounts(&self) -> usize {
-        self.accounts.load(Ordering::Acquire)
+        self.accounts.load(Ordering::SeqCst)
     }
     /// Get the number of blocks snapshotted thus far.
     pub fn blocks(&self) -> usize {
-        self.blocks.load(Ordering::Acquire)
+        self.blocks.load(Ordering::SeqCst)
     }
     /// Get the written size of the snapshot in bytes.
     pub fn size(&self) -> u64 {
-        self.size.load(Ordering::Acquire)
+        self.size.load(Ordering::SeqCst)
     }
     /// Whether the snapshot is complete.
     pub fn done(&self) -> bool {
-        self.done.load(Ordering::Acquire)
+        self.done.load(Ordering::SeqCst)
     }
 }
 /// Take a snapshot using the given blockchain, starting block hash, and database, writing into the given writer.
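
The comment inside `reset` above asks whether a release fence is needed so the counters are written before `done`. Below is a hedged sketch of that fence-based variant (simplified, assumed names, not the actual `Progress` type); the commit sidesteps the question by using SeqCst on every access, which puts all of these loads and stores into a single total order.

```rust
use std::sync::atomic::{fence, AtomicBool, AtomicUsize, Ordering};

// Simplified stand-in for the real Progress type; field names are assumed.
struct ProgressLike {
    accounts: AtomicUsize,
    done: AtomicBool,
}

impl ProgressLike {
    // Fence-based variant alluded to by the original comment.
    fn reset_with_fence(&self) {
        self.accounts.store(0, Ordering::Relaxed);
        // Release fence: pairs with the acquire fence in the reader.
        fence(Ordering::Release);
        self.done.store(false, Ordering::Relaxed);
    }

    fn read_if_running(&self) -> Option<usize> {
        if self.done.load(Ordering::Relaxed) {
            return None;
        }
        // If the `false` read above came from `reset_with_fence`, this acquire
        // fence pairs with its release fence, so the counter below reads 0.
        fence(Ordering::Acquire);
        Some(self.accounts.load(Ordering::Relaxed))
    }

    // The approach taken by this commit: SeqCst everywhere, no fences.
    fn reset_seqcst(&self) {
        self.accounts.store(0, Ordering::SeqCst);
        self.done.store(false, Ordering::SeqCst);
    }
}
```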

View File

@@ -175,18 +175,13 @@ struct QueueSignal {
 impl QueueSignal {
     fn set_sync(&self) {
         // Do not signal when we are about to close
-        if self.deleting.load(AtomicOrdering::Relaxed) {
+        if self.deleting.load(AtomicOrdering::SeqCst) {
             return;
         }
         if self
             .signalled
-            .compare_exchange(
-                false,
-                true,
-                AtomicOrdering::Relaxed,
-                AtomicOrdering::Relaxed,
-            )
+            .compare_exchange(false, true, AtomicOrdering::SeqCst, AtomicOrdering::SeqCst)
             .is_ok()
         {
             let channel = self.message_channel.lock().clone();
@@ -198,18 +193,13 @@ impl QueueSignal {
     fn set_async(&self) {
         // Do not signal when we are about to close
-        if self.deleting.load(AtomicOrdering::Relaxed) {
+        if self.deleting.load(AtomicOrdering::SeqCst) {
             return;
         }
         if self
             .signalled
-            .compare_exchange(
-                false,
-                true,
-                AtomicOrdering::Relaxed,
-                AtomicOrdering::Relaxed,
-            )
+            .compare_exchange(false, true, AtomicOrdering::SeqCst, AtomicOrdering::SeqCst)
             .is_ok()
         {
             let channel = self.message_channel.lock().clone();
@@ -220,7 +210,7 @@ impl QueueSignal {
     }
     fn reset(&self) {
-        self.signalled.store(false, AtomicOrdering::Relaxed);
+        self.signalled.store(false, AtomicOrdering::SeqCst);
     }
 }
@@ -499,9 +489,9 @@ impl<K: Kind> VerificationQueue<K> {
         verified.clear();
         let sizes = &self.verification.sizes;
-        sizes.unverified.store(0, AtomicOrdering::Release);
-        sizes.verifying.store(0, AtomicOrdering::Release);
-        sizes.verified.store(0, AtomicOrdering::Release);
+        sizes.unverified.store(0, AtomicOrdering::SeqCst);
+        sizes.verifying.store(0, AtomicOrdering::SeqCst);
+        sizes.verified.store(0, AtomicOrdering::SeqCst);
         *self.total_difficulty.write() = 0.into();
         self.processing.write().clear();
@@ -728,7 +718,7 @@ impl<K: Kind> VerificationQueue<K> {
                 .verification
                 .sizes
                 .unverified
-                .load(AtomicOrdering::Acquire);
+                .load(AtomicOrdering::SeqCst);
             (len, size + len * size_of::<K::Unverified>())
         };
@@ -738,7 +728,7 @@ impl<K: Kind> VerificationQueue<K> {
                 .verification
                 .sizes
                 .verifying
-                .load(AtomicOrdering::Acquire);
+                .load(AtomicOrdering::SeqCst);
             (len, size + len * size_of::<Verifying<K>>())
         };
         let (verified_len, verified_bytes) = {
@@ -747,7 +737,7 @@ impl<K: Kind> VerificationQueue<K> {
                 .verification
                 .sizes
                 .verified
-                .load(AtomicOrdering::Acquire);
+                .load(AtomicOrdering::SeqCst);
             (len, size + len * size_of::<K::Verified>())
         };
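
Besides switching to SeqCst, the hunks above also collapse the multi-line `compare_exchange` calls onto single lines, which is where the commit's net reduction in line count comes from. The pattern itself is a "signal at most once" gate; a minimal sketch with assumed names (not the actual `QueueSignal` type):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// The first caller to flip the flag from false to true wins and is the only
// one to send a wake-up message; everyone else sees Err and does nothing.
struct SignalOnce {
    signalled: AtomicBool,
}

impl SignalOnce {
    fn try_signal(&self) -> bool {
        // The two orderings are the success and failure orderings; the commit
        // uses SeqCst for both.
        self.signalled
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
            .is_ok()
    }

    fn reset(&self) {
        self.signalled.store(false, Ordering::SeqCst);
    }
}

// Usage: only one of many concurrent callers observes `true` here and
// actually posts the wake-up message to the channel.
fn maybe_wake(signal: &SignalOnce) {
    if signal.try_signal() {
        // send the wake-up message...
    }
}
```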

View File

@@ -489,7 +489,7 @@ impl ChainSyncApi {
         if self
             .priority_tasks_gate
-            .compare_exchange(false, true, Ordering::Acquire, Ordering::Release)
+            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
             .is_err()
         {
             return;
@@ -552,7 +552,7 @@ impl ChainSyncApi {
         // Process as many items as we can until the deadline is reached.
         loop {
             if work().is_none() {
-                self.priority_tasks_gate.store(false, Ordering::Release);
+                self.priority_tasks_gate.store(false, Ordering::SeqCst);
                 return;
             }
         }
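
One detail worth noting for the `priority_tasks_gate` hunks above: the old call passed `Ordering::Acquire` as the success ordering and `Ordering::Release` as the failure ordering. Per the `compare_exchange` documentation, the failure ordering may only be `SeqCst`, `Acquire`, or `Relaxed` (never `Release` or `AcqRel`), and std versions contemporary with this change would panic at runtime on such a pair, so `(SeqCst, SeqCst)` also removes a latent panic. A minimal, illustrative sketch of the corrected gate (function names are assumed):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// Acquire the gate: only the caller that flips false -> true proceeds.
fn try_acquire(gate: &AtomicBool) -> bool {
    // Success and failure orderings must both be legal; (SeqCst, SeqCst)
    // always is, whereas a Release failure ordering is rejected by std.
    gate.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
        .is_ok()
}

// Release the gate once the queued work has been drained.
fn release(gate: &AtomicBool) {
    gate.store(false, Ordering::SeqCst);
}
```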

View File

@@ -464,7 +464,7 @@ impl Host {
     }
     pub fn stop(&self, io: &IoContext<NetworkIoMessage>) {
-        self.stopping.store(true, AtomicOrdering::Release);
+        self.stopping.store(true, AtomicOrdering::SeqCst);
         let mut to_kill = Vec::new();
         for e in self.sessions.read().iter() {
             let mut s = e.lock();
@@ -1168,7 +1168,7 @@ impl IoHandler<NetworkIoMessage> for Host {
     }
     fn stream_readable(&self, io: &IoContext<NetworkIoMessage>, stream: StreamToken) {
-        if self.stopping.load(AtomicOrdering::Acquire) {
+        if self.stopping.load(AtomicOrdering::SeqCst) {
             return;
         }
         match stream {
@@ -1180,7 +1180,7 @@ impl IoHandler<NetworkIoMessage> for Host {
     }
     fn stream_writable(&self, io: &IoContext<NetworkIoMessage>, stream: StreamToken) {
-        if self.stopping.load(AtomicOrdering::Acquire) {
+        if self.stopping.load(AtomicOrdering::SeqCst) {
             return;
         }
         match stream {
@@ -1191,7 +1191,7 @@ impl IoHandler<NetworkIoMessage> for Host {
     }
     fn timeout(&self, io: &IoContext<NetworkIoMessage>, token: TimerToken) {
-        if self.stopping.load(AtomicOrdering::Acquire) {
+        if self.stopping.load(AtomicOrdering::SeqCst) {
             return;
         }
         match token {
@@ -1253,7 +1253,7 @@ impl IoHandler<NetworkIoMessage> for Host {
     }
     fn message(&self, io: &IoContext<NetworkIoMessage>, message: &NetworkIoMessage) {
-        if self.stopping.load(AtomicOrdering::Acquire) {
+        if self.stopping.load(AtomicOrdering::SeqCst) {
             return;
         }
         match *message {

View File

@@ -73,11 +73,11 @@ impl TestProtocol {
     }
     pub fn got_timeout(&self) -> bool {
-        self.got_timeout.load(AtomicOrdering::Relaxed)
+        self.got_timeout.load(AtomicOrdering::SeqCst)
     }
     pub fn got_disconnect(&self) -> bool {
-        self.got_disconnect.load(AtomicOrdering::Relaxed)
+        self.got_disconnect.load(AtomicOrdering::SeqCst)
     }
 }
@@ -101,13 +101,13 @@ impl NetworkProtocolHandler for TestProtocol {
     }
     fn disconnected(&self, _io: &dyn NetworkContext, _peer: &PeerId) {
-        self.got_disconnect.store(true, AtomicOrdering::Relaxed);
+        self.got_disconnect.store(true, AtomicOrdering::SeqCst);
     }
     /// Timer function called after a timeout created with `NetworkContext::timeout`.
     fn timeout(&self, _io: &dyn NetworkContext, timer: TimerToken) {
         assert_eq!(timer, 0);
-        self.got_timeout.store(true, AtomicOrdering::Relaxed);
+        self.got_timeout.store(true, AtomicOrdering::SeqCst);
     }
 }

View File

@@ -171,7 +171,7 @@ impl RpcStats {
     /// Returns number of open sessions
     pub fn sessions(&self) -> usize {
-        self.active_sessions.load(atomic::Ordering::Relaxed)
+        self.active_sessions.load(atomic::Ordering::SeqCst)
     }
     /// Returns requests rate

View File

@@ -86,13 +86,13 @@ impl Worker {
         future::loop_fn(ini, |(stealer, channel, wait, wait_mutex, deleting)| {
             {
                 let mut lock = wait_mutex.lock();
-                if deleting.load(AtomicOrdering::Acquire) {
+                if deleting.load(AtomicOrdering::SeqCst) {
                     return Ok(Loop::Break(()));
                 }
                 wait.wait(&mut lock);
             }
-            while !deleting.load(AtomicOrdering::Acquire) {
+            while !deleting.load(AtomicOrdering::SeqCst) {
                 match stealer.steal() {
                     deque::Steal::Data(work) => {
                         Worker::do_work(work, channel.clone())
@@ -147,7 +147,7 @@ impl Drop for Worker {
     fn drop(&mut self) {
         trace!(target: "shutdown", "[IoWorker] Closing...");
         let _ = self.wait_mutex.lock();
-        self.deleting.store(true, AtomicOrdering::Release);
+        self.deleting.store(true, AtomicOrdering::SeqCst);
         self.wait.notify_all();
         if let Some(thread) = self.thread.take() {
             thread.join().ok();
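
For context on the worker hunks above: shutdown takes the wait mutex, sets `deleting`, and notifies the condition variable, so the worker re-checks the flag under the same mutex and exits before `join`. A hedged sketch of that handshake using std primitives (the real code uses parking_lot and the futures 0.1 `loop_fn`; names here are assumed):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn spawn_worker(
    deleting: Arc<AtomicBool>,
    pair: Arc<(Mutex<()>, Condvar)>,
) -> thread::JoinHandle<()> {
    thread::spawn(move || loop {
        {
            let (lock, cvar) = &*pair;
            let guard = lock.lock().unwrap();
            if deleting.load(Ordering::SeqCst) {
                return;
            }
            // Sleep until new work arrives or shutdown is signalled; the
            // returned guard is dropped at the end of this block.
            let _guard = cvar.wait(guard).unwrap();
        }
        while !deleting.load(Ordering::SeqCst) {
            // ... steal and process queued work here ...
            break;
        }
    })
}

fn shutdown(
    deleting: &AtomicBool,
    pair: &(Mutex<()>, Condvar),
    handle: thread::JoinHandle<()>,
) {
    let (lock, cvar) = pair;
    // Setting the flag while holding the mutex means the worker cannot miss
    // the notification between its flag check and its wait.
    let guard = lock.lock().unwrap();
    deleting.store(true, Ordering::SeqCst);
    cvar.notify_all();
    drop(guard);
    handle.join().ok();
}
```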

View File

@@ -259,7 +259,7 @@ impl Rpc {
     {
         let (c, p) = oneshot::<Result<JsonValue, RpcError>>();
-        let id = self.counter.fetch_add(1, Ordering::Relaxed);
+        let id = self.counter.fetch_add(1, Ordering::SeqCst);
         self.pending.insert(id, c);
         let request = MethodCall {
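
A final note on the counter hunk above, with an illustrative sketch (standalone, not the actual `Rpc` type): `fetch_add` is a single atomic read-modify-write, so the ids it hands out are unique under any ordering; the ordering parameter only constrains how surrounding memory operations are observed, and the commit simply standardizes on SeqCst here as well.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Each caller gets a distinct id regardless of the ordering chosen, because
// the increment itself is atomic; SeqCst is used for consistency with the
// rest of the codebase after this commit.
static NEXT_ID: AtomicUsize = AtomicUsize::new(0);

fn next_request_id() -> usize {
    NEXT_ID.fetch_add(1, Ordering::SeqCst)
}
```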