Replace tokio_core with tokio (ring -> 0.13) (#9657)

* Replace `tokio_core` with `tokio`.

* Remove `tokio-core` and replace with `tokio` in

    - `ethcore/stratum`

    - `secret_store`

    - `util/fetch`

    - `util/reactor`

* Bump hyper to 0.12 in

    - `miner`

    - `util/fake-fetch`

    - `util/fetch`

    - `secret_store`

* Bump `jsonrpc-*` crates to 0.9 in

    - `parity`

    - `ethcore/stratum`

    - `ipfs`

    - `rpc`

    - `rpc_client`

    - `whisper`

* Bump `ring` to 0.13

* Use a more graceful shutdown process in `secret_store` tests.

* Convert some mutexes to rwlocks in `secret_store`.

* Consolidate Tokio Runtime use, remove `CpuPool`.

* Rename and move the `tokio_reactor` crate (`util/reactor`) to
  `tokio_runtime` (`util/runtime`).

* Rename `EventLoop` to `Runtime`.

    - Rename `EventLoop::spawn` to `Runtime::with_default_thread_count`.

    - Add the `Runtime::with_thread_count` method.

    - Rename `Remote` to `Executor`.

* Remove uses of `CpuPool` and spawn all tasks via the `Runtime` executor
  instead.

* Other changes related to `CpuPool` removal:

    - Remove `Reservations::with_pool`. `::new` now takes an `Executor` as an argument.

    - Remove `SenderReservations::with_pool`. `::new` now takes an `Executor` as an argument.
This commit is contained in:
Nick Sanders
2018-10-22 00:40:50 -07:00
committed by Afri Schoedon
parent b8da38f4e4
commit 68ca8df22f
75 changed files with 2027 additions and 1671 deletions

View File

@@ -23,31 +23,25 @@ use ethereum_types::{U256, Address};
use futures::{Future, future, Poll, Async};
use futures::future::Either;
use futures::sync::oneshot;
use futures_cpupool::CpuPool;
use parity_runtime::Executor;
/// Manages currently reserved and prospective nonces
/// for multiple senders.
#[derive(Debug)]
pub struct Reservations {
nonces: HashMap<Address, SenderReservations>,
pool: CpuPool,
executor: Executor,
}
impl Reservations {
/// A maximal number of reserved nonces in the hashmap
/// before we start clearing the unused ones.
const CLEAN_AT: usize = 512;
/// Create new nonces manager and spawn a single-threaded cpu pool
/// for progressing execution of dropped nonces.
pub fn new() -> Self {
Self::with_pool(CpuPool::new(1))
}
/// Create new nonces manager with given cpupool.
pub fn with_pool(pool: CpuPool) -> Self {
/// Create new nonces manager with given executor.
pub fn new(executor: Executor) -> Self {
Reservations {
nonces: Default::default(),
pool,
executor,
}
}
@@ -59,9 +53,9 @@ impl Reservations {
self.nonces.retain(|_, v| !v.is_empty());
}
let pool = &self.pool;
let executor = &self.executor;
self.nonces.entry(sender)
.or_insert_with(move || SenderReservations::with_pool(pool.clone()))
.or_insert_with(move || SenderReservations::new(executor.clone()))
.reserve_nonce(minimal)
}
}
@@ -71,25 +65,18 @@ impl Reservations {
pub struct SenderReservations {
previous: Option<oneshot::Receiver<U256>>,
previous_ready: Arc<AtomicBool>,
pool: CpuPool,
executor: Executor,
prospective_value: U256,
dropped: Arc<AtomicUsize>,
}
impl SenderReservations {
/// Create new nonces manager and spawn a single-threaded cpu pool
/// for progressing execution of dropped nonces.
#[cfg(test)]
pub fn new() -> Self {
Self::with_pool(CpuPool::new(1))
}
/// Create new nonces manager with given cpu pool.
pub fn with_pool(pool: CpuPool) -> Self {
/// Create new nonces manager with given executor.
pub fn new(executor: Executor) -> Self {
SenderReservations {
previous: None,
previous_ready: Arc::new(AtomicBool::new(true)),
pool,
executor,
prospective_value: Default::default(),
dropped: Default::default(),
}
@@ -110,7 +97,7 @@ impl SenderReservations {
let (next, rx) = oneshot::channel();
let next = Some(next);
let next_sent = Arc::new(AtomicBool::default());
let pool = self.pool.clone();
let executor = self.executor.clone();
let dropped = self.dropped.clone();
self.previous_ready = next_sent.clone();
match mem::replace(&mut self.previous, Some(rx)) {
@@ -120,7 +107,7 @@ impl SenderReservations {
next_sent,
minimal,
prospective_value,
pool,
executor,
dropped,
},
None => Reserved {
@@ -129,7 +116,7 @@ impl SenderReservations {
next_sent,
minimal,
prospective_value,
pool,
executor,
dropped,
},
}
@@ -152,7 +139,7 @@ pub struct Reserved {
next_sent: Arc<AtomicBool>,
minimal: U256,
prospective_value: U256,
pool: CpuPool,
executor: Executor,
dropped: Arc<AtomicUsize>,
}
@@ -196,10 +183,14 @@ impl Drop for Reserved {
self.dropped.fetch_add(1, atomic::Ordering::SeqCst);
// If Reserved is dropped just pipe previous and next together.
let previous = mem::replace(&mut self.previous, Either::B(future::ok(U256::default())));
self.pool.spawn(previous.map(move |nonce| {
next_sent.store(true, atomic::Ordering::SeqCst);
next.send(nonce).expect(Ready::RECV_PROOF)
})).forget()
self.executor.spawn(
previous
.map(move |nonce| {
next_sent.store(true, atomic::Ordering::SeqCst);
next.send(nonce).expect(Ready::RECV_PROOF)
})
.map_err(|err| error!("Error dropping `Reserved`: {:?}", err))
);
}
}
}
@@ -253,10 +244,12 @@ impl Drop for Ready {
#[cfg(test)]
mod tests {
use super::*;
use parity_runtime::Runtime;
#[test]
fn should_reserve_a_set_of_nonces_and_resolve_them() {
let mut nonces = SenderReservations::new();
let runtime = Runtime::with_thread_count(1);
let mut nonces = SenderReservations::new(runtime.executor());
assert!(nonces.is_empty());
let n1 = nonces.reserve_nonce(5.into());
@@ -303,7 +296,8 @@ mod tests {
#[test]
fn should_return_prospective_nonce() {
let mut nonces = SenderReservations::new();
let runtime = Runtime::with_thread_count(1);
let mut nonces = SenderReservations::new(runtime.executor());
let n1 = nonces.reserve_nonce(5.into());
let n2 = nonces.reserve_nonce(5.into());

View File

@@ -95,7 +95,7 @@ impl<S: core::Middleware<Metadata>> GenericPollManager<S> {
jsonrpc: Some(core::Version::V2),
id: core::Id::Str(id.as_string()),
method: subscription.method.clone(),
params: Some(subscription.params.clone()),
params: subscription.params.clone(),
};
trace!(target: "pubsub", "Polling method: {:?}", call);
let result = self.rpc.handle_call(call.into(), subscription.metadata.clone());
@@ -141,7 +141,7 @@ mod tests {
use jsonrpc_core::{MetaIoHandler, NoopMiddleware, Value, Params};
use jsonrpc_core::futures::{Future, Stream};
use jsonrpc_pubsub::SubscriptionId;
use http::tokio_core::reactor;
use http::tokio::runtime::Runtime;
use super::GenericPollManager;
@@ -162,25 +162,25 @@ mod tests {
#[test]
fn should_poll_subscribed_method() {
// given
let mut el = reactor::Core::new().unwrap();
let mut el = Runtime::new().unwrap();
let mut poll_manager = poll_manager();
let (id, rx) = poll_manager.subscribe(Default::default(), "hello".into(), Params::None);
assert_eq!(id, SubscriptionId::String("0x416d77337e24399d".into()));
// then
poll_manager.tick().wait().unwrap();
let (res, rx) = el.run(rx.into_future()).unwrap();
let (res, rx) = el.block_on(rx.into_future()).unwrap();
assert_eq!(res, Some(Ok(Value::String("hello".into()))));
// retrieve second item
poll_manager.tick().wait().unwrap();
let (res, rx) = el.run(rx.into_future()).unwrap();
let (res, rx) = el.block_on(rx.into_future()).unwrap();
assert_eq!(res, Some(Ok(Value::String("world".into()))));
// and no more notifications
poll_manager.tick().wait().unwrap();
// we need to unsubscribe otherwise the future will never finish.
poll_manager.unsubscribe(&id);
assert_eq!(el.run(rx.into_future()).unwrap().0, None);
assert_eq!(el.block_on(rx.into_future()).unwrap().0, None);
}
}