Replace `tokio_core` with `tokio` (`ring` -> 0.13) (#9657)

* Replace `tokio_core` with `tokio`.

* Remove `tokio-core` and replace with `tokio` in

    - `ethcore/stratum`

    - `secret_store`

    - `util/fetch`

    - `util/reactor`

* Bump hyper to 0.12 in

    - `miner`

    - `util/fake-fetch`

    - `util/fetch`

    - `secret_store`

* Bump `jsonrpc-***` to 0.9 in

    - `parity`

    - `ethcore/stratum`

    - `ipfs`

    - `rpc`

    - `rpc_client`

    - `whisper`

* Bump `ring` to 0.13

* Use a more graceful shutdown process in `secret_store` tests.

* Convert some mutexes to rwlocks in `secret_store`.

* Consolidate Tokio Runtime use, remove `CpuPool`.

* Rename and move the `tokio_reactor` crate (`util/reactor`) to
  `tokio_runtime` (`util/runtime`).

* Rename `EventLoop` to `Runtime`.

    - Rename `EventLoop::spawn` to `Runtime::with_default_thread_count`.

    - Add the `Runtime::with_thread_count` method.

    - Rename `Remote` to `Executor`.

* Remove uses of `CpuPool` and spawn all tasks via the `Runtime` executor
  instead (a minimal sketch follows this list).

* Other changes related to `CpuPool` removal:

    - Remove `Reservations::with_pool`. `::new` now takes an `Executor` as an argument.

    - Remove `SenderReservations::with_pool`. `::new` now takes an `Executor` as an argument.
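The sketch below, using plain `tokio` 0.1 and `futures` 0.1, approximates what the new `parity-runtime` wrapper provides: one shared runtime owns the worker threads, and cheap, cloneable executor handles (the role the renamed `Executor`, formerly `Remote`, plays) are passed wherever a `CpuPool` used to go. The names `Runtime::with_default_thread_count` and `Executor::new_sync` are this PR's own API; everything in the sketch itself is stock tokio.

extern crate futures;
extern crate tokio;

use futures::{future, Future};
use tokio::runtime::Runtime;

fn main() {
    // One multi-threaded runtime for the whole process; parity-runtime's
    // `Runtime::with_default_thread_count` wraps this kind of construction.
    let mut runtime = Runtime::new().expect("failed to start tokio runtime");

    // `executor()` returns a cheap, cloneable spawn handle. Components that
    // previously took a `CpuPool` or a `Remote` now take this instead.
    let executor = runtime.executor();
    executor.spawn(future::lazy(|| {
        println!("task runs on the shared runtime; no CpuPool needed");
        Ok(())
    }));

    // Wait for spawned tasks to finish before exiting.
    runtime.shutdown_on_idle().wait().expect("runtime shutdown failed");
}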
Commit 68ca8df22f (parent b8da38f4e4)
Nick Sanders, 2018-10-22 00:40:50 -07:00; committed by Afri Schoedon
75 changed files with 2027 additions and 1671 deletions

Cargo.lock (generated, 1179 changed lines): file diff suppressed because it is too large.

View File

@@ -28,10 +28,9 @@ serde = "1.0"
 serde_json = "1.0"
 serde_derive = "1.0"
 futures = "0.1"
-futures-cpupool = "0.1"
 fdlimit = "0.1"
 ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" }
-jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
+jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
 ethcore = { path = "ethcore", features = ["parity"] }
 parity-bytes = "0.1"
 ethcore-io = { path = "util/io" }
@@ -51,7 +50,7 @@ rpc-cli = { path = "rpc_cli" }
 parity-hash-fetch = { path = "hash-fetch" }
 parity-ipfs-api = { path = "ipfs" }
 parity-local-store = { path = "local-store" }
-parity-reactor = { path = "util/reactor" }
+parity-runtime = { path = "util/runtime" }
 parity-rpc = { path = "rpc" }
 parity-rpc-client = { path = "rpc_client" }
 parity-updater = { path = "updater" }
@@ -137,7 +136,4 @@ members = [
 "util/keccak-hasher",
 "util/patricia-trie-ethereum",
 "util/fastmap",
 ]
-
-[patch.crates-io]
-ring = { git = "https://github.com/paritytech/ring" }

View File

@@ -20,7 +20,7 @@ hashdb = "0.3.0"
 memorydb = "0.3.0"
 patricia-trie = "0.3.0"
 patricia-trie-ethereum = { path = "../util/patricia-trie-ethereum" }
-parity-crypto = "0.1"
+parity-crypto = "0.2"
 error-chain = { version = "0.12", default-features = false }
 ethcore-io = { path = "../util/io" }
 ethcore-logger = { path = "../logger" }

View File

@@ -12,7 +12,7 @@ ethabi-derive = "6.0"
 ethabi-contract = "6.0"
 ethcore = { path = ".." }
 parity-bytes = "0.1"
-parity-crypto = "0.1"
+parity-crypto = "0.2"
 ethcore-io = { path = "../../util/io" }
 ethcore-logger = { path = "../../logger" }
 ethcore-miner = { path = "../../miner" }

View File

@@ -125,9 +125,9 @@ impl SecretStoreEncryptor {
     // send HTTP request
     let method = if use_post {
-        Method::Post
+        Method::POST
     } else {
-        Method::Get
+        Method::GET
     };
     let url = Url::from_str(&url).map_err(|e| ErrorKind::Encrypt(e.to_string()))?;
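hyper 0.12 re-exports the `http` crate's `Method`, whose well-known methods are associated constants rather than the 0.11 enum variants, hence the `Method::Post` -> `Method::POST` changes throughout this commit. A minimal illustration (the `pick_method` helper is hypothetical):

extern crate hyper;

use hyper::Method;

// hyper 0.12: Method::POST / Method::GET are associated constants on
// http::Method, replacing the 0.11 enum variants Method::Post / Method::Get.
fn pick_method(use_post: bool) -> Method {
    if use_post { Method::POST } else { Method::GET }
}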

View File

@@ -8,14 +8,14 @@ authors = ["Parity Technologies <admin@parity.io>"]
 [dependencies]
 ethereum-types = "0.4"
 keccak-hash = "0.1"
-jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
-jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
-jsonrpc-tcp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
+jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
+jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
+jsonrpc-tcp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
 log = "0.4"
 parking_lot = "0.6"

 [dev-dependencies]
 env_logger = "0.5"
-tokio-core = "0.1"
+tokio = "0.1"
 tokio-io = "0.1"
 ethcore-logger = { path = "../../logger" }

View File

@@ -25,7 +25,7 @@ extern crate parking_lot;
 #[macro_use] extern crate log;
-#[cfg(test)] extern crate tokio_core;
+#[cfg(test)] extern crate tokio;
 #[cfg(test)] extern crate tokio_io;
 #[cfg(test)] extern crate ethcore_logger;
@@ -323,12 +323,10 @@ impl MetaExtractor<SocketMetadata> for PeerMetaExtractor {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use std::net::SocketAddr;
+    use std::net::{SocketAddr, Shutdown};
     use std::sync::Arc;
-    use tokio_core::reactor::{Core, Timeout};
-    use tokio_core::net::TcpStream;
-    use tokio_io::io;
+    use tokio::{io, runtime::Runtime, timer::timeout::{self, Timeout}, net::TcpStream};
     use jsonrpc_core::futures::{Future, future};
     use ethcore_logger::init_log;
@@ -342,23 +340,23 @@ mod tests {
     }

     fn dummy_request(addr: &SocketAddr, data: &str) -> Vec<u8> {
-        let mut core = Core::new().expect("Tokio Core should be created with no errors");
-        let mut buffer = vec![0u8; 2048];
+        let mut runtime = Runtime::new().expect("Tokio Runtime should be created with no errors");
         let mut data_vec = data.as_bytes().to_vec();
         data_vec.extend(b"\n");

-        let stream = TcpStream::connect(addr, &core.handle())
-            .and_then(|stream| {
-                io::write_all(stream, &data_vec)
+        let stream = TcpStream::connect(addr)
+            .and_then(move |stream| {
+                io::write_all(stream, data_vec)
             })
             .and_then(|(stream, _)| {
-                io::read(stream, &mut buffer)
+                stream.shutdown(Shutdown::Write).unwrap();
+                io::read_to_end(stream, Vec::with_capacity(2048))
             })
-            .and_then(|(_, read_buf, len)| {
-                future::ok(read_buf[0..len].to_vec())
+            .and_then(|(_stream, read_buf)| {
+                future::ok(read_buf)
             });
-        let result = core.run(stream).expect("Core should run with no errors");
+        let result = runtime.block_on(stream).expect("Runtime should run with no errors");
         result
     }
@@ -417,7 +415,7 @@ mod tests {
     }

     #[test]
-    fn receives_initial_paylaod() {
+    fn receives_initial_payload() {
        let addr = "127.0.0.1:19975".parse().unwrap();
        let _stratum = Stratum::start(&addr, DummyManager::new(), None).expect("There should be no error starting stratum");
        let request = r#"{"jsonrpc": "2.0", "method": "mining.subscribe", "params": [], "id": 2}"#;
@@ -460,40 +458,43 @@ mod tests {
            .to_vec();
        auth_request.extend(b"\n");

-       let mut core = Core::new().expect("Tokio Core should be created with no errors");
-       let timeout1 = Timeout::new(::std::time::Duration::from_millis(100), &core.handle())
-           .expect("There should be a timeout produced in message test");
-       let timeout2 = Timeout::new(::std::time::Duration::from_millis(100), &core.handle())
-           .expect("There should be a timeout produced in message test");
-       let mut buffer = vec![0u8; 2048];
-       let mut buffer2 = vec![0u8; 2048];
-       let stream = TcpStream::connect(&addr, &core.handle())
-           .and_then(|stream| {
-               io::write_all(stream, &auth_request)
+       let auth_response = "{\"jsonrpc\":\"2.0\",\"result\":true,\"id\":1}\n";
+
+       let mut runtime = Runtime::new().expect("Tokio Runtime should be created with no errors");
+       let read_buf0 = vec![0u8; auth_response.len()];
+       let read_buf1 = Vec::with_capacity(2048);
+       let stream = TcpStream::connect(&addr)
+           .and_then(move |stream| {
+               io::write_all(stream, auth_request)
            })
            .and_then(|(stream, _)| {
-               io::read(stream, &mut buffer)
+               io::read_exact(stream, read_buf0)
            })
-           .and_then(|(stream, _, _)| {
+           .map_err(|err| panic!("{:?}", err))
+           .and_then(move |(stream, read_buf0)| {
+               assert_eq!(String::from_utf8(read_buf0).unwrap(), auth_response);
                trace!(target: "stratum", "Received authorization confirmation");
-               timeout1.join(future::ok(stream))
+               Timeout::new(future::ok(stream), ::std::time::Duration::from_millis(100))
            })
-           .and_then(|(_, stream)| {
+           .map_err(|err: timeout::Error<()>| panic!("Timeout: {:?}", err))
+           .and_then(move |stream| {
                trace!(target: "stratum", "Pusing work to peers");
                stratum.push_work_all(r#"{ "00040008", "100500" }"#.to_owned())
                    .expect("Pushing work should produce no errors");
-               timeout2.join(future::ok(stream))
+               Timeout::new(future::ok(stream), ::std::time::Duration::from_millis(100))
            })
-           .and_then(|(_, stream)| {
+           .map_err(|err: timeout::Error<()>| panic!("Timeout: {:?}", err))
+           .and_then(|stream| {
                trace!(target: "stratum", "Ready to read work from server");
-               io::read(stream, &mut buffer2)
+               stream.shutdown(Shutdown::Write).unwrap();
+               io::read_to_end(stream, read_buf1)
            })
-           .and_then(|(_, read_buf, len)| {
+           .and_then(|(_, read_buf1)| {
                trace!(target: "stratum", "Received work from server");
-               future::ok(read_buf[0..len].to_vec())
+               future::ok(read_buf1)
            });
        let response = String::from_utf8(
-           core.run(stream).expect("Core should run with no errors")
+           runtime.block_on(stream).expect("Runtime should run with no errors")
        ).expect("Response should be utf-8");
        assert_eq!(
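Distilled from the test changes above, this is the tokio 0.1 client pattern that replaces `Core::run`: drive the whole chain with `Runtime::block_on`, and instead of `io::read` into a fixed buffer, half-close the socket and `read_to_end`. A minimal sketch (the server listening on `addr` is assumed to exist):

extern crate futures;
extern crate tokio;

use std::net::{Shutdown, SocketAddr};
use futures::Future;
use tokio::{io, net::TcpStream, runtime::Runtime};

fn request(addr: &SocketAddr, data: Vec<u8>) -> Vec<u8> {
    let mut runtime = Runtime::new().expect("runtime");
    let fut = TcpStream::connect(addr)
        .and_then(move |stream| io::write_all(stream, data))
        .and_then(|(stream, _)| {
            // Half-close our side so the peer sees EOF and the read below
            // terminates without a fixed-size buffer or a timeout.
            stream.shutdown(Shutdown::Write).expect("shutdown");
            io::read_to_end(stream, Vec::new())
        })
        .map(|(_stream, buf)| buf);
    runtime.block_on(fut).expect("request failed")
}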

View File

@@ -6,7 +6,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
 [dependencies]
 byteorder = "1.0"
 edit-distance = "2.0"
-parity-crypto = "0.1"
+parity-crypto = "0.2"
 eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" }
 ethereum-types = "0.4"
 lazy_static = "1.0"

View File

@@ -16,7 +16,7 @@ tiny-keccak = "1.4"
 time = "0.1.34"
 itertools = "0.5"
 parking_lot = "0.6"
-parity-crypto = "0.1"
+parity-crypto = "0.2"
 ethereum-types = "0.4"
 dir = { path = "../util/dir" }
 smallvec = "0.6"

View File

@@ -8,7 +8,6 @@ authors = ["Parity Technologies <admin@parity.io>"]
 [dependencies]
 futures = "0.1"
-futures-cpupool = "0.1"
 log = "0.4"
 mime = "0.3"
 mime_guess = "2.0.0-alpha.2"
@@ -17,7 +16,7 @@ rustc-hex = "1.0"
 fetch = { path = "../util/fetch" }
 parity-bytes = "0.1"
 ethereum-types = "0.4"
-parity-reactor = { path = "../util/reactor" }
+parity-runtime = { path = "../util/runtime" }
 keccak-hash = "0.1"
 registrar = { path = "../registrar" }
@@ -26,6 +25,5 @@ ethabi-derive = "6.0"
 ethabi-contract = "6.0"

 [dev-dependencies]
-hyper = "0.11"
 parking_lot = "0.6"
 fake-fetch = { path = "../util/fake-fetch" }

View File

@@ -23,9 +23,8 @@ use std::path::PathBuf;
 use hash::keccak_buffer;
 use fetch::{self, Fetch};
-use futures_cpupool::CpuPool;
 use futures::{Future, IntoFuture};
-use parity_reactor::Remote;
+use parity_runtime::Executor;
 use urlhint::{URLHintContract, URLHint, URLHintResult};
 use registrar::{RegistrarClient, Asynchronous};
 use ethereum_types::H256;
@@ -109,21 +108,19 @@ fn validate_hash(path: PathBuf, hash: H256, body: fetch::BodyReader) -> Result<P
 /// Default Hash-fetching client using on-chain contract to resolve hashes to URLs.
 pub struct Client<F: Fetch + 'static = fetch::Client> {
-    pool: CpuPool,
     contract: URLHintContract,
     fetch: F,
-    remote: Remote,
+    executor: Executor,
     random_path: Arc<Fn() -> PathBuf + Sync + Send>,
 }

 impl<F: Fetch + 'static> Client<F> {
     /// Creates new instance of the `Client` given on-chain contract client, fetch service and task runner.
-    pub fn with_fetch(contract: Arc<RegistrarClient<Call=Asynchronous>>, pool: CpuPool, fetch: F, remote: Remote) -> Self {
+    pub fn with_fetch(contract: Arc<RegistrarClient<Call=Asynchronous>>, fetch: F, executor: Executor) -> Self {
        Client {
-           pool,
            contract: URLHintContract::new(contract),
            fetch: fetch,
-           remote: remote,
+           executor: executor,
            random_path: Arc::new(random_temp_path),
        }
     }
@@ -135,7 +132,6 @@ impl<F: Fetch + 'static> HashFetch for Client<F> {
        let random_path = self.random_path.clone();
        let remote_fetch = self.fetch.clone();
-       let pool = self.pool.clone();
        let future = self.contract.resolve(hash)
            .map_err(|e| { warn!("Error resolving URL: {}", e); Error::NoResolution })
            .and_then(|maybe_url| maybe_url.ok_or(Error::NoResolution))
@@ -162,7 +158,7 @@ impl<F: Fetch + 'static> HashFetch for Client<F> {
                    Ok(response)
                }
            })
-           .and_then(move |response| pool.spawn_fn(move || {
+           .and_then(move |response| {
                debug!(target: "fetch", "Content fetched, validating hash ({:?})", hash);
                let path = random_path();
                let res = validate_hash(path.clone(), hash, fetch::BodyReader::new(response));
@@ -172,10 +168,10 @@ impl<F: Fetch + 'static> HashFetch for Client<F> {
                    let _ = fs::remove_file(&path);
                }
                res
-           }))
+           })
            .then(move |res| { on_done(res); Ok(()) as Result<(), ()> });

-       self.remote.spawn(future);
+       self.executor.spawn(future);
     }
 }
@@ -197,8 +193,7 @@ mod tests {
     use rustc_hex::FromHex;
     use std::sync::{Arc, mpsc};
     use parking_lot::Mutex;
-    use futures_cpupool::CpuPool;
-    use parity_reactor::Remote;
+    use parity_runtime::Executor;
     use urlhint::tests::{FakeRegistrar, URLHINT};
     use super::{Error, Client, HashFetch, random_temp_path};
@@ -216,7 +211,7 @@ mod tests {
        // given
        let contract = Arc::new(FakeRegistrar::new());
        let fetch = FakeFetch::new(None::<usize>);
-       let client = Client::with_fetch(contract.clone(), CpuPool::new(1), fetch, Remote::new_sync());
+       let client = Client::with_fetch(contract.clone(), fetch, Executor::new_sync());

        // when
        let (tx, rx) = mpsc::channel();
@@ -234,7 +229,7 @@ mod tests {
        // given
        let registrar = Arc::new(registrar());
        let fetch = FakeFetch::new(None::<usize>);
-       let client = Client::with_fetch(registrar.clone(), CpuPool::new(1), fetch, Remote::new_sync());
+       let client = Client::with_fetch(registrar.clone(), fetch, Executor::new_sync());

        // when
        let (tx, rx) = mpsc::channel();
@@ -252,7 +247,7 @@ mod tests {
        // given
        let registrar = Arc::new(registrar());
        let fetch = FakeFetch::new(Some(1));
-       let mut client = Client::with_fetch(registrar.clone(), CpuPool::new(1), fetch, Remote::new_sync());
+       let mut client = Client::with_fetch(registrar.clone(), fetch, Executor::new_sync());
        let path = random_temp_path();
        let path2 = path.clone();
        client.random_path = Arc::new(move || path2.clone());
@@ -275,7 +270,7 @@ mod tests {
        // given
        let registrar = Arc::new(registrar());
        let fetch = FakeFetch::new(Some(1));
-       let client = Client::with_fetch(registrar.clone(), CpuPool::new(1), fetch, Remote::new_sync());
+       let client = Client::with_fetch(registrar.clone(), fetch, Executor::new_sync());

        // when
        let (tx, rx) = mpsc::channel();
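The `hash-fetch` change above is the general CpuPool-removal pattern: work that used to be shipped to a `CpuPool` via `spawn_fn` now simply runs inline in the future chain, and the composed future is handed to the shared executor. A minimal sketch with futures 0.1 (function names hypothetical):

extern crate futures;

use futures::{future, Future};

// Before: .and_then(move |body| pool.spawn_fn(move || validate(body)))
// After: the validation runs on whichever runtime thread polls the chain.
fn fetch_then_validate<F>(fetch_body: F) -> impl Future<Item = bool, Error = ()>
where
    F: Future<Item = Vec<u8>, Error = ()>,
{
    fetch_body.and_then(|body| future::ok(!body.is_empty()))
}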

View File

@@ -25,11 +25,10 @@ extern crate ethabi;
 extern crate parity_bytes as bytes;
 extern crate ethereum_types;
 extern crate futures;
-extern crate futures_cpupool;
 extern crate keccak_hash as hash;
 extern crate mime;
 extern crate mime_guess;
-extern crate parity_reactor;
+extern crate parity_runtime;
 extern crate rand;
 extern crate rustc_hex;
 extern crate registrar;
@@ -43,8 +42,6 @@ extern crate ethabi_contract;
 #[cfg(test)]
 extern crate parking_lot;
 #[cfg(test)]
-extern crate hyper;
-#[cfg(test)]
 extern crate fake_fetch;

 mod client;

View File

@@ -9,11 +9,11 @@ authors = ["Parity Technologies <admin@parity.io>"]
 ethcore = { path = "../ethcore" }
 parity-bytes = "0.1"
 ethereum-types = "0.4"
-jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
-jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
+jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
+jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
 rlp = { version = "0.3.0", features = ["ethereum"] }
-cid = "0.2"
-multihash = "0.7"
+cid = "0.3"
+multihash = "0.8"
 unicase = "2.0"

 [dev-dependencies]

View File

@@ -30,6 +30,42 @@ pub enum ServerError {
    InvalidInterface
 }

+/// Handle IO errors (ports taken when starting the server).
+impl From<::std::io::Error> for ServerError {
+    fn from(err: ::std::io::Error) -> ServerError {
+        ServerError::IoError(err)
+    }
+}
+
+impl From<http::hyper::error::Error> for ServerError {
+    fn from(err: http::hyper::error::Error) -> ServerError {
+        ServerError::Other(err)
+    }
+}
+
+impl From<ServerError> for String {
+    fn from(err: ServerError) -> String {
+        match err {
+            ServerError::IoError(err) => err.to_string(),
+            ServerError::Other(err) => err.to_string(),
+            ServerError::InvalidInterface => "Invalid --ipfs-api-interface parameter".into(),
+        }
+    }
+}
+
+impl ::std::fmt::Display for ServerError {
+    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+        match self {
+            ServerError::IoError(err) => write!(f, "Io Error: {}", err),
+            ServerError::Other(err) => write!(f, "Other error: {}", err),
+            ServerError::InvalidInterface => write!(f, "Invalid interface"),
+        }
+    }
+}
+
+impl ::std::error::Error for ServerError {}
+
 #[derive(Debug, PartialEq)]
 pub enum Error {
    CidParsingFailed,
@@ -71,27 +107,4 @@ impl From<multihash::Error> for Error {
    fn from(_: multihash::Error) -> Error {
        Error::CidParsingFailed
    }
 }
-
-/// Handle IO errors (ports taken when starting the server).
-impl From<::std::io::Error> for ServerError {
-    fn from(err: ::std::io::Error) -> ServerError {
-        ServerError::IoError(err)
-    }
-}
-
-impl From<http::hyper::error::Error> for ServerError {
-    fn from(err: http::hyper::error::Error) -> ServerError {
-        ServerError::Other(err)
-    }
-}
-
-impl From<ServerError> for String {
-    fn from(err: ServerError) -> String {
-        match err {
-            ServerError::IoError(err) => err.to_string(),
-            ServerError::Other(err) => err.to_string(),
-            ServerError::InvalidInterface => "Invalid --ipfs-api-interface parameter".into(),
-        }
-    }
-}
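The `Display`/`Error` impls added above follow the standard pattern: derive or hand-write `Debug`, write `Display`, then provide an empty `std::error::Error` impl (all of its methods have defaults). A self-contained sketch of the same shape:

use std::fmt;

#[derive(Debug)]
enum ServerError {
    InvalidInterface,
}

impl fmt::Display for ServerError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            ServerError::InvalidInterface => write!(f, "Invalid interface"),
        }
    }
}

// Display + Debug are all that's required; the default method bodies suffice.
impl std::error::Error for ServerError {}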

View File

@@ -35,10 +35,9 @@ use std::net::{SocketAddr, IpAddr};
 use core::futures::future::{self, FutureResult};
 use core::futures::{self, Future};
 use ethcore::client::BlockChainClient;
-use http::hyper::header::{self, Vary, ContentType};
-use http::hyper::{Method, StatusCode};
-use http::hyper::{self, server};
-use unicase::Ascii;
+use http::hyper::{self, server, Method, StatusCode, Body,
+    header::{self, HeaderValue},
+};

 use error::ServerError;
 use route::Out;
@@ -67,9 +66,9 @@ impl IpfsHandler {
            client: client,
        }
    }

-   pub fn on_request(&self, req: hyper::Request) -> (Option<header::AccessControlAllowOrigin>, Out) {
+   pub fn on_request(&self, req: hyper::Request<Body>) -> (Option<HeaderValue>, Out) {
        match *req.method() {
-           Method::Get | Method::Post => {},
+           Method::GET | Method::POST => {},
            _ => return (None, Out::Bad("Invalid Request")),
        }
@@ -77,8 +76,8 @@ impl IpfsHandler {
            return (None, Out::Bad("Disallowed Host header"));
        }

-       let cors_header = http::cors_header(&req, &self.cors_domains);
-       if cors_header == http::CorsHeader::Invalid {
+       let cors_header = http::cors_allow_origin(&req, &self.cors_domains);
+       if cors_header == http::AllowCors::Invalid {
            return (None, Out::Bad("Disallowed Origin header"));
        }
@@ -88,39 +87,39 @@ impl IpfsHandler {
    }
 }

-impl server::Service for IpfsHandler {
-   type Request = hyper::Request;
-   type Response = hyper::Response;
+impl hyper::service::Service for IpfsHandler {
+   type ReqBody = Body;
+   type ResBody = Body;
    type Error = hyper::Error;
-   type Future = FutureResult<hyper::Response, hyper::Error>;
+   type Future = FutureResult<hyper::Response<Body>, Self::Error>;

-   fn call(&self, request: Self::Request) -> Self::Future {
+   fn call(&mut self, request: hyper::Request<Self::ReqBody>) -> Self::Future {
        let (cors_header, out) = self.on_request(request);

        let mut res = match out {
            Out::OctetStream(bytes) => {
-               hyper::Response::new()
-                   .with_status(StatusCode::Ok)
-                   .with_header(ContentType::octet_stream())
-                   .with_body(bytes)
+               hyper::Response::builder()
+                   .status(StatusCode::OK)
+                   .header("content-type", HeaderValue::from_static("application/octet-stream"))
+                   .body(bytes.into())
            },
            Out::NotFound(reason) => {
-               hyper::Response::new()
-                   .with_status(StatusCode::NotFound)
-                   .with_header(ContentType::plaintext())
-                   .with_body(reason)
+               hyper::Response::builder()
+                   .status(StatusCode::NOT_FOUND)
+                   .header("content-type", HeaderValue::from_static("text/plain; charset=utf-8"))
+                   .body(reason.into())
            },
            Out::Bad(reason) => {
-               hyper::Response::new()
-                   .with_status(StatusCode::BadRequest)
-                   .with_header(ContentType::plaintext())
-                   .with_body(reason)
+               hyper::Response::builder()
+                   .status(StatusCode::BAD_REQUEST)
+                   .header("content-type", HeaderValue::from_static("text/plain; charset=utf-8"))
+                   .body(reason.into())
            }
-       };
+       }.expect("Response builder: Parsing 'content-type' header name will not fail; qed");

        if let Some(cors_header) = cors_header {
-           res.headers_mut().set(cors_header);
-           res.headers_mut().set(Vary::Items(vec![Ascii::new("Origin".into())]));
+           res.headers_mut().append(header::ACCESS_CONTROL_ALLOW_ORIGIN, cors_header);
+           res.headers_mut().append(header::VARY, HeaderValue::from_static("origin"));
        }

        future::ok(res)
@@ -164,23 +163,32 @@ pub fn start_server(
    let hosts: DomainsValidation<_> = hosts.map(move |hosts| include_current_interface(hosts, interface, port)).into();

    let (close, shutdown_signal) = futures::sync::oneshot::channel::<()>();
-   let (tx, rx) = mpsc::sync_channel(1);
+   let (tx, rx) = mpsc::sync_channel::<Result<(), ServerError>>(1);
    let thread = thread::spawn(move || {
        let send = |res| tx.send(res).expect("rx end is never dropped; qed");

-       let server = match server::Http::new().bind(&addr, move || {
-           Ok(IpfsHandler::new(cors.clone(), hosts.clone(), client.clone()))
-       }) {
-           Ok(server) => {
-               send(Ok(()));
-               server
-           },
+       let server_bldr = match server::Server::try_bind(&addr) {
+           Ok(s) => s,
            Err(err) => {
-               send(Err(err));
+               send(Err(ServerError::from(err)));
                return;
            }
        };

-       let _ = server.run_until(shutdown_signal.map_err(|_| {}));
+       let new_service = move || {
+           Ok::<_, ServerError>(
+               IpfsHandler::new(cors.clone(), hosts.clone(), client.clone())
+           )
+       };
+
+       let server = server_bldr
+           .serve(new_service)
+           .map_err(|_| ())
+           .select(shutdown_signal.map_err(|_| ()))
+           .then(|_| Ok(()));
+
+       hyper::rt::run(server);
+       send(Ok(()));
    });

    // Wait for server to start successfuly.
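The shutdown wiring above replaces hyper 0.11's `run_until`: the server future is raced against a oneshot receiver with `select`, so firing the sender (or dropping it) stops the event loop. A reduced sketch of just that combinator shape, using futures 0.1 (the helper name is hypothetical):

extern crate futures;

use futures::{sync::oneshot, Future};

// Race `server` against a shutdown signal; whichever completes first wins,
// and the combined future resolves cleanly either way.
fn serve_until_shutdown<S>(server: S) -> (oneshot::Sender<()>, Box<Future<Item = (), Error = ()> + Send>)
where
    S: Future<Item = (), Error = ()> + Send + 'static,
{
    let (tx, rx) = oneshot::channel::<()>();
    let fut = server
        .select(rx.map_err(|_| ()))
        .then(|_| Ok(()));
    (tx, Box::new(fut))
}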

View File

@@ -10,8 +10,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
 # Only work_notify, consider a separate crate
 ethash = { path = "../ethash", optional = true }
 fetch = { path = "../util/fetch", optional = true }
-hyper = { version = "0.11", optional = true }
-parity-reactor = { path = "../util/reactor", optional = true }
+hyper = { version = "0.12", optional = true }
 url = { version = "1", optional = true }

 # Miner
@@ -20,7 +19,7 @@ error-chain = "0.12"
 ethcore-transaction = { path = "../ethcore/transaction" }
 ethereum-types = "0.4"
 futures = "0.1"
-futures-cpupool = "0.1"
+parity-runtime = { path = "../util/runtime" }
 heapsize = "0.4"
 keccak-hash = "0.1"
 linked-hash-map = "0.5"
@@ -37,4 +36,4 @@ ethkey = { path = "../ethkey" }
 rustc-hex = "1.0"

 [features]
-work-notify = ["ethash", "fetch", "hyper", "parity-reactor", "url"]
+work-notify = ["ethash", "fetch", "hyper", "url"]

View File

@@ -20,7 +20,7 @@ use std::time::{Instant, Duration};

 use ansi_term::Colour;
 use ethereum_types::U256;
-use futures_cpupool::CpuPool;
+use parity_runtime::Executor;
 use price_info::{Client as PriceInfoClient, PriceInfo};
 use price_info::fetch::Client as FetchClient;
@@ -43,7 +43,7 @@ pub struct GasPriceCalibrator {

 impl GasPriceCalibrator {
    /// Create a new gas price calibrator.
-   pub fn new(options: GasPriceCalibratorOptions, fetch: FetchClient, p: CpuPool) -> GasPriceCalibrator {
+   pub fn new(options: GasPriceCalibratorOptions, fetch: FetchClient, p: Executor) -> GasPriceCalibrator {
        GasPriceCalibrator {
            options: options,
            next_calibration: Instant::now(),

View File

@@ -23,7 +23,7 @@ extern crate ansi_term;
 extern crate ethcore_transaction as transaction;
 extern crate ethereum_types;
 extern crate futures;
-extern crate futures_cpupool;
+extern crate parity_runtime;
 extern crate heapsize;
 extern crate keccak_hash as hash;
 extern crate linked_hash_map;

View File

@@ -18,18 +18,19 @@

 extern crate ethash;
 extern crate fetch;
-extern crate parity_reactor;
+extern crate parity_runtime;
 extern crate url;
 extern crate hyper;

 use self::fetch::{Fetch, Request, Client as FetchClient, Method};
-use self::parity_reactor::Remote;
+use self::parity_runtime::Executor;
 use self::ethash::SeedHashCompute;
 use self::url::Url;
-use self::hyper::header::ContentType;
+use self::hyper::header::{self, HeaderValue};

 use ethereum_types::{H256, U256};
 use parking_lot::Mutex;
 use futures::Future;

 /// Trait for notifying about new mining work
@@ -42,13 +43,13 @@ pub trait NotifyWork : Send + Sync {
 pub struct WorkPoster {
    urls: Vec<Url>,
    client: FetchClient,
-   remote: Remote,
+   executor: Executor,
    seed_compute: Mutex<SeedHashCompute>,
 }

 impl WorkPoster {
    /// Create new `WorkPoster`.
-   pub fn new(urls: &[String], fetch: FetchClient, remote: Remote) -> Self {
+   pub fn new(urls: &[String], fetch: FetchClient, executor: Executor) -> Self {
        let urls = urls.into_iter().filter_map(|u| {
            match Url::parse(u) {
                Ok(url) => Some(url),
@@ -60,7 +61,7 @@ impl WorkPoster {
        }).collect();
        WorkPoster {
            client: fetch,
-           remote: remote,
+           executor: executor,
            urls: urls,
            seed_compute: Mutex::new(SeedHashCompute::default()),
        }
@@ -80,9 +81,9 @@ impl NotifyWork for WorkPoster {
        for u in &self.urls {
            let u = u.clone();
-           self.remote.spawn(self.client.fetch(
-               Request::new(u.clone(), Method::Post)
-                   .with_header(ContentType::json())
+           self.executor.spawn(self.client.fetch(
+               Request::new(u.clone(), Method::POST)
+                   .with_header(header::CONTENT_TYPE, HeaderValue::from_static("application/json"))
                    .with_body(body.clone()), Default::default()
            ).map_err(move |e| {
                warn!("Error sending HTTP notification to {} : {}, retrying", u, e);
View File

@ -25,7 +25,6 @@ extern crate clap;
extern crate dir; extern crate dir;
extern crate env_logger; extern crate env_logger;
extern crate futures; extern crate futures;
extern crate futures_cpupool;
extern crate atty; extern crate atty;
extern crate jsonrpc_core; extern crate jsonrpc_core;
extern crate num_cpus; extern crate num_cpus;
@ -60,7 +59,7 @@ extern crate kvdb;
extern crate parity_hash_fetch as hash_fetch; extern crate parity_hash_fetch as hash_fetch;
extern crate parity_ipfs_api; extern crate parity_ipfs_api;
extern crate parity_local_store as local_store; extern crate parity_local_store as local_store;
extern crate parity_reactor; extern crate parity_runtime;
extern crate parity_rpc; extern crate parity_rpc;
extern crate parity_updater as updater; extern crate parity_updater as updater;
extern crate parity_version; extern crate parity_version;

View File

@@ -29,7 +29,7 @@ use light::TransactionQueue;

 use futures::{future, Future};

-use parity_reactor::Remote;
+use parity_runtime::Executor;

 use parking_lot::RwLock;
@@ -50,8 +50,8 @@ pub struct QueueCull<T> {
    pub on_demand: Arc<OnDemand>,
    /// The transaction queue.
    pub txq: Arc<RwLock<TransactionQueue>>,
-   /// Event loop remote.
-   pub remote: Remote,
+   /// Event loop executor.
+   pub executor: Executor,
 }

 impl<T: LightChainClient + 'static> IoHandler<ClientIoMessage> for QueueCull<T> {
@@ -70,7 +70,7 @@ impl<T: LightChainClient + 'static> IoHandler<ClientIoMessage> for QueueCull<T>
        let start_nonce = self.client.engine().account_start_nonce(best_header.number());

        info!(target: "cull", "Attempting to cull queued transactions from {} senders.", senders.len());
-       self.remote.spawn_with_timeout(move |_| {
+       self.executor.spawn_with_timeout(move || {
            let maybe_fetching = sync.with_context(move |ctx| {
                // fetch the nonce of each sender in the queue.
                let nonce_reqs = senders.iter()

View File

@@ -21,7 +21,7 @@ use ethcore::client::Mode;
 use ethcore::ethereum;
 use ethcore::spec::{Spec, SpecParams};
 use ethereum_types::{U256, Address};
-use futures_cpupool::CpuPool;
+use parity_runtime::Executor;
 use hash_fetch::fetch::Client as FetchClient;
 use journaldb::Algorithm;
 use miner::gas_pricer::GasPricer;
@@ -256,7 +256,7 @@ impl Default for GasPricerConfig {
 }

 impl GasPricerConfig {
-   pub fn to_gas_pricer(&self, fetch: FetchClient, p: CpuPool) -> GasPricer {
+   pub fn to_gas_pricer(&self, fetch: FetchClient, p: Executor) -> GasPricer {
        match *self {
            GasPricerConfig::Fixed(u) => GasPricer::Fixed(u),
            GasPricerConfig::Calibrated { usd_per_tx, recalibration_period, .. } => {

View File

@@ -23,14 +23,13 @@ use dir::default_data_path;
 use dir::helpers::replace_home;
 use helpers::parity_ipc_path;
 use jsonrpc_core::MetaIoHandler;
-use parity_reactor::TokioRemote;
+use parity_runtime::Executor;
 use parity_rpc::informant::{RpcStats, Middleware};
 use parity_rpc::{self as rpc, Metadata, DomainsValidation};
 use rpc_apis::{self, ApiSet};

 pub use parity_rpc::{IpcServer, HttpServer, RequestMiddleware};
 pub use parity_rpc::ws::Server as WsServer;
-pub use parity_rpc::informant::CpuPool;

 pub const DAPPS_DOMAIN: &'static str = "web3.site";
@@ -134,9 +133,8 @@ fn address(enabled: bool, bind_iface: &str, bind_port: u16, hosts: &Option<Vec<S
 pub struct Dependencies<D: rpc_apis::Dependencies> {
    pub apis: Arc<D>,
-   pub remote: TokioRemote,
+   pub executor: Executor,
    pub stats: Arc<RpcStats>,
-   pub pool: Option<CpuPool>,
 }

 pub fn new_ws<D: rpc_apis::Dependencies>(
@@ -155,7 +153,7 @@ pub fn new_ws<D: rpc_apis::Dependencies>(
    let handler = {
        let mut handler = MetaIoHandler::with_middleware((
            rpc::WsDispatcher::new(full_handler),
-           Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
+           Middleware::new(deps.stats.clone(), deps.apis.activity_notifier())
        ));
        let apis = conf.apis.list_apis();
        deps.apis.extend_with_set(&mut handler, &apis);
@@ -163,7 +161,6 @@ pub fn new_ws<D: rpc_apis::Dependencies>(
        handler
    };

-   let remote = deps.remote.clone();
    let allowed_origins = into_domains(with_domain(conf.origins, domain, &None));
    let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into())));
@@ -178,7 +175,6 @@ pub fn new_ws<D: rpc_apis::Dependencies>(
    let start_result = rpc::start_ws(
        &addr,
        handler,
-       remote.clone(),
        allowed_origins,
        allowed_hosts,
        conf.max_connections,
@@ -210,7 +206,6 @@ pub fn new_http<D: rpc_apis::Dependencies>(
    let url = format!("{}:{}", conf.interface, conf.port);
    let addr = url.parse().map_err(|_| format!("Invalid {} listen host/port given: {}", id, url))?;
    let handler = setup_apis(conf.apis, deps);
-   let remote = deps.remote.clone();

    let cors_domains = into_domains(conf.cors);
    let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into())));
@@ -220,7 +215,6 @@ pub fn new_http<D: rpc_apis::Dependencies>(
        cors_domains,
        allowed_hosts,
        handler,
-       remote,
        rpc::RpcExtractor,
        conf.server_threads,
        conf.max_payload,
@@ -244,7 +238,6 @@ pub fn new_ipc<D: rpc_apis::Dependencies>(
    }

    let handler = setup_apis(conf.apis, dependencies);
-   let remote = dependencies.remote.clone();
    let path = PathBuf::from(&conf.socket_addr);
    // Make sure socket file can be created on unix-like OS.
    // Windows pipe paths are not on the FS.
@@ -255,7 +248,7 @@ pub fn new_ipc<D: rpc_apis::Dependencies>(
        }
    }

-   match rpc::start_ipc(&conf.socket_addr, handler, remote, rpc::RpcExtractor) {
+   match rpc::start_ipc(&conf.socket_addr, handler, rpc::RpcExtractor) {
        Ok(server) => Ok(Some(server)),
        Err(io_error) => Err(format!("IPC error: {}", io_error)),
    }
@@ -294,7 +287,7 @@ pub fn setup_apis<D>(apis: ApiSet, deps: &Dependencies<D>) -> MetaIoHandler<Meta
    where D: rpc_apis::Dependencies
 {
    let mut handler = MetaIoHandler::with_middleware(
-       Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
+       Middleware::new(deps.stats.clone(), deps.apis.activity_notifier())
    );
    let apis = apis.list_apis();
    deps.apis.extend_with_set(&mut handler, &apis);

View File

@@ -28,13 +28,12 @@ use ethcore::miner::Miner;
 use ethcore::snapshot::SnapshotService;
 use ethcore_logger::RotatingLogger;
 use sync::{ManageNetwork, SyncProvider, LightSync};
-use futures_cpupool::CpuPool;
 use hash_fetch::fetch::Client as FetchClient;
 use jsonrpc_core::{self as core, MetaIoHandler};
 use light::client::LightChainClient;
 use light::{TransactionQueue as LightTransactionQueue, Cache as LightDataCache};
 use miner::external::ExternalMiner;
-use parity_reactor;
+use parity_runtime::Executor;
 use parity_rpc::dispatch::{FullDispatcher, LightDispatcher};
 use parity_rpc::informant::{ActivityNotifier, ClientNotifier};
 use parity_rpc::{Metadata, NetworkSettings, Host};
@@ -231,8 +230,7 @@ pub struct FullDependencies {
    pub geth_compatibility: bool,
    pub ws_address: Option<Host>,
    pub fetch: FetchClient,
-   pub pool: CpuPool,
-   pub remote: parity_reactor::Remote,
+   pub executor: Executor,
    pub whisper_rpc: Option<::whisper::RpcFactory>,
    pub gas_price_percentile: usize,
    pub poll_lifetime: u32,
@@ -253,7 +251,7 @@ impl FullDependencies {
                let deps = &$deps;
                let dispatcher = FullDispatcher::new(deps.client.clone(), deps.miner.clone(), $nonces, deps.gas_price_percentile);
                if deps.signer_service.is_enabled() {
-                   $handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, deps.remote.clone(), &deps.secret_store)))
+                   $handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, deps.executor.clone(), &deps.secret_store)))
                } else {
                    $handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new(&deps.secret_store, dispatcher)))
                }
@@ -261,7 +259,7 @@ impl FullDependencies {
            }
        }

-       let nonces = Arc::new(Mutex::new(dispatch::Reservations::with_pool(self.pool.clone())));
+       let nonces = Arc::new(Mutex::new(dispatch::Reservations::new(self.executor.clone())));
        let dispatcher = FullDispatcher::new(
            self.client.clone(),
            self.miner.clone(),
@@ -306,7 +304,7 @@ impl FullDependencies {
                },
                Api::EthPubSub => {
                    if !for_generic_pubsub {
-                       let client = EthPubSubClient::new(self.client.clone(), self.remote.clone());
+                       let client = EthPubSubClient::new(self.client.clone(), self.executor.clone());
                        let h = client.handler();
                        self.miner.add_transactions_listener(Box::new(move |hashes| if let Some(h) = h.upgrade() {
                            h.notify_new_transactions(hashes);
@@ -322,7 +320,7 @@ impl FullDependencies {
                    handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate());
                },
                Api::Signer => {
-                   handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service, self.remote.clone()).to_delegate());
+                   handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service, self.executor.clone()).to_delegate());
                },
                Api::Parity => {
                    let signer = match self.signer_service.is_enabled() {
@@ -351,7 +349,7 @@ impl FullDependencies {
                        let mut rpc = MetaIoHandler::default();
                        let apis = ApiSet::List(apis.clone()).retain(ApiSet::PubSub).list_apis();
                        self.extend_api(&mut rpc, &apis, true);
-                       handler.extend_with(PubSubClient::new(rpc, self.remote.clone()).to_delegate());
+                       handler.extend_with(PubSubClient::new(rpc, self.executor.clone()).to_delegate());
                    }
                },
                Api::ParityAccounts => {
@@ -364,7 +362,6 @@ impl FullDependencies {
                        &self.updater,
                        &self.net_service,
                        self.fetch.clone(),
-                       self.pool.clone(),
                    ).to_delegate())
                },
                Api::Traces => {
@@ -440,9 +437,8 @@ pub struct LightDependencies<T> {
    pub transaction_queue: Arc<RwLock<LightTransactionQueue>>,
    pub ws_address: Option<Host>,
    pub fetch: FetchClient,
-   pub pool: CpuPool,
    pub geth_compatibility: bool,
-   pub remote: parity_reactor::Remote,
+   pub executor: Executor,
    pub whisper_rpc: Option<::whisper::RpcFactory>,
    pub private_tx_service: Option<Arc<PrivateTransactionManager>>,
    pub gas_price_percentile: usize,
@@ -464,7 +460,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
            self.on_demand.clone(),
            self.cache.clone(),
            self.transaction_queue.clone(),
-           Arc::new(Mutex::new(dispatch::Reservations::with_pool(self.pool.clone()))),
+           Arc::new(Mutex::new(dispatch::Reservations::new(self.executor.clone()))),
            self.gas_price_percentile,
        );
@@ -476,7 +472,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
                let secret_store = deps.secret_store.clone();
                if deps.signer_service.is_enabled() {
                    $handler.extend_with($namespace::to_delegate(
-                       SigningQueueClient::new(&deps.signer_service, dispatcher, deps.remote.clone(), &secret_store)
+                       SigningQueueClient::new(&deps.signer_service, dispatcher, deps.executor.clone(), &secret_store)
                    ))
                } else {
                    $handler.extend_with(
@@ -522,7 +518,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
                        self.on_demand.clone(),
                        self.sync.clone(),
                        self.cache.clone(),
-                       self.remote.clone(),
+                       self.executor.clone(),
                        self.gas_price_percentile,
                    );
                    self.client.add_listener(client.handler() as Weak<_>);
@@ -538,7 +534,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
                    handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate());
                },
                Api::Signer => {
-                   handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service, self.remote.clone()).to_delegate());
+                   handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service, self.executor.clone()).to_delegate());
                },
                Api::Parity => {
                    let signer = match self.signer_service.is_enabled() {
@@ -565,7 +561,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
                        let mut rpc = MetaIoHandler::default();
                        let apis = ApiSet::List(apis.clone()).retain(ApiSet::PubSub).list_apis();
                        self.extend_api(&mut rpc, &apis, true);
-                       handler.extend_with(PubSubClient::new(rpc, self.remote.clone()).to_delegate());
+                       handler.extend_with(PubSubClient::new(rpc, self.executor.clone()).to_delegate());
                    }
                },
                Api::ParityAccounts => {
@@ -575,7 +571,6 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
                    handler.extend_with(light::ParitySetClient::new(
                        self.sync.clone(),
                        self.fetch.clone(),
-                       self.pool.clone(),
                    ).to_delegate())
                },
                Api::Traces => {

View File

@ -34,14 +34,13 @@ use ethereum_types::Address;
use sync::{self, SyncConfig}; use sync::{self, SyncConfig};
use miner::work_notify::WorkPoster; use miner::work_notify::WorkPoster;
use futures::IntoFuture; use futures::IntoFuture;
use futures_cpupool::CpuPool;
use hash_fetch::{self, fetch}; use hash_fetch::{self, fetch};
use informant::{Informant, LightNodeInformantData, FullNodeInformantData}; use informant::{Informant, LightNodeInformantData, FullNodeInformantData};
use journaldb::Algorithm; use journaldb::Algorithm;
use light::Cache as LightDataCache; use light::Cache as LightDataCache;
use miner::external::ExternalMiner; use miner::external::ExternalMiner;
use node_filter::NodeFilter; use node_filter::NodeFilter;
use parity_reactor::EventLoop; use parity_runtime::Runtime;
use parity_rpc::{Origin, Metadata, NetworkSettings, informant, is_major_importing}; use parity_rpc::{Origin, Metadata, NetworkSettings, informant, is_major_importing};
use updater::{UpdatePolicy, Updater}; use updater::{UpdatePolicy, Updater};
use parity_version::version; use parity_version::version;
@ -270,7 +269,7 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
*sync_handle.write() = Arc::downgrade(&light_sync); *sync_handle.write() = Arc::downgrade(&light_sync);
// spin up event loop // spin up event loop
let event_loop = EventLoop::spawn(); let runtime = Runtime::with_default_thread_count();
// queue cull service. // queue cull service.
let queue_cull = Arc::new(::light_helpers::QueueCull { let queue_cull = Arc::new(::light_helpers::QueueCull {
@ -278,7 +277,7 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
sync: light_sync.clone(), sync: light_sync.clone(),
on_demand: on_demand.clone(), on_demand: on_demand.clone(),
txq: txq.clone(), txq: txq.clone(),
remote: event_loop.remote(), executor: runtime.executor(),
}); });
service.register_handler(queue_cull).map_err(|e| format!("Error attaching service: {:?}", e))?; service.register_handler(queue_cull).map_err(|e| format!("Error attaching service: {:?}", e))?;
@ -286,8 +285,6 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
// start the network. // start the network.
light_sync.start_network(); light_sync.start_network();
let cpu_pool = CpuPool::new(4);
// fetch service // fetch service
let fetch = fetch::Client::new(FETCH_LIGHT_NUM_DNS_THREADS).map_err(|e| format!("Error starting fetch client: {:?}", e))?; let fetch = fetch::Client::new(FETCH_LIGHT_NUM_DNS_THREADS).map_err(|e| format!("Error starting fetch client: {:?}", e))?;
let passwords = passwords_from_files(&cmd.acc_conf.password_files)?; let passwords = passwords_from_files(&cmd.acc_conf.password_files)?;
@ -313,9 +310,8 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
transaction_queue: txq, transaction_queue: txq,
ws_address: cmd.ws_conf.address(), ws_address: cmd.ws_conf.address(),
fetch: fetch, fetch: fetch,
pool: cpu_pool.clone(),
geth_compatibility: cmd.geth_compatibility, geth_compatibility: cmd.geth_compatibility,
remote: event_loop.remote(), executor: runtime.executor(),
whisper_rpc: whisper_factory, whisper_rpc: whisper_factory,
private_tx_service: None, //TODO: add this to client. private_tx_service: None, //TODO: add this to client.
gas_price_percentile: cmd.gas_price_percentile, gas_price_percentile: cmd.gas_price_percentile,
@ -324,13 +320,8 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
let dependencies = rpc::Dependencies { let dependencies = rpc::Dependencies {
apis: deps_for_rpc_apis.clone(), apis: deps_for_rpc_apis.clone(),
remote: event_loop.raw_remote(), executor: runtime.executor(),
stats: rpc_stats.clone(), stats: rpc_stats.clone(),
pool: if cmd.http_conf.processing_threads > 0 {
Some(rpc::CpuPool::new(cmd.http_conf.processing_threads))
} else {
None
},
}; };
// start rpc servers // start rpc servers
@ -358,7 +349,7 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
rpc: rpc_direct, rpc: rpc_direct,
informant, informant,
client, client,
keep_alive: Box::new((event_loop, service, ws_server, http_server, ipc_server)), keep_alive: Box::new((runtime, service, ws_server, http_server, ipc_server)),
} }
}) })
} }
@ -477,10 +468,8 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
// prepare account provider // prepare account provider
let account_provider = Arc::new(prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?); let account_provider = Arc::new(prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?);
let cpu_pool = CpuPool::new(4);
// spin up event loop // spin up event loop
let event_loop = EventLoop::spawn(); let runtime = Runtime::with_default_thread_count();
// fetch service // fetch service
let fetch = fetch::Client::new(FETCH_FULL_NUM_DNS_THREADS).map_err(|e| format!("Error starting fetch client: {:?}", e))?; let fetch = fetch::Client::new(FETCH_FULL_NUM_DNS_THREADS).map_err(|e| format!("Error starting fetch client: {:?}", e))?;
@ -489,7 +478,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
// create miner // create miner
let miner = Arc::new(Miner::new( let miner = Arc::new(Miner::new(
cmd.miner_options, cmd.miner_options,
cmd.gas_pricer_conf.to_gas_pricer(fetch.clone(), cpu_pool.clone()), cmd.gas_pricer_conf.to_gas_pricer(fetch.clone(), runtime.executor()),
&spec, &spec,
Some(account_provider.clone()), Some(account_provider.clone()),
@ -500,7 +489,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
if !cmd.miner_extras.work_notify.is_empty() { if !cmd.miner_extras.work_notify.is_empty() {
miner.add_work_listener(Box::new( miner.add_work_listener(Box::new(
WorkPoster::new(&cmd.miner_extras.work_notify, fetch.clone(), event_loop.remote()) WorkPoster::new(&cmd.miner_extras.work_notify, fetch.clone(), runtime.executor())
)); ));
} }
@ -698,7 +687,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
&Arc::downgrade(&(service.client() as Arc<BlockChainClient>)), &Arc::downgrade(&(service.client() as Arc<BlockChainClient>)),
&Arc::downgrade(&sync_provider), &Arc::downgrade(&sync_provider),
update_policy, update_policy,
hash_fetch::Client::with_fetch(contract_client.clone(), cpu_pool.clone(), updater_fetch, event_loop.remote()) hash_fetch::Client::with_fetch(contract_client.clone(), updater_fetch, runtime.executor())
); );
service.add_notify(updater.clone()); service.add_notify(updater.clone());
@ -723,8 +712,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
geth_compatibility: cmd.geth_compatibility, geth_compatibility: cmd.geth_compatibility,
ws_address: cmd.ws_conf.address(), ws_address: cmd.ws_conf.address(),
fetch: fetch.clone(), fetch: fetch.clone(),
pool: cpu_pool.clone(), executor: runtime.executor(),
remote: event_loop.remote(),
whisper_rpc: whisper_factory, whisper_rpc: whisper_factory,
private_tx_service: Some(private_tx_service.clone()), private_tx_service: Some(private_tx_service.clone()),
gas_price_percentile: cmd.gas_price_percentile, gas_price_percentile: cmd.gas_price_percentile,
@ -733,14 +721,8 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
let dependencies = rpc::Dependencies { let dependencies = rpc::Dependencies {
apis: deps_for_rpc_apis.clone(), apis: deps_for_rpc_apis.clone(),
remote: event_loop.raw_remote(), executor: runtime.executor(),
stats: rpc_stats.clone(), stats: rpc_stats.clone(),
pool: if cmd.http_conf.processing_threads > 0 {
Some(rpc::CpuPool::new(cmd.http_conf.processing_threads))
} else {
None
},
}; };
// start rpc servers // start rpc servers
@ -820,7 +802,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
informant, informant,
client, client,
client_service: Arc::new(service), client_service: Arc::new(service),
keep_alive: Box::new((watcher, updater, ws_server, http_server, ipc_server, secretstore_key_server, ipfs_server, event_loop)), keep_alive: Box::new((watcher, updater, ws_server, http_server, ipc_server, secretstore_key_server, ipfs_server, runtime)),
} }
}) })
} }
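The pattern above recurs throughout `run.rs`: the separate `CpuPool` and `EventLoop` collapse into a single `Runtime`, its cloneable `Executor` is handed to every component that used to take a pool or a remote, and the `Runtime` itself is stored in `keep_alive` so the worker threads outlive the handles. A minimal sketch of that shape, assuming the `parity_runtime` API this PR introduces (`spawn` is assumed to accept any `Future<Item = (), Error = ()>`, as the call sites in this diff suggest):

    extern crate futures;
    extern crate parity_runtime;

    use futures::future;
    use parity_runtime::Runtime;

    fn main() {
        // One runtime replaces the old EventLoop + CpuPool pair.
        let runtime = Runtime::with_default_thread_count();

        // Executor is a cheap, cloneable handle onto the same thread pool.
        let executor = runtime.executor();
        executor.spawn(future::lazy(|| {
            println!("background task"); // stands in for real work
            Ok::<(), ()>(())
        }));

        // Dropping the Runtime tears the workers down, which is why the
        // real code stores it in `keep_alive` instead.
        drop(runtime);
    }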


@ -9,11 +9,10 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies] [dependencies]
fetch = { path = "../util/fetch" } fetch = { path = "../util/fetch" }
futures = "0.1" futures = "0.1"
futures-cpupool = "0.1" parity-runtime = { path = "../util/runtime" }
log = "0.4" log = "0.4"
serde_json = "1.0" serde_json = "1.0"
[dev-dependencies] [dev-dependencies]
hyper = "0.11"
parking_lot = "0.6" parking_lot = "0.6"
fake-fetch = { path = "../util/fake-fetch" } fake-fetch = { path = "../util/fake-fetch" }


@ -19,8 +19,8 @@
//! A simple client to get the current ETH price using an external API. //! A simple client to get the current ETH price using an external API.
extern crate futures; extern crate futures;
extern crate futures_cpupool;
extern crate serde_json; extern crate serde_json;
extern crate parity_runtime;
#[macro_use] #[macro_use]
extern crate log; extern crate log;
@ -38,8 +38,8 @@ use std::str;
use fetch::{Client as FetchClient, Fetch}; use fetch::{Client as FetchClient, Fetch};
use futures::{Future, Stream}; use futures::{Future, Stream};
use futures::future::{self, Either}; use futures::future::{self, Either};
use futures_cpupool::CpuPool;
use serde_json::Value; use serde_json::Value;
use parity_runtime::Executor;
/// Current ETH price information. /// Current ETH price information.
#[derive(Debug)] #[derive(Debug)]
@ -71,7 +71,7 @@ impl From<fetch::Error> for Error {
/// A client to get the current ETH price using an external API. /// A client to get the current ETH price using an external API.
pub struct Client<F = FetchClient> { pub struct Client<F = FetchClient> {
pool: CpuPool, pool: Executor,
api_endpoint: String, api_endpoint: String,
fetch: F, fetch: F,
} }
@ -92,7 +92,7 @@ impl<F> cmp::PartialEq for Client<F> {
impl<F: Fetch> Client<F> { impl<F: Fetch> Client<F> {
/// Creates a new instance of the `Client` given a `fetch::Client`. /// Creates a new instance of the `Client` given a `fetch::Client`.
pub fn new(fetch: F, pool: CpuPool) -> Client<F> { pub fn new(fetch: F, pool: Executor) -> Client<F> {
let api_endpoint = "https://api.etherscan.io/api?module=stats&action=ethprice".to_owned(); let api_endpoint = "https://api.etherscan.io/api?module=stats&action=ethprice".to_owned();
Client { pool, api_endpoint, fetch } Client { pool, api_endpoint, fetch }
} }
@ -108,7 +108,7 @@ impl<F: Fetch> Client<F> {
} }
Either::B(response.concat2().from_err()) Either::B(response.concat2().from_err())
}) })
.map(move |body| { .and_then(move |body| {
let body_str = str::from_utf8(&body).ok(); let body_str = str::from_utf8(&body).ok();
let value: Option<Value> = body_str.and_then(|s| serde_json::from_str(s).ok()); let value: Option<Value> = body_str.and_then(|s| serde_json::from_str(s).ok());
@ -128,30 +128,31 @@ impl<F: Fetch> Client<F> {
}) })
.map_err(|err| { .map_err(|err| {
warn!("Failed to auto-update latest ETH price: {:?}", err); warn!("Failed to auto-update latest ETH price: {:?}", err);
err
}); });
self.pool.spawn(future).forget() self.pool.spawn(future)
} }
} }
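A semantic difference hides in this one-line change: `CpuPool::spawn` returned a `CpuFuture` that cancels its task when dropped, so fire-and-forget callers had to call `.forget()`; the runtime's `spawn` hands the task to tokio and returns nothing, so errors must be absorbed inside the future itself. A sketch of the two idioms, with `work()` standing in for any hypothetical future:

    // Before: futures-cpupool. The returned CpuFuture cancels the task
    // on drop, hence the explicit `.forget()`.
    pool.spawn(work()).forget();

    // After: the task runs to completion regardless. There is no handle,
    // so failures are logged away rather than propagated.
    executor.spawn(work().map_err(|e| warn!("task failed: {:?}", e)));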
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use std::sync::Arc; use std::sync::Arc;
use futures_cpupool::CpuPool; use parity_runtime::{Runtime, Executor};
use Client; use Client;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use fake_fetch::FakeFetch; use fake_fetch::FakeFetch;
fn price_info_ok(response: &str) -> Client<FakeFetch<String>> { fn price_info_ok(response: &str, executor: Executor) -> Client<FakeFetch<String>> {
Client::new(FakeFetch::new(Some(response.to_owned())), CpuPool::new(1)) Client::new(FakeFetch::new(Some(response.to_owned())), executor)
} }
fn price_info_not_found() -> Client<FakeFetch<String>> { fn price_info_not_found(executor: Executor) -> Client<FakeFetch<String>> {
Client::new(FakeFetch::new(None::<String>), CpuPool::new(1)) Client::new(FakeFetch::new(None::<String>), executor)
} }
#[test] #[test]
fn should_get_price_info() { fn should_get_price_info() {
let runtime = Runtime::with_thread_count(1);
// given // given
let response = r#"{ let response = r#"{
"status": "1", "status": "1",
@ -164,7 +165,7 @@ mod test {
} }
}"#; }"#;
let price_info = price_info_ok(response); let price_info = price_info_ok(response, runtime.executor());
// when // when
price_info.get(|price| { price_info.get(|price| {
@ -176,10 +177,12 @@ mod test {
#[test] #[test]
fn should_not_call_set_price_if_response_is_malformed() { fn should_not_call_set_price_if_response_is_malformed() {
let runtime = Runtime::with_thread_count(1);
// given // given
let response = "{}"; let response = "{}";
let price_info = price_info_ok(response); let price_info = price_info_ok(response, runtime.executor());
let b = Arc::new(AtomicBool::new(false)); let b = Arc::new(AtomicBool::new(false));
// when // when
@ -194,8 +197,10 @@ mod test {
#[test] #[test]
fn should_not_call_set_price_if_response_is_invalid() { fn should_not_call_set_price_if_response_is_invalid() {
let runtime = Runtime::with_thread_count(1);
// given // given
let price_info = price_info_not_found(); let price_info = price_info_not_found(runtime.executor());
let b = Arc::new(AtomicBool::new(false)); let b = Arc::new(AtomicBool::new(false));
// when // when


@ -9,11 +9,10 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies] [dependencies]
ansi_term = "0.10" ansi_term = "0.10"
cid = "0.2" cid = "0.3"
futures = "0.1.6" futures = "0.1.6"
futures-cpupool = "0.1"
log = "0.4" log = "0.4"
multihash ="0.7" multihash = "0.8"
order-stat = "0.1" order-stat = "0.1"
parking_lot = "0.6" parking_lot = "0.6"
rand = "0.4" rand = "0.4"
@ -28,17 +27,17 @@ tokio-timer = "0.1"
transient-hashmap = "0.4" transient-hashmap = "0.4"
itertools = "0.5" itertools = "0.5"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-ipc-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-ipc-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
ethash = { path = "../ethash" } ethash = { path = "../ethash" }
ethcore = { path = "../ethcore", features = ["test-helpers"] } ethcore = { path = "../ethcore", features = ["test-helpers"] }
parity-bytes = "0.1" parity-bytes = "0.1"
parity-crypto = "0.1" parity-crypto = "0.2"
fastmap = { path = "../util/fastmap" } fastmap = { path = "../util/fastmap" }
ethcore-devtools = { path = "../devtools" } ethcore-devtools = { path = "../devtools" }
ethcore-io = { path = "../util/io" } ethcore-io = { path = "../util/io" }
@ -55,7 +54,7 @@ ethkey = { path = "../ethkey" }
ethstore = { path = "../ethstore" } ethstore = { path = "../ethstore" }
fetch = { path = "../util/fetch" } fetch = { path = "../util/fetch" }
keccak-hash = "0.1.2" keccak-hash = "0.1.2"
parity-reactor = { path = "../util/reactor" } parity-runtime = { path = "../util/runtime" }
parity-updater = { path = "../updater" } parity-updater = { path = "../updater" }
parity-version = { path = "../util/version" } parity-version = { path = "../util/version" }
patricia-trie = "0.3.0" patricia-trie = "0.3.0"


@ -42,13 +42,13 @@ impl<M, T> http::MetaExtractor<M> for MetaExtractor<T> where
T: HttpMetaExtractor<Metadata = M>, T: HttpMetaExtractor<Metadata = M>,
M: jsonrpc_core::Metadata, M: jsonrpc_core::Metadata,
{ {
fn read_metadata(&self, req: &hyper::server::Request) -> M { fn read_metadata(&self, req: &hyper::Request<hyper::Body>) -> M {
let as_string = |header: Option<&hyper::header::Raw>| header let as_string = |header: Option<&hyper::header::HeaderValue>| {
.and_then(|raw| raw.one()) header.and_then(|val| val.to_str().ok().map(|s| s.to_owned()))
.map(|raw| String::from_utf8_lossy(raw).into_owned()); };
let origin = as_string(req.headers().get_raw("origin")); let origin = as_string(req.headers().get("origin"));
let user_agent = as_string(req.headers().get_raw("user-agent")); let user_agent = as_string(req.headers().get("user-agent"));
self.extractor.read_metadata(origin, user_agent) self.extractor.read_metadata(origin, user_agent)
} }
} }
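This is the mechanical core of the hyper 0.11 -> 0.12 upgrade: the typed `Headers` collection and `get_raw` are gone, replaced by an `http::HeaderMap` of `HeaderValue`s, and `to_str()` is fallible because header bytes need not be valid UTF-8. A self-contained sketch of the new lookup:

    extern crate hyper;

    use hyper::{Body, Request};

    // Reads an optional header as an owned String under hyper 0.12.
    // `get` returns Option<&HeaderValue>; `to_str` fails on non-ASCII
    // bytes, which this helper treats the same as an absent header.
    fn header_string(req: &Request<Body>, name: &str) -> Option<String> {
        req.headers()
            .get(name)
            .and_then(|value| value.to_str().ok())
            .map(str::to_owned)
    }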


@ -23,7 +23,6 @@ extern crate futures;
extern crate ansi_term; extern crate ansi_term;
extern crate cid; extern crate cid;
extern crate futures_cpupool;
extern crate itertools; extern crate itertools;
extern crate multihash; extern crate multihash;
extern crate order_stat; extern crate order_stat;
@ -60,7 +59,7 @@ extern crate ethkey;
extern crate ethstore; extern crate ethstore;
extern crate fetch; extern crate fetch;
extern crate keccak_hash as hash; extern crate keccak_hash as hash;
extern crate parity_reactor; extern crate parity_runtime;
extern crate parity_updater as updater; extern crate parity_updater as updater;
extern crate parity_version as version; extern crate parity_version as version;
extern crate patricia_trie as trie; extern crate patricia_trie as trie;
@ -124,7 +123,6 @@ pub use authcodes::{AuthCodes, TimeProvider};
pub use http_common::HttpMetaExtractor; pub use http_common::HttpMetaExtractor;
use std::net::SocketAddr; use std::net::SocketAddr;
use http::tokio_core;
/// RPC HTTP Server instance /// RPC HTTP Server instance
pub type HttpServer = http::Server; pub type HttpServer = http::Server;
@ -135,7 +133,6 @@ pub fn start_http<M, S, H, T>(
cors_domains: http::DomainsValidation<http::AccessControlAllowOrigin>, cors_domains: http::DomainsValidation<http::AccessControlAllowOrigin>,
allowed_hosts: http::DomainsValidation<http::Host>, allowed_hosts: http::DomainsValidation<http::Host>,
handler: H, handler: H,
remote: tokio_core::reactor::Remote,
extractor: T, extractor: T,
threads: usize, threads: usize,
max_payload: usize, max_payload: usize,
@ -148,7 +145,6 @@ pub fn start_http<M, S, H, T>(
let extractor = http_common::MetaExtractor::new(extractor); let extractor = http_common::MetaExtractor::new(extractor);
Ok(http::ServerBuilder::with_meta_extractor(handler, extractor) Ok(http::ServerBuilder::with_meta_extractor(handler, extractor)
.threads(threads) .threads(threads)
.event_loop_remote(remote)
.cors(cors_domains.into()) .cors(cors_domains.into())
.allowed_hosts(allowed_hosts.into()) .allowed_hosts(allowed_hosts.into())
.max_request_body_size(max_payload * 1024 * 1024) .max_request_body_size(max_payload * 1024 * 1024)
@ -162,7 +158,6 @@ pub fn start_http_with_middleware<M, S, H, T, R>(
cors_domains: http::DomainsValidation<http::AccessControlAllowOrigin>, cors_domains: http::DomainsValidation<http::AccessControlAllowOrigin>,
allowed_hosts: http::DomainsValidation<http::Host>, allowed_hosts: http::DomainsValidation<http::Host>,
handler: H, handler: H,
remote: tokio_core::reactor::Remote,
extractor: T, extractor: T,
middleware: R, middleware: R,
threads: usize, threads: usize,
@ -177,7 +172,6 @@ pub fn start_http_with_middleware<M, S, H, T, R>(
let extractor = http_common::MetaExtractor::new(extractor); let extractor = http_common::MetaExtractor::new(extractor);
Ok(http::ServerBuilder::with_meta_extractor(handler, extractor) Ok(http::ServerBuilder::with_meta_extractor(handler, extractor)
.threads(threads) .threads(threads)
.event_loop_remote(remote)
.cors(cors_domains.into()) .cors(cors_domains.into())
.allowed_hosts(allowed_hosts.into()) .allowed_hosts(allowed_hosts.into())
.max_request_body_size(max_payload * 1024 * 1024) .max_request_body_size(max_payload * 1024 * 1024)
@ -189,7 +183,6 @@ pub fn start_http_with_middleware<M, S, H, T, R>(
pub fn start_ipc<M, S, H, T>( pub fn start_ipc<M, S, H, T>(
addr: &str, addr: &str,
handler: H, handler: H,
remote: tokio_core::reactor::Remote,
extractor: T, extractor: T,
) -> ::std::io::Result<ipc::Server> where ) -> ::std::io::Result<ipc::Server> where
M: jsonrpc_core::Metadata, M: jsonrpc_core::Metadata,
@ -198,7 +191,6 @@ pub fn start_ipc<M, S, H, T>(
T: IpcMetaExtractor<M>, T: IpcMetaExtractor<M>,
{ {
ipc::ServerBuilder::with_meta_extractor(handler, extractor) ipc::ServerBuilder::with_meta_extractor(handler, extractor)
.event_loop_remote(remote)
.start(addr) .start(addr)
} }
@ -206,7 +198,6 @@ pub fn start_ipc<M, S, H, T>(
pub fn start_ws<M, S, H, T, U, V>( pub fn start_ws<M, S, H, T, U, V>(
addr: &SocketAddr, addr: &SocketAddr,
handler: H, handler: H,
remote: tokio_core::reactor::Remote,
allowed_origins: ws::DomainsValidation<ws::Origin>, allowed_origins: ws::DomainsValidation<ws::Origin>,
allowed_hosts: ws::DomainsValidation<ws::Host>, allowed_hosts: ws::DomainsValidation<ws::Host>,
max_connections: usize, max_connections: usize,
@ -222,7 +213,6 @@ pub fn start_ws<M, S, H, T, U, V>(
V: ws::RequestMiddleware, V: ws::RequestMiddleware,
{ {
ws::ServerBuilder::with_meta_extractor(handler, extractor) ws::ServerBuilder::with_meta_extractor(handler, extractor)
.event_loop_remote(remote)
.request_middleware(middleware) .request_middleware(middleware)
.allowed_origins(allowed_origins) .allowed_origins(allowed_origins)
.allowed_hosts(allowed_hosts) .allowed_hosts(allowed_hosts)
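With jsonrpc 0.9 every transport server above (HTTP, IPC, WS) owns its tokio threads, which is why the `event_loop_remote` escape hatch disappears from each builder. Starting a server now needs nothing but the builder itself — a sketch, assuming the jsonrpc-http-server 0.9 API, with `io` a `jsonrpc_core::IoHandler` that already has methods registered:

    let server = http::ServerBuilder::new(io)
        .threads(4)
        .start_http(&"127.0.0.1:8545".parse().unwrap())
        .expect("the RPC server to start");

    // Blocks the current thread until the server is closed.
    server.wait();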


@ -18,7 +18,7 @@ use std::ops::{Deref, DerefMut};
use std::path::PathBuf; use std::path::PathBuf;
use tempdir::TempDir; use tempdir::TempDir;
use parity_reactor::{EventLoop, TokioRemote}; use parity_runtime::{Runtime, TaskExecutor};
use authcodes::AuthCodes; use authcodes::AuthCodes;
@ -27,15 +27,15 @@ pub struct Server<T> {
/// Server /// Server
pub server: T, pub server: T,
/// RPC Event Loop /// RPC Event Loop
pub event_loop: EventLoop, pub event_loop: Runtime,
} }
impl<T> Server<T> { impl<T> Server<T> {
pub fn new<F>(f: F) -> Server<T> where pub fn new<F>(f: F) -> Server<T> where
F: FnOnce(TokioRemote) -> T, F: FnOnce(TaskExecutor) -> T,
{ {
let event_loop = EventLoop::spawn(); let event_loop = Runtime::with_thread_count(1);
let remote = event_loop.raw_remote(); let remote = event_loop.raw_executor();
Server { Server {
server: f(remote), server: f(remote),
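The harness now spins up a single-threaded `Runtime` per test and passes its raw tokio `TaskExecutor` to the closure that builds the server; holding the `Runtime` in the struct keeps the pool alive for the test's duration. Hypothetical usage, with `build_test_server` standing in for a real constructor:

    let server = Server::new(|executor: TaskExecutor| build_test_server(executor));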


@ -26,14 +26,13 @@ fn serve(handler: Option<MetaIoHandler<Metadata>>) -> Server<HttpServer> {
let address = "127.0.0.1:0".parse().unwrap(); let address = "127.0.0.1:0".parse().unwrap();
let handler = handler.unwrap_or_default(); let handler = handler.unwrap_or_default();
Server::new(|remote| ::start_http_with_middleware( Server::new(|_remote| ::start_http_with_middleware(
&address, &address,
http::DomainsValidation::Disabled, http::DomainsValidation::Disabled,
http::DomainsValidation::Disabled, http::DomainsValidation::Disabled,
handler, handler,
remote,
extractors::RpcExtractor, extractors::RpcExtractor,
|request: hyper::Request| { |request: hyper::Request<hyper::Body>| {
http::RequestMiddlewareAction::Proceed { http::RequestMiddlewareAction::Proceed {
should_continue_on_invalid_cors: false, should_continue_on_invalid_cors: false,
request, request,
@ -50,7 +49,7 @@ fn request(server: Server<HttpServer>, request: &str) -> http_client::Response {
} }
#[cfg(test)] #[cfg(test)]
mod testsing { mod tests {
use jsonrpc_core::{MetaIoHandler, Value}; use jsonrpc_core::{MetaIoHandler, Value};
use v1::Metadata; use v1::Metadata;
use super::{request, Server}; use super::{request, Server};
@ -73,7 +72,7 @@ mod testsing {
// when // when
let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#; let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#;
let expected = "4B\n{\"jsonrpc\":\"2.0\",\"result\":\"unknown origin / unknown agent via RPC\",\"id\":1}\n\n0\n\n"; let expected = "{\"jsonrpc\":\"2.0\",\"result\":\"unknown origin / unknown agent via RPC\",\"id\":1}\n";
let res = request(server, let res = request(server,
&format!("\ &format!("\
POST / HTTP/1.1\r\n\ POST / HTTP/1.1\r\n\
@ -98,7 +97,7 @@ mod testsing {
// when // when
let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#; let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#;
let expected = "49\n{\"jsonrpc\":\"2.0\",\"result\":\"unknown origin / curl/7.16.3 via RPC\",\"id\":1}\n\n0\n\n"; let expected = "{\"jsonrpc\":\"2.0\",\"result\":\"unknown origin / curl/7.16.3 via RPC\",\"id\":1}\n";
let res = request(server, let res = request(server,
&format!("\ &format!("\
POST / HTTP/1.1\r\n\ POST / HTTP/1.1\r\n\


@ -34,10 +34,9 @@ pub fn serve() -> (Server<ws::Server>, usize, GuardedAuthCodes) {
let authcodes = GuardedAuthCodes::new(); let authcodes = GuardedAuthCodes::new();
let stats = Arc::new(informant::RpcStats::default()); let stats = Arc::new(informant::RpcStats::default());
let res = Server::new(|remote| ::start_ws( let res = Server::new(|_| ::start_ws(
&address, &address,
io, io,
remote,
ws::DomainsValidation::Disabled, ws::DomainsValidation::Disabled,
ws::DomainsValidation::Disabled, ws::DomainsValidation::Disabled,
5, 5,


@ -23,6 +23,7 @@ use authcodes;
use http_common::HttpMetaExtractor; use http_common::HttpMetaExtractor;
use ipc; use ipc;
use jsonrpc_core as core; use jsonrpc_core as core;
use jsonrpc_core::futures::future::Either;
use jsonrpc_pubsub::Session; use jsonrpc_pubsub::Session;
use ws; use ws;
use ethereum_types::H256; use ethereum_types::H256;
@ -216,26 +217,26 @@ impl<M: core::Middleware<Metadata>> WsDispatcher<M> {
} }
impl<M: core::Middleware<Metadata>> core::Middleware<Metadata> for WsDispatcher<M> { impl<M: core::Middleware<Metadata>> core::Middleware<Metadata> for WsDispatcher<M> {
type Future = core::futures::future::Either< type Future = Either<
M::Future, core::FutureRpcResult<M::Future>,
core::FutureResponse, core::FutureResponse,
>; >;
fn on_request<F, X>(&self, request: core::Request, meta: Metadata, process: F) -> Self::Future where fn on_request<F, X>(&self, request: core::Request, meta: Metadata, process: F)
-> Either<Self::Future, X>
where
F: FnOnce(core::Request, Metadata) -> X, F: FnOnce(core::Request, Metadata) -> X,
X: core::futures::Future<Item=Option<core::Response>, Error=()> + Send + 'static, X: core::futures::Future<Item=Option<core::Response>, Error=()> + Send + 'static,
{ {
use self::core::futures::future::Either::{A, B};
let use_full = match &meta.origin { let use_full = match &meta.origin {
&Origin::Signer { .. } => true, &Origin::Signer { .. } => true,
_ => false, _ => false,
}; };
if use_full { if use_full {
A(self.full_handler.handle_rpc_request(request, meta)) Either::A(Either::A(self.full_handler.handle_rpc_request(request, meta)))
} else { } else {
B(Box::new(process(request, meta))) Either::B(process(request, meta))
} }
} }
} }
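The reshaped signature is the jsonrpc-core 0.9 `Middleware` contract: `on_request` returns `Either<Self::Future, X>`, so a middleware that declines to intervene can hand back the continuation `X` without boxing it. A pass-through sketch under the same bounds the diff shows:

    struct Passthrough;

    impl<M: core::Metadata> core::Middleware<M> for Passthrough {
        type Future = core::FutureResponse;

        fn on_request<F, X>(&self, request: core::Request, meta: M, process: F)
            -> Either<Self::Future, X>
        where
            F: FnOnce(core::Request, M) -> X,
            X: core::futures::Future<Item = Option<core::Response>, Error = ()> + Send + 'static,
        {
            // Defer to the normal processing chain, unboxed.
            Either::B(process(request, meta))
        }
    }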


@ -23,31 +23,25 @@ use ethereum_types::{U256, Address};
use futures::{Future, future, Poll, Async}; use futures::{Future, future, Poll, Async};
use futures::future::Either; use futures::future::Either;
use futures::sync::oneshot; use futures::sync::oneshot;
use futures_cpupool::CpuPool; use parity_runtime::Executor;
/// Manages currently reserved and prospective nonces /// Manages currently reserved and prospective nonces
/// for multiple senders. /// for multiple senders.
#[derive(Debug)] #[derive(Debug)]
pub struct Reservations { pub struct Reservations {
nonces: HashMap<Address, SenderReservations>, nonces: HashMap<Address, SenderReservations>,
pool: CpuPool, executor: Executor,
} }
impl Reservations { impl Reservations {
/// A maximal number of reserved nonces in the hashmap /// A maximal number of reserved nonces in the hashmap
/// before we start clearing the unused ones. /// before we start clearing the unused ones.
const CLEAN_AT: usize = 512; const CLEAN_AT: usize = 512;
/// Create new nonces manager and spawn a single-threaded cpu pool /// Create new nonces manager with given executor.
/// for progressing execution of dropped nonces. pub fn new(executor: Executor) -> Self {
pub fn new() -> Self {
Self::with_pool(CpuPool::new(1))
}
/// Create new nonces manager with given cpupool.
pub fn with_pool(pool: CpuPool) -> Self {
Reservations { Reservations {
nonces: Default::default(), nonces: Default::default(),
pool, executor,
} }
} }
@ -59,9 +53,9 @@ impl Reservations {
self.nonces.retain(|_, v| !v.is_empty()); self.nonces.retain(|_, v| !v.is_empty());
} }
let pool = &self.pool; let executor = &self.executor;
self.nonces.entry(sender) self.nonces.entry(sender)
.or_insert_with(move || SenderReservations::with_pool(pool.clone())) .or_insert_with(move || SenderReservations::new(executor.clone()))
.reserve_nonce(minimal) .reserve_nonce(minimal)
} }
} }
@ -71,25 +65,18 @@ impl Reservations {
pub struct SenderReservations { pub struct SenderReservations {
previous: Option<oneshot::Receiver<U256>>, previous: Option<oneshot::Receiver<U256>>,
previous_ready: Arc<AtomicBool>, previous_ready: Arc<AtomicBool>,
pool: CpuPool, executor: Executor,
prospective_value: U256, prospective_value: U256,
dropped: Arc<AtomicUsize>, dropped: Arc<AtomicUsize>,
} }
impl SenderReservations { impl SenderReservations {
/// Create new nonces manager and spawn a single-threaded cpu pool /// Create new nonces manager with given executor.
/// for progressing execution of dropped nonces. pub fn new(executor: Executor) -> Self {
#[cfg(test)]
pub fn new() -> Self {
Self::with_pool(CpuPool::new(1))
}
/// Create new nonces manager with given cpu pool.
pub fn with_pool(pool: CpuPool) -> Self {
SenderReservations { SenderReservations {
previous: None, previous: None,
previous_ready: Arc::new(AtomicBool::new(true)), previous_ready: Arc::new(AtomicBool::new(true)),
pool, executor,
prospective_value: Default::default(), prospective_value: Default::default(),
dropped: Default::default(), dropped: Default::default(),
} }
@ -110,7 +97,7 @@ impl SenderReservations {
let (next, rx) = oneshot::channel(); let (next, rx) = oneshot::channel();
let next = Some(next); let next = Some(next);
let next_sent = Arc::new(AtomicBool::default()); let next_sent = Arc::new(AtomicBool::default());
let pool = self.pool.clone(); let executor = self.executor.clone();
let dropped = self.dropped.clone(); let dropped = self.dropped.clone();
self.previous_ready = next_sent.clone(); self.previous_ready = next_sent.clone();
match mem::replace(&mut self.previous, Some(rx)) { match mem::replace(&mut self.previous, Some(rx)) {
@ -120,7 +107,7 @@ impl SenderReservations {
next_sent, next_sent,
minimal, minimal,
prospective_value, prospective_value,
pool, executor,
dropped, dropped,
}, },
None => Reserved { None => Reserved {
@ -129,7 +116,7 @@ impl SenderReservations {
next_sent, next_sent,
minimal, minimal,
prospective_value, prospective_value,
pool, executor,
dropped, dropped,
}, },
} }
@ -152,7 +139,7 @@ pub struct Reserved {
next_sent: Arc<AtomicBool>, next_sent: Arc<AtomicBool>,
minimal: U256, minimal: U256,
prospective_value: U256, prospective_value: U256,
pool: CpuPool, executor: Executor,
dropped: Arc<AtomicUsize>, dropped: Arc<AtomicUsize>,
} }
@ -196,10 +183,14 @@ impl Drop for Reserved {
self.dropped.fetch_add(1, atomic::Ordering::SeqCst); self.dropped.fetch_add(1, atomic::Ordering::SeqCst);
// If Reserved is dropped just pipe previous and next together. // If Reserved is dropped just pipe previous and next together.
let previous = mem::replace(&mut self.previous, Either::B(future::ok(U256::default()))); let previous = mem::replace(&mut self.previous, Either::B(future::ok(U256::default())));
self.pool.spawn(previous.map(move |nonce| { self.executor.spawn(
next_sent.store(true, atomic::Ordering::SeqCst); previous
next.send(nonce).expect(Ready::RECV_PROOF) .map(move |nonce| {
})).forget() next_sent.store(true, atomic::Ordering::SeqCst);
next.send(nonce).expect(Ready::RECV_PROOF)
})
.map_err(|err| error!("Error dropping `Reserved`: {:?}", err))
);
} }
} }
} }
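The `Drop` change preserves the nonce chain: every `Reserved` receives the settled nonce from its predecessor over a oneshot channel and must forward it even when abandoned. A toy fragment of the idea (names hypothetical); the trailing `map_err` reflects the apparent requirement that futures handed to `Executor::spawn` be infallible:

    // rx_prev: oneshot::Receiver<U256> left behind by the predecessor.
    let (tx_next, rx_next) = oneshot::channel::<U256>();
    executor.spawn(
        rx_prev
            .map(move |nonce| {
                // Unblock whichever reservation comes after us.
                let _ = tx_next.send(nonce);
            })
            .map_err(|e| error!("nonce chain broken: {:?}", e)),
    );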
@ -253,10 +244,12 @@ impl Drop for Ready {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use parity_runtime::Runtime;
#[test] #[test]
fn should_reserve_a_set_of_nonces_and_resolve_them() { fn should_reserve_a_set_of_nonces_and_resolve_them() {
let mut nonces = SenderReservations::new(); let runtime = Runtime::with_thread_count(1);
let mut nonces = SenderReservations::new(runtime.executor());
assert!(nonces.is_empty()); assert!(nonces.is_empty());
let n1 = nonces.reserve_nonce(5.into()); let n1 = nonces.reserve_nonce(5.into());
@ -303,7 +296,8 @@ mod tests {
#[test] #[test]
fn should_return_prospective_nonce() { fn should_return_prospective_nonce() {
let mut nonces = SenderReservations::new(); let runtime = Runtime::with_thread_count(1);
let mut nonces = SenderReservations::new(runtime.executor());
let n1 = nonces.reserve_nonce(5.into()); let n1 = nonces.reserve_nonce(5.into());
let n2 = nonces.reserve_nonce(5.into()); let n2 = nonces.reserve_nonce(5.into());


@ -95,7 +95,7 @@ impl<S: core::Middleware<Metadata>> GenericPollManager<S> {
jsonrpc: Some(core::Version::V2), jsonrpc: Some(core::Version::V2),
id: core::Id::Str(id.as_string()), id: core::Id::Str(id.as_string()),
method: subscription.method.clone(), method: subscription.method.clone(),
params: Some(subscription.params.clone()), params: subscription.params.clone(),
}; };
trace!(target: "pubsub", "Polling method: {:?}", call); trace!(target: "pubsub", "Polling method: {:?}", call);
let result = self.rpc.handle_call(call.into(), subscription.metadata.clone()); let result = self.rpc.handle_call(call.into(), subscription.metadata.clone());
@ -141,7 +141,7 @@ mod tests {
use jsonrpc_core::{MetaIoHandler, NoopMiddleware, Value, Params}; use jsonrpc_core::{MetaIoHandler, NoopMiddleware, Value, Params};
use jsonrpc_core::futures::{Future, Stream}; use jsonrpc_core::futures::{Future, Stream};
use jsonrpc_pubsub::SubscriptionId; use jsonrpc_pubsub::SubscriptionId;
use http::tokio_core::reactor; use http::tokio::runtime::Runtime;
use super::GenericPollManager; use super::GenericPollManager;
@ -162,25 +162,25 @@ mod tests {
#[test] #[test]
fn should_poll_subscribed_method() { fn should_poll_subscribed_method() {
// given // given
let mut el = reactor::Core::new().unwrap(); let mut el = Runtime::new().unwrap();
let mut poll_manager = poll_manager(); let mut poll_manager = poll_manager();
let (id, rx) = poll_manager.subscribe(Default::default(), "hello".into(), Params::None); let (id, rx) = poll_manager.subscribe(Default::default(), "hello".into(), Params::None);
assert_eq!(id, SubscriptionId::String("0x416d77337e24399d".into())); assert_eq!(id, SubscriptionId::String("0x416d77337e24399d".into()));
// then // then
poll_manager.tick().wait().unwrap(); poll_manager.tick().wait().unwrap();
let (res, rx) = el.run(rx.into_future()).unwrap(); let (res, rx) = el.block_on(rx.into_future()).unwrap();
assert_eq!(res, Some(Ok(Value::String("hello".into())))); assert_eq!(res, Some(Ok(Value::String("hello".into()))));
// retrieve second item // retrieve second item
poll_manager.tick().wait().unwrap(); poll_manager.tick().wait().unwrap();
let (res, rx) = el.run(rx.into_future()).unwrap(); let (res, rx) = el.block_on(rx.into_future()).unwrap();
assert_eq!(res, Some(Ok(Value::String("world".into())))); assert_eq!(res, Some(Ok(Value::String("world".into()))));
// and no more notifications // and no more notifications
poll_manager.tick().wait().unwrap(); poll_manager.tick().wait().unwrap();
// we need to unsubscribe otherwise the future will never finish. // we need to unsubscribe otherwise the future will never finish.
poll_manager.unsubscribe(&id); poll_manager.unsubscribe(&id);
assert_eq!(el.run(rx.into_future()).unwrap().0, None); assert_eq!(el.block_on(rx.into_future()).unwrap().0, None);
} }
} }
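The test swaps `tokio_core::reactor::Core::run` for `tokio::runtime::Runtime::block_on`, which drives one future to completion on the runtime's pool; unlike `Core::run`, the future must be `Send + 'static`. A standalone sketch against tokio 0.1:

    extern crate futures;
    extern crate tokio;

    use futures::future;
    use tokio::runtime::Runtime;

    fn main() {
        let mut rt = Runtime::new().unwrap();
        // block_on returns Result<F::Item, F::Error>.
        let out = rt.block_on(future::ok::<u32, ()>(42)).unwrap();
        assert_eq!(out, 42);
    }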


@ -39,7 +39,7 @@ use sync::LightSync;
use light::cache::Cache; use light::cache::Cache;
use light::on_demand::OnDemand; use light::on_demand::OnDemand;
use light::client::{LightChainClient, LightChainNotify}; use light::client::{LightChainClient, LightChainNotify};
use parity_reactor::Remote; use parity_runtime::Executor;
use ethereum_types::H256; use ethereum_types::H256;
use bytes::Bytes; use bytes::Bytes;
use parking_lot::{RwLock, Mutex}; use parking_lot::{RwLock, Mutex};
@ -56,7 +56,7 @@ pub struct EthPubSubClient<C> {
impl<C> EthPubSubClient<C> { impl<C> EthPubSubClient<C> {
/// Creates new `EthPubSubClient`. /// Creates new `EthPubSubClient`.
pub fn new(client: Arc<C>, remote: Remote) -> Self { pub fn new(client: Arc<C>, executor: Executor) -> Self {
let heads_subscribers = Arc::new(RwLock::new(Subscribers::default())); let heads_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let logs_subscribers = Arc::new(RwLock::new(Subscribers::default())); let logs_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let transactions_subscribers = Arc::new(RwLock::new(Subscribers::default())); let transactions_subscribers = Arc::new(RwLock::new(Subscribers::default()));
@ -64,7 +64,7 @@ impl<C> EthPubSubClient<C> {
EthPubSubClient { EthPubSubClient {
handler: Arc::new(ChainNotificationHandler { handler: Arc::new(ChainNotificationHandler {
client, client,
remote, executor,
heads_subscribers: heads_subscribers.clone(), heads_subscribers: heads_subscribers.clone(),
logs_subscribers: logs_subscribers.clone(), logs_subscribers: logs_subscribers.clone(),
transactions_subscribers: transactions_subscribers.clone(), transactions_subscribers: transactions_subscribers.clone(),
@ -77,8 +77,8 @@ impl<C> EthPubSubClient<C> {
/// Creates new `EthPubSubClient` with deterministic subscription ids. /// Creates new `EthPubSubClient` with deterministic subscription ids.
#[cfg(test)] #[cfg(test)]
pub fn new_test(client: Arc<C>, remote: Remote) -> Self { pub fn new_test(client: Arc<C>, executor: Executor) -> Self {
let client = Self::new(client, remote); let client = Self::new(client, executor);
*client.heads_subscribers.write() = Subscribers::new_test(); *client.heads_subscribers.write() = Subscribers::new_test();
*client.logs_subscribers.write() = Subscribers::new_test(); *client.logs_subscribers.write() = Subscribers::new_test();
*client.transactions_subscribers.write() = Subscribers::new_test(); *client.transactions_subscribers.write() = Subscribers::new_test();
@ -98,7 +98,7 @@ impl EthPubSubClient<LightFetch> {
on_demand: Arc<OnDemand>, on_demand: Arc<OnDemand>,
sync: Arc<LightSync>, sync: Arc<LightSync>,
cache: Arc<Mutex<Cache>>, cache: Arc<Mutex<Cache>>,
remote: Remote, executor: Executor,
gas_price_percentile: usize, gas_price_percentile: usize,
) -> Self { ) -> Self {
let fetch = LightFetch { let fetch = LightFetch {
@ -108,22 +108,22 @@ impl EthPubSubClient<LightFetch> {
cache, cache,
gas_price_percentile, gas_price_percentile,
}; };
EthPubSubClient::new(Arc::new(fetch), remote) EthPubSubClient::new(Arc::new(fetch), executor)
} }
} }
/// PubSub Notification handler. /// PubSub Notification handler.
pub struct ChainNotificationHandler<C> { pub struct ChainNotificationHandler<C> {
client: Arc<C>, client: Arc<C>,
remote: Remote, executor: Executor,
heads_subscribers: Arc<RwLock<Subscribers<Client>>>, heads_subscribers: Arc<RwLock<Subscribers<Client>>>,
logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>, logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>,
transactions_subscribers: Arc<RwLock<Subscribers<Client>>>, transactions_subscribers: Arc<RwLock<Subscribers<Client>>>,
} }
impl<C> ChainNotificationHandler<C> { impl<C> ChainNotificationHandler<C> {
fn notify(remote: &Remote, subscriber: &Client, result: pubsub::Result) { fn notify(executor: &Executor, subscriber: &Client, result: pubsub::Result) {
remote.spawn(subscriber executor.spawn(subscriber
.notify(Ok(result)) .notify(Ok(result))
.map(|_| ()) .map(|_| ())
.map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e)) .map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e))
@ -133,7 +133,7 @@ impl<C> ChainNotificationHandler<C> {
fn notify_heads(&self, headers: &[(encoded::Header, BTreeMap<String, String>)]) { fn notify_heads(&self, headers: &[(encoded::Header, BTreeMap<String, String>)]) {
for subscriber in self.heads_subscribers.read().values() { for subscriber in self.heads_subscribers.read().values() {
for &(ref header, ref extra_info) in headers { for &(ref header, ref extra_info) in headers {
Self::notify(&self.remote, subscriber, pubsub::Result::Header(RichHeader { Self::notify(&self.executor, subscriber, pubsub::Result::Header(RichHeader {
inner: header.into(), inner: header.into(),
extra_info: extra_info.clone(), extra_info: extra_info.clone(),
})); }));
@ -159,14 +159,14 @@ impl<C> ChainNotificationHandler<C> {
.collect::<Vec<_>>() .collect::<Vec<_>>()
); );
let limit = filter.limit; let limit = filter.limit;
let remote = self.remote.clone(); let executor = self.executor.clone();
let subscriber = subscriber.clone(); let subscriber = subscriber.clone();
self.remote.spawn(logs self.executor.spawn(logs
.map(move |logs| { .map(move |logs| {
let logs = logs.into_iter().flat_map(|log| log).collect(); let logs = logs.into_iter().flat_map(|log| log).collect();
for log in limit_logs(logs, limit) { for log in limit_logs(logs, limit) {
Self::notify(&remote, &subscriber, pubsub::Result::Log(log)) Self::notify(&executor, &subscriber, pubsub::Result::Log(log))
} }
}) })
.map_err(|e| warn!("Unable to fetch latest logs: {:?}", e)) .map_err(|e| warn!("Unable to fetch latest logs: {:?}", e))
@ -178,7 +178,7 @@ impl<C> ChainNotificationHandler<C> {
pub fn notify_new_transactions(&self, hashes: &[H256]) { pub fn notify_new_transactions(&self, hashes: &[H256]) {
for subscriber in self.transactions_subscribers.read().values() { for subscriber in self.transactions_subscribers.read().values() {
for hash in hashes { for hash in hashes {
Self::notify(&self.remote, subscriber, pubsub::Result::TransactionHash((*hash).into())); Self::notify(&self.executor, subscriber, pubsub::Result::TransactionHash((*hash).into()));
} }
} }
} }
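Worth noting in `notify_logs` above: the executor is cloned before the outer future is built, and the clone moves into it so the completed fetch can schedule one notification task per subscriber from inside the pool. As a reduced fragment, with `fetch_logs` hypothetical:

    let executor2 = executor.clone();
    executor.spawn(
        fetch_logs() // hypothetical Future yielding Vec<Log>
            .map(move |logs| {
                for log in logs {
                    // Each notification becomes its own spawned task.
                    Self::notify(&executor2, &subscriber, pubsub::Result::Log(log));
                }
            })
            .map_err(|e| warn!("Unable to fetch latest logs: {:?}", e)),
    );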


@ -22,7 +22,6 @@ use std::sync::Arc;
use sync::ManageNetwork; use sync::ManageNetwork;
use fetch::{self, Fetch}; use fetch::{self, Fetch};
use futures_cpupool::CpuPool;
use hash::keccak_buffer; use hash::keccak_buffer;
use jsonrpc_core::{Result, BoxFuture}; use jsonrpc_core::{Result, BoxFuture};
@ -35,16 +34,14 @@ use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction};
pub struct ParitySetClient<F> { pub struct ParitySetClient<F> {
net: Arc<ManageNetwork>, net: Arc<ManageNetwork>,
fetch: F, fetch: F,
pool: CpuPool,
} }
impl<F: Fetch> ParitySetClient<F> { impl<F: Fetch> ParitySetClient<F> {
/// Creates new `ParitySetClient` with given `Fetch`. /// Creates new `ParitySetClient` with given `Fetch`.
pub fn new(net: Arc<ManageNetwork>, fetch: F, p: CpuPool) -> Self { pub fn new(net: Arc<ManageNetwork>, fetch: F) -> Self {
ParitySetClient { ParitySetClient {
net: net, net: net,
fetch: fetch, fetch: fetch,
pool: p,
} }
} }
} }
@ -134,7 +131,7 @@ impl<F: Fetch> ParitySet for ParitySetClient<F> {
}) })
.map(Into::into) .map(Into::into)
}); });
Box::new(self.pool.spawn(future)) Box::new(future)
} }
fn upgrade_ready(&self) -> Result<Option<ReleaseInfo>> { fn upgrade_ready(&self) -> Result<Option<ReleaseInfo>> {


@ -23,7 +23,6 @@ use ethcore::client::{BlockChainClient, Mode};
use ethcore::miner::MinerService; use ethcore::miner::MinerService;
use sync::ManageNetwork; use sync::ManageNetwork;
use fetch::{self, Fetch}; use fetch::{self, Fetch};
use futures_cpupool::CpuPool;
use hash::keccak_buffer; use hash::keccak_buffer;
use updater::{Service as UpdateService}; use updater::{Service as UpdateService};
@ -40,7 +39,6 @@ pub struct ParitySetClient<C, M, U, F = fetch::Client> {
updater: Arc<U>, updater: Arc<U>,
net: Arc<ManageNetwork>, net: Arc<ManageNetwork>,
fetch: F, fetch: F,
pool: CpuPool,
} }
impl<C, M, U, F> ParitySetClient<C, M, U, F> impl<C, M, U, F> ParitySetClient<C, M, U, F>
@ -53,7 +51,6 @@ impl<C, M, U, F> ParitySetClient<C, M, U, F>
updater: &Arc<U>, updater: &Arc<U>,
net: &Arc<ManageNetwork>, net: &Arc<ManageNetwork>,
fetch: F, fetch: F,
pool: CpuPool,
) -> Self { ) -> Self {
ParitySetClient { ParitySetClient {
client: client.clone(), client: client.clone(),
@ -61,7 +58,6 @@ impl<C, M, U, F> ParitySetClient<C, M, U, F>
updater: updater.clone(), updater: updater.clone(),
net: net.clone(), net: net.clone(),
fetch: fetch, fetch: fetch,
pool: pool,
} }
} }
} }
@ -177,7 +173,7 @@ impl<C, M, U, F> ParitySet for ParitySetClient<C, M, U, F> where
}) })
.map(Into::into) .map(Into::into)
}); });
Box::new(self.pool.spawn(future)) Box::new(future)
} }
fn upgrade_ready(&self) -> Result<Option<ReleaseInfo>> { fn upgrade_ready(&self) -> Result<Option<ReleaseInfo>> {


@ -27,7 +27,7 @@ use jsonrpc_macros::pubsub::Subscriber;
use jsonrpc_pubsub::SubscriptionId; use jsonrpc_pubsub::SubscriptionId;
use tokio_timer; use tokio_timer;
use parity_reactor::Remote; use parity_runtime::Executor;
use v1::helpers::GenericPollManager; use v1::helpers::GenericPollManager;
use v1::metadata::Metadata; use v1::metadata::Metadata;
use v1::traits::PubSub; use v1::traits::PubSub;
@ -35,12 +35,12 @@ use v1::traits::PubSub;
/// Parity PubSub implementation. /// Parity PubSub implementation.
pub struct PubSubClient<S: core::Middleware<Metadata>> { pub struct PubSubClient<S: core::Middleware<Metadata>> {
poll_manager: Arc<RwLock<GenericPollManager<S>>>, poll_manager: Arc<RwLock<GenericPollManager<S>>>,
remote: Remote, executor: Executor,
} }
impl<S: core::Middleware<Metadata>> PubSubClient<S> { impl<S: core::Middleware<Metadata>> PubSubClient<S> {
/// Creates new `PubSubClient`. /// Creates new `PubSubClient`.
pub fn new(rpc: MetaIoHandler<Metadata, S>, remote: Remote) -> Self { pub fn new(rpc: MetaIoHandler<Metadata, S>, executor: Executor) -> Self {
let poll_manager = Arc::new(RwLock::new(GenericPollManager::new(rpc))); let poll_manager = Arc::new(RwLock::new(GenericPollManager::new(rpc)));
let pm2 = poll_manager.clone(); let pm2 = poll_manager.clone();
@ -50,14 +50,14 @@ impl<S: core::Middleware<Metadata>> PubSubClient<S> {
// Start ticking // Start ticking
let interval = timer.interval(Duration::from_millis(1000)); let interval = timer.interval(Duration::from_millis(1000));
remote.spawn(interval executor.spawn(interval
.map_err(|e| warn!("Polling timer error: {:?}", e)) .map_err(|e| warn!("Polling timer error: {:?}", e))
.for_each(move |_| pm2.read().tick()) .for_each(move |_| pm2.read().tick())
); );
PubSubClient { PubSubClient {
poll_manager, poll_manager,
remote, executor,
} }
} }
} }
@ -65,8 +65,8 @@ impl<S: core::Middleware<Metadata>> PubSubClient<S> {
impl PubSubClient<core::NoopMiddleware> { impl PubSubClient<core::NoopMiddleware> {
/// Creates new `PubSubClient` with deterministic ids. /// Creates new `PubSubClient` with deterministic ids.
#[cfg(test)] #[cfg(test)]
pub fn new_test(rpc: MetaIoHandler<Metadata, core::NoopMiddleware>, remote: Remote) -> Self { pub fn new_test(rpc: MetaIoHandler<Metadata, core::NoopMiddleware>, executor: Executor) -> Self {
let client = Self::new(MetaIoHandler::with_middleware(Default::default()), remote); let client = Self::new(MetaIoHandler::with_middleware(Default::default()), executor);
*client.poll_manager.write() = GenericPollManager::new_test(rpc); *client.poll_manager.write() = GenericPollManager::new_test(rpc);
client client
} }
@ -84,7 +84,7 @@ impl<S: core::Middleware<Metadata>> PubSub for PubSubClient<S> {
let (id, receiver) = poll_manager.subscribe(meta, method, params); let (id, receiver) = poll_manager.subscribe(meta, method, params);
match subscriber.assign_id(id.clone()) { match subscriber.assign_id(id.clone()) {
Ok(sink) => { Ok(sink) => {
self.remote.spawn(receiver.forward(sink.sink_map_err(|e| { self.executor.spawn(receiver.forward(sink.sink_map_err(|e| {
warn!("Cannot send notification: {:?}", e); warn!("Cannot send notification: {:?}", e);
})).map(|_| ())); })).map(|_| ()));
}, },


@ -20,7 +20,7 @@ use std::sync::Arc;
use ethcore::account_provider::AccountProvider; use ethcore::account_provider::AccountProvider;
use ethkey; use ethkey;
use parity_reactor::Remote; use parity_runtime::Executor;
use parking_lot::Mutex; use parking_lot::Mutex;
use rlp::Rlp; use rlp::Rlp;
use transaction::{SignedTransaction, PendingTransaction}; use transaction::{SignedTransaction, PendingTransaction};
@ -50,7 +50,7 @@ impl<D: Dispatcher + 'static> SignerClient<D> {
store: &Arc<AccountProvider>, store: &Arc<AccountProvider>,
dispatcher: D, dispatcher: D,
signer: &Arc<SignerService>, signer: &Arc<SignerService>,
remote: Remote, executor: Executor,
) -> Self { ) -> Self {
let subscribers = Arc::new(Mutex::new(Subscribers::default())); let subscribers = Arc::new(Mutex::new(Subscribers::default()));
let subs = Arc::downgrade(&subscribers); let subs = Arc::downgrade(&subscribers);
@ -60,7 +60,7 @@ impl<D: Dispatcher + 'static> SignerClient<D> {
let requests = s.requests().into_iter().map(Into::into).collect::<Vec<ConfirmationRequest>>(); let requests = s.requests().into_iter().map(Into::into).collect::<Vec<ConfirmationRequest>>();
for subscription in subs.lock().values() { for subscription in subs.lock().values() {
let subscription: &Sink<_> = subscription; let subscription: &Sink<_> = subscription;
remote.spawn(subscription executor.spawn(subscription
.notify(Ok(requests.clone())) .notify(Ok(requests.clone()))
.map(|_| ()) .map(|_| ())
.map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e)) .map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e))


@ -44,7 +44,7 @@ use v1::types::{
Origin, Origin,
}; };
use parity_reactor::Remote; use parity_runtime::Executor;
/// After 60s entries that are not queried with `check_request` will get garbage collected. /// After 60s entries that are not queried with `check_request` will get garbage collected.
const MAX_PENDING_DURATION_SEC: u32 = 60; const MAX_PENDING_DURATION_SEC: u32 = 60;
@ -67,7 +67,7 @@ impl Future for DispatchResult {
} }
} }
fn schedule(remote: Remote, fn schedule(executor: Executor,
confirmations: Arc<Mutex<TransientHashMap<U256, Option<RpcConfirmationResult>>>>, confirmations: Arc<Mutex<TransientHashMap<U256, Option<RpcConfirmationResult>>>>,
id: U256, id: U256,
future: RpcConfirmationReceiver) { future: RpcConfirmationReceiver) {
@ -83,7 +83,7 @@ fn schedule(remote: Remote,
confirmations.insert(id, Some(result)); confirmations.insert(id, Some(result));
Ok(()) Ok(())
}); });
remote.spawn(future); executor.spawn(future);
} }
/// Implementation of functions that require signing when no trusted signer is used. /// Implementation of functions that require signing when no trusted signer is used.
@ -91,19 +91,19 @@ pub struct SigningQueueClient<D> {
signer: Arc<SignerService>, signer: Arc<SignerService>,
accounts: Arc<AccountProvider>, accounts: Arc<AccountProvider>,
dispatcher: D, dispatcher: D,
remote: Remote, executor: Executor,
// None here means that the request hasn't yet been confirmed // None here means that the request hasn't yet been confirmed
confirmations: Arc<Mutex<TransientHashMap<U256, Option<RpcConfirmationResult>>>>, confirmations: Arc<Mutex<TransientHashMap<U256, Option<RpcConfirmationResult>>>>,
} }
impl<D: Dispatcher + 'static> SigningQueueClient<D> { impl<D: Dispatcher + 'static> SigningQueueClient<D> {
/// Creates a new signing queue client given shared signing queue. /// Creates a new signing queue client given shared signing queue.
pub fn new(signer: &Arc<SignerService>, dispatcher: D, remote: Remote, accounts: &Arc<AccountProvider>) -> Self { pub fn new(signer: &Arc<SignerService>, dispatcher: D, executor: Executor, accounts: &Arc<AccountProvider>) -> Self {
SigningQueueClient { SigningQueueClient {
signer: signer.clone(), signer: signer.clone(),
accounts: accounts.clone(), accounts: accounts.clone(),
dispatcher, dispatcher,
remote, executor,
confirmations: Arc::new(Mutex::new(TransientHashMap::new(MAX_PENDING_DURATION_SEC))), confirmations: Arc::new(Mutex::new(TransientHashMap::new(MAX_PENDING_DURATION_SEC))),
} }
} }
@ -143,7 +143,7 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningQueueClient<D> {
} }
fn post_sign(&self, meta: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>> { fn post_sign(&self, meta: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>> {
let remote = self.remote.clone(); let executor = self.executor.clone();
let confirmations = self.confirmations.clone(); let confirmations = self.confirmations.clone();
Box::new(self.dispatch( Box::new(self.dispatch(
@ -153,21 +153,21 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningQueueClient<D> {
).map(move |result| match result { ).map(move |result| match result {
DispatchResult::Value(v) => RpcEither::Or(v), DispatchResult::Value(v) => RpcEither::Or(v),
DispatchResult::Future(id, future) => { DispatchResult::Future(id, future) => {
schedule(remote, confirmations, id, future); schedule(executor, confirmations, id, future);
RpcEither::Either(id.into()) RpcEither::Either(id.into())
}, },
})) }))
} }
fn post_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>> { fn post_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>> {
let remote = self.remote.clone(); let executor = self.executor.clone();
let confirmations = self.confirmations.clone(); let confirmations = self.confirmations.clone();
Box::new(self.dispatch(RpcConfirmationPayload::SendTransaction(request), DefaultAccount::Provided(self.accounts.default_account().ok().unwrap_or_default()), meta.origin) Box::new(self.dispatch(RpcConfirmationPayload::SendTransaction(request), DefaultAccount::Provided(self.accounts.default_account().ok().unwrap_or_default()), meta.origin)
.map(|result| match result { .map(|result| match result {
DispatchResult::Value(v) => RpcEither::Or(v), DispatchResult::Value(v) => RpcEither::Or(v),
DispatchResult::Future(id, future) => { DispatchResult::Future(id, future) => {
schedule(remote, confirmations, id, future); schedule(executor, confirmations, id, future);
RpcEither::Either(id.into()) RpcEither::Either(id.into())
}, },
})) }))


@ -20,12 +20,13 @@ use std::fmt;
use std::sync::Arc; use std::sync::Arc;
use std::sync::atomic::{self, AtomicUsize}; use std::sync::atomic::{self, AtomicUsize};
use std::time; use std::time;
use futures_cpupool as pool; use parity_runtime;
use jsonrpc_core as rpc; use jsonrpc_core as core;
use jsonrpc_core::futures::future::Either;
use order_stat; use order_stat;
use parking_lot::RwLock; use parking_lot::RwLock;
pub use self::pool::CpuPool; pub use self::parity_runtime::Executor;
const RATE_SECONDS: usize = 10; const RATE_SECONDS: usize = 10;
const STATS_SAMPLES: usize = 60; const STATS_SAMPLES: usize = 60;
@ -186,16 +187,14 @@ pub trait ActivityNotifier: Send + Sync + 'static {
pub struct Middleware<T: ActivityNotifier = ClientNotifier> { pub struct Middleware<T: ActivityNotifier = ClientNotifier> {
stats: Arc<RpcStats>, stats: Arc<RpcStats>,
notifier: T, notifier: T,
pool: Option<CpuPool>,
} }
impl<T: ActivityNotifier> Middleware<T> { impl<T: ActivityNotifier> Middleware<T> {
/// Create new Middleware with stats counter and activity notifier. /// Create new Middleware with stats counter and activity notifier.
pub fn new(stats: Arc<RpcStats>, notifier: T, pool: Option<CpuPool>) -> Self { pub fn new(stats: Arc<RpcStats>, notifier: T) -> Self {
Middleware { Middleware {
stats, stats,
notifier, notifier,
pool,
} }
} }
@ -204,28 +203,24 @@ impl<T: ActivityNotifier> Middleware<T> {
} }
} }
impl<M: rpc::Metadata, T: ActivityNotifier> rpc::Middleware<M> for Middleware<T> { impl<M: core::Metadata, T: ActivityNotifier> core::Middleware<M> for Middleware<T> {
type Future = rpc::futures::future::Either< type Future = core::FutureResponse;
pool::CpuFuture<Option<rpc::Response>, ()>,
rpc::FutureResponse,
>;
fn on_request<F, X>(&self, request: rpc::Request, meta: M, process: F) -> Self::Future where fn on_request<F, X>(&self, request: core::Request, meta: M, process: F) -> Either<Self::Future, X> where
F: FnOnce(rpc::Request, M) -> X, F: FnOnce(core::Request, M) -> X,
X: rpc::futures::Future<Item=Option<rpc::Response>, Error=()> + Send + 'static, X: core::futures::Future<Item=Option<core::Response>, Error=()> + Send + 'static,
{ {
use self::rpc::futures::future::Either::{A, B};
let start = time::Instant::now(); let start = time::Instant::now();
self.notifier.active(); self.notifier.active();
self.stats.count_request(); self.stats.count_request();
let id = match request { let id = match request {
rpc::Request::Single(rpc::Call::MethodCall(ref call)) => Some(call.id.clone()), core::Request::Single(core::Call::MethodCall(ref call)) => Some(call.id.clone()),
_ => None, _ => None,
}; };
let stats = self.stats.clone(); let stats = self.stats.clone();
let future = process(request, meta).map(move |res| { let future = process(request, meta).map(move |res| {
let time = Self::as_micro(start.elapsed()); let time = Self::as_micro(start.elapsed());
if time > 10_000 { if time > 10_000 {
@ -235,10 +230,7 @@ impl<M: rpc::Metadata, T: ActivityNotifier> rpc::Middleware<M> for Middleware<T>
res res
}); });
match self.pool { Either::A(Box::new(future))
Some(ref pool) => A(pool.spawn(future)),
None => B(Box::new(future)),
}
} }
} }


@ -32,6 +32,7 @@ use ethjson::spec::ForkSpec;
use io::IoChannel; use io::IoChannel;
use miner::external::ExternalMiner; use miner::external::ExternalMiner;
use parking_lot::Mutex; use parking_lot::Mutex;
use parity_runtime::Runtime;
use jsonrpc_core::IoHandler; use jsonrpc_core::IoHandler;
use v1::helpers::dispatch::FullDispatcher; use v1::helpers::dispatch::FullDispatcher;
@ -73,6 +74,7 @@ fn make_spec(chain: &BlockChain) -> Spec {
} }
struct EthTester { struct EthTester {
_runtime: Runtime,
client: Arc<Client>, client: Arc<Client>,
_miner: Arc<Miner>, _miner: Arc<Miner>,
_snapshot: Arc<TestSnapshotService>, _snapshot: Arc<TestSnapshotService>,
@ -99,6 +101,7 @@ impl EthTester {
} }
fn from_spec(spec: Spec) -> Self { fn from_spec(spec: Spec) -> Self {
let runtime = Runtime::with_thread_count(1);
let account_provider = account_provider(); let account_provider = account_provider();
let opt_account_provider = account_provider.clone(); let opt_account_provider = account_provider.clone();
let miner_service = miner_service(&spec, account_provider.clone()); let miner_service = miner_service(&spec, account_provider.clone());
@ -124,7 +127,7 @@ impl EthTester {
Default::default(), Default::default(),
); );
let reservations = Arc::new(Mutex::new(nonce::Reservations::new())); let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor())));
let dispatcher = FullDispatcher::new(client.clone(), miner_service.clone(), reservations, 50); let dispatcher = FullDispatcher::new(client.clone(), miner_service.clone(), reservations, 50);
let eth_sign = SigningUnsafeClient::new( let eth_sign = SigningUnsafeClient::new(
@ -137,6 +140,7 @@ impl EthTester {
handler.extend_with(eth_sign.to_delegate()); handler.extend_with(eth_sign.to_delegate());
EthTester { EthTester {
_runtime: runtime,
_miner: miner_service, _miner: miner_service,
_snapshot: snapshot_service, _snapshot: snapshot_service,
client: client, client: client,
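Easy to miss in these test changes: the `Runtime` is stored in the tester as `_runtime` even though only its `Executor` is ever used, because dropping the `Runtime` would tear down the worker threads behind every cloned handle mid-test. Reduced sketch of the shape:

    struct Harness {
        _runtime: Runtime, // kept solely so the thread pool stays alive
        reservations: Arc<Mutex<nonce::Reservations>>,
    }

    impl Harness {
        fn new() -> Self {
            let runtime = Runtime::with_thread_count(1);
            let reservations =
                Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor())));
            Harness { _runtime: runtime, reservations }
        }
    }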


@@ -32,6 +32,7 @@ use miner::external::ExternalMiner;
 use rlp;
 use rustc_hex::{FromHex, ToHex};
 use transaction::{Transaction, Action};
+use parity_runtime::Runtime;
 use jsonrpc_core::IoHandler;
 use v1::{Eth, EthClient, EthClientOptions, EthFilter, EthFilterClient, EthSigning, SigningUnsafeClient};
@@ -65,6 +66,7 @@ fn snapshot_service() -> Arc<TestSnapshotService> {
 }
 struct EthTester {
+	pub runtime: Runtime,
 	pub client: Arc<TestBlockChainClient>,
 	pub sync: Arc<TestSyncProvider>,
 	pub accounts_provider: Arc<AccountProvider>,
@@ -82,6 +84,7 @@ impl Default for EthTester {
 impl EthTester {
 	pub fn new_with_options(options: EthClientOptions) -> Self {
+		let runtime = Runtime::with_thread_count(1);
 		let client = blockchain_client();
 		let sync = sync_provider();
 		let ap = accounts_provider();
@@ -94,7 +97,7 @@ impl EthTester {
 		let poll_lifetime = options.poll_lifetime;
 		let eth = EthClient::new(&client, &snapshot, &sync, &opt_ap, &miner, &external_miner, options).to_delegate();
 		let filter = EthFilterClient::new(client.clone(), miner.clone(), poll_lifetime).to_delegate();
-		let reservations = Arc::new(Mutex::new(nonce::Reservations::new()));
+		let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor())));
 		let dispatcher = FullDispatcher::new(client.clone(), miner.clone(), reservations, gas_price_percentile);
 		let sign = SigningUnsafeClient::new(&opt_ap, dispatcher).to_delegate();
@@ -104,6 +107,7 @@ impl EthTester {
 		io.extend_with(filter);
 		EthTester {
+			runtime,
 			client: client,
 			sync: sync,
 			accounts_provider: ap,


@@ -25,14 +25,14 @@ use std::time::Duration;
 use v1::{EthPubSub, EthPubSubClient, Metadata};
 use ethcore::client::{TestBlockChainClient, EachBlockWith, ChainNotify, ChainRoute, ChainRouteType};
-use parity_reactor::EventLoop;
+use parity_runtime::Runtime;
 const DURATION_ZERO: Duration = Duration::from_millis(0);
 #[test]
 fn should_subscribe_to_new_heads() {
 	// given
-	let el = EventLoop::spawn();
+	let el = Runtime::with_thread_count(1);
 	let mut client = TestBlockChainClient::new();
 	// Insert some blocks
 	client.add_blocks(3, EachBlockWith::Nothing);
@@ -40,7 +40,7 @@ fn should_subscribe_to_new_heads() {
 	let h2 = client.block_hash_delta_minus(2);
 	let h1 = client.block_hash_delta_minus(3);
-	let pubsub = EthPubSubClient::new_test(Arc::new(client), el.remote());
+	let pubsub = EthPubSubClient::new_test(Arc::new(client), el.executor());
 	let handler = pubsub.handler().upgrade().unwrap();
 	let pubsub = pubsub.to_delegate();
@@ -89,7 +89,7 @@ fn should_subscribe_to_logs() {
 	use ethcore::client::BlockInfo;
 	// given
-	let el = EventLoop::spawn();
+	let el = Runtime::with_thread_count(1);
 	let mut client = TestBlockChainClient::new();
 	// Insert some blocks
 	client.add_blocks(1, EachBlockWith::Transaction);
@@ -112,7 +112,7 @@ fn should_subscribe_to_logs() {
 		}
 	]);
-	let pubsub = EthPubSubClient::new_test(Arc::new(client), el.remote());
+	let pubsub = EthPubSubClient::new_test(Arc::new(client), el.executor());
 	let handler = pubsub.handler().upgrade().unwrap();
 	let pubsub = pubsub.to_delegate();
@@ -156,10 +156,10 @@ fn should_subscribe_to_logs() {
 #[test]
 fn should_subscribe_to_pending_transactions() {
 	// given
-	let el = EventLoop::spawn();
+	let el = Runtime::with_thread_count(1);
 	let client = TestBlockChainClient::new();
-	let pubsub = EthPubSubClient::new_test(Arc::new(client), el.remote());
+	let pubsub = EthPubSubClient::new_test(Arc::new(client), el.executor());
 	let handler = pubsub.handler().upgrade().unwrap();
 	let pubsub = pubsub.to_delegate();
@@ -203,9 +203,9 @@ fn should_subscribe_to_pending_transactions() {
 #[test]
 fn should_return_unimplemented() {
 	// given
-	let el = EventLoop::spawn();
+	let el = Runtime::with_thread_count(1);
 	let client = TestBlockChainClient::new();
-	let pubsub = EthPubSubClient::new_test(Arc::new(client), el.remote());
+	let pubsub = EthPubSubClient::new_test(Arc::new(client), el.executor());
 	let pubsub = pubsub.to_delegate();
 	let mut io = MetaIoHandler::default();
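`EventLoop::spawn()`/`el.remote()` becomes `Runtime::with_thread_count(1)`/`el.executor()` throughout these tests. Below is a sketch of how such a wrapper over tokio's runtime could look; the real `parity_runtime::Runtime` lives in `util/runtime` and its internals are not shown in this diff, so the names and fields here are assumptions:

```rust
// Illustrative only: a thin wrapper in the spirit of `parity_runtime::Runtime`
// as used above; not the actual implementation.
extern crate tokio;

use tokio::runtime::{Builder, Runtime as TokioRuntime, TaskExecutor};

pub struct Runtime {
	inner: TokioRuntime,
}

impl Runtime {
	/// Builds a runtime backed by `threads` worker threads.
	pub fn with_thread_count(threads: usize) -> Self {
		let inner = Builder::new()
			.core_threads(threads)
			.build()
			.expect("failed to build tokio runtime");
		Runtime { inner }
	}

	/// Hands out a cloneable task executor; this replaces `EventLoop::remote()`.
	pub fn executor(&self) -> TaskExecutor {
		self.inner.executor()
	}
}

fn main() {
	let runtime = Runtime::with_thread_count(1);
	let _executor = runtime.executor();
}
```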


@@ -22,7 +22,6 @@ use ethereum_types::{U256, Address};
 use ethcore::miner::MinerService;
 use ethcore::client::TestBlockChainClient;
 use sync::ManageNetwork;
-use futures_cpupool::CpuPool;
 use jsonrpc_core::IoHandler;
 use v1::{ParitySet, ParitySetClient};
@@ -55,8 +54,7 @@
 	updater: &Arc<TestUpdater>,
 	net: &Arc<TestManageNetwork>,
 ) -> TestParitySetClient {
-	let pool = CpuPool::new(1);
-	ParitySetClient::new(client, miner, updater, &(net.clone() as Arc<ManageNetwork>), FakeFetch::new(Some(1)), pool)
+	ParitySetClient::new(client, miner, updater, &(net.clone() as Arc<ManageNetwork>), FakeFetch::new(Some(1)))
 }
 #[test]


@@ -24,6 +24,7 @@ use ethcore::client::TestBlockChainClient;
 use jsonrpc_core::IoHandler;
 use parking_lot::Mutex;
 use transaction::{Action, Transaction};
+use parity_runtime::Runtime;
 use v1::{PersonalClient, Personal, Metadata};
 use v1::helpers::nonce;
@@ -32,6 +33,7 @@ use v1::tests::helpers::TestMinerService;
 use v1::types::H520;
 struct PersonalTester {
+	_runtime: Runtime,
 	accounts: Arc<AccountProvider>,
 	io: IoHandler<Metadata>,
 	miner: Arc<TestMinerService>,
@@ -51,10 +53,11 @@ fn miner_service() -> Arc<TestMinerService> {
 }
 fn setup() -> PersonalTester {
+	let runtime = Runtime::with_thread_count(1);
 	let accounts = accounts_provider();
 	let client = blockchain_client();
 	let miner = miner_service();
-	let reservations = Arc::new(Mutex::new(nonce::Reservations::new()));
+	let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor())));
 	let dispatcher = FullDispatcher::new(client, miner.clone(), reservations, 50);
 	let personal = PersonalClient::new(&accounts, dispatcher, false);
@@ -63,6 +66,7 @@ fn setup() -> PersonalTester {
 	io.extend_with(personal.to_delegate());
 	let tester = PersonalTester {
+		_runtime: runtime,
 		accounts: accounts,
 		io: io,
 		miner: miner,
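The underscore-prefixed `_runtime` field is never read; it exists so the runtime, and the worker threads behind `runtime.executor()`, outlive the test. A small illustration, assuming a plain tokio 0.1 runtime in place of `parity_runtime::Runtime`:

```rust
// Sketch: why the testers keep a `_runtime` field. The underscore silences
// the unused-field lint while the value's lifetime keeps the executor's
// worker threads alive for the duration of the test.
extern crate futures;
extern crate tokio;

use tokio::runtime::{Builder, Runtime, TaskExecutor};

struct Tester {
	// Held only for its lifetime; never read directly.
	_runtime: Runtime,
	executor: TaskExecutor,
}

fn setup() -> Tester {
	let runtime = Builder::new().core_threads(1).build().unwrap();
	let executor = runtime.executor();
	// If `runtime` were dropped here instead of stored, tasks handed to
	// `executor` would have no live runtime to run on.
	Tester { _runtime: runtime, executor }
}

fn main() {
	let tester = setup();
	tester.executor.spawn(futures::future::ok(()));
}
```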


@@ -20,7 +20,7 @@ use jsonrpc_core::{self as core, MetaIoHandler};
 use jsonrpc_core::futures::{self, Stream, Future};
 use jsonrpc_pubsub::Session;
-use parity_reactor::EventLoop;
+use parity_runtime::Runtime;
 use v1::{PubSub, PubSubClient, Metadata};
 fn rpc() -> MetaIoHandler<Metadata, core::NoopMiddleware> {
@@ -40,9 +40,9 @@ fn rpc() -> MetaIoHandler<Metadata, core::NoopMiddleware> {
 #[test]
 fn should_subscribe_to_a_method() {
 	// given
-	let el = EventLoop::spawn();
+	let el = Runtime::with_thread_count(1);
 	let rpc = rpc();
-	let pubsub = PubSubClient::new_test(rpc, el.remote()).to_delegate();
+	let pubsub = PubSubClient::new_test(rpc, el.executor()).to_delegate();
 	let mut io = MetaIoHandler::default();
 	io.extend_with(pubsub);


@@ -21,7 +21,7 @@ use bytes::ToPretty;
 use ethcore::account_provider::AccountProvider;
 use ethcore::client::TestBlockChainClient;
-use parity_reactor::EventLoop;
+use parity_runtime::Runtime;
 use parking_lot::Mutex;
 use rlp::encode;
 use transaction::{Transaction, Action, SignedTransaction};
@@ -36,6 +36,7 @@ use v1::helpers::{nonce, SigningQueue, SignerService, FilledTransactionRequest,
 use v1::helpers::dispatch::{FullDispatcher, eth_data_hash};
 struct SignerTester {
+	_runtime: Runtime,
 	signer: Arc<SignerService>,
 	accounts: Arc<AccountProvider>,
 	io: IoHandler<Metadata>,
@@ -56,18 +57,19 @@ fn miner_service() -> Arc<TestMinerService> {
 }
 fn signer_tester() -> SignerTester {
+	let runtime = Runtime::with_thread_count(1);
 	let signer = Arc::new(SignerService::new_test(false));
 	let accounts = accounts_provider();
 	let client = blockchain_client();
 	let miner = miner_service();
-	let reservations = Arc::new(Mutex::new(nonce::Reservations::new()));
-	let event_loop = EventLoop::spawn();
+	let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor())));
 	let dispatcher = FullDispatcher::new(client, miner.clone(), reservations, 50);
 	let mut io = IoHandler::default();
-	io.extend_with(SignerClient::new(&accounts, dispatcher, &signer, event_loop.remote()).to_delegate());
+	io.extend_with(SignerClient::new(&accounts, dispatcher, &signer, runtime.executor()).to_delegate());
 	SignerTester {
+		_runtime: runtime,
 		signer: signer,
 		accounts: accounts,
 		io: io,


@@ -39,10 +39,10 @@ use ethstore::ethkey::{Generator, Random};
 use parking_lot::Mutex;
 use serde_json;
 use transaction::{Transaction, Action, SignedTransaction};
-use parity_reactor::Remote;
+use parity_runtime::{Runtime, Executor};
 struct SigningTester {
+	pub runtime: Runtime,
 	pub signer: Arc<SignerService>,
 	pub client: Arc<TestBlockChainClient>,
 	pub miner: Arc<TestMinerService>,
@@ -52,23 +52,25 @@ struct SigningTester {
 impl Default for SigningTester {
 	fn default() -> Self {
+		let runtime = Runtime::with_thread_count(1);
 		let signer = Arc::new(SignerService::new_test(false));
 		let client = Arc::new(TestBlockChainClient::default());
 		let miner = Arc::new(TestMinerService::default());
 		let accounts = Arc::new(AccountProvider::transient_provider());
-		let reservations = Arc::new(Mutex::new(nonce::Reservations::new()));
+		let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor())));
 		let mut io = IoHandler::default();
 		let dispatcher = FullDispatcher::new(client.clone(), miner.clone(), reservations, 50);
-		let remote = Remote::new_thread_per_future();
-		let rpc = SigningQueueClient::new(&signer, dispatcher.clone(), remote.clone(), &accounts);
+		let executor = Executor::new_thread_per_future();
+		let rpc = SigningQueueClient::new(&signer, dispatcher.clone(), executor.clone(), &accounts);
 		io.extend_with(EthSigning::to_delegate(rpc));
-		let rpc = SigningQueueClient::new(&signer, dispatcher, remote, &accounts);
+		let rpc = SigningQueueClient::new(&signer, dispatcher, executor, &accounts);
 		io.extend_with(ParitySigning::to_delegate(rpc));
 		SigningTester {
+			runtime,
 			signer: signer,
 			client: client,
 			miner: miner,
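`Remote::new_thread_per_future()` is renamed to `Executor::new_thread_per_future()` here. Its implementation is not part of this diff; the sketch below shows one plausible shape of a thread-per-future executor, and its names and structure are assumptions rather than the parity code:

```rust
// One plausible shape of a "thread per future" executor, offered as a
// sketch only; the real `Executor::new_thread_per_future` lives in
// `util/runtime` and is not shown in this diff.
extern crate futures;

use std::thread;
use std::time::Duration;
use futures::Future;

#[derive(Clone)]
struct ThreadPerFuture;

impl ThreadPerFuture {
	/// Runs each submitted future to completion on its own OS thread.
	fn spawn<F>(&self, f: F) where F: Future<Item = (), Error = ()> + Send + 'static {
		thread::spawn(move || {
			// `wait` blocks this dedicated thread until the future resolves.
			let _ = f.wait();
		});
	}
}

fn main() {
	let exec = ThreadPerFuture;
	exec.spawn(futures::future::ok::<(), ()>(()));
	// Give the detached thread a moment in this toy example.
	thread::sleep(Duration::from_millis(10));
}
```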


@@ -14,7 +14,7 @@ serde_json = "1.0"
 url = "1.2.0"
 matches = "0.1"
 parking_lot = "0.6"
-jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
-jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
+jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
+jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
 parity-rpc = { path = "../rpc" }
 keccak-hash = "0.1"


@@ -274,7 +274,7 @@ impl Rpc {
 		let request = MethodCall {
 			jsonrpc: Some(Version::V2),
 			method: method.to_owned(),
-			params: Some(Params::Array(params)),
+			params: Params::Array(params),
 			id: Id::Num(id as u64),
 		};
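With the bump to the parity-2.2 branch of `jsonrpc`, `MethodCall::params` is no longer an `Option<Params>`. A short sketch of constructing a request under the new shape (types as used in this diff; the jsonrpc-core version from the parity-2.2 branch is assumed):

```rust
// Constructing a request under the bumped API: `params` is a plain `Params`
// rather than `Option<Params>`.
extern crate jsonrpc_core;
extern crate serde_json;

use jsonrpc_core::{Id, MethodCall, Params, Version};

fn build_call(id: u64, method: &str, params: Vec<serde_json::Value>) -> MethodCall {
	MethodCall {
		jsonrpc: Some(Version::V2),
		method: method.to_owned(),
		// Previously `Some(Params::Array(params))`; an absent parameter list
		// is now expressed with `Params::None` instead of `None`.
		params: Params::Array(params),
		id: Id::Num(id),
	}
}

fn main() {
	let call = build_call(1, "eth_blockNumber", vec![]);
	println!("{}", serde_json::to_string(&call).expect("serializable"));
}
```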


@@ -9,23 +9,20 @@ authors = ["Parity Technologies <admin@parity.io>"]
 byteorder = "1.0"
 log = "0.4"
 parking_lot = "0.6"
-hyper = { version = "0.11", default-features = false }
+hyper = { version = "0.12", default-features = false }
 serde = "1.0"
 serde_json = "1.0"
 serde_derive = "1.0"
 futures = "0.1"
-futures-cpupool = "0.1"
 rustc-hex = "1.0"
 tiny-keccak = "1.4"
-tokio = "0.1"
-tokio-core = "0.1"
+tokio = "~0.1.11"
 tokio-io = "0.1"
 tokio-service = "0.1"
-tokio-proto = "0.1"
 url = "1.0"
 ethcore = { path = "../ethcore" }
 parity-bytes = "0.1"
-parity-crypto = "0.1"
+parity-crypto = "0.2"
 ethcore-logger = { path = "../logger" }
 ethcore-sync = { path = "../ethcore/sync" }
 ethcore-transaction = { path = "../ethcore/transaction" }


@@ -20,7 +20,7 @@ use std::sync::Arc;
 use std::sync::mpsc;
 use futures::{self, Future};
 use parking_lot::Mutex;
-use tokio_core::reactor::Core;
+use tokio::runtime;
 use crypto::DEFAULT_MAC;
 use ethkey::crypto;
 use super::acl_storage::AclStorage;
@@ -191,7 +191,11 @@ impl KeyServerCore {
 		let (stop, stopped) = futures::oneshot();
 		let (tx, rx) = mpsc::channel();
 		let handle = thread::Builder::new().name("KeyServerLoop".into()).spawn(move || {
-			let mut el = match Core::new() {
+			let runtime_res = runtime::Builder::new()
+				.core_threads(config.threads)
+				.build();
+
+			let mut el = match runtime_res {
 				Ok(el) => el,
 				Err(e) => {
 					tx.send(Err(Error::Internal(format!("error initializing event loop: {}", e)))).expect("Rx is blocking upper thread.");
@@ -199,10 +203,10 @@
 				},
 			};
-			let cluster = ClusterCore::new(el.handle(), config);
+			let cluster = ClusterCore::new(el.executor(), config);
 			let cluster_client = cluster.and_then(|c| c.run().map(|_| c.client()));
 			tx.send(cluster_client.map_err(Into::into)).expect("Rx is blocking upper thread.");
-			let _ = el.run(futures::empty().select(stopped));
+			let _ = el.block_on(futures::empty().select(stopped));
 			trace!(target: "secretstore_net", "{}: KeyServerLoop thread stopped", self_key_pair.public());
 		}).map_err(|e| Error::Internal(format!("{}", e)))?;
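The hunk above replaces `tokio_core::reactor::Core::run` with a multi-threaded tokio runtime blocked on an empty future `select`ed against a stop signal. A minimal, self-contained sketch of the same run-until-signalled shape (tokio 0.1 / futures 0.1):

```rust
// Build a multi-threaded tokio 0.1 runtime, then block it on an empty
// future `select`ed against a oneshot acting as the stop signal.
extern crate futures;
extern crate tokio;

use futures::{future, Future};
use futures::sync::oneshot;
use tokio::runtime;

fn main() {
	let (stop, stopped) = oneshot::channel::<()>();

	let mut el = runtime::Builder::new()
		.core_threads(2) // `config.threads` in the code above
		.build()
		.expect("failed to build runtime");

	// Another thread would normally hold `stop` and fire it on shutdown;
	// here it fires immediately so the example terminates.
	stop.send(()).expect("receiver is alive");

	// Returns once either side of the `select` resolves.
	let _ = el.block_on(future::empty::<(), ()>().select(stopped.map_err(|_| ())));
}
```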


@@ -701,7 +701,7 @@ impl SessionImpl {
 		let threshold = data.threshold.expect("threshold is filled in initialization phase; KV phase follows initialization phase; qed");
 		let is_zero = data.is_zero.expect("is_zero is filled in initialization phase; KV phase follows initialization phase; qed");
 		let self_public_share = {
 			if !is_zero {
 				let derived_point = data.derived_point.clone().expect("derived point generated on initialization phase; KV phase follows initialization phase; qed");
 				let number_id = data.nodes[self.node()].id_number.clone();
 				for (_ , node_data) in data.nodes.iter_mut().filter(|&(node_id, _)| node_id != self.node()) {
@@ -942,12 +942,12 @@ pub mod tests {
 	use std::sync::Arc;
 	use std::collections::{BTreeSet, BTreeMap, VecDeque};
 	use std::time::Duration;
-	use tokio_core::reactor::Core;
 	use ethereum_types::Address;
 	use ethkey::{Random, Generator, KeyPair};
 	use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage};
 	use key_server_cluster::message::{self, Message, GenerationMessage};
-	use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until, all_connections_established};
+	use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until,
+		all_connections_established, new_runtime};
 	use key_server_cluster::cluster_sessions::ClusterSession;
 	use key_server_cluster::generation_session::{SessionImpl, SessionState, SessionParams};
 	use key_server_cluster::math;
@@ -1357,19 +1357,22 @@
 		let test_cases = [(1, 3)];
 		for &(threshold, num_nodes) in &test_cases {
-			let mut core = Core::new().unwrap();
+			let mut core = new_runtime();
 			// prepare cluster objects for each node
 			let clusters = make_clusters(&core, 6031, num_nodes);
 			run_clusters(&clusters);
+			// `clusters` contains `Arc<ClusterCore>` and clones will refer to the same cores.
+			let clusters_clone = clusters.clone();
 			// establish connections
-			loop_until(&mut core, CONN_TIMEOUT, || clusters.iter().all(all_connections_established));
+			loop_until(&mut core, CONN_TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
 			// run session to completion
 			let session_id = SessionId::default();
 			let session = clusters[0].client().new_generation_session(session_id, Default::default(), Default::default(), threshold).unwrap();
-			loop_until(&mut core, SESSION_TIMEOUT, || session.joint_public_and_secret().is_some());
+			loop_until(&mut core, SESSION_TIMEOUT, move || session.joint_public_and_secret().is_some());
 		}
 	}


@@ -17,15 +17,16 @@
 use std::io;
 use std::time::{Duration, Instant};
 use std::sync::Arc;
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::collections::{BTreeMap, BTreeSet};
 use std::collections::btree_map::Entry;
 use std::net::{SocketAddr, IpAddr};
-use futures::{finished, failed, Future, Stream};
-use futures_cpupool::CpuPool;
-use parking_lot::{RwLock, Mutex};
+use futures::{future, Future, Stream};
+use parking_lot::{Mutex, RwLock};
 use tokio_io::IoFuture;
-use tokio_core::reactor::{Handle, Remote, Interval};
-use tokio_core::net::{TcpListener, TcpStream};
+use tokio::runtime::TaskExecutor;
+use tokio::timer::{Interval, timeout::Error as TimeoutError};
+use tokio::net::{TcpListener, TcpStream};
 use ethkey::{Public, KeyPair, Signature, Random, Generator};
 use ethereum_types::{Address, H256};
 use key_server_cluster::{Error, NodeId, SessionId, Requester, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair};
@@ -136,8 +137,9 @@ pub struct ClusterConfiguration {
 	pub acl_storage: Arc<AclStorage>,
 	/// Administrator public key.
 	pub admin_public: Option<Public>,
-	/// Should key servers set change session should be started when servers set changes.
-	/// This will only work when servers set is configured using KeyServerSet contract.
+	/// Should key servers set change session when servers set changes? This
+	/// will only work when servers set is configured using KeyServerSet
+	/// contract.
 	pub auto_migrate_enabled: bool,
 }
@@ -149,8 +151,6 @@ pub struct ClusterState {
 /// Network cluster implementation.
 pub struct ClusterCore {
-	/// Handle to the event loop.
-	handle: Handle,
 	/// Listen address.
 	listen_address: SocketAddr,
 	/// Cluster data.
@@ -165,7 +165,7 @@ pub struct ClusterClientImpl {
 /// Network cluster view. It is a communication channel, required in single session.
 pub struct ClusterView {
-	core: Arc<Mutex<ClusterViewCore>>,
+	core: Arc<RwLock<ClusterViewCore>>,
 	configured_nodes_count: usize,
 	connected_nodes_count: usize,
 }
@@ -175,15 +175,15 @@ pub struct ClusterData {
 	/// Cluster configuration.
 	pub config: ClusterConfiguration,
 	/// Handle to the event loop.
-	pub handle: Remote,
-	/// Handle to the cpu thread pool.
-	pub pool: CpuPool,
+	pub executor: TaskExecutor,
 	/// KeyPair this node holds.
 	pub self_key_pair: Arc<NodeKeyPair>,
 	/// Connections data.
 	pub connections: ClusterConnections,
 	/// Active sessions data.
 	pub sessions: ClusterSessions,
+	/// Shutdown flag:
+	pub is_shutdown: Arc<AtomicBool>,
 }
 /// Connections that are forming the cluster. Lock order: trigger.lock() -> data.lock().
@@ -231,19 +231,18 @@ pub struct Connection {
 	/// Connection key.
 	key: KeyPair,
 	/// Last message time.
-	last_message_time: Mutex<Instant>,
+	last_message_time: RwLock<Instant>,
 }
 impl ClusterCore {
-	pub fn new(handle: Handle, config: ClusterConfiguration) -> Result<Arc<Self>, Error> {
+	pub fn new(executor: TaskExecutor, config: ClusterConfiguration) -> Result<Arc<Self>, Error> {
 		let listen_address = make_socket_address(&config.listen_address.0, config.listen_address.1)?;
 		let connections = ClusterConnections::new(&config)?;
 		let servers_set_change_creator_connector = connections.connector.clone();
 		let sessions = ClusterSessions::new(&config, servers_set_change_creator_connector);
-		let data = ClusterData::new(&handle, config, connections, sessions);
+		let data = ClusterData::new(&executor, config, connections, sessions);
 		Ok(Arc::new(ClusterCore {
-			handle: handle,
 			listen_address: listen_address,
 			data: data,
 		}))
@@ -272,7 +271,7 @@ impl ClusterCore {
 			.and_then(|_| self.run_connections())?;
 		// schedule maintain procedures
-		ClusterCore::schedule_maintain(&self.handle, self.data.clone());
+		ClusterCore::schedule_maintain(self.data.clone());
 		Ok(())
 	}
@@ -280,7 +279,7 @@
 	/// Start listening for incoming connections.
 	pub fn run_listener(&self) -> Result<(), Error> {
 		// start listeining for incoming connections
-		self.handle.spawn(ClusterCore::listen(&self.handle, self.data.clone(), self.listen_address.clone())?);
+		self.data.spawn(ClusterCore::listen(self.data.clone(), self.listen_address.clone())?);
 		Ok(())
 	}
@@ -293,53 +292,49 @@ impl ClusterCore {
 	/// Connect to peer.
 	fn connect(data: Arc<ClusterData>, node_address: SocketAddr) {
-		data.handle.clone().spawn(move |handle| {
-			data.pool.clone().spawn(ClusterCore::connect_future(handle, data, node_address))
-		})
+		data.clone().spawn(ClusterCore::connect_future(data, node_address));
 	}
-	/// Connect to socket using given context and handle.
-	fn connect_future(handle: &Handle, data: Arc<ClusterData>, node_address: SocketAddr) -> BoxedEmptyFuture {
+	/// Connect to socket using given context and executor.
+	fn connect_future(data: Arc<ClusterData>, node_address: SocketAddr) -> BoxedEmptyFuture {
 		let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect();
-		Box::new(net_connect(&node_address, handle, data.self_key_pair.clone(), disconnected_nodes)
+		Box::new(net_connect(&node_address, data.self_key_pair.clone(), disconnected_nodes)
 			.then(move |result| ClusterCore::process_connection_result(data, Some(node_address), result))
-			.then(|_| finished(())))
+			.then(|_| future::ok(())))
 	}
 	/// Start listening for incoming connections.
-	fn listen(handle: &Handle, data: Arc<ClusterData>, listen_address: SocketAddr) -> Result<BoxedEmptyFuture, Error> {
-		Ok(Box::new(TcpListener::bind(&listen_address, &handle)?
+	fn listen(data: Arc<ClusterData>, listen_address: SocketAddr) -> Result<BoxedEmptyFuture, Error> {
+		Ok(Box::new(TcpListener::bind(&listen_address)?
 			.incoming()
-			.and_then(move |(stream, node_address)| {
-				ClusterCore::accept_connection(data.clone(), stream, node_address);
+			.and_then(move |stream| {
+				ClusterCore::accept_connection(data.clone(), stream);
 				Ok(())
 			})
 			.for_each(|_| Ok(()))
-			.then(|_| finished(()))))
+			.then(|_| future::ok(()))))
 	}
 	/// Accept connection.
-	fn accept_connection(data: Arc<ClusterData>, stream: TcpStream, node_address: SocketAddr) {
-		data.handle.clone().spawn(move |handle| {
-			data.pool.clone().spawn(ClusterCore::accept_connection_future(handle, data, stream, node_address))
-		})
+	fn accept_connection(data: Arc<ClusterData>, stream: TcpStream) {
+		data.clone().spawn(ClusterCore::accept_connection_future(data, stream))
 	}
 	/// Accept connection future.
-	fn accept_connection_future(handle: &Handle, data: Arc<ClusterData>, stream: TcpStream, node_address: SocketAddr) -> BoxedEmptyFuture {
-		Box::new(net_accept_connection(node_address, stream, handle, data.self_key_pair.clone())
+	fn accept_connection_future(data: Arc<ClusterData>, stream: TcpStream) -> BoxedEmptyFuture {
+		Box::new(net_accept_connection(stream, data.self_key_pair.clone())
 			.then(move |result| ClusterCore::process_connection_result(data, None, result))
-			.then(|_| finished(())))
	}
 	/// Schedule mainatain procedures.
-	fn schedule_maintain(handle: &Handle, data: Arc<ClusterData>) {
+	fn schedule_maintain(data: Arc<ClusterData>) {
 		let d = data.clone();
-		let interval: BoxedEmptyFuture = Box::new(Interval::new(Duration::new(MAINTAIN_INTERVAL, 0), handle)
-			.expect("failed to create interval")
+
+		let interval = Interval::new_interval(Duration::new(MAINTAIN_INTERVAL, 0))
 			.and_then(move |_| Ok(ClusterCore::maintain(data.clone())))
 			.for_each(|_| Ok(()))
-			.then(|_| finished(())));
+			.then(|_| future::ok(()));
 		d.spawn(interval);
 	}
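`tokio::timer::Interval::new_interval` needs no reactor handle and cannot fail at construction, unlike `tokio_core`'s `Interval::new`. A self-contained sketch of the same schedule-and-run shape; the period and the `take(2)` bound are illustrative only (the real maintenance task runs forever):

```rust
// Periodic scheduling with tokio 0.1's timer, in isolation.
extern crate futures;
extern crate tokio;

use std::time::Duration;
use futures::{Future, Stream};
use tokio::timer::Interval;

fn maintain_task() -> impl Future<Item = (), Error = ()> + Send {
	Interval::new_interval(Duration::from_millis(100))
		.take(2) // bounded so the example terminates
		.map_err(|e| eprintln!("timer error: {:?}", e))
		.for_each(|_instant| {
			// periodic maintenance step goes here
			Ok(())
		})
}

fn main() {
	tokio::run(maintain_task());
}
```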
@@ -362,20 +357,20 @@ impl ClusterCore {
 				Ok((_, Ok(message))) => {
 					ClusterCore::process_connection_message(data.clone(), connection.clone(), message);
 					// continue serving connection
-					data.spawn(ClusterCore::process_connection_messages(data.clone(), connection));
-					Box::new(finished(Ok(())))
+					data.spawn(ClusterCore::process_connection_messages(data.clone(), connection).then(|_| Ok(())));
+					Box::new(future::ok(Ok(())))
 				},
 				Ok((_, Err(err))) => {
 					warn!(target: "secretstore_net", "{}: protocol error '{}' when reading message from node {}", data.self_key_pair.public(), err, connection.node_id());
 					// continue serving connection
-					data.spawn(ClusterCore::process_connection_messages(data.clone(), connection));
-					Box::new(finished(Err(err)))
+					data.spawn(ClusterCore::process_connection_messages(data.clone(), connection).then(|_| Ok(())));
+					Box::new(future::ok(Err(err)))
 				},
 				Err(err) => {
 					warn!(target: "secretstore_net", "{}: network error '{}' when reading message from node {}", data.self_key_pair.public(), err, connection.node_id());
 					// close connection
 					data.connections.remove(data.clone(), connection.node_id(), connection.is_inbound());
-					Box::new(failed(err))
+					Box::new(future::err(err))
 				},
 			}
 		))
@@ -394,7 +389,7 @@ impl ClusterCore {
 				data.sessions.on_connection_timeout(connection.node_id());
 			}
 			else if last_message_diff > KEEP_ALIVE_SEND_INTERVAL {
-				data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {}))));
+				data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {}))).then(|_| Ok(())));
 			}
 		}
 	}
@@ -415,33 +410,35 @@ impl ClusterCore {
 	}
 	/// Process connection future result.
-	fn process_connection_result(data: Arc<ClusterData>, outbound_addr: Option<SocketAddr>, result: Result<DeadlineStatus<Result<NetConnection, Error>>, io::Error>) -> IoFuture<Result<(), Error>> {
+	fn process_connection_result(data: Arc<ClusterData>, outbound_addr: Option<SocketAddr>,
+		result: Result<DeadlineStatus<Result<NetConnection, Error>>, TimeoutError<io::Error>>) -> IoFuture<Result<(), Error>>
+	{
 		match result {
 			Ok(DeadlineStatus::Meet(Ok(connection))) => {
 				let connection = Connection::new(outbound_addr.is_none(), connection);
 				if data.connections.insert(data.clone(), connection.clone()) {
 					ClusterCore::process_connection_messages(data.clone(), connection)
 				} else {
-					Box::new(finished(Ok(())))
+					Box::new(future::ok(Ok(())))
 				}
 			},
 			Ok(DeadlineStatus::Meet(Err(err))) => {
 				warn!(target: "secretstore_net", "{}: protocol error '{}' when establishing {} connection{}",
 					data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
 					outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
-				Box::new(finished(Ok(())))
+				Box::new(future::ok(Ok(())))
 			},
 			Ok(DeadlineStatus::Timeout) => {
 				warn!(target: "secretstore_net", "{}: timeout when establishing {} connection{}",
 					data.self_key_pair.public(), if outbound_addr.is_some() { "outbound" } else { "inbound" },
 					outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
-				Box::new(finished(Ok(())))
+				Box::new(future::ok(Ok(())))
 			},
 			Err(err) => {
 				warn!(target: "secretstore_net", "{}: network error '{}' when establishing {} connection{}",
 					data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
 					outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
-				Box::new(finished(Ok(())))
+				Box::new(future::ok(Ok(())))
 			},
 		}
 	}
@@ -595,7 +592,7 @@ impl ClusterCore {
 				if !message.is_error_message() {
 					let session_id = message.into_session_id().expect("session_id only fails for cluster messages; only session messages are passed to process_message; qed");
 					let session_nonce = message.session_nonce().expect("session_nonce only fails for cluster messages; only session messages are passed to process_message; qed");
-					data.spawn(connection.send_message(SC::make_error_message(session_id, session_nonce, error)));
+					data.spawn(connection.send_message(SC::make_error_message(session_id, session_nonce, error)).then(|_| Ok(())));
 				}
 				return None;
 			},
@@ -648,13 +645,19 @@ impl ClusterCore {
 		match message {
 			ClusterMessage::KeepAlive(_) => data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse {
 				session_id: None,
-			})))),
+			}))).then(|_| Ok(()))),
 			ClusterMessage::KeepAliveResponse(msg) => if let Some(session_id) = msg.session_id {
 				data.sessions.on_session_keep_alive(connection.node_id(), session_id.into());
 			},
 			_ => warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}", data.self_key_pair.public(), message, connection.node_id(), connection.node_address()),
 		}
 	}
+	/// Prevents new tasks from being spawned.
+	#[cfg(test)]
+	pub fn shutdown(&self) {
+		self.data.shutdown()
+	}
 }
 impl ClusterConnections {
@@ -787,14 +790,14 @@ impl ClusterConnections {
 }
 impl ClusterData {
-	pub fn new(handle: &Handle, config: ClusterConfiguration, connections: ClusterConnections, sessions: ClusterSessions) -> Arc<Self> {
+	pub fn new(executor: &TaskExecutor, config: ClusterConfiguration, connections: ClusterConnections, sessions: ClusterSessions) -> Arc<Self> {
 		Arc::new(ClusterData {
-			handle: handle.remote().clone(),
-			pool: CpuPool::new(config.threads),
+			executor: executor.clone(),
 			self_key_pair: config.self_key_pair.clone(),
 			connections: connections,
 			sessions: sessions,
 			config: config,
+			is_shutdown: Arc::new(AtomicBool::new(false)),
 		})
 	}
@@ -803,12 +806,28 @@ impl ClusterData {
 		self.connections.get(node)
 	}
-	/// Spawns a future using thread pool and schedules execution of it with event loop handle.
-	pub fn spawn<F>(&self, f: F) where F: Future + Send + 'static, F::Item: Send + 'static, F::Error: Send + 'static {
-		let pool_work = self.pool.spawn(f);
-		self.handle.spawn(move |_handle| {
-			pool_work.then(|_| finished(()))
-		})
+	/// Spawns a future on the runtime.
+	//
+	// TODO: Consider implementing a more graceful shutdown process using an
+	// `AtomicBool`, etc. which would prevent tasks from being spawned after a
+	// shutdown signal is given. (Recursive calls, in
+	// `process_connection_messages` for example, appear to continue
+	// indefinitely.)
+	pub fn spawn<F>(&self, f: F) where F: Future<Item = (), Error = ()> + Send + 'static {
+		if self.is_shutdown.load(Ordering::Acquire) == false {
+			if let Err(err) = future::Executor::execute(&self.executor, Box::new(f)) {
+				error!("Secret store runtime unable to spawn task. Runtime is shutting down. ({:?})", err);
+			}
+		} else {
+			error!("Secret store runtime unable to spawn task. Shutdown has been started.");
+		}
+	}
+
+	/// Sets the `is_shutdown` flag which prevents future tasks from being
+	/// spawned via `::spawn`.
+	#[cfg(test)]
+	pub fn shutdown(&self) {
+		self.is_shutdown.store(true, Ordering::Release);
 	}
 }
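The guarded-spawn idea above, in isolation: an `AtomicBool` consulted before handing the future to `futures::future::Executor::execute`, which is the trait tokio's `TaskExecutor` implements. A self-contained sketch:

```rust
extern crate futures;
extern crate tokio;

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use futures::{future, Future};
use futures::future::Executor;
use tokio::runtime::{Runtime, TaskExecutor};

struct Spawner {
	executor: TaskExecutor,
	is_shutdown: Arc<AtomicBool>,
}

impl Spawner {
	fn spawn<F>(&self, f: F) where F: Future<Item = (), Error = ()> + Send + 'static {
		if !self.is_shutdown.load(Ordering::Acquire) {
			if let Err(e) = self.executor.execute(Box::new(f)) {
				eprintln!("executor rejected task: {:?}", e);
			}
		}
		// After `is_shutdown` is set, new work is silently refused.
	}
}

fn main() {
	let runtime = Runtime::new().expect("runtime");
	let spawner = Spawner {
		executor: runtime.executor(),
		is_shutdown: Arc::new(AtomicBool::new(false)),
	};
	spawner.spawn(future::ok(()));
	spawner.is_shutdown.store(true, Ordering::Release);
	runtime.shutdown_on_idle().wait().unwrap();
}
```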
@@ -820,7 +839,7 @@ impl Connection {
 			is_inbound: is_inbound,
 			stream: connection.stream,
 			key: connection.key,
-			last_message_time: Mutex::new(Instant::now()),
+			last_message_time: RwLock::new(Instant::now()),
 		})
 	}
@@ -833,11 +852,11 @@ impl Connection {
 	}
 	pub fn last_message_time(&self) -> Instant {
-		*self.last_message_time.lock()
+		*self.last_message_time.read()
 	}
 	pub fn set_last_message_time(&self, last_message_time: Instant) {
-		*self.last_message_time.lock() = last_message_time;
+		*self.last_message_time.write() = last_message_time;
 	}
 	pub fn node_address(&self) -> &SocketAddr {
@@ -858,7 +877,7 @@ impl ClusterView {
 		ClusterView {
 			configured_nodes_count: configured_nodes_count,
 			connected_nodes_count: nodes.len(),
-			core: Arc::new(Mutex::new(ClusterViewCore {
+			core: Arc::new(RwLock::new(ClusterViewCore {
 				cluster: cluster,
 				nodes: nodes,
 			})),
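This commit converts several read-mostly locks (`ClusterView::core`, `Connection::last_message_time`, the test `DummyCluster` data) from `Mutex` to `RwLock`, so concurrent readers no longer serialize behind one another while writers still get exclusive access. Isolated, the pattern is simply:

```rust
extern crate parking_lot;

use std::time::Instant;
use parking_lot::RwLock;

struct Connection {
	last_message_time: RwLock<Instant>,
}

impl Connection {
	fn last_message_time(&self) -> Instant {
		// Any number of readers may hold the lock at once.
		*self.last_message_time.read()
	}

	fn set_last_message_time(&self, t: Instant) {
		// Writers take exclusive access, as with a Mutex.
		*self.last_message_time.write() = t;
	}
}

fn main() {
	let conn = Connection { last_message_time: RwLock::new(Instant::now()) };
	conn.set_last_message_time(Instant::now());
	println!("{:?}", conn.last_message_time());
}
```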
@@ -868,29 +887,29 @@ impl ClusterView {
 impl Cluster for ClusterView {
 	fn broadcast(&self, message: Message) -> Result<(), Error> {
-		let core = self.core.lock();
+		let core = self.core.read();
 		for node in core.nodes.iter().filter(|n| *n != core.cluster.self_key_pair.public()) {
 			trace!(target: "secretstore_net", "{}: sent message {} to {}", core.cluster.self_key_pair.public(), message, node);
 			let connection = core.cluster.connection(node).ok_or(Error::NodeDisconnected)?;
-			core.cluster.spawn(connection.send_message(message.clone()))
+			core.cluster.spawn(connection.send_message(message.clone()).then(|_| Ok(())))
 		}
 		Ok(())
 	}
 	fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> {
-		let core = self.core.lock();
+		let core = self.core.read();
 		trace!(target: "secretstore_net", "{}: sent message {} to {}", core.cluster.self_key_pair.public(), message, to);
 		let connection = core.cluster.connection(to).ok_or(Error::NodeDisconnected)?;
-		core.cluster.spawn(connection.send_message(message));
+		core.cluster.spawn(connection.send_message(message).then(|_| Ok(())));
 		Ok(())
 	}
 	fn is_connected(&self, node: &NodeId) -> bool {
-		self.core.lock().nodes.contains(node)
+		self.core.read().nodes.contains(node)
 	}
 	fn nodes(&self) -> BTreeSet<NodeId> {
-		self.core.lock().nodes.clone()
+		self.core.read().nodes.clone()
 	}
 	fn configured_nodes_count(&self) -> usize {
@@ -1118,8 +1137,11 @@ pub mod tests {
 	use std::sync::atomic::{AtomicUsize, Ordering};
 	use std::time::{Duration, Instant};
 	use std::collections::{BTreeSet, VecDeque};
-	use parking_lot::Mutex;
-	use tokio_core::reactor::Core;
+	use parking_lot::RwLock;
+	use tokio::{
+		runtime::{Runtime, Builder as RuntimeBuilder},
+		prelude::{future, Future},
+	};
 	use ethereum_types::{Address, H256};
 	use ethkey::{Random, Generator, Public, Signature, sign};
 	use key_server_cluster::{NodeId, SessionId, Requester, Error, DummyAclStorage, DummyKeyStorage,
@@ -1135,7 +1157,7 @@ pub mod tests {
 	use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession,
 		IsolatedSessionTransport as KeyVersionNegotiationSessionTransport};
-	const TIMEOUT: Duration = Duration::from_millis(300);
+	const TIMEOUT: Duration = Duration::from_millis(1000);
 	#[derive(Default)]
 	pub struct DummyClusterClient {
@@ -1145,7 +1167,7 @@ pub mod tests {
 	#[derive(Debug)]
 	pub struct DummyCluster {
 		id: NodeId,
-		data: Mutex<DummyClusterData>,
+		data: RwLock<DummyClusterData>,
 	}
 	#[derive(Debug, Default)]
@@ -1182,7 +1204,7 @@ pub mod tests {
 		pub fn new(id: NodeId) -> Self {
 			DummyCluster {
 				id: id,
-				data: Mutex::new(DummyClusterData::default())
+				data: RwLock::new(DummyClusterData::default())
 			}
 		}
@@ -1191,25 +1213,25 @@
 		}
 		pub fn add_node(&self, node: NodeId) {
-			self.data.lock().nodes.insert(node);
+			self.data.write().nodes.insert(node);
 		}
 		pub fn add_nodes<I: Iterator<Item=NodeId>>(&self, nodes: I) {
-			self.data.lock().nodes.extend(nodes)
+			self.data.write().nodes.extend(nodes)
 		}
 		pub fn remove_node(&self, node: &NodeId) {
-			self.data.lock().nodes.remove(node);
+			self.data.write().nodes.remove(node);
 		}
 		pub fn take_message(&self) -> Option<(NodeId, Message)> {
-			self.data.lock().messages.pop_front()
+			self.data.write().messages.pop_front()
 		}
 	}
 	impl Cluster for DummyCluster {
 		fn broadcast(&self, message: Message) -> Result<(), Error> {
-			let mut data = self.data.lock();
+			let mut data = self.data.write();
 			let all_nodes: Vec<_> = data.nodes.iter().cloned().filter(|n| n != &self.id).collect();
 			for node in all_nodes {
 				data.messages.push_back((node, message.clone()));
@@ -1219,40 +1241,49 @@ pub mod tests {
 		fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> {
 			debug_assert!(&self.id != to);
-			self.data.lock().messages.push_back((to.clone(), message));
+			self.data.write().messages.push_back((to.clone(), message));
 			Ok(())
 		}
 		fn is_connected(&self, node: &NodeId) -> bool {
-			let data = self.data.lock();
+			let data = self.data.read();
 			&self.id == node || data.nodes.contains(node)
 		}
 		fn nodes(&self) -> BTreeSet<NodeId> {
-			self.data.lock().nodes.iter().cloned().collect()
+			self.data.read().nodes.iter().cloned().collect()
 		}
 		fn configured_nodes_count(&self) -> usize {
-			self.data.lock().nodes.len()
+			self.data.read().nodes.len()
 		}
 		fn connected_nodes_count(&self) -> usize {
-			self.data.lock().nodes.len()
+			self.data.read().nodes.len()
 		}
 	}
-	pub fn loop_until<F>(core: &mut Core, timeout: Duration, predicate: F) where F: Fn() -> bool {
-		let start = Instant::now();
-		loop {
-			core.turn(Some(Duration::from_millis(1)));
-			if predicate() {
-				break;
-			}
-			if Instant::now() - start > timeout {
-				panic!("no result in {:?}", timeout);
-			}
-		}
+	/// Loops until `predicate` returns `true` or `timeout` has elapsed.
+	pub fn loop_until<F>(runtime: &mut Runtime, timeout: Duration, predicate: F)
+		where F: Send + 'static + Fn() -> bool
+	{
+		use futures::Stream;
+		use tokio::timer::Interval;
+
+		let start = Instant::now();
+
+		runtime.block_on(Interval::new_interval(Duration::from_millis(1))
+			.and_then(move |_| {
+				if Instant::now() - start > timeout {
+					panic!("no result in {:?}", timeout);
+				}
+				Ok(())
+			})
+			.take_while(move |_| future::ok(!predicate()))
+			.for_each(|_| Ok(()))
+			.then(|_| future::ok::<(), ()>(()))
+		).unwrap();
 	}
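Because `loop_until` now drives the predicate on the runtime via `block_on`, predicates must be `Send + 'static`, which is why the tests below clone their `Arc`s into `move` closures. A self-contained sketch of the same poll-until-predicate pattern:

```rust
// An `Interval` stream cut short by `take_while`, driven with `block_on`
// (tokio 0.1 / futures 0.1).
extern crate futures;
extern crate tokio;

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;
use futures::{future, Future, Stream};
use tokio::runtime::Builder;
use tokio::timer::Interval;

fn main() {
	let mut runtime = Builder::new().core_threads(2).build().unwrap();

	let done = Arc::new(AtomicBool::new(false));
	let done_reader = done.clone();

	// Some background task eventually flips the flag.
	runtime.spawn(future::lazy(move || {
		done.store(true, Ordering::Release);
		Ok::<(), ()>(())
	}));

	// Poll every millisecond until the predicate holds; the predicate is a
	// `Send + 'static` move closure, hence the `Arc` clone above.
	runtime.block_on(
		Interval::new_interval(Duration::from_millis(1))
			.map_err(|_| ())
			.take_while(move |_| future::ok(!done_reader.load(Ordering::Acquire)))
			.for_each(|_| Ok(()))
	).unwrap();

	runtime.shutdown_now().wait().unwrap();
}
```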
 	pub fn all_connections_established(cluster: &Arc<ClusterCore>) -> bool {
@@ -1261,7 +1292,7 @@ pub mod tests {
 			.all(|p| cluster.connection(p).is_some())
 	}
-	pub fn make_clusters(core: &Core, ports_begin: u16, num_nodes: usize) -> Vec<Arc<ClusterCore>> {
+	pub fn make_clusters(runtime: &Runtime, ports_begin: u16, num_nodes: usize) -> Vec<Arc<ClusterCore>> {
 		let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect();
 		let cluster_params: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration {
 			threads: 1,
@@ -1277,7 +1308,7 @@ pub mod tests {
 			auto_migrate_enabled: false,
 		}).collect();
 		let clusters: Vec<_> = cluster_params.into_iter().enumerate()
-			.map(|(_, params)| ClusterCore::new(core.handle(), params).unwrap())
+			.map(|(_, params)| ClusterCore::new(runtime.executor(), params).unwrap())
 			.collect();
 		clusters
@@ -1292,97 +1323,134 @@ pub mod tests {
 		}
 	}
+	pub fn shutdown_clusters(clusters: &[Arc<ClusterCore>]) {
+		for cluster in clusters {
+			cluster.shutdown()
+		}
+	}
+
+	/// Returns a new runtime with a static number of threads.
+	pub fn new_runtime() -> Runtime {
+		RuntimeBuilder::new()
+			.core_threads(4)
+			.build()
+			.expect("Unable to create tokio runtime")
+	}
 	#[test]
 	fn cluster_connects_to_other_nodes() {
-		let mut core = Core::new().unwrap();
-		let clusters = make_clusters(&core, 6010, 3);
+		let mut runtime = new_runtime();
+		let clusters = make_clusters(&runtime, 6010, 3);
 		run_clusters(&clusters);
-		loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
+		let clusters_clone = clusters.clone();
+		loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
+		shutdown_clusters(&clusters);
+		runtime.shutdown_now().wait().unwrap();
 	}
 	#[test]
 	fn cluster_wont_start_generation_session_if_not_fully_connected() {
-		let core = Core::new().unwrap();
-		let clusters = make_clusters(&core, 6013, 3);
+		let runtime = new_runtime();
+		let clusters = make_clusters(&runtime, 6013, 3);
 		clusters[0].run().unwrap();
 		match clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1) {
 			Err(Error::NodeDisconnected) => (),
 			Err(e) => panic!("unexpected error {:?}", e),
 			_ => panic!("unexpected success"),
 		}
+		shutdown_clusters(&clusters);
+		runtime.shutdown_now().wait().unwrap();
 	}
 	#[test]
 	fn error_in_generation_session_broadcasted_to_all_other_nodes() {
 		//::logger::init_log();
-		let mut core = Core::new().unwrap();
-		let clusters = make_clusters(&core, 6016, 3);
+		let mut runtime = new_runtime();
+		let clusters = make_clusters(&runtime, 6016, 3);
 		run_clusters(&clusters);
-		loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
+		let clusters_clone = clusters.clone();
+		loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
 		// ask one of nodes to produce faulty generation sessions
 		clusters[1].client().make_faulty_generation_sessions();
 		// start && wait for generation session to fail
 		let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
-		loop_until(&mut core, TIMEOUT, || session.joint_public_and_secret().is_some()
-			&& clusters[0].client().generation_session(&SessionId::default()).is_none());
+		let session_clone = session.clone();
+		let clusters_clone = clusters.clone();
+		loop_until(&mut runtime, TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
+			&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
 		assert!(session.joint_public_and_secret().unwrap().is_err());
 		// check that faulty session is either removed from all nodes, or nonexistent (already removed)
 		for i in 1..3 {
 			if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) {
+				let session_clone = session.clone();
+				let clusters_clone = clusters.clone();
 				// wait for both session completion && session removal (session completion event is fired
 				// before session is removed from its own container by cluster)
-				loop_until(&mut core, TIMEOUT, || session.joint_public_and_secret().is_some()
-					&& clusters[i].client().generation_session(&SessionId::default()).is_none());
+				loop_until(&mut runtime, TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
+					&& clusters_clone[i].client().generation_session(&SessionId::default()).is_none());
 				assert!(session.joint_public_and_secret().unwrap().is_err());
 			}
 		}
+		shutdown_clusters(&clusters);
+		runtime.shutdown_now().wait().unwrap();
 	}
 	#[test]
 	fn generation_session_completion_signalled_if_failed_on_master() {
 		//::logger::init_log();
-		let mut core = Core::new().unwrap();
-		let clusters = make_clusters(&core, 6025, 3);
+		let mut runtime = new_runtime();
+
+		let clusters = make_clusters(&runtime, 6025, 3);
 		run_clusters(&clusters);
-		loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
+		let clusters_clone = clusters.clone();
+		loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
 		// ask one of nodes to produce faulty generation sessions
 		clusters[0].client().make_faulty_generation_sessions();
 		// start && wait for generation session to fail
 		let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
-		loop_until(&mut core, TIMEOUT, || session.joint_public_and_secret().is_some()
-			&& clusters[0].client().generation_session(&SessionId::default()).is_none());
+		let session_clone = session.clone();
+		let clusters_clone = clusters.clone();
+		loop_until(&mut runtime, TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
+			&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
 		assert!(session.joint_public_and_secret().unwrap().is_err());
 		// check that faulty session is either removed from all nodes, or nonexistent (already removed)
 		for i in 1..3 {
 			if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) {
+				let session_clone = session.clone();
+				let clusters_clone = clusters.clone();
 				// wait for both session completion && session removal (session completion event is fired
 				// before session is removed from its own container by cluster)
-				loop_until(&mut core, TIMEOUT, || session.joint_public_and_secret().is_some()
-					&& clusters[i].client().generation_session(&SessionId::default()).is_none());
+				loop_until(&mut runtime, TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
+					&& clusters_clone[i].client().generation_session(&SessionId::default()).is_none());
 				assert!(session.joint_public_and_secret().unwrap().is_err());
 			}
 		}
+		shutdown_clusters(&clusters);
+		runtime.shutdown_now().wait().unwrap();
 	}
#[test] #[test]
fn generation_session_is_removed_when_succeeded() { fn generation_session_is_removed_when_succeeded() {
//::logger::init_log(); //::logger::init_log();
let mut core = Core::new().unwrap(); let mut runtime = new_runtime();
let clusters = make_clusters(&core, 6019, 3); let clusters = make_clusters(&runtime, 6019, 3);
run_clusters(&clusters); run_clusters(&clusters);
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established)); let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
// start && wait for generation session to complete // start && wait for generation session to complete
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
loop_until(&mut core, TIMEOUT, || (session.state() == GenerationSessionState::Finished let session_clone = session.clone();
|| session.state() == GenerationSessionState::Failed) let clusters_clone = clusters.clone();
&& clusters[0].client().generation_session(&SessionId::default()).is_none()); loop_until(&mut runtime, TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|| session_clone.state() == GenerationSessionState::Failed)
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_ok()); assert!(session.joint_public_and_secret().unwrap().is_ok());
// check that on non-master nodes session is either: // check that on non-master nodes session is either:
@ -1392,19 +1460,24 @@ pub mod tests {
if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) { if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) {
// run to completion if completion message is still on the way // run to completion if completion message is still on the way
// AND check that it is actually removed from cluster sessions // AND check that it is actually removed from cluster sessions
loop_until(&mut core, TIMEOUT, || (session.state() == GenerationSessionState::Finished let session_clone = session.clone();
|| session.state() == GenerationSessionState::Failed) let clusters_clone = clusters.clone();
&& clusters[i].client().generation_session(&SessionId::default()).is_none()); loop_until(&mut runtime, TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|| session_clone.state() == GenerationSessionState::Failed)
&& clusters_clone[i].client().generation_session(&SessionId::default()).is_none());
} }
} }
shutdown_clusters(&clusters);
runtime.shutdown_now().wait().unwrap();
} }
#[test] #[test]
fn sessions_are_removed_when_initialization_fails() { fn sessions_are_removed_when_initialization_fails() {
let mut core = Core::new().unwrap(); let mut runtime = new_runtime();
let clusters = make_clusters(&core, 6022, 3); let clusters = make_clusters(&runtime, 6022, 3);
run_clusters(&clusters); run_clusters(&clusters);
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established)); let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
// generation session // generation session
{ {
@ -1432,6 +1505,8 @@ pub mod tests {
assert!(clusters[0].data.sessions.decryption_sessions.is_empty()); assert!(clusters[0].data.sessions.decryption_sessions.is_empty());
assert!(clusters[0].data.sessions.negotiation_sessions.is_empty()); assert!(clusters[0].data.sessions.negotiation_sessions.is_empty());
} }
shutdown_clusters(&clusters);
runtime.shutdown_now().wait().unwrap();
} }
// test ignored because of // test ignored because of
@ -1441,16 +1516,19 @@ pub mod tests {
#[ignore] #[ignore]
fn schnorr_signing_session_completes_if_node_does_not_have_a_share() { fn schnorr_signing_session_completes_if_node_does_not_have_a_share() {
//::logger::init_log(); //::logger::init_log();
let mut core = Core::new().unwrap(); let mut runtime = new_runtime();
let clusters = make_clusters(&core, 6028, 3); let clusters = make_clusters(&runtime, 6028, 3);
run_clusters(&clusters); run_clusters(&clusters);
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established)); let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
// start && wait for generation session to complete // start && wait for generation session to complete
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
loop_until(&mut core, TIMEOUT, || (session.state() == GenerationSessionState::Finished let session_clone = session.clone();
|| session.state() == GenerationSessionState::Failed) let clusters_clone = clusters.clone();
&& clusters[0].client().generation_session(&SessionId::default()).is_none()); loop_until(&mut runtime, TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|| session_clone.state() == GenerationSessionState::Failed)
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_ok()); assert!(session.joint_public_and_secret().unwrap().is_ok());
// now remove share from node2 // now remove share from node2
@ -1462,8 +1540,10 @@ pub mod tests {
let session0 = clusters[0].client().new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap(); let session0 = clusters[0].client().new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap();
let session = clusters[0].data.sessions.schnorr_signing_sessions.first().unwrap(); let session = clusters[0].data.sessions.schnorr_signing_sessions.first().unwrap();
loop_until(&mut core, TIMEOUT, || session.is_finished() && (0..3).all(|i| let session_clone = session.clone();
clusters[i].data.sessions.schnorr_signing_sessions.is_empty())); let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || session_clone.is_finished() && (0..3).all(|i|
clusters_clone[i].data.sessions.schnorr_signing_sessions.is_empty()));
session0.wait().unwrap(); session0.wait().unwrap();
// and try to sign message with generated key using node that has no key share // and try to sign message with generated key using node that has no key share
@ -1471,8 +1551,10 @@ pub mod tests {
let session2 = clusters[2].client().new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap(); let session2 = clusters[2].client().new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap();
let session = clusters[2].data.sessions.schnorr_signing_sessions.first().unwrap(); let session = clusters[2].data.sessions.schnorr_signing_sessions.first().unwrap();
loop_until(&mut core, TIMEOUT, || session.is_finished() && (0..3).all(|i| let session_clone = session.clone();
clusters[i].data.sessions.schnorr_signing_sessions.is_empty())); let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || session_clone.is_finished() && (0..3).all(|i|
clusters_clone[i].data.sessions.schnorr_signing_sessions.is_empty()));
session2.wait().unwrap(); session2.wait().unwrap();
// now remove share from node1 // now remove share from node1
@ -1483,8 +1565,11 @@ pub mod tests {
let session1 = clusters[0].client().new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap(); let session1 = clusters[0].client().new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap();
let session = clusters[0].data.sessions.schnorr_signing_sessions.first().unwrap(); let session = clusters[0].data.sessions.schnorr_signing_sessions.first().unwrap();
loop_until(&mut core, TIMEOUT, || session.is_finished()); let session = session.clone();
loop_until(&mut runtime, TIMEOUT, move || session.is_finished());
session1.wait().unwrap_err(); session1.wait().unwrap_err();
shutdown_clusters(&clusters);
runtime.shutdown_now().wait().unwrap();
} }
// test ignored because of // test ignored because of
@ -1494,16 +1579,19 @@ pub mod tests {
#[ignore] #[ignore]
fn ecdsa_signing_session_completes_if_node_does_not_have_a_share() { fn ecdsa_signing_session_completes_if_node_does_not_have_a_share() {
//::logger::init_log(); //::logger::init_log();
let mut core = Core::new().unwrap(); let mut runtime = new_runtime();
let clusters = make_clusters(&core, 6041, 4); let clusters = make_clusters(&runtime, 6041, 4);
run_clusters(&clusters); run_clusters(&clusters);
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established)); let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
// start && wait for generation session to complete // start && wait for generation session to complete
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
loop_until(&mut core, TIMEOUT, || (session.state() == GenerationSessionState::Finished let session_clone = session.clone();
|| session.state() == GenerationSessionState::Failed) let clusters_clone = clusters.clone();
&& clusters[0].client().generation_session(&SessionId::default()).is_none()); loop_until(&mut runtime, TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|| session_clone.state() == GenerationSessionState::Failed)
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_ok()); assert!(session.joint_public_and_secret().unwrap().is_ok());
// now remove share from node2 // now remove share from node2
@ -1515,16 +1603,20 @@ pub mod tests {
let session0 = clusters[0].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap(); let session0 = clusters[0].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap();
let session = clusters[0].data.sessions.ecdsa_signing_sessions.first().unwrap(); let session = clusters[0].data.sessions.ecdsa_signing_sessions.first().unwrap();
loop_until(&mut core, Duration::from_millis(1000), || session.is_finished() && (0..3).all(|i| let session_clone = session.clone();
clusters[i].data.sessions.ecdsa_signing_sessions.is_empty())); let clusters_clone = clusters.clone();
loop_until(&mut runtime, Duration::from_millis(1000), move || session_clone.is_finished() && (0..3).all(|i|
clusters_clone[i].data.sessions.ecdsa_signing_sessions.is_empty()));
session0.wait().unwrap(); session0.wait().unwrap();
// and try to sign message with generated key using node that has no key share // and try to sign message with generated key using node that has no key share
let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap();
let session2 = clusters[2].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap(); let session2 = clusters[2].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap();
let session = clusters[2].data.sessions.ecdsa_signing_sessions.first().unwrap(); let session = clusters[2].data.sessions.ecdsa_signing_sessions.first().unwrap();
loop_until(&mut core, Duration::from_millis(1000), || session.is_finished() && (0..3).all(|i| let session_clone = session.clone();
clusters[i].data.sessions.ecdsa_signing_sessions.is_empty())); let clusters_clone = clusters.clone();
loop_until(&mut runtime, Duration::from_millis(1000), move || session_clone.is_finished() && (0..3).all(|i|
clusters_clone[i].data.sessions.ecdsa_signing_sessions.is_empty()));
session2.wait().unwrap(); session2.wait().unwrap();
// now remove share from node1 // now remove share from node1
@ -1534,7 +1626,9 @@ pub mod tests {
let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap();
let session1 = clusters[0].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap(); let session1 = clusters[0].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap();
let session = clusters[0].data.sessions.ecdsa_signing_sessions.first().unwrap(); let session = clusters[0].data.sessions.ecdsa_signing_sessions.first().unwrap();
loop_until(&mut core, Duration::from_millis(1000), || session.is_finished()); loop_until(&mut runtime, Duration::from_millis(1000), move || session.is_finished());
session1.wait().unwrap_err(); session1.wait().unwrap_err();
shutdown_clusters(&clusters);
runtime.shutdown_now().wait().unwrap();
} }
} }

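Note on the recurring `session_clone`/`clusters_clone` bindings above: they are not incidental noise. `loop_until` now drives a tokio `Runtime`, so its predicate is a `move` closure that must be `'static` and therefore cannot borrow test-local variables; each test clones the `Arc`-backed handles it needs before the closure. A minimal sketch of the pattern, assuming the `futures` 0.1 and `tokio` 0.1 crates (`loop_until` here is a simplified, hypothetical stand-in for the test helper):

    extern crate futures;
    extern crate tokio;

    use std::sync::Arc;
    use futures::Future;
    use tokio::runtime::{Builder as RuntimeBuilder, Runtime};

    // Simplified stand-in: the real helper drives the runtime with a
    // timeout; here only the `'static` bound on the predicate matters.
    fn loop_until<F: Fn() -> bool + Send + 'static>(_runtime: &mut Runtime, predicate: F) {
        while !predicate() {}
    }

    fn main() {
        let mut runtime = RuntimeBuilder::new()
            .core_threads(2)
            .build()
            .expect("failed to build tokio runtime");

        let session = Arc::new(());
        // The closure must own its handle, hence the clone before the `move`.
        let session_clone = session.clone();
        loop_until(&mut runtime, move || Arc::strong_count(&session_clone) >= 1);

        // The original handle is still usable once the closure is gone.
        assert_eq!(Arc::strong_count(&session), 1);
        // Graceful shutdown, mirroring `runtime.shutdown_now().wait().unwrap()` above.
        runtime.shutdown_now().wait().expect("shutdown never errors");
    }

Cloning is cheap here because the handles are reference-counted; the closure owns its clone while the test keeps the original for later assertions.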
View File

@@ -16,18 +16,34 @@
 use std::io;
 use std::time::Duration;
-use futures::{Future, Select, Poll, Async};
-use tokio_core::reactor::{Handle, Timeout};
+use futures::{Future, Poll};
+use tokio::timer::timeout::{Timeout, Error as TimeoutError};

-type DeadlineBox<F> = Box<Future<Item = DeadlineStatus<<F as Future>::Item>, Error = <F as Future>::Error> + Send>;
+type DeadlineBox<F> = Box<Future<
+	Item = DeadlineStatus<<F as Future>::Item>,
+	Error = TimeoutError<<F as Future>::Error>
+> + Send>;

 /// Complete a passed future or fail if it is not completed within timeout.
-pub fn deadline<F, T>(duration: Duration, handle: &Handle, future: F) -> Result<Deadline<F>, io::Error>
-	where F: Future<Item = T, Error = io::Error> + Send + 'static, T: 'static {
-	let timeout: DeadlineBox<F> = Box::new(Timeout::new(duration, handle)?.map(|_| DeadlineStatus::Timeout));
-	let future: DeadlineBox<F> = Box::new(future.map(DeadlineStatus::Meet));
+pub fn deadline<F, T>(duration: Duration, future: F) -> Result<Deadline<F>, io::Error>
+	where F: Future<Item = T, Error = io::Error> + Send + 'static, T: Send + 'static
+{
+	let timeout = Box::new(Timeout::new(future, duration)
+		.then(|res| {
+			match res {
+				Ok(fut) => Ok(DeadlineStatus::Meet(fut)),
+				Err(err) => {
+					if err.is_elapsed() {
+						Ok(DeadlineStatus::Timeout)
+					} else {
+						Err(err)
+					}
+				},
+			}
+		})
+	);
 	let deadline = Deadline {
-		future: timeout.select(future),
+		future: timeout,
 	};
 	Ok(deadline)
 }
@@ -43,19 +59,15 @@ pub enum DeadlineStatus<T> {
 /// Future, which waits for passed future completion within given period, or fails with timeout.
 pub struct Deadline<F> where F: Future {
-	future: Select<DeadlineBox<F>, DeadlineBox<F>>,
+	future: DeadlineBox<F>,
 }

 impl<F, T> Future for Deadline<F> where F: Future<Item = T, Error = io::Error> {
 	type Item = DeadlineStatus<T>;
-	type Error = io::Error;
+	type Error = TimeoutError<io::Error>;

 	fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
-		match self.future.poll() {
-			Ok(Async::Ready((result, _other))) => Ok(Async::Ready(result)),
-			Ok(Async::NotReady) => Ok(Async::NotReady),
-			Err((err, _other)) => Err(err),
-		}
+		self.future.poll()
 	}
 }
@@ -63,14 +75,14 @@ impl<F, T> Future for Deadline<F> where F: Future<Item = T, Error = io::Error> {
 mod tests {
 	use std::time::Duration;
 	use futures::{Future, done};
-	use tokio_core::reactor::Core;
+	use tokio::reactor::Reactor;
 	use super::{deadline, DeadlineStatus};

 	#[test]
 	fn deadline_result_works() {
-		let mut core = Core::new().unwrap();
-		let deadline = deadline(Duration::from_millis(1000), &core.handle(), done(Ok(()))).unwrap();
-		core.turn(Some(Duration::from_millis(3)));
+		let mut reactor = Reactor::new().unwrap();
+		let deadline = deadline(Duration::from_millis(1000), done(Ok(()))).unwrap();
+		reactor.turn(Some(Duration::from_millis(3))).unwrap();
 		assert_eq!(deadline.wait().unwrap(), DeadlineStatus::Meet(()));
 	}
 }

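The rewritten `deadline` above leans on tokio 0.1's `Timeout`, whose error type wraps both "deadline elapsed" and inner-future failures; `is_elapsed()` tells them apart, which is what replaces the old `Select` of two boxed futures. A minimal standalone sketch of that discrimination, assuming the `futures` 0.1 and `tokio` 0.1 crates:

    extern crate futures;
    extern crate tokio;

    use std::time::Duration;
    use futures::{future, Future};
    use tokio::timer::timeout::Timeout;

    fn main() {
        // A future that completes immediately, well inside the deadline.
        let work = future::ok::<u32, std::io::Error>(42);
        let with_deadline = Timeout::new(work, Duration::from_secs(5))
            .then(|res| match res {
                // Inner future completed in time.
                Ok(value) => Ok(Some(value)),
                // Deadline elapsed before completion.
                Err(ref err) if err.is_elapsed() => Ok(None),
                // Inner future (or the timer itself) failed.
                Err(err) => Err(err),
            });
        tokio::run(with_deadline
            .map(|value| assert_eq!(value, Some(42)))
            .map_err(|_| ()));
    }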
View File

@@ -19,7 +19,7 @@ use std::net::Shutdown;
 use std::io::{Read, Write, Error};
 use futures::Poll;
 use tokio_io::{AsyncRead, AsyncWrite};
-use tokio_core::net::TcpStream;
+use tokio::net::TcpStream;

 /// Read+Write implementation for Arc<TcpStream>.
 pub struct SharedTcpStream {

View File

@@ -19,20 +19,23 @@ use std::sync::Arc;
 use std::net::SocketAddr;
 use std::time::Duration;
 use futures::{Future, Poll};
-use tokio_core::reactor::Handle;
-use tokio_core::net::TcpStream;
+use tokio::net::TcpStream;
 use key_server_cluster::{Error, NodeKeyPair};
 use key_server_cluster::io::{accept_handshake, Handshake, Deadline, deadline};
 use key_server_cluster::net::Connection;

 /// Create future for accepting incoming connection.
-pub fn accept_connection(address: SocketAddr, stream: TcpStream, handle: &Handle, self_key_pair: Arc<NodeKeyPair>) -> Deadline<AcceptConnection> {
+pub fn accept_connection(stream: TcpStream, self_key_pair: Arc<NodeKeyPair>) -> Deadline<AcceptConnection> {
+	// TODO: This could fail so it would be better either to accept the
+	// address as a separate argument or return a result.
+	let address = stream.peer_addr().expect("Unable to determine tcp peer address");
 	let accept = AcceptConnection {
 		handshake: accept_handshake(stream, self_key_pair),
 		address: address,
 	};
-	deadline(Duration::new(5, 0), handle, accept).expect("Failed to create timeout")
+	deadline(Duration::new(5, 0), accept).expect("Failed to create timeout")
 }

 /// Future for accepting incoming connection.

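The `expect` on `peer_addr` above can panic if the peer disconnects between accept and the handshake, which is exactly what the new TODO warns about. A sketch of the fallible alternative it suggests, assuming tokio 0.1's `TcpStream` (`peer_address_checked` is a hypothetical helper name):

    extern crate tokio;

    use std::io;
    use std::net::SocketAddr;
    use tokio::net::TcpStream;

    // Hypothetical alternative: return a Result instead of panicking,
    // letting the caller decide how to handle a vanished peer.
    fn peer_address_checked(stream: &TcpStream) -> io::Result<SocketAddr> {
        stream.peer_addr()
    }

    fn main() {} // compile-only sketch; wiring it into accept_connection is the TODO's suggestion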
View File

@@ -20,26 +20,25 @@ use std::io;
 use std::time::Duration;
 use std::net::SocketAddr;
 use futures::{Future, Poll, Async};
-use tokio_core::reactor::Handle;
-use tokio_core::net::{TcpStream, TcpStreamNew};
+use tokio::net::{TcpStream, tcp::ConnectFuture};
 use key_server_cluster::{Error, NodeId, NodeKeyPair};
 use key_server_cluster::io::{handshake, Handshake, Deadline, deadline};
 use key_server_cluster::net::Connection;

 /// Create future for connecting to other node.
-pub fn connect(address: &SocketAddr, handle: &Handle, self_key_pair: Arc<NodeKeyPair>, trusted_nodes: BTreeSet<NodeId>) -> Deadline<Connect> {
+pub fn connect(address: &SocketAddr, self_key_pair: Arc<NodeKeyPair>, trusted_nodes: BTreeSet<NodeId>) -> Deadline<Connect> {
 	let connect = Connect {
-		state: ConnectState::TcpConnect(TcpStream::connect(address, handle)),
+		state: ConnectState::TcpConnect(TcpStream::connect(address)),
 		address: address.clone(),
 		self_key_pair: self_key_pair,
 		trusted_nodes: trusted_nodes,
 	};
-	deadline(Duration::new(5, 0), handle, connect).expect("Failed to create timeout")
+	deadline(Duration::new(5, 0), connect).expect("Failed to create timeout")
 }

 enum ConnectState {
-	TcpConnect(TcpStreamNew),
+	TcpConnect(ConnectFuture),
 	Handshake(Handshake<TcpStream>),
 	Connected,
 }

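For reference, the tokio 0.1 connect API used above takes only the target address and returns a `ConnectFuture` bound to the default reactor, which is why the tokio-core `Handle` parameter disappears throughout this file. A minimal sketch, assuming the `futures` 0.1 and `tokio` 0.1 crates:

    extern crate futures;
    extern crate tokio;

    use futures::Future;
    use tokio::net::TcpStream;

    fn main() {
        let addr = "127.0.0.1:8080".parse().expect("valid socket address");
        // `connect` now returns a `ConnectFuture` driven by the default reactor.
        let conn = TcpStream::connect(&addr)
            .map(|stream| println!("connected, peer: {:?}", stream.peer_addr()))
            .map_err(|err| eprintln!("connection failed: {}", err));
        tokio::run(conn);
    }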
View File

@@ -24,7 +24,6 @@ extern crate ethcore_sync as sync;
 extern crate ethcore_transaction as transaction;
 extern crate ethereum_types;
 extern crate ethkey;
-extern crate futures_cpupool;
 extern crate hyper;
 extern crate keccak_hash as hash;
 extern crate kvdb;
@@ -34,9 +33,7 @@ extern crate serde;
 extern crate serde_json;
 extern crate tiny_keccak;
 extern crate tokio;
-extern crate tokio_core;
 extern crate tokio_io;
-extern crate tokio_proto;
 extern crate tokio_service;
 extern crate url;

View File

@@ -16,14 +16,17 @@
 use std::collections::BTreeSet;
 use std::sync::{Arc, Weak};
-use hyper::{self, header, Chunk, Uri, Request as HttpRequest, Response as HttpResponse, Method as HttpMethod, StatusCode as HttpStatusCode};
-use hyper::server::Http;
+use hyper::{self, Uri, Request as HttpRequest, Response as HttpResponse, Method as HttpMethod,
+	StatusCode as HttpStatusCode, Body,
+	header::{self, HeaderValue},
+	server::conn::Http,
+	service::Service,
+};
 use serde::Serialize;
 use serde_json;
 use tokio;
 use tokio::net::TcpListener;
-use tokio::runtime::Runtime;
-use tokio_service::Service;
+use tokio::runtime::{Runtime, Builder as RuntimeBuilder};
 use futures::{future, Future, Stream};
 use url::percent_encoding::percent_decode;
@@ -88,7 +91,10 @@ impl KeyServerHttpListener {
 			key_server: key_server,
 		});

-		let mut runtime = Runtime::new()?;
+		let mut runtime = RuntimeBuilder::new()
+			// TODO: Add config option/arg?
+			.core_threads(2)
+			.build()?;
 		let listener_address = format!("{}:{}", listener_address.address, listener_address.port).parse()?;
 		let listener = TcpListener::bind(&listener_address)?;
@@ -97,10 +103,10 @@ impl KeyServerHttpListener {
 		let server = listener.incoming()
 			.map_err(|e| warn!("Key server listener error: {:?}", e))
 			.for_each(move |socket| {
-				let http: Http<Chunk> = Http::new();
-				let serve = http.serve_connection(socket, KeyServerHttpHandler {
-					handler: shared_handler2.clone(),
-				}).map(|_| ()).map_err(|e| {
+				let http = Http::new();
+				let serve = http.serve_connection(socket,
+					KeyServerHttpHandler { handler: shared_handler2.clone() }
+				).map(|_| ()).map_err(|e| {
 					warn!("Key server handler error: {:?}", e);
 				});
@@ -119,7 +125,7 @@ impl KeyServerHttpListener {
 }

 impl KeyServerHttpHandler {
-	fn process(self, req_method: HttpMethod, req_uri: Uri, path: &str, req_body: &[u8]) -> HttpResponse {
+	fn process(self, req_method: HttpMethod, req_uri: Uri, path: &str, req_body: &[u8]) -> HttpResponse<Body> {
 		match parse_request(&req_method, &path, &req_body) {
 			Request::GenerateServerKey(document, signature, threshold) => {
 				return_server_public_key(&req_uri, self.handler.key_server.upgrade()
@@ -195,22 +201,28 @@ impl KeyServerHttpHandler {
 			},
 			Request::Invalid => {
 				warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri);
-				HttpResponse::new().with_status(HttpStatusCode::BadRequest)
+				HttpResponse::builder()
+					.status(HttpStatusCode::BAD_REQUEST)
+					.body(Body::empty())
+					.expect("Nothing to parse, cannot fail; qed")
 			},
 		}
 	}
 }

 impl Service for KeyServerHttpHandler {
-	type Request = HttpRequest;
-	type Response = HttpResponse;
+	type ReqBody = Body;
+	type ResBody = Body;
 	type Error = hyper::Error;
-	type Future = Box<Future<Item=Self::Response, Error=Self::Error> + Send>;
+	type Future = Box<Future<Item = HttpResponse<Self::ResBody>, Error=Self::Error> + Send>;

-	fn call(&self, req: HttpRequest) -> Self::Future {
-		if req.headers().has::<header::Origin>() {
+	fn call(&mut self, req: HttpRequest<Body>) -> Self::Future {
+		if req.headers().contains_key(header::ORIGIN) {
 			warn!(target: "secretstore", "Ignoring {}-request {} with Origin header", req.method(), req.uri());
-			return Box::new(future::ok(HttpResponse::new().with_status(HttpStatusCode::NotFound)));
+			return Box::new(future::ok(HttpResponse::builder()
+				.status(HttpStatusCode::NOT_FOUND)
+				.body(Body::empty())
+				.expect("Nothing to parse, cannot fail; qed")))
 		}

 		let req_method = req.method().clone();
@@ -218,35 +230,40 @@ impl Service for KeyServerHttpHandler {
 		// We cannot consume Self because of the Service trait requirement.
 		let this = self.clone();

-		Box::new(req.body().concat2().map(move |body| {
+		Box::new(req.into_body().concat2().map(move |body| {
 			let path = req_uri.path().to_string();
 			if path.starts_with("/") {
 				this.process(req_method, req_uri, &path, &body)
 			} else {
 				warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri);
-				HttpResponse::new().with_status(HttpStatusCode::NotFound)
+				HttpResponse::builder()
+					.status(HttpStatusCode::NOT_FOUND)
+					.body(Body::empty())
+					.expect("Nothing to parse, cannot fail; qed")
 			}
 		}))
 	}
 }

-fn return_empty(req_uri: &Uri, empty: Result<(), Error>) -> HttpResponse {
+fn return_empty(req_uri: &Uri, empty: Result<(), Error>) -> HttpResponse<Body> {
 	return_bytes::<i32>(req_uri, empty.map(|_| None))
 }

-fn return_server_public_key(req_uri: &Uri, server_public: Result<Public, Error>) -> HttpResponse {
+fn return_server_public_key(req_uri: &Uri, server_public: Result<Public, Error>) -> HttpResponse<Body> {
 	return_bytes(req_uri, server_public.map(|k| Some(SerializablePublic(k))))
 }

-fn return_message_signature(req_uri: &Uri, signature: Result<EncryptedDocumentKey, Error>) -> HttpResponse {
+fn return_message_signature(req_uri: &Uri, signature: Result<EncryptedDocumentKey, Error>) -> HttpResponse<Body> {
 	return_bytes(req_uri, signature.map(|s| Some(SerializableBytes(s))))
 }

-fn return_document_key(req_uri: &Uri, document_key: Result<EncryptedDocumentKey, Error>) -> HttpResponse {
+fn return_document_key(req_uri: &Uri, document_key: Result<EncryptedDocumentKey, Error>) -> HttpResponse<Body> {
 	return_bytes(req_uri, document_key.map(|k| Some(SerializableBytes(k))))
 }

-fn return_document_key_shadow(req_uri: &Uri, document_key_shadow: Result<EncryptedDocumentKeyShadow, Error>) -> HttpResponse {
+fn return_document_key_shadow(req_uri: &Uri, document_key_shadow: Result<EncryptedDocumentKeyShadow, Error>)
+	-> HttpResponse<Body>
+{
 	return_bytes(req_uri, document_key_shadow.map(|k| Some(SerializableEncryptedDocumentKeyShadow {
 		decrypted_secret: k.decrypted_secret.into(),
 		common_point: k.common_point.expect("always filled when requesting document_key_shadow; qed").into(),
@@ -254,42 +271,65 @@ fn return_document_key_shadow(req_uri: &Uri, document_key_shadow: Result<Encrypt
 	})))
 }

-fn return_bytes<T: Serialize>(req_uri: &Uri, result: Result<Option<T>, Error>) -> HttpResponse {
+fn return_bytes<T: Serialize>(req_uri: &Uri, result: Result<Option<T>, Error>) -> HttpResponse<Body> {
 	match result {
 		Ok(Some(result)) => match serde_json::to_vec(&result) {
-			Ok(result) => HttpResponse::new()
-				.with_header(header::ContentType::json())
-				.with_body(result),
+			Ok(result) => {
+				let body: Body = result.into();
+				HttpResponse::builder()
+					.header(header::CONTENT_TYPE, HeaderValue::from_static("application/json; charset=utf-8"))
+					.body(body)
+					.expect("Error creating http response")
+			},
 			Err(err) => {
 				warn!(target: "secretstore", "response to request {} has failed with: {}", req_uri, err);
-				HttpResponse::new().with_status(HttpStatusCode::InternalServerError)
+				HttpResponse::builder()
+					.status(HttpStatusCode::INTERNAL_SERVER_ERROR)
+					.body(Body::empty())
+					.expect("Nothing to parse, cannot fail; qed")
 			}
 		},
-		Ok(None) => HttpResponse::new().with_status(HttpStatusCode::Ok),
+		Ok(None) => {
+			HttpResponse::builder()
+				.status(HttpStatusCode::OK)
+				.body(Body::empty())
+				.expect("Nothing to parse, cannot fail; qed")
+		},
 		Err(err) => return_error(err),
 	}
 }

-fn return_error(err: Error) -> HttpResponse {
-	let mut res = HttpResponse::new().with_status(match err {
-		Error::AccessDenied | Error::ConsensusUnreachable | Error::ConsensusTemporaryUnreachable =>
-			HttpStatusCode::Forbidden,
-		Error::ServerKeyIsNotFound | Error::DocumentKeyIsNotFound =>
-			HttpStatusCode::NotFound,
-		Error::InsufficientRequesterData(_) | Error::Hyper(_) | Error::Serde(_)
-			| Error::DocumentKeyAlreadyStored | Error::ServerKeyAlreadyGenerated =>
-			HttpStatusCode::BadRequest,
-		_ => HttpStatusCode::InternalServerError,
-	});
+fn return_error(err: Error) -> HttpResponse<Body> {
+	let status = match err {
+		| Error::AccessDenied
+		| Error::ConsensusUnreachable
+		| Error::ConsensusTemporaryUnreachable =>
+			HttpStatusCode::FORBIDDEN,
+		| Error::ServerKeyIsNotFound
+		| Error::DocumentKeyIsNotFound =>
+			HttpStatusCode::NOT_FOUND,
+		| Error::InsufficientRequesterData(_)
+		| Error::Hyper(_)
+		| Error::Serde(_)
+		| Error::DocumentKeyAlreadyStored
+		| Error::ServerKeyAlreadyGenerated =>
+			HttpStatusCode::BAD_REQUEST,
+		_ => HttpStatusCode::INTERNAL_SERVER_ERROR,
+	};
+	let mut res = HttpResponse::builder();
+	res.status(status);

 	// return error text. ignore errors when returning error
 	let error_text = format!("\"{}\"", err);
 	if let Ok(error_text) = serde_json::to_vec(&error_text) {
-		res.headers_mut().set(header::ContentType::json());
-		res.set_body(error_text);
+		res.header(header::CONTENT_TYPE, HeaderValue::from_static("application/json; charset=utf-8"));
+		res.body(error_text.into())
+			.expect("`error_text` is a formatted string, parsing cannot fail; qed")
+	} else {
+		res.body(Body::empty())
+			.expect("Nothing to parse, cannot fail; qed")
 	}
-	res
 }

 fn parse_request(method: &HttpMethod, uri_path: &str, body: &[u8]) -> Request {
@@ -328,19 +368,19 @@ fn parse_request(method: &HttpMethod, uri_path: &str, body: &[u8]) -> Request {
 	let common_point = path.get(args_offset + 2).map(|v| v.parse());
 	let encrypted_key = path.get(args_offset + 3).map(|v| v.parse());
 	match (prefix, args_count, method, threshold, message_hash, common_point, encrypted_key) {
-		("shadow", 3, &HttpMethod::Post, Some(Ok(threshold)), _, _, _) =>
+		("shadow", 3, &HttpMethod::POST, Some(Ok(threshold)), _, _, _) =>
 			Request::GenerateServerKey(document, signature, threshold),
-		("shadow", 4, &HttpMethod::Post, _, _, Some(Ok(common_point)), Some(Ok(encrypted_key))) =>
+		("shadow", 4, &HttpMethod::POST, _, _, Some(Ok(common_point)), Some(Ok(encrypted_key))) =>
 			Request::StoreDocumentKey(document, signature, common_point, encrypted_key),
-		("", 3, &HttpMethod::Post, Some(Ok(threshold)), _, _, _) =>
+		("", 3, &HttpMethod::POST, Some(Ok(threshold)), _, _, _) =>
 			Request::GenerateDocumentKey(document, signature, threshold),
-		("", 2, &HttpMethod::Get, _, _, _, _) =>
+		("", 2, &HttpMethod::GET, _, _, _, _) =>
 			Request::GetDocumentKey(document, signature),
-		("shadow", 2, &HttpMethod::Get, _, _, _, _) =>
+		("shadow", 2, &HttpMethod::GET, _, _, _, _) =>
 			Request::GetDocumentKeyShadow(document, signature),
-		("schnorr", 3, &HttpMethod::Get, _, Some(Ok(message_hash)), _, _) =>
+		("schnorr", 3, &HttpMethod::GET, _, Some(Ok(message_hash)), _, _) =>
 			Request::SchnorrSignMessage(document, signature, message_hash),
-		("ecdsa", 3, &HttpMethod::Get, _, Some(Ok(message_hash)), _, _) =>
+		("ecdsa", 3, &HttpMethod::GET, _, Some(Ok(message_hash)), _, _) =>
 			Request::EcdsaSignMessage(document, signature, message_hash),
 		_ => Request::Invalid,
 	}
@@ -348,7 +388,7 @@ fn parse_request(method: &HttpMethod, uri_path: &str, body: &[u8]) -> Request {

 fn parse_admin_request(method: &HttpMethod, path: Vec<String>, body: &[u8]) -> Request {
 	let args_count = path.len();
-	if *method != HttpMethod::Post || args_count != 4 || path[1] != "servers_set_change" {
+	if *method != HttpMethod::POST || args_count != 4 || path[1] != "servers_set_change" {
 		return Request::Invalid;
 	}
@@ -392,39 +432,39 @@ mod tests {
 	#[test]
 	fn parse_request_successful() {
 		// POST /shadow/{server_key_id}/{signature}/{threshold} => generate server key
-		assert_eq!(parse_request(&HttpMethod::Post, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2", Default::default()),
+		assert_eq!(parse_request(&HttpMethod::POST, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2", Default::default()),
 			Request::GenerateServerKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
 				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
 				2));
 		// POST /shadow/{server_key_id}/{signature}/{common_point}/{encrypted_key} => store encrypted document key
-		assert_eq!(parse_request(&HttpMethod::Post, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8/1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb", Default::default()),
+		assert_eq!(parse_request(&HttpMethod::POST, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8/1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb", Default::default()),
 			Request::StoreDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
 				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
 				"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse().unwrap(),
 				"1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb".parse().unwrap()));
 		// POST /{server_key_id}/{signature}/{threshold} => generate server && document key
-		assert_eq!(parse_request(&HttpMethod::Post, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2", Default::default()),
+		assert_eq!(parse_request(&HttpMethod::POST, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2", Default::default()),
 			Request::GenerateDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
 				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
 				2));
 		// GET /{server_key_id}/{signature} => get document key
-		assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
+		assert_eq!(parse_request(&HttpMethod::GET, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
 			Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
 				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap()));
-		assert_eq!(parse_request(&HttpMethod::Get, "/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
+		assert_eq!(parse_request(&HttpMethod::GET, "/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
 			Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
 				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap()));
 		// GET /shadow/{server_key_id}/{signature} => get document key shadow
-		assert_eq!(parse_request(&HttpMethod::Get, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
+		assert_eq!(parse_request(&HttpMethod::GET, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
 			Request::GetDocumentKeyShadow("0000000000000000000000000000000000000000000000000000000000000001".into(),
 				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap()));
 		// GET /schnorr/{server_key_id}/{signature}/{message_hash} => schnorr-sign message with server key
-		assert_eq!(parse_request(&HttpMethod::Get, "/schnorr/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c", Default::default()),
+		assert_eq!(parse_request(&HttpMethod::GET, "/schnorr/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c", Default::default()),
 			Request::SchnorrSignMessage("0000000000000000000000000000000000000000000000000000000000000001".into(),
 				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
 				"281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap()));
 		// GET /ecdsa/{server_key_id}/{signature}/{message_hash} => ecdsa-sign message with server key
-		assert_eq!(parse_request(&HttpMethod::Get, "/ecdsa/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c", Default::default()),
+		assert_eq!(parse_request(&HttpMethod::GET, "/ecdsa/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c", Default::default()),
 			Request::EcdsaSignMessage("0000000000000000000000000000000000000000000000000000000000000001".into(),
 				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
 				"281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap()));
@@ -432,7 +472,7 @@ mod tests {
 		let node1: Public = "843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91".parse().unwrap();
 		let node2: Public = "07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3".parse().unwrap();
 		let nodes = vec![node1, node2].into_iter().collect();
-		assert_eq!(parse_request(&HttpMethod::Post, "/admin/servers_set_change/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01",
+		assert_eq!(parse_request(&HttpMethod::POST, "/admin/servers_set_change/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01",
 			&r#"["0x843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91",
 			"0x07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3"]"#.as_bytes()),
 			Request::ChangeServersSet(
@@ -444,20 +484,20 @@
 	#[test]
 	fn parse_request_failed() {
-		assert_eq!(parse_request(&HttpMethod::Get, "", Default::default()), Request::Invalid);
-		assert_eq!(parse_request(&HttpMethod::Get, "/shadow", Default::default()), Request::Invalid);
-		assert_eq!(parse_request(&HttpMethod::Get, "///2", Default::default()), Request::Invalid);
-		assert_eq!(parse_request(&HttpMethod::Get, "/shadow///2", Default::default()), Request::Invalid);
-		assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001", Default::default()), Request::Invalid);
-		assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/", Default::default()), Request::Invalid);
-		assert_eq!(parse_request(&HttpMethod::Get, "/a/b", Default::default()), Request::Invalid);
-		assert_eq!(parse_request(&HttpMethod::Get, "/schnorr/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002", Default::default()), Request::Invalid);
-		assert_eq!(parse_request(&HttpMethod::Get, "/ecdsa/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002", Default::default()), Request::Invalid);
+		assert_eq!(parse_request(&HttpMethod::GET, "", Default::default()), Request::Invalid);
+		assert_eq!(parse_request(&HttpMethod::GET, "/shadow", Default::default()), Request::Invalid);
+		assert_eq!(parse_request(&HttpMethod::GET, "///2", Default::default()), Request::Invalid);
+		assert_eq!(parse_request(&HttpMethod::GET, "/shadow///2", Default::default()), Request::Invalid);
+		assert_eq!(parse_request(&HttpMethod::GET, "/0000000000000000000000000000000000000000000000000000000000000001", Default::default()), Request::Invalid);
+		assert_eq!(parse_request(&HttpMethod::GET, "/0000000000000000000000000000000000000000000000000000000000000001/", Default::default()), Request::Invalid);
+		assert_eq!(parse_request(&HttpMethod::GET, "/a/b", Default::default()), Request::Invalid);
+		assert_eq!(parse_request(&HttpMethod::GET, "/schnorr/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002", Default::default()), Request::Invalid);
+		assert_eq!(parse_request(&HttpMethod::GET, "/ecdsa/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002", Default::default()), Request::Invalid);
-		assert_eq!(parse_request(&HttpMethod::Post, "/admin/servers_set_change/xxx/yyy",
+		assert_eq!(parse_request(&HttpMethod::POST, "/admin/servers_set_change/xxx/yyy",
 			&r#"["0x843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91",
 			"0x07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3"]"#.as_bytes()),
 			Request::Invalid);
-		assert_eq!(parse_request(&HttpMethod::Post, "/admin/servers_set_change/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", "".as_bytes()),
+		assert_eq!(parse_request(&HttpMethod::POST, "/admin/servers_set_change/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", "".as_bytes()),
 			Request::Invalid);
 	}
 }

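In hyper 0.12 the `Service` trait moves from tokio-service into hyper itself, becomes parameterised over request and response body types, and `call` takes `&mut self`; that is the shape the handler above now implements. A minimal free-standing sketch, assuming the `hyper` 0.12 and `futures` 0.1 crates (`Echo` is a hypothetical service):

    extern crate futures;
    extern crate hyper;

    use futures::{future, Future};
    use hyper::{Body, Request, Response};
    use hyper::service::Service;

    // Hypothetical service that echoes the request body back.
    struct Echo;

    impl Service for Echo {
        type ReqBody = Body;
        type ResBody = Body;
        type Error = hyper::Error;
        type Future = Box<Future<Item = Response<Body>, Error = Self::Error> + Send>;

        // `call` now takes `&mut self` and a typed `Request<Body>`.
        fn call(&mut self, req: Request<Body>) -> Self::Future {
            Box::new(future::ok(Response::new(req.into_body())))
        }
    }

    fn main() {} // compile-only sketch

A service like this can be handed to `Http::new().serve_connection(socket, service)` exactly as the listener above does with `KeyServerHttpHandler`.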
View File

@@ -8,4 +8,4 @@ authors = ["Parity Technologies <admin@parity.io>"]
 [dependencies]
 fetch = { path = "../fetch" }
 futures = "0.1"
-hyper = "0.11"
+hyper = "0.12"

View File

@@ -18,7 +18,7 @@ extern crate fetch;
 extern crate hyper;
 extern crate futures;

-use hyper::StatusCode;
+use hyper::{StatusCode, Body};
 use futures::{future, future::FutureResult};
 use fetch::{Fetch, Url, Request};
@@ -39,10 +39,13 @@ impl<T: 'static> Fetch for FakeFetch<T> where T: Clone + Send + Sync {
 	fn fetch(&self, request: Request, abort: fetch::Abort) -> Self::Result {
 		let u = request.url().clone();
 		future::ok(if self.val.is_some() {
-			let r = hyper::Response::new().with_body(&b"Some content"[..]);
+			let r = hyper::Response::new("Some content".into());
 			fetch::client::Response::new(u, r, abort)
 		} else {
-			fetch::client::Response::new(u, hyper::Response::new().with_status(StatusCode::NotFound), abort)
+			let r = hyper::Response::builder()
+				.status(StatusCode::NOT_FOUND)
+				.body(Body::empty()).expect("Nothing to parse, can not fail; qed");
+			fetch::client::Response::new(u, r, abort)
 		})
 	}

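Both hyper call sites above switch from 0.11's chained `with_status`/`with_body` setters to the fallible 0.12 builder, which returns a `Result` and therefore needs the `expect` calls. A minimal sketch of the pattern, assuming the `hyper` 0.12 crate (`not_found` is a hypothetical helper):

    extern crate hyper;

    use hyper::{Body, Response, StatusCode};

    // Hypothetical helper mirroring the pattern above.
    fn not_found() -> Response<Body> {
        Response::builder()
            .status(StatusCode::NOT_FOUND)
            .body(Body::empty())
            // An empty body cannot make the builder fail.
            .expect("valid status and empty body; qed")
    }

    fn main() {
        assert_eq!(not_found().status(), StatusCode::NOT_FOUND);
    }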
View File

@@ -8,11 +8,11 @@ authors = ["Parity Technologies <admin@parity.io>"]
 [dependencies]
 futures = "0.1"
-hyper = "0.11"
-hyper-rustls = "0.11"
+hyper = "~0.12.9"
+hyper-rustls = "0.14"
+http = "0.1"
 log = "0.4"
-tokio-core = "0.1"
-tokio-timer = "0.1"
+tokio = "~0.1.8"
 url = "1"
 bytes = "0.4"

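One nuance in the requirements above: `~0.12.9` and `~0.1.8` are tilde constraints that allow only patch-level upgrades (`>=0.12.9, <0.13.0` and `>=0.1.8, <0.2.0` respectively), while plain `0.14`, `0.4`, and `1` are caret-style and accept any semver-compatible release within the same major/minor series.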
View File

@@ -17,8 +17,7 @@
 use futures::future::{self, Loop};
 use futures::sync::{mpsc, oneshot};
 use futures::{self, Future, Async, Sink, Stream};
-use hyper::header::{UserAgent, Location, ContentLength, ContentType};
-use hyper::mime::Mime;
+use hyper::header::{self, HeaderMap, HeaderValue, IntoHeaderName};
 use hyper::{self, Method, StatusCode};
 use hyper_rustls;
 use std;
@@ -29,8 +28,7 @@ use std::sync::mpsc::RecvTimeoutError;
 use std::thread;
 use std::time::Duration;
 use std::{io, fmt};
-use tokio_core::reactor;
-use tokio_timer::{self, Timer};
+use tokio::{self, util::FutureExt};
 use url::{self, Url};
 use bytes::Bytes;
@@ -118,7 +116,7 @@ impl Abort {
 /// Types which retrieve content from some URL.
 pub trait Fetch: Clone + Send + Sync + 'static {
 	/// The result future.
-	type Result: Future<Item=Response, Error=Error> + Send + 'static;
+	type Result: Future<Item = Response, Error = Error> + Send + 'static;

 	/// Make a request to given URL
 	fn fetch(&self, request: Request, abort: Abort) -> Self::Result;
@@ -131,7 +129,7 @@ pub trait Fetch: Clone + Send + Sync + 'static {
 }

 type TxResponse = oneshot::Sender<Result<Response, Error>>;
-type TxStartup = std::sync::mpsc::SyncSender<Result<(), io::Error>>;
+type TxStartup = std::sync::mpsc::SyncSender<Result<(), tokio::io::Error>>;
 type ChanItem = Option<(Request, Abort, TxResponse)>;

 /// An implementation of `Fetch` using a `hyper` client.
@@ -140,9 +138,8 @@ type ChanItem = Option<(Request, Abort, TxResponse)>;
 // not implement `Send` currently.
 #[derive(Debug)]
 pub struct Client {
-	core: mpsc::Sender<ChanItem>,
+	runtime: mpsc::Sender<ChanItem>,
 	refs: Arc<AtomicUsize>,
-	timer: Timer,
 }

 // When cloning a client we increment the internal reference counter.
@@ -150,9 +147,8 @@ impl Clone for Client {
 	fn clone(&self) -> Client {
 		self.refs.fetch_add(1, Ordering::SeqCst);
 		Client {
-			core: self.core.clone(),
+			runtime: self.runtime.clone(),
 			refs: self.refs.clone(),
-			timer: self.timer.clone(),
 		}
 	}
 }
@@ -163,7 +159,7 @@ impl Drop for Client {
 	fn drop(&mut self) {
 		if self.refs.fetch_sub(1, Ordering::SeqCst) == 1 {
 			// ignore send error as it means the background thread is gone already
-			let _ = self.core.clone().send(None).wait();
+			let _ = self.runtime.clone().send(None).wait();
 		}
 	}
 }
@@ -193,23 +189,20 @@ impl Client {
 		}

 		Ok(Client {
-			core: tx_proto,
+			runtime: tx_proto,
 			refs: Arc::new(AtomicUsize::new(1)),
-			timer: Timer::default(),
 		})
 	}

 	fn background_thread(tx_start: TxStartup, rx_proto: mpsc::Receiver<ChanItem>, num_dns_threads: usize) -> io::Result<thread::JoinHandle<()>> {
 		thread::Builder::new().name("fetch".into()).spawn(move || {
-			let mut core = match reactor::Core::new() {
+			let mut runtime = match tokio::runtime::current_thread::Runtime::new() {
 				Ok(c) => c,
 				Err(e) => return tx_start.send(Err(e)).unwrap_or(())
}; };
let handle = core.handle(); let hyper = hyper::Client::builder()
let hyper = hyper::Client::configure() .build(hyper_rustls::HttpsConnector::new(num_dns_threads));
.connector(hyper_rustls::HttpsConnector::new(num_dns_threads, &core.handle()))
.build(&core.handle());
let future = rx_proto.take_while(|item| Ok(item.is_some())) let future = rx_proto.take_while(|item| Ok(item.is_some()))
.map(|item| item.expect("`take_while` is only passing on channel items != None; qed")) .map(|item| item.expect("`take_while` is only passing on channel items != None; qed"))
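A sketch of what the rewritten thread body amounts to, with the channel plumbing stripped away (`fetch_once` and the URI are illustrative). Note that neither hyper 0.12's client builder nor hyper-rustls 0.14's connector takes a reactor handle any more; spawned futures find the reactor through the runtime that drives them:

extern crate futures;
extern crate hyper;
extern crate hyper_rustls;
extern crate tokio;

use futures::Future;

fn fetch_once(num_dns_threads: usize) {
    // Single-threaded runtime, mirroring the old `reactor::Core` setup.
    let mut runtime = tokio::runtime::current_thread::Runtime::new()
        .expect("runtime creation only fails on catastrophic mio errors");
    let connector = hyper_rustls::HttpsConnector::new(num_dns_threads);
    let client = hyper::Client::builder().build::<_, hyper::Body>(connector);
    let work = client
        .get("https://example.com".parse().expect("static URI; qed"))
        .map(|resp| println!("status: {}", resp.status()))
        .map_err(|e| eprintln!("fetch error: {}", e));
    runtime.block_on(work).ok();
}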
@ -241,13 +234,18 @@ impl Client {
request2.set_url(next_url); request2.set_url(next_url);
request2 request2
} else { } else {
Request::new(next_url, Method::Get) Request::new(next_url, Method::GET)
}; };
Ok(Loop::Continue((client, request, abort, redirects + 1))) Ok(Loop::Continue((client, request, abort, redirects + 1)))
} else { } else {
let content_len = resp.headers.get::<ContentLength>().cloned(); if let Some(ref h_val) = resp.headers.get(header::CONTENT_LENGTH) {
if content_len.map(|n| *n > abort.max_size() as u64).unwrap_or(false) { let content_len = h_val
return Err(Error::SizeLimit) .to_str()?
.parse::<u64>()?;
if content_len > abort.max_size() as u64 {
return Err(Error::SizeLimit)
}
} }
Ok(Loop::Break(resp)) Ok(Loop::Break(resp))
} }
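With typed headers gone in hyper 0.12, the size check becomes manual string-to-integer parsing; an isolated sketch of the same logic (helper names are illustrative):

extern crate hyper;

use hyper::header::{self, HeaderMap};

// `HeaderMap` yields raw `HeaderValue`s; both `to_str` and `parse` can fail,
// hence the `?` operators in the loop above and the new `From` impls on
// `Error` further down.
fn content_length(headers: &HeaderMap) -> Option<u64> {
    headers.get(header::CONTENT_LENGTH)?
        .to_str().ok()?
        .parse::<u64>().ok()
}

fn over_limit(headers: &HeaderMap, max_size: usize) -> bool {
    content_length(headers).map_or(false, |n| n > max_size as u64)
}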
@ -256,7 +254,7 @@ impl Client {
.then(|result| { .then(|result| {
future::ok(sender.send(result).unwrap_or(())) future::ok(sender.send(result).unwrap_or(()))
}); });
handle.spawn(fut); tokio::spawn(fut);
trace!(target: "fetch", "waiting for next request ..."); trace!(target: "fetch", "waiting for next request ...");
future::ok(()) future::ok(())
}); });
@ -264,7 +262,7 @@ impl Client {
tx_start.send(Ok(())).unwrap_or(()); tx_start.send(Ok(())).unwrap_or(());
debug!(target: "fetch", "processing requests ..."); debug!(target: "fetch", "processing requests ...");
if let Err(()) = core.run(future) { if let Err(()) = runtime.block_on(future) {
error!(target: "fetch", "error while executing future") error!(target: "fetch", "error while executing future")
} }
debug!(target: "fetch", "fetch background thread finished") debug!(target: "fetch", "fetch background thread finished")
@ -273,7 +271,7 @@ impl Client {
} }
impl Fetch for Client { impl Fetch for Client {
type Result = Box<Future<Item=Response, Error=Error> + Send>; type Result = Box<Future<Item=Response, Error=Error> + Send + 'static>;
fn fetch(&self, request: Request, abort: Abort) -> Self::Result { fn fetch(&self, request: Request, abort: Abort) -> Self::Result {
debug!(target: "fetch", "fetching: {:?}", request.url()); debug!(target: "fetch", "fetching: {:?}", request.url());
@ -282,7 +280,7 @@ impl Fetch for Client {
} }
let (tx_res, rx_res) = oneshot::channel(); let (tx_res, rx_res) = oneshot::channel();
let maxdur = abort.max_duration(); let maxdur = abort.max_duration();
let sender = self.core.clone(); let sender = self.runtime.clone();
let future = sender.send(Some((request, abort, tx_res))) let future = sender.send(Some((request, abort, tx_res)))
.map_err(|e| { .map_err(|e| {
error!(target: "fetch", "failed to schedule request: {}", e); error!(target: "fetch", "failed to schedule request: {}", e);
@ -291,7 +289,15 @@ impl Fetch for Client {
.and_then(|_| rx_res.map_err(|oneshot::Canceled| Error::BackgroundThreadDead)) .and_then(|_| rx_res.map_err(|oneshot::Canceled| Error::BackgroundThreadDead))
.and_then(future::result); .and_then(future::result);
Box::new(self.timer.timeout(future, maxdur)) Box::new(future.timeout(maxdur)
.map_err(|err| {
if err.is_inner() {
Error::from(err.into_inner().unwrap())
} else {
Error::from(err)
}
})
)
} }
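`tokio::util::FutureExt::timeout` replaces the old `Timer::timeout` here. It wraps failures in `timeout::Error<E>`, which carries either the inner future's error, an elapsed deadline, or a timer fault; a sketch of the same unwrapping in isolation (types and messages are illustrative):

extern crate futures;
extern crate tokio;

use std::time::Duration;
use futures::{future, Future};
use tokio::util::FutureExt;

fn bounded() -> impl Future<Item = u32, Error = String> {
    future::ok::<u32, String>(42)
        .timeout(Duration::from_secs(1))
        .map_err(|e| if e.is_inner() {
            // The wrapped future failed on its own.
            e.into_inner().expect("is_inner checked just above; qed")
        } else {
            // The deadline elapsed, or the timer itself errored.
            "deadline elapsed or timer failed".to_owned()
        })
}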
/// Get content from some URL. /// Get content from some URL.
@ -315,22 +321,21 @@ impl Fetch for Client {
// Extract redirect location from response. The second return value indicates whether the original method should be preserved. // Extract redirect location from response. The second return value indicates whether the original method should be preserved.
fn redirect_location(u: Url, r: &Response) -> Option<(Url, bool)> { fn redirect_location(u: Url, r: &Response) -> Option<(Url, bool)> {
use hyper::StatusCode::*;
let preserve_method = match r.status() { let preserve_method = match r.status() {
TemporaryRedirect | PermanentRedirect => true, StatusCode::TEMPORARY_REDIRECT | StatusCode::PERMANENT_REDIRECT => true,
_ => false, _ => false,
}; };
match r.status() { match r.status() {
MovedPermanently StatusCode::MOVED_PERMANENTLY
| PermanentRedirect | StatusCode::PERMANENT_REDIRECT
| TemporaryRedirect | StatusCode::TEMPORARY_REDIRECT
| Found | StatusCode::FOUND
| SeeOther => { | StatusCode::SEE_OTHER => {
if let Some(loc) = r.headers.get::<Location>() { r.headers.get(header::LOCATION).and_then(|loc| {
u.join(loc).ok().map(|url| (url, preserve_method)) loc.to_str().ok().and_then(|loc_s| {
} else { u.join(loc_s).ok().map(|url| (url, preserve_method))
None })
} })
} }
_ => None _ => None
} }
@ -341,7 +346,7 @@ fn redirect_location(u: Url, r: &Response) -> Option<(Url, bool)> {
pub struct Request { pub struct Request {
url: Url, url: Url,
method: Method, method: Method,
headers: hyper::Headers, headers: HeaderMap,
body: Bytes, body: Bytes,
} }
@ -350,19 +355,19 @@ impl Request {
pub fn new(url: Url, method: Method) -> Request { pub fn new(url: Url, method: Method) -> Request {
Request { Request {
url, method, url, method,
headers: hyper::Headers::new(), headers: HeaderMap::new(),
body: Default::default(), body: Default::default(),
} }
} }
/// Create a new GET request. /// Create a new GET request.
pub fn get(url: Url) -> Request { pub fn get(url: Url) -> Request {
Request::new(url, Method::Get) Request::new(url, Method::GET)
} }
/// Create a new empty POST request. /// Create a new empty POST request.
pub fn post(url: Url) -> Request { pub fn post(url: Url) -> Request {
Request::new(url, Method::Post) Request::new(url, Method::POST)
} }
/// Read the url. /// Read the url.
@ -371,12 +376,12 @@ impl Request {
} }
/// Read the request headers. /// Read the request headers.
pub fn headers(&self) -> &hyper::Headers { pub fn headers(&self) -> &HeaderMap {
&self.headers &self.headers
} }
/// Get a mutable reference to the headers. /// Get a mutable reference to the headers.
pub fn headers_mut(&mut self) -> &mut hyper::Headers { pub fn headers_mut(&mut self) -> &mut HeaderMap {
&mut self.headers &mut self.headers
} }
@ -391,8 +396,10 @@ impl Request {
} }
/// Consume self, and return it with the added given header. /// Consume self, and return it with the added given header.
pub fn with_header<H: hyper::header::Header>(mut self, value: H) -> Self { pub fn with_header<K>(mut self, key: K, val: HeaderValue) -> Self
self.headers_mut().set(value); where K: IntoHeaderName,
{
self.headers_mut().append(key, val);
self self
} }
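A hypothetical call site for the reworked `with_header`, assuming the surrounding module's `Request` and `Url`; typed header values give way to an `IntoHeaderName` key plus a raw `HeaderValue`:

use hyper::header::{self, HeaderValue};

fn json_request(url: Url) -> Request {
    // `header::ACCEPT` satisfies `IntoHeaderName`; the value is raw bytes.
    Request::get(url)
        .with_header(header::ACCEPT, HeaderValue::from_static("application/json"))
}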
@ -403,16 +410,15 @@ impl Request {
} }
} }
impl Into<hyper::Request> for Request { impl From<Request> for hyper::Request<hyper::Body> {
fn into(mut self) -> hyper::Request { fn from(req: Request) -> hyper::Request<hyper::Body> {
let uri = self.url.as_ref().parse().expect("Every valid URLis also a URI."); let uri: hyper::Uri = req.url.as_ref().parse().expect("Every valid URLis also a URI.");
let mut req = hyper::Request::new(self.method, uri); hyper::Request::builder()
.method(req.method)
self.headers.set(UserAgent::new("Parity Fetch Neo")); .uri(uri)
*req.headers_mut() = self.headers; .header(header::USER_AGENT, HeaderValue::from_static("Parity Fetch Neo"))
req.set_body(self.body); .body(req.body.into())
.expect("Header, uri, method, and body are already valid and can not fail to parse; qed")
req
} }
} }
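Switching the impl from `Into` to `From` keeps `.into()` working at existing call sites while also allowing `hyper::Request::from(..)`; a hypothetical use, again assuming the module's `Request` and `Url`:

fn to_hyper(url: Url) -> hyper::Request<hyper::Body> {
    // Uses the `From` impl above via the blanket `Into`.
    Request::get(url).into()
}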
@ -421,7 +427,7 @@ impl Into<hyper::Request> for Request {
pub struct Response { pub struct Response {
url: Url, url: Url,
status: StatusCode, status: StatusCode,
headers: hyper::Headers, headers: HeaderMap,
body: hyper::Body, body: hyper::Body,
abort: Abort, abort: Abort,
nread: usize, nread: usize,
@ -429,12 +435,12 @@ pub struct Response {
impl Response { impl Response {
/// Create a new response, wrapping a hyper response. /// Create a new response, wrapping a hyper response.
pub fn new(u: Url, r: hyper::Response, a: Abort) -> Response { pub fn new(u: Url, r: hyper::Response<hyper::Body>, a: Abort) -> Response {
Response { Response {
url: u, url: u,
status: r.status(), status: r.status(),
headers: r.headers().clone(), headers: r.headers().clone(),
body: r.body(), body: r.into_body(),
abort: a, abort: a,
nread: 0, nread: 0,
} }
@ -447,26 +453,21 @@ impl Response {
/// Status code == OK (200)? /// Status code == OK (200)?
pub fn is_success(&self) -> bool { pub fn is_success(&self) -> bool {
self.status() == StatusCode::Ok self.status() == StatusCode::OK
} }
/// Status code == 404. /// Status code == 404.
pub fn is_not_found(&self) -> bool { pub fn is_not_found(&self) -> bool {
self.status() == StatusCode::NotFound self.status() == StatusCode::NOT_FOUND
} }
/// Is the content-type text/html? /// Is the content-type text/html?
pub fn is_html(&self) -> bool { pub fn is_html(&self) -> bool {
if let Some(ref mime) = self.content_type() { self.headers.get(header::CONTENT_TYPE).and_then(|ct_val| {
mime.type_() == "text" && mime.subtype() == "html" ct_val.to_str().ok().map(|ct_str| {
} else { ct_str.contains("text") && ct_str.contains("html")
false })
} }).unwrap_or(false)
}
/// The content-type header value.
pub fn content_type(&self) -> Option<Mime> {
self.headers.get::<ContentType>().map(|ct| ct.0.clone())
} }
} }
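With the `mime` crate typing removed, the HTML check falls back to substring matching on the raw header value; a sketch of the resulting behavior (deliberately loose: it accepts `text/html; charset=utf-8`, but also any value containing both tokens):

use hyper::header::{self, HeaderMap};

fn is_html(headers: &HeaderMap) -> bool {
    headers.get(header::CONTENT_TYPE)
        .and_then(|v| v.to_str().ok())
        .map_or(false, |ct| ct.contains("text") && ct.contains("html"))
}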
@ -562,6 +563,10 @@ impl io::Read for BodyReader {
pub enum Error { pub enum Error {
/// Hyper gave us an error. /// Hyper gave us an error.
Hyper(hyper::Error), Hyper(hyper::Error),
/// A hyper header conversion error.
HyperHeaderToStrError(hyper::header::ToStrError),
/// An integer parsing error.
ParseInt(std::num::ParseIntError),
/// Some I/O error occurred. /// Some I/O error occurred.
Io(io::Error), Io(io::Error),
/// Invalid URLs where attempted to parse. /// Invalid URLs where attempted to parse.
@ -570,8 +575,10 @@ pub enum Error {
Aborted, Aborted,
/// Too many redirects have been encountered. /// Too many redirects have been encountered.
TooManyRedirects, TooManyRedirects,
/// tokio-timer inner future gave us an error.
TokioTimeoutInnerVal(String),
/// tokio-timer gave us an error. /// tokio-timer gave us an error.
Timer(tokio_timer::TimerError), TokioTimer(Option<tokio::timer::Error>),
/// The maximum duration was reached. /// The maximum duration was reached.
Timeout, Timeout,
/// The response body is too large. /// The response body is too large.
@ -585,23 +592,43 @@ impl fmt::Display for Error {
match *self { match *self {
Error::Aborted => write!(fmt, "The request has been aborted."), Error::Aborted => write!(fmt, "The request has been aborted."),
Error::Hyper(ref e) => write!(fmt, "{}", e), Error::Hyper(ref e) => write!(fmt, "{}", e),
Error::HyperHeaderToStrError(ref e) => write!(fmt, "{}", e),
Error::ParseInt(ref e) => write!(fmt, "{}", e),
Error::Url(ref e) => write!(fmt, "{}", e), Error::Url(ref e) => write!(fmt, "{}", e),
Error::Io(ref e) => write!(fmt, "{}", e), Error::Io(ref e) => write!(fmt, "{}", e),
Error::BackgroundThreadDead => write!(fmt, "background thread gone"), Error::BackgroundThreadDead => write!(fmt, "background thread gone"),
Error::TooManyRedirects => write!(fmt, "too many redirects"), Error::TooManyRedirects => write!(fmt, "too many redirects"),
Error::Timer(ref e) => write!(fmt, "{}", e), Error::TokioTimeoutInnerVal(ref s) => write!(fmt, "tokio timer inner value error: {:?}", s),
Error::TokioTimer(ref e) => write!(fmt, "tokio timer error: {:?}", e),
Error::Timeout => write!(fmt, "request timed out"), Error::Timeout => write!(fmt, "request timed out"),
Error::SizeLimit => write!(fmt, "size limit reached"), Error::SizeLimit => write!(fmt, "size limit reached"),
} }
} }
} }
impl ::std::error::Error for Error {
fn description(&self) -> &str { "Fetch client error" }
fn cause(&self) -> Option<&::std::error::Error> { None }
}
impl From<hyper::Error> for Error { impl From<hyper::Error> for Error {
fn from(e: hyper::Error) -> Self { fn from(e: hyper::Error) -> Self {
Error::Hyper(e) Error::Hyper(e)
} }
} }
impl From<hyper::header::ToStrError> for Error {
fn from(e: hyper::header::ToStrError) -> Self {
Error::HyperHeaderToStrError(e)
}
}
impl From<std::num::ParseIntError> for Error {
fn from(e: std::num::ParseIntError) -> Self {
Error::ParseInt(e)
}
}
impl From<io::Error> for Error { impl From<io::Error> for Error {
fn from(e: io::Error) -> Self { fn from(e: io::Error) -> Self {
Error::Io(e) Error::Io(e)
@ -614,24 +641,35 @@ impl From<url::ParseError> for Error {
} }
} }
impl<F> From<tokio_timer::TimeoutError<F>> for Error { impl<T: std::fmt::Debug> From<tokio::timer::timeout::Error<T>> for Error {
fn from(e: tokio_timer::TimeoutError<F>) -> Self { fn from(e: tokio::timer::timeout::Error<T>) -> Self {
match e { if e.is_inner() {
tokio_timer::TimeoutError::Timer(_, e) => Error::Timer(e), Error::TokioTimeoutInnerVal(format!("{:?}", e.into_inner().unwrap()))
tokio_timer::TimeoutError::TimedOut(_) => Error::Timeout, } else if e.is_elapsed() {
Error::Timeout
} else {
Error::TokioTimer(e.into_timer())
} }
} }
} }
impl From<tokio::timer::Error> for Error {
fn from(e: tokio::timer::Error) -> Self {
Error::TokioTimer(Some(e))
}
}
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;
use futures::future; use futures::future;
use futures::sync::mpsc; use futures::sync::oneshot;
use hyper::StatusCode; use hyper::{
use hyper::server::{Http, Request, Response, Service}; StatusCode,
use tokio_timer::Timer; service::Service,
use std; };
use tokio::timer::Delay;
use tokio::runtime::current_thread::Runtime;
use std::io::Read; use std::io::Read;
use std::net::SocketAddr; use std::net::SocketAddr;
@ -641,139 +679,238 @@ mod test {
fn it_should_fetch() { fn it_should_fetch() {
let server = TestServer::run(); let server = TestServer::run();
let client = Client::new(4).unwrap(); let client = Client::new(4).unwrap();
let future = client.get(&format!("http://{}?123", server.addr()), Default::default()); let mut runtime = Runtime::new().unwrap();
let resp = future.wait().unwrap();
assert!(resp.is_success()); let future = client.get(&format!("http://{}?123", server.addr()), Abort::default())
let body = resp.concat2().wait().unwrap(); .map(|resp| {
assert_eq!(&body[..], b"123") assert!(resp.is_success());
resp
})
.map(|resp| resp.concat2())
.flatten()
.map(|body| assert_eq!(&body[..], b"123"))
.map_err(|err| panic!(err));
runtime.block_on(future).unwrap();
} }
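The reworked tests below all share one shape: assertions are folded into the future chain, which is then driven to completion on a local current-thread runtime instead of via `Future::wait`. A minimal sketch of the pattern (`run_test` is illustrative):

use futures::Future;
use tokio::runtime::current_thread::Runtime;

fn run_test<F>(future: F) where F: Future<Item = (), Error = ()> {
    let mut runtime = Runtime::new().expect("creating a test runtime should not fail");
    // Panics from asserts inside the chain propagate; an `Err` fails here.
    runtime.block_on(future).expect("test future should resolve");
}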
#[test] #[test]
fn it_should_fetch_in_light_mode() { fn it_should_fetch_in_light_mode() {
let server = TestServer::run(); let server = TestServer::run();
let client = Client::new(1).unwrap(); let client = Client::new(1).unwrap();
let future = client.get(&format!("http://{}?123", server.addr()), Default::default()); let mut runtime = Runtime::new().unwrap();
let resp = future.wait().unwrap();
assert!(resp.is_success()); let future = client.get(&format!("http://{}?123", server.addr()), Abort::default())
let body = resp.concat2().wait().unwrap(); .map(|resp| {
assert_eq!(&body[..], b"123") assert!(resp.is_success());
resp
})
.map(|resp| resp.concat2())
.flatten()
.map(|body| assert_eq!(&body[..], b"123"))
.map_err(|err| panic!(err));
runtime.block_on(future).unwrap();
} }
#[test] #[test]
fn it_should_timeout() { fn it_should_timeout() {
let server = TestServer::run(); let server = TestServer::run();
let client = Client::new(4).unwrap(); let client = Client::new(4).unwrap();
let mut runtime = Runtime::new().unwrap();
let abort = Abort::default().with_max_duration(Duration::from_secs(1)); let abort = Abort::default().with_max_duration(Duration::from_secs(1));
match client.get(&format!("http://{}/delay?3", server.addr()), abort).wait() {
Err(Error::Timeout) => {} let future = client.get(&format!("http://{}/delay?3", server.addr()), abort)
other => panic!("expected timeout, got {:?}", other) .then(|res| {
} match res {
Err(Error::Timeout) => Ok::<_, ()>(()),
other => panic!("expected timeout, got {:?}", other),
}
});
runtime.block_on(future).unwrap();
} }
#[test] #[test]
fn it_should_follow_redirects() { fn it_should_follow_redirects() {
let server = TestServer::run(); let server = TestServer::run();
let client = Client::new(4).unwrap(); let client = Client::new(4).unwrap();
let mut runtime = Runtime::new().unwrap();
let abort = Abort::default(); let abort = Abort::default();
let future = client.get(&format!("http://{}/redirect?http://{}/", server.addr(), server.addr()), abort);
assert!(future.wait().unwrap().is_success()) let future = client.get(&format!("http://{}/redirect?http://{}/", server.addr(), server.addr()), abort)
.and_then(|resp| {
if resp.is_success() { Ok(()) } else { panic!("Response unsuccessful") }
});
runtime.block_on(future).unwrap();
} }
#[test] #[test]
fn it_should_follow_relative_redirects() { fn it_should_follow_relative_redirects() {
let server = TestServer::run(); let server = TestServer::run();
let client = Client::new(4).unwrap(); let client = Client::new(4).unwrap();
let mut runtime = Runtime::new().unwrap();
let abort = Abort::default().with_max_redirects(4); let abort = Abort::default().with_max_redirects(4);
let future = client.get(&format!("http://{}/redirect?/", server.addr()), abort); let future = client.get(&format!("http://{}/redirect?/", server.addr()), abort)
assert!(future.wait().unwrap().is_success()) .and_then(|resp| {
if resp.is_success() { Ok(()) } else { panic!("Response unsuccessful") }
});
runtime.block_on(future).unwrap();
} }
#[test] #[test]
fn it_should_not_follow_too_many_redirects() { fn it_should_not_follow_too_many_redirects() {
let server = TestServer::run(); let server = TestServer::run();
let client = Client::new(4).unwrap(); let client = Client::new(4).unwrap();
let mut runtime = Runtime::new().unwrap();
let abort = Abort::default().with_max_redirects(3); let abort = Abort::default().with_max_redirects(3);
match client.get(&format!("http://{}/loop", server.addr()), abort).wait() { let future = client.get(&format!("http://{}/loop", server.addr()), abort)
Err(Error::TooManyRedirects) => {} .then(|res| {
other => panic!("expected too many redirects error, got {:?}", other) match res {
} Err(Error::TooManyRedirects) => Ok::<_, ()>(()),
other => panic!("expected too many redirects error, got {:?}", other)
}
});
runtime.block_on(future).unwrap();
} }
#[test] #[test]
fn it_should_read_data() { fn it_should_read_data() {
let server = TestServer::run(); let server = TestServer::run();
let client = Client::new(4).unwrap(); let client = Client::new(4).unwrap();
let mut runtime = Runtime::new().unwrap();
let abort = Abort::default(); let abort = Abort::default();
let future = client.get(&format!("http://{}?abcdefghijklmnopqrstuvwxyz", server.addr()), abort); let future = client.get(&format!("http://{}?abcdefghijklmnopqrstuvwxyz", server.addr()), abort)
let resp = future.wait().unwrap(); .and_then(|resp| {
assert!(resp.is_success()); if resp.is_success() { Ok(resp) } else { panic!("Response unsuccessful") }
assert_eq!(&resp.concat2().wait().unwrap()[..], b"abcdefghijklmnopqrstuvwxyz") })
.map(|resp| resp.concat2())
.flatten()
.map(|body| assert_eq!(&body[..], b"abcdefghijklmnopqrstuvwxyz"));
runtime.block_on(future).unwrap();
} }
#[test] #[test]
fn it_should_not_read_too_much_data() { fn it_should_not_read_too_much_data() {
let server = TestServer::run(); let server = TestServer::run();
let client = Client::new(4).unwrap(); let client = Client::new(4).unwrap();
let mut runtime = Runtime::new().unwrap();
let abort = Abort::default().with_max_size(3); let abort = Abort::default().with_max_size(3);
let resp = client.get(&format!("http://{}/?1234", server.addr()), abort).wait().unwrap(); let future = client.get(&format!("http://{}/?1234", server.addr()), abort)
assert!(resp.is_success()); .and_then(|resp| {
match resp.concat2().wait() { if resp.is_success() { Ok(resp) } else { panic!("Response unsuccessful") }
Err(Error::SizeLimit) => {} })
other => panic!("expected size limit error, got {:?}", other) .map(|resp| resp.concat2())
} .flatten()
.then(|body| {
match body {
Err(Error::SizeLimit) => Ok::<_, ()>(()),
other => panic!("expected size limit error, got {:?}", other),
}
});
runtime.block_on(future).unwrap();
} }
#[test] #[test]
fn it_should_not_read_too_much_data_sync() { fn it_should_not_read_too_much_data_sync() {
let server = TestServer::run(); let server = TestServer::run();
let client = Client::new(4).unwrap(); let client = Client::new(4).unwrap();
let mut runtime = Runtime::new().unwrap();
// let abort = Abort::default().with_max_size(3);
// let resp = client.get(&format!("http://{}/?1234", server.addr()), abort).wait().unwrap();
// assert!(resp.is_success());
// let mut buffer = Vec::new();
// let mut reader = BodyReader::new(resp);
// match reader.read_to_end(&mut buffer) {
// Err(ref e) if e.kind() == io::ErrorKind::PermissionDenied => {}
// other => panic!("expected size limit error, got {:?}", other)
// }
// FIXME (c0gent): The prior version of this test (pre-hyper-0.12,
// commented out above) is not possible to recreate. It relied on an
// apparent bug in `Client::background_thread` which suppressed the
// `SizeLimit` error from occurring. This is due to the headers
// collection not returning a value for content length when queried.
// The precise reason why this was happening is unclear.
let abort = Abort::default().with_max_size(3); let abort = Abort::default().with_max_size(3);
let resp = client.get(&format!("http://{}/?1234", server.addr()), abort).wait().unwrap(); let future = client.get(&format!("http://{}/?1234", server.addr()), abort)
assert!(resp.is_success()); .and_then(|resp| {
let mut buffer = Vec::new(); assert_eq!(true, false, "Unreachable. (see FIXME note)");
let mut reader = BodyReader::new(resp); assert!(resp.is_success());
match reader.read_to_end(&mut buffer) { let mut buffer = Vec::new();
Err(ref e) if e.kind() == io::ErrorKind::PermissionDenied => {} let mut reader = BodyReader::new(resp);
other => panic!("expected size limit error, got {:?}", other) match reader.read_to_end(&mut buffer) {
Err(ref e) if e.kind() == io::ErrorKind::PermissionDenied => Ok(()),
other => panic!("expected size limit error, got {:?}", other)
}
});
// FIXME: This simply demonstrates the above point.
match runtime.block_on(future) {
Err(Error::SizeLimit) => {},
other => panic!("Expected `Error::SizeLimit`, got: {:?}", other),
} }
} }
struct TestServer(Timer); struct TestServer;
impl Service for TestServer { impl Service for TestServer {
type Request = Request; type ReqBody = hyper::Body;
type Response = Response; type ResBody = hyper::Body;
type Error = hyper::Error; type Error = Error;
type Future = Box<Future<Item=Self::Response, Error=Self::Error>>; type Future = Box<Future<Item=hyper::Response<Self::ResBody>, Error=Self::Error> + Send + 'static>;
fn call(&self, req: Request) -> Self::Future { fn call(&mut self, req: hyper::Request<hyper::Body>) -> Self::Future {
match req.uri().path() { match req.uri().path() {
"/" => { "/" => {
let body = req.uri().query().unwrap_or("").to_string(); let body = req.uri().query().unwrap_or("").to_string();
let req = Response::new().with_body(body); let res = hyper::Response::new(body.into());
Box::new(future::ok(req)) Box::new(future::ok(res))
} }
"/redirect" => { "/redirect" => {
let loc = Location::new(req.uri().query().unwrap_or("/").to_string()); let loc = req.uri().query().unwrap_or("/").to_string();
let req = Response::new() let res = hyper::Response::builder()
.with_status(StatusCode::MovedPermanently) .status(StatusCode::MOVED_PERMANENTLY)
.with_header(loc); .header(hyper::header::LOCATION, loc)
Box::new(future::ok(req)) .body(hyper::Body::empty())
.expect("Unable to create response");
Box::new(future::ok(res))
} }
"/loop" => { "/loop" => {
let req = Response::new() let res = hyper::Response::builder()
.with_status(StatusCode::MovedPermanently) .status(StatusCode::MOVED_PERMANENTLY)
.with_header(Location::new("/loop".to_string())); .header(hyper::header::LOCATION, "/loop")
Box::new(future::ok(req)) .body(hyper::Body::empty())
.expect("Unable to create response");
Box::new(future::ok(res))
} }
"/delay" => { "/delay" => {
let d = Duration::from_secs(req.uri().query().unwrap_or("0").parse().unwrap()); let dur = Duration::from_secs(req.uri().query().unwrap_or("0").parse().unwrap());
Box::new(self.0.sleep(d) let delayed_res = Delay::new(std::time::Instant::now() + dur)
.map_err(|_| return io::Error::new(io::ErrorKind::Other, "timer error")) .and_then(|_| Ok::<_, _>(hyper::Response::new(hyper::Body::empty())))
.from_err() .from_err();
.map(|_| Response::new())) Box::new(delayed_res)
}
_ => {
let res = hyper::Response::builder()
.status(StatusCode::NOT_FOUND)
.body(hyper::Body::empty())
.expect("Unable to create response");
Box::new(future::ok(res))
} }
_ => Box::new(future::ok(Response::new().with_status(StatusCode::NotFound)))
} }
} }
} }
@ -781,19 +918,27 @@ mod test {
impl TestServer { impl TestServer {
fn run() -> Handle { fn run() -> Handle {
let (tx_start, rx_start) = std::sync::mpsc::sync_channel(1); let (tx_start, rx_start) = std::sync::mpsc::sync_channel(1);
let (tx_end, rx_end) = mpsc::channel(0); let (tx_end, rx_end) = oneshot::channel();
let rx_end_fut = rx_end.into_future().map(|_| ()).map_err(|_| ()); let rx_end_fut = rx_end.map(|_| ()).map_err(|_| ());
thread::spawn(move || { thread::spawn(move || {
let addr = ADDRESS.parse().unwrap(); let addr = ADDRESS.parse().unwrap();
let server = Http::new().bind(&addr, || Ok(TestServer(Timer::default()))).unwrap();
tx_start.send(server.local_addr().unwrap()).unwrap_or(()); let server = hyper::server::Server::bind(&addr)
server.run_until(rx_end_fut).unwrap(); .serve(|| future::ok::<_, hyper::Error>(TestServer));
tx_start.send(server.local_addr()).unwrap_or(());
tokio::run(
server.with_graceful_shutdown(rx_end_fut)
.map_err(|e| panic!("server error: {}", e))
);
}); });
Handle(rx_start.recv().unwrap(), tx_end)
Handle(rx_start.recv().unwrap(), Some(tx_end))
} }
} }
struct Handle(SocketAddr, mpsc::Sender<()>); struct Handle(SocketAddr, Option<oneshot::Sender<()>>);
impl Handle { impl Handle {
fn addr(&self) -> SocketAddr { fn addr(&self) -> SocketAddr {
@ -803,7 +948,7 @@ mod test {
impl Drop for Handle { impl Drop for Handle {
fn drop(&mut self) { fn drop(&mut self) {
self.1.clone().send(()).wait().unwrap(); self.1.take().unwrap().send(()).unwrap();
} }
} }
} }

View File

@ -26,9 +26,9 @@ extern crate futures;
extern crate hyper; extern crate hyper;
extern crate hyper_rustls; extern crate hyper_rustls;
extern crate http;
extern crate tokio_core; extern crate tokio;
extern crate tokio_timer;
extern crate url; extern crate url;
extern crate bytes; extern crate bytes;

View File

@ -21,7 +21,7 @@ ansi_term = "0.10"
rustc-hex = "1.0" rustc-hex = "1.0"
ethcore-io = { path = "../io", features = ["mio"] } ethcore-io = { path = "../io", features = ["mio"] }
parity-bytes = "0.1" parity-bytes = "0.1"
parity-crypto = "0.1" parity-crypto = "0.2"
ethcore-logger = { path ="../../logger" } ethcore-logger = { path ="../../logger" }
ethcore-network = { path = "../network" } ethcore-network = { path = "../network" }
ethereum-types = "0.4" ethereum-types = "0.4"

View File

@ -8,7 +8,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies] [dependencies]
error-chain = { version = "0.12", default-features = false } error-chain = { version = "0.12", default-features = false }
parity-crypto = "0.1" parity-crypto = "0.2"
ethcore-io = { path = "../io" } ethcore-io = { path = "../io" }
ethereum-types = "0.4" ethereum-types = "0.4"
ethkey = { path = "../../ethkey" } ethkey = { path = "../../ethkey" }

View File

@ -1,238 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Tokio Core Reactor wrapper.
extern crate futures;
extern crate tokio_core;
use std::{fmt, thread};
use std::sync::mpsc;
use std::time::Duration;
use futures::{Future, IntoFuture};
pub use tokio_core::reactor::{Remote as TokioRemote, Handle, Timeout};
/// Event Loop for futures.
/// Wrapper around `tokio::reactor::Core`.
/// Runs in a separate thread.
pub struct EventLoop {
remote: Remote,
handle: EventLoopHandle,
}
impl EventLoop {
/// Spawns a new thread with `EventLoop` with given handler.
pub fn spawn() -> Self {
let (stop, stopped) = futures::oneshot();
let (tx, rx) = mpsc::channel();
let handle = thread::spawn(move || {
let mut el = tokio_core::reactor::Core::new().expect("Creating an event loop should not fail.");
tx.send(el.remote()).expect("Rx is blocking upper thread.");
let _ = el.run(futures::empty().select(stopped));
});
let remote = rx.recv().expect("tx is transferred to a newly spawned thread.");
EventLoop {
remote: Remote {
inner: Mode::Tokio(remote),
},
handle: EventLoopHandle {
close: Some(stop),
handle: Some(handle),
},
}
}
/// Returns this event loop raw remote.
///
/// Deprecated: Exists only to connect with current JSONRPC implementation.
pub fn raw_remote(&self) -> TokioRemote {
if let Mode::Tokio(ref remote) = self.remote.inner {
remote.clone()
} else {
panic!("Event loop is never initialized in other mode then Tokio.")
}
}
/// Returns event loop remote.
pub fn remote(&self) -> Remote {
self.remote.clone()
}
}
#[derive(Clone)]
enum Mode {
Tokio(TokioRemote),
Sync,
ThreadPerFuture,
}
impl fmt::Debug for Mode {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::Mode::*;
match *self {
Tokio(_) => write!(fmt, "tokio"),
Sync => write!(fmt, "synchronous"),
ThreadPerFuture => write!(fmt, "thread per future"),
}
}
}
#[derive(Debug, Clone)]
pub struct Remote {
inner: Mode,
}
impl Remote {
/// Remote for existing event loop.
///
/// Deprecated: Exists only to connect with current JSONRPC implementation.
pub fn new(remote: TokioRemote) -> Self {
Remote {
inner: Mode::Tokio(remote),
}
}
/// Synchronous remote, used mostly for tests.
pub fn new_sync() -> Self {
Remote {
inner: Mode::Sync,
}
}
/// Spawns a new thread for each future (use only for tests).
pub fn new_thread_per_future() -> Self {
Remote {
inner: Mode::ThreadPerFuture,
}
}
/// Spawn a future to this event loop
pub fn spawn<R>(&self, r: R) where
R: IntoFuture<Item=(), Error=()> + Send + 'static,
R::Future: 'static,
{
match self.inner {
Mode::Tokio(ref remote) => remote.spawn(move |_| r),
Mode::Sync => {
let _= r.into_future().wait();
},
Mode::ThreadPerFuture => {
thread::spawn(move || {
let _= r.into_future().wait();
});
},
}
}
/// Spawn a new future returned by given closure.
pub fn spawn_fn<F, R>(&self, f: F) where
F: FnOnce(&Handle) -> R + Send + 'static,
R: IntoFuture<Item=(), Error=()>,
R::Future: 'static,
{
match self.inner {
Mode::Tokio(ref remote) => remote.spawn(move |handle| f(handle)),
Mode::Sync => {
let mut core = tokio_core::reactor::Core::new().expect("Creating an event loop should not fail.");
let handle = core.handle();
let _ = core.run(f(&handle).into_future());
},
Mode::ThreadPerFuture => {
thread::spawn(move || {
let mut core = tokio_core::reactor::Core::new().expect("Creating an event loop should not fail.");
let handle = core.handle();
let _ = core.run(f(&handle).into_future());
});
},
}
}
/// Spawn a new future and wait for it or for a timeout to occur.
pub fn spawn_with_timeout<F, R, T>(&self, f: F, duration: Duration, on_timeout: T) where
T: FnOnce() -> () + Send + 'static,
F: FnOnce(&Handle) -> R + Send + 'static,
R: IntoFuture<Item=(), Error=()>,
R::Future: 'static,
{
match self.inner {
Mode::Tokio(ref remote) => remote.spawn(move |handle| {
let future = f(handle).into_future();
let timeout = Timeout::new(duration, handle).expect("Event loop is still up.");
future.select(timeout.then(move |_| {
on_timeout();
Ok(())
})).then(|_| Ok(()))
}),
Mode::Sync => {
let mut core = tokio_core::reactor::Core::new().expect("Creating an event loop should not fail.");
let handle = core.handle();
let future = f(&handle).into_future();
let timeout = Timeout::new(duration, &handle).expect("Event loop is still up.");
let _: Result<(), ()> = core.run(future.select(timeout.then(move |_| {
on_timeout();
Ok(())
})).then(|_| Ok(())));
},
Mode::ThreadPerFuture => {
thread::spawn(move || {
let mut core = tokio_core::reactor::Core::new().expect("Creating an event loop should not fail.");
let handle = core.handle();
let future = f(&handle).into_future();
let timeout = Timeout::new(duration, &handle).expect("Event loop is still up.");
let _: Result<(), ()> = core.run(future.select(timeout.then(move |_| {
on_timeout();
Ok(())
})).then(|_| Ok(())));
});
},
}
}
}
/// A handle to running event loop. Dropping the handle will cause event loop to finish.
pub struct EventLoopHandle {
close: Option<futures::Complete<()>>,
handle: Option<thread::JoinHandle<()>>
}
impl From<EventLoop> for EventLoopHandle {
fn from(el: EventLoop) -> Self {
el.handle
}
}
impl Drop for EventLoopHandle {
fn drop(&mut self) {
self.close.take().map(|v| v.send(()));
}
}
impl EventLoopHandle {
/// Blocks current thread and waits until the event loop is finished.
pub fn wait(mut self) -> thread::Result<()> {
self.handle.take()
.expect("Handle is taken only in `wait`, `wait` is consuming; qed").join()
}
/// Finishes this event loop.
pub fn close(mut self) {
let _ = self.close.take()
.expect("Close is taken only in `close` and `drop`. `close` is consuming; qed")
.send(());
}
}

View File

@ -1,11 +1,11 @@
[package] [package]
description = "Parity Reactor" description = "Parity Runtime"
homepage = "http://parity.io" homepage = "http://parity.io"
license = "GPL-3.0" license = "GPL-3.0"
name = "parity-reactor" name = "parity-runtime"
version = "0.1.0" version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"] authors = ["Parity Technologies <admin@parity.io>"]
[dependencies] [dependencies]
futures = "0.1" futures = "0.1"
tokio-core = "0.1" tokio = "~0.1.9"

256
util/runtime/src/lib.rs Normal file
View File

@ -0,0 +1,256 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Tokio Runtime wrapper.
extern crate futures;
extern crate tokio;
use std::{fmt, thread};
use std::sync::mpsc;
use std::time::{Duration, Instant};
use futures::{future, Future, IntoFuture};
pub use tokio::timer::Delay;
pub use tokio::runtime::{Runtime as TokioRuntime, Builder as TokioRuntimeBuilder, TaskExecutor};
/// Runtime for futures.
///
/// Runs in a separate thread.
pub struct Runtime {
executor: Executor,
handle: RuntimeHandle,
}
impl Runtime {
fn new(runtime_bldr: &mut TokioRuntimeBuilder) -> Self {
let mut runtime = runtime_bldr
.build()
.expect("Building a Tokio runtime will only fail when mio components \
cannot be initialized (catastrophic)");
let (stop, stopped) = futures::oneshot();
let (tx, rx) = mpsc::channel();
let handle = thread::spawn(move || {
tx.send(runtime.executor()).expect("Rx is blocking upper thread.");
runtime.block_on(futures::empty().select(stopped).map(|_| ()).map_err(|_| ()))
.expect("Tokio runtime should not have unhandled errors.");
});
let executor = rx.recv().expect("tx is transferred to a newly spawned thread.");
Runtime {
executor: Executor {
inner: Mode::Tokio(executor),
},
handle: RuntimeHandle {
close: Some(stop),
handle: Some(handle),
},
}
}
/// Spawns a new tokio runtime with a default thread count on a background
/// thread and returns a `Runtime` which can be used to spawn tasks via
/// its executor.
pub fn with_default_thread_count() -> Self {
let mut runtime_bldr = TokioRuntimeBuilder::new();
Self::new(&mut runtime_bldr)
}
/// Spawns a new tokio runtime with the specified thread count on a
/// background thread and returns a `Runtime` which can be used to spawn
/// tasks via its executor.
pub fn with_thread_count(thread_count: usize) -> Self {
let mut runtime_bldr = TokioRuntimeBuilder::new();
runtime_bldr.core_threads(thread_count);
Self::new(&mut runtime_bldr)
}
/// Returns this runtime raw executor.
///
/// Deprecated: Exists only to connect with current JSONRPC implementation.
pub fn raw_executor(&self) -> TaskExecutor {
if let Mode::Tokio(ref executor) = self.executor.inner {
executor.clone()
} else {
panic!("Runtime is not initialized in Tokio mode.")
}
}
/// Returns runtime executor.
pub fn executor(&self) -> Executor {
self.executor.clone()
}
}
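A usage sketch for the new wrapper (assuming the renamed `parity-runtime` crate): create the runtime once, hand out `Executor` clones, and let the handle's `Drop` stop the background thread.

extern crate futures;
extern crate parity_runtime;

use futures::future;
use parity_runtime::Runtime;

fn main() {
    let runtime = Runtime::with_default_thread_count();
    let executor = runtime.executor();
    executor.spawn(future::lazy(|| -> Result<(), ()> {
        println!("runs on the background tokio runtime");
        Ok(())
    }));
    // Dropping `runtime` drops its `RuntimeHandle`, signalling shutdown.
}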
#[derive(Clone)]
enum Mode {
Tokio(TaskExecutor),
Sync,
ThreadPerFuture,
}
impl fmt::Debug for Mode {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::Mode::*;
match *self {
Tokio(_) => write!(fmt, "tokio"),
Sync => write!(fmt, "synchronous"),
ThreadPerFuture => write!(fmt, "thread per future"),
}
}
}
/// Returns a future which runs `f`; if `duration` elapses before the
/// resulting work completes, `on_timeout` is run and the future resolves.
fn timeout<F, R, T>(f: F, duration: Duration, on_timeout: T)
-> impl Future<Item = (), Error = ()> + Send + 'static
where
T: FnOnce() -> () + Send + 'static,
F: FnOnce() -> R + Send + 'static,
R: IntoFuture<Item=(), Error=()> + Send + 'static,
R::Future: Send + 'static,
{
let future = future::lazy(f);
let timeout = Delay::new(Instant::now() + duration)
.then(move |_| {
on_timeout();
Ok(())
});
future.select(timeout).then(|_| Ok(()))
}
#[derive(Debug, Clone)]
pub struct Executor {
inner: Mode,
}
impl Executor {
/// Executor for existing runtime.
///
/// Deprecated: Exists only to connect with current JSONRPC implementation.
pub fn new(executor: TaskExecutor) -> Self {
Executor {
inner: Mode::Tokio(executor),
}
}
/// Synchronous executor, used mostly for tests.
pub fn new_sync() -> Self {
Executor {
inner: Mode::Sync,
}
}
/// Spawns a new thread for each future (use only for tests).
pub fn new_thread_per_future() -> Self {
Executor {
inner: Mode::ThreadPerFuture,
}
}
/// Spawn a future to this runtime
pub fn spawn<R>(&self, r: R) where
R: IntoFuture<Item=(), Error=()> + Send + 'static,
R::Future: Send + 'static,
{
match self.inner {
Mode::Tokio(ref executor) => executor.spawn(r.into_future()),
Mode::Sync => {
let _ = r.into_future().wait();
},
Mode::ThreadPerFuture => {
thread::spawn(move || {
let _ = r.into_future().wait();
});
},
}
}
/// Spawn a new future returned by given closure.
pub fn spawn_fn<F, R>(&self, f: F) where
F: FnOnce() -> R + Send + 'static,
R: IntoFuture<Item=(), Error=()> + Send + 'static,
R::Future: Send + 'static,
{
match self.inner {
Mode::Tokio(ref executor) => executor.spawn(future::lazy(f)),
Mode::Sync => {
let _ = future::lazy(f).wait();
},
Mode::ThreadPerFuture => {
thread::spawn(move || {
let _ = f().into_future().wait();
});
},
}
}
/// Spawn a new future and wait for it or for a timeout to occur.
pub fn spawn_with_timeout<F, R, T>(&self, f: F, duration: Duration, on_timeout: T) where
T: FnOnce() -> () + Send + 'static,
F: FnOnce() -> R + Send + 'static,
R: IntoFuture<Item=(), Error=()> + Send + 'static,
R::Future: Send + 'static,
{
match self.inner {
Mode::Tokio(ref executor) => {
executor.spawn(timeout(f, duration, on_timeout))
},
Mode::Sync => {
let _ = timeout(f, duration, on_timeout).wait();
},
Mode::ThreadPerFuture => {
thread::spawn(move || {
let _ = timeout(f, duration, on_timeout).wait();
});
},
}
}
}
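And `spawn_with_timeout` from the caller's side (names and messages are illustrative): `on_timeout` runs only if the work has not resolved within `duration`, and the combined future always resolves to `Ok(())`.

use std::time::Duration;
use futures::future;

fn health_check(executor: &Executor) {
    executor.spawn_with_timeout(
        || future::ok::<(), ()>(()),            // the work to run
        Duration::from_secs(5),                 // the deadline
        || eprintln!("health check timed out"), // timeout side effect
    );
}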
/// A handle to a runtime. Dropping the handle will cause the runtime to shut down.
pub struct RuntimeHandle {
close: Option<futures::Complete<()>>,
handle: Option<thread::JoinHandle<()>>
}
impl From<Runtime> for RuntimeHandle {
fn from(el: Runtime) -> Self {
el.handle
}
}
impl Drop for RuntimeHandle {
fn drop(&mut self) {
self.close.take().map(|v| v.send(()));
}
}
impl RuntimeHandle {
/// Blocks current thread and waits until the runtime is finished.
pub fn wait(mut self) -> thread::Result<()> {
self.handle.take()
.expect("Handle is taken only in `wait`, `wait` is consuming; qed").join()
}
/// Finishes this runtime.
pub fn close(mut self) {
let _ = self.close.take()
.expect("Close is taken only in `close` and `drop`. `close` is consuming; qed")
.send(());
}
}

View File

@ -9,7 +9,7 @@ bitflags = "0.9"
byteorder = "1.0.0" byteorder = "1.0.0"
ethereum-types = "0.4" ethereum-types = "0.4"
ethcore-network = { path = "../util/network" } ethcore-network = { path = "../util/network" }
parity-crypto = "0.1" parity-crypto = "0.2"
ethkey = { path = "../ethkey" } ethkey = { path = "../ethkey" }
hex = "0.2" hex = "0.2"
log = "0.4" log = "0.4"
@ -25,6 +25,6 @@ slab = "0.3"
smallvec = "0.6" smallvec = "0.6"
tiny-keccak = "1.4" tiny-keccak = "1.4"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }

View File

@ -14,9 +14,9 @@ docopt = "0.8"
serde = "1.0" serde = "1.0"
serde_derive = "1.0" serde_derive = "1.0"
panic_hook = { path = "../../util/panic_hook" } panic_hook = { path = "../../util/panic_hook" }
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
log = "0.4" log = "0.4"
[[bin]] [[bin]]