Replace tokio_core with tokio (ring -> 0.13) (#9657)

* Replace `tokio_core` with `tokio`.

* Remove `tokio-core` and replace it with `tokio` in the following crates (a migration sketch follows the list):

    - `ethcore/stratum`

    - `secret_store`

    - `util/fetch`

    - `util/reactor`
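
A minimal sketch of what that migration looks like in practice, assuming only the crate versions this PR targets (`futures` 0.1 and `tokio` 0.1); the trivial future is purely illustrative:

```rust
extern crate futures;
extern crate tokio;

use futures::future;
use tokio::runtime::Runtime;

fn main() {
    // Before (tokio-core), a reactor `Core` was built and driven explicitly:
    //     let mut core = tokio_core::reactor::Core::new().unwrap();
    //     let value = core.run(future::ok::<u32, ()>(1)).unwrap();
    // After (tokio 0.1), a `Runtime` drives the same futures-0.1 future:
    let mut runtime = Runtime::new().expect("failed to start tokio runtime");
    let value = runtime.block_on(future::ok::<u32, ()>(1)).expect("future failed");
    assert_eq!(value, 1);
}
```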

* Bump `hyper` to 0.12 in the following crates (an API sketch follows the list):

    - `miner`

    - `util/fake-fetch`

    - `util/fetch`

    - `secret_store`
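
For reference, a hedged sketch of the hyper 0.11 -> 0.12 differences these crates have to absorb: method and status constants change case, typed headers give way to `HeaderValue`, and responses are built with a builder. The helper function and JSON body are illustrative, not code from this PR:

```rust
extern crate hyper;

use hyper::{Body, Method, Response, StatusCode};
use hyper::header::{self, HeaderValue};

fn build_response(method: &Method) -> Response<Body> {
    // 0.11 used variants such as `Method::Post`; 0.12 uses the constants below.
    let ok = *method == Method::GET || *method == Method::POST;

    // 0.11: Response::new().with_status(..).with_header(ContentType::json()).with_body(..)
    // 0.12: the builder pattern, with plain `HeaderValue`s instead of typed headers.
    Response::builder()
        .status(if ok { StatusCode::OK } else { StatusCode::BAD_REQUEST })
        .header(header::CONTENT_TYPE, HeaderValue::from_static("application/json"))
        .body(Body::from("{}"))
        .expect("status and header are statically valid; qed")
}

fn main() {
    assert_eq!(build_response(&Method::POST).status(), StatusCode::OK);
}
```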

* Bump the `jsonrpc-*` crates to 0.9 in:

    - `parity`

    - `ethcore/stratum`

    - `ipfs`

    - `rpc`

    - `rpc_client`

    - `whisper`

* Bump `ring` to 0.13

* Use a more graceful shutdown process in `secret_store` tests.

* Convert some mutexes to rwlocks in `secret_store`.
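
The motivation, as a minimal sketch with `parking_lot` (already a dependency of these crates): an `RwLock` lets readers share the lock while a `Mutex` serializes them. The map and key below are placeholders, not actual `secret_store` types:

```rust
extern crate parking_lot;

use std::collections::HashMap;
use parking_lot::RwLock;

fn main() {
    let sessions: RwLock<HashMap<u64, String>> = RwLock::new(HashMap::new());

    // Writers still take exclusive access...
    sessions.write().insert(1, "active".to_owned());
    // ...but any number of readers can now hold the lock concurrently,
    // where a Mutex would force them to take turns.
    let is_active = sessions.read().contains_key(&1);
    assert!(is_active);
}
```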

* Consolidate Tokio Runtime use, remove `CpuPool`.

* Rename and move the `parity-reactor` crate (`util/reactor`) to `parity-runtime` (`util/runtime`).

* Rename `EventLoop` to `Runtime`.

    - Rename `EventLoop::spawn` to `Runtime::with_default_thread_count`.

    - Add the `Runtime::with_thread_count` method.

    - Rename `Remote` to `Executor`.

* Remove uses of `CpuPool` and spawn all tasks via the `Runtime` executor
  instead.
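
A usage sketch of the renamed API, mirroring how the diff constructs the runtime and spawns work; `parity-runtime` is the renamed crate and the spawned task is illustrative:

```rust
extern crate futures;
extern crate parity_runtime;

use futures::future;
use parity_runtime::Runtime;

fn main() {
    // Formerly `EventLoop::spawn()`; `Runtime::with_thread_count(n)` also exists.
    let runtime = Runtime::with_default_thread_count();

    // Formerly `event_loop.remote()`; the executor can be cloned and handed out.
    let executor = runtime.executor();
    executor.spawn(future::lazy(|| {
        println!("task running on the shared runtime");
        Ok::<(), ()>(())
    }));

    // The runtime must be kept alive while spawned tasks may still run
    // (the PR stores it in the `keep_alive` tuple for exactly this reason).
}
```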

* Other changes related to `CpuPool` removal (a constructor sketch follows this list):

    - Remove `Reservations::with_pool`. `::new` now takes an `Executor` as an argument.

    - Remove `SenderReservations::with_pool`. `::new` now takes an `Executor` as an argument.
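
The shape of that constructor change, sketched with a stand-in type because `Reservations` and `SenderReservations` are internal to the RPC crate; `NonceManager` and `resolve_dropped` are hypothetical names, while `Runtime::with_thread_count` and `Executor` are used as in the updated tests:

```rust
extern crate futures;
extern crate parity_runtime;

use parity_runtime::{Executor, Runtime};

struct NonceManager {
    executor: Executor,
}

impl NonceManager {
    // Before: `fn new() -> Self { Self::with_pool(CpuPool::new(1)) }`.
    // After: the caller injects the shared executor.
    fn new(executor: Executor) -> Self {
        NonceManager { executor }
    }

    // Background work (e.g. resolving dropped reservations) is spawned on the
    // shared executor instead of a private CpuPool.
    fn resolve_dropped(&self) {
        self.executor.spawn(futures::future::ok::<(), ()>(()));
    }
}

fn main() {
    let runtime = Runtime::with_thread_count(1);
    let nonces = NonceManager::new(runtime.executor());
    nonces.resolve_dropped();
    // `runtime` must outlive anything spawned through its executor.
}
```
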
Author: Nick Sanders, 2018-10-22 00:40:50 -07:00 (committed by Afri Schoedon)
Parent: b8da38f4e4
Commit: 68ca8df22f
75 changed files with 2027 additions and 1671 deletions

Cargo.lock (generated): 1179 changed lines; diff suppressed because it is too large.


@ -28,10 +28,9 @@ serde = "1.0"
serde_json = "1.0"
serde_derive = "1.0"
futures = "0.1"
futures-cpupool = "0.1"
fdlimit = "0.1"
ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" }
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
ethcore = { path = "ethcore", features = ["parity"] }
parity-bytes = "0.1"
ethcore-io = { path = "util/io" }
@ -51,7 +50,7 @@ rpc-cli = { path = "rpc_cli" }
parity-hash-fetch = { path = "hash-fetch" }
parity-ipfs-api = { path = "ipfs" }
parity-local-store = { path = "local-store" }
parity-reactor = { path = "util/reactor" }
parity-runtime = { path = "util/runtime" }
parity-rpc = { path = "rpc" }
parity-rpc-client = { path = "rpc_client" }
parity-updater = { path = "updater" }
@ -138,6 +137,3 @@ members = [
"util/patricia-trie-ethereum",
"util/fastmap",
]
[patch.crates-io]
ring = { git = "https://github.com/paritytech/ring" }


@ -20,7 +20,7 @@ hashdb = "0.3.0"
memorydb = "0.3.0"
patricia-trie = "0.3.0"
patricia-trie-ethereum = { path = "../util/patricia-trie-ethereum" }
parity-crypto = "0.1"
parity-crypto = "0.2"
error-chain = { version = "0.12", default-features = false }
ethcore-io = { path = "../util/io" }
ethcore-logger = { path = "../logger" }


@ -12,7 +12,7 @@ ethabi-derive = "6.0"
ethabi-contract = "6.0"
ethcore = { path = ".." }
parity-bytes = "0.1"
parity-crypto = "0.1"
parity-crypto = "0.2"
ethcore-io = { path = "../../util/io" }
ethcore-logger = { path = "../../logger" }
ethcore-miner = { path = "../../miner" }


@ -125,9 +125,9 @@ impl SecretStoreEncryptor {
// send HTTP request
let method = if use_post {
Method::Post
Method::POST
} else {
Method::Get
Method::GET
};
let url = Url::from_str(&url).map_err(|e| ErrorKind::Encrypt(e.to_string()))?;


@ -8,14 +8,14 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
ethereum-types = "0.4"
keccak-hash = "0.1"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-tcp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-tcp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
log = "0.4"
parking_lot = "0.6"
[dev-dependencies]
env_logger = "0.5"
tokio-core = "0.1"
tokio = "0.1"
tokio-io = "0.1"
ethcore-logger = { path = "../../logger" }


@ -25,7 +25,7 @@ extern crate parking_lot;
#[macro_use] extern crate log;
#[cfg(test)] extern crate tokio_core;
#[cfg(test)] extern crate tokio;
#[cfg(test)] extern crate tokio_io;
#[cfg(test)] extern crate ethcore_logger;
@ -323,12 +323,10 @@ impl MetaExtractor<SocketMetadata> for PeerMetaExtractor {
#[cfg(test)]
mod tests {
use super::*;
use std::net::SocketAddr;
use std::net::{SocketAddr, Shutdown};
use std::sync::Arc;
use tokio_core::reactor::{Core, Timeout};
use tokio_core::net::TcpStream;
use tokio_io::io;
use tokio::{io, runtime::Runtime, timer::timeout::{self, Timeout}, net::TcpStream};
use jsonrpc_core::futures::{Future, future};
use ethcore_logger::init_log;
@ -342,23 +340,23 @@ mod tests {
}
fn dummy_request(addr: &SocketAddr, data: &str) -> Vec<u8> {
let mut core = Core::new().expect("Tokio Core should be created with no errors");
let mut buffer = vec![0u8; 2048];
let mut runtime = Runtime::new().expect("Tokio Runtime should be created with no errors");
let mut data_vec = data.as_bytes().to_vec();
data_vec.extend(b"\n");
let stream = TcpStream::connect(addr, &core.handle())
.and_then(|stream| {
io::write_all(stream, &data_vec)
let stream = TcpStream::connect(addr)
.and_then(move |stream| {
io::write_all(stream, data_vec)
})
.and_then(|(stream, _)| {
io::read(stream, &mut buffer)
stream.shutdown(Shutdown::Write).unwrap();
io::read_to_end(stream, Vec::with_capacity(2048))
})
.and_then(|(_, read_buf, len)| {
future::ok(read_buf[0..len].to_vec())
.and_then(|(_stream, read_buf)| {
future::ok(read_buf)
});
let result = core.run(stream).expect("Core should run with no errors");
let result = runtime.block_on(stream).expect("Runtime should run with no errors");
result
}
@ -417,7 +415,7 @@ mod tests {
}
#[test]
fn receives_initial_paylaod() {
fn receives_initial_payload() {
let addr = "127.0.0.1:19975".parse().unwrap();
let _stratum = Stratum::start(&addr, DummyManager::new(), None).expect("There should be no error starting stratum");
let request = r#"{"jsonrpc": "2.0", "method": "mining.subscribe", "params": [], "id": 2}"#;
@ -460,40 +458,43 @@ mod tests {
.to_vec();
auth_request.extend(b"\n");
let mut core = Core::new().expect("Tokio Core should be created with no errors");
let timeout1 = Timeout::new(::std::time::Duration::from_millis(100), &core.handle())
.expect("There should be a timeout produced in message test");
let timeout2 = Timeout::new(::std::time::Duration::from_millis(100), &core.handle())
.expect("There should be a timeout produced in message test");
let mut buffer = vec![0u8; 2048];
let mut buffer2 = vec![0u8; 2048];
let stream = TcpStream::connect(&addr, &core.handle())
.and_then(|stream| {
io::write_all(stream, &auth_request)
let auth_response = "{\"jsonrpc\":\"2.0\",\"result\":true,\"id\":1}\n";
let mut runtime = Runtime::new().expect("Tokio Runtime should be created with no errors");
let read_buf0 = vec![0u8; auth_response.len()];
let read_buf1 = Vec::with_capacity(2048);
let stream = TcpStream::connect(&addr)
.and_then(move |stream| {
io::write_all(stream, auth_request)
})
.and_then(|(stream, _)| {
io::read(stream, &mut buffer)
io::read_exact(stream, read_buf0)
})
.and_then(|(stream, _, _)| {
.map_err(|err| panic!("{:?}", err))
.and_then(move |(stream, read_buf0)| {
assert_eq!(String::from_utf8(read_buf0).unwrap(), auth_response);
trace!(target: "stratum", "Received authorization confirmation");
timeout1.join(future::ok(stream))
Timeout::new(future::ok(stream), ::std::time::Duration::from_millis(100))
})
.and_then(|(_, stream)| {
.map_err(|err: timeout::Error<()>| panic!("Timeout: {:?}", err))
.and_then(move |stream| {
trace!(target: "stratum", "Pusing work to peers");
stratum.push_work_all(r#"{ "00040008", "100500" }"#.to_owned())
.expect("Pushing work should produce no errors");
timeout2.join(future::ok(stream))
Timeout::new(future::ok(stream), ::std::time::Duration::from_millis(100))
})
.and_then(|(_, stream)| {
.map_err(|err: timeout::Error<()>| panic!("Timeout: {:?}", err))
.and_then(|stream| {
trace!(target: "stratum", "Ready to read work from server");
io::read(stream, &mut buffer2)
stream.shutdown(Shutdown::Write).unwrap();
io::read_to_end(stream, read_buf1)
})
.and_then(|(_, read_buf, len)| {
.and_then(|(_, read_buf1)| {
trace!(target: "stratum", "Received work from server");
future::ok(read_buf[0..len].to_vec())
future::ok(read_buf1)
});
let response = String::from_utf8(
core.run(stream).expect("Core should run with no errors")
runtime.block_on(stream).expect("Runtime should run with no errors")
).expect("Response should be utf-8");
assert_eq!(


@ -6,7 +6,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
byteorder = "1.0"
edit-distance = "2.0"
parity-crypto = "0.1"
parity-crypto = "0.2"
eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" }
ethereum-types = "0.4"
lazy_static = "1.0"


@ -16,7 +16,7 @@ tiny-keccak = "1.4"
time = "0.1.34"
itertools = "0.5"
parking_lot = "0.6"
parity-crypto = "0.1"
parity-crypto = "0.2"
ethereum-types = "0.4"
dir = { path = "../util/dir" }
smallvec = "0.6"


@ -8,7 +8,6 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
futures = "0.1"
futures-cpupool = "0.1"
log = "0.4"
mime = "0.3"
mime_guess = "2.0.0-alpha.2"
@ -17,7 +16,7 @@ rustc-hex = "1.0"
fetch = { path = "../util/fetch" }
parity-bytes = "0.1"
ethereum-types = "0.4"
parity-reactor = { path = "../util/reactor" }
parity-runtime = { path = "../util/runtime" }
keccak-hash = "0.1"
registrar = { path = "../registrar" }
@ -26,6 +25,5 @@ ethabi-derive = "6.0"
ethabi-contract = "6.0"
[dev-dependencies]
hyper = "0.11"
parking_lot = "0.6"
fake-fetch = { path = "../util/fake-fetch" }


@ -23,9 +23,8 @@ use std::path::PathBuf;
use hash::keccak_buffer;
use fetch::{self, Fetch};
use futures_cpupool::CpuPool;
use futures::{Future, IntoFuture};
use parity_reactor::Remote;
use parity_runtime::Executor;
use urlhint::{URLHintContract, URLHint, URLHintResult};
use registrar::{RegistrarClient, Asynchronous};
use ethereum_types::H256;
@ -109,21 +108,19 @@ fn validate_hash(path: PathBuf, hash: H256, body: fetch::BodyReader) -> Result<P
/// Default Hash-fetching client using on-chain contract to resolve hashes to URLs.
pub struct Client<F: Fetch + 'static = fetch::Client> {
pool: CpuPool,
contract: URLHintContract,
fetch: F,
remote: Remote,
executor: Executor,
random_path: Arc<Fn() -> PathBuf + Sync + Send>,
}
impl<F: Fetch + 'static> Client<F> {
/// Creates new instance of the `Client` given on-chain contract client, fetch service and task runner.
pub fn with_fetch(contract: Arc<RegistrarClient<Call=Asynchronous>>, pool: CpuPool, fetch: F, remote: Remote) -> Self {
pub fn with_fetch(contract: Arc<RegistrarClient<Call=Asynchronous>>, fetch: F, executor: Executor) -> Self {
Client {
pool,
contract: URLHintContract::new(contract),
fetch: fetch,
remote: remote,
executor: executor,
random_path: Arc::new(random_temp_path),
}
}
@ -135,7 +132,6 @@ impl<F: Fetch + 'static> HashFetch for Client<F> {
let random_path = self.random_path.clone();
let remote_fetch = self.fetch.clone();
let pool = self.pool.clone();
let future = self.contract.resolve(hash)
.map_err(|e| { warn!("Error resolving URL: {}", e); Error::NoResolution })
.and_then(|maybe_url| maybe_url.ok_or(Error::NoResolution))
@ -162,7 +158,7 @@ impl<F: Fetch + 'static> HashFetch for Client<F> {
Ok(response)
}
})
.and_then(move |response| pool.spawn_fn(move || {
.and_then(move |response| {
debug!(target: "fetch", "Content fetched, validating hash ({:?})", hash);
let path = random_path();
let res = validate_hash(path.clone(), hash, fetch::BodyReader::new(response));
@ -172,10 +168,10 @@ impl<F: Fetch + 'static> HashFetch for Client<F> {
let _ = fs::remove_file(&path);
}
res
}))
})
.then(move |res| { on_done(res); Ok(()) as Result<(), ()> });
self.remote.spawn(future);
self.executor.spawn(future);
}
}
@ -197,8 +193,7 @@ mod tests {
use rustc_hex::FromHex;
use std::sync::{Arc, mpsc};
use parking_lot::Mutex;
use futures_cpupool::CpuPool;
use parity_reactor::Remote;
use parity_runtime::Executor;
use urlhint::tests::{FakeRegistrar, URLHINT};
use super::{Error, Client, HashFetch, random_temp_path};
@ -216,7 +211,7 @@ mod tests {
// given
let contract = Arc::new(FakeRegistrar::new());
let fetch = FakeFetch::new(None::<usize>);
let client = Client::with_fetch(contract.clone(), CpuPool::new(1), fetch, Remote::new_sync());
let client = Client::with_fetch(contract.clone(), fetch, Executor::new_sync());
// when
let (tx, rx) = mpsc::channel();
@ -234,7 +229,7 @@ mod tests {
// given
let registrar = Arc::new(registrar());
let fetch = FakeFetch::new(None::<usize>);
let client = Client::with_fetch(registrar.clone(), CpuPool::new(1), fetch, Remote::new_sync());
let client = Client::with_fetch(registrar.clone(), fetch, Executor::new_sync());
// when
let (tx, rx) = mpsc::channel();
@ -252,7 +247,7 @@ mod tests {
// given
let registrar = Arc::new(registrar());
let fetch = FakeFetch::new(Some(1));
let mut client = Client::with_fetch(registrar.clone(), CpuPool::new(1), fetch, Remote::new_sync());
let mut client = Client::with_fetch(registrar.clone(), fetch, Executor::new_sync());
let path = random_temp_path();
let path2 = path.clone();
client.random_path = Arc::new(move || path2.clone());
@ -275,7 +270,7 @@ mod tests {
// given
let registrar = Arc::new(registrar());
let fetch = FakeFetch::new(Some(1));
let client = Client::with_fetch(registrar.clone(), CpuPool::new(1), fetch, Remote::new_sync());
let client = Client::with_fetch(registrar.clone(), fetch, Executor::new_sync());
// when
let (tx, rx) = mpsc::channel();


@ -25,11 +25,10 @@ extern crate ethabi;
extern crate parity_bytes as bytes;
extern crate ethereum_types;
extern crate futures;
extern crate futures_cpupool;
extern crate keccak_hash as hash;
extern crate mime;
extern crate mime_guess;
extern crate parity_reactor;
extern crate parity_runtime;
extern crate rand;
extern crate rustc_hex;
extern crate registrar;
@ -43,8 +42,6 @@ extern crate ethabi_contract;
#[cfg(test)]
extern crate parking_lot;
#[cfg(test)]
extern crate hyper;
#[cfg(test)]
extern crate fake_fetch;
mod client;


@ -9,11 +9,11 @@ authors = ["Parity Technologies <admin@parity.io>"]
ethcore = { path = "../ethcore" }
parity-bytes = "0.1"
ethereum-types = "0.4"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
rlp = { version = "0.3.0", features = ["ethereum"] }
cid = "0.2"
multihash = "0.7"
cid = "0.3"
multihash = "0.8"
unicase = "2.0"
[dev-dependencies]


@ -30,6 +30,42 @@ pub enum ServerError {
InvalidInterface
}
/// Handle IO errors (ports taken when starting the server).
impl From<::std::io::Error> for ServerError {
fn from(err: ::std::io::Error) -> ServerError {
ServerError::IoError(err)
}
}
impl From<http::hyper::error::Error> for ServerError {
fn from(err: http::hyper::error::Error) -> ServerError {
ServerError::Other(err)
}
}
impl From<ServerError> for String {
fn from(err: ServerError) -> String {
match err {
ServerError::IoError(err) => err.to_string(),
ServerError::Other(err) => err.to_string(),
ServerError::InvalidInterface => "Invalid --ipfs-api-interface parameter".into(),
}
}
}
impl ::std::fmt::Display for ServerError {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
match self {
ServerError::IoError(err) => write!(f, "Io Error: {}", err),
ServerError::Other(err) => write!(f, "Other error: {}", err),
ServerError::InvalidInterface => write!(f, "Invalid interface"),
}
}
}
impl ::std::error::Error for ServerError {}
#[derive(Debug, PartialEq)]
pub enum Error {
CidParsingFailed,
@ -72,26 +108,3 @@ impl From<multihash::Error> for Error {
Error::CidParsingFailed
}
}
/// Handle IO errors (ports taken when starting the server).
impl From<::std::io::Error> for ServerError {
fn from(err: ::std::io::Error) -> ServerError {
ServerError::IoError(err)
}
}
impl From<http::hyper::error::Error> for ServerError {
fn from(err: http::hyper::error::Error) -> ServerError {
ServerError::Other(err)
}
}
impl From<ServerError> for String {
fn from(err: ServerError) -> String {
match err {
ServerError::IoError(err) => err.to_string(),
ServerError::Other(err) => err.to_string(),
ServerError::InvalidInterface => "Invalid --ipfs-api-interface parameter".into(),
}
}
}


@ -35,10 +35,9 @@ use std::net::{SocketAddr, IpAddr};
use core::futures::future::{self, FutureResult};
use core::futures::{self, Future};
use ethcore::client::BlockChainClient;
use http::hyper::header::{self, Vary, ContentType};
use http::hyper::{Method, StatusCode};
use http::hyper::{self, server};
use unicase::Ascii;
use http::hyper::{self, server, Method, StatusCode, Body,
header::{self, HeaderValue},
};
use error::ServerError;
use route::Out;
@ -67,9 +66,9 @@ impl IpfsHandler {
client: client,
}
}
pub fn on_request(&self, req: hyper::Request) -> (Option<header::AccessControlAllowOrigin>, Out) {
pub fn on_request(&self, req: hyper::Request<Body>) -> (Option<HeaderValue>, Out) {
match *req.method() {
Method::Get | Method::Post => {},
Method::GET | Method::POST => {},
_ => return (None, Out::Bad("Invalid Request")),
}
@ -77,8 +76,8 @@ impl IpfsHandler {
return (None, Out::Bad("Disallowed Host header"));
}
let cors_header = http::cors_header(&req, &self.cors_domains);
if cors_header == http::CorsHeader::Invalid {
let cors_header = http::cors_allow_origin(&req, &self.cors_domains);
if cors_header == http::AllowCors::Invalid {
return (None, Out::Bad("Disallowed Origin header"));
}
@ -88,39 +87,39 @@ impl IpfsHandler {
}
}
impl server::Service for IpfsHandler {
type Request = hyper::Request;
type Response = hyper::Response;
impl hyper::service::Service for IpfsHandler {
type ReqBody = Body;
type ResBody = Body;
type Error = hyper::Error;
type Future = FutureResult<hyper::Response, hyper::Error>;
type Future = FutureResult<hyper::Response<Body>, Self::Error>;
fn call(&self, request: Self::Request) -> Self::Future {
fn call(&mut self, request: hyper::Request<Self::ReqBody>) -> Self::Future {
let (cors_header, out) = self.on_request(request);
let mut res = match out {
Out::OctetStream(bytes) => {
hyper::Response::new()
.with_status(StatusCode::Ok)
.with_header(ContentType::octet_stream())
.with_body(bytes)
hyper::Response::builder()
.status(StatusCode::OK)
.header("content-type", HeaderValue::from_static("application/octet-stream"))
.body(bytes.into())
},
Out::NotFound(reason) => {
hyper::Response::new()
.with_status(StatusCode::NotFound)
.with_header(ContentType::plaintext())
.with_body(reason)
hyper::Response::builder()
.status(StatusCode::NOT_FOUND)
.header("content-type", HeaderValue::from_static("text/plain; charset=utf-8"))
.body(reason.into())
},
Out::Bad(reason) => {
hyper::Response::new()
.with_status(StatusCode::BadRequest)
.with_header(ContentType::plaintext())
.with_body(reason)
hyper::Response::builder()
.status(StatusCode::BAD_REQUEST)
.header("content-type", HeaderValue::from_static("text/plain; charset=utf-8"))
.body(reason.into())
}
};
}.expect("Response builder: Parsing 'content-type' header name will not fail; qed");
if let Some(cors_header) = cors_header {
res.headers_mut().set(cors_header);
res.headers_mut().set(Vary::Items(vec![Ascii::new("Origin".into())]));
res.headers_mut().append(header::ACCESS_CONTROL_ALLOW_ORIGIN, cors_header);
res.headers_mut().append(header::VARY, HeaderValue::from_static("origin"));
}
future::ok(res)
@ -164,23 +163,32 @@ pub fn start_server(
let hosts: DomainsValidation<_> = hosts.map(move |hosts| include_current_interface(hosts, interface, port)).into();
let (close, shutdown_signal) = futures::sync::oneshot::channel::<()>();
let (tx, rx) = mpsc::sync_channel(1);
let (tx, rx) = mpsc::sync_channel::<Result<(), ServerError>>(1);
let thread = thread::spawn(move || {
let send = |res| tx.send(res).expect("rx end is never dropped; qed");
let server = match server::Http::new().bind(&addr, move || {
Ok(IpfsHandler::new(cors.clone(), hosts.clone(), client.clone()))
}) {
Ok(server) => {
send(Ok(()));
server
},
let server_bldr = match server::Server::try_bind(&addr) {
Ok(s) => s,
Err(err) => {
send(Err(err));
send(Err(ServerError::from(err)));
return;
}
};
let _ = server.run_until(shutdown_signal.map_err(|_| {}));
let new_service = move || {
Ok::<_, ServerError>(
IpfsHandler::new(cors.clone(), hosts.clone(), client.clone())
)
};
let server = server_bldr
.serve(new_service)
.map_err(|_| ())
.select(shutdown_signal.map_err(|_| ()))
.then(|_| Ok(()));
hyper::rt::run(server);
send(Ok(()));
});
// Wait for server to start successfuly.


@ -10,8 +10,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
# Only work_notify, consider a separate crate
ethash = { path = "../ethash", optional = true }
fetch = { path = "../util/fetch", optional = true }
hyper = { version = "0.11", optional = true }
parity-reactor = { path = "../util/reactor", optional = true }
hyper = { version = "0.12", optional = true }
url = { version = "1", optional = true }
# Miner
@ -20,7 +19,7 @@ error-chain = "0.12"
ethcore-transaction = { path = "../ethcore/transaction" }
ethereum-types = "0.4"
futures = "0.1"
futures-cpupool = "0.1"
parity-runtime = { path = "../util/runtime" }
heapsize = "0.4"
keccak-hash = "0.1"
linked-hash-map = "0.5"
@ -37,4 +36,4 @@ ethkey = { path = "../ethkey" }
rustc-hex = "1.0"
[features]
work-notify = ["ethash", "fetch", "hyper", "parity-reactor", "url"]
work-notify = ["ethash", "fetch", "hyper", "url"]


@ -20,7 +20,7 @@ use std::time::{Instant, Duration};
use ansi_term::Colour;
use ethereum_types::U256;
use futures_cpupool::CpuPool;
use parity_runtime::Executor;
use price_info::{Client as PriceInfoClient, PriceInfo};
use price_info::fetch::Client as FetchClient;
@ -43,7 +43,7 @@ pub struct GasPriceCalibrator {
impl GasPriceCalibrator {
/// Create a new gas price calibrator.
pub fn new(options: GasPriceCalibratorOptions, fetch: FetchClient, p: CpuPool) -> GasPriceCalibrator {
pub fn new(options: GasPriceCalibratorOptions, fetch: FetchClient, p: Executor) -> GasPriceCalibrator {
GasPriceCalibrator {
options: options,
next_calibration: Instant::now(),


@ -23,7 +23,7 @@ extern crate ansi_term;
extern crate ethcore_transaction as transaction;
extern crate ethereum_types;
extern crate futures;
extern crate futures_cpupool;
extern crate parity_runtime;
extern crate heapsize;
extern crate keccak_hash as hash;
extern crate linked_hash_map;


@ -18,18 +18,19 @@
extern crate ethash;
extern crate fetch;
extern crate parity_reactor;
extern crate parity_runtime;
extern crate url;
extern crate hyper;
use self::fetch::{Fetch, Request, Client as FetchClient, Method};
use self::parity_reactor::Remote;
use self::parity_runtime::Executor;
use self::ethash::SeedHashCompute;
use self::url::Url;
use self::hyper::header::ContentType;
use self::hyper::header::{self, HeaderValue};
use ethereum_types::{H256, U256};
use parking_lot::Mutex;
use futures::Future;
/// Trait for notifying about new mining work
@ -42,13 +43,13 @@ pub trait NotifyWork : Send + Sync {
pub struct WorkPoster {
urls: Vec<Url>,
client: FetchClient,
remote: Remote,
executor: Executor,
seed_compute: Mutex<SeedHashCompute>,
}
impl WorkPoster {
/// Create new `WorkPoster`.
pub fn new(urls: &[String], fetch: FetchClient, remote: Remote) -> Self {
pub fn new(urls: &[String], fetch: FetchClient, executor: Executor) -> Self {
let urls = urls.into_iter().filter_map(|u| {
match Url::parse(u) {
Ok(url) => Some(url),
@ -60,7 +61,7 @@ impl WorkPoster {
}).collect();
WorkPoster {
client: fetch,
remote: remote,
executor: executor,
urls: urls,
seed_compute: Mutex::new(SeedHashCompute::default()),
}
@ -80,9 +81,9 @@ impl NotifyWork for WorkPoster {
for u in &self.urls {
let u = u.clone();
self.remote.spawn(self.client.fetch(
Request::new(u.clone(), Method::Post)
.with_header(ContentType::json())
self.executor.spawn(self.client.fetch(
Request::new(u.clone(), Method::POST)
.with_header(header::CONTENT_TYPE, HeaderValue::from_static("application/json"))
.with_body(body.clone()), Default::default()
).map_err(move |e| {
warn!("Error sending HTTP notification to {} : {}, retrying", u, e);


@ -25,7 +25,6 @@ extern crate clap;
extern crate dir;
extern crate env_logger;
extern crate futures;
extern crate futures_cpupool;
extern crate atty;
extern crate jsonrpc_core;
extern crate num_cpus;
@ -60,7 +59,7 @@ extern crate kvdb;
extern crate parity_hash_fetch as hash_fetch;
extern crate parity_ipfs_api;
extern crate parity_local_store as local_store;
extern crate parity_reactor;
extern crate parity_runtime;
extern crate parity_rpc;
extern crate parity_updater as updater;
extern crate parity_version;


@ -29,7 +29,7 @@ use light::TransactionQueue;
use futures::{future, Future};
use parity_reactor::Remote;
use parity_runtime::Executor;
use parking_lot::RwLock;
@ -50,8 +50,8 @@ pub struct QueueCull<T> {
pub on_demand: Arc<OnDemand>,
/// The transaction queue.
pub txq: Arc<RwLock<TransactionQueue>>,
/// Event loop remote.
pub remote: Remote,
/// Event loop executor.
pub executor: Executor,
}
impl<T: LightChainClient + 'static> IoHandler<ClientIoMessage> for QueueCull<T> {
@ -70,7 +70,7 @@ impl<T: LightChainClient + 'static> IoHandler<ClientIoMessage> for QueueCull<T>
let start_nonce = self.client.engine().account_start_nonce(best_header.number());
info!(target: "cull", "Attempting to cull queued transactions from {} senders.", senders.len());
self.remote.spawn_with_timeout(move |_| {
self.executor.spawn_with_timeout(move || {
let maybe_fetching = sync.with_context(move |ctx| {
// fetch the nonce of each sender in the queue.
let nonce_reqs = senders.iter()


@ -21,7 +21,7 @@ use ethcore::client::Mode;
use ethcore::ethereum;
use ethcore::spec::{Spec, SpecParams};
use ethereum_types::{U256, Address};
use futures_cpupool::CpuPool;
use parity_runtime::Executor;
use hash_fetch::fetch::Client as FetchClient;
use journaldb::Algorithm;
use miner::gas_pricer::GasPricer;
@ -256,7 +256,7 @@ impl Default for GasPricerConfig {
}
impl GasPricerConfig {
pub fn to_gas_pricer(&self, fetch: FetchClient, p: CpuPool) -> GasPricer {
pub fn to_gas_pricer(&self, fetch: FetchClient, p: Executor) -> GasPricer {
match *self {
GasPricerConfig::Fixed(u) => GasPricer::Fixed(u),
GasPricerConfig::Calibrated { usd_per_tx, recalibration_period, .. } => {


@ -23,14 +23,13 @@ use dir::default_data_path;
use dir::helpers::replace_home;
use helpers::parity_ipc_path;
use jsonrpc_core::MetaIoHandler;
use parity_reactor::TokioRemote;
use parity_runtime::Executor;
use parity_rpc::informant::{RpcStats, Middleware};
use parity_rpc::{self as rpc, Metadata, DomainsValidation};
use rpc_apis::{self, ApiSet};
pub use parity_rpc::{IpcServer, HttpServer, RequestMiddleware};
pub use parity_rpc::ws::Server as WsServer;
pub use parity_rpc::informant::CpuPool;
pub const DAPPS_DOMAIN: &'static str = "web3.site";
@ -134,9 +133,8 @@ fn address(enabled: bool, bind_iface: &str, bind_port: u16, hosts: &Option<Vec<S
pub struct Dependencies<D: rpc_apis::Dependencies> {
pub apis: Arc<D>,
pub remote: TokioRemote,
pub executor: Executor,
pub stats: Arc<RpcStats>,
pub pool: Option<CpuPool>,
}
pub fn new_ws<D: rpc_apis::Dependencies>(
@ -155,7 +153,7 @@ pub fn new_ws<D: rpc_apis::Dependencies>(
let handler = {
let mut handler = MetaIoHandler::with_middleware((
rpc::WsDispatcher::new(full_handler),
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier())
));
let apis = conf.apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);
@ -163,7 +161,6 @@ pub fn new_ws<D: rpc_apis::Dependencies>(
handler
};
let remote = deps.remote.clone();
let allowed_origins = into_domains(with_domain(conf.origins, domain, &None));
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into())));
@ -178,7 +175,6 @@ pub fn new_ws<D: rpc_apis::Dependencies>(
let start_result = rpc::start_ws(
&addr,
handler,
remote.clone(),
allowed_origins,
allowed_hosts,
conf.max_connections,
@ -210,7 +206,6 @@ pub fn new_http<D: rpc_apis::Dependencies>(
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid {} listen host/port given: {}", id, url))?;
let handler = setup_apis(conf.apis, deps);
let remote = deps.remote.clone();
let cors_domains = into_domains(conf.cors);
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into())));
@ -220,7 +215,6 @@ pub fn new_http<D: rpc_apis::Dependencies>(
cors_domains,
allowed_hosts,
handler,
remote,
rpc::RpcExtractor,
conf.server_threads,
conf.max_payload,
@ -244,7 +238,6 @@ pub fn new_ipc<D: rpc_apis::Dependencies>(
}
let handler = setup_apis(conf.apis, dependencies);
let remote = dependencies.remote.clone();
let path = PathBuf::from(&conf.socket_addr);
// Make sure socket file can be created on unix-like OS.
// Windows pipe paths are not on the FS.
@ -255,7 +248,7 @@ pub fn new_ipc<D: rpc_apis::Dependencies>(
}
}
match rpc::start_ipc(&conf.socket_addr, handler, remote, rpc::RpcExtractor) {
match rpc::start_ipc(&conf.socket_addr, handler, rpc::RpcExtractor) {
Ok(server) => Ok(Some(server)),
Err(io_error) => Err(format!("IPC error: {}", io_error)),
}
@ -294,7 +287,7 @@ pub fn setup_apis<D>(apis: ApiSet, deps: &Dependencies<D>) -> MetaIoHandler<Meta
where D: rpc_apis::Dependencies
{
let mut handler = MetaIoHandler::with_middleware(
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier())
);
let apis = apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);


@ -28,13 +28,12 @@ use ethcore::miner::Miner;
use ethcore::snapshot::SnapshotService;
use ethcore_logger::RotatingLogger;
use sync::{ManageNetwork, SyncProvider, LightSync};
use futures_cpupool::CpuPool;
use hash_fetch::fetch::Client as FetchClient;
use jsonrpc_core::{self as core, MetaIoHandler};
use light::client::LightChainClient;
use light::{TransactionQueue as LightTransactionQueue, Cache as LightDataCache};
use miner::external::ExternalMiner;
use parity_reactor;
use parity_runtime::Executor;
use parity_rpc::dispatch::{FullDispatcher, LightDispatcher};
use parity_rpc::informant::{ActivityNotifier, ClientNotifier};
use parity_rpc::{Metadata, NetworkSettings, Host};
@ -231,8 +230,7 @@ pub struct FullDependencies {
pub geth_compatibility: bool,
pub ws_address: Option<Host>,
pub fetch: FetchClient,
pub pool: CpuPool,
pub remote: parity_reactor::Remote,
pub executor: Executor,
pub whisper_rpc: Option<::whisper::RpcFactory>,
pub gas_price_percentile: usize,
pub poll_lifetime: u32,
@ -253,7 +251,7 @@ impl FullDependencies {
let deps = &$deps;
let dispatcher = FullDispatcher::new(deps.client.clone(), deps.miner.clone(), $nonces, deps.gas_price_percentile);
if deps.signer_service.is_enabled() {
$handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, deps.remote.clone(), &deps.secret_store)))
$handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, deps.executor.clone(), &deps.secret_store)))
} else {
$handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new(&deps.secret_store, dispatcher)))
}
@ -261,7 +259,7 @@ impl FullDependencies {
}
}
let nonces = Arc::new(Mutex::new(dispatch::Reservations::with_pool(self.pool.clone())));
let nonces = Arc::new(Mutex::new(dispatch::Reservations::new(self.executor.clone())));
let dispatcher = FullDispatcher::new(
self.client.clone(),
self.miner.clone(),
@ -306,7 +304,7 @@ impl FullDependencies {
},
Api::EthPubSub => {
if !for_generic_pubsub {
let client = EthPubSubClient::new(self.client.clone(), self.remote.clone());
let client = EthPubSubClient::new(self.client.clone(), self.executor.clone());
let h = client.handler();
self.miner.add_transactions_listener(Box::new(move |hashes| if let Some(h) = h.upgrade() {
h.notify_new_transactions(hashes);
@ -322,7 +320,7 @@ impl FullDependencies {
handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate());
},
Api::Signer => {
handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service, self.remote.clone()).to_delegate());
handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service, self.executor.clone()).to_delegate());
},
Api::Parity => {
let signer = match self.signer_service.is_enabled() {
@ -351,7 +349,7 @@ impl FullDependencies {
let mut rpc = MetaIoHandler::default();
let apis = ApiSet::List(apis.clone()).retain(ApiSet::PubSub).list_apis();
self.extend_api(&mut rpc, &apis, true);
handler.extend_with(PubSubClient::new(rpc, self.remote.clone()).to_delegate());
handler.extend_with(PubSubClient::new(rpc, self.executor.clone()).to_delegate());
}
},
Api::ParityAccounts => {
@ -364,7 +362,6 @@ impl FullDependencies {
&self.updater,
&self.net_service,
self.fetch.clone(),
self.pool.clone(),
).to_delegate())
},
Api::Traces => {
@ -440,9 +437,8 @@ pub struct LightDependencies<T> {
pub transaction_queue: Arc<RwLock<LightTransactionQueue>>,
pub ws_address: Option<Host>,
pub fetch: FetchClient,
pub pool: CpuPool,
pub geth_compatibility: bool,
pub remote: parity_reactor::Remote,
pub executor: Executor,
pub whisper_rpc: Option<::whisper::RpcFactory>,
pub private_tx_service: Option<Arc<PrivateTransactionManager>>,
pub gas_price_percentile: usize,
@ -464,7 +460,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
self.on_demand.clone(),
self.cache.clone(),
self.transaction_queue.clone(),
Arc::new(Mutex::new(dispatch::Reservations::with_pool(self.pool.clone()))),
Arc::new(Mutex::new(dispatch::Reservations::new(self.executor.clone()))),
self.gas_price_percentile,
);
@ -476,7 +472,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
let secret_store = deps.secret_store.clone();
if deps.signer_service.is_enabled() {
$handler.extend_with($namespace::to_delegate(
SigningQueueClient::new(&deps.signer_service, dispatcher, deps.remote.clone(), &secret_store)
SigningQueueClient::new(&deps.signer_service, dispatcher, deps.executor.clone(), &secret_store)
))
} else {
$handler.extend_with(
@ -522,7 +518,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
self.on_demand.clone(),
self.sync.clone(),
self.cache.clone(),
self.remote.clone(),
self.executor.clone(),
self.gas_price_percentile,
);
self.client.add_listener(client.handler() as Weak<_>);
@ -538,7 +534,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate());
},
Api::Signer => {
handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service, self.remote.clone()).to_delegate());
handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service, self.executor.clone()).to_delegate());
},
Api::Parity => {
let signer = match self.signer_service.is_enabled() {
@ -565,7 +561,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
let mut rpc = MetaIoHandler::default();
let apis = ApiSet::List(apis.clone()).retain(ApiSet::PubSub).list_apis();
self.extend_api(&mut rpc, &apis, true);
handler.extend_with(PubSubClient::new(rpc, self.remote.clone()).to_delegate());
handler.extend_with(PubSubClient::new(rpc, self.executor.clone()).to_delegate());
}
},
Api::ParityAccounts => {
@ -575,7 +571,6 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
handler.extend_with(light::ParitySetClient::new(
self.sync.clone(),
self.fetch.clone(),
self.pool.clone(),
).to_delegate())
},
Api::Traces => {


@ -34,14 +34,13 @@ use ethereum_types::Address;
use sync::{self, SyncConfig};
use miner::work_notify::WorkPoster;
use futures::IntoFuture;
use futures_cpupool::CpuPool;
use hash_fetch::{self, fetch};
use informant::{Informant, LightNodeInformantData, FullNodeInformantData};
use journaldb::Algorithm;
use light::Cache as LightDataCache;
use miner::external::ExternalMiner;
use node_filter::NodeFilter;
use parity_reactor::EventLoop;
use parity_runtime::Runtime;
use parity_rpc::{Origin, Metadata, NetworkSettings, informant, is_major_importing};
use updater::{UpdatePolicy, Updater};
use parity_version::version;
@ -270,7 +269,7 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
*sync_handle.write() = Arc::downgrade(&light_sync);
// spin up event loop
let event_loop = EventLoop::spawn();
let runtime = Runtime::with_default_thread_count();
// queue cull service.
let queue_cull = Arc::new(::light_helpers::QueueCull {
@ -278,7 +277,7 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
sync: light_sync.clone(),
on_demand: on_demand.clone(),
txq: txq.clone(),
remote: event_loop.remote(),
executor: runtime.executor(),
});
service.register_handler(queue_cull).map_err(|e| format!("Error attaching service: {:?}", e))?;
@ -286,8 +285,6 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
// start the network.
light_sync.start_network();
let cpu_pool = CpuPool::new(4);
// fetch service
let fetch = fetch::Client::new(FETCH_LIGHT_NUM_DNS_THREADS).map_err(|e| format!("Error starting fetch client: {:?}", e))?;
let passwords = passwords_from_files(&cmd.acc_conf.password_files)?;
@ -313,9 +310,8 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
transaction_queue: txq,
ws_address: cmd.ws_conf.address(),
fetch: fetch,
pool: cpu_pool.clone(),
geth_compatibility: cmd.geth_compatibility,
remote: event_loop.remote(),
executor: runtime.executor(),
whisper_rpc: whisper_factory,
private_tx_service: None, //TODO: add this to client.
gas_price_percentile: cmd.gas_price_percentile,
@ -324,13 +320,8 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
let dependencies = rpc::Dependencies {
apis: deps_for_rpc_apis.clone(),
remote: event_loop.raw_remote(),
executor: runtime.executor(),
stats: rpc_stats.clone(),
pool: if cmd.http_conf.processing_threads > 0 {
Some(rpc::CpuPool::new(cmd.http_conf.processing_threads))
} else {
None
},
};
// start rpc servers
@ -358,7 +349,7 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
rpc: rpc_direct,
informant,
client,
keep_alive: Box::new((event_loop, service, ws_server, http_server, ipc_server)),
keep_alive: Box::new((runtime, service, ws_server, http_server, ipc_server)),
}
})
}
@ -477,10 +468,8 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
// prepare account provider
let account_provider = Arc::new(prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?);
let cpu_pool = CpuPool::new(4);
// spin up event loop
let event_loop = EventLoop::spawn();
let runtime = Runtime::with_default_thread_count();
// fetch service
let fetch = fetch::Client::new(FETCH_FULL_NUM_DNS_THREADS).map_err(|e| format!("Error starting fetch client: {:?}", e))?;
@ -489,7 +478,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
// create miner
let miner = Arc::new(Miner::new(
cmd.miner_options,
cmd.gas_pricer_conf.to_gas_pricer(fetch.clone(), cpu_pool.clone()),
cmd.gas_pricer_conf.to_gas_pricer(fetch.clone(), runtime.executor()),
&spec,
Some(account_provider.clone()),
@ -500,7 +489,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
if !cmd.miner_extras.work_notify.is_empty() {
miner.add_work_listener(Box::new(
WorkPoster::new(&cmd.miner_extras.work_notify, fetch.clone(), event_loop.remote())
WorkPoster::new(&cmd.miner_extras.work_notify, fetch.clone(), runtime.executor())
));
}
@ -698,7 +687,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
&Arc::downgrade(&(service.client() as Arc<BlockChainClient>)),
&Arc::downgrade(&sync_provider),
update_policy,
hash_fetch::Client::with_fetch(contract_client.clone(), cpu_pool.clone(), updater_fetch, event_loop.remote())
hash_fetch::Client::with_fetch(contract_client.clone(), updater_fetch, runtime.executor())
);
service.add_notify(updater.clone());
@ -723,8 +712,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
geth_compatibility: cmd.geth_compatibility,
ws_address: cmd.ws_conf.address(),
fetch: fetch.clone(),
pool: cpu_pool.clone(),
remote: event_loop.remote(),
executor: runtime.executor(),
whisper_rpc: whisper_factory,
private_tx_service: Some(private_tx_service.clone()),
gas_price_percentile: cmd.gas_price_percentile,
@ -733,14 +721,8 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
let dependencies = rpc::Dependencies {
apis: deps_for_rpc_apis.clone(),
remote: event_loop.raw_remote(),
executor: runtime.executor(),
stats: rpc_stats.clone(),
pool: if cmd.http_conf.processing_threads > 0 {
Some(rpc::CpuPool::new(cmd.http_conf.processing_threads))
} else {
None
},
};
// start rpc servers
@ -820,7 +802,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
informant,
client,
client_service: Arc::new(service),
keep_alive: Box::new((watcher, updater, ws_server, http_server, ipc_server, secretstore_key_server, ipfs_server, event_loop)),
keep_alive: Box::new((watcher, updater, ws_server, http_server, ipc_server, secretstore_key_server, ipfs_server, runtime)),
}
})
}


@ -9,11 +9,10 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
fetch = { path = "../util/fetch" }
futures = "0.1"
futures-cpupool = "0.1"
parity-runtime = { path = "../util/runtime" }
log = "0.4"
serde_json = "1.0"
[dev-dependencies]
hyper = "0.11"
parking_lot = "0.6"
fake-fetch = { path = "../util/fake-fetch" }


@ -19,8 +19,8 @@
//! A simple client to get the current ETH price using an external API.
extern crate futures;
extern crate futures_cpupool;
extern crate serde_json;
extern crate parity_runtime;
#[macro_use]
extern crate log;
@ -38,8 +38,8 @@ use std::str;
use fetch::{Client as FetchClient, Fetch};
use futures::{Future, Stream};
use futures::future::{self, Either};
use futures_cpupool::CpuPool;
use serde_json::Value;
use parity_runtime::Executor;
/// Current ETH price information.
#[derive(Debug)]
@ -71,7 +71,7 @@ impl From<fetch::Error> for Error {
/// A client to get the current ETH price using an external API.
pub struct Client<F = FetchClient> {
pool: CpuPool,
pool: Executor,
api_endpoint: String,
fetch: F,
}
@ -92,7 +92,7 @@ impl<F> cmp::PartialEq for Client<F> {
impl<F: Fetch> Client<F> {
/// Creates a new instance of the `Client` given a `fetch::Client`.
pub fn new(fetch: F, pool: CpuPool) -> Client<F> {
pub fn new(fetch: F, pool: Executor) -> Client<F> {
let api_endpoint = "https://api.etherscan.io/api?module=stats&action=ethprice".to_owned();
Client { pool, api_endpoint, fetch }
}
@ -108,7 +108,7 @@ impl<F: Fetch> Client<F> {
}
Either::B(response.concat2().from_err())
})
.map(move |body| {
.and_then(move |body| {
let body_str = str::from_utf8(&body).ok();
let value: Option<Value> = body_str.and_then(|s| serde_json::from_str(s).ok());
@ -128,30 +128,31 @@ impl<F: Fetch> Client<F> {
})
.map_err(|err| {
warn!("Failed to auto-update latest ETH price: {:?}", err);
err
});
self.pool.spawn(future).forget()
self.pool.spawn(future)
}
}
#[cfg(test)]
mod test {
use std::sync::Arc;
use futures_cpupool::CpuPool;
use parity_runtime::{Runtime, Executor};
use Client;
use std::sync::atomic::{AtomicBool, Ordering};
use fake_fetch::FakeFetch;
fn price_info_ok(response: &str) -> Client<FakeFetch<String>> {
Client::new(FakeFetch::new(Some(response.to_owned())), CpuPool::new(1))
fn price_info_ok(response: &str, executor: Executor) -> Client<FakeFetch<String>> {
Client::new(FakeFetch::new(Some(response.to_owned())), executor)
}
fn price_info_not_found() -> Client<FakeFetch<String>> {
Client::new(FakeFetch::new(None::<String>), CpuPool::new(1))
fn price_info_not_found(executor: Executor) -> Client<FakeFetch<String>> {
Client::new(FakeFetch::new(None::<String>), executor)
}
#[test]
fn should_get_price_info() {
let runtime = Runtime::with_thread_count(1);
// given
let response = r#"{
"status": "1",
@ -164,7 +165,7 @@ mod test {
}
}"#;
let price_info = price_info_ok(response);
let price_info = price_info_ok(response, runtime.executor());
// when
price_info.get(|price| {
@ -176,10 +177,12 @@ mod test {
#[test]
fn should_not_call_set_price_if_response_is_malformed() {
let runtime = Runtime::with_thread_count(1);
// given
let response = "{}";
let price_info = price_info_ok(response);
let price_info = price_info_ok(response, runtime.executor());
let b = Arc::new(AtomicBool::new(false));
// when
@ -194,8 +197,10 @@ mod test {
#[test]
fn should_not_call_set_price_if_response_is_invalid() {
let runtime = Runtime::with_thread_count(1);
// given
let price_info = price_info_not_found();
let price_info = price_info_not_found(runtime.executor());
let b = Arc::new(AtomicBool::new(false));
// when


@ -9,11 +9,10 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
ansi_term = "0.10"
cid = "0.2"
cid = "0.3"
futures = "0.1.6"
futures-cpupool = "0.1"
log = "0.4"
multihash ="0.7"
multihash = "0.8"
order-stat = "0.1"
parking_lot = "0.6"
rand = "0.4"
@ -28,17 +27,17 @@ tokio-timer = "0.1"
transient-hashmap = "0.4"
itertools = "0.5"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-ipc-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-ipc-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
ethash = { path = "../ethash" }
ethcore = { path = "../ethcore", features = ["test-helpers"] }
parity-bytes = "0.1"
parity-crypto = "0.1"
parity-crypto = "0.2"
fastmap = { path = "../util/fastmap" }
ethcore-devtools = { path = "../devtools" }
ethcore-io = { path = "../util/io" }
@ -55,7 +54,7 @@ ethkey = { path = "../ethkey" }
ethstore = { path = "../ethstore" }
fetch = { path = "../util/fetch" }
keccak-hash = "0.1.2"
parity-reactor = { path = "../util/reactor" }
parity-runtime = { path = "../util/runtime" }
parity-updater = { path = "../updater" }
parity-version = { path = "../util/version" }
patricia-trie = "0.3.0"


@ -42,13 +42,13 @@ impl<M, T> http::MetaExtractor<M> for MetaExtractor<T> where
T: HttpMetaExtractor<Metadata = M>,
M: jsonrpc_core::Metadata,
{
fn read_metadata(&self, req: &hyper::server::Request) -> M {
let as_string = |header: Option<&hyper::header::Raw>| header
.and_then(|raw| raw.one())
.map(|raw| String::from_utf8_lossy(raw).into_owned());
fn read_metadata(&self, req: &hyper::Request<hyper::Body>) -> M {
let as_string = |header: Option<&hyper::header::HeaderValue>| {
header.and_then(|val| val.to_str().ok().map(|s| s.to_owned()))
};
let origin = as_string(req.headers().get_raw("origin"));
let user_agent = as_string(req.headers().get_raw("user-agent"));
let origin = as_string(req.headers().get("origin"));
let user_agent = as_string(req.headers().get("user-agent"));
self.extractor.read_metadata(origin, user_agent)
}
}


@ -23,7 +23,6 @@ extern crate futures;
extern crate ansi_term;
extern crate cid;
extern crate futures_cpupool;
extern crate itertools;
extern crate multihash;
extern crate order_stat;
@ -60,7 +59,7 @@ extern crate ethkey;
extern crate ethstore;
extern crate fetch;
extern crate keccak_hash as hash;
extern crate parity_reactor;
extern crate parity_runtime;
extern crate parity_updater as updater;
extern crate parity_version as version;
extern crate patricia_trie as trie;
@ -124,7 +123,6 @@ pub use authcodes::{AuthCodes, TimeProvider};
pub use http_common::HttpMetaExtractor;
use std::net::SocketAddr;
use http::tokio_core;
/// RPC HTTP Server instance
pub type HttpServer = http::Server;
@ -135,7 +133,6 @@ pub fn start_http<M, S, H, T>(
cors_domains: http::DomainsValidation<http::AccessControlAllowOrigin>,
allowed_hosts: http::DomainsValidation<http::Host>,
handler: H,
remote: tokio_core::reactor::Remote,
extractor: T,
threads: usize,
max_payload: usize,
@ -148,7 +145,6 @@ pub fn start_http<M, S, H, T>(
let extractor = http_common::MetaExtractor::new(extractor);
Ok(http::ServerBuilder::with_meta_extractor(handler, extractor)
.threads(threads)
.event_loop_remote(remote)
.cors(cors_domains.into())
.allowed_hosts(allowed_hosts.into())
.max_request_body_size(max_payload * 1024 * 1024)
@ -162,7 +158,6 @@ pub fn start_http_with_middleware<M, S, H, T, R>(
cors_domains: http::DomainsValidation<http::AccessControlAllowOrigin>,
allowed_hosts: http::DomainsValidation<http::Host>,
handler: H,
remote: tokio_core::reactor::Remote,
extractor: T,
middleware: R,
threads: usize,
@ -177,7 +172,6 @@ pub fn start_http_with_middleware<M, S, H, T, R>(
let extractor = http_common::MetaExtractor::new(extractor);
Ok(http::ServerBuilder::with_meta_extractor(handler, extractor)
.threads(threads)
.event_loop_remote(remote)
.cors(cors_domains.into())
.allowed_hosts(allowed_hosts.into())
.max_request_body_size(max_payload * 1024 * 1024)
@ -189,7 +183,6 @@ pub fn start_http_with_middleware<M, S, H, T, R>(
pub fn start_ipc<M, S, H, T>(
addr: &str,
handler: H,
remote: tokio_core::reactor::Remote,
extractor: T,
) -> ::std::io::Result<ipc::Server> where
M: jsonrpc_core::Metadata,
@ -198,7 +191,6 @@ pub fn start_ipc<M, S, H, T>(
T: IpcMetaExtractor<M>,
{
ipc::ServerBuilder::with_meta_extractor(handler, extractor)
.event_loop_remote(remote)
.start(addr)
}
@ -206,7 +198,6 @@ pub fn start_ipc<M, S, H, T>(
pub fn start_ws<M, S, H, T, U, V>(
addr: &SocketAddr,
handler: H,
remote: tokio_core::reactor::Remote,
allowed_origins: ws::DomainsValidation<ws::Origin>,
allowed_hosts: ws::DomainsValidation<ws::Host>,
max_connections: usize,
@ -222,7 +213,6 @@ pub fn start_ws<M, S, H, T, U, V>(
V: ws::RequestMiddleware,
{
ws::ServerBuilder::with_meta_extractor(handler, extractor)
.event_loop_remote(remote)
.request_middleware(middleware)
.allowed_origins(allowed_origins)
.allowed_hosts(allowed_hosts)


@ -18,7 +18,7 @@ use std::ops::{Deref, DerefMut};
use std::path::PathBuf;
use tempdir::TempDir;
use parity_reactor::{EventLoop, TokioRemote};
use parity_runtime::{Runtime, TaskExecutor};
use authcodes::AuthCodes;
@ -27,15 +27,15 @@ pub struct Server<T> {
/// Server
pub server: T,
/// RPC Event Loop
pub event_loop: EventLoop,
pub event_loop: Runtime,
}
impl<T> Server<T> {
pub fn new<F>(f: F) -> Server<T> where
F: FnOnce(TokioRemote) -> T,
F: FnOnce(TaskExecutor) -> T,
{
let event_loop = EventLoop::spawn();
let remote = event_loop.raw_remote();
let event_loop = Runtime::with_thread_count(1);
let remote = event_loop.raw_executor();
Server {
server: f(remote),


@ -26,14 +26,13 @@ fn serve(handler: Option<MetaIoHandler<Metadata>>) -> Server<HttpServer> {
let address = "127.0.0.1:0".parse().unwrap();
let handler = handler.unwrap_or_default();
Server::new(|remote| ::start_http_with_middleware(
Server::new(|_remote| ::start_http_with_middleware(
&address,
http::DomainsValidation::Disabled,
http::DomainsValidation::Disabled,
handler,
remote,
extractors::RpcExtractor,
|request: hyper::Request| {
|request: hyper::Request<hyper::Body>| {
http::RequestMiddlewareAction::Proceed {
should_continue_on_invalid_cors: false,
request,
@ -50,7 +49,7 @@ fn request(server: Server<HttpServer>, request: &str) -> http_client::Response {
}
#[cfg(test)]
mod testsing {
mod tests {
use jsonrpc_core::{MetaIoHandler, Value};
use v1::Metadata;
use super::{request, Server};
@ -73,7 +72,7 @@ mod testsing {
// when
let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#;
let expected = "4B\n{\"jsonrpc\":\"2.0\",\"result\":\"unknown origin / unknown agent via RPC\",\"id\":1}\n\n0\n\n";
let expected = "{\"jsonrpc\":\"2.0\",\"result\":\"unknown origin / unknown agent via RPC\",\"id\":1}\n";
let res = request(server,
&format!("\
POST / HTTP/1.1\r\n\
@ -98,7 +97,7 @@ mod testsing {
// when
let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#;
let expected = "49\n{\"jsonrpc\":\"2.0\",\"result\":\"unknown origin / curl/7.16.3 via RPC\",\"id\":1}\n\n0\n\n";
let expected = "{\"jsonrpc\":\"2.0\",\"result\":\"unknown origin / curl/7.16.3 via RPC\",\"id\":1}\n";
let res = request(server,
&format!("\
POST / HTTP/1.1\r\n\


@ -34,10 +34,9 @@ pub fn serve() -> (Server<ws::Server>, usize, GuardedAuthCodes) {
let authcodes = GuardedAuthCodes::new();
let stats = Arc::new(informant::RpcStats::default());
let res = Server::new(|remote| ::start_ws(
let res = Server::new(|_| ::start_ws(
&address,
io,
remote,
ws::DomainsValidation::Disabled,
ws::DomainsValidation::Disabled,
5,


@ -23,6 +23,7 @@ use authcodes;
use http_common::HttpMetaExtractor;
use ipc;
use jsonrpc_core as core;
use jsonrpc_core::futures::future::Either;
use jsonrpc_pubsub::Session;
use ws;
use ethereum_types::H256;
@ -216,26 +217,26 @@ impl<M: core::Middleware<Metadata>> WsDispatcher<M> {
}
impl<M: core::Middleware<Metadata>> core::Middleware<Metadata> for WsDispatcher<M> {
type Future = core::futures::future::Either<
M::Future,
type Future = Either<
core::FutureRpcResult<M::Future>,
core::FutureResponse,
>;
fn on_request<F, X>(&self, request: core::Request, meta: Metadata, process: F) -> Self::Future where
fn on_request<F, X>(&self, request: core::Request, meta: Metadata, process: F)
-> Either<Self::Future, X>
where
F: FnOnce(core::Request, Metadata) -> X,
X: core::futures::Future<Item=Option<core::Response>, Error=()> + Send + 'static,
{
use self::core::futures::future::Either::{A, B};
let use_full = match &meta.origin {
&Origin::Signer { .. } => true,
_ => false,
};
if use_full {
A(self.full_handler.handle_rpc_request(request, meta))
Either::A(Either::A(self.full_handler.handle_rpc_request(request, meta)))
} else {
B(Box::new(process(request, meta)))
Either::B(process(request, meta))
}
}
}


@ -23,31 +23,25 @@ use ethereum_types::{U256, Address};
use futures::{Future, future, Poll, Async};
use futures::future::Either;
use futures::sync::oneshot;
use futures_cpupool::CpuPool;
use parity_runtime::Executor;
/// Manages currently reserved and prospective nonces
/// for multiple senders.
#[derive(Debug)]
pub struct Reservations {
nonces: HashMap<Address, SenderReservations>,
pool: CpuPool,
executor: Executor,
}
impl Reservations {
/// A maximal number of reserved nonces in the hashmap
/// before we start clearing the unused ones.
const CLEAN_AT: usize = 512;
/// Create new nonces manager and spawn a single-threaded cpu pool
/// for progressing execution of dropped nonces.
pub fn new() -> Self {
Self::with_pool(CpuPool::new(1))
}
/// Create new nonces manager with given cpupool.
pub fn with_pool(pool: CpuPool) -> Self {
/// Create new nonces manager with given executor.
pub fn new(executor: Executor) -> Self {
Reservations {
nonces: Default::default(),
pool,
executor,
}
}
@ -59,9 +53,9 @@ impl Reservations {
self.nonces.retain(|_, v| !v.is_empty());
}
let pool = &self.pool;
let executor = &self.executor;
self.nonces.entry(sender)
.or_insert_with(move || SenderReservations::with_pool(pool.clone()))
.or_insert_with(move || SenderReservations::new(executor.clone()))
.reserve_nonce(minimal)
}
}
@ -71,25 +65,18 @@ impl Reservations {
pub struct SenderReservations {
previous: Option<oneshot::Receiver<U256>>,
previous_ready: Arc<AtomicBool>,
pool: CpuPool,
executor: Executor,
prospective_value: U256,
dropped: Arc<AtomicUsize>,
}
impl SenderReservations {
/// Create new nonces manager and spawn a single-threaded cpu pool
/// for progressing execution of dropped nonces.
#[cfg(test)]
pub fn new() -> Self {
Self::with_pool(CpuPool::new(1))
}
/// Create new nonces manager with given cpu pool.
pub fn with_pool(pool: CpuPool) -> Self {
/// Create new nonces manager with given executor.
pub fn new(executor: Executor) -> Self {
SenderReservations {
previous: None,
previous_ready: Arc::new(AtomicBool::new(true)),
pool,
executor,
prospective_value: Default::default(),
dropped: Default::default(),
}
@ -110,7 +97,7 @@ impl SenderReservations {
let (next, rx) = oneshot::channel();
let next = Some(next);
let next_sent = Arc::new(AtomicBool::default());
let pool = self.pool.clone();
let executor = self.executor.clone();
let dropped = self.dropped.clone();
self.previous_ready = next_sent.clone();
match mem::replace(&mut self.previous, Some(rx)) {
@ -120,7 +107,7 @@ impl SenderReservations {
next_sent,
minimal,
prospective_value,
pool,
executor,
dropped,
},
None => Reserved {
@ -129,7 +116,7 @@ impl SenderReservations {
next_sent,
minimal,
prospective_value,
pool,
executor,
dropped,
},
}
@ -152,7 +139,7 @@ pub struct Reserved {
next_sent: Arc<AtomicBool>,
minimal: U256,
prospective_value: U256,
pool: CpuPool,
executor: Executor,
dropped: Arc<AtomicUsize>,
}
@ -196,10 +183,14 @@ impl Drop for Reserved {
self.dropped.fetch_add(1, atomic::Ordering::SeqCst);
// If Reserved is dropped just pipe previous and next together.
let previous = mem::replace(&mut self.previous, Either::B(future::ok(U256::default())));
self.pool.spawn(previous.map(move |nonce| {
self.executor.spawn(
previous
.map(move |nonce| {
next_sent.store(true, atomic::Ordering::SeqCst);
next.send(nonce).expect(Ready::RECV_PROOF)
})).forget()
})
.map_err(|err| error!("Error dropping `Reserved`: {:?}", err))
);
}
}
}
@ -253,10 +244,12 @@ impl Drop for Ready {
#[cfg(test)]
mod tests {
use super::*;
use parity_runtime::Runtime;
#[test]
fn should_reserve_a_set_of_nonces_and_resolve_them() {
let mut nonces = SenderReservations::new();
let runtime = Runtime::with_thread_count(1);
let mut nonces = SenderReservations::new(runtime.executor());
assert!(nonces.is_empty());
let n1 = nonces.reserve_nonce(5.into());
@ -303,7 +296,8 @@ mod tests {
#[test]
fn should_return_prospective_nonce() {
let mut nonces = SenderReservations::new();
let runtime = Runtime::with_thread_count(1);
let mut nonces = SenderReservations::new(runtime.executor());
let n1 = nonces.reserve_nonce(5.into());
let n2 = nonces.reserve_nonce(5.into());
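
A minimal sketch of the executor-based nonce API exercised by the updated tests above, assuming the `parity_runtime::Runtime` and `v1::helpers::nonce` paths from this commit (it lives inside the parity-rpc crate, so it is not a standalone program):

use parity_runtime::Runtime;
use v1::helpers::nonce::SenderReservations;

fn reserve_two_nonces() {
    // The runtime owns the worker threads; its executor replaces the
    // per-manager `CpuPool` removed in this commit.
    let runtime = Runtime::with_thread_count(1);
    let mut nonces = SenderReservations::new(runtime.executor());

    // Reservations for the same sender are chained: the second one becomes
    // ready only once the first reserved nonce is used or dropped.
    let _n1 = nonces.reserve_nonce(5.into());
    let _n2 = nonces.reserve_nonce(5.into());
}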

View File

@ -95,7 +95,7 @@ impl<S: core::Middleware<Metadata>> GenericPollManager<S> {
jsonrpc: Some(core::Version::V2),
id: core::Id::Str(id.as_string()),
method: subscription.method.clone(),
params: Some(subscription.params.clone()),
params: subscription.params.clone(),
};
trace!(target: "pubsub", "Polling method: {:?}", call);
let result = self.rpc.handle_call(call.into(), subscription.metadata.clone());
@ -141,7 +141,7 @@ mod tests {
use jsonrpc_core::{MetaIoHandler, NoopMiddleware, Value, Params};
use jsonrpc_core::futures::{Future, Stream};
use jsonrpc_pubsub::SubscriptionId;
use http::tokio_core::reactor;
use http::tokio::runtime::Runtime;
use super::GenericPollManager;
@ -162,25 +162,25 @@ mod tests {
#[test]
fn should_poll_subscribed_method() {
// given
let mut el = reactor::Core::new().unwrap();
let mut el = Runtime::new().unwrap();
let mut poll_manager = poll_manager();
let (id, rx) = poll_manager.subscribe(Default::default(), "hello".into(), Params::None);
assert_eq!(id, SubscriptionId::String("0x416d77337e24399d".into()));
// then
poll_manager.tick().wait().unwrap();
let (res, rx) = el.run(rx.into_future()).unwrap();
let (res, rx) = el.block_on(rx.into_future()).unwrap();
assert_eq!(res, Some(Ok(Value::String("hello".into()))));
// retrieve second item
poll_manager.tick().wait().unwrap();
let (res, rx) = el.run(rx.into_future()).unwrap();
let (res, rx) = el.block_on(rx.into_future()).unwrap();
assert_eq!(res, Some(Ok(Value::String("world".into()))));
// and no more notifications
poll_manager.tick().wait().unwrap();
// we need to unsubscribe otherwise the future will never finish.
poll_manager.unsubscribe(&id);
assert_eq!(el.run(rx.into_future()).unwrap().0, None);
assert_eq!(el.block_on(rx.into_future()).unwrap().0, None);
}
}
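
The same migration in miniature: a standalone tokio 0.1 sketch of replacing `tokio_core::reactor::Core::run` with `Runtime::block_on`, as done in the test above (the value being awaited is illustrative):

extern crate futures;
extern crate tokio;

use futures::future;
use tokio::runtime::Runtime;

fn main() {
    // `Core::new()` becomes `Runtime::new()`.
    let mut el = Runtime::new().unwrap();
    // `el.run(fut)` becomes `el.block_on(fut)`; the future must be
    // `Send + 'static` because the runtime is multi-threaded.
    let value = el.block_on(future::ok::<_, ()>(42)).unwrap();
    assert_eq!(value, 42);
}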

View File

@ -39,7 +39,7 @@ use sync::LightSync;
use light::cache::Cache;
use light::on_demand::OnDemand;
use light::client::{LightChainClient, LightChainNotify};
use parity_reactor::Remote;
use parity_runtime::Executor;
use ethereum_types::H256;
use bytes::Bytes;
use parking_lot::{RwLock, Mutex};
@ -56,7 +56,7 @@ pub struct EthPubSubClient<C> {
impl<C> EthPubSubClient<C> {
/// Creates new `EthPubSubClient`.
pub fn new(client: Arc<C>, remote: Remote) -> Self {
pub fn new(client: Arc<C>, executor: Executor) -> Self {
let heads_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let logs_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let transactions_subscribers = Arc::new(RwLock::new(Subscribers::default()));
@ -64,7 +64,7 @@ impl<C> EthPubSubClient<C> {
EthPubSubClient {
handler: Arc::new(ChainNotificationHandler {
client,
remote,
executor,
heads_subscribers: heads_subscribers.clone(),
logs_subscribers: logs_subscribers.clone(),
transactions_subscribers: transactions_subscribers.clone(),
@ -77,8 +77,8 @@ impl<C> EthPubSubClient<C> {
/// Creates new `EthPubSubClient` with deterministic subscription ids.

#[cfg(test)]
pub fn new_test(client: Arc<C>, remote: Remote) -> Self {
let client = Self::new(client, remote);
pub fn new_test(client: Arc<C>, executor: Executor) -> Self {
let client = Self::new(client, executor);
*client.heads_subscribers.write() = Subscribers::new_test();
*client.logs_subscribers.write() = Subscribers::new_test();
*client.transactions_subscribers.write() = Subscribers::new_test();
@ -98,7 +98,7 @@ impl EthPubSubClient<LightFetch> {
on_demand: Arc<OnDemand>,
sync: Arc<LightSync>,
cache: Arc<Mutex<Cache>>,
remote: Remote,
executor: Executor,
gas_price_percentile: usize,
) -> Self {
let fetch = LightFetch {
@ -108,22 +108,22 @@ impl EthPubSubClient<LightFetch> {
cache,
gas_price_percentile,
};
EthPubSubClient::new(Arc::new(fetch), remote)
EthPubSubClient::new(Arc::new(fetch), executor)
}
}
/// PubSub Notification handler.
pub struct ChainNotificationHandler<C> {
client: Arc<C>,
remote: Remote,
executor: Executor,
heads_subscribers: Arc<RwLock<Subscribers<Client>>>,
logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>,
transactions_subscribers: Arc<RwLock<Subscribers<Client>>>,
}
impl<C> ChainNotificationHandler<C> {
fn notify(remote: &Remote, subscriber: &Client, result: pubsub::Result) {
remote.spawn(subscriber
fn notify(executor: &Executor, subscriber: &Client, result: pubsub::Result) {
executor.spawn(subscriber
.notify(Ok(result))
.map(|_| ())
.map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e))
@ -133,7 +133,7 @@ impl<C> ChainNotificationHandler<C> {
fn notify_heads(&self, headers: &[(encoded::Header, BTreeMap<String, String>)]) {
for subscriber in self.heads_subscribers.read().values() {
for &(ref header, ref extra_info) in headers {
Self::notify(&self.remote, subscriber, pubsub::Result::Header(RichHeader {
Self::notify(&self.executor, subscriber, pubsub::Result::Header(RichHeader {
inner: header.into(),
extra_info: extra_info.clone(),
}));
@ -159,14 +159,14 @@ impl<C> ChainNotificationHandler<C> {
.collect::<Vec<_>>()
);
let limit = filter.limit;
let remote = self.remote.clone();
let executor = self.executor.clone();
let subscriber = subscriber.clone();
self.remote.spawn(logs
self.executor.spawn(logs
.map(move |logs| {
let logs = logs.into_iter().flat_map(|log| log).collect();
for log in limit_logs(logs, limit) {
Self::notify(&remote, &subscriber, pubsub::Result::Log(log))
Self::notify(&executor, &subscriber, pubsub::Result::Log(log))
}
})
.map_err(|e| warn!("Unable to fetch latest logs: {:?}", e))
@ -178,7 +178,7 @@ impl<C> ChainNotificationHandler<C> {
pub fn notify_new_transactions(&self, hashes: &[H256]) {
for subscriber in self.transactions_subscribers.read().values() {
for hash in hashes {
Self::notify(&self.remote, subscriber, pubsub::Result::TransactionHash((*hash).into()));
Self::notify(&self.executor, subscriber, pubsub::Result::TransactionHash((*hash).into()));
}
}
}
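
A minimal sketch of the executor-driven notification pattern above, assuming the `parity_runtime` API introduced by this commit; the spawned closure is illustrative:

extern crate futures;
extern crate parity_runtime;

use futures::future;
use parity_runtime::Runtime;

fn main() {
    let runtime = Runtime::with_thread_count(1);
    let executor = runtime.executor();
    // Spawned futures must be `Future<Item = (), Error = ()> + Send`, so
    // errors are mapped to log output (as with `warn!` above) before spawning.
    executor.spawn(future::lazy(|| -> Result<(), ()> {
        println!("notification delivered");
        Ok(())
    }));
}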

View File

@ -22,7 +22,6 @@ use std::sync::Arc;
use sync::ManageNetwork;
use fetch::{self, Fetch};
use futures_cpupool::CpuPool;
use hash::keccak_buffer;
use jsonrpc_core::{Result, BoxFuture};
@ -35,16 +34,14 @@ use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction};
pub struct ParitySetClient<F> {
net: Arc<ManageNetwork>,
fetch: F,
pool: CpuPool,
}
impl<F: Fetch> ParitySetClient<F> {
/// Creates new `ParitySetClient` with given `Fetch`.
pub fn new(net: Arc<ManageNetwork>, fetch: F, p: CpuPool) -> Self {
pub fn new(net: Arc<ManageNetwork>, fetch: F) -> Self {
ParitySetClient {
net: net,
fetch: fetch,
pool: p,
}
}
}
@ -134,7 +131,7 @@ impl<F: Fetch> ParitySet for ParitySetClient<F> {
})
.map(Into::into)
});
Box::new(self.pool.spawn(future))
Box::new(future)
}
fn upgrade_ready(&self) -> Result<Option<ReleaseInfo>> {

View File

@ -23,7 +23,6 @@ use ethcore::client::{BlockChainClient, Mode};
use ethcore::miner::MinerService;
use sync::ManageNetwork;
use fetch::{self, Fetch};
use futures_cpupool::CpuPool;
use hash::keccak_buffer;
use updater::{Service as UpdateService};
@ -40,7 +39,6 @@ pub struct ParitySetClient<C, M, U, F = fetch::Client> {
updater: Arc<U>,
net: Arc<ManageNetwork>,
fetch: F,
pool: CpuPool,
}
impl<C, M, U, F> ParitySetClient<C, M, U, F>
@ -53,7 +51,6 @@ impl<C, M, U, F> ParitySetClient<C, M, U, F>
updater: &Arc<U>,
net: &Arc<ManageNetwork>,
fetch: F,
pool: CpuPool,
) -> Self {
ParitySetClient {
client: client.clone(),
@ -61,7 +58,6 @@ impl<C, M, U, F> ParitySetClient<C, M, U, F>
updater: updater.clone(),
net: net.clone(),
fetch: fetch,
pool: pool,
}
}
}
@ -177,7 +173,7 @@ impl<C, M, U, F> ParitySet for ParitySetClient<C, M, U, F> where
})
.map(Into::into)
});
Box::new(self.pool.spawn(future))
Box::new(future)
}
fn upgrade_ready(&self) -> Result<Option<ReleaseInfo>> {

View File

@ -27,7 +27,7 @@ use jsonrpc_macros::pubsub::Subscriber;
use jsonrpc_pubsub::SubscriptionId;
use tokio_timer;
use parity_reactor::Remote;
use parity_runtime::Executor;
use v1::helpers::GenericPollManager;
use v1::metadata::Metadata;
use v1::traits::PubSub;
@ -35,12 +35,12 @@ use v1::traits::PubSub;
/// Parity PubSub implementation.
pub struct PubSubClient<S: core::Middleware<Metadata>> {
poll_manager: Arc<RwLock<GenericPollManager<S>>>,
remote: Remote,
executor: Executor,
}
impl<S: core::Middleware<Metadata>> PubSubClient<S> {
/// Creates new `PubSubClient`.
pub fn new(rpc: MetaIoHandler<Metadata, S>, remote: Remote) -> Self {
pub fn new(rpc: MetaIoHandler<Metadata, S>, executor: Executor) -> Self {
let poll_manager = Arc::new(RwLock::new(GenericPollManager::new(rpc)));
let pm2 = poll_manager.clone();
@ -50,14 +50,14 @@ impl<S: core::Middleware<Metadata>> PubSubClient<S> {
// Start ticking
let interval = timer.interval(Duration::from_millis(1000));
remote.spawn(interval
executor.spawn(interval
.map_err(|e| warn!("Polling timer error: {:?}", e))
.for_each(move |_| pm2.read().tick())
);
PubSubClient {
poll_manager,
remote,
executor,
}
}
}
@ -65,8 +65,8 @@ impl<S: core::Middleware<Metadata>> PubSubClient<S> {
impl PubSubClient<core::NoopMiddleware> {
/// Creates new `PubSubClient` with deterministic ids.
#[cfg(test)]
pub fn new_test(rpc: MetaIoHandler<Metadata, core::NoopMiddleware>, remote: Remote) -> Self {
let client = Self::new(MetaIoHandler::with_middleware(Default::default()), remote);
pub fn new_test(rpc: MetaIoHandler<Metadata, core::NoopMiddleware>, executor: Executor) -> Self {
let client = Self::new(MetaIoHandler::with_middleware(Default::default()), executor);
*client.poll_manager.write() = GenericPollManager::new_test(rpc);
client
}
@ -84,7 +84,7 @@ impl<S: core::Middleware<Metadata>> PubSub for PubSubClient<S> {
let (id, receiver) = poll_manager.subscribe(meta, method, params);
match subscriber.assign_id(id.clone()) {
Ok(sink) => {
self.remote.spawn(receiver.forward(sink.sink_map_err(|e| {
self.executor.spawn(receiver.forward(sink.sink_map_err(|e| {
warn!("Cannot send notification: {:?}", e);
})).map(|_| ()));
},

View File

@ -20,7 +20,7 @@ use std::sync::Arc;
use ethcore::account_provider::AccountProvider;
use ethkey;
use parity_reactor::Remote;
use parity_runtime::Executor;
use parking_lot::Mutex;
use rlp::Rlp;
use transaction::{SignedTransaction, PendingTransaction};
@ -50,7 +50,7 @@ impl<D: Dispatcher + 'static> SignerClient<D> {
store: &Arc<AccountProvider>,
dispatcher: D,
signer: &Arc<SignerService>,
remote: Remote,
executor: Executor,
) -> Self {
let subscribers = Arc::new(Mutex::new(Subscribers::default()));
let subs = Arc::downgrade(&subscribers);
@ -60,7 +60,7 @@ impl<D: Dispatcher + 'static> SignerClient<D> {
let requests = s.requests().into_iter().map(Into::into).collect::<Vec<ConfirmationRequest>>();
for subscription in subs.lock().values() {
let subscription: &Sink<_> = subscription;
remote.spawn(subscription
executor.spawn(subscription
.notify(Ok(requests.clone()))
.map(|_| ())
.map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e))

View File

@ -44,7 +44,7 @@ use v1::types::{
Origin,
};
use parity_reactor::Remote;
use parity_runtime::Executor;
/// After 60s entries that are not queried with `check_request` will get garbage collected.
const MAX_PENDING_DURATION_SEC: u32 = 60;
@ -67,7 +67,7 @@ impl Future for DispatchResult {
}
}
fn schedule(remote: Remote,
fn schedule(executor: Executor,
confirmations: Arc<Mutex<TransientHashMap<U256, Option<RpcConfirmationResult>>>>,
id: U256,
future: RpcConfirmationReceiver) {
@ -83,7 +83,7 @@ fn schedule(remote: Remote,
confirmations.insert(id, Some(result));
Ok(())
});
remote.spawn(future);
executor.spawn(future);
}
/// Implementation of functions that require signing when no trusted signer is used.
@ -91,19 +91,19 @@ pub struct SigningQueueClient<D> {
signer: Arc<SignerService>,
accounts: Arc<AccountProvider>,
dispatcher: D,
remote: Remote,
executor: Executor,
// None here means that the request hasn't yet been confirmed
confirmations: Arc<Mutex<TransientHashMap<U256, Option<RpcConfirmationResult>>>>,
}
impl<D: Dispatcher + 'static> SigningQueueClient<D> {
/// Creates a new signing queue client given shared signing queue.
pub fn new(signer: &Arc<SignerService>, dispatcher: D, remote: Remote, accounts: &Arc<AccountProvider>) -> Self {
pub fn new(signer: &Arc<SignerService>, dispatcher: D, executor: Executor, accounts: &Arc<AccountProvider>) -> Self {
SigningQueueClient {
signer: signer.clone(),
accounts: accounts.clone(),
dispatcher,
remote,
executor,
confirmations: Arc::new(Mutex::new(TransientHashMap::new(MAX_PENDING_DURATION_SEC))),
}
}
@ -143,7 +143,7 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningQueueClient<D> {
}
fn post_sign(&self, meta: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>> {
let remote = self.remote.clone();
let executor = self.executor.clone();
let confirmations = self.confirmations.clone();
Box::new(self.dispatch(
@ -153,21 +153,21 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningQueueClient<D> {
).map(move |result| match result {
DispatchResult::Value(v) => RpcEither::Or(v),
DispatchResult::Future(id, future) => {
schedule(remote, confirmations, id, future);
schedule(executor, confirmations, id, future);
RpcEither::Either(id.into())
},
}))
}
fn post_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>> {
let remote = self.remote.clone();
let executor = self.executor.clone();
let confirmations = self.confirmations.clone();
Box::new(self.dispatch(RpcConfirmationPayload::SendTransaction(request), DefaultAccount::Provided(self.accounts.default_account().ok().unwrap_or_default()), meta.origin)
.map(|result| match result {
DispatchResult::Value(v) => RpcEither::Or(v),
DispatchResult::Future(id, future) => {
schedule(remote, confirmations, id, future);
schedule(executor, confirmations, id, future);
RpcEither::Either(id.into())
},
}))

View File

@ -20,12 +20,13 @@ use std::fmt;
use std::sync::Arc;
use std::sync::atomic::{self, AtomicUsize};
use std::time;
use futures_cpupool as pool;
use jsonrpc_core as rpc;
use parity_runtime;
use jsonrpc_core as core;
use jsonrpc_core::futures::future::Either;
use order_stat;
use parking_lot::RwLock;
pub use self::pool::CpuPool;
pub use self::parity_runtime::Executor;
const RATE_SECONDS: usize = 10;
const STATS_SAMPLES: usize = 60;
@ -186,16 +187,14 @@ pub trait ActivityNotifier: Send + Sync + 'static {
pub struct Middleware<T: ActivityNotifier = ClientNotifier> {
stats: Arc<RpcStats>,
notifier: T,
pool: Option<CpuPool>,
}
impl<T: ActivityNotifier> Middleware<T> {
/// Create new Middleware with stats counter and activity notifier.
pub fn new(stats: Arc<RpcStats>, notifier: T, pool: Option<CpuPool>) -> Self {
pub fn new(stats: Arc<RpcStats>, notifier: T) -> Self {
Middleware {
stats,
notifier,
pool,
}
}
@ -204,28 +203,24 @@ impl<T: ActivityNotifier> Middleware<T> {
}
}
impl<M: rpc::Metadata, T: ActivityNotifier> rpc::Middleware<M> for Middleware<T> {
type Future = rpc::futures::future::Either<
pool::CpuFuture<Option<rpc::Response>, ()>,
rpc::FutureResponse,
>;
impl<M: core::Metadata, T: ActivityNotifier> core::Middleware<M> for Middleware<T> {
type Future = core::FutureResponse;
fn on_request<F, X>(&self, request: rpc::Request, meta: M, process: F) -> Self::Future where
F: FnOnce(rpc::Request, M) -> X,
X: rpc::futures::Future<Item=Option<rpc::Response>, Error=()> + Send + 'static,
fn on_request<F, X>(&self, request: core::Request, meta: M, process: F) -> Either<Self::Future, X> where
F: FnOnce(core::Request, M) -> X,
X: core::futures::Future<Item=Option<core::Response>, Error=()> + Send + 'static,
{
use self::rpc::futures::future::Either::{A, B};
let start = time::Instant::now();
self.notifier.active();
self.stats.count_request();
let id = match request {
rpc::Request::Single(rpc::Call::MethodCall(ref call)) => Some(call.id.clone()),
core::Request::Single(core::Call::MethodCall(ref call)) => Some(call.id.clone()),
_ => None,
};
let stats = self.stats.clone();
let future = process(request, meta).map(move |res| {
let time = Self::as_micro(start.elapsed());
if time > 10_000 {
@ -235,10 +230,7 @@ impl<M: rpc::Metadata, T: ActivityNotifier> rpc::Middleware<M> for Middleware<T>
res
});
match self.pool {
Some(ref pool) => A(pool.spawn(future)),
None => B(Box::new(future)),
}
Either::A(Box::new(future))
}
}

View File

@ -32,6 +32,7 @@ use ethjson::spec::ForkSpec;
use io::IoChannel;
use miner::external::ExternalMiner;
use parking_lot::Mutex;
use parity_runtime::Runtime;
use jsonrpc_core::IoHandler;
use v1::helpers::dispatch::FullDispatcher;
@ -73,6 +74,7 @@ fn make_spec(chain: &BlockChain) -> Spec {
}
struct EthTester {
_runtime: Runtime,
client: Arc<Client>,
_miner: Arc<Miner>,
_snapshot: Arc<TestSnapshotService>,
@ -99,6 +101,7 @@ impl EthTester {
}
fn from_spec(spec: Spec) -> Self {
let runtime = Runtime::with_thread_count(1);
let account_provider = account_provider();
let opt_account_provider = account_provider.clone();
let miner_service = miner_service(&spec, account_provider.clone());
@ -124,7 +127,7 @@ impl EthTester {
Default::default(),
);
let reservations = Arc::new(Mutex::new(nonce::Reservations::new()));
let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor())));
let dispatcher = FullDispatcher::new(client.clone(), miner_service.clone(), reservations, 50);
let eth_sign = SigningUnsafeClient::new(
@ -137,6 +140,7 @@ impl EthTester {
handler.extend_with(eth_sign.to_delegate());
EthTester {
_runtime: runtime,
_miner: miner_service,
_snapshot: snapshot_service,
client: client,

View File

@ -32,6 +32,7 @@ use miner::external::ExternalMiner;
use rlp;
use rustc_hex::{FromHex, ToHex};
use transaction::{Transaction, Action};
use parity_runtime::Runtime;
use jsonrpc_core::IoHandler;
use v1::{Eth, EthClient, EthClientOptions, EthFilter, EthFilterClient, EthSigning, SigningUnsafeClient};
@ -65,6 +66,7 @@ fn snapshot_service() -> Arc<TestSnapshotService> {
}
struct EthTester {
pub runtime: Runtime,
pub client: Arc<TestBlockChainClient>,
pub sync: Arc<TestSyncProvider>,
pub accounts_provider: Arc<AccountProvider>,
@ -82,6 +84,7 @@ impl Default for EthTester {
impl EthTester {
pub fn new_with_options(options: EthClientOptions) -> Self {
let runtime = Runtime::with_thread_count(1);
let client = blockchain_client();
let sync = sync_provider();
let ap = accounts_provider();
@ -94,7 +97,7 @@ impl EthTester {
let poll_lifetime = options.poll_lifetime;
let eth = EthClient::new(&client, &snapshot, &sync, &opt_ap, &miner, &external_miner, options).to_delegate();
let filter = EthFilterClient::new(client.clone(), miner.clone(), poll_lifetime).to_delegate();
let reservations = Arc::new(Mutex::new(nonce::Reservations::new()));
let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor())));
let dispatcher = FullDispatcher::new(client.clone(), miner.clone(), reservations, gas_price_percentile);
let sign = SigningUnsafeClient::new(&opt_ap, dispatcher).to_delegate();
@ -104,6 +107,7 @@ impl EthTester {
io.extend_with(filter);
EthTester {
runtime,
client: client,
sync: sync,
accounts_provider: ap,

View File

@ -25,14 +25,14 @@ use std::time::Duration;
use v1::{EthPubSub, EthPubSubClient, Metadata};
use ethcore::client::{TestBlockChainClient, EachBlockWith, ChainNotify, ChainRoute, ChainRouteType};
use parity_reactor::EventLoop;
use parity_runtime::Runtime;
const DURATION_ZERO: Duration = Duration::from_millis(0);
#[test]
fn should_subscribe_to_new_heads() {
// given
let el = EventLoop::spawn();
let el = Runtime::with_thread_count(1);
let mut client = TestBlockChainClient::new();
// Insert some blocks
client.add_blocks(3, EachBlockWith::Nothing);
@ -40,7 +40,7 @@ fn should_subscribe_to_new_heads() {
let h2 = client.block_hash_delta_minus(2);
let h1 = client.block_hash_delta_minus(3);
let pubsub = EthPubSubClient::new_test(Arc::new(client), el.remote());
let pubsub = EthPubSubClient::new_test(Arc::new(client), el.executor());
let handler = pubsub.handler().upgrade().unwrap();
let pubsub = pubsub.to_delegate();
@ -89,7 +89,7 @@ fn should_subscribe_to_logs() {
use ethcore::client::BlockInfo;
// given
let el = EventLoop::spawn();
let el = Runtime::with_thread_count(1);
let mut client = TestBlockChainClient::new();
// Insert some blocks
client.add_blocks(1, EachBlockWith::Transaction);
@ -112,7 +112,7 @@ fn should_subscribe_to_logs() {
}
]);
let pubsub = EthPubSubClient::new_test(Arc::new(client), el.remote());
let pubsub = EthPubSubClient::new_test(Arc::new(client), el.executor());
let handler = pubsub.handler().upgrade().unwrap();
let pubsub = pubsub.to_delegate();
@ -156,10 +156,10 @@ fn should_subscribe_to_logs() {
#[test]
fn should_subscribe_to_pending_transactions() {
// given
let el = EventLoop::spawn();
let el = Runtime::with_thread_count(1);
let client = TestBlockChainClient::new();
let pubsub = EthPubSubClient::new_test(Arc::new(client), el.remote());
let pubsub = EthPubSubClient::new_test(Arc::new(client), el.executor());
let handler = pubsub.handler().upgrade().unwrap();
let pubsub = pubsub.to_delegate();
@ -203,9 +203,9 @@ fn should_subscribe_to_pending_transactions() {
#[test]
fn should_return_unimplemented() {
// given
let el = EventLoop::spawn();
let el = Runtime::with_thread_count(1);
let client = TestBlockChainClient::new();
let pubsub = EthPubSubClient::new_test(Arc::new(client), el.remote());
let pubsub = EthPubSubClient::new_test(Arc::new(client), el.executor());
let pubsub = pubsub.to_delegate();
let mut io = MetaIoHandler::default();

View File

@ -22,7 +22,6 @@ use ethereum_types::{U256, Address};
use ethcore::miner::MinerService;
use ethcore::client::TestBlockChainClient;
use sync::ManageNetwork;
use futures_cpupool::CpuPool;
use jsonrpc_core::IoHandler;
use v1::{ParitySet, ParitySetClient};
@ -55,8 +54,7 @@ fn parity_set_client(
updater: &Arc<TestUpdater>,
net: &Arc<TestManageNetwork>,
) -> TestParitySetClient {
let pool = CpuPool::new(1);
ParitySetClient::new(client, miner, updater, &(net.clone() as Arc<ManageNetwork>), FakeFetch::new(Some(1)), pool)
ParitySetClient::new(client, miner, updater, &(net.clone() as Arc<ManageNetwork>), FakeFetch::new(Some(1)))
}
#[test]

View File

@ -24,6 +24,7 @@ use ethcore::client::TestBlockChainClient;
use jsonrpc_core::IoHandler;
use parking_lot::Mutex;
use transaction::{Action, Transaction};
use parity_runtime::Runtime;
use v1::{PersonalClient, Personal, Metadata};
use v1::helpers::nonce;
@ -32,6 +33,7 @@ use v1::tests::helpers::TestMinerService;
use v1::types::H520;
struct PersonalTester {
_runtime: Runtime,
accounts: Arc<AccountProvider>,
io: IoHandler<Metadata>,
miner: Arc<TestMinerService>,
@ -51,10 +53,11 @@ fn miner_service() -> Arc<TestMinerService> {
}
fn setup() -> PersonalTester {
let runtime = Runtime::with_thread_count(1);
let accounts = accounts_provider();
let client = blockchain_client();
let miner = miner_service();
let reservations = Arc::new(Mutex::new(nonce::Reservations::new()));
let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor())));
let dispatcher = FullDispatcher::new(client, miner.clone(), reservations, 50);
let personal = PersonalClient::new(&accounts, dispatcher, false);
@ -63,6 +66,7 @@ fn setup() -> PersonalTester {
io.extend_with(personal.to_delegate());
let tester = PersonalTester {
_runtime: runtime,
accounts: accounts,
io: io,
miner: miner,

View File

@ -20,7 +20,7 @@ use jsonrpc_core::{self as core, MetaIoHandler};
use jsonrpc_core::futures::{self, Stream, Future};
use jsonrpc_pubsub::Session;
use parity_reactor::EventLoop;
use parity_runtime::Runtime;
use v1::{PubSub, PubSubClient, Metadata};
fn rpc() -> MetaIoHandler<Metadata, core::NoopMiddleware> {
@ -40,9 +40,9 @@ fn rpc() -> MetaIoHandler<Metadata, core::NoopMiddleware> {
#[test]
fn should_subscribe_to_a_method() {
// given
let el = EventLoop::spawn();
let el = Runtime::with_thread_count(1);
let rpc = rpc();
let pubsub = PubSubClient::new_test(rpc, el.remote()).to_delegate();
let pubsub = PubSubClient::new_test(rpc, el.executor()).to_delegate();
let mut io = MetaIoHandler::default();
io.extend_with(pubsub);

View File

@ -21,7 +21,7 @@ use bytes::ToPretty;
use ethcore::account_provider::AccountProvider;
use ethcore::client::TestBlockChainClient;
use parity_reactor::EventLoop;
use parity_runtime::Runtime;
use parking_lot::Mutex;
use rlp::encode;
use transaction::{Transaction, Action, SignedTransaction};
@ -36,6 +36,7 @@ use v1::helpers::{nonce, SigningQueue, SignerService, FilledTransactionRequest,
use v1::helpers::dispatch::{FullDispatcher, eth_data_hash};
struct SignerTester {
_runtime: Runtime,
signer: Arc<SignerService>,
accounts: Arc<AccountProvider>,
io: IoHandler<Metadata>,
@ -56,18 +57,19 @@ fn miner_service() -> Arc<TestMinerService> {
}
fn signer_tester() -> SignerTester {
let runtime = Runtime::with_thread_count(1);
let signer = Arc::new(SignerService::new_test(false));
let accounts = accounts_provider();
let client = blockchain_client();
let miner = miner_service();
let reservations = Arc::new(Mutex::new(nonce::Reservations::new()));
let event_loop = EventLoop::spawn();
let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor())));
let dispatcher = FullDispatcher::new(client, miner.clone(), reservations, 50);
let mut io = IoHandler::default();
io.extend_with(SignerClient::new(&accounts, dispatcher, &signer, event_loop.remote()).to_delegate());
io.extend_with(SignerClient::new(&accounts, dispatcher, &signer, runtime.executor()).to_delegate());
SignerTester {
_runtime: runtime,
signer: signer,
accounts: accounts,
io: io,

View File

@ -39,10 +39,10 @@ use ethstore::ethkey::{Generator, Random};
use parking_lot::Mutex;
use serde_json;
use transaction::{Transaction, Action, SignedTransaction};
use parity_reactor::Remote;
use parity_runtime::{Runtime, Executor};
struct SigningTester {
pub runtime: Runtime,
pub signer: Arc<SignerService>,
pub client: Arc<TestBlockChainClient>,
pub miner: Arc<TestMinerService>,
@ -52,23 +52,25 @@ struct SigningTester {
impl Default for SigningTester {
fn default() -> Self {
let runtime = Runtime::with_thread_count(1);
let signer = Arc::new(SignerService::new_test(false));
let client = Arc::new(TestBlockChainClient::default());
let miner = Arc::new(TestMinerService::default());
let accounts = Arc::new(AccountProvider::transient_provider());
let reservations = Arc::new(Mutex::new(nonce::Reservations::new()));
let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor())));
let mut io = IoHandler::default();
let dispatcher = FullDispatcher::new(client.clone(), miner.clone(), reservations, 50);
let remote = Remote::new_thread_per_future();
let executor = Executor::new_thread_per_future();
let rpc = SigningQueueClient::new(&signer, dispatcher.clone(), remote.clone(), &accounts);
let rpc = SigningQueueClient::new(&signer, dispatcher.clone(), executor.clone(), &accounts);
io.extend_with(EthSigning::to_delegate(rpc));
let rpc = SigningQueueClient::new(&signer, dispatcher, remote, &accounts);
let rpc = SigningQueueClient::new(&signer, dispatcher, executor, &accounts);
io.extend_with(ParitySigning::to_delegate(rpc));
SigningTester {
runtime,
signer: signer,
client: client,
miner: miner,

View File

@ -14,7 +14,7 @@ serde_json = "1.0"
url = "1.2.0"
matches = "0.1"
parking_lot = "0.6"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
parity-rpc = { path = "../rpc" }
keccak-hash = "0.1"

View File

@ -274,7 +274,7 @@ impl Rpc {
let request = MethodCall {
jsonrpc: Some(Version::V2),
method: method.to_owned(),
params: Some(Params::Array(params)),
params: Params::Array(params),
id: Id::Num(id as u64),
};
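
A small sketch of building a `MethodCall` against jsonrpc-core 0.9, where `params` is a plain `Params` rather than `Option<Params>`; the method name and id are illustrative:

extern crate jsonrpc_core;

use jsonrpc_core::{Id, MethodCall, Params, Version};

fn build_call(id: u64) -> MethodCall {
    MethodCall {
        jsonrpc: Some(Version::V2),
        method: "hello".to_owned(),
        // Previously `Some(Params::Array(...))`; the `Option` wrapper is gone.
        params: Params::Array(vec![]),
        id: Id::Num(id),
    }
}

fn main() {
    let _call = build_call(1);
}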

View File

@ -9,23 +9,20 @@ authors = ["Parity Technologies <admin@parity.io>"]
byteorder = "1.0"
log = "0.4"
parking_lot = "0.6"
hyper = { version = "0.11", default-features = false }
hyper = { version = "0.12", default-features = false }
serde = "1.0"
serde_json = "1.0"
serde_derive = "1.0"
futures = "0.1"
futures-cpupool = "0.1"
rustc-hex = "1.0"
tiny-keccak = "1.4"
tokio = "0.1"
tokio-core = "0.1"
tokio = "~0.1.11"
tokio-io = "0.1"
tokio-service = "0.1"
tokio-proto = "0.1"
url = "1.0"
ethcore = { path = "../ethcore" }
parity-bytes = "0.1"
parity-crypto = "0.1"
parity-crypto = "0.2"
ethcore-logger = { path = "../logger" }
ethcore-sync = { path = "../ethcore/sync" }
ethcore-transaction = { path = "../ethcore/transaction" }

View File

@ -20,7 +20,7 @@ use std::sync::Arc;
use std::sync::mpsc;
use futures::{self, Future};
use parking_lot::Mutex;
use tokio_core::reactor::Core;
use tokio::runtime;
use crypto::DEFAULT_MAC;
use ethkey::crypto;
use super::acl_storage::AclStorage;
@ -191,7 +191,11 @@ impl KeyServerCore {
let (stop, stopped) = futures::oneshot();
let (tx, rx) = mpsc::channel();
let handle = thread::Builder::new().name("KeyServerLoop".into()).spawn(move || {
let mut el = match Core::new() {
let runtime_res = runtime::Builder::new()
.core_threads(config.threads)
.build();
let mut el = match runtime_res {
Ok(el) => el,
Err(e) => {
tx.send(Err(Error::Internal(format!("error initializing event loop: {}", e)))).expect("Rx is blocking upper thread.");
@ -199,10 +203,10 @@ impl KeyServerCore {
},
};
let cluster = ClusterCore::new(el.handle(), config);
let cluster = ClusterCore::new(el.executor(), config);
let cluster_client = cluster.and_then(|c| c.run().map(|_| c.client()));
tx.send(cluster_client.map_err(Into::into)).expect("Rx is blocking upper thread.");
let _ = el.run(futures::empty().select(stopped));
let _ = el.block_on(futures::empty().select(stopped));
trace!(target: "secretstore_net", "{}: KeyServerLoop thread stopped", self_key_pair.public());
}).map_err(|e| Error::Internal(format!("{}", e)))?;
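
A standalone sketch of the tokio 0.1 builder pattern used above; the thread count and the future being driven are illustrative:

extern crate futures;
extern crate tokio;

use futures::future::{self, Future};
use tokio::runtime;

fn main() {
    // `Core::new()` becomes a configurable multi-threaded runtime.
    let mut el = runtime::Builder::new()
        .core_threads(2) // `config.threads` in the code above
        .build()
        .expect("error initializing event loop");
    // `Core::run` becomes `block_on`; the runtime drives the given future and
    // any tasks spawned onto its executor.
    el.block_on(future::ok::<(), ()>(())).unwrap();
    // The tests in this commit also shut the runtime down explicitly.
    el.shutdown_now().wait().unwrap();
}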

View File

@ -942,12 +942,12 @@ pub mod tests {
use std::sync::Arc;
use std::collections::{BTreeSet, BTreeMap, VecDeque};
use std::time::Duration;
use tokio_core::reactor::Core;
use ethereum_types::Address;
use ethkey::{Random, Generator, KeyPair};
use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage};
use key_server_cluster::message::{self, Message, GenerationMessage};
use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until, all_connections_established};
use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until,
all_connections_established, new_runtime};
use key_server_cluster::cluster_sessions::ClusterSession;
use key_server_cluster::generation_session::{SessionImpl, SessionState, SessionParams};
use key_server_cluster::math;
@ -1357,19 +1357,22 @@ pub mod tests {
let test_cases = [(1, 3)];
for &(threshold, num_nodes) in &test_cases {
let mut core = Core::new().unwrap();
let mut core = new_runtime();
// prepare cluster objects for each node
let clusters = make_clusters(&core, 6031, num_nodes);
run_clusters(&clusters);
// `clusters` contains `Arc<ClusterCore>` and clones will refer to the same cores.
let clusters_clone = clusters.clone();
// establish connections
loop_until(&mut core, CONN_TIMEOUT, || clusters.iter().all(all_connections_established));
loop_until(&mut core, CONN_TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
// run session to completion
let session_id = SessionId::default();
let session = clusters[0].client().new_generation_session(session_id, Default::default(), Default::default(), threshold).unwrap();
loop_until(&mut core, SESSION_TIMEOUT, || session.joint_public_and_secret().is_some());
loop_until(&mut core, SESSION_TIMEOUT, move || session.joint_public_and_secret().is_some());
}
}
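
A minimal sketch of the clone-before-move pattern these tests now rely on, since the reworked `loop_until` takes a `Send + 'static` predicate; the types and the `wait_until` helper are illustrative:

use std::sync::Arc;

// Stand-in for `loop_until`: it only needs an owned, thread-safe predicate.
fn wait_until<F: Fn() -> bool + Send + 'static>(_predicate: F) {
    // drives the runtime until the predicate returns true (elided)
}

fn example(clusters: Vec<Arc<String>>) {
    // Clone the shared handles and move the clone into the closure...
    let clusters_clone = clusters.clone();
    wait_until(move || clusters_clone.iter().all(|c| !c.is_empty()));
    // ...so the original remains usable afterwards.
    assert!(!clusters.is_empty());
}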

View File

@ -17,15 +17,16 @@
use std::io;
use std::time::{Duration, Instant};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::collections::{BTreeMap, BTreeSet};
use std::collections::btree_map::Entry;
use std::net::{SocketAddr, IpAddr};
use futures::{finished, failed, Future, Stream};
use futures_cpupool::CpuPool;
use parking_lot::{RwLock, Mutex};
use futures::{future, Future, Stream};
use parking_lot::{Mutex, RwLock};
use tokio_io::IoFuture;
use tokio_core::reactor::{Handle, Remote, Interval};
use tokio_core::net::{TcpListener, TcpStream};
use tokio::runtime::TaskExecutor;
use tokio::timer::{Interval, timeout::Error as TimeoutError};
use tokio::net::{TcpListener, TcpStream};
use ethkey::{Public, KeyPair, Signature, Random, Generator};
use ethereum_types::{Address, H256};
use key_server_cluster::{Error, NodeId, SessionId, Requester, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair};
@ -136,8 +137,9 @@ pub struct ClusterConfiguration {
pub acl_storage: Arc<AclStorage>,
/// Administrator public key.
pub admin_public: Option<Public>,
/// Should key servers set change session should be started when servers set changes.
/// This will only work when servers set is configured using KeyServerSet contract.
/// Should a key servers set change session be started when the servers set
/// changes? This will only work when the servers set is configured using the
/// KeyServerSet contract.
pub auto_migrate_enabled: bool,
}
@ -149,8 +151,6 @@ pub struct ClusterState {
/// Network cluster implementation.
pub struct ClusterCore {
/// Handle to the event loop.
handle: Handle,
/// Listen address.
listen_address: SocketAddr,
/// Cluster data.
@ -165,7 +165,7 @@ pub struct ClusterClientImpl {
/// Network cluster view. It is a communication channel, required in single session.
pub struct ClusterView {
core: Arc<Mutex<ClusterViewCore>>,
core: Arc<RwLock<ClusterViewCore>>,
configured_nodes_count: usize,
connected_nodes_count: usize,
}
@ -175,15 +175,15 @@ pub struct ClusterData {
/// Cluster configuration.
pub config: ClusterConfiguration,
/// Handle to the event loop.
pub handle: Remote,
/// Handle to the cpu thread pool.
pub pool: CpuPool,
pub executor: TaskExecutor,
/// KeyPair this node holds.
pub self_key_pair: Arc<NodeKeyPair>,
/// Connections data.
pub connections: ClusterConnections,
/// Active sessions data.
pub sessions: ClusterSessions,
/// Shutdown flag.
pub is_shutdown: Arc<AtomicBool>,
}
/// Connections that are forming the cluster. Lock order: trigger.lock() -> data.lock().
@ -231,19 +231,18 @@ pub struct Connection {
/// Connection key.
key: KeyPair,
/// Last message time.
last_message_time: Mutex<Instant>,
last_message_time: RwLock<Instant>,
}
impl ClusterCore {
pub fn new(handle: Handle, config: ClusterConfiguration) -> Result<Arc<Self>, Error> {
pub fn new(executor: TaskExecutor, config: ClusterConfiguration) -> Result<Arc<Self>, Error> {
let listen_address = make_socket_address(&config.listen_address.0, config.listen_address.1)?;
let connections = ClusterConnections::new(&config)?;
let servers_set_change_creator_connector = connections.connector.clone();
let sessions = ClusterSessions::new(&config, servers_set_change_creator_connector);
let data = ClusterData::new(&handle, config, connections, sessions);
let data = ClusterData::new(&executor, config, connections, sessions);
Ok(Arc::new(ClusterCore {
handle: handle,
listen_address: listen_address,
data: data,
}))
@ -272,7 +271,7 @@ impl ClusterCore {
.and_then(|_| self.run_connections())?;
// schedule maintenance procedures
ClusterCore::schedule_maintain(&self.handle, self.data.clone());
ClusterCore::schedule_maintain(self.data.clone());
Ok(())
}
@ -280,7 +279,7 @@ impl ClusterCore {
/// Start listening for incoming connections.
pub fn run_listener(&self) -> Result<(), Error> {
// start listening for incoming connections
self.handle.spawn(ClusterCore::listen(&self.handle, self.data.clone(), self.listen_address.clone())?);
self.data.spawn(ClusterCore::listen(self.data.clone(), self.listen_address.clone())?);
Ok(())
}
@ -293,53 +292,49 @@ impl ClusterCore {
/// Connect to peer.
fn connect(data: Arc<ClusterData>, node_address: SocketAddr) {
data.handle.clone().spawn(move |handle| {
data.pool.clone().spawn(ClusterCore::connect_future(handle, data, node_address))
})
data.clone().spawn(ClusterCore::connect_future(data, node_address));
}
/// Connect to socket using given context and handle.
fn connect_future(handle: &Handle, data: Arc<ClusterData>, node_address: SocketAddr) -> BoxedEmptyFuture {
/// Connect to socket using given context and executor.
fn connect_future(data: Arc<ClusterData>, node_address: SocketAddr) -> BoxedEmptyFuture {
let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect();
Box::new(net_connect(&node_address, handle, data.self_key_pair.clone(), disconnected_nodes)
Box::new(net_connect(&node_address, data.self_key_pair.clone(), disconnected_nodes)
.then(move |result| ClusterCore::process_connection_result(data, Some(node_address), result))
.then(|_| finished(())))
.then(|_| future::ok(())))
}
/// Start listening for incoming connections.
fn listen(handle: &Handle, data: Arc<ClusterData>, listen_address: SocketAddr) -> Result<BoxedEmptyFuture, Error> {
Ok(Box::new(TcpListener::bind(&listen_address, &handle)?
fn listen(data: Arc<ClusterData>, listen_address: SocketAddr) -> Result<BoxedEmptyFuture, Error> {
Ok(Box::new(TcpListener::bind(&listen_address)?
.incoming()
.and_then(move |(stream, node_address)| {
ClusterCore::accept_connection(data.clone(), stream, node_address);
.and_then(move |stream| {
ClusterCore::accept_connection(data.clone(), stream);
Ok(())
})
.for_each(|_| Ok(()))
.then(|_| finished(()))))
.then(|_| future::ok(()))))
}
/// Accept connection.
fn accept_connection(data: Arc<ClusterData>, stream: TcpStream, node_address: SocketAddr) {
data.handle.clone().spawn(move |handle| {
data.pool.clone().spawn(ClusterCore::accept_connection_future(handle, data, stream, node_address))
})
fn accept_connection(data: Arc<ClusterData>, stream: TcpStream) {
data.clone().spawn(ClusterCore::accept_connection_future(data, stream))
}
/// Accept connection future.
fn accept_connection_future(handle: &Handle, data: Arc<ClusterData>, stream: TcpStream, node_address: SocketAddr) -> BoxedEmptyFuture {
Box::new(net_accept_connection(node_address, stream, handle, data.self_key_pair.clone())
fn accept_connection_future(data: Arc<ClusterData>, stream: TcpStream) -> BoxedEmptyFuture {
Box::new(net_accept_connection(stream, data.self_key_pair.clone())
.then(move |result| ClusterCore::process_connection_result(data, None, result))
.then(|_| finished(())))
.then(|_| future::ok(())))
}
/// Schedule maintenance procedures.
fn schedule_maintain(handle: &Handle, data: Arc<ClusterData>) {
fn schedule_maintain(data: Arc<ClusterData>) {
let d = data.clone();
let interval: BoxedEmptyFuture = Box::new(Interval::new(Duration::new(MAINTAIN_INTERVAL, 0), handle)
.expect("failed to create interval")
let interval = Interval::new_interval(Duration::new(MAINTAIN_INTERVAL, 0))
.and_then(move |_| Ok(ClusterCore::maintain(data.clone())))
.for_each(|_| Ok(()))
.then(|_| finished(())));
.then(|_| future::ok(()));
d.spawn(interval);
}
@ -362,20 +357,20 @@ impl ClusterCore {
Ok((_, Ok(message))) => {
ClusterCore::process_connection_message(data.clone(), connection.clone(), message);
// continue serving connection
data.spawn(ClusterCore::process_connection_messages(data.clone(), connection));
Box::new(finished(Ok(())))
data.spawn(ClusterCore::process_connection_messages(data.clone(), connection).then(|_| Ok(())));
Box::new(future::ok(Ok(())))
},
Ok((_, Err(err))) => {
warn!(target: "secretstore_net", "{}: protocol error '{}' when reading message from node {}", data.self_key_pair.public(), err, connection.node_id());
// continue serving connection
data.spawn(ClusterCore::process_connection_messages(data.clone(), connection));
Box::new(finished(Err(err)))
data.spawn(ClusterCore::process_connection_messages(data.clone(), connection).then(|_| Ok(())));
Box::new(future::ok(Err(err)))
},
Err(err) => {
warn!(target: "secretstore_net", "{}: network error '{}' when reading message from node {}", data.self_key_pair.public(), err, connection.node_id());
// close connection
data.connections.remove(data.clone(), connection.node_id(), connection.is_inbound());
Box::new(failed(err))
Box::new(future::err(err))
},
}
))
@ -394,7 +389,7 @@ impl ClusterCore {
data.sessions.on_connection_timeout(connection.node_id());
}
else if last_message_diff > KEEP_ALIVE_SEND_INTERVAL {
data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {}))));
data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {}))).then(|_| Ok(())));
}
}
}
@ -415,33 +410,35 @@ impl ClusterCore {
}
/// Process connection future result.
fn process_connection_result(data: Arc<ClusterData>, outbound_addr: Option<SocketAddr>, result: Result<DeadlineStatus<Result<NetConnection, Error>>, io::Error>) -> IoFuture<Result<(), Error>> {
fn process_connection_result(data: Arc<ClusterData>, outbound_addr: Option<SocketAddr>,
result: Result<DeadlineStatus<Result<NetConnection, Error>>, TimeoutError<io::Error>>) -> IoFuture<Result<(), Error>>
{
match result {
Ok(DeadlineStatus::Meet(Ok(connection))) => {
let connection = Connection::new(outbound_addr.is_none(), connection);
if data.connections.insert(data.clone(), connection.clone()) {
ClusterCore::process_connection_messages(data.clone(), connection)
} else {
Box::new(finished(Ok(())))
Box::new(future::ok(Ok(())))
}
},
Ok(DeadlineStatus::Meet(Err(err))) => {
warn!(target: "secretstore_net", "{}: protocol error '{}' when establishing {} connection{}",
data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
Box::new(finished(Ok(())))
Box::new(future::ok(Ok(())))
},
Ok(DeadlineStatus::Timeout) => {
warn!(target: "secretstore_net", "{}: timeout when establishing {} connection{}",
data.self_key_pair.public(), if outbound_addr.is_some() { "outbound" } else { "inbound" },
outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
Box::new(finished(Ok(())))
Box::new(future::ok(Ok(())))
},
Err(err) => {
warn!(target: "secretstore_net", "{}: network error '{}' when establishing {} connection{}",
data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
Box::new(finished(Ok(())))
Box::new(future::ok(Ok(())))
},
}
}
@ -595,7 +592,7 @@ impl ClusterCore {
if !message.is_error_message() {
let session_id = message.into_session_id().expect("session_id only fails for cluster messages; only session messages are passed to process_message; qed");
let session_nonce = message.session_nonce().expect("session_nonce only fails for cluster messages; only session messages are passed to process_message; qed");
data.spawn(connection.send_message(SC::make_error_message(session_id, session_nonce, error)));
data.spawn(connection.send_message(SC::make_error_message(session_id, session_nonce, error)).then(|_| Ok(())));
}
return None;
},
@ -648,13 +645,19 @@ impl ClusterCore {
match message {
ClusterMessage::KeepAlive(_) => data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse {
session_id: None,
})))),
}))).then(|_| Ok(()))),
ClusterMessage::KeepAliveResponse(msg) => if let Some(session_id) = msg.session_id {
data.sessions.on_session_keep_alive(connection.node_id(), session_id.into());
},
_ => warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}", data.self_key_pair.public(), message, connection.node_id(), connection.node_address()),
}
}
/// Prevents new tasks from being spawned.
#[cfg(test)]
pub fn shutdown(&self) {
self.data.shutdown()
}
}
impl ClusterConnections {
@ -787,14 +790,14 @@ impl ClusterConnections {
}
impl ClusterData {
pub fn new(handle: &Handle, config: ClusterConfiguration, connections: ClusterConnections, sessions: ClusterSessions) -> Arc<Self> {
pub fn new(executor: &TaskExecutor, config: ClusterConfiguration, connections: ClusterConnections, sessions: ClusterSessions) -> Arc<Self> {
Arc::new(ClusterData {
handle: handle.remote().clone(),
pool: CpuPool::new(config.threads),
executor: executor.clone(),
self_key_pair: config.self_key_pair.clone(),
connections: connections,
sessions: sessions,
config: config,
is_shutdown: Arc::new(AtomicBool::new(false)),
})
}
@ -803,12 +806,28 @@ impl ClusterData {
self.connections.get(node)
}
/// Spawns a future using thread pool and schedules execution of it with event loop handle.
pub fn spawn<F>(&self, f: F) where F: Future + Send + 'static, F::Item: Send + 'static, F::Error: Send + 'static {
let pool_work = self.pool.spawn(f);
self.handle.spawn(move |_handle| {
pool_work.then(|_| finished(()))
})
/// Spawns a future on the runtime.
//
// TODO: Consider implementing a more graceful shutdown process using an
// `AtomicBool`, etc. which would prevent tasks from being spawned after a
// shutdown signal is given. (Recursive calls, in
// `process_connection_messages` for example, appear to continue
// indefinitely.)
pub fn spawn<F>(&self, f: F) where F: Future<Item = (), Error = ()> + Send + 'static {
if self.is_shutdown.load(Ordering::Acquire) == false {
if let Err(err) = future::Executor::execute(&self.executor, Box::new(f)) {
error!("Secret store runtime unable to spawn task. Runtime is shutting down. ({:?})", err);
}
} else {
error!("Secret store runtime unable to spawn task. Shutdown has been started.");
}
}
/// Sets the `is_shutdown` flag which prevents future tasks from being
/// spawned via `::spawn`.
#[cfg(test)]
pub fn shutdown(&self) {
self.is_shutdown.store(true, Ordering::Release);
}
}
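
A generic sketch of the guarded-spawn pattern above, using the same tokio 0.1 `TaskExecutor` calls as this file; the `Spawner` type and its surrounding `main` are illustrative:

extern crate futures;
extern crate tokio;

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use futures::{future, Future};
use tokio::runtime::{Runtime, TaskExecutor};

struct Spawner {
    executor: TaskExecutor,
    is_shutdown: Arc<AtomicBool>,
}

impl Spawner {
    // Refuse to hand new work to the runtime once the shutdown flag is set.
    fn spawn<F>(&self, f: F) where F: Future<Item = (), Error = ()> + Send + 'static {
        if !self.is_shutdown.load(Ordering::Acquire) {
            // `TaskExecutor` implements `futures::future::Executor` for boxed
            // futures, which is how `ClusterData::spawn` submits tasks.
            if let Err(err) = future::Executor::execute(&self.executor, Box::new(f)) {
                eprintln!("unable to spawn task: {:?}", err);
            }
        }
    }

    fn shutdown(&self) {
        self.is_shutdown.store(true, Ordering::Release);
    }
}

fn main() {
    let runtime = Runtime::new().unwrap();
    let spawner = Spawner {
        executor: runtime.executor(),
        is_shutdown: Arc::new(AtomicBool::new(false)),
    };
    spawner.spawn(future::ok(()));
    spawner.shutdown();
    // Let already-spawned tasks finish before exiting.
    runtime.shutdown_on_idle().wait().unwrap();
}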
@ -820,7 +839,7 @@ impl Connection {
is_inbound: is_inbound,
stream: connection.stream,
key: connection.key,
last_message_time: Mutex::new(Instant::now()),
last_message_time: RwLock::new(Instant::now()),
})
}
@ -833,11 +852,11 @@ impl Connection {
}
pub fn last_message_time(&self) -> Instant {
*self.last_message_time.lock()
*self.last_message_time.read()
}
pub fn set_last_message_time(&self, last_message_time: Instant) {
*self.last_message_time.lock() = last_message_time;
*self.last_message_time.write() = last_message_time;
}
pub fn node_address(&self) -> &SocketAddr {
@ -858,7 +877,7 @@ impl ClusterView {
ClusterView {
configured_nodes_count: configured_nodes_count,
connected_nodes_count: nodes.len(),
core: Arc::new(Mutex::new(ClusterViewCore {
core: Arc::new(RwLock::new(ClusterViewCore {
cluster: cluster,
nodes: nodes,
})),
@ -868,29 +887,29 @@ impl ClusterView {
impl Cluster for ClusterView {
fn broadcast(&self, message: Message) -> Result<(), Error> {
let core = self.core.lock();
let core = self.core.read();
for node in core.nodes.iter().filter(|n| *n != core.cluster.self_key_pair.public()) {
trace!(target: "secretstore_net", "{}: sent message {} to {}", core.cluster.self_key_pair.public(), message, node);
let connection = core.cluster.connection(node).ok_or(Error::NodeDisconnected)?;
core.cluster.spawn(connection.send_message(message.clone()))
core.cluster.spawn(connection.send_message(message.clone()).then(|_| Ok(())))
}
Ok(())
}
fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> {
let core = self.core.lock();
let core = self.core.read();
trace!(target: "secretstore_net", "{}: sent message {} to {}", core.cluster.self_key_pair.public(), message, to);
let connection = core.cluster.connection(to).ok_or(Error::NodeDisconnected)?;
core.cluster.spawn(connection.send_message(message));
core.cluster.spawn(connection.send_message(message).then(|_| Ok(())));
Ok(())
}
fn is_connected(&self, node: &NodeId) -> bool {
self.core.lock().nodes.contains(node)
self.core.read().nodes.contains(node)
}
fn nodes(&self) -> BTreeSet<NodeId> {
self.core.lock().nodes.clone()
self.core.read().nodes.clone()
}
fn configured_nodes_count(&self) -> usize {
@ -1118,8 +1137,11 @@ pub mod tests {
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::{Duration, Instant};
use std::collections::{BTreeSet, VecDeque};
use parking_lot::Mutex;
use tokio_core::reactor::Core;
use parking_lot::RwLock;
use tokio::{
runtime::{Runtime, Builder as RuntimeBuilder},
prelude::{future, Future},
};
use ethereum_types::{Address, H256};
use ethkey::{Random, Generator, Public, Signature, sign};
use key_server_cluster::{NodeId, SessionId, Requester, Error, DummyAclStorage, DummyKeyStorage,
@ -1135,7 +1157,7 @@ pub mod tests {
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession,
IsolatedSessionTransport as KeyVersionNegotiationSessionTransport};
const TIMEOUT: Duration = Duration::from_millis(300);
const TIMEOUT: Duration = Duration::from_millis(1000);
#[derive(Default)]
pub struct DummyClusterClient {
@ -1145,7 +1167,7 @@ pub mod tests {
#[derive(Debug)]
pub struct DummyCluster {
id: NodeId,
data: Mutex<DummyClusterData>,
data: RwLock<DummyClusterData>,
}
#[derive(Debug, Default)]
@ -1182,7 +1204,7 @@ pub mod tests {
pub fn new(id: NodeId) -> Self {
DummyCluster {
id: id,
data: Mutex::new(DummyClusterData::default())
data: RwLock::new(DummyClusterData::default())
}
}
@ -1191,25 +1213,25 @@ pub mod tests {
}
pub fn add_node(&self, node: NodeId) {
self.data.lock().nodes.insert(node);
self.data.write().nodes.insert(node);
}
pub fn add_nodes<I: Iterator<Item=NodeId>>(&self, nodes: I) {
self.data.lock().nodes.extend(nodes)
self.data.write().nodes.extend(nodes)
}
pub fn remove_node(&self, node: &NodeId) {
self.data.lock().nodes.remove(node);
self.data.write().nodes.remove(node);
}
pub fn take_message(&self) -> Option<(NodeId, Message)> {
self.data.lock().messages.pop_front()
self.data.write().messages.pop_front()
}
}
impl Cluster for DummyCluster {
fn broadcast(&self, message: Message) -> Result<(), Error> {
let mut data = self.data.lock();
let mut data = self.data.write();
let all_nodes: Vec<_> = data.nodes.iter().cloned().filter(|n| n != &self.id).collect();
for node in all_nodes {
data.messages.push_back((node, message.clone()));
@ -1219,40 +1241,49 @@ pub mod tests {
fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> {
debug_assert!(&self.id != to);
self.data.lock().messages.push_back((to.clone(), message));
self.data.write().messages.push_back((to.clone(), message));
Ok(())
}
fn is_connected(&self, node: &NodeId) -> bool {
let data = self.data.lock();
let data = self.data.read();
&self.id == node || data.nodes.contains(node)
}
fn nodes(&self) -> BTreeSet<NodeId> {
self.data.lock().nodes.iter().cloned().collect()
self.data.read().nodes.iter().cloned().collect()
}
fn configured_nodes_count(&self) -> usize {
self.data.lock().nodes.len()
self.data.read().nodes.len()
}
fn connected_nodes_count(&self) -> usize {
self.data.lock().nodes.len()
self.data.read().nodes.len()
}
}
pub fn loop_until<F>(core: &mut Core, timeout: Duration, predicate: F) where F: Fn() -> bool {
/// Loops until `predicate` returns `true` or `timeout` has elapsed.
pub fn loop_until<F>(runtime: &mut Runtime, timeout: Duration, predicate: F)
where F: Send + 'static + Fn() -> bool
{
use futures::Stream;
use tokio::timer::Interval;
let start = Instant::now();
loop {
core.turn(Some(Duration::from_millis(1)));
if predicate() {
break;
}
runtime.block_on(Interval::new_interval(Duration::from_millis(1))
.and_then(move |_| {
if Instant::now() - start > timeout {
panic!("no result in {:?}", timeout);
}
}
Ok(())
})
.take_while(move |_| future::ok(!predicate()))
.for_each(|_| Ok(()))
.then(|_| future::ok::<(), ()>(()))
).unwrap();
}
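
A standalone sketch of the `Interval` + `take_while` polling idiom that the reworked `loop_until` is built on (tokio 0.1 / futures 0.1; the counter-based predicate is illustrative):

extern crate futures;
extern crate tokio;

use std::time::Duration;
use futures::{future, Future, Stream};
use tokio::runtime::Runtime;
use tokio::timer::Interval;

fn main() {
    let mut runtime = Runtime::new().unwrap();
    let mut ticks = 0u32;
    // Poll every millisecond and stop the stream once the predicate flips.
    runtime.block_on(
        Interval::new_interval(Duration::from_millis(1))
            .take_while(move |_| {
                ticks += 1;
                future::ok(ticks < 10)
            })
            .for_each(|_| Ok(()))
            .then(|_| future::ok::<(), ()>(())),
    ).unwrap();
}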
pub fn all_connections_established(cluster: &Arc<ClusterCore>) -> bool {
@ -1261,7 +1292,7 @@ pub mod tests {
.all(|p| cluster.connection(p).is_some())
}
pub fn make_clusters(core: &Core, ports_begin: u16, num_nodes: usize) -> Vec<Arc<ClusterCore>> {
pub fn make_clusters(runtime: &Runtime, ports_begin: u16, num_nodes: usize) -> Vec<Arc<ClusterCore>> {
let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect();
let cluster_params: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration {
threads: 1,
@ -1277,7 +1308,7 @@ pub mod tests {
auto_migrate_enabled: false,
}).collect();
let clusters: Vec<_> = cluster_params.into_iter().enumerate()
.map(|(_, params)| ClusterCore::new(core.handle(), params).unwrap())
.map(|(_, params)| ClusterCore::new(runtime.executor(), params).unwrap())
.collect();
clusters
@ -1292,97 +1323,134 @@ pub mod tests {
}
}
pub fn shutdown_clusters(clusters: &[Arc<ClusterCore>]) {
for cluster in clusters {
cluster.shutdown()
}
}
/// Returns a new runtime with a fixed number of threads.
pub fn new_runtime() -> Runtime {
RuntimeBuilder::new()
.core_threads(4)
.build()
.expect("Unable to create tokio runtime")
}
#[test]
fn cluster_connects_to_other_nodes() {
let mut core = Core::new().unwrap();
let clusters = make_clusters(&core, 6010, 3);
let mut runtime = new_runtime();
let clusters = make_clusters(&runtime, 6010, 3);
run_clusters(&clusters);
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
shutdown_clusters(&clusters);
runtime.shutdown_now().wait().unwrap();
}
#[test]
fn cluster_wont_start_generation_session_if_not_fully_connected() {
let core = Core::new().unwrap();
let clusters = make_clusters(&core, 6013, 3);
let runtime = new_runtime();
let clusters = make_clusters(&runtime, 6013, 3);
clusters[0].run().unwrap();
match clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1) {
Err(Error::NodeDisconnected) => (),
Err(e) => panic!("unexpected error {:?}", e),
_ => panic!("unexpected success"),
}
shutdown_clusters(&clusters);
runtime.shutdown_now().wait().unwrap();
}
#[test]
fn error_in_generation_session_broadcasted_to_all_other_nodes() {
//::logger::init_log();
let mut core = Core::new().unwrap();
let clusters = make_clusters(&core, 6016, 3);
let mut runtime = new_runtime();
let clusters = make_clusters(&runtime, 6016, 3);
run_clusters(&clusters);
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
// ask one of nodes to produce faulty generation sessions
clusters[1].client().make_faulty_generation_sessions();
// start && wait for generation session to fail
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
loop_until(&mut core, TIMEOUT, || session.joint_public_and_secret().is_some()
&& clusters[0].client().generation_session(&SessionId::default()).is_none());
let session_clone = session.clone();
let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_err());
// check that faulty session is either removed from all nodes, or nonexistent (already removed)
for i in 1..3 {
if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) {
let session_clone = session.clone();
let clusters_clone = clusters.clone();
// wait for both session completion && session removal (session completion event is fired
// before session is removed from its own container by cluster)
loop_until(&mut core, TIMEOUT, || session.joint_public_and_secret().is_some()
&& clusters[i].client().generation_session(&SessionId::default()).is_none());
loop_until(&mut runtime, TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
&& clusters_clone[i].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_err());
}
}
shutdown_clusters(&clusters);
runtime.shutdown_now().wait().unwrap();
}
#[test]
fn generation_session_completion_signalled_if_failed_on_master() {
//::logger::init_log();
let mut core = Core::new().unwrap();
let clusters = make_clusters(&core, 6025, 3);
let mut runtime = new_runtime();
let clusters = make_clusters(&runtime, 6025, 3);
run_clusters(&clusters);
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
// ask one of nodes to produce faulty generation sessions
clusters[0].client().make_faulty_generation_sessions();
// start && wait for generation session to fail
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
loop_until(&mut core, TIMEOUT, || session.joint_public_and_secret().is_some()
&& clusters[0].client().generation_session(&SessionId::default()).is_none());
let session_clone = session.clone();
let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_err());
// check that faulty session is either removed from all nodes, or nonexistent (already removed)
for i in 1..3 {
if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) {
let session_clone = session.clone();
let clusters_clone = clusters.clone();
// wait for both session completion && session removal (session completion event is fired
// before session is removed from its own container by cluster)
loop_until(&mut core, TIMEOUT, || session.joint_public_and_secret().is_some()
&& clusters[i].client().generation_session(&SessionId::default()).is_none());
loop_until(&mut runtime, TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
&& clusters_clone[i].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_err());
}
}
shutdown_clusters(&clusters);
runtime.shutdown_now().wait().unwrap();
}
#[test]
fn generation_session_is_removed_when_succeeded() {
//::logger::init_log();
let mut core = Core::new().unwrap();
let clusters = make_clusters(&core, 6019, 3);
let mut runtime = new_runtime();
let clusters = make_clusters(&runtime, 6019, 3);
run_clusters(&clusters);
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
// start && wait for generation session to complete
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
loop_until(&mut core, TIMEOUT, || (session.state() == GenerationSessionState::Finished
|| session.state() == GenerationSessionState::Failed)
&& clusters[0].client().generation_session(&SessionId::default()).is_none());
let session_clone = session.clone();
let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|| session_clone.state() == GenerationSessionState::Failed)
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_ok());
// check that on non-master nodes session is either:
@ -1392,19 +1460,24 @@ pub mod tests {
if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) {
// run to completion if completion message is still on the way
// AND check that it is actually removed from cluster sessions
loop_until(&mut core, TIMEOUT, || (session.state() == GenerationSessionState::Finished
|| session.state() == GenerationSessionState::Failed)
&& clusters[i].client().generation_session(&SessionId::default()).is_none());
let session_clone = session.clone();
let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|| session_clone.state() == GenerationSessionState::Failed)
&& clusters_clone[i].client().generation_session(&SessionId::default()).is_none());
}
}
shutdown_clusters(&clusters);
runtime.shutdown_now().wait().unwrap();
}
#[test]
fn sessions_are_removed_when_initialization_fails() {
let mut core = Core::new().unwrap();
let clusters = make_clusters(&core, 6022, 3);
let mut runtime = new_runtime();
let clusters = make_clusters(&runtime, 6022, 3);
run_clusters(&clusters);
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
// generation session
{
@ -1432,6 +1505,8 @@ pub mod tests {
assert!(clusters[0].data.sessions.decryption_sessions.is_empty());
assert!(clusters[0].data.sessions.negotiation_sessions.is_empty());
}
shutdown_clusters(&clusters);
runtime.shutdown_now().wait().unwrap();
}
// test ignored because of
@ -1441,16 +1516,19 @@ pub mod tests {
#[ignore]
fn schnorr_signing_session_completes_if_node_does_not_have_a_share() {
//::logger::init_log();
let mut core = Core::new().unwrap();
let clusters = make_clusters(&core, 6028, 3);
let mut runtime = new_runtime();
let clusters = make_clusters(&runtime, 6028, 3);
run_clusters(&clusters);
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
// start && wait for generation session to complete
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
loop_until(&mut core, TIMEOUT, || (session.state() == GenerationSessionState::Finished
|| session.state() == GenerationSessionState::Failed)
&& clusters[0].client().generation_session(&SessionId::default()).is_none());
let session_clone = session.clone();
let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|| session_clone.state() == GenerationSessionState::Failed)
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_ok());
// now remove share from node2
@ -1462,8 +1540,10 @@ pub mod tests {
let session0 = clusters[0].client().new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap();
let session = clusters[0].data.sessions.schnorr_signing_sessions.first().unwrap();
loop_until(&mut core, TIMEOUT, || session.is_finished() && (0..3).all(|i|
clusters[i].data.sessions.schnorr_signing_sessions.is_empty()));
let session_clone = session.clone();
let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || session_clone.is_finished() && (0..3).all(|i|
clusters_clone[i].data.sessions.schnorr_signing_sessions.is_empty()));
session0.wait().unwrap();
// and try to sign message with generated key using node that has no key share
@ -1471,8 +1551,10 @@ pub mod tests {
let session2 = clusters[2].client().new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap();
let session = clusters[2].data.sessions.schnorr_signing_sessions.first().unwrap();
loop_until(&mut core, TIMEOUT, || session.is_finished() && (0..3).all(|i|
clusters[i].data.sessions.schnorr_signing_sessions.is_empty()));
let session_clone = session.clone();
let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || session_clone.is_finished() && (0..3).all(|i|
clusters_clone[i].data.sessions.schnorr_signing_sessions.is_empty()));
session2.wait().unwrap();
// now remove share from node1
@ -1483,8 +1565,11 @@ pub mod tests {
let session1 = clusters[0].client().new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap();
let session = clusters[0].data.sessions.schnorr_signing_sessions.first().unwrap();
loop_until(&mut core, TIMEOUT, || session.is_finished());
let session = session.clone();
loop_until(&mut runtime, TIMEOUT, move || session.is_finished());
session1.wait().unwrap_err();
shutdown_clusters(&clusters);
runtime.shutdown_now().wait().unwrap();
}
// test ignored because of
@ -1494,16 +1579,19 @@ pub mod tests {
#[ignore]
fn ecdsa_signing_session_completes_if_node_does_not_have_a_share() {
//::logger::init_log();
let mut core = Core::new().unwrap();
let clusters = make_clusters(&core, 6041, 4);
let mut runtime = new_runtime();
let clusters = make_clusters(&runtime, 6041, 4);
run_clusters(&clusters);
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));
let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
// start && wait for generation session to complete
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
loop_until(&mut core, TIMEOUT, || (session.state() == GenerationSessionState::Finished
|| session.state() == GenerationSessionState::Failed)
&& clusters[0].client().generation_session(&SessionId::default()).is_none());
let session_clone = session.clone();
let clusters_clone = clusters.clone();
loop_until(&mut runtime, TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|| session_clone.state() == GenerationSessionState::Failed)
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_ok());
// now remove share from node2
@ -1515,16 +1603,20 @@ pub mod tests {
let session0 = clusters[0].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap();
let session = clusters[0].data.sessions.ecdsa_signing_sessions.first().unwrap();
loop_until(&mut core, Duration::from_millis(1000), || session.is_finished() && (0..3).all(|i|
clusters[i].data.sessions.ecdsa_signing_sessions.is_empty()));
let session_clone = session.clone();
let clusters_clone = clusters.clone();
loop_until(&mut runtime, Duration::from_millis(1000), move || session_clone.is_finished() && (0..3).all(|i|
clusters_clone[i].data.sessions.ecdsa_signing_sessions.is_empty()));
session0.wait().unwrap();
// and try to sign message with generated key using node that has no key share
let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap();
let session2 = clusters[2].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap();
let session = clusters[2].data.sessions.ecdsa_signing_sessions.first().unwrap();
loop_until(&mut core, Duration::from_millis(1000), || session.is_finished() && (0..3).all(|i|
clusters[i].data.sessions.ecdsa_signing_sessions.is_empty()));
let session_clone = session.clone();
let clusters_clone = clusters.clone();
loop_until(&mut runtime, Duration::from_millis(1000), move || session_clone.is_finished() && (0..3).all(|i|
clusters_clone[i].data.sessions.ecdsa_signing_sessions.is_empty()));
session2.wait().unwrap();
// now remove share from node1
@ -1534,7 +1626,9 @@ pub mod tests {
let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap();
let session1 = clusters[0].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap();
let session = clusters[0].data.sessions.ecdsa_signing_sessions.first().unwrap();
loop_until(&mut core, Duration::from_millis(1000), || session.is_finished());
loop_until(&mut runtime, Duration::from_millis(1000), move || session.is_finished());
session1.wait().unwrap_err();
shutdown_clusters(&clusters);
runtime.shutdown_now().wait().unwrap();
}
}

View File

@ -16,18 +16,34 @@
use std::io;
use std::time::Duration;
use futures::{Future, Select, Poll, Async};
use tokio_core::reactor::{Handle, Timeout};
use futures::{Future, Poll};
use tokio::timer::timeout::{Timeout, Error as TimeoutError};
type DeadlineBox<F> = Box<Future<Item = DeadlineStatus<<F as Future>::Item>, Error = <F as Future>::Error> + Send>;
type DeadlineBox<F> = Box<Future<
Item = DeadlineStatus<<F as Future>::Item>,
Error = TimeoutError<<F as Future>::Error>
> + Send>;
/// Complete a passed future or fail if it is not completed within timeout.
pub fn deadline<F, T>(duration: Duration, handle: &Handle, future: F) -> Result<Deadline<F>, io::Error>
where F: Future<Item = T, Error = io::Error> + Send + 'static, T: 'static {
let timeout: DeadlineBox<F> = Box::new(Timeout::new(duration, handle)?.map(|_| DeadlineStatus::Timeout));
let future: DeadlineBox<F> = Box::new(future.map(DeadlineStatus::Meet));
pub fn deadline<F, T>(duration: Duration, future: F) -> Result<Deadline<F>, io::Error>
where F: Future<Item = T, Error = io::Error> + Send + 'static, T: Send + 'static
{
let timeout = Box::new(Timeout::new(future, duration)
.then(|res| {
match res {
Ok(fut) => Ok(DeadlineStatus::Meet(fut)),
Err(err) => {
if err.is_elapsed() {
Ok(DeadlineStatus::Timeout)
} else {
Err(err)
}
},
}
})
);
let deadline = Deadline {
future: timeout.select(future),
future: timeout,
};
Ok(deadline)
}
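A hedged usage sketch of the reworked `deadline` (the function name is illustrative): an already-completed future resolves to `DeadlineStatus::Meet` because `Timeout` polls the wrapped future before consulting the timer, which is also why the test further down can simply `wait()` on the result.

```rust
fn deadline_usage() {
    use std::io;
    use std::time::Duration;
    use futures::{future, Future};

    // An already-completed future comfortably beats a one-second deadline,
    // so we expect `DeadlineStatus::Meet(())` rather than `Timeout`.
    let work = future::ok::<(), io::Error>(());
    let guarded = deadline(Duration::from_secs(1), work).expect("creating the timeout cannot fail here");
    match guarded.wait() {
        Ok(DeadlineStatus::Meet(())) => (),
        Ok(DeadlineStatus::Timeout) => panic!("unexpected timeout"),
        Err(err) => panic!("timer error: {:?}", err),
    }
}
```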
@ -43,19 +59,15 @@ pub enum DeadlineStatus<T> {
/// Future, which waits for passed future completion within given period, or fails with timeout.
pub struct Deadline<F> where F: Future {
future: Select<DeadlineBox<F>, DeadlineBox<F>>,
future: DeadlineBox<F>,
}
impl<F, T> Future for Deadline<F> where F: Future<Item = T, Error = io::Error> {
type Item = DeadlineStatus<T>;
type Error = io::Error;
type Error = TimeoutError<io::Error>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.future.poll() {
Ok(Async::Ready((result, _other))) => Ok(Async::Ready(result)),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err((err, _other)) => Err(err),
}
self.future.poll()
}
}
@ -63,14 +75,14 @@ impl<F, T> Future for Deadline<F> where F: Future<Item = T, Error = io::Error> {
mod tests {
use std::time::Duration;
use futures::{Future, done};
use tokio_core::reactor::Core;
use tokio::reactor::Reactor;
use super::{deadline, DeadlineStatus};
#[test]
fn deadline_result_works() {
let mut core = Core::new().unwrap();
let deadline = deadline(Duration::from_millis(1000), &core.handle(), done(Ok(()))).unwrap();
core.turn(Some(Duration::from_millis(3)));
let mut reactor = Reactor::new().unwrap();
let deadline = deadline(Duration::from_millis(1000), done(Ok(()))).unwrap();
reactor.turn(Some(Duration::from_millis(3))).unwrap();
assert_eq!(deadline.wait().unwrap(), DeadlineStatus::Meet(()));
}
}

View File

@ -19,7 +19,7 @@ use std::net::Shutdown;
use std::io::{Read, Write, Error};
use futures::Poll;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_core::net::TcpStream;
use tokio::net::TcpStream;
/// Read+Write implementation for Arc<TcpStream>.
pub struct SharedTcpStream {

View File

@ -19,20 +19,23 @@ use std::sync::Arc;
use std::net::SocketAddr;
use std::time::Duration;
use futures::{Future, Poll};
use tokio_core::reactor::Handle;
use tokio_core::net::TcpStream;
use tokio::net::TcpStream;
use key_server_cluster::{Error, NodeKeyPair};
use key_server_cluster::io::{accept_handshake, Handshake, Deadline, deadline};
use key_server_cluster::net::Connection;
/// Create future for accepting incoming connection.
pub fn accept_connection(address: SocketAddr, stream: TcpStream, handle: &Handle, self_key_pair: Arc<NodeKeyPair>) -> Deadline<AcceptConnection> {
pub fn accept_connection(stream: TcpStream, self_key_pair: Arc<NodeKeyPair>) -> Deadline<AcceptConnection> {
// TODO: This could fail so it would be better either to accept the
// address as a separate argument or return a result.
let address = stream.peer_addr().expect("Unable to determine tcp peer address");
let accept = AcceptConnection {
handshake: accept_handshake(stream, self_key_pair),
address: address,
};
deadline(Duration::new(5, 0), handle, accept).expect("Failed to create timeout")
deadline(Duration::new(5, 0), accept).expect("Failed to create timeout")
}
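One possible shape for the fallible variant the TODO hints at, as a hedged sketch (the `try_accept_connection` name is hypothetical, not part of this change):

```rust
use std::io;

// Hypothetical fallible variant: surface the `peer_addr()` error to the
// caller instead of panicking while building the connection future.
pub fn try_accept_connection(stream: TcpStream, self_key_pair: Arc<NodeKeyPair>)
    -> Result<Deadline<AcceptConnection>, io::Error>
{
    let address = stream.peer_addr()?;
    let accept = AcceptConnection {
        handshake: accept_handshake(stream, self_key_pair),
        address: address,
    };
    deadline(Duration::new(5, 0), accept)
}
```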
/// Future for accepting incoming connection.

View File

@ -20,26 +20,25 @@ use std::io;
use std::time::Duration;
use std::net::SocketAddr;
use futures::{Future, Poll, Async};
use tokio_core::reactor::Handle;
use tokio_core::net::{TcpStream, TcpStreamNew};
use tokio::net::{TcpStream, tcp::ConnectFuture};
use key_server_cluster::{Error, NodeId, NodeKeyPair};
use key_server_cluster::io::{handshake, Handshake, Deadline, deadline};
use key_server_cluster::net::Connection;
/// Create future for connecting to other node.
pub fn connect(address: &SocketAddr, handle: &Handle, self_key_pair: Arc<NodeKeyPair>, trusted_nodes: BTreeSet<NodeId>) -> Deadline<Connect> {
pub fn connect(address: &SocketAddr, self_key_pair: Arc<NodeKeyPair>, trusted_nodes: BTreeSet<NodeId>) -> Deadline<Connect> {
let connect = Connect {
state: ConnectState::TcpConnect(TcpStream::connect(address, handle)),
state: ConnectState::TcpConnect(TcpStream::connect(address)),
address: address.clone(),
self_key_pair: self_key_pair,
trusted_nodes: trusted_nodes,
};
deadline(Duration::new(5, 0), handle, connect).expect("Failed to create timeout")
deadline(Duration::new(5, 0), connect).expect("Failed to create timeout")
}
enum ConnectState {
TcpConnect(TcpStreamNew),
TcpConnect(ConnectFuture),
Handshake(Handshake<TcpStream>),
Connected,
}
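For reference, a minimal standalone sketch of the handle-free connect API this hunk switches to (assuming tokio 0.1; the address and printouts are illustrative): `TcpStream::connect` now returns a `ConnectFuture` that binds to whichever runtime polls it, so no reactor `Handle` needs to be threaded through.

```rust
extern crate futures;
extern crate tokio;

use std::net::SocketAddr;
use futures::Future;
use tokio::net::TcpStream;

fn main() {
    let addr: SocketAddr = "127.0.0.1:8080".parse().unwrap();
    // `connect` no longer takes a reactor `Handle`; the returned `ConnectFuture`
    // is driven by the runtime it is spawned onto.
    let fut = TcpStream::connect(&addr)
        .map(|stream| println!("connected, local address {:?}", stream.local_addr()))
        .map_err(|e| eprintln!("connection failed: {}", e));
    tokio::run(fut);
}
```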

View File

@ -24,7 +24,6 @@ extern crate ethcore_sync as sync;
extern crate ethcore_transaction as transaction;
extern crate ethereum_types;
extern crate ethkey;
extern crate futures_cpupool;
extern crate hyper;
extern crate keccak_hash as hash;
extern crate kvdb;
@ -34,9 +33,7 @@ extern crate serde;
extern crate serde_json;
extern crate tiny_keccak;
extern crate tokio;
extern crate tokio_core;
extern crate tokio_io;
extern crate tokio_proto;
extern crate tokio_service;
extern crate url;

View File

@ -16,14 +16,17 @@
use std::collections::BTreeSet;
use std::sync::{Arc, Weak};
use hyper::{self, header, Chunk, Uri, Request as HttpRequest, Response as HttpResponse, Method as HttpMethod, StatusCode as HttpStatusCode};
use hyper::server::Http;
use hyper::{self, Uri, Request as HttpRequest, Response as HttpResponse, Method as HttpMethod,
StatusCode as HttpStatusCode, Body,
header::{self, HeaderValue},
server::conn::Http,
service::Service,
};
use serde::Serialize;
use serde_json;
use tokio;
use tokio::net::TcpListener;
use tokio::runtime::Runtime;
use tokio_service::Service;
use tokio::runtime::{Runtime, Builder as RuntimeBuilder};
use futures::{future, Future, Stream};
use url::percent_encoding::percent_decode;
@ -88,7 +91,10 @@ impl KeyServerHttpListener {
key_server: key_server,
});
let mut runtime = Runtime::new()?;
let mut runtime = RuntimeBuilder::new()
// TODO: Add config option/arg?
.core_threads(2)
.build()?;
let listener_address = format!("{}:{}", listener_address.address, listener_address.port).parse()?;
let listener = TcpListener::bind(&listener_address)?;
@ -97,10 +103,10 @@ impl KeyServerHttpListener {
let server = listener.incoming()
.map_err(|e| warn!("Key server listener error: {:?}", e))
.for_each(move |socket| {
let http: Http<Chunk> = Http::new();
let serve = http.serve_connection(socket, KeyServerHttpHandler {
handler: shared_handler2.clone(),
}).map(|_| ()).map_err(|e| {
let http = Http::new();
let serve = http.serve_connection(socket,
KeyServerHttpHandler { handler: shared_handler2.clone() }
).map(|_| ()).map_err(|e| {
warn!("Key server handler error: {:?}", e);
});
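A self-contained sketch of the same accept loop with a trivial `service_fn_ok` handler standing in for `KeyServerHttpHandler` (assuming hyper 0.12 and tokio 0.1; the address and response body are illustrative):

```rust
extern crate futures;
extern crate hyper;
extern crate tokio;

use std::net::SocketAddr;
use futures::{Future, Stream};
use hyper::{Body, Response, server::conn::Http, service::service_fn_ok};
use tokio::net::TcpListener;

fn main() {
    let addr: SocketAddr = "127.0.0.1:3000".parse().unwrap();
    let listener = TcpListener::bind(&addr).expect("bind failed");

    let server = listener.incoming()
        .map_err(|e| eprintln!("accept error: {}", e))
        .for_each(|socket| {
            // One HTTP connection state machine per accepted socket,
            // spawned onto the runtime that drives this loop.
            let service = service_fn_ok(|_req| Response::new(Body::from("hello")));
            let conn = Http::new()
                .serve_connection(socket, service)
                .map(|_| ())
                .map_err(|e| eprintln!("connection error: {}", e));
            tokio::spawn(conn);
            Ok(())
        });

    tokio::run(server);
}
```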
@ -119,7 +125,7 @@ impl KeyServerHttpListener {
}
impl KeyServerHttpHandler {
fn process(self, req_method: HttpMethod, req_uri: Uri, path: &str, req_body: &[u8]) -> HttpResponse {
fn process(self, req_method: HttpMethod, req_uri: Uri, path: &str, req_body: &[u8]) -> HttpResponse<Body> {
match parse_request(&req_method, &path, &req_body) {
Request::GenerateServerKey(document, signature, threshold) => {
return_server_public_key(&req_uri, self.handler.key_server.upgrade()
@ -195,22 +201,28 @@ impl KeyServerHttpHandler {
},
Request::Invalid => {
warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri);
HttpResponse::new().with_status(HttpStatusCode::BadRequest)
HttpResponse::builder()
.status(HttpStatusCode::BAD_REQUEST)
.body(Body::empty())
.expect("Nothing to parse, cannot fail; qed")
},
}
}
}
impl Service for KeyServerHttpHandler {
type Request = HttpRequest;
type Response = HttpResponse;
type ReqBody = Body;
type ResBody = Body;
type Error = hyper::Error;
type Future = Box<Future<Item=Self::Response, Error=Self::Error> + Send>;
type Future = Box<Future<Item = HttpResponse<Self::ResBody>, Error=Self::Error> + Send>;
fn call(&self, req: HttpRequest) -> Self::Future {
if req.headers().has::<header::Origin>() {
fn call(&mut self, req: HttpRequest<Body>) -> Self::Future {
if req.headers().contains_key(header::ORIGIN) {
warn!(target: "secretstore", "Ignoring {}-request {} with Origin header", req.method(), req.uri());
return Box::new(future::ok(HttpResponse::new().with_status(HttpStatusCode::NotFound)));
return Box::new(future::ok(HttpResponse::builder()
.status(HttpStatusCode::NOT_FOUND)
.body(Body::empty())
.expect("Nothing to parse, cannot fail; qed")))
}
let req_method = req.method().clone();
@ -218,35 +230,40 @@ impl Service for KeyServerHttpHandler {
// We cannot consume Self because of the Service trait requirement.
let this = self.clone();
Box::new(req.body().concat2().map(move |body| {
Box::new(req.into_body().concat2().map(move |body| {
let path = req_uri.path().to_string();
if path.starts_with("/") {
this.process(req_method, req_uri, &path, &body)
} else {
warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri);
HttpResponse::new().with_status(HttpStatusCode::NotFound)
HttpResponse::builder()
.status(HttpStatusCode::NOT_FOUND)
.body(Body::empty())
.expect("Nothing to parse, cannot fail; qed")
}
}))
}
}
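For comparison, the reshaped `hyper::service::Service` trait (hyper 0.12) in its smallest form, as a hedged sketch with a hypothetical `Echo` service: the typed `Request`/`Response` associated types are replaced by `ReqBody`/`ResBody`, and `call` now takes `&mut self` and a typed `Request<Body>`.

```rust
extern crate futures;
extern crate hyper;

use futures::{future, Future};
use hyper::{Body, Request, Response, service::Service};

struct Echo;

impl Service for Echo {
    type ReqBody = Body;
    type ResBody = Body;
    type Error = hyper::Error;
    type Future = Box<Future<Item = Response<Body>, Error = hyper::Error> + Send>;

    // `call` takes `&mut self`; the request and response are parameterised by body type.
    fn call(&mut self, req: Request<Body>) -> Self::Future {
        let reply = format!("{} {}", req.method(), req.uri());
        Box::new(future::ok(Response::new(Body::from(reply))))
    }
}
```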
fn return_empty(req_uri: &Uri, empty: Result<(), Error>) -> HttpResponse {
fn return_empty(req_uri: &Uri, empty: Result<(), Error>) -> HttpResponse<Body> {
return_bytes::<i32>(req_uri, empty.map(|_| None))
}
fn return_server_public_key(req_uri: &Uri, server_public: Result<Public, Error>) -> HttpResponse {
fn return_server_public_key(req_uri: &Uri, server_public: Result<Public, Error>) -> HttpResponse<Body> {
return_bytes(req_uri, server_public.map(|k| Some(SerializablePublic(k))))
}
fn return_message_signature(req_uri: &Uri, signature: Result<EncryptedDocumentKey, Error>) -> HttpResponse {
fn return_message_signature(req_uri: &Uri, signature: Result<EncryptedDocumentKey, Error>) -> HttpResponse<Body> {
return_bytes(req_uri, signature.map(|s| Some(SerializableBytes(s))))
}
fn return_document_key(req_uri: &Uri, document_key: Result<EncryptedDocumentKey, Error>) -> HttpResponse {
fn return_document_key(req_uri: &Uri, document_key: Result<EncryptedDocumentKey, Error>) -> HttpResponse<Body> {
return_bytes(req_uri, document_key.map(|k| Some(SerializableBytes(k))))
}
fn return_document_key_shadow(req_uri: &Uri, document_key_shadow: Result<EncryptedDocumentKeyShadow, Error>) -> HttpResponse {
fn return_document_key_shadow(req_uri: &Uri, document_key_shadow: Result<EncryptedDocumentKeyShadow, Error>)
-> HttpResponse<Body>
{
return_bytes(req_uri, document_key_shadow.map(|k| Some(SerializableEncryptedDocumentKeyShadow {
decrypted_secret: k.decrypted_secret.into(),
common_point: k.common_point.expect("always filled when requesting document_key_shadow; qed").into(),
@ -254,42 +271,65 @@ fn return_document_key_shadow(req_uri: &Uri, document_key_shadow: Result<Encrypt
})))
}
fn return_bytes<T: Serialize>(req_uri: &Uri, result: Result<Option<T>, Error>) -> HttpResponse {
fn return_bytes<T: Serialize>(req_uri: &Uri, result: Result<Option<T>, Error>) -> HttpResponse<Body> {
match result {
Ok(Some(result)) => match serde_json::to_vec(&result) {
Ok(result) => HttpResponse::new()
.with_header(header::ContentType::json())
.with_body(result),
Ok(result) => {
let body: Body = result.into();
HttpResponse::builder()
.header(header::CONTENT_TYPE, HeaderValue::from_static("application/json; charset=utf-8"))
.body(body)
.expect("Error creating http response")
},
Err(err) => {
warn!(target: "secretstore", "response to request {} has failed with: {}", req_uri, err);
HttpResponse::new().with_status(HttpStatusCode::InternalServerError)
HttpResponse::builder()
.status(HttpStatusCode::INTERNAL_SERVER_ERROR)
.body(Body::empty())
.expect("Nothing to parse, cannot fail; qed")
}
},
Ok(None) => HttpResponse::new().with_status(HttpStatusCode::Ok),
Ok(None) => {
HttpResponse::builder()
.status(HttpStatusCode::OK)
.body(Body::empty())
.expect("Nothing to parse, cannot fail; qed")
},
Err(err) => return_error(err),
}
}
fn return_error(err: Error) -> HttpResponse {
let mut res = HttpResponse::new().with_status(match err {
Error::AccessDenied | Error::ConsensusUnreachable | Error::ConsensusTemporaryUnreachable =>
HttpStatusCode::Forbidden,
Error::ServerKeyIsNotFound | Error::DocumentKeyIsNotFound =>
HttpStatusCode::NotFound,
Error::InsufficientRequesterData(_) | Error::Hyper(_) | Error::Serde(_)
| Error::DocumentKeyAlreadyStored | Error::ServerKeyAlreadyGenerated =>
HttpStatusCode::BadRequest,
_ => HttpStatusCode::InternalServerError,
});
fn return_error(err: Error) -> HttpResponse<Body> {
let status = match err {
| Error::AccessDenied
| Error::ConsensusUnreachable
| Error::ConsensusTemporaryUnreachable =>
HttpStatusCode::FORBIDDEN,
| Error::ServerKeyIsNotFound
| Error::DocumentKeyIsNotFound =>
HttpStatusCode::NOT_FOUND,
| Error::InsufficientRequesterData(_)
| Error::Hyper(_)
| Error::Serde(_)
| Error::DocumentKeyAlreadyStored
| Error::ServerKeyAlreadyGenerated =>
HttpStatusCode::BAD_REQUEST,
_ => HttpStatusCode::INTERNAL_SERVER_ERROR,
};
let mut res = HttpResponse::builder();
res.status(status);
// return error text. ignore errors when returning error
let error_text = format!("\"{}\"", err);
if let Ok(error_text) = serde_json::to_vec(&error_text) {
res.headers_mut().set(header::ContentType::json());
res.set_body(error_text);
res.header(header::CONTENT_TYPE, HeaderValue::from_static("application/json; charset=utf-8"));
res.body(error_text.into())
.expect("`error_text` is a formatted string, parsing cannot fail; qed")
} else {
res.body(Body::empty())
.expect("Nothing to parse, cannot fail; qed")
}
res
}
fn parse_request(method: &HttpMethod, uri_path: &str, body: &[u8]) -> Request {
@ -328,19 +368,19 @@ fn parse_request(method: &HttpMethod, uri_path: &str, body: &[u8]) -> Request {
let common_point = path.get(args_offset + 2).map(|v| v.parse());
let encrypted_key = path.get(args_offset + 3).map(|v| v.parse());
match (prefix, args_count, method, threshold, message_hash, common_point, encrypted_key) {
("shadow", 3, &HttpMethod::Post, Some(Ok(threshold)), _, _, _) =>
("shadow", 3, &HttpMethod::POST, Some(Ok(threshold)), _, _, _) =>
Request::GenerateServerKey(document, signature, threshold),
("shadow", 4, &HttpMethod::Post, _, _, Some(Ok(common_point)), Some(Ok(encrypted_key))) =>
("shadow", 4, &HttpMethod::POST, _, _, Some(Ok(common_point)), Some(Ok(encrypted_key))) =>
Request::StoreDocumentKey(document, signature, common_point, encrypted_key),
("", 3, &HttpMethod::Post, Some(Ok(threshold)), _, _, _) =>
("", 3, &HttpMethod::POST, Some(Ok(threshold)), _, _, _) =>
Request::GenerateDocumentKey(document, signature, threshold),
("", 2, &HttpMethod::Get, _, _, _, _) =>
("", 2, &HttpMethod::GET, _, _, _, _) =>
Request::GetDocumentKey(document, signature),
("shadow", 2, &HttpMethod::Get, _, _, _, _) =>
("shadow", 2, &HttpMethod::GET, _, _, _, _) =>
Request::GetDocumentKeyShadow(document, signature),
("schnorr", 3, &HttpMethod::Get, _, Some(Ok(message_hash)), _, _) =>
("schnorr", 3, &HttpMethod::GET, _, Some(Ok(message_hash)), _, _) =>
Request::SchnorrSignMessage(document, signature, message_hash),
("ecdsa", 3, &HttpMethod::Get, _, Some(Ok(message_hash)), _, _) =>
("ecdsa", 3, &HttpMethod::GET, _, Some(Ok(message_hash)), _, _) =>
Request::EcdsaSignMessage(document, signature, message_hash),
_ => Request::Invalid,
}
@ -348,7 +388,7 @@ fn parse_request(method: &HttpMethod, uri_path: &str, body: &[u8]) -> Request {
fn parse_admin_request(method: &HttpMethod, path: Vec<String>, body: &[u8]) -> Request {
let args_count = path.len();
if *method != HttpMethod::Post || args_count != 4 || path[1] != "servers_set_change" {
if *method != HttpMethod::POST || args_count != 4 || path[1] != "servers_set_change" {
return Request::Invalid;
}
@ -392,39 +432,39 @@ mod tests {
#[test]
fn parse_request_successful() {
// POST /shadow/{server_key_id}/{signature}/{threshold} => generate server key
assert_eq!(parse_request(&HttpMethod::Post, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2", Default::default()),
assert_eq!(parse_request(&HttpMethod::POST, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2", Default::default()),
Request::GenerateServerKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
2));
// POST /shadow/{server_key_id}/{signature}/{common_point}/{encrypted_key} => store encrypted document key
assert_eq!(parse_request(&HttpMethod::Post, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8/1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb", Default::default()),
assert_eq!(parse_request(&HttpMethod::POST, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8/1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb", Default::default()),
Request::StoreDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse().unwrap(),
"1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb".parse().unwrap()));
// POST /{server_key_id}/{signature}/{threshold} => generate server && document key
assert_eq!(parse_request(&HttpMethod::Post, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2", Default::default()),
assert_eq!(parse_request(&HttpMethod::POST, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2", Default::default()),
Request::GenerateDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
2));
// GET /{server_key_id}/{signature} => get document key
assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
assert_eq!(parse_request(&HttpMethod::GET, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap()));
assert_eq!(parse_request(&HttpMethod::Get, "/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
assert_eq!(parse_request(&HttpMethod::GET, "/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap()));
// GET /shadow/{server_key_id}/{signature} => get document key shadow
assert_eq!(parse_request(&HttpMethod::Get, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
assert_eq!(parse_request(&HttpMethod::GET, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
Request::GetDocumentKeyShadow("0000000000000000000000000000000000000000000000000000000000000001".into(),
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap()));
// GET /schnorr/{server_key_id}/{signature}/{message_hash} => schnorr-sign message with server key
assert_eq!(parse_request(&HttpMethod::Get, "/schnorr/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c", Default::default()),
assert_eq!(parse_request(&HttpMethod::GET, "/schnorr/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c", Default::default()),
Request::SchnorrSignMessage("0000000000000000000000000000000000000000000000000000000000000001".into(),
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
"281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap()));
// GET /ecdsa/{server_key_id}/{signature}/{message_hash} => ecdsa-sign message with server key
assert_eq!(parse_request(&HttpMethod::Get, "/ecdsa/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c", Default::default()),
assert_eq!(parse_request(&HttpMethod::GET, "/ecdsa/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c", Default::default()),
Request::EcdsaSignMessage("0000000000000000000000000000000000000000000000000000000000000001".into(),
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
"281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap()));
@ -432,7 +472,7 @@ mod tests {
let node1: Public = "843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91".parse().unwrap();
let node2: Public = "07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3".parse().unwrap();
let nodes = vec![node1, node2].into_iter().collect();
assert_eq!(parse_request(&HttpMethod::Post, "/admin/servers_set_change/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01",
assert_eq!(parse_request(&HttpMethod::POST, "/admin/servers_set_change/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01",
&r#"["0x843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91",
"0x07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3"]"#.as_bytes()),
Request::ChangeServersSet(
@ -444,20 +484,20 @@ mod tests {
#[test]
fn parse_request_failed() {
assert_eq!(parse_request(&HttpMethod::Get, "", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::Get, "/shadow", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::Get, "///2", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::Get, "/shadow///2", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::Get, "/a/b", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::Get, "/schnorr/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::Get, "/ecdsa/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::Post, "/admin/servers_set_change/xxx/yyy",
assert_eq!(parse_request(&HttpMethod::GET, "", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::GET, "/shadow", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::GET, "///2", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::GET, "/shadow///2", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::GET, "/0000000000000000000000000000000000000000000000000000000000000001", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::GET, "/0000000000000000000000000000000000000000000000000000000000000001/", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::GET, "/a/b", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::GET, "/schnorr/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::GET, "/ecdsa/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002", Default::default()), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::POST, "/admin/servers_set_change/xxx/yyy",
&r#"["0x843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91",
"0x07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3"]"#.as_bytes()),
Request::Invalid);
assert_eq!(parse_request(&HttpMethod::Post, "/admin/servers_set_change/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", "".as_bytes()),
assert_eq!(parse_request(&HttpMethod::POST, "/admin/servers_set_change/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", "".as_bytes()),
Request::Invalid);
}
}

View File

@ -8,4 +8,4 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
fetch = { path = "../fetch" }
futures = "0.1"
hyper = "0.11"
hyper = "0.12"

View File

@ -18,7 +18,7 @@ extern crate fetch;
extern crate hyper;
extern crate futures;
use hyper::StatusCode;
use hyper::{StatusCode, Body};
use futures::{future, future::FutureResult};
use fetch::{Fetch, Url, Request};
@ -39,10 +39,13 @@ impl<T: 'static> Fetch for FakeFetch<T> where T: Clone + Send+ Sync {
fn fetch(&self, request: Request, abort: fetch::Abort) -> Self::Result {
let u = request.url().clone();
future::ok(if self.val.is_some() {
let r = hyper::Response::new().with_body(&b"Some content"[..]);
let r = hyper::Response::new("Some content".into());
fetch::client::Response::new(u, r, abort)
} else {
fetch::client::Response::new(u, hyper::Response::new().with_status(StatusCode::NotFound), abort)
let r = hyper::Response::builder()
.status(StatusCode::NOT_FOUND)
.body(Body::empty()).expect("Nothing to parse, can not fail; qed");
fetch::client::Response::new(u, r, abort)
})
}

View File

@ -8,11 +8,11 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
futures = "0.1"
hyper = "0.11"
hyper-rustls = "0.11"
hyper = "~0.12.9"
hyper-rustls = "0.14"
http = "0.1"
log = "0.4"
tokio-core = "0.1"
tokio-timer = "0.1"
tokio = "~0.1.8"
url = "1"
bytes = "0.4"

View File

@ -17,8 +17,7 @@
use futures::future::{self, Loop};
use futures::sync::{mpsc, oneshot};
use futures::{self, Future, Async, Sink, Stream};
use hyper::header::{UserAgent, Location, ContentLength, ContentType};
use hyper::mime::Mime;
use hyper::header::{self, HeaderMap, HeaderValue, IntoHeaderName};
use hyper::{self, Method, StatusCode};
use hyper_rustls;
use std;
@ -29,8 +28,7 @@ use std::sync::mpsc::RecvTimeoutError;
use std::thread;
use std::time::Duration;
use std::{io, fmt};
use tokio_core::reactor;
use tokio_timer::{self, Timer};
use tokio::{self, util::FutureExt};
use url::{self, Url};
use bytes::Bytes;
@ -131,7 +129,7 @@ pub trait Fetch: Clone + Send + Sync + 'static {
}
type TxResponse = oneshot::Sender<Result<Response, Error>>;
type TxStartup = std::sync::mpsc::SyncSender<Result<(), io::Error>>;
type TxStartup = std::sync::mpsc::SyncSender<Result<(), tokio::io::Error>>;
type ChanItem = Option<(Request, Abort, TxResponse)>;
/// An implementation of `Fetch` using a `hyper` client.
@ -140,9 +138,8 @@ type ChanItem = Option<(Request, Abort, TxResponse)>;
// not implement `Send` currently.
#[derive(Debug)]
pub struct Client {
core: mpsc::Sender<ChanItem>,
runtime: mpsc::Sender<ChanItem>,
refs: Arc<AtomicUsize>,
timer: Timer,
}
// When cloning a client we increment the internal reference counter.
@ -150,9 +147,8 @@ impl Clone for Client {
fn clone(&self) -> Client {
self.refs.fetch_add(1, Ordering::SeqCst);
Client {
core: self.core.clone(),
runtime: self.runtime.clone(),
refs: self.refs.clone(),
timer: self.timer.clone(),
}
}
}
@ -163,7 +159,7 @@ impl Drop for Client {
fn drop(&mut self) {
if self.refs.fetch_sub(1, Ordering::SeqCst) == 1 {
// ignore send error as it means the background thread is gone already
let _ = self.core.clone().send(None).wait();
let _ = self.runtime.clone().send(None).wait();
}
}
}
@ -193,23 +189,20 @@ impl Client {
}
Ok(Client {
core: tx_proto,
runtime: tx_proto,
refs: Arc::new(AtomicUsize::new(1)),
timer: Timer::default(),
})
}
fn background_thread(tx_start: TxStartup, rx_proto: mpsc::Receiver<ChanItem>, num_dns_threads: usize) -> io::Result<thread::JoinHandle<()>> {
thread::Builder::new().name("fetch".into()).spawn(move || {
let mut core = match reactor::Core::new() {
let mut runtime = match tokio::runtime::current_thread::Runtime::new() {
Ok(c) => c,
Err(e) => return tx_start.send(Err(e)).unwrap_or(())
};
let handle = core.handle();
let hyper = hyper::Client::configure()
.connector(hyper_rustls::HttpsConnector::new(num_dns_threads, &core.handle()))
.build(&core.handle());
let hyper = hyper::Client::builder()
.build(hyper_rustls::HttpsConnector::new(num_dns_threads));
let future = rx_proto.take_while(|item| Ok(item.is_some()))
.map(|item| item.expect("`take_while` is only passing on channel items != None; qed"))
@ -241,14 +234,19 @@ impl Client {
request2.set_url(next_url);
request2
} else {
Request::new(next_url, Method::Get)
Request::new(next_url, Method::GET)
};
Ok(Loop::Continue((client, request, abort, redirects + 1)))
} else {
let content_len = resp.headers.get::<ContentLength>().cloned();
if content_len.map(|n| *n > abort.max_size() as u64).unwrap_or(false) {
if let Some(ref h_val) = resp.headers.get(header::CONTENT_LENGTH) {
let content_len = h_val
.to_str()?
.parse::<u64>()?;
if content_len > abort.max_size() as u64 {
return Err(Error::SizeLimit)
}
}
Ok(Loop::Break(resp))
}
})
@ -256,7 +254,7 @@ impl Client {
.then(|result| {
future::ok(sender.send(result).unwrap_or(()))
});
handle.spawn(fut);
tokio::spawn(fut);
trace!(target: "fetch", "waiting for next request ...");
future::ok(())
});
@ -264,7 +262,7 @@ impl Client {
tx_start.send(Ok(())).unwrap_or(());
debug!(target: "fetch", "processing requests ...");
if let Err(()) = core.run(future) {
if let Err(()) = runtime.block_on(future) {
error!(target: "fetch", "error while executing future")
}
debug!(target: "fetch", "fetch background thread finished")
@ -273,7 +271,7 @@ impl Client {
}
impl Fetch for Client {
type Result = Box<Future<Item=Response, Error=Error> + Send>;
type Result = Box<Future<Item=Response, Error=Error> + Send + 'static>;
fn fetch(&self, request: Request, abort: Abort) -> Self::Result {
debug!(target: "fetch", "fetching: {:?}", request.url());
@ -282,7 +280,7 @@ impl Fetch for Client {
}
let (tx_res, rx_res) = oneshot::channel();
let maxdur = abort.max_duration();
let sender = self.core.clone();
let sender = self.runtime.clone();
let future = sender.send(Some((request, abort, tx_res)))
.map_err(|e| {
error!(target: "fetch", "failed to schedule request: {}", e);
@ -291,7 +289,15 @@ impl Fetch for Client {
.and_then(|_| rx_res.map_err(|oneshot::Canceled| Error::BackgroundThreadDead))
.and_then(future::result);
Box::new(self.timer.timeout(future, maxdur))
Box::new(future.timeout(maxdur)
.map_err(|err| {
if err.is_inner() {
Error::from(err.into_inner().unwrap())
} else {
Error::from(err)
}
})
)
}
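A hedged sketch of the `tokio::util::FutureExt::timeout` behaviour this relies on (assuming tokio 0.1): the wrapper's error type carries either the inner future's error, an elapsed deadline, or a timer error, distinguished the same way as above.

```rust
extern crate futures;
extern crate tokio;

use std::time::Duration;
use futures::{future, Future};
use tokio::util::FutureExt;

fn main() {
    // A future that fails with its own error; the timeout wrapper reports it
    // as an "inner" error, while a slow future would surface `is_elapsed()`.
    let work = future::err::<(), &'static str>("boom")
        .timeout(Duration::from_secs(1))
        .then(|res| {
            match res {
                Ok(_) => println!("finished in time"),
                Err(ref e) if e.is_inner() => println!("inner error: {:?}", e),
                Err(ref e) if e.is_elapsed() => println!("timed out"),
                Err(e) => println!("timer error: {:?}", e),
            }
            Ok::<(), ()>(())
        });
    tokio::run(work);
}
```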
/// Get content from some URL.
@ -315,22 +321,21 @@ impl Fetch for Client {
// Extract redirect location from response. The second return value indicates whether the original method should be preserved.
fn redirect_location(u: Url, r: &Response) -> Option<(Url, bool)> {
use hyper::StatusCode::*;
let preserve_method = match r.status() {
TemporaryRedirect | PermanentRedirect => true,
StatusCode::TEMPORARY_REDIRECT | StatusCode::PERMANENT_REDIRECT => true,
_ => false,
};
match r.status() {
MovedPermanently
| PermanentRedirect
| TemporaryRedirect
| Found
| SeeOther => {
if let Some(loc) = r.headers.get::<Location>() {
u.join(loc).ok().map(|url| (url, preserve_method))
} else {
None
}
StatusCode::MOVED_PERMANENTLY
| StatusCode::PERMANENT_REDIRECT
| StatusCode::TEMPORARY_REDIRECT
| StatusCode::FOUND
| StatusCode::SEE_OTHER => {
r.headers.get(header::LOCATION).and_then(|loc| {
loc.to_str().ok().and_then(|loc_s| {
u.join(loc_s).ok().map(|url| (url, preserve_method))
})
})
}
_ => None
}
@ -341,7 +346,7 @@ fn redirect_location(u: Url, r: &Response) -> Option<(Url, bool)> {
pub struct Request {
url: Url,
method: Method,
headers: hyper::Headers,
headers: HeaderMap,
body: Bytes,
}
@ -350,19 +355,19 @@ impl Request {
pub fn new(url: Url, method: Method) -> Request {
Request {
url, method,
headers: hyper::Headers::new(),
headers: HeaderMap::new(),
body: Default::default(),
}
}
/// Create a new GET request.
pub fn get(url: Url) -> Request {
Request::new(url, Method::Get)
Request::new(url, Method::GET)
}
/// Create a new empty POST request.
pub fn post(url: Url) -> Request {
Request::new(url, Method::Post)
Request::new(url, Method::POST)
}
/// Read the url.
@ -371,12 +376,12 @@ impl Request {
}
/// Read the request headers.
pub fn headers(&self) -> &hyper::Headers {
pub fn headers(&self) -> &HeaderMap {
&self.headers
}
/// Get a mutable reference to the headers.
pub fn headers_mut(&mut self) -> &mut hyper::Headers {
pub fn headers_mut(&mut self) -> &mut HeaderMap {
&mut self.headers
}
@ -391,8 +396,10 @@ impl Request {
}
/// Consume self, and return it with the added given header.
pub fn with_header<H: hyper::header::Header>(mut self, value: H) -> Self {
self.headers_mut().set(value);
pub fn with_header<K>(mut self, key: K, val: HeaderValue) -> Self
where K: IntoHeaderName,
{
self.headers_mut().append(key, val);
self
}
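A short usage sketch of the new header API (the `json_get` helper is illustrative and assumes this module's `Request` and `Url` types): typed header structs such as `UserAgent` are replaced by `HeaderName`/`HeaderValue` pairs on a `HeaderMap`.

```rust
use hyper::header::{self, HeaderValue};

// Hypothetical helper: headers are appended as name/value pairs keyed by
// `HeaderName` constants rather than set via typed header structs.
fn json_get(url: Url) -> Request {
    Request::get(url)
        .with_header(header::ACCEPT, HeaderValue::from_static("application/json"))
}
```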
@ -403,16 +410,15 @@ impl Request {
}
}
impl Into<hyper::Request> for Request {
fn into(mut self) -> hyper::Request {
let uri = self.url.as_ref().parse().expect("Every valid URLis also a URI.");
let mut req = hyper::Request::new(self.method, uri);
self.headers.set(UserAgent::new("Parity Fetch Neo"));
*req.headers_mut() = self.headers;
req.set_body(self.body);
req
impl From<Request> for hyper::Request<hyper::Body> {
fn from(req: Request) -> hyper::Request<hyper::Body> {
let uri: hyper::Uri = req.url.as_ref().parse().expect("Every valid URLis also a URI.");
hyper::Request::builder()
.method(req.method)
.uri(uri)
.header(header::USER_AGENT, HeaderValue::from_static("Parity Fetch Neo"))
.body(req.body.into())
.expect("Header, uri, method, and body are already valid and can not fail to parse; qed")
}
}
@ -421,7 +427,7 @@ impl Into<hyper::Request> for Request {
pub struct Response {
url: Url,
status: StatusCode,
headers: hyper::Headers,
headers: HeaderMap,
body: hyper::Body,
abort: Abort,
nread: usize,
@ -429,12 +435,12 @@ pub struct Response {
impl Response {
/// Create a new response, wrapping a hyper response.
pub fn new(u: Url, r: hyper::Response, a: Abort) -> Response {
pub fn new(u: Url, r: hyper::Response<hyper::Body>, a: Abort) -> Response {
Response {
url: u,
status: r.status(),
headers: r.headers().clone(),
body: r.body(),
body: r.into_body(),
abort: a,
nread: 0,
}
@ -447,26 +453,21 @@ impl Response {
/// Status code == OK (200)?
pub fn is_success(&self) -> bool {
self.status() == StatusCode::Ok
self.status() == StatusCode::OK
}
/// Status code == 404.
pub fn is_not_found(&self) -> bool {
self.status() == StatusCode::NotFound
self.status() == StatusCode::NOT_FOUND
}
/// Is the content-type text/html?
pub fn is_html(&self) -> bool {
if let Some(ref mime) = self.content_type() {
mime.type_() == "text" && mime.subtype() == "html"
} else {
false
}
}
/// The conten-type header value.
pub fn content_type(&self) -> Option<Mime> {
self.headers.get::<ContentType>().map(|ct| ct.0.clone())
self.headers.get(header::CONTENT_TYPE).and_then(|ct_val| {
ct_val.to_str().ok().map(|ct_str| {
ct_str.contains("text") && ct_str.contains("html")
})
}).unwrap_or(false)
}
}
@ -562,6 +563,10 @@ impl io::Read for BodyReader {
pub enum Error {
/// Hyper gave us an error.
Hyper(hyper::Error),
/// A hyper header conversion error.
HyperHeaderToStrError(hyper::header::ToStrError),
/// An integer parsing error.
ParseInt(std::num::ParseIntError),
/// Some I/O error occurred.
Io(io::Error),
/// Invalid URLs where attempted to parse.
@ -570,8 +575,10 @@ pub enum Error {
Aborted,
/// Too many redirects have been encountered.
TooManyRedirects,
/// tokio-timer inner future gave us an error.
TokioTimeoutInnerVal(String),
/// tokio-timer gave us an error.
Timer(tokio_timer::TimerError),
TokioTimer(Option<tokio::timer::Error>),
/// The maximum duration was reached.
Timeout,
/// The response body is too large.
@ -585,23 +592,43 @@ impl fmt::Display for Error {
match *self {
Error::Aborted => write!(fmt, "The request has been aborted."),
Error::Hyper(ref e) => write!(fmt, "{}", e),
Error::HyperHeaderToStrError(ref e) => write!(fmt, "{}", e),
Error::ParseInt(ref e) => write!(fmt, "{}", e),
Error::Url(ref e) => write!(fmt, "{}", e),
Error::Io(ref e) => write!(fmt, "{}", e),
Error::BackgroundThreadDead => write!(fmt, "background thread gone"),
Error::TooManyRedirects => write!(fmt, "too many redirects"),
Error::Timer(ref e) => write!(fmt, "{}", e),
Error::TokioTimeoutInnerVal(ref s) => write!(fmt, "tokio timer inner value error: {:?}", s),
Error::TokioTimer(ref e) => write!(fmt, "tokio timer error: {:?}", e),
Error::Timeout => write!(fmt, "request timed out"),
Error::SizeLimit => write!(fmt, "size limit reached"),
}
}
}
impl ::std::error::Error for Error {
fn description(&self) -> &str { "Fetch client error" }
fn cause(&self) -> Option<&::std::error::Error> { None }
}
impl From<hyper::Error> for Error {
fn from(e: hyper::Error) -> Self {
Error::Hyper(e)
}
}
impl From<hyper::header::ToStrError> for Error {
fn from(e: hyper::header::ToStrError) -> Self {
Error::HyperHeaderToStrError(e)
}
}
impl From<std::num::ParseIntError> for Error {
fn from(e: std::num::ParseIntError) -> Self {
Error::ParseInt(e)
}
}
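Aside (not part of the diff): the two new error variants appear to exist because hyper 0.12 exposes raw `HeaderValue`s that must be converted to `&str` and then parsed. A minimal sketch of the kind of code these `From` impls support, using a hypothetical helper name:

// Hypothetical helper, not in this commit: reads Content-Length from a hyper
// 0.12 HeaderMap. The `?` operator relies on the From impls above to convert
// both the header-to-str failure and the integer parse failure into Error.
fn content_length(headers: &HeaderMap) -> Result<Option<usize>, Error> {
	match headers.get(header::CONTENT_LENGTH) {
		Some(raw) => Ok(Some(raw.to_str()?.parse::<usize>()?)),
		None => Ok(None),
	}
}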
impl From<io::Error> for Error {
fn from(e: io::Error) -> Self {
Error::Io(e)
@ -614,24 +641,35 @@ impl From<url::ParseError> for Error {
}
}
impl<F> From<tokio_timer::TimeoutError<F>> for Error {
fn from(e: tokio_timer::TimeoutError<F>) -> Self {
match e {
tokio_timer::TimeoutError::Timer(_, e) => Error::Timer(e),
tokio_timer::TimeoutError::TimedOut(_) => Error::Timeout,
impl<T: std::fmt::Debug> From<tokio::timer::timeout::Error<T>> for Error {
fn from(e: tokio::timer::timeout::Error<T>) -> Self {
if e.is_inner() {
Error::TokioTimeoutInnerVal(format!("{:?}", e.into_inner().unwrap()))
} else if e.is_elapsed() {
Error::Timeout
} else {
Error::TokioTimer(e.into_timer())
}
}
}
impl From<tokio::timer::Error> for Error {
fn from(e: tokio::timer::Error) -> Self {
Error::TokioTimer(Some(e))
}
}
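Aside (not part of the diff): a hedged sketch of how the timer conversions above are typically exercised. Wrapping a future in `tokio::timer::Timeout` fails with `timeout::Error<E>`, which `from_err()` can fold back into the crate's `Error` (assuming `Error` implements `Debug`, as the tests below rely on):

// Hypothetical illustration, not in this commit.
use std::time::Duration;
use futures::Future;
use tokio::timer::Timeout;

// Applies a deadline to any future whose error type is already `Error`.
fn with_deadline<F>(fut: F, secs: u64) -> impl Future<Item = F::Item, Error = Error>
where
	F: Future<Error = Error>,
{
	// On failure `Timeout` yields `timeout::Error<Error>`; `from_err()` maps it
	// through the `From<tokio::timer::timeout::Error<T>>` impl defined above.
	Timeout::new(fut, Duration::from_secs(secs)).from_err()
}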
#[cfg(test)]
mod test {
use super::*;
use futures::future;
use futures::sync::mpsc;
use hyper::StatusCode;
use hyper::server::{Http, Request, Response, Service};
use tokio_timer::Timer;
use std;
use futures::sync::oneshot;
use hyper::{
StatusCode,
service::Service,
};
use tokio::timer::Delay;
use tokio::runtime::current_thread::Runtime;
use std::io::Read;
use std::net::SocketAddr;
@ -641,139 +679,238 @@ mod test {
fn it_should_fetch() {
let server = TestServer::run();
let client = Client::new(4).unwrap();
let future = client.get(&format!("http://{}?123", server.addr()), Default::default());
let resp = future.wait().unwrap();
let mut runtime = Runtime::new().unwrap();
let future = client.get(&format!("http://{}?123", server.addr()), Abort::default())
.map(|resp| {
assert!(resp.is_success());
let body = resp.concat2().wait().unwrap();
assert_eq!(&body[..], b"123")
resp
})
.map(|resp| resp.concat2())
.flatten()
.map(|body| assert_eq!(&body[..], b"123"))
.map_err(|err| panic!(err));
runtime.block_on(future).unwrap();
}
#[test]
fn it_should_fetch_in_light_mode() {
let server = TestServer::run();
let client = Client::new(1).unwrap();
let future = client.get(&format!("http://{}?123", server.addr()), Default::default());
let resp = future.wait().unwrap();
let mut runtime = Runtime::new().unwrap();
let future = client.get(&format!("http://{}?123", server.addr()), Abort::default())
.map(|resp| {
assert!(resp.is_success());
let body = resp.concat2().wait().unwrap();
assert_eq!(&body[..], b"123")
resp
})
.map(|resp| resp.concat2())
.flatten()
.map(|body| assert_eq!(&body[..], b"123"))
.map_err(|err| panic!(err));
runtime.block_on(future).unwrap();
}
#[test]
fn it_should_timeout() {
let server = TestServer::run();
let client = Client::new(4).unwrap();
let mut runtime = Runtime::new().unwrap();
let abort = Abort::default().with_max_duration(Duration::from_secs(1));
match client.get(&format!("http://{}/delay?3", server.addr()), abort).wait() {
Err(Error::Timeout) => {}
other => panic!("expected timeout, got {:?}", other)
let future = client.get(&format!("http://{}/delay?3", server.addr()), abort)
.then(|res| {
match res {
Err(Error::Timeout) => Ok::<_, ()>(()),
other => panic!("expected timeout, got {:?}", other),
}
});
runtime.block_on(future).unwrap();
}
#[test]
fn it_should_follow_redirects() {
let server = TestServer::run();
let client = Client::new(4).unwrap();
let mut runtime = Runtime::new().unwrap();
let abort = Abort::default();
let future = client.get(&format!("http://{}/redirect?http://{}/", server.addr(), server.addr()), abort);
assert!(future.wait().unwrap().is_success())
let future = client.get(&format!("http://{}/redirect?http://{}/", server.addr(), server.addr()), abort)
.and_then(|resp| {
if resp.is_success() { Ok(()) } else { panic!("Response unsuccessful") }
});
runtime.block_on(future).unwrap();
}
#[test]
fn it_should_follow_relative_redirects() {
let server = TestServer::run();
let client = Client::new(4).unwrap();
let mut runtime = Runtime::new().unwrap();
let abort = Abort::default().with_max_redirects(4);
let future = client.get(&format!("http://{}/redirect?/", server.addr()), abort);
assert!(future.wait().unwrap().is_success())
let future = client.get(&format!("http://{}/redirect?/", server.addr()), abort)
.and_then(|resp| {
if resp.is_success() { Ok(()) } else { panic!("Response unsuccessful") }
});
runtime.block_on(future).unwrap();
}
#[test]
fn it_should_not_follow_too_many_redirects() {
let server = TestServer::run();
let client = Client::new(4).unwrap();
let mut runtime = Runtime::new().unwrap();
let abort = Abort::default().with_max_redirects(3);
match client.get(&format!("http://{}/loop", server.addr()), abort).wait() {
Err(Error::TooManyRedirects) => {}
let future = client.get(&format!("http://{}/loop", server.addr()), abort)
.then(|res| {
match res {
Err(Error::TooManyRedirects) => Ok::<_, ()>(()),
other => panic!("expected too many redirects error, got {:?}", other)
}
});
runtime.block_on(future).unwrap();
}
#[test]
fn it_should_read_data() {
let server = TestServer::run();
let client = Client::new(4).unwrap();
let mut runtime = Runtime::new().unwrap();
let abort = Abort::default();
let future = client.get(&format!("http://{}?abcdefghijklmnopqrstuvwxyz", server.addr()), abort);
let resp = future.wait().unwrap();
assert!(resp.is_success());
assert_eq!(&resp.concat2().wait().unwrap()[..], b"abcdefghijklmnopqrstuvwxyz")
let future = client.get(&format!("http://{}?abcdefghijklmnopqrstuvwxyz", server.addr()), abort)
.and_then(|resp| {
if resp.is_success() { Ok(resp) } else { panic!("Response unsuccessful") }
})
.map(|resp| resp.concat2())
.flatten()
.map(|body| assert_eq!(&body[..], b"abcdefghijklmnopqrstuvwxyz"));
runtime.block_on(future).unwrap();
}
#[test]
fn it_should_not_read_too_much_data() {
let server = TestServer::run();
let client = Client::new(4).unwrap();
let mut runtime = Runtime::new().unwrap();
let abort = Abort::default().with_max_size(3);
let resp = client.get(&format!("http://{}/?1234", server.addr()), abort).wait().unwrap();
assert!(resp.is_success());
match resp.concat2().wait() {
Err(Error::SizeLimit) => {}
other => panic!("expected size limit error, got {:?}", other)
let future = client.get(&format!("http://{}/?1234", server.addr()), abort)
.and_then(|resp| {
if resp.is_success() { Ok(resp) } else { panic!("Response unsuccessful") }
})
.map(|resp| resp.concat2())
.flatten()
.then(|body| {
match body {
Err(Error::SizeLimit) => Ok::<_, ()>(()),
other => panic!("expected size limit error, got {:?}", other),
}
});
runtime.block_on(future).unwrap();
}
#[test]
fn it_should_not_read_too_much_data_sync() {
let server = TestServer::run();
let client = Client::new(4).unwrap();
let mut runtime = Runtime::new().unwrap();
// let abort = Abort::default().with_max_size(3);
// let resp = client.get(&format!("http://{}/?1234", server.addr()), abort).wait().unwrap();
// assert!(resp.is_success());
// let mut buffer = Vec::new();
// let mut reader = BodyReader::new(resp);
// match reader.read_to_end(&mut buffer) {
// Err(ref e) if e.kind() == io::ErrorKind::PermissionDenied => {}
// other => panic!("expected size limit error, got {:?}", other)
// }
// FIXME (c0gent): The prior version of this test (pre-hyper-0.12,
// commented out above) is not possible to recreate. It relied on an
// apparent bug in `Client::background_thread` which suppressed the
// `SizeLimit` error from occurring. This is due to the headers
// collection not returning a value for content length when queried.
// The precise reason why this was happening is unclear.
let abort = Abort::default().with_max_size(3);
let resp = client.get(&format!("http://{}/?1234", server.addr()), abort).wait().unwrap();
let future = client.get(&format!("http://{}/?1234", server.addr()), abort)
.and_then(|resp| {
assert_eq!(true, false, "Unreachable. (see FIXME note)");
assert!(resp.is_success());
let mut buffer = Vec::new();
let mut reader = BodyReader::new(resp);
match reader.read_to_end(&mut buffer) {
Err(ref e) if e.kind() == io::ErrorKind::PermissionDenied => {}
Err(ref e) if e.kind() == io::ErrorKind::PermissionDenied => Ok(()),
other => panic!("expected size limit error, got {:?}", other)
}
});
// FIXME: This simply demonstrates the above point.
match runtime.block_on(future) {
Err(Error::SizeLimit) => {},
other => panic!("Expected `Error::SizeLimit`, got: {:?}", other),
}
}
struct TestServer(Timer);
struct TestServer;
impl Service for TestServer {
type Request = Request;
type Response = Response;
type Error = hyper::Error;
type Future = Box<Future<Item=Self::Response, Error=Self::Error>>;
type ReqBody = hyper::Body;
type ResBody = hyper::Body;
type Error = Error;
type Future = Box<Future<Item=hyper::Response<Self::ResBody>, Error=Self::Error> + Send + 'static>;
fn call(&self, req: Request) -> Self::Future {
fn call(&mut self, req: hyper::Request<hyper::Body>) -> Self::Future {
match req.uri().path() {
"/" => {
let body = req.uri().query().unwrap_or("").to_string();
let req = Response::new().with_body(body);
Box::new(future::ok(req))
let res = hyper::Response::new(body.into());
Box::new(future::ok(res))
}
"/redirect" => {
let loc = Location::new(req.uri().query().unwrap_or("/").to_string());
let req = Response::new()
.with_status(StatusCode::MovedPermanently)
.with_header(loc);
Box::new(future::ok(req))
let loc = req.uri().query().unwrap_or("/").to_string();
let res = hyper::Response::builder()
.status(StatusCode::MOVED_PERMANENTLY)
.header(hyper::header::LOCATION, loc)
.body(hyper::Body::empty())
.expect("Unable to create response");
Box::new(future::ok(res))
}
"/loop" => {
let req = Response::new()
.with_status(StatusCode::MovedPermanently)
.with_header(Location::new("/loop".to_string()));
Box::new(future::ok(req))
let res = hyper::Response::builder()
.status(StatusCode::MOVED_PERMANENTLY)
.header(hyper::header::LOCATION, "/loop")
.body(hyper::Body::empty())
.expect("Unable to create response");
Box::new(future::ok(res))
}
"/delay" => {
let d = Duration::from_secs(req.uri().query().unwrap_or("0").parse().unwrap());
Box::new(self.0.sleep(d)
.map_err(|_| return io::Error::new(io::ErrorKind::Other, "timer error"))
.from_err()
.map(|_| Response::new()))
let dur = Duration::from_secs(req.uri().query().unwrap_or("0").parse().unwrap());
let delayed_res = Delay::new(std::time::Instant::now() + dur)
.and_then(|_| Ok::<_, _>(hyper::Response::new(hyper::Body::empty())))
.from_err();
Box::new(delayed_res)
}
_ => {
let res = hyper::Response::builder()
.status(StatusCode::NOT_FOUND)
.body(hyper::Body::empty())
.expect("Unable to create response");
Box::new(future::ok(res))
}
_ => Box::new(future::ok(Response::new().with_status(StatusCode::NotFound)))
}
}
}
@ -781,19 +918,27 @@ mod test {
impl TestServer {
fn run() -> Handle {
let (tx_start, rx_start) = std::sync::mpsc::sync_channel(1);
let (tx_end, rx_end) = mpsc::channel(0);
let rx_end_fut = rx_end.into_future().map(|_| ()).map_err(|_| ());
let (tx_end, rx_end) = oneshot::channel();
let rx_end_fut = rx_end.map(|_| ()).map_err(|_| ());
thread::spawn(move || {
let addr = ADDRESS.parse().unwrap();
let server = Http::new().bind(&addr, || Ok(TestServer(Timer::default()))).unwrap();
tx_start.send(server.local_addr().unwrap()).unwrap_or(());
server.run_until(rx_end_fut).unwrap();
let server = hyper::server::Server::bind(&addr)
.serve(|| future::ok::<_, hyper::Error>(TestServer));
tx_start.send(server.local_addr()).unwrap_or(());
tokio::run(
server.with_graceful_shutdown(rx_end_fut)
.map_err(|e| panic!("server error: {}", e))
);
});
Handle(rx_start.recv().unwrap(), tx_end)
Handle(rx_start.recv().unwrap(), Some(tx_end))
}
}
struct Handle(SocketAddr, mpsc::Sender<()>);
struct Handle(SocketAddr, Option<oneshot::Sender<()>>);
impl Handle {
fn addr(&self) -> SocketAddr {
@ -803,7 +948,7 @@ mod test {
impl Drop for Handle {
fn drop(&mut self) {
self.1.clone().send(()).wait().unwrap();
self.1.take().unwrap().send(()).unwrap();
}
}
}
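For context (not part of the commit), a minimal sketch of driving the updated client outside the test harness, mirroring the tests above; the crate name and re-exported items (`fetch`, `Client`, `Fetch`, `Abort`) are assumptions:

// Hypothetical caller code.
extern crate fetch;
extern crate futures;
extern crate tokio;

use fetch::{Abort, Client, Fetch};
use futures::{Future, Stream};
use tokio::runtime::current_thread::Runtime;

fn main() {
	let client = Client::new(4).expect("creating the fetch client");
	let mut runtime = Runtime::new().expect("creating the tokio runtime");

	// Build the request future, collect the body, and drive it to completion
	// on a current-thread runtime, as the tests above do.
	let work = client.get("http://127.0.0.1:8080/", Abort::default())
		.and_then(|resp| resp.concat2())
		.map(|body| println!("fetched {} bytes", body.len()));

	runtime.block_on(work).expect("request failed");
}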

View File

@ -26,9 +26,9 @@ extern crate futures;
extern crate hyper;
extern crate hyper_rustls;
extern crate http;
extern crate tokio_core;
extern crate tokio_timer;
extern crate tokio;
extern crate url;
extern crate bytes;

View File

@ -21,7 +21,7 @@ ansi_term = "0.10"
rustc-hex = "1.0"
ethcore-io = { path = "../io", features = ["mio"] }
parity-bytes = "0.1"
parity-crypto = "0.1"
parity-crypto = "0.2"
ethcore-logger = { path = "../../logger" }
ethcore-network = { path = "../network" }
ethereum-types = "0.4"

View File

@ -8,7 +8,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
error-chain = { version = "0.12", default-features = false }
parity-crypto = "0.1"
parity-crypto = "0.2"
ethcore-io = { path = "../io" }
ethereum-types = "0.4"
ethkey = { path = "../../ethkey" }

View File

@ -1,238 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Tokio Core Reactor wrapper.
extern crate futures;
extern crate tokio_core;
use std::{fmt, thread};
use std::sync::mpsc;
use std::time::Duration;
use futures::{Future, IntoFuture};
pub use tokio_core::reactor::{Remote as TokioRemote, Handle, Timeout};
/// Event Loop for futures.
/// Wrapper around `tokio::reactor::Core`.
/// Runs in a separate thread.
pub struct EventLoop {
remote: Remote,
handle: EventLoopHandle,
}
impl EventLoop {
/// Spawns a new thread with `EventLoop` with given handler.
pub fn spawn() -> Self {
let (stop, stopped) = futures::oneshot();
let (tx, rx) = mpsc::channel();
let handle = thread::spawn(move || {
let mut el = tokio_core::reactor::Core::new().expect("Creating an event loop should not fail.");
tx.send(el.remote()).expect("Rx is blocking upper thread.");
let _ = el.run(futures::empty().select(stopped));
});
let remote = rx.recv().expect("tx is transferred to a newly spawned thread.");
EventLoop {
remote: Remote {
inner: Mode::Tokio(remote),
},
handle: EventLoopHandle {
close: Some(stop),
handle: Some(handle),
},
}
}
/// Returns this event loop raw remote.
///
/// Deprecated: Exists only to connect with current JSONRPC implementation.
pub fn raw_remote(&self) -> TokioRemote {
if let Mode::Tokio(ref remote) = self.remote.inner {
remote.clone()
} else {
panic!("Event loop is never initialized in other mode then Tokio.")
}
}
/// Returns event loop remote.
pub fn remote(&self) -> Remote {
self.remote.clone()
}
}
#[derive(Clone)]
enum Mode {
Tokio(TokioRemote),
Sync,
ThreadPerFuture,
}
impl fmt::Debug for Mode {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::Mode::*;
match *self {
Tokio(_) => write!(fmt, "tokio"),
Sync => write!(fmt, "synchronous"),
ThreadPerFuture => write!(fmt, "thread per future"),
}
}
}
#[derive(Debug, Clone)]
pub struct Remote {
inner: Mode,
}
impl Remote {
/// Remote for existing event loop.
///
/// Deprecated: Exists only to connect with current JSONRPC implementation.
pub fn new(remote: TokioRemote) -> Self {
Remote {
inner: Mode::Tokio(remote),
}
}
/// Synchronous remote, used mostly for tests.
pub fn new_sync() -> Self {
Remote {
inner: Mode::Sync,
}
}
/// Spawns a new thread for each future (use only for tests).
pub fn new_thread_per_future() -> Self {
Remote {
inner: Mode::ThreadPerFuture,
}
}
/// Spawn a future to this event loop
pub fn spawn<R>(&self, r: R) where
R: IntoFuture<Item=(), Error=()> + Send + 'static,
R::Future: 'static,
{
match self.inner {
Mode::Tokio(ref remote) => remote.spawn(move |_| r),
Mode::Sync => {
let _= r.into_future().wait();
},
Mode::ThreadPerFuture => {
thread::spawn(move || {
let _= r.into_future().wait();
});
},
}
}
/// Spawn a new future returned by given closure.
pub fn spawn_fn<F, R>(&self, f: F) where
F: FnOnce(&Handle) -> R + Send + 'static,
R: IntoFuture<Item=(), Error=()>,
R::Future: 'static,
{
match self.inner {
Mode::Tokio(ref remote) => remote.spawn(move |handle| f(handle)),
Mode::Sync => {
let mut core = tokio_core::reactor::Core::new().expect("Creating an event loop should not fail.");
let handle = core.handle();
let _ = core.run(f(&handle).into_future());
},
Mode::ThreadPerFuture => {
thread::spawn(move || {
let mut core = tokio_core::reactor::Core::new().expect("Creating an event loop should not fail.");
let handle = core.handle();
let _ = core.run(f(&handle).into_future());
});
},
}
}
/// Spawn a new future and wait for it or for a timeout to occur.
pub fn spawn_with_timeout<F, R, T>(&self, f: F, duration: Duration, on_timeout: T) where
T: FnOnce() -> () + Send + 'static,
F: FnOnce(&Handle) -> R + Send + 'static,
R: IntoFuture<Item=(), Error=()>,
R::Future: 'static,
{
match self.inner {
Mode::Tokio(ref remote) => remote.spawn(move |handle| {
let future = f(handle).into_future();
let timeout = Timeout::new(duration, handle).expect("Event loop is still up.");
future.select(timeout.then(move |_| {
on_timeout();
Ok(())
})).then(|_| Ok(()))
}),
Mode::Sync => {
let mut core = tokio_core::reactor::Core::new().expect("Creating an event loop should not fail.");
let handle = core.handle();
let future = f(&handle).into_future();
let timeout = Timeout::new(duration, &handle).expect("Event loop is still up.");
let _: Result<(), ()> = core.run(future.select(timeout.then(move |_| {
on_timeout();
Ok(())
})).then(|_| Ok(())));
},
Mode::ThreadPerFuture => {
thread::spawn(move || {
let mut core = tokio_core::reactor::Core::new().expect("Creating an event loop should not fail.");
let handle = core.handle();
let future = f(&handle).into_future();
let timeout = Timeout::new(duration, &handle).expect("Event loop is still up.");
let _: Result<(), ()> = core.run(future.select(timeout.then(move |_| {
on_timeout();
Ok(())
})).then(|_| Ok(())));
});
},
}
}
}
/// A handle to running event loop. Dropping the handle will cause event loop to finish.
pub struct EventLoopHandle {
close: Option<futures::Complete<()>>,
handle: Option<thread::JoinHandle<()>>
}
impl From<EventLoop> for EventLoopHandle {
fn from(el: EventLoop) -> Self {
el.handle
}
}
impl Drop for EventLoopHandle {
fn drop(&mut self) {
self.close.take().map(|v| v.send(()));
}
}
impl EventLoopHandle {
/// Blocks current thread and waits until the event loop is finished.
pub fn wait(mut self) -> thread::Result<()> {
self.handle.take()
.expect("Handle is taken only in `wait`, `wait` is consuming; qed").join()
}
/// Finishes this event loop.
pub fn close(mut self) {
let _ = self.close.take()
.expect("Close is taken only in `close` and `drop`. `close` is consuming; qed")
.send(());
}
}

View File

@ -1,11 +1,11 @@
[package]
description = "Parity Reactor"
description = "Parity Runtime"
homepage = "http://parity.io"
license = "GPL-3.0"
name = "parity-reactor"
name = "parity-runtime"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
futures = "0.1"
tokio-core = "0.1"
tokio = "~0.1.9"

256
util/runtime/src/lib.rs Normal file
View File

@ -0,0 +1,256 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Tokio Runtime wrapper.
extern crate futures;
extern crate tokio;
use std::{fmt, thread};
use std::sync::mpsc;
use std::time::{Duration, Instant};
use futures::{future, Future, IntoFuture};
pub use tokio::timer::Delay;
pub use tokio::runtime::{Runtime as TokioRuntime, Builder as TokioRuntimeBuilder, TaskExecutor};
/// Runtime for futures.
///
/// Runs in a separate thread.
pub struct Runtime {
executor: Executor,
handle: RuntimeHandle,
}
impl Runtime {
fn new(runtime_bldr: &mut TokioRuntimeBuilder) -> Self {
let mut runtime = runtime_bldr
.build()
.expect("Building a Tokio runtime will only fail when mio components \
cannot be initialized (catastrophic)");
let (stop, stopped) = futures::oneshot();
let (tx, rx) = mpsc::channel();
let handle = thread::spawn(move || {
tx.send(runtime.executor()).expect("Rx is blocking upper thread.");
runtime.block_on(futures::empty().select(stopped).map(|_| ()).map_err(|_| ()))
.expect("Tokio runtime should not have unhandled errors.");
});
let executor = rx.recv().expect("tx is transferred to a newly spawned thread.");
Runtime {
executor: Executor {
inner: Mode::Tokio(executor),
},
handle: RuntimeHandle {
close: Some(stop),
handle: Some(handle),
},
}
}
/// Spawns a new tokio runtime with a default thread count on a background
/// thread and returns a `Runtime` which can be used to spawn tasks via
/// its executor.
pub fn with_default_thread_count() -> Self {
let mut runtime_bldr = TokioRuntimeBuilder::new();
Self::new(&mut runtime_bldr)
}
/// Spawns a new tokio runtime with the specified thread count on a
/// background thread and returns a `Runtime` which can be used to spawn
/// tasks via its executor.
pub fn with_thread_count(thread_count: usize) -> Self {
let mut runtime_bldr = TokioRuntimeBuilder::new();
runtime_bldr.core_threads(thread_count);
Self::new(&mut runtime_bldr)
}
/// Returns this runtime's raw executor.
///
/// Deprecated: Exists only to connect with current JSONRPC implementation.
pub fn raw_executor(&self) -> TaskExecutor {
if let Mode::Tokio(ref executor) = self.executor.inner {
executor.clone()
} else {
panic!("Runtime is not initialized in Tokio mode.")
}
}
/// Returns runtime executor.
pub fn executor(&self) -> Executor {
self.executor.clone()
}
}
#[derive(Clone)]
enum Mode {
Tokio(TaskExecutor),
Sync,
ThreadPerFuture,
}
impl fmt::Debug for Mode {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::Mode::*;
match *self {
Tokio(_) => write!(fmt, "tokio"),
Sync => write!(fmt, "synchronous"),
ThreadPerFuture => write!(fmt, "thread per future"),
}
}
}
/// Returns a future which runs `f` until `duration` has elapsed, at which
/// time `on_timeout` is run and the future resolves.
fn timeout<F, R, T>(f: F, duration: Duration, on_timeout: T)
-> impl Future<Item = (), Error = ()> + Send + 'static
where
T: FnOnce() -> () + Send + 'static,
F: FnOnce() -> R + Send + 'static,
R: IntoFuture<Item=(), Error=()> + Send + 'static,
R::Future: Send + 'static,
{
let future = future::lazy(f);
let timeout = Delay::new(Instant::now() + duration)
.then(move |_| {
on_timeout();
Ok(())
});
future.select(timeout).then(|_| Ok(()))
}
#[derive(Debug, Clone)]
pub struct Executor {
inner: Mode,
}
impl Executor {
/// Executor for existing runtime.
///
/// Deprecated: Exists only to connect with current JSONRPC implementation.
pub fn new(executor: TaskExecutor) -> Self {
Executor {
inner: Mode::Tokio(executor),
}
}
/// Synchronous executor, used mostly for tests.
pub fn new_sync() -> Self {
Executor {
inner: Mode::Sync,
}
}
/// Spawns a new thread for each future (use only for tests).
pub fn new_thread_per_future() -> Self {
Executor {
inner: Mode::ThreadPerFuture,
}
}
/// Spawn a future to this runtime
pub fn spawn<R>(&self, r: R) where
R: IntoFuture<Item=(), Error=()> + Send + 'static,
R::Future: Send + 'static,
{
match self.inner {
Mode::Tokio(ref executor) => executor.spawn(r.into_future()),
Mode::Sync => {
let _= r.into_future().wait();
},
Mode::ThreadPerFuture => {
thread::spawn(move || {
let _= r.into_future().wait();
});
},
}
}
/// Spawn a new future returned by given closure.
pub fn spawn_fn<F, R>(&self, f: F) where
F: FnOnce() -> R + Send + 'static,
R: IntoFuture<Item=(), Error=()> + Send + 'static,
R::Future: Send + 'static,
{
match self.inner {
Mode::Tokio(ref executor) => executor.spawn(future::lazy(f)),
Mode::Sync => {
let _ = future::lazy(f).wait();
},
Mode::ThreadPerFuture => {
thread::spawn(move || {
let _= f().into_future().wait();
});
},
}
}
/// Spawn a new future and wait for it or for a timeout to occur.
pub fn spawn_with_timeout<F, R, T>(&self, f: F, duration: Duration, on_timeout: T) where
T: FnOnce() -> () + Send + 'static,
F: FnOnce() -> R + Send + 'static,
R: IntoFuture<Item=(), Error=()> + Send + 'static,
R::Future: Send + 'static,
{
match self.inner {
Mode::Tokio(ref executor) => {
executor.spawn(timeout(f, duration, on_timeout))
},
Mode::Sync => {
let _ = timeout(f, duration, on_timeout).wait();
},
Mode::ThreadPerFuture => {
thread::spawn(move || {
let _ = timeout(f, duration, on_timeout).wait();
});
},
}
}
}
/// A handle to a runtime. Dropping the handle will cause the runtime to shut down.
pub struct RuntimeHandle {
close: Option<futures::Complete<()>>,
handle: Option<thread::JoinHandle<()>>
}
impl From<Runtime> for RuntimeHandle {
fn from(el: Runtime) -> Self {
el.handle
}
}
impl Drop for RuntimeHandle {
fn drop(&mut self) {
self.close.take().map(|v| v.send(()));
}
}
impl RuntimeHandle {
/// Blocks current thread and waits until the runtime is finished.
pub fn wait(mut self) -> thread::Result<()> {
self.handle.take()
.expect("Handle is taken only in `wait`, `wait` is consuming; qed").join()
}
/// Finishes this runtime.
pub fn close(mut self) {
let _ = self.close.take()
.expect("Close is taken only in `close` and `drop`. `close` is consuming; qed")
.send(());
}
}
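For context (not part of the diff), a hedged sketch of how callers are expected to use the new crate, built only from the items defined above; the crate name `parity-runtime` is taken from the Cargo.toml earlier in this commit:

// Hypothetical usage of parity-runtime.
extern crate futures;
extern crate parity_runtime;

use std::time::Duration;
use futures::future;
use parity_runtime::{Runtime, RuntimeHandle};

fn main() {
	// Start a tokio runtime on a background thread.
	let runtime = Runtime::with_default_thread_count();
	let executor = runtime.executor();

	// Spawn a task onto the background runtime.
	executor.spawn(future::lazy(|| {
		println!("running on the runtime");
		Ok(())
	}));

	// Race a task against a five second timeout.
	executor.spawn_with_timeout(
		|| future::ok(()),
		Duration::from_secs(5),
		|| println!("timed out"),
	);

	// Converting into a handle and closing it shuts the runtime down;
	// simply dropping the handle has the same effect.
	let handle: RuntimeHandle = runtime.into();
	handle.close();
}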

View File

@ -9,7 +9,7 @@ bitflags = "0.9"
byteorder = "1.0.0"
ethereum-types = "0.4"
ethcore-network = { path = "../util/network" }
parity-crypto = "0.1"
parity-crypto = "0.2"
ethkey = { path = "../ethkey" }
hex = "0.2"
log = "0.4"
@ -25,6 +25,6 @@ slab = "0.3"
smallvec = "0.6"
tiny-keccak = "1.4"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }

View File

@ -14,9 +14,9 @@ docopt = "0.8"
serde = "1.0"
serde_derive = "1.0"
panic_hook = { path = "../../util/panic_hook" }
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-2.2" }
log = "0.4"
[[bin]]