Remove secret_store runtimes. (#9888)
* Remove the independent runtimes from `KeyServerHttpListener` and `KeyServerCore` and instead require a `parity_runtime::Executor` to be passed upon creation of each.
* Remove the `threads` parameter from both `ClusterConfiguration` structs.
* Implement the `future::Executor` trait for `parity_runtime::Executor` (a sketch of this impl follows below).
* Update tests.
  - Update the `loop_until` function to instead use a oneshot to signal completion.
  - Modify the `make_key_servers` function to create and return a runtime.
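The third bullet's `future::Executor` implementation lands in `util/runtime`, whose hunk is not part of this excerpt. For orientation only, a minimal sketch of what such an impl looks like under futures 0.1, assuming `parity_runtime::Executor` wraps a tokio `TaskExecutor`; the struct body and the `inner` field are illustrative, not the actual util/runtime code:

use futures::Future;
use futures::future::{self, ExecuteError};
use tokio::runtime::TaskExecutor;

/// Illustrative stand-in for `parity_runtime::Executor`.
#[derive(Clone)]
pub struct Executor {
	inner: TaskExecutor, // assumed: a handle into one shared tokio runtime
}

impl<F> future::Executor<F> for Executor
	where F: Future<Item = (), Error = ()> + Send + 'static
{
	fn execute(&self, f: F) -> Result<(), ExecuteError<F>> {
		// Delegate to the wrapped tokio executor. The explicit trait-call style
		// mirrors `future::Executor::execute(&self.executor, Box::new(f))` in
		// `ClusterData::spawn` (see the cluster hunks below), which is the call
		// site this impl exists to keep compiling.
		future::Executor::execute(&self.inner, f)
	}
}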
parent f20f4c74d2
commit c880716f16
1 Cargo.lock generated
@@ -918,6 +918,7 @@ dependencies = [
 "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "parity-bytes 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "parity-crypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"parity-runtime 0.1.0",
 "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -739,7 +739,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
 		account_provider: account_provider,
 		accounts_passwords: &passwords,
 	};
-	let secretstore_key_server = secretstore::start(cmd.secretstore_conf.clone(), secretstore_deps)?;
+	let secretstore_key_server = secretstore::start(cmd.secretstore_conf.clone(), secretstore_deps, runtime.executor())?;
 
 	// the ipfs server
 	let ipfs_server = ipfs::start_server(cmd.ipfs_conf.clone(), client.clone())?;
@@ -24,6 +24,7 @@ use ethcore::miner::Miner;
 use ethkey::{Secret, Public};
 use sync::SyncProvider;
 use ethereum_types::Address;
+use parity_runtime::Executor;
 
 /// This node secret key.
 #[derive(Debug, PartialEq, Clone)]
@@ -100,14 +101,14 @@ pub struct Dependencies<'a> {
 
 #[cfg(not(feature = "secretstore"))]
 mod server {
-	use super::{Configuration, Dependencies};
+	use super::{Configuration, Dependencies, Executor};
 
 	/// Noop key server implementation
 	pub struct KeyServer;
 
 	impl KeyServer {
 		/// Create new noop key server
-		pub fn new(_conf: Configuration, _deps: Dependencies) -> Result<Self, String> {
+		pub fn new(_conf: Configuration, _deps: Dependencies, _executor: Executor) -> Result<Self, String> {
 			Ok(KeyServer)
 		}
 	}
@@ -120,7 +121,7 @@ mod server {
 	use ethkey::KeyPair;
 	use ansi_term::Colour::{Red, White};
 	use db;
-	use super::{Configuration, Dependencies, NodeSecretKey, ContractAddress};
+	use super::{Configuration, Dependencies, NodeSecretKey, ContractAddress, Executor};
 
 	fn into_service_contract_address(address: ContractAddress) -> ethcore_secretstore::ContractAddress {
 		match address {
@@ -136,7 +137,7 @@ mod server {
 
 	impl KeyServer {
 		/// Create new key server
-		pub fn new(mut conf: Configuration, deps: Dependencies) -> Result<Self, String> {
+		pub fn new(mut conf: Configuration, deps: Dependencies, executor: Executor) -> Result<Self, String> {
 			let self_secret: Arc<ethcore_secretstore::NodeKeyPair> = match conf.self_secret.take() {
 				Some(NodeSecretKey::Plain(secret)) => Arc::new(ethcore_secretstore::PlainNodeKeyPair::new(
 					KeyPair::from_secret(secret).map_err(|e| format!("invalid secret: {}", e))?)),
@@ -179,7 +180,6 @@ mod server {
 			service_contract_doc_sretr_address: conf.service_contract_doc_sretr_address.map(into_service_contract_address),
 			acl_check_contract_address: conf.acl_check_contract_address.map(into_service_contract_address),
 			cluster_config: ethcore_secretstore::ClusterConfiguration {
-				threads: 4,
 				listener_address: ethcore_secretstore::NodeAddress {
 					address: conf.interface.clone(),
 					port: conf.port,
@@ -198,7 +198,7 @@ mod server {
 		cconf.cluster_config.nodes.insert(self_secret.public().clone(), cconf.cluster_config.listener_address.clone());
 
 		let db = db::open_secretstore_db(&conf.data_path)?;
-		let key_server = ethcore_secretstore::start(deps.client, deps.sync, deps.miner, self_secret, cconf, db)
+		let key_server = ethcore_secretstore::start(deps.client, deps.sync, deps.miner, self_secret, cconf, db, executor)
 			.map_err(|e| format!("Error starting KeyServer {}: {}", key_server_name, e))?;
 
 		Ok(KeyServer {
@@ -238,11 +238,11 @@ impl Default for Configuration {
 }
 
 /// Start secret store-related functionality
-pub fn start(conf: Configuration, deps: Dependencies) -> Result<Option<KeyServer>, String> {
+pub fn start(conf: Configuration, deps: Dependencies, executor: Executor) -> Result<Option<KeyServer>, String> {
 	if !conf.enabled {
 		return Ok(None);
 	}
 
-	KeyServer::new(conf, deps)
+	KeyServer::new(conf, deps, executor)
 		.map(|s| Some(s))
 }
@@ -17,6 +17,7 @@ futures = "0.1"
 rustc-hex = "1.0"
 tiny-keccak = "1.4"
 tokio = "~0.1.11"
+parity-runtime = { path = "../util/runtime" }
 tokio-io = "0.1"
 tokio-service = "0.1"
 url = "1.0"
@@ -15,14 +15,11 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 use std::collections::BTreeSet;
-use std::thread;
 use std::sync::Arc;
-use std::sync::mpsc;
-use futures::{self, Future};
 use parking_lot::Mutex;
-use tokio::runtime;
 use crypto::DEFAULT_MAC;
 use ethkey::crypto;
+use parity_runtime::Executor;
 use super::acl_storage::AclStorage;
 use super::key_storage::KeyStorage;
 use super::key_server_set::KeyServerSet;
@@ -39,16 +36,16 @@ pub struct KeyServerImpl {
 
 /// Secret store key server data.
 pub struct KeyServerCore {
-	close: Option<futures::Complete<()>>,
-	handle: Option<thread::JoinHandle<()>>,
 	cluster: Arc<ClusterClient>,
 }
 
 impl KeyServerImpl {
 	/// Create new key server instance
-	pub fn new(config: &ClusterConfiguration, key_server_set: Arc<KeyServerSet>, self_key_pair: Arc<NodeKeyPair>, acl_storage: Arc<AclStorage>, key_storage: Arc<KeyStorage>) -> Result<Self, Error> {
+	pub fn new(config: &ClusterConfiguration, key_server_set: Arc<KeyServerSet>, self_key_pair: Arc<NodeKeyPair>,
+		acl_storage: Arc<AclStorage>, key_storage: Arc<KeyStorage>, executor: Executor) -> Result<Self, Error>
+	{
 		Ok(KeyServerImpl {
-			data: Arc::new(Mutex::new(KeyServerCore::new(config, key_server_set, self_key_pair, acl_storage, key_storage)?)),
+			data: Arc::new(Mutex::new(KeyServerCore::new(config, key_server_set, self_key_pair, acl_storage, key_storage, executor)?)),
 		})
 	}
 
@@ -175,9 +172,10 @@ impl MessageSigner for KeyServerImpl {
 }
 
 impl KeyServerCore {
-	pub fn new(config: &ClusterConfiguration, key_server_set: Arc<KeyServerSet>, self_key_pair: Arc<NodeKeyPair>, acl_storage: Arc<AclStorage>, key_storage: Arc<KeyStorage>) -> Result<Self, Error> {
+	pub fn new(config: &ClusterConfiguration, key_server_set: Arc<KeyServerSet>, self_key_pair: Arc<NodeKeyPair>,
+		acl_storage: Arc<AclStorage>, key_storage: Arc<KeyStorage>, executor: Executor) -> Result<Self, Error>
+	{
 		let config = NetClusterConfiguration {
-			threads: config.threads,
 			self_key_pair: self_key_pair.clone(),
 			listen_address: (config.listener_address.address.clone(), config.listener_address.port),
 			key_server_set: key_server_set,
@@ -188,45 +186,16 @@ impl KeyServerCore {
 			auto_migrate_enabled: config.auto_migrate_enabled,
 		};
 
-		let (stop, stopped) = futures::oneshot();
-		let (tx, rx) = mpsc::channel();
-		let handle = thread::Builder::new().name("KeyServerLoop".into()).spawn(move || {
-			let runtime_res = runtime::Builder::new()
-				.core_threads(config.threads)
-				.build();
-
-			let mut el = match runtime_res {
-				Ok(el) => el,
-				Err(e) => {
-					tx.send(Err(Error::Internal(format!("error initializing event loop: {}", e)))).expect("Rx is blocking upper thread.");
-					return;
-				},
-			};
-
-			let cluster = ClusterCore::new(el.executor(), config);
-			let cluster_client = cluster.and_then(|c| c.run().map(|_| c.client()));
-			tx.send(cluster_client.map_err(Into::into)).expect("Rx is blocking upper thread.");
-			let _ = el.block_on(futures::empty().select(stopped));
-
-			trace!(target: "secretstore_net", "{}: KeyServerLoop thread stopped", self_key_pair.public());
-		}).map_err(|e| Error::Internal(format!("{}", e)))?;
-		let cluster = rx.recv().map_err(|e| Error::Internal(format!("error initializing event loop: {}", e)))??;
+		let cluster = ClusterCore::new(executor, config)
+			.and_then(|c| c.run().map(|_| c.client()))
+			.map_err(|err| Error::from(err))?;
 
 		Ok(KeyServerCore {
-			close: Some(stop),
-			handle: Some(handle),
-			cluster: cluster,
+			cluster,
 		})
 	}
 }
 
-impl Drop for KeyServerCore {
-	fn drop(&mut self) {
-		self.close.take().map(|v| v.send(()));
-		self.handle.take().map(|h| h.join());
-	}
-}
-
 #[cfg(test)]
 pub mod tests {
 	use std::collections::BTreeSet;
@@ -243,6 +212,7 @@ pub mod tests {
 	use key_server_set::tests::MapKeyServerSet;
 	use key_server_cluster::math;
 	use ethereum_types::{H256, H520};
+	use parity_runtime::Runtime;
 	use types::{Error, Public, ClusterConfiguration, NodeAddress, RequestSignature, ServerKeyId,
 		EncryptedDocumentKey, EncryptedDocumentKeyShadow, MessageHash, EncryptedMessageSignature,
 		Requester, NodeId};
@@ -294,10 +264,9 @@ pub mod tests {
 		}
 	}
 
-	fn make_key_servers(start_port: u16, num_nodes: usize) -> (Vec<KeyServerImpl>, Vec<Arc<DummyKeyStorage>>) {
+	fn make_key_servers(start_port: u16, num_nodes: usize) -> (Vec<KeyServerImpl>, Vec<Arc<DummyKeyStorage>>, Runtime) {
 		let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect();
 		let configs: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration {
-			threads: 1,
 			listener_address: NodeAddress {
 				address: "127.0.0.1".into(),
 				port: start_port + (i as u16),
@@ -316,11 +285,12 @@ pub mod tests {
 			.map(|(k, a)| (k.clone(), format!("{}:{}", a.address, a.port).parse().unwrap()))
 			.collect();
 		let key_storages = (0..num_nodes).map(|_| Arc::new(DummyKeyStorage::default())).collect::<Vec<_>>();
+		let runtime = Runtime::with_thread_count(4);
 		let key_servers: Vec<_> = configs.into_iter().enumerate().map(|(i, cfg)|
 			KeyServerImpl::new(&cfg, Arc::new(MapKeyServerSet::new(false, key_servers_set.clone())),
 				Arc::new(PlainNodeKeyPair::new(key_pairs[i].clone())),
 				Arc::new(DummyAclStorage::default()),
-				key_storages[i].clone()).unwrap()
+				key_storages[i].clone(), runtime.executor()).unwrap()
 		).collect();
 
 		// wait until connections are established. It is fast => do not bother with events here
@@ -350,13 +320,13 @@ pub mod tests {
 			}
 		}
 
-		(key_servers, key_storages)
+		(key_servers, key_storages, runtime)
 	}
 
 	#[test]
 	fn document_key_generation_and_retrievement_works_over_network_with_single_node() {
 		//::logger::init_log();
-		let (key_servers, _) = make_key_servers(6070, 1);
+		let (key_servers, _, runtime) = make_key_servers(6070, 1);
 
 		// generate document key
 		let threshold = 0;
@@ -372,12 +342,13 @@ pub mod tests {
 			let retrieved_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap();
 			assert_eq!(retrieved_key, generated_key);
 		}
+		drop(runtime);
 	}
 
 	#[test]
 	fn document_key_generation_and_retrievement_works_over_network_with_3_nodes() {
 		//::logger::init_log();
-		let (key_servers, key_storages) = make_key_servers(6080, 3);
+		let (key_servers, key_storages, runtime) = make_key_servers(6080, 3);
 
 		let test_cases = [0, 1, 2];
 		for threshold in &test_cases {
@@ -399,12 +370,13 @@ pub mod tests {
 				assert!(key_share.encrypted_point.is_some());
 			}
 		}
+		drop(runtime);
 	}
 
 	#[test]
 	fn server_key_generation_and_storing_document_key_works_over_network_with_3_nodes() {
 		//::logger::init_log();
-		let (key_servers, _) = make_key_servers(6090, 3);
+		let (key_servers, _, runtime) = make_key_servers(6090, 3);
 
 		let test_cases = [0, 1, 2];
 		for threshold in &test_cases {
@@ -430,12 +402,13 @@ pub mod tests {
 				assert_eq!(retrieved_key, generated_key);
 			}
 		}
+		drop(runtime);
 	}
 
 	#[test]
 	fn server_key_generation_and_message_signing_works_over_network_with_3_nodes() {
 		//::logger::init_log();
-		let (key_servers, _) = make_key_servers(6100, 3);
+		let (key_servers, _, runtime) = make_key_servers(6100, 3);
 
 		let test_cases = [0, 1, 2];
 		for threshold in &test_cases {
@@ -455,12 +428,13 @@ pub mod tests {
 			// check signature
 			assert_eq!(math::verify_schnorr_signature(&server_public, &(signature_c, signature_s), &message_hash), Ok(true));
 		}
+		drop(runtime);
 	}
 
 	#[test]
 	fn decryption_session_is_delegated_when_node_does_not_have_key_share() {
 		//::logger::init_log();
-		let (key_servers, _) = make_key_servers(6110, 3);
+		let (key_servers, _, runtime) = make_key_servers(6110, 3);
 
 		// generate document key
 		let threshold = 0;
@@ -477,12 +451,13 @@ pub mod tests {
 		let retrieved_key = key_servers[0].restore_document_key(&document, &signature.into()).unwrap();
 		let retrieved_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap();
 		assert_eq!(retrieved_key, generated_key);
+		drop(runtime);
 	}
 
 	#[test]
 	fn schnorr_signing_session_is_delegated_when_node_does_not_have_key_share() {
 		//::logger::init_log();
-		let (key_servers, _) = make_key_servers(6114, 3);
+		let (key_servers, _, runtime) = make_key_servers(6114, 3);
 		let threshold = 1;
 
 		// generate server key
@@ -503,12 +478,13 @@ pub mod tests {
 
 		// check signature
 		assert_eq!(math::verify_schnorr_signature(&server_public, &(signature_c, signature_s), &message_hash), Ok(true));
+		drop(runtime);
 	}
 
 	#[test]
 	fn ecdsa_signing_session_is_delegated_when_node_does_not_have_key_share() {
 		//::logger::init_log();
-		let (key_servers, _) = make_key_servers(6117, 4);
+		let (key_servers, _, runtime) = make_key_servers(6117, 4);
 		let threshold = 1;
 
 		// generate server key
@@ -528,6 +504,7 @@ pub mod tests {
 
 		// check signature
 		assert!(verify_public(&server_public, &signature.into(), &message_hash).unwrap());
+		drop(runtime);
 	}
 
 	#[test]
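With the private `KeyServerLoop` thread gone, the caller now owns the event loop and decides how long it lives. A hedged usage sketch of the resulting shape, built only from calls that appear in the hunks above (`Runtime::with_thread_count`, `runtime.executor()`, and the executor-taking `KeyServerImpl::new`); `config`, `key_server_set`, `self_key_pair`, `acl_storage`, and `key_storage` stand for values constructed as in the tests:

use parity_runtime::Runtime;

// One shared runtime drives every key server created from it.
let runtime = Runtime::with_thread_count(4);
let key_server = KeyServerImpl::new(&config, key_server_set, self_key_pair,
	acl_storage, key_storage, runtime.executor())?;

// The runtime must outlive the servers it drives, which is why
// `make_key_servers` now returns it and each test ends with `drop(runtime)`.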
@@ -1367,12 +1367,12 @@ pub mod tests {
 			let clusters_clone = clusters.clone();
 
 			// establish connections
-			loop_until(&mut core, CONN_TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
+			loop_until(&core.executor(), CONN_TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
 
 			// run session to completion
 			let session_id = SessionId::default();
 			let session = clusters[0].client().new_generation_session(session_id, Default::default(), Default::default(), threshold).unwrap();
-			loop_until(&mut core, SESSION_TIMEOUT, move || session.joint_public_and_secret().is_some());
+			loop_until(&core.executor(), SESSION_TIMEOUT, move || session.joint_public_and_secret().is_some());
 		}
 	}
 
@@ -24,11 +24,11 @@ use std::net::{SocketAddr, IpAddr};
 use futures::{future, Future, Stream};
 use parking_lot::{Mutex, RwLock};
 use tokio_io::IoFuture;
-use tokio::runtime::TaskExecutor;
 use tokio::timer::{Interval, timeout::Error as TimeoutError};
 use tokio::net::{TcpListener, TcpStream};
 use ethkey::{Public, KeyPair, Signature, Random, Generator};
 use ethereum_types::{Address, H256};
+use parity_runtime::Executor;
 use key_server_cluster::{Error, NodeId, SessionId, Requester, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair};
 use key_server_cluster::cluster_sessions::{ClusterSession, AdminSession, ClusterSessions, SessionIdWithSubSession,
 	ClusterSessionsContainer, SERVERS_SET_CHANGE_SESSION_ID, create_cluster_view, AdminSessionCreationData, ClusterSessionsListener};
@@ -121,8 +121,6 @@ pub trait Cluster: Send + Sync {
 /// Cluster initialization parameters.
 #[derive(Clone)]
 pub struct ClusterConfiguration {
-	/// Number of threads reserved by cluster.
-	pub threads: usize,
 	/// Allow connecting to 'higher' nodes.
 	pub allow_connecting_to_higher_nodes: bool,
 	/// KeyPair this node holds.
@@ -175,14 +173,14 @@ pub struct ClusterData {
 	/// Cluster configuration.
 	pub config: ClusterConfiguration,
 	/// Handle to the event loop.
-	pub executor: TaskExecutor,
+	pub executor: Executor,
 	/// KeyPair this node holds.
 	pub self_key_pair: Arc<NodeKeyPair>,
 	/// Connections data.
 	pub connections: ClusterConnections,
 	/// Active sessions data.
 	pub sessions: ClusterSessions,
-	/// Shutdown flag:
+	/// A shutdown flag.
 	pub is_shutdown: Arc<AtomicBool>,
 }
 
@@ -235,7 +233,7 @@ pub struct Connection {
 }
 
 impl ClusterCore {
-	pub fn new(executor: TaskExecutor, config: ClusterConfiguration) -> Result<Arc<Self>, Error> {
+	pub fn new(executor: Executor, config: ClusterConfiguration) -> Result<Arc<Self>, Error> {
 		let listen_address = make_socket_address(&config.listen_address.0, config.listen_address.1)?;
 		let connections = ClusterConnections::new(&config)?;
 		let servers_set_change_creator_connector = connections.connector.clone();
@@ -790,7 +788,7 @@ impl ClusterConnections {
 }
 
 impl ClusterData {
-	pub fn new(executor: &TaskExecutor, config: ClusterConfiguration, connections: ClusterConnections, sessions: ClusterSessions) -> Arc<Self> {
+	pub fn new(executor: &Executor, config: ClusterConfiguration, connections: ClusterConnections, sessions: ClusterSessions) -> Arc<Self> {
 		Arc::new(ClusterData {
 			executor: executor.clone(),
 			self_key_pair: config.self_key_pair.clone(),
@@ -807,12 +805,6 @@ impl ClusterData {
 	}
 
 	/// Spawns a future on the runtime.
-	//
-	// TODO: Consider implementing a more graceful shutdown process using an
-	// `AtomicBool`, etc. which would prevent tasks from being spawned after a
-	// shutdown signal is given. (Recursive calls, in
-	// `process_connection_messages` for example, appear to continue
-	// indefinitely.)
	pub fn spawn<F>(&self, f: F) where F: Future<Item = (), Error = ()> + Send + 'static {
 		if self.is_shutdown.load(Ordering::Acquire) == false {
 			if let Err(err) = future::Executor::execute(&self.executor, Box::new(f)) {
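Note how the `future::Executor::execute(&self.executor, Box::new(f))` call retained by `ClusterData::spawn` above only continues to type-check because `parity_runtime::Executor` now implements futures' `future::Executor` trait itself (the third bullet of the commit message, sketched near the top): the `executor` field changed type from tokio's `TaskExecutor`, but the call site stays untouched.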
@@ -1139,9 +1131,12 @@ pub mod tests {
 	use std::collections::{BTreeSet, VecDeque};
 	use parking_lot::RwLock;
 	use tokio::{
-		runtime::{Runtime, Builder as RuntimeBuilder},
 		prelude::{future, Future},
 	};
+	use parity_runtime::{
+		futures::sync::oneshot,
+		Runtime, Executor,
+	};
 	use ethereum_types::{Address, H256};
 	use ethkey::{Random, Generator, Public, Signature, sign};
 	use key_server_cluster::{NodeId, SessionId, Requester, Error, DummyAclStorage, DummyKeyStorage,
@@ -1263,16 +1258,18 @@ pub mod tests {
 		}
 	}
 
-	/// Loops until `predicate` returns `true` or `timeout` has elapsed.
-	pub fn loop_until<F>(runtime: &mut Runtime, timeout: Duration, predicate: F)
+	/// Blocks the calling thread, looping until `predicate` returns `true` or
+	/// `timeout` has elapsed.
+	pub fn loop_until<F>(executor: &Executor, timeout: Duration, predicate: F)
 		where F: Send + 'static + Fn() -> bool
 	{
 		use futures::Stream;
 		use tokio::timer::Interval;
 
 		let start = Instant::now();
+		let (complete_tx, complete_rx) = oneshot::channel();
 
-		runtime.block_on(Interval::new_interval(Duration::from_millis(1))
+		executor.spawn(Interval::new_interval(Duration::from_millis(1))
 			.and_then(move |_| {
 				if Instant::now() - start > timeout {
 					panic!("no result in {:?}", timeout);
@@ -1282,8 +1279,13 @@ pub mod tests {
 			})
 			.take_while(move |_| future::ok(!predicate()))
 			.for_each(|_| Ok(()))
-			.then(|_| future::ok::<(), ()>(()))
-		).unwrap();
+			.then(|_| {
+				complete_tx.send(()).expect("receiver dropped");
+				future::ok::<(), ()>(())
+			})
+		);
 
+		complete_rx.wait().unwrap();
 	}
 
 	pub fn all_connections_established(cluster: &Arc<ClusterCore>) -> bool {
|
|||||||
pub fn make_clusters(runtime: &Runtime, ports_begin: u16, num_nodes: usize) -> Vec<Arc<ClusterCore>> {
|
pub fn make_clusters(runtime: &Runtime, ports_begin: u16, num_nodes: usize) -> Vec<Arc<ClusterCore>> {
|
||||||
let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect();
|
let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect();
|
||||||
let cluster_params: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration {
|
let cluster_params: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration {
|
||||||
threads: 1,
|
|
||||||
self_key_pair: Arc::new(PlainNodeKeyPair::new(key_pairs[i].clone())),
|
self_key_pair: Arc::new(PlainNodeKeyPair::new(key_pairs[i].clone())),
|
||||||
listen_address: ("127.0.0.1".to_owned(), ports_begin + i as u16),
|
listen_address: ("127.0.0.1".to_owned(), ports_begin + i as u16),
|
||||||
key_server_set: Arc::new(MapKeyServerSet::new(false, key_pairs.iter().enumerate()
|
key_server_set: Arc::new(MapKeyServerSet::new(false, key_pairs.iter().enumerate()
|
||||||
@ -1331,21 +1332,17 @@ pub mod tests {
|
|||||||
|
|
||||||
/// Returns a new runtime with a static number of threads.
|
/// Returns a new runtime with a static number of threads.
|
||||||
pub fn new_runtime() -> Runtime {
|
pub fn new_runtime() -> Runtime {
|
||||||
RuntimeBuilder::new()
|
Runtime::with_thread_count(4)
|
||||||
.core_threads(4)
|
|
||||||
.build()
|
|
||||||
.expect("Unable to create tokio runtime")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn cluster_connects_to_other_nodes() {
|
fn cluster_connects_to_other_nodes() {
|
||||||
let mut runtime = new_runtime();
|
let runtime = new_runtime();
|
||||||
let clusters = make_clusters(&runtime, 6010, 3);
|
let clusters = make_clusters(&runtime, 6010, 3);
|
||||||
run_clusters(&clusters);
|
run_clusters(&clusters);
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
|
loop_until(&runtime.executor(), TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
|
||||||
shutdown_clusters(&clusters);
|
shutdown_clusters(&clusters);
|
||||||
runtime.shutdown_now().wait().unwrap();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@ -1359,17 +1356,16 @@ pub mod tests {
|
|||||||
_ => panic!("unexpected success"),
|
_ => panic!("unexpected success"),
|
||||||
}
|
}
|
||||||
shutdown_clusters(&clusters);
|
shutdown_clusters(&clusters);
|
||||||
runtime.shutdown_now().wait().unwrap();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn error_in_generation_session_broadcasted_to_all_other_nodes() {
|
fn error_in_generation_session_broadcasted_to_all_other_nodes() {
|
||||||
//::logger::init_log();
|
//::logger::init_log();
|
||||||
let mut runtime = new_runtime();
|
let runtime = new_runtime();
|
||||||
let clusters = make_clusters(&runtime, 6016, 3);
|
let clusters = make_clusters(&runtime, 6016, 3);
|
||||||
run_clusters(&clusters);
|
run_clusters(&clusters);
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
|
loop_until(&runtime.executor(), TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
|
||||||
|
|
||||||
// ask one of nodes to produce faulty generation sessions
|
// ask one of nodes to produce faulty generation sessions
|
||||||
clusters[1].client().make_faulty_generation_sessions();
|
clusters[1].client().make_faulty_generation_sessions();
|
||||||
@ -1378,7 +1374,7 @@ pub mod tests {
|
|||||||
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
|
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
|
||||||
let session_clone = session.clone();
|
let session_clone = session.clone();
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
|
loop_until(&runtime.executor(), TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
|
||||||
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
|
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
|
||||||
assert!(session.joint_public_and_secret().unwrap().is_err());
|
assert!(session.joint_public_and_secret().unwrap().is_err());
|
||||||
|
|
||||||
@ -1389,24 +1385,22 @@ pub mod tests {
|
|||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
// wait for both session completion && session removal (session completion event is fired
|
// wait for both session completion && session removal (session completion event is fired
|
||||||
// before session is removed from its own container by cluster)
|
// before session is removed from its own container by cluster)
|
||||||
loop_until(&mut runtime, TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
|
loop_until(&runtime.executor(), TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
|
||||||
&& clusters_clone[i].client().generation_session(&SessionId::default()).is_none());
|
&& clusters_clone[i].client().generation_session(&SessionId::default()).is_none());
|
||||||
assert!(session.joint_public_and_secret().unwrap().is_err());
|
assert!(session.joint_public_and_secret().unwrap().is_err());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
shutdown_clusters(&clusters);
|
shutdown_clusters(&clusters);
|
||||||
runtime.shutdown_now().wait().unwrap();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn generation_session_completion_signalled_if_failed_on_master() {
|
fn generation_session_completion_signalled_if_failed_on_master() {
|
||||||
//::logger::init_log();
|
//::logger::init_log();
|
||||||
let mut runtime = new_runtime();
|
let runtime = new_runtime();
|
||||||
|
|
||||||
let clusters = make_clusters(&runtime, 6025, 3);
|
let clusters = make_clusters(&runtime, 6025, 3);
|
||||||
run_clusters(&clusters);
|
run_clusters(&clusters);
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
|
loop_until(&runtime.executor(), TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
|
||||||
|
|
||||||
// ask one of nodes to produce faulty generation sessions
|
// ask one of nodes to produce faulty generation sessions
|
||||||
clusters[0].client().make_faulty_generation_sessions();
|
clusters[0].client().make_faulty_generation_sessions();
|
||||||
@ -1415,7 +1409,7 @@ pub mod tests {
|
|||||||
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
|
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
|
||||||
let session_clone = session.clone();
|
let session_clone = session.clone();
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
|
loop_until(&runtime.executor(), TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
|
||||||
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
|
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
|
||||||
assert!(session.joint_public_and_secret().unwrap().is_err());
|
assert!(session.joint_public_and_secret().unwrap().is_err());
|
||||||
|
|
||||||
@ -1426,29 +1420,28 @@ pub mod tests {
|
|||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
// wait for both session completion && session removal (session completion event is fired
|
// wait for both session completion && session removal (session completion event is fired
|
||||||
// before session is removed from its own container by cluster)
|
// before session is removed from its own container by cluster)
|
||||||
loop_until(&mut runtime, TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
|
loop_until(&runtime.executor(), TIMEOUT, move || session_clone.joint_public_and_secret().is_some()
|
||||||
&& clusters_clone[i].client().generation_session(&SessionId::default()).is_none());
|
&& clusters_clone[i].client().generation_session(&SessionId::default()).is_none());
|
||||||
assert!(session.joint_public_and_secret().unwrap().is_err());
|
assert!(session.joint_public_and_secret().unwrap().is_err());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
shutdown_clusters(&clusters);
|
shutdown_clusters(&clusters);
|
||||||
runtime.shutdown_now().wait().unwrap();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn generation_session_is_removed_when_succeeded() {
|
fn generation_session_is_removed_when_succeeded() {
|
||||||
//::logger::init_log();
|
//::logger::init_log();
|
||||||
let mut runtime = new_runtime();
|
let runtime = new_runtime();
|
||||||
let clusters = make_clusters(&runtime, 6019, 3);
|
let clusters = make_clusters(&runtime, 6019, 3);
|
||||||
run_clusters(&clusters);
|
run_clusters(&clusters);
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
|
loop_until(&runtime.executor(), TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
|
||||||
|
|
||||||
// start && wait for generation session to complete
|
// start && wait for generation session to complete
|
||||||
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
|
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
|
||||||
let session_clone = session.clone();
|
let session_clone = session.clone();
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|
loop_until(&runtime.executor(), TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|
||||||
|| session_clone.state() == GenerationSessionState::Failed)
|
|| session_clone.state() == GenerationSessionState::Failed)
|
||||||
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
|
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
|
||||||
assert!(session.joint_public_and_secret().unwrap().is_ok());
|
assert!(session.joint_public_and_secret().unwrap().is_ok());
|
||||||
@ -1462,22 +1455,21 @@ pub mod tests {
|
|||||||
// AND check that it is actually removed from cluster sessions
|
// AND check that it is actually removed from cluster sessions
|
||||||
let session_clone = session.clone();
|
let session_clone = session.clone();
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|
loop_until(&runtime.executor(), TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|
||||||
|| session_clone.state() == GenerationSessionState::Failed)
|
|| session_clone.state() == GenerationSessionState::Failed)
|
||||||
&& clusters_clone[i].client().generation_session(&SessionId::default()).is_none());
|
&& clusters_clone[i].client().generation_session(&SessionId::default()).is_none());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
shutdown_clusters(&clusters);
|
shutdown_clusters(&clusters);
|
||||||
runtime.shutdown_now().wait().unwrap();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn sessions_are_removed_when_initialization_fails() {
|
fn sessions_are_removed_when_initialization_fails() {
|
||||||
let mut runtime = new_runtime();
|
let runtime = new_runtime();
|
||||||
let clusters = make_clusters(&runtime, 6022, 3);
|
let clusters = make_clusters(&runtime, 6022, 3);
|
||||||
run_clusters(&clusters);
|
run_clusters(&clusters);
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
|
loop_until(&runtime.executor(), TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
|
||||||
|
|
||||||
// generation session
|
// generation session
|
||||||
{
|
{
|
||||||
@ -1506,7 +1498,6 @@ pub mod tests {
|
|||||||
assert!(clusters[0].data.sessions.negotiation_sessions.is_empty());
|
assert!(clusters[0].data.sessions.negotiation_sessions.is_empty());
|
||||||
}
|
}
|
||||||
shutdown_clusters(&clusters);
|
shutdown_clusters(&clusters);
|
||||||
runtime.shutdown_now().wait().unwrap();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// test ignored because of
|
// test ignored because of
|
||||||
@ -1516,17 +1507,17 @@ pub mod tests {
|
|||||||
#[ignore]
|
#[ignore]
|
||||||
fn schnorr_signing_session_completes_if_node_does_not_have_a_share() {
|
fn schnorr_signing_session_completes_if_node_does_not_have_a_share() {
|
||||||
//::logger::init_log();
|
//::logger::init_log();
|
||||||
let mut runtime = new_runtime();
|
let runtime = new_runtime();
|
||||||
let clusters = make_clusters(&runtime, 6028, 3);
|
let clusters = make_clusters(&runtime, 6028, 3);
|
||||||
run_clusters(&clusters);
|
run_clusters(&clusters);
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
|
loop_until(&runtime.executor(), TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
|
||||||
|
|
||||||
// start && wait for generation session to complete
|
// start && wait for generation session to complete
|
||||||
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
|
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
|
||||||
let session_clone = session.clone();
|
let session_clone = session.clone();
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|
loop_until(&runtime.executor(), TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|
||||||
|| session_clone.state() == GenerationSessionState::Failed)
|
|| session_clone.state() == GenerationSessionState::Failed)
|
||||||
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
|
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
|
||||||
assert!(session.joint_public_and_secret().unwrap().is_ok());
|
assert!(session.joint_public_and_secret().unwrap().is_ok());
|
||||||
@ -1542,7 +1533,7 @@ pub mod tests {
|
|||||||
|
|
||||||
let session_clone = session.clone();
|
let session_clone = session.clone();
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || session_clone.is_finished() && (0..3).all(|i|
|
loop_until(&runtime.executor(), TIMEOUT, move || session_clone.is_finished() && (0..3).all(|i|
|
||||||
clusters_clone[i].data.sessions.schnorr_signing_sessions.is_empty()));
|
clusters_clone[i].data.sessions.schnorr_signing_sessions.is_empty()));
|
||||||
session0.wait().unwrap();
|
session0.wait().unwrap();
|
||||||
|
|
||||||
@ -1553,7 +1544,7 @@ pub mod tests {
|
|||||||
|
|
||||||
let session_clone = session.clone();
|
let session_clone = session.clone();
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || session_clone.is_finished() && (0..3).all(|i|
|
loop_until(&runtime.executor(), TIMEOUT, move || session_clone.is_finished() && (0..3).all(|i|
|
||||||
clusters_clone[i].data.sessions.schnorr_signing_sessions.is_empty()));
|
clusters_clone[i].data.sessions.schnorr_signing_sessions.is_empty()));
|
||||||
session2.wait().unwrap();
|
session2.wait().unwrap();
|
||||||
|
|
||||||
@ -1566,10 +1557,9 @@ pub mod tests {
|
|||||||
let session = clusters[0].data.sessions.schnorr_signing_sessions.first().unwrap();
|
let session = clusters[0].data.sessions.schnorr_signing_sessions.first().unwrap();
|
||||||
|
|
||||||
let session = session.clone();
|
let session = session.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || session.is_finished());
|
loop_until(&runtime.executor(), TIMEOUT, move || session.is_finished());
|
||||||
session1.wait().unwrap_err();
|
session1.wait().unwrap_err();
|
||||||
shutdown_clusters(&clusters);
|
shutdown_clusters(&clusters);
|
||||||
runtime.shutdown_now().wait().unwrap();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// test ignored because of
|
// test ignored because of
|
||||||
@ -1579,17 +1569,17 @@ pub mod tests {
|
|||||||
#[ignore]
|
#[ignore]
|
||||||
fn ecdsa_signing_session_completes_if_node_does_not_have_a_share() {
|
fn ecdsa_signing_session_completes_if_node_does_not_have_a_share() {
|
||||||
//::logger::init_log();
|
//::logger::init_log();
|
||||||
let mut runtime = new_runtime();
|
let runtime = new_runtime();
|
||||||
let clusters = make_clusters(&runtime, 6041, 4);
|
let clusters = make_clusters(&runtime, 6041, 4);
|
||||||
run_clusters(&clusters);
|
run_clusters(&clusters);
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
|
loop_until(&runtime.executor(), TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
|
||||||
|
|
||||||
// start && wait for generation session to complete
|
// start && wait for generation session to complete
|
||||||
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
|
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
|
||||||
let session_clone = session.clone();
|
let session_clone = session.clone();
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|
loop_until(&runtime.executor(), TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished
|
||||||
|| session_clone.state() == GenerationSessionState::Failed)
|
|| session_clone.state() == GenerationSessionState::Failed)
|
||||||
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
|
&& clusters_clone[0].client().generation_session(&SessionId::default()).is_none());
|
||||||
assert!(session.joint_public_and_secret().unwrap().is_ok());
|
assert!(session.joint_public_and_secret().unwrap().is_ok());
|
||||||
@ -1605,7 +1595,7 @@ pub mod tests {
|
|||||||
|
|
||||||
let session_clone = session.clone();
|
let session_clone = session.clone();
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, Duration::from_millis(1000), move || session_clone.is_finished() && (0..3).all(|i|
|
loop_until(&runtime.executor(), Duration::from_millis(1000), move || session_clone.is_finished() && (0..3).all(|i|
|
||||||
clusters_clone[i].data.sessions.ecdsa_signing_sessions.is_empty()));
|
clusters_clone[i].data.sessions.ecdsa_signing_sessions.is_empty()));
|
||||||
session0.wait().unwrap();
|
session0.wait().unwrap();
|
||||||
|
|
||||||
@ -1615,7 +1605,7 @@ pub mod tests {
|
|||||||
let session = clusters[2].data.sessions.ecdsa_signing_sessions.first().unwrap();
|
let session = clusters[2].data.sessions.ecdsa_signing_sessions.first().unwrap();
|
||||||
let session_clone = session.clone();
|
let session_clone = session.clone();
|
||||||
let clusters_clone = clusters.clone();
|
let clusters_clone = clusters.clone();
|
||||||
loop_until(&mut runtime, Duration::from_millis(1000), move || session_clone.is_finished() && (0..3).all(|i|
|
loop_until(&runtime.executor(), Duration::from_millis(1000), move || session_clone.is_finished() && (0..3).all(|i|
|
||||||
clusters_clone[i].data.sessions.ecdsa_signing_sessions.is_empty()));
|
clusters_clone[i].data.sessions.ecdsa_signing_sessions.is_empty()));
|
||||||
session2.wait().unwrap();
|
session2.wait().unwrap();
|
||||||
|
|
||||||
@ -1626,9 +1616,8 @@ pub mod tests {
|
|||||||
let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap();
|
let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap();
|
||||||
let session1 = clusters[0].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap();
|
let session1 = clusters[0].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap();
|
||||||
let session = clusters[0].data.sessions.ecdsa_signing_sessions.first().unwrap();
|
let session = clusters[0].data.sessions.ecdsa_signing_sessions.first().unwrap();
|
||||||
loop_until(&mut runtime, Duration::from_millis(1000), move || session.is_finished());
|
loop_until(&runtime.executor(), Duration::from_millis(1000), move || session.is_finished());
|
||||||
session1.wait().unwrap_err();
|
session1.wait().unwrap_err();
|
||||||
shutdown_clusters(&clusters);
|
shutdown_clusters(&clusters);
|
||||||
runtime.shutdown_now().wait().unwrap();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -582,7 +582,6 @@ mod tests {
 	pub fn make_cluster_sessions() -> ClusterSessions {
 		let key_pair = Random.generate().unwrap();
 		let config = ClusterConfiguration {
-			threads: 1,
 			self_key_pair: Arc::new(PlainNodeKeyPair::new(key_pair.clone())),
 			listen_address: ("127.0.0.1".to_owned(), 100_u16),
 			key_server_set: Arc::new(MapKeyServerSet::new(false, vec![(key_pair.public().clone(), format!("127.0.0.1:{}", 100).parse().unwrap())].into_iter().collect())),
@ -33,6 +33,7 @@ extern crate serde;
|
|||||||
extern crate serde_json;
|
extern crate serde_json;
|
||||||
extern crate tiny_keccak;
|
extern crate tiny_keccak;
|
||||||
extern crate tokio;
|
extern crate tokio;
|
||||||
|
extern crate parity_runtime;
|
||||||
extern crate tokio_io;
|
extern crate tokio_io;
|
||||||
extern crate tokio_service;
|
extern crate tokio_service;
|
||||||
extern crate url;
|
extern crate url;
|
||||||
@ -72,6 +73,7 @@ use kvdb::KeyValueDB;
|
|||||||
use ethcore::client::Client;
|
use ethcore::client::Client;
|
||||||
use ethcore::miner::Miner;
|
use ethcore::miner::Miner;
|
||||||
use sync::SyncProvider;
|
use sync::SyncProvider;
|
||||||
|
use parity_runtime::Executor;
|
||||||
|
|
||||||
pub use types::{ServerKeyId, EncryptedDocumentKey, RequestSignature, Public,
|
pub use types::{ServerKeyId, EncryptedDocumentKey, RequestSignature, Public,
|
||||||
Error, NodeAddress, ContractAddress, ServiceConfiguration, ClusterConfiguration};
|
Error, NodeAddress, ContractAddress, ServiceConfiguration, ClusterConfiguration};
|
||||||
@@ -79,7 +81,9 @@ pub use traits::{NodeKeyPair, KeyServer};
 pub use self::node_key_pair::{PlainNodeKeyPair, KeyStoreNodeKeyPair};
 
 /// Start new key server instance
-pub fn start(client: Arc<Client>, sync: Arc<SyncProvider>, miner: Arc<Miner>, self_key_pair: Arc<NodeKeyPair>, mut config: ServiceConfiguration, db: Arc<KeyValueDB>) -> Result<Box<KeyServer>, Error> {
+pub fn start(client: Arc<Client>, sync: Arc<SyncProvider>, miner: Arc<Miner>, self_key_pair: Arc<NodeKeyPair>, mut config: ServiceConfiguration,
+	db: Arc<KeyValueDB>, executor: Executor) -> Result<Box<KeyServer>, Error>
+{
 	let trusted_client = trusted_client::TrustedClient::new(self_key_pair.clone(), client.clone(), sync, miner);
 	let acl_storage: Arc<acl_storage::AclStorage> = match config.acl_check_contract_address.take() {
 		Some(acl_check_contract_address) => acl_storage::OnChainAclStorage::new(trusted_client.clone(), acl_check_contract_address)?,
@@ -89,13 +93,14 @@ pub fn start(client: Arc<Client>, sync: Arc<SyncProvider>, miner: Arc<Miner>, se
 	let key_server_set = key_server_set::OnChainKeyServerSet::new(trusted_client.clone(), config.cluster_config.key_server_set_contract_address.take(),
 		self_key_pair.clone(), config.cluster_config.auto_migrate_enabled, config.cluster_config.nodes.clone())?;
 	let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(db)?);
-	let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set.clone(), self_key_pair.clone(), acl_storage.clone(), key_storage.clone())?);
+	let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set.clone(), self_key_pair.clone(),
+		acl_storage.clone(), key_storage.clone(), executor.clone())?);
 	let cluster = key_server.cluster();
 	let key_server: Arc<KeyServer> = key_server;
 
 	// prepare HTTP listener
 	let http_listener = match config.listener_address {
-		Some(listener_address) => Some(listener::http_listener::KeyServerHttpListener::start(listener_address, Arc::downgrade(&key_server))?),
+		Some(listener_address) => Some(listener::http_listener::KeyServerHttpListener::start(listener_address, Arc::downgrade(&key_server), executor)?),
 		None => None,
 	};
 
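Taken together with the signature change above, ownership inverts: the embedder creates the runtime and threads its executor into start(). A caller-side sketch using names visible in this diff (wire_secretstore itself is hypothetical, imports of the secret-store types NodeKeyPair, ServiceConfiguration, Error, and KeyServer are elided, and Runtime::with_thread_count is the constructor used in the test hunk further down):

use std::sync::Arc;
use ethcore::client::Client;
use ethcore::miner::Miner;
use kvdb::KeyValueDB;
use sync::SyncProvider;
use parity_runtime::Runtime;

// Hypothetical embedder-side wiring: the runtime must outlive the key server,
// so it is created here and returned alongside the server handle.
fn wire_secretstore(client: Arc<Client>, sync: Arc<SyncProvider>, miner: Arc<Miner>,
	self_key_pair: Arc<NodeKeyPair>, config: ServiceConfiguration, db: Arc<KeyValueDB>)
	-> Result<(Runtime, Box<KeyServer>), Error>
{
	let runtime = Runtime::with_thread_count(2); // thread count is an arbitrary choice here
	let key_server = start(client, sync, miner, self_key_pair, config, db, runtime.executor())?;
	Ok((runtime, key_server))
}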
@@ -26,7 +26,7 @@ use serde::Serialize;
 use serde_json;
 use tokio;
 use tokio::net::TcpListener;
-use tokio::runtime::{Runtime, Builder as RuntimeBuilder};
+use parity_runtime::Executor;
 use futures::{future, Future, Stream};
 use url::percent_encoding::percent_decode;
 
@@ -46,7 +46,7 @@ use types::{Error, Public, MessageHash, NodeAddress, RequestSignature, ServerKey
 /// To change servers set: POST /admin/servers_set_change/{old_signature}/{new_signature} + BODY: json array of hex-encoded nodes ids
 
 pub struct KeyServerHttpListener {
-	_runtime: Runtime,
+	_executor: Executor,
 	_handler: Arc<KeyServerSharedHttpHandler>,
 }
 
@@ -86,15 +86,11 @@ struct KeyServerSharedHttpHandler {
 
 impl KeyServerHttpListener {
 	/// Start KeyServer http listener
-	pub fn start(listener_address: NodeAddress, key_server: Weak<KeyServer>) -> Result<Self, Error> {
+	pub fn start(listener_address: NodeAddress, key_server: Weak<KeyServer>, executor: Executor) -> Result<Self, Error> {
 		let shared_handler = Arc::new(KeyServerSharedHttpHandler {
 			key_server: key_server,
 		});
 
-		let mut runtime = RuntimeBuilder::new()
-			// TODO: Add config option/arg?
-			.core_threads(2)
-			.build()?;
 		let listener_address = format!("{}:{}", listener_address.address, listener_address.port).parse()?;
 		let listener = TcpListener::bind(&listener_address)?;
 
@@ -113,10 +109,10 @@ impl KeyServerHttpListener {
 				tokio::spawn(serve)
 			});
 
-		runtime.spawn(server);
+		executor.spawn(server);
 
 		let listener = KeyServerHttpListener {
-			_runtime: runtime,
+			_executor: executor,
 			_handler: shared_handler,
 		};
 
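The shape adopted here generalizes beyond this listener: bind the socket locally, but spawn the accept loop on an executor owned by the embedder instead of constructing a private tokio runtime. A self-contained sketch under the same tokio 0.1 / futures 0.1 APIs (spawn_acceptor and its body are illustrative, not code from this commit):

use std::io;
use std::net::SocketAddr;
use futures::{Future, Stream};
use parity_runtime::Executor;
use tokio::net::TcpListener;

// Bind locally, then let the shared executor drive the accept loop.
fn spawn_acceptor(executor: &Executor, addr: &SocketAddr) -> io::Result<()> {
	let listener = TcpListener::bind(addr)?;
	executor.spawn(listener.incoming()
		.for_each(|_socket| {
			// hand the accepted connection to a protocol handler here
			Ok(())
		})
		.map_err(|err| eprintln!("accept error: {}", err)));
	Ok(())
}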
@@ -419,13 +415,16 @@ mod tests {
 	use traits::KeyServer;
 	use key_server::tests::DummyKeyServer;
 	use types::NodeAddress;
+	use parity_runtime::Runtime;
 	use super::{parse_request, Request, KeyServerHttpListener};
 
 	#[test]
 	fn http_listener_successfully_drops() {
 		let key_server: Arc<KeyServer> = Arc::new(DummyKeyServer::default());
 		let address = NodeAddress { address: "127.0.0.1".into(), port: 9000 };
-		let listener = KeyServerHttpListener::start(address, Arc::downgrade(&key_server)).unwrap();
+		let runtime = Runtime::with_thread_count(1);
+		let listener = KeyServerHttpListener::start(address, Arc::downgrade(&key_server),
+			runtime.executor()).unwrap();
 		drop(listener);
 	}
 
@@ -75,8 +75,6 @@ pub struct ServiceConfiguration {
 /// Key server cluster configuration
 #[derive(Debug)]
 pub struct ClusterConfiguration {
-	/// Number of threads reserved by cluster.
-	pub threads: usize,
 	/// This node address.
 	pub listener_address: NodeAddress,
 	/// All cluster nodes addresses.
@@ -16,8 +16,8 @@
 
 //! Tokio Runtime wrapper.
 
-extern crate futures;
-extern crate tokio;
+pub extern crate futures;
+pub extern crate tokio;
 
 use std::{fmt, thread};
 use std::sync::mpsc;
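Making these extern crate declarations pub re-exports futures and tokio from parity_runtime, so dependents can name exactly the versions the runtime was built against rather than declaring their own, possibly mismatched, copies. An illustrative use (delayed_noop is hypothetical):

use std::time::{Duration, Instant};
// Reach the runtime's own futures/tokio instead of separate dependency entries:
use parity_runtime::futures::Future;
use parity_runtime::tokio::timer::Delay;

fn delayed_noop() -> impl Future<Item = (), Error = ()> {
	Delay::new(Instant::now() + Duration::from_millis(10)).map_err(|_| ())
}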
@@ -222,6 +222,24 @@ impl Executor {
 	}
 }
 
+impl<F: Future<Item = (), Error = ()> + Send + 'static> future::Executor<F> for Executor {
+	fn execute(&self, future: F) -> Result<(), future::ExecuteError<F>> {
+		match self.inner {
+			Mode::Tokio(ref executor) => executor.execute(future),
+			Mode::Sync => {
+				let _ = future.wait();
+				Ok(())
+			},
+			Mode::ThreadPerFuture => {
+				thread::spawn(move || {
+					let _ = future.wait();
+				});
+				Ok(())
+			},
+		}
+	}
+}
+
 /// A handle to a runtime. Dropping the handle will cause runtime to shutdown.
 pub struct RuntimeHandle {
 	close: Option<futures::Complete<()>>,
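The point of implementing futures' future::Executor here: APIs that are generic over that trait (several futures 0.1-era libraries take such executors) can now accept a parity_runtime::Executor directly. A minimal sketch, with spawn_on standing in for such a generic API (the function itself is hypothetical):

use futures::future::{self, Executor, FutureResult};
use parity_runtime::Runtime;

// Stand-in for any library API that is generic over future::Executor.
fn spawn_on<E: Executor<FutureResult<(), ()>>>(executor: &E) {
	executor.execute(future::ok(())).expect("executor should accept the future");
}

fn main() {
	let runtime = Runtime::with_thread_count(1);
	spawn_on(&runtime.executor()); // accepted via the impl added above
}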