SecretStore: use in-memory transport in cluster tests (#9850)

* fixing SS tests

* removed some redundant clones

* fixed grumbles

* replaced hash.clone() with *hash + fixed comment

* lost files
Svyatoslav Nikolsky, 2019-02-18 15:38:19 +03:00 (committed by Afri Schoedon)
parent af7dc3676b
commit ef0eda0c39
18 changed files with 3567 additions and 2490 deletions
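
Note: the core idea of this commit is to stop running real TCP connections between test key servers and instead drive the cluster tests through an in-memory message loop. A minimal sketch of that pattern, with all names (Transport, InMemoryTransport, NodeId, Message) being illustrative stand-ins rather than the secret-store API:

use std::collections::VecDeque;
use std::sync::{Arc, Mutex};

type NodeId = u64;
type Message = String;

trait Transport {
    fn send(&self, to: NodeId, message: Message);
}

/// In-memory transport: messages go into a shared queue that the test's
/// message loop drains synchronously, so no sockets or timing are involved.
#[derive(Clone, Default)]
struct InMemoryTransport {
    queue: Arc<Mutex<VecDeque<(NodeId, Message)>>>,
}

impl Transport for InMemoryTransport {
    fn send(&self, to: NodeId, message: Message) {
        self.queue.lock().unwrap().push_back((to, message));
    }
}

impl InMemoryTransport {
    /// Take the next queued message, mirroring `take_message` in the tests below.
    fn take_message(&self) -> Option<(NodeId, Message)> {
        self.queue.lock().unwrap().pop_front()
    }
}

fn main() {
    let transport = InMemoryTransport::default();
    transport.send(1, "hello".into());
    assert_eq!(transport.take_message(), Some((1, "hello".into())));
}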

[file: key_server.rs — secret store key server]

@@ -23,11 +23,11 @@ use parity_runtime::Executor;
 use super::acl_storage::AclStorage;
 use super::key_storage::KeyStorage;
 use super::key_server_set::KeyServerSet;
-use key_server_cluster::{math, ClusterCore};
+use key_server_cluster::{math, new_network_cluster};
 use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer, NodeKeyPair};
 use types::{Error, Public, RequestSignature, Requester, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow,
 	ClusterConfiguration, MessageHash, EncryptedMessageSignature, NodeId};
-use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration};
+use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration, NetConnectionsManagerConfig};

 /// Secret store key server implementation
 pub struct KeyServerImpl {
@@ -175,20 +175,23 @@ impl KeyServerCore {
 	pub fn new(config: &ClusterConfiguration, key_server_set: Arc<KeyServerSet>, self_key_pair: Arc<NodeKeyPair>,
 		acl_storage: Arc<AclStorage>, key_storage: Arc<KeyStorage>, executor: Executor) -> Result<Self, Error>
 	{
-		let config = NetClusterConfiguration {
+		let cconfig = NetClusterConfiguration {
 			self_key_pair: self_key_pair.clone(),
-			listen_address: (config.listener_address.address.clone(), config.listener_address.port),
 			key_server_set: key_server_set,
-			allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes,
 			acl_storage: acl_storage,
 			key_storage: key_storage,
-			admin_public: config.admin_public.clone(),
+			admin_public: config.admin_public,
+			preserve_sessions: false,
+		};
+		let net_config = NetConnectionsManagerConfig {
+			listen_address: (config.listener_address.address.clone(), config.listener_address.port),
+			allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes,
 			auto_migrate_enabled: config.auto_migrate_enabled,
 		};
-		let cluster = ClusterCore::new(executor, config)
-			.and_then(|c| c.run().map(|_| c.client()))
-			.map_err(|err| Error::from(err))?;
+		let core = new_network_cluster(executor, cconfig, net_config)?;
+		let cluster = core.client();
+		core.run()?;
 		Ok(KeyServerCore {
 			cluster,
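
Note: the hunk above splits one configuration struct in two. A toy version of that split (struct and function names echo the diff, but every field type here is a stand-in assumption, not the secret-store API). Protocol-level options stay in the cluster config; socket-level options move out, so a test harness can build the same cluster core over in-memory connections and simply never construct the net config:

struct ClusterCoreConfig {
    preserve_sessions: bool, // protocol-level; tests may flip this on
}

struct NetConnectionsConfig {
    listen_address: (String, u16), // socket-level; meaningless for in-memory tests
    allow_connecting_to_higher_nodes: bool,
    auto_migrate_enabled: bool,
}

fn new_network_cluster(core: ClusterCoreConfig, net: NetConnectionsConfig) -> Result<String, String> {
    // the real function wires a TCP connection manager here; a test-only
    // constructor would take `core` plus an in-memory connection manager
    Ok(format!("preserve_sessions={}, listen={}:{}, higher={}, migrate={}",
        core.preserve_sessions, net.listen_address.0, net.listen_address.1,
        net.allow_connecting_to_higher_nodes, net.auto_migrate_enabled))
}

fn main() {
    let core = ClusterCoreConfig { preserve_sessions: false };
    let net = NetConnectionsConfig {
        listen_address: ("127.0.0.1".into(), 8083),
        allow_connecting_to_higher_nodes: true,
        auto_migrate_enabled: true,
    };
    println!("{}", new_network_cluster(core, net).unwrap());
}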
@@ -297,14 +300,14 @@ pub mod tests {
 		let start = time::Instant::now();
 		let mut tried_reconnections = false;
 		loop {
-			if key_servers.iter().all(|ks| ks.cluster().cluster_state().connected.len() == num_nodes - 1) {
+			if key_servers.iter().all(|ks| ks.cluster().is_fully_connected()) {
 				break;
 			}

 			let old_tried_reconnections = tried_reconnections;
 			let mut fully_connected = true;
 			for key_server in &key_servers {
-				if key_server.cluster().cluster_state().connected.len() != num_nodes - 1 {
+				if !key_server.cluster().is_fully_connected() {
 					fully_connected = false;
 					if !old_tried_reconnections {
 						tried_reconnections = true;
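
Note: the polling loop now asks the cluster client whether it is fully connected instead of comparing raw connection counts, which keeps the test independent of how connections are represented. A plausible shape of that helper (an assumption; the actual implementation lives in the cluster client):

struct ClusterState {
    connected: std::collections::BTreeSet<u64>, // ids of connected peers
    configured: usize,                          // total nodes in the set
}

impl ClusterState {
    fn is_fully_connected(&self) -> bool {
        // self is excluded from `connected`, hence the `- 1`
        self.connected.len() == self.configured - 1
    }
}

fn main() {
    let state = ClusterState { connected: [2, 3].into_iter().collect(), configured: 3 };
    assert!(state.is_fully_connected());
}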
@@ -434,7 +437,7 @@ pub mod tests {
 	#[test]
 	fn decryption_session_is_delegated_when_node_does_not_have_key_share() {
 		let _ = ::env_logger::try_init();
-		let (key_servers, _, runtime) = make_key_servers(6110, 3);
+		let (key_servers, key_storages, runtime) = make_key_servers(6110, 3);

 		// generate document key
 		let threshold = 0;
@@ -445,7 +448,7 @@ pub mod tests {
 		let generated_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap();

 		// remove key from node0
-		key_servers[0].cluster().key_storage().remove(&document).unwrap();
+		key_storages[0].remove(&document).unwrap();

 		// now let's try to retrieve key back by requesting it from node0, so that session must be delegated
 		let retrieved_key = key_servers[0].restore_document_key(&document, &signature.into()).unwrap();
@@ -457,7 +460,7 @@ pub mod tests {
 	#[test]
 	fn schnorr_signing_session_is_delegated_when_node_does_not_have_key_share() {
 		let _ = ::env_logger::try_init();
-		let (key_servers, _, runtime) = make_key_servers(6114, 3);
+		let (key_servers, key_storages, runtime) = make_key_servers(6114, 3);
 		let threshold = 1;

 		// generate server key
@@ -467,7 +470,7 @@ pub mod tests {
 		let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), threshold).unwrap();

 		// remove key from node0
-		key_servers[0].cluster().key_storage().remove(&server_key_id).unwrap();
+		key_storages[0].remove(&server_key_id).unwrap();

 		// sign message
 		let message_hash = H256::from(42);
@@ -484,7 +487,7 @@ pub mod tests {
 	#[test]
 	fn ecdsa_signing_session_is_delegated_when_node_does_not_have_key_share() {
 		let _ = ::env_logger::try_init();
-		let (key_servers, _, runtime) = make_key_servers(6117, 4);
+		let (key_servers, key_storages, runtime) = make_key_servers(6117, 4);
 		let threshold = 1;

 		// generate server key
@@ -494,7 +497,7 @@ pub mod tests {
 		let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), threshold).unwrap();

 		// remove key from node0
-		key_servers[0].cluster().key_storage().remove(&server_key_id).unwrap();
+		key_storages[0].remove(&server_key_id).unwrap();

 		// sign message
 		let message_hash = H256::random();
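
Note: across all four tests above, the pattern is the same: the factory's previously ignored second return value is now used, so tests delete a node's key share directly from its storage instead of reaching through `ks.cluster().key_storage()`. A hedged sketch of the factory shape implied by these tests (the real make_key_servers also builds runtimes and the cluster instances):

use std::sync::Arc;

struct KeyServer;      // stand-ins for the real types
struct DummyKeyStorage;
struct Runtime;

fn make_key_servers(_start_port: u16, num_nodes: usize)
    -> (Vec<KeyServer>, Vec<Arc<DummyKeyStorage>>, Runtime)
{
    let key_servers = (0..num_nodes).map(|_| KeyServer).collect();
    let key_storages = (0..num_nodes).map(|_| Arc::new(DummyKeyStorage)).collect();
    (key_servers, key_storages, Runtime)
}

fn main() {
    let (servers, storages, _rt) = make_key_servers(6110, 3);
    assert_eq!(servers.len(), storages.len());
}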

[file: servers_set_change_session.rs — servers set change session]

@@ -1045,148 +1045,204 @@ fn check_nodes_set(all_nodes_set: &BTreeSet<NodeId>, new_nodes_set: &BTreeSet<NodeId>,
 pub mod tests {
 	use std::sync::Arc;
 	use std::collections::{VecDeque, BTreeMap, BTreeSet};
+	use ethereum_types::H256;
 	use ethkey::{Random, Generator, Public, Signature, KeyPair, sign};
-	use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage};
-	use key_server_cluster::cluster::Cluster;
+	use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, NodeKeyPair, PlainNodeKeyPair};
 	use key_server_cluster::cluster_sessions::ClusterSession;
-	use key_server_cluster::cluster::tests::DummyCluster;
-	use key_server_cluster::generation_session::tests::{MessageLoop as GenerationMessageLoop, Node as GenerationNode, generate_nodes_ids};
+	use key_server_cluster::cluster::tests::MessageLoop as ClusterMessageLoop;
+	use key_server_cluster::generation_session::tests::{MessageLoop as GenerationMessageLoop};
+	use key_server_cluster::math;
 	use key_server_cluster::message::Message;
 	use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
-	use key_server_cluster::admin_sessions::share_add_session::tests::check_secret_is_preserved;
 	use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash;
 	use super::{SessionImpl, SessionParams};
-	struct Node {
-		pub cluster: Arc<DummyCluster>,
-		pub key_storage: Arc<DummyKeyStorage>,
-		pub session: SessionImpl,
-	}
+	pub trait AdminSessionAdapter<S> {
+		const SIGN_NEW_NODES: bool;
+
+		fn create(
+			meta: ShareChangeSessionMeta,
+			admin_public: Public,
+			all_nodes_set: BTreeSet<NodeId>,
+			ml: &ClusterMessageLoop,
+			idx: usize
+		) -> S;
+	}
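
Note: this trait is the pivot of the refactoring. Instead of each admin-session test module carrying its own Node/MessageLoop plumbing, one generic MessageLoop<S> defers session construction to an adapter; SIGN_NEW_NODES captures the one behavioral difference (servers-set-change signs the added nodes into the old set, share-add does not). A minimal illustration of the pattern with stand-in types, not the real session API:

trait SessionAdapter<S> {
    const SIGN_NEW_NODES: bool;
    fn create(idx: usize) -> S;
}

struct Harness<S> { sessions: Vec<S> }

impl<S> Harness<S> {
    // generic over the session type; construction is delegated to the adapter
    fn new<A: SessionAdapter<S>>(nodes: usize) -> Self {
        Harness { sessions: (0..nodes).map(A::create).collect() }
    }
}

struct EchoSession(usize);
struct EchoAdapter;
impl SessionAdapter<EchoSession> for EchoAdapter {
    const SIGN_NEW_NODES: bool = true;
    fn create(idx: usize) -> EchoSession { EchoSession(idx) }
}

fn main() {
    let h = Harness::new::<EchoAdapter>(3);
    assert_eq!(h.sessions.len(), 3);
}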
-	struct MessageLoop {
+	pub struct MessageLoop<S> {
+		pub ml: ClusterMessageLoop,
 		pub admin_key_pair: KeyPair,
 		pub original_key_pair: KeyPair,
+		pub original_key_version: H256,
 		pub all_nodes_set: BTreeSet<NodeId>,
 		pub new_nodes_set: BTreeSet<NodeId>,
 		pub all_set_signature: Signature,
 		pub new_set_signature: Signature,
-		pub nodes: BTreeMap<NodeId, Node>,
+		pub sessions: BTreeMap<NodeId, S>,
 		pub queue: VecDeque<(NodeId, NodeId, Message)>,
 	}
-	fn create_session(mut meta: ShareChangeSessionMeta, self_node_id: NodeId, admin_public: Public, all_nodes_set: BTreeSet<NodeId>, cluster: Arc<Cluster>, key_storage: Arc<KeyStorage>) -> SessionImpl {
-		meta.self_node_id = self_node_id;
-		SessionImpl::new(SessionParams {
-			meta: meta,
-			all_nodes_set: all_nodes_set,
-			cluster: cluster,
-			key_storage: key_storage,
-			nonce: 1,
-			admin_public: admin_public,
-			migration_id: None,
-		}).unwrap()
-	}
-
-	fn create_node(meta: ShareChangeSessionMeta, admin_public: Public, all_nodes_set: BTreeSet<NodeId>, node: &GenerationNode) -> Node {
-		for n in &all_nodes_set {
-			node.cluster.add_node(n.clone());
-		}
-
-		Node {
-			cluster: node.cluster.clone(),
-			key_storage: node.key_storage.clone(),
-			session: create_session(meta, node.session.node().clone(), admin_public, all_nodes_set, node.cluster.clone(), node.key_storage.clone()),
-		}
-	}
+	impl<S> ::std::fmt::Debug for MessageLoop<S> {
+		fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+			write!(f, "{:?}", self.ml)
+		}
+	}
-	impl MessageLoop {
-		pub fn new(gml: &GenerationMessageLoop, master_node_id: NodeId, original_key_pair: Option<KeyPair>, new_nodes_ids: BTreeSet<NodeId>, removed_nodes_ids: BTreeSet<NodeId>, isolated_nodes_ids: BTreeSet<NodeId>) -> Self {
+	struct Adapter;
+
+	impl AdminSessionAdapter<SessionImpl> for Adapter {
+		const SIGN_NEW_NODES: bool = true;
+
+		fn create(
+			mut meta: ShareChangeSessionMeta,
+			admin_public: Public,
+			all_nodes_set: BTreeSet<NodeId>,
+			ml: &ClusterMessageLoop,
+			idx: usize
+		) -> SessionImpl {
+			meta.self_node_id = *ml.node_key_pair(idx).public();
+			SessionImpl::new(SessionParams {
+				meta: meta,
+				all_nodes_set: all_nodes_set,
+				cluster: ml.cluster(idx).view().unwrap(),
+				key_storage: ml.key_storage(idx).clone(),
+				nonce: 1,
+				admin_public: admin_public,
+				migration_id: None,
+			}).unwrap()
+		}
+	}
+
+	impl<S: ClusterSession> MessageLoop<S> {
+		pub fn with_gml<C: AdminSessionAdapter<S>>(
+			gml: GenerationMessageLoop,
+			master: NodeId,
+			add: Option<Vec<KeyPair>>,
+			removed_nodes_ids: Option<BTreeSet<NodeId>>,
+			isolated_nodes_ids: Option<BTreeSet<NodeId>>,
+		) -> Self {
+			// read generated key data
+			let original_key_pair = gml.compute_key_pair();
+			let original_key_version = gml.key_version();
+			Self::with_ml::<C>(
+				gml.0,
+				original_key_pair,
+				original_key_version,
+				master,
+				add,
+				removed_nodes_ids,
+				isolated_nodes_ids)
+		}
+
+		pub fn and_then<C: AdminSessionAdapter<S>>(
+			self,
+			master: NodeId,
+			add: Option<Vec<KeyPair>>,
+			removed_nodes_ids: Option<BTreeSet<NodeId>>,
+			isolated_nodes_ids: Option<BTreeSet<NodeId>>,
+		) -> Self {
+			Self::with_ml::<C>(
+				self.ml,
+				self.original_key_pair,
+				self.original_key_version,
+				master,
+				add,
+				removed_nodes_ids,
+				isolated_nodes_ids,
+			)
+		}
+
+		pub fn with_ml<C: AdminSessionAdapter<S>>(
+			mut ml: ClusterMessageLoop,
+			original_key_pair: KeyPair,
+			original_key_version: H256,
+			master: NodeId,
+			add: Option<Vec<KeyPair>>,
+			removed_nodes_ids: Option<BTreeSet<NodeId>>,
+			isolated_nodes_ids: Option<BTreeSet<NodeId>>,
+		) -> Self {
+			let add = add.unwrap_or_default();
+			let removed_nodes_ids = removed_nodes_ids.unwrap_or_default();
+			let isolated_nodes_ids = isolated_nodes_ids.unwrap_or_default();
+
 			// generate admin key pair
 			let admin_key_pair = Random.generate().unwrap();
 			let admin_public = admin_key_pair.public().clone();
-			// compute original secret key
-			let original_key_pair = original_key_pair.unwrap_or_else(|| gml.compute_key_pair(1));
 			// all active nodes set
-			let mut all_nodes_set: BTreeSet<_> = gml.nodes.keys()
+			let mut all_nodes_set: BTreeSet<_> = ml.nodes().into_iter()
 				.filter(|n| !isolated_nodes_ids.contains(n))
-				.cloned()
 				.collect();
 			// new nodes set includes all old nodes, except nodes being removed + all nodes being added
 			let new_nodes_set: BTreeSet<NodeId> = all_nodes_set.iter().cloned()
-				.chain(new_nodes_ids.iter().cloned())
+				.chain(add.iter().map(|kp| *kp.public()))
 				.filter(|n| !removed_nodes_ids.contains(n))
 				.collect();
-			all_nodes_set.extend(new_nodes_ids.iter().cloned());
+			let mut old_set_to_sign = all_nodes_set.clone();
+			all_nodes_set.extend(add.iter().map(|kp| *kp.public()));
+			if C::SIGN_NEW_NODES {
+				old_set_to_sign.extend(add.iter().map(|kp| *kp.public()));
+			}
 			for isolated_node_id in &isolated_nodes_ids {
 				all_nodes_set.remove(isolated_node_id);
 			}

 			let meta = ShareChangeSessionMeta {
-				self_node_id: master_node_id.clone(),
-				master_node_id: master_node_id.clone(),
+				self_node_id: master,
+				master_node_id: master,
 				id: SessionId::default(),
 				configured_nodes_count: all_nodes_set.len(),
 				connected_nodes_count: all_nodes_set.len(),
 			};

-			let old_nodes = gml.nodes.iter().map(|n| create_node(meta.clone(), admin_public.clone(), all_nodes_set.clone(), n.1));
-			let new_nodes = new_nodes_ids.into_iter().map(|new_node_id| {
-				let new_node_cluster = Arc::new(DummyCluster::new(new_node_id.clone()));
-				for node in &all_nodes_set {
-					new_node_cluster.add_node(node.clone());
-				}
-				let new_node_key_storage = Arc::new(DummyKeyStorage::default());
-				let new_node_session = create_session(meta.clone(), new_node_id, admin_public.clone(), all_nodes_set.clone(), new_node_cluster.clone(), new_node_key_storage.clone());
-				Node {
-					cluster: new_node_cluster,
-					key_storage: new_node_key_storage,
-					session: new_node_session,
-				}
-			});
-			let nodes: BTreeMap<_, _> = old_nodes.chain(new_nodes).map(|n| (n.session.core.meta.self_node_id.clone(), n)).collect();
-			for node in nodes.values() {
-				for isolated_node_id in &isolated_nodes_ids {
-					node.cluster.remove_node(isolated_node_id);
-				}
-			}
+			// include new nodes in the cluster
+			for node_key_pair in &add {
+				ml.include(Arc::new(PlainNodeKeyPair::new(node_key_pair.clone())));
+			}
+			// isolate nodes from the cluster
+			for isolated_node_id in &isolated_nodes_ids {
+				let idx = ml.nodes().iter().position(|n| n == isolated_node_id).unwrap();
+				ml.exclude(idx);
+			}

-			let all_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&all_nodes_set)).unwrap();
+			// prepare set of nodes
+			let sessions: BTreeMap<_, _> = (0..ml.nodes().len())
+				.map(|idx| (ml.node(idx), C::create(meta.clone(), admin_public, all_nodes_set.clone(), &ml, idx)))
+				.collect();
+
+			let all_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&old_set_to_sign)).unwrap();
 			let new_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap();

 			MessageLoop {
+				ml,
 				admin_key_pair: admin_key_pair,
-				original_key_pair: original_key_pair,
+				original_key_pair,
+				original_key_version,
 				all_nodes_set: all_nodes_set.clone(),
 				new_nodes_set: new_nodes_set,
 				all_set_signature: all_set_signature,
 				new_set_signature: new_set_signature,
-				nodes: nodes,
+				sessions,
 				queue: Default::default(),
 			}
 		}
 		pub fn run(&mut self) {
+			// run session until completion
 			while let Some((from, to, message)) = self.take_message() {
 				self.process_message((from, to, message)).unwrap();
 			}
+
+			// check that all sessions have finished
+			assert!(self.sessions.values().all(|s| s.is_finished()));
 		}

 		pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> {
-			self.nodes.values()
-				.filter_map(|n| n.cluster.take_message().map(|m| (n.session.core.meta.self_node_id.clone(), m.0, m.1)))
-				.nth(0)
-				.or_else(|| self.queue.pop_front())
+			self.ml.take_message().or_else(|| self.queue.pop_front())
 		}

 		pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> {
-			match { match msg.2 {
-				Message::ServersSetChange(ref message) => self.nodes[&msg.1].session.process_message(&msg.0, message),
-				_ => unreachable!("only servers set change messages are expected"),
-			} } {
+			match self.sessions[&msg.1].on_message(&msg.0, &msg.2) {
 				Ok(_) => Ok(()),
 				Err(Error::TooEarlyForRequest) => {
 					self.queue.push_back(msg);
@@ -1195,213 +1251,201 @@ pub mod tests {
 				Err(err) => Err(err),
 			}
 		}
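
Note: process_message keeps the queue-and-retry convention of the old loop: a message that arrives before its session is ready is parked and replayed later, which is what makes the synchronous in-memory loop deterministic. That pattern in isolation, with toy types:

use std::collections::VecDeque;

#[derive(Debug, PartialEq)]
enum Error { TooEarly }

fn process(ready: bool) -> Result<(), Error> {
    if ready { Ok(()) } else { Err(Error::TooEarly) }
}

fn main() {
    let mut queue: VecDeque<&str> = VecDeque::new();
    match process(false) {
        Ok(()) => {}
        Err(Error::TooEarly) => queue.push_back("msg"), // park it, retry later
    }
    assert_eq!(queue.pop_front(), Some("msg"));
}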
+		/// This only works for schemes where threshold = 1
+		pub fn check_secret_is_preserved<'a, I: IntoIterator<Item=&'a NodeId>>(&self, nodes: I) {
+			let nodes: Vec<_> = nodes.into_iter().collect();
+			let key_storages: Vec<_> = nodes.iter().map(|n| self.ml.key_storage_of(n)).collect();
+			let n = nodes.len();
+			let document_secret_plain = math::generate_random_point().unwrap();
+			for n1 in 0..n {
+				for n2 in n1+1..n {
+					let share1 = key_storages[n1].get(&SessionId::default()).unwrap();
+					let share2 = key_storages[n2].get(&SessionId::default()).unwrap();
+					let id_number1 = share1.as_ref().unwrap().last_version().unwrap().id_numbers[nodes[n1]].clone();
+					let id_number2 = share1.as_ref().unwrap().last_version().unwrap().id_numbers[nodes[n2]].clone();
+
+					// now encrypt and decrypt data
+					let (document_secret_decrypted, document_secret_decrypted_test) =
+						math::tests::do_encryption_and_decryption(1,
+							self.original_key_pair.public(),
+							&[id_number1, id_number2],
+							&[share1.unwrap().last_version().unwrap().secret_share.clone(),
+								share2.unwrap().last_version().unwrap().secret_share.clone()],
+							Some(self.original_key_pair.secret()),
+							document_secret_plain.clone());
+
+					assert_eq!(document_secret_plain, document_secret_decrypted_test);
+					assert_eq!(document_secret_plain, document_secret_decrypted);
+				}
+			}
+		}
 	}
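
Note: the check rests on standard Shamir secret sharing, not on anything specific to this commit. With threshold t, any t + 1 shares s_i at id numbers x_i determine the same polynomial and hence the same joint secret via Lagrange interpolation at zero:

% standard secret-sharing algebra (not from the source)
s = \sum_{i=1}^{t+1} s_i \prod_{j \neq i} \frac{x_j}{x_j - x_i}

With t = 1 here, every pair of nodes must reproduce the same secret, which is exactly what the pairwise encrypt/decrypt round-trip asserts.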
-	pub fn generate_key(threshold: usize, nodes_ids: BTreeSet<NodeId>) -> GenerationMessageLoop {
-		let mut gml = GenerationMessageLoop::with_nodes_ids(nodes_ids);
-		gml.master().initialize(Default::default(), Default::default(), false, threshold, gml.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).unwrap();
-		while let Some((from, to, message)) = gml.take_message() {
-			gml.process_message((from, to, message)).unwrap();
-		}
+	impl MessageLoop<SessionImpl> {
+		pub fn run_at(mut self, master: NodeId) -> Self {
+			self.sessions[&master].initialize(
+				self.new_nodes_set.clone(),
+				self.all_set_signature.clone(),
+				self.new_set_signature.clone()).unwrap();
+			self.run();
+			self
+		}
+	}
+
+	pub fn generate_key(num_nodes: usize, threshold: usize) -> GenerationMessageLoop {
+		let gml = GenerationMessageLoop::new(num_nodes).init(threshold).unwrap();
+		gml.0.loop_until(|| gml.0.is_empty());
 		gml
 	}
 	#[test]
 	fn node_added_using_servers_set_change() {
 		// initial 2-of-3 session
-		let gml = generate_key(1, generate_nodes_ids(3));
-		let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap();
+		let gml = generate_key(3, 1);

-		// insert 1 node so that it becames 2-of-4 session
-		let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect();
-		let mut ml = MessageLoop::new(&gml, master_node_id, None, nodes_to_add, BTreeSet::new(), BTreeSet::new());
-		ml.nodes[&master_node_id].session.initialize(ml.nodes.keys().cloned().collect(), ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap();
-		ml.run();
+		// add 1 node so that it becames 2-of-4 session
+		let add = vec![Random.generate().unwrap()];
+		let master = gml.0.node(0);
+		let ml = MessageLoop::with_gml::<Adapter>(gml, master, Some(add), None, None).run_at(master);

 		// try to recover secret for every possible combination of nodes && check that secret is the same
-		check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter().map(|(k, v)| (k.clone(), v.key_storage.clone())).collect());
-
-		// check that all sessions have finished
-		assert!(ml.nodes.values().all(|n| n.session.is_finished()));
+		ml.check_secret_is_preserved(ml.sessions.keys());
 	}
 	#[test]
 	fn node_added_using_server_set_change_from_this_node() {
 		// initial 2-of-3 session
-		let gml = generate_key(1, generate_nodes_ids(3));
+		let gml = generate_key(3, 1);

 		// insert 1 node so that it becames 2-of-4 session
 		// master node is the node we are adding =>
 		// 1) add session is delegated to one of old nodes
 		// 2) key share is pushed to new node
 		// 3) delegated session is returned back to added node
-		let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect();
-		let master_node_id = nodes_to_add.iter().cloned().nth(0).unwrap();
-		let mut ml = MessageLoop::new(&gml, master_node_id, None, nodes_to_add, BTreeSet::new(), BTreeSet::new());
-		ml.nodes[&master_node_id].session.initialize(ml.nodes.keys().cloned().collect(), ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap();
-		ml.run();
+		let add = vec![Random.generate().unwrap()];
+		let master = add[0].public().clone();
+		let ml = MessageLoop::with_gml::<Adapter>(gml, master, Some(add), None, None).run_at(master);

-		// check that all sessions have finished
-		assert!(ml.nodes.values().all(|n| n.session.is_finished()));
+		// try to recover secret for every possible combination of nodes && check that secret is the same
+		ml.check_secret_is_preserved(ml.sessions.keys());
 	}
 	#[test]
 	fn node_moved_using_servers_set_change() {
 		// initial 2-of-3 session
-		let gml = generate_key(1, generate_nodes_ids(3));
-		let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap();
+		let gml = generate_key(3, 1);

 		// remove 1 node && insert 1 node so that one share is moved
-		let nodes_to_remove: BTreeSet<_> = gml.nodes.keys().cloned().skip(1).take(1).collect();
-		let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect();
-		let mut ml = MessageLoop::new(&gml, master_node_id, None, nodes_to_add.clone(), nodes_to_remove.clone(), BTreeSet::new());
-		let new_nodes_set = ml.nodes.keys().cloned().filter(|n| !nodes_to_remove.contains(n)).collect();
-		ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap();
-		ml.run();
+		let master = gml.0.node(0);
+		let remove: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect();
+		let add = vec![Random.generate().unwrap()];
+		let ml = MessageLoop::with_gml::<Adapter>(gml, master, Some(add), Some(remove.clone()), None).run_at(master);

 		// check that secret is still the same as before moving the share
-		check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter()
-			.filter(|&(k, _)| !nodes_to_remove.contains(k))
-			.map(|(k, v)| (k.clone(), v.key_storage.clone()))
-			.collect());
+		ml.check_secret_is_preserved(ml.sessions.keys()
+			.filter(|k| !remove.contains(k)));

 		// check that all removed nodes do not own key share
-		assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_none()));
-
-		// check that all sessions have finished
-		assert!(ml.nodes.values().all(|n| n.session.is_finished()));
+		assert!(ml.sessions.keys().filter(|k| remove.contains(k))
+			.all(|k| ml.ml.key_storage_of(k).get(&SessionId::default()).unwrap().is_none()));
 	}
 	#[test]
 	fn node_removed_using_servers_set_change() {
 		// initial 2-of-3 session
-		let gml = generate_key(1, generate_nodes_ids(3));
-		let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap();
+		let gml = generate_key(3, 1);

 		// remove 1 node so that session becames 2-of-2
-		let nodes_to_remove: BTreeSet<_> = gml.nodes.keys().cloned().skip(1).take(1).collect();
-		let new_nodes_set: BTreeSet<_> = gml.nodes.keys().cloned().filter(|n| !nodes_to_remove.contains(&n)).collect();
-		let mut ml = MessageLoop::new(&gml, master_node_id, None, BTreeSet::new(), nodes_to_remove.clone(), BTreeSet::new());
-		ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap();
-		ml.run();
+		let remove: BTreeSet<_> = ::std::iter::once(gml.0.node(0)).collect();
+		let master = gml.0.node(0);
+		let ml = MessageLoop::with_gml::<Adapter>(gml, master, None, Some(remove.clone()), None).run_at(master);

 		// try to recover secret for every possible combination of nodes && check that secret is the same
-		check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter()
-			.filter(|&(k, _)| !nodes_to_remove.contains(k))
-			.map(|(k, v)| (k.clone(), v.key_storage.clone()))
-			.collect());
+		ml.check_secret_is_preserved(ml.sessions.keys()
			.filter(|k| !remove.contains(k)));

 		// check that all removed nodes do not own key share
-		assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_none()));
-
-		// check that all sessions have finished
-		assert!(ml.nodes.values().all(|n| n.session.is_finished()));
+		assert!(ml.sessions.keys().filter(|k| remove.contains(k))
+			.all(|k| ml.ml.key_storage_of(k).get(&SessionId::default()).unwrap().is_none()));
 	}
 	#[test]
 	fn isolated_node_removed_using_servers_set_change() {
 		// initial 2-of-3 session
-		let gml = generate_key(1, generate_nodes_ids(3));
-		let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap();
+		let gml = generate_key(3, 1);

 		// remove 1 node so that session becames 2-of-2
-		let nodes_to_isolate: BTreeSet<_> = gml.nodes.keys().cloned().skip(1).take(1).collect();
-		let new_nodes_set: BTreeSet<_> = gml.nodes.keys().cloned().filter(|n| !nodes_to_isolate.contains(&n)).collect();
-		let mut ml = MessageLoop::new(&gml, master_node_id, None, BTreeSet::new(), BTreeSet::new(), nodes_to_isolate.clone());
-		ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap();
-		ml.run();
+		let isolate: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect();
+		let master = gml.0.node(0);
+		let ml = MessageLoop::with_gml::<Adapter>(gml, master, None, None, Some(isolate.clone()))
+			.run_at(master);

 		// try to recover secret for every possible combination of nodes && check that secret is the same
-		check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter()
-			.filter(|&(k, _)| !nodes_to_isolate.contains(k))
-			.map(|(k, v)| (k.clone(), v.key_storage.clone()))
-			.collect());
+		ml.check_secret_is_preserved(ml.sessions.keys()
			.filter(|k| !isolate.contains(k)));

 		// check that all isolated nodes still OWN key share
-		assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_isolate.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_some()));
-
-		// check that all sessions have finished
-		assert!(ml.nodes.iter().filter(|&(k, _)| !nodes_to_isolate.contains(k)).all(|(_, v)| v.session.is_finished()));
+		assert!(ml.sessions.keys().filter(|k| isolate.contains(k))
			.all(|k| ml.ml.key_storage_of(k).get(&SessionId::default()).unwrap().is_some()));
 	}
 	#[test]
 	fn having_less_than_required_nodes_after_change_does_not_fail_change_session() {
 		// initial 2-of-3 session
-		let gml = generate_key(1, generate_nodes_ids(3));
-		let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap();
+		let gml = generate_key(3, 1);

-		// remove 2 nodes so that key becomes irrecoverable (make sure the session is completed, even though key is irrecoverable)
-		let nodes_to_remove: BTreeSet<_> = gml.nodes.keys().cloned().skip(1).take(2).collect();
-		let new_nodes_set: BTreeSet<_> = gml.nodes.keys().cloned().filter(|n| !nodes_to_remove.contains(&n)).collect();
-		let mut ml = MessageLoop::new(&gml, master_node_id, None, BTreeSet::new(), nodes_to_remove.clone(), BTreeSet::new());
-		ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap();
-		ml.run();
+		// remove 2 nodes so that key becomes irrecoverable (make sure the session is completed
+		// even though key is irrecoverable)
+		let remove: BTreeSet<_> = gml.0.nodes().into_iter().skip(1).take(2).collect();
+		let master = gml.0.node(0);
+		let ml = MessageLoop::with_gml::<Adapter>(gml, master, None, Some(remove.clone()), None).run_at(master);

 		// check that all removed nodes do not own key share
-		assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_none()));
-
-		// check that all sessions have finished
-		assert!(ml.nodes.values().all(|n| n.session.is_finished()));
+		assert!(ml.sessions.keys().filter(|k| remove.contains(k))
			.all(|k| ml.ml.key_storage_of(k).get(&SessionId::default()).unwrap().is_none()));

 		// and now let's add new node (make sure the session is completed, even though key is still irrecoverable)
 		// isolated here are not actually isolated, but removed on the previous step
-		let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect();
-		let new_nodes_set: BTreeSet<_> = gml.nodes.keys().cloned().filter(|n| !nodes_to_remove.contains(&n))
-			.chain(nodes_to_add.iter().cloned())
-			.collect();
-		let master_node_id = nodes_to_add.iter().cloned().nth(0).unwrap();
-		let mut ml = MessageLoop::new(&gml, master_node_id, Some(ml.original_key_pair.clone()), nodes_to_add.clone(), BTreeSet::new(), nodes_to_remove.clone());
-		ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap();
-		ml.run();
+		let add = vec![Random.generate().unwrap()];
+		let master = add[0].public().clone();
+		let ml = ml.and_then::<Adapter>(master, Some(add.clone()), None, Some(remove)).run_at(master);

 		// check that all added nodes do not own key share (there's not enough nodes to run share add session)
-		assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_add.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_none()));
-
-		// check that all sessions have finished
-		assert!(ml.nodes.iter().filter(|&(k, _)| !nodes_to_remove.contains(k)).all(|(_, n)| n.session.is_finished()));
+		assert!(ml.sessions.keys().filter(|k| add.iter().any(|n| n.public() == *k))
			.all(|k| ml.ml.key_storage_of(k).get(&SessionId::default()).unwrap().is_none()));
 	}
 	#[test]
 	fn removing_node_from_cluster_of_2_works() {
 		// initial 2-of-2 session
-		let gml = generate_key(1, generate_nodes_ids(2));
-		let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap();
+		let gml = generate_key(2, 1);

-		// make 2nd node isolated so that key becomes irrecoverable (make sure the session is completed, even though key is irrecoverable)
-		let nodes_to_isolate: BTreeSet<_> = gml.nodes.keys().cloned().skip(1).take(1).collect();
-		let new_nodes_set: BTreeSet<_> = gml.nodes.keys().cloned().filter(|n| !nodes_to_isolate.contains(&n)).collect();
-		let mut ml = MessageLoop::new(&gml, master_node_id, None, BTreeSet::new(), BTreeSet::new(), nodes_to_isolate.clone());
-		ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap();
-		ml.run();
-
-		// check that session on master node has completed (session on 2nd node is not even started in network mode)
-		assert!(ml.nodes.values().take(1).all(|n| n.session.is_finished()));
+		// make 2nd node isolated so that key becomes irrecoverable (make sure the session is completed,
+		// even though key is irrecoverable)
+		let isolate: BTreeSet<_> = gml.0.nodes().into_iter().skip(1).take(1).collect();
+		let master = gml.0.node(0);
+		MessageLoop::with_gml::<Adapter>(gml, master, None, None, Some(isolate)).run_at(master);
 	}
 	#[test]
 	fn adding_node_that_has_lost_its_database_works() {
 		// initial 2-of-2 session
-		let gml = generate_key(1, generate_nodes_ids(2));
-		let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap();
+		let gml = generate_key(2, 1);

 		// insert 1 node so that it becames 2-of-3 session
-		let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect();
-		let mut ml = MessageLoop::new(&gml, master_node_id, None, nodes_to_add.clone(), BTreeSet::new(), BTreeSet::new());
-		ml.nodes[&master_node_id].session.initialize(ml.nodes.keys().cloned().collect(), ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap();
-		ml.run();
+		let add = vec![Random.generate().unwrap()];
+		let master = gml.0.node(0);
+		let ml = MessageLoop::with_gml::<Adapter>(gml, master, Some(add.clone()), None, None)
+			.run_at(master);

 		// now let's say new node has lost its db and we're trying to join it again
-		ml.nodes[nodes_to_add.iter().nth(0).unwrap()].key_storage.clear().unwrap();
+		ml.ml.key_storage_of(add[0].public()).clear().unwrap();

 		// this time old nodes have version, where new node is mentioned, but it doesn't report it when negotiating
-		let mut ml = MessageLoop::new(&gml, master_node_id, None, nodes_to_add, BTreeSet::new(), BTreeSet::new());
-		ml.nodes[&master_node_id].session.initialize(ml.nodes.keys().cloned().collect(), ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap();
-		ml.run();
+		let ml = ml.and_then::<Adapter>(master, Some(add), None, None).run_at(master);

 		// try to recover secret for every possible combination of nodes && check that secret is the same
-		check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter().map(|(k, v)| (k.clone(), v.key_storage.clone())).collect());
-
-		// check that all sessions have finished
-		assert!(ml.nodes.values().all(|n| n.session.is_finished()));
+		ml.check_secret_is_preserved(ml.sessions.keys());
 	}
 }
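
Note: the `gml.0` accesses throughout these tests work because the generation-session test loop is a tuple struct wrapping the cluster-level loop (consistent with `Self::with_ml::<C>(gml.0, ...)` earlier in this file). A toy version of that newtype pattern:

struct ClusterLoop;                 // stand-in for the cluster-level loop
struct GenerationLoop(ClusterLoop); // generation tests wrap it as field 0

fn main() {
    let gml = GenerationLoop(ClusterLoop);
    let _cluster_level = gml.0; // what the tests access as `gml.0`
}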

[file: share_add_session.rs — share add session]

@@ -318,6 +318,7 @@ impl<T> SessionImpl<T> where T: SessionTransport {
 				new_set_signature),
 			consensus_transport: consensus_transport,
 		})?;
+
 		consensus_session.initialize(new_nodes_map.keys().cloned().collect())?;

 		// update data
@@ -881,405 +882,197 @@ impl SessionTransport for IsolatedSessionTransport {
 #[cfg(test)]
 pub mod tests {
-	use std::sync::Arc;
-	use std::collections::{VecDeque, BTreeMap, BTreeSet, HashSet};
-	use ethkey::{Random, Generator, Public, KeyPair, Signature, sign};
-	use ethereum_types::H256;
-	use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage};
-	use key_server_cluster::cluster::Cluster;
-	use key_server_cluster::cluster::tests::DummyCluster;
-	use key_server_cluster::cluster_sessions::ClusterSession;
-	use key_server_cluster::generation_session::tests::{Node as GenerationNode, generate_nodes_ids};
-	use key_server_cluster::math;
-	use key_server_cluster::message::Message;
-	use key_server_cluster::servers_set_change_session::tests::generate_key;
-	use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash;
+	use std::collections::BTreeSet;
+	use ethkey::{Random, Generator, Public};
+	use key_server_cluster::{NodeId, Error, KeyStorage, NodeKeyPair};
+	use key_server_cluster::cluster::tests::MessageLoop as ClusterMessageLoop;
+	use key_server_cluster::servers_set_change_session::tests::{MessageLoop, AdminSessionAdapter, generate_key};
 	use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
 	use super::{SessionImpl, SessionParams, IsolatedSessionTransport};
-	struct Node {
-		pub cluster: Arc<DummyCluster>,
-		pub key_storage: Arc<DummyKeyStorage>,
-		pub session: SessionImpl<IsolatedSessionTransport>,
-	}
-
-	struct MessageLoop {
-		pub admin_key_pair: KeyPair,
-		pub original_key_pair: KeyPair,
-		pub old_nodes_set: BTreeSet<NodeId>,
-		pub new_nodes_set: BTreeSet<NodeId>,
-		pub old_set_signature: Signature,
-		pub new_set_signature: Signature,
-		pub nodes: BTreeMap<NodeId, Node>,
-		pub queue: VecDeque<(NodeId, NodeId, Message)>,
-		pub version: H256,
-	}
-
-	fn create_session(mut meta: ShareChangeSessionMeta, admin_public: Public, self_node_id: NodeId, cluster: Arc<Cluster>, key_storage: Arc<KeyStorage>) -> SessionImpl<IsolatedSessionTransport> {
-		let session_id = meta.id.clone();
-		meta.self_node_id = self_node_id;
-		let key_version = key_storage.get(&session_id).unwrap().map(|ks| ks.versions.iter().last().unwrap().hash.clone());
-
-		SessionImpl::new(SessionParams {
-			meta: meta.clone(),
-			transport: IsolatedSessionTransport::new(session_id, key_version, 1, cluster),
-			key_storage: key_storage,
-			admin_public: Some(admin_public),
-			nonce: 1,
-		}).unwrap()
-	}
-
-	fn create_node(meta: ShareChangeSessionMeta, admin_public: Public, node: GenerationNode, added_nodes: &BTreeSet<NodeId>) -> Node {
-		node.cluster.add_nodes(added_nodes.iter().cloned());
-		Node {
-			cluster: node.cluster.clone(),
-			key_storage: node.key_storage.clone(),
-			session: create_session(meta, admin_public, node.session.node().clone(), node.cluster, node.key_storage),
-		}
-	}
+	struct Adapter;
+
+	impl AdminSessionAdapter<SessionImpl<IsolatedSessionTransport>> for Adapter {
+		const SIGN_NEW_NODES: bool = false;
+
+		fn create(
+			mut meta: ShareChangeSessionMeta,
+			admin_public: Public,
+			_: BTreeSet<NodeId>,
+			ml: &ClusterMessageLoop,
+			idx: usize
+		) -> SessionImpl<IsolatedSessionTransport> {
+			let key_storage = ml.key_storage(idx).clone();
+			let key_version = key_storage.get(&meta.id).unwrap().map(|ks| ks.last_version().unwrap().hash);
+
+			meta.self_node_id = *ml.node_key_pair(idx).public();
+			SessionImpl::new(SessionParams {
+				meta: meta.clone(),
+				transport: IsolatedSessionTransport::new(meta.id, key_version, 1, ml.cluster(idx).view().unwrap()),
+				key_storage,
+				admin_public: Some(admin_public),
+				nonce: 1,
+			}).unwrap()
+		}
+	}
-	/// This only works for schemes where threshold = 1
-	pub fn check_secret_is_preserved(joint_key_pair: KeyPair, nodes: BTreeMap<NodeId, Arc<DummyKeyStorage>>) {
-		let n = nodes.len();
-		let document_secret_plain = math::generate_random_point().unwrap();
-		for n1 in 0..n {
-			for n2 in n1+1..n {
-				let share1 = nodes.values().nth(n1).unwrap().get(&SessionId::default()).unwrap();
-				let share2 = nodes.values().nth(n2).unwrap().get(&SessionId::default()).unwrap();
-				let id_number1 = share1.as_ref().unwrap().last_version().unwrap().id_numbers[nodes.keys().nth(n1).unwrap()].clone();
-				let id_number2 = share1.as_ref().unwrap().last_version().unwrap().id_numbers[nodes.keys().nth(n2).unwrap()].clone();
-
-				// now encrypt and decrypt data
-				let (document_secret_decrypted, document_secret_decrypted_test) =
-					math::tests::do_encryption_and_decryption(1,
-						joint_key_pair.public(),
-						&[id_number1, id_number2],
-						&[share1.unwrap().last_version().unwrap().secret_share.clone(),
-							share2.unwrap().last_version().unwrap().secret_share.clone()],
-						Some(joint_key_pair.secret()),
-						document_secret_plain.clone());
-
-				assert_eq!(document_secret_plain, document_secret_decrypted_test);
-				assert_eq!(document_secret_plain, document_secret_decrypted);
-			}
-		}
-	}
-
-	impl MessageLoop {
-		pub fn new(t: usize, master_node_id: NodeId, old_nodes_set: BTreeSet<NodeId>, new_nodes_set: BTreeSet<NodeId>) -> Self {
-			// generate admin key pair
-			let admin_key_pair = Random.generate().unwrap();
-			let admin_public = admin_key_pair.public().clone();
-
-			// run initial generation session
-			let gml = generate_key(t, old_nodes_set.clone());
-
-			// compute original secret key
-			let version = gml.nodes.values().nth(0).unwrap().key_storage.get(&Default::default()).unwrap().unwrap().versions[0].hash.clone();
-			let original_key_pair = gml.compute_key_pair(t);
-
-			// prepare sessions on all nodes
-			let meta = ShareChangeSessionMeta {
-				id: SessionId::default(),
-				self_node_id: NodeId::default(),
-				master_node_id: master_node_id,
-				configured_nodes_count: new_nodes_set.iter().chain(old_nodes_set.iter()).collect::<HashSet<_>>().len(),
-				connected_nodes_count: new_nodes_set.iter().chain(old_nodes_set.iter()).collect::<HashSet<_>>().len(),
-			};
-			let new_nodes = new_nodes_set.iter()
-				.filter(|n| !old_nodes_set.contains(&n))
-				.map(|new_node_id| {
-					let new_node_cluster = Arc::new(DummyCluster::new(new_node_id.clone()));
-					let new_node_key_storage = Arc::new(DummyKeyStorage::default());
-					let new_node_session = create_session(meta.clone(), admin_public.clone(), new_node_id.clone(), new_node_cluster.clone(), new_node_key_storage.clone());
-					new_node_cluster.add_nodes(new_nodes_set.iter().cloned());
-					Node {
-						cluster: new_node_cluster,
-						key_storage: new_node_key_storage,
-						session: new_node_session,
-					}
-				});
-			let old_nodes = gml.nodes.into_iter().map(|gn| create_node(meta.clone(), admin_public.clone(), gn.1, &new_nodes_set));
-			let nodes = old_nodes.chain(new_nodes).map(|n| (n.session.core.meta.self_node_id.clone(), n)).collect();
-
-			let old_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&old_nodes_set)).unwrap();
-			let new_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap();
-			MessageLoop {
-				admin_key_pair: admin_key_pair,
-				original_key_pair: original_key_pair,
-				version: version,
-				old_nodes_set: old_nodes_set.clone(),
-				new_nodes_set: new_nodes_set.clone(),
-				old_set_signature: old_set_signature,
-				new_set_signature: new_set_signature,
-				nodes: nodes,
-				queue: Default::default(),
-			}
-		}
-
-		pub fn new_additional(master_node_id: NodeId, ml: MessageLoop, new_nodes_set: BTreeSet<NodeId>) -> Self {
-			let version = ml.nodes.values().nth(0).unwrap().key_storage.get(&Default::default()).unwrap().unwrap().versions.last().unwrap().hash.clone();
-
-			// prepare sessions on all nodes
-			let meta = ShareChangeSessionMeta {
-				id: SessionId::default(),
-				self_node_id: NodeId::default(),
-				master_node_id: master_node_id,
-				configured_nodes_count: new_nodes_set.iter().chain(ml.nodes.keys()).collect::<BTreeSet<_>>().len(),
-				connected_nodes_count: new_nodes_set.iter().chain(ml.nodes.keys()).collect::<BTreeSet<_>>().len(),
-			};
-			let old_nodes_set = ml.nodes.keys().cloned().collect();
-			let nodes = ml.nodes.iter()
-				.map(|(n, nd)| {
-					let node_cluster = nd.cluster.clone();
-					let node_key_storage = nd.key_storage.clone();
-					let node_session = create_session(meta.clone(), ml.admin_key_pair.public().clone(), n.clone(), node_cluster.clone(), node_key_storage.clone());
-					node_cluster.add_nodes(new_nodes_set.iter().cloned());
-					(n.clone(), Node {
-						cluster: node_cluster,
-						key_storage: node_key_storage,
-						session: node_session,
-					})
-				}).chain(new_nodes_set.difference(&old_nodes_set).map(|n| {
-					let new_node_cluster = Arc::new(DummyCluster::new(n.clone()));
-					let new_node_key_storage = Arc::new(DummyKeyStorage::default());
-					let new_node_session = create_session(meta.clone(), ml.admin_key_pair.public().clone(), n.clone(), new_node_cluster.clone(), new_node_key_storage.clone());
-					new_node_cluster.add_nodes(new_nodes_set.iter().cloned());
-					(n.clone(), Node {
-						cluster: new_node_cluster,
-						key_storage: new_node_key_storage,
-						session: new_node_session,
-					})
-				})).collect();
-
-			let old_set_signature = sign(ml.admin_key_pair.secret(), &ordered_nodes_hash(&old_nodes_set)).unwrap();
-			let new_set_signature = sign(ml.admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap();
-			MessageLoop {
-				admin_key_pair: ml.admin_key_pair,
-				original_key_pair: ml.original_key_pair,
-				version: version,
-				old_nodes_set: old_nodes_set.clone(),
-				new_nodes_set: new_nodes_set.clone(),
-				old_set_signature: old_set_signature,
-				new_set_signature: new_set_signature,
-				nodes: nodes,
-				queue: Default::default(),
-			}
-		}
-
-		pub fn update_signature(&mut self) {
-			self.old_set_signature = sign(self.admin_key_pair.secret(), &ordered_nodes_hash(&self.old_nodes_set)).unwrap();
-			self.new_set_signature = sign(self.admin_key_pair.secret(), &ordered_nodes_hash(&self.new_nodes_set)).unwrap();
-		}
-
-		pub fn run(&mut self) {
-			while let Some((from, to, message)) = self.take_message() {
-				self.process_message((from, to, message)).unwrap();
-			}
-		}
-
-		pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> {
-			self.nodes.values()
-				.filter_map(|n| n.cluster.take_message().map(|m| (n.session.core.meta.self_node_id.clone(), m.0, m.1)))
-				.nth(0)
-				.or_else(|| self.queue.pop_front())
-		}
-
-		pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> {
-			match { match msg.2 {
-				Message::ShareAdd(ref message) =>
-					self.nodes[&msg.1].session.process_message(&msg.0, message),
-				_ => unreachable!("only servers set change messages are expected"),
-			} } {
-				Ok(_) => Ok(()),
-				Err(Error::TooEarlyForRequest) => {
-					self.queue.push_back(msg);
-					Ok(())
-				},
-				Err(err) => Err(err),
-			}
-		}
-	}
+	impl MessageLoop<SessionImpl<IsolatedSessionTransport>> {
+		pub fn init_at(self, master: NodeId) -> Result<Self, Error> {
+			self.sessions[&master].initialize(
+				Some(self.original_key_version),
+				Some(self.new_nodes_set.clone()),
+				Some(self.all_set_signature.clone()),
+				Some(self.new_set_signature.clone()))?;
+			Ok(self)
+		}
+
+		pub fn run_at(self, master: NodeId) -> Result<Self, Error> {
+			let mut ml = self.init_at(master)?;
+			ml.run();
+			Ok(ml)
+		}
+	}
 	#[test]
 	fn node_add_fails_if_nodes_removed() {
-		let old_nodes_set = generate_nodes_ids(3);
-		let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
-		let node_to_remove_id = old_nodes_set.iter().cloned().nth(1).unwrap();
-		let mut new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(generate_nodes_ids(1)).collect();
-		new_nodes_set.remove(&node_to_remove_id);
-		let ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone());
-		assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set),
-			Some(ml.old_set_signature.clone()),
-			Some(ml.new_set_signature.clone())
-		).unwrap_err(), Error::ConsensusUnreachable);
+		// initial 2-of-3 session
+		let gml = generate_key(3, 1);
+
+		// try to remove 1 node
+		let add = vec![Random.generate().unwrap()];
+		let remove: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect();
+		let master = gml.0.node(0);
+		assert_eq!(MessageLoop::with_gml::<Adapter>(gml, master, Some(add), Some(remove), None)
+			.run_at(master).unwrap_err(), Error::ConsensusUnreachable);
 	}
 	#[test]
 	fn node_add_fails_if_no_nodes_added() {
-		let old_nodes_set = generate_nodes_ids(3);
-		let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
-		let new_nodes_set = old_nodes_set.clone();
-		let ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone());
-		assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set),
-			Some(ml.old_set_signature.clone()),
-			Some(ml.new_set_signature.clone())
-		).unwrap_err(), Error::ConsensusUnreachable);
+		// initial 2-of-3 session
+		let gml = generate_key(3, 1);
+
+		// try to add 0 nodes
+		let add = vec![];
+		let master = gml.0.node(0);
+		assert_eq!(MessageLoop::with_gml::<Adapter>(gml, master, Some(add), None, None)
+			.run_at(master).unwrap_err(), Error::ConsensusUnreachable);
 	}
 	#[test]
 	fn node_add_fails_if_started_on_adding_node() {
-		let old_nodes_set = generate_nodes_ids(3);
-		let nodes_to_add_set = generate_nodes_ids(1);
-		let master_node_id = nodes_to_add_set.iter().cloned().nth(0).unwrap();
-		let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(nodes_to_add_set.into_iter()).collect();
-		let ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone());
-		assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set),
-			Some(ml.old_set_signature.clone()),
-			Some(ml.new_set_signature.clone())
-		).unwrap_err(), Error::ServerKeyIsNotFound);
+		// initial 2-of-3 session
+		let gml = generate_key(3, 1);
+
+		// try to add 1 node using this node as a master node
+		let add = vec![Random.generate().unwrap()];
+		let master = *add[0].public();
+		assert_eq!(MessageLoop::with_gml::<Adapter>(gml, master, Some(add), None, None)
+			.run_at(master).unwrap_err(), Error::ServerKeyIsNotFound);
 	}
 	#[test]
 	fn node_add_fails_if_initialized_twice() {
-		let old_nodes_set = generate_nodes_ids(3);
-		let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
-		let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(generate_nodes_ids(1)).collect();
-		let ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone());
-		assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set.clone()),
-			Some(ml.old_set_signature.clone()),
-			Some(ml.new_set_signature.clone())
-		), Ok(()));
-		assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set),
-			Some(ml.old_set_signature.clone()),
-			Some(ml.new_set_signature.clone())
-		), Err(Error::InvalidStateForRequest));
+		// initial 2-of-3 session
+		let gml = generate_key(3, 1);
+
+		// try to add 1 node using this node as a master node
+		let add = vec![Random.generate().unwrap()];
+		let master = gml.0.node(0);
+		assert_eq!(MessageLoop::with_gml::<Adapter>(gml, master, Some(add), None, None)
+			.init_at(master).unwrap()
+			.init_at(master).unwrap_err(), Error::InvalidStateForRequest);
 	}
 	#[test]
 	fn node_add_fails_if_started_without_signatures() {
-		let old_nodes_set = generate_nodes_ids(3);
-		let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
-		let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(generate_nodes_ids(1)).collect();
-		let ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone());
-		assert_eq!(ml.nodes[&master_node_id].session.initialize(None, None, None, None), Err(Error::InvalidMessage));
+		// initial 2-of-3 session
+		let gml = generate_key(3, 1);
+
+		// try to add 1 node using this node as a master node
+		let add = vec![Random.generate().unwrap()];
+		let master = gml.0.node(0);
+		assert_eq!(MessageLoop::with_gml::<Adapter>(gml, master, Some(add), None, None)
+			.sessions[&master]
+			.initialize(None, None, None, None).unwrap_err(), Error::InvalidMessage);
 	}
 	#[test]
 	fn nodes_added_using_share_add() {
 		let test_cases = vec![(3, 1), (3, 3)];
-		for (n, nodes_to_add) in test_cases {
-			// generate key && prepare ShareAdd sessions
-			let old_nodes_set = generate_nodes_ids(n);
-			let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(generate_nodes_ids(nodes_to_add)).collect();
-			let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
-			let mut ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone());
-
-			// initialize session on master node && run to completion
-			ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set),
-				Some(ml.old_set_signature.clone()),
-				Some(ml.new_set_signature.clone())).unwrap();
-			ml.run();
-
-			// check that session has completed on all nodes
-			assert!(ml.nodes.values().all(|n| n.session.is_finished()));
+		for (n, add) in test_cases {
+			// generate key
+			let gml = generate_key(n, 1);
+
+			// run share add session
+			let add = (0..add).map(|_| Random.generate().unwrap()).collect();
+			let master = gml.0.node(0);
+			let ml = MessageLoop::with_gml::<Adapter>(gml, master, Some(add), None, None)
+				.run_at(master).unwrap();

 			// check that secret is still the same as before adding the share
-			check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter().map(|(k, v)| (k.clone(), v.key_storage.clone())).collect());
+			ml.check_secret_is_preserved(ml.sessions.keys());
 		}
 	}
 	#[test]
 	fn nodes_added_using_share_add_with_isolated_nodes() {
-		let (n, nodes_to_add) = (3, 3);
-
-		// generate key && prepare ShareAdd sessions
-		let old_nodes_set = generate_nodes_ids(n);
-		let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(generate_nodes_ids(nodes_to_add)).collect();
-		let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
-		let isolated_node_id = old_nodes_set.iter().cloned().nth(1).unwrap();
-		let mut ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone());
-
-		// now let's isolate 1 of 3 nodes owning key share
-		ml.nodes.remove(&isolated_node_id);
-		ml.old_nodes_set.remove(&isolated_node_id);
-		ml.new_nodes_set.remove(&isolated_node_id);
-		for (_, node) in ml.nodes.iter_mut() {
-			node.cluster.remove_node(&isolated_node_id);
-		}
-		ml.update_signature();
-
-		// initialize session on master node && run to completion
-		ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set),
-			Some(ml.old_set_signature.clone()),
-			Some(ml.new_set_signature.clone())).unwrap();
-		ml.run();
-
-		// check that session has completed on all nodes
-		assert!(ml.nodes.values().all(|n| n.session.is_finished()));
+		let (n, add) = (3, 3);
+
+		// generate key
+		let gml = generate_key(n, 1);
+
+		// run share add session
+		let master = gml.0.node(0);
+		let node_to_isolate = gml.0.node(1);
+		let add = (0..add).map(|_| Random.generate().unwrap()).collect();
+		let isolate = ::std::iter::once(node_to_isolate).collect();
+		let ml = MessageLoop::with_gml::<Adapter>(gml, master, Some(add), None, Some(isolate))
+			.run_at(master).unwrap();

 		// check that secret is still the same as before adding the share
-		check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes
-			.iter()
-			.map(|(k, v)| (k.clone(), v.key_storage.clone()))
-			.collect());
+		ml.check_secret_is_preserved(ml.sessions.keys());
 	}
	#[test]
	fn nodes_add_to_the_node_with_obsolete_version() {
		let (n, add) = (3, 3);

		// generate key
		let gml = generate_key(n, 1);

		// run share add session
		let master = gml.0.node(0);
		let node_to_isolate_key_pair = gml.0.node_key_pair(1).clone();
		let node_to_isolate = gml.0.node(1);
		let isolated_key_storage = gml.0.key_storage(1).clone();
		let mut oldest_nodes_set = gml.0.nodes();
		oldest_nodes_set.remove(&node_to_isolate);
		let add = (0..add).map(|_| Random.generate().unwrap()).collect::<Vec<_>>();
		let newest_nodes_set = add.iter().map(|kp| *kp.public()).collect::<Vec<_>>();
		let isolate = ::std::iter::once(node_to_isolate).collect();
		let ml = MessageLoop::with_gml::<Adapter>(gml, master, Some(add), None, Some(isolate))
			.run_at(master).unwrap();
		let new_key_version = ml.ml.key_storage(0).get(&Default::default())
			.unwrap().unwrap().last_version().unwrap().hash;

		// now let's add back the old node so that the key becomes 2-of-6
		let add = vec![node_to_isolate_key_pair.key_pair().clone()];
		let mut ml = ml.and_then::<Adapter>(master.clone(), Some(add), None, None);
		ml.original_key_version = new_key_version;
		ml.ml.replace_key_storage_of(&node_to_isolate, isolated_key_storage.clone());
		ml.sessions.get_mut(&node_to_isolate).unwrap().core.key_share =
			isolated_key_storage.get(&Default::default()).unwrap();
		ml.sessions.get_mut(&node_to_isolate).unwrap().core.key_storage = isolated_key_storage;
		let ml = ml.run_at(master).unwrap();

		// check that secret is still the same as before adding the share
		ml.check_secret_is_preserved(ml.sessions.keys());

		// check that all oldest nodes have versions A, B, C
		// isolated node has versions A, C
		// new nodes have versions B, C
		let oldest_key_share = ml.ml.key_storage_of(oldest_nodes_set.iter().nth(0).unwrap())
			.get(&Default::default()).unwrap().unwrap();
		debug_assert_eq!(oldest_key_share.versions.len(), 3);
		let version_a = oldest_key_share.versions[0].hash.clone();
		let version_b = oldest_key_share.versions[1].hash.clone();
@@ -1287,41 +1080,28 @@ pub mod tests {
		debug_assert!(version_a != version_b && version_b != version_c);
		debug_assert!(oldest_nodes_set.iter().all(|n| vec![version_a.clone(), version_b.clone(), version_c.clone()] ==
			ml.ml.key_storage_of(n).get(&Default::default()).unwrap().unwrap()
				.versions.iter().map(|v| v.hash).collect::<Vec<_>>()));
		debug_assert!(::std::iter::once(&node_to_isolate).all(|n| vec![version_a.clone(), version_c.clone()] ==
			ml.ml.key_storage_of(n).get(&Default::default()).unwrap().unwrap()
				.versions.iter().map(|v| v.hash).collect::<Vec<_>>()));
		debug_assert!(newest_nodes_set.iter().all(|n| vec![version_b.clone(), version_c.clone()] ==
			ml.ml.key_storage_of(n).get(&Default::default()).unwrap().unwrap()
				.versions.iter().map(|v| v.hash).collect::<Vec<_>>()));
	}
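	// `and_then::<Adapter>` (used above) chains a second ShareAdd session onto the
	// cluster state left by the first run. A minimal sketch, assuming the same
	// module context (`add1`/`add2` are placeholder lists of joining key pairs):
	//
	//	let ml = MessageLoop::with_gml::<Adapter>(gml, master, Some(add1), None, None)
	//		.run_at(master).unwrap();
	//	ml.check_secret_is_preserved(ml.sessions.keys());
	//	let ml = ml.and_then::<Adapter>(master, Some(add2), None, None)
	//		.run_at(master).unwrap();
	//	ml.check_secret_is_preserved(ml.sessions.keys());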
	#[test]
	fn nodes_add_fails_when_not_enough_share_owners_are_connected() {
		let (n, add) = (3, 3);

		// generate key
		let gml = generate_key(n, 1);

		// run share add session: with 2 of the 3 original share owners isolated,
		// consensus cannot be reached
		let master = gml.0.node(0);
		let add = (0..add).map(|_| Random.generate().unwrap()).collect::<Vec<_>>();
		let isolate = vec![gml.0.node(1), gml.0.node(2)].into_iter().collect();
		assert_eq!(MessageLoop::with_gml::<Adapter>(gml, master, Some(add), None, Some(isolate))
			.run_at(master).unwrap_err(), Error::ConsensusUnreachable);
	}
}

View File

@@ -940,406 +940,315 @@ fn check_threshold(threshold: usize, nodes: &BTreeSet<NodeId>) -> Result<(), Err
#[cfg(test)]
pub mod tests {
	use std::sync::Arc;
	use ethereum_types::H256;
	use ethkey::{Random, Generator, KeyPair, Secret};
	use key_server_cluster::{NodeId, Error, KeyStorage};
	use key_server_cluster::message::{self, Message, GenerationMessage, KeysDissemination,
		PublicKeyShare, ConfirmInitialization};
	use key_server_cluster::cluster::tests::{MessageLoop as ClusterMessageLoop, make_clusters_and_preserve_sessions};
	use key_server_cluster::cluster_sessions::ClusterSession;
	use key_server_cluster::generation_session::{SessionImpl, SessionState};
	use key_server_cluster::math;
	use key_server_cluster::math::tests::do_encryption_and_decryption;

	#[derive(Debug)]
	pub struct MessageLoop(pub ClusterMessageLoop);

	impl MessageLoop {
		pub fn new(num_nodes: usize) -> Self {
			MessageLoop(make_clusters_and_preserve_sessions(num_nodes))
		}

		pub fn init(self, threshold: usize) -> Result<Self, Error> {
			self.0.cluster(0).client()
				.new_generation_session(Default::default(), None, Default::default(), threshold)
				.map(|_| self)
		}

		pub fn session_at(&self, idx: usize) -> Arc<SessionImpl> {
			self.0.sessions(idx).generation_sessions.first().unwrap()
		}

		pub fn session_of(&self, node: &NodeId) -> Arc<SessionImpl> {
			self.0.sessions_of(node).generation_sessions.first().unwrap()
		}

		pub fn take_message_confirm_initialization(&self) -> (NodeId, NodeId, ConfirmInitialization) {
			match self.0.take_message() {
				Some((from, to, Message::Generation(GenerationMessage::ConfirmInitialization(msg)))) =>
					(from, to, msg),
				_ => panic!("unexpected"),
			}
		}

		pub fn take_message_keys_dissemination(&self) -> (NodeId, NodeId, KeysDissemination) {
			match self.0.take_message() {
				Some((from, to, Message::Generation(GenerationMessage::KeysDissemination(msg)))) =>
					(from, to, msg),
				_ => panic!("unexpected"),
			}
		}

		pub fn take_message_public_key_share(&self) -> (NodeId, NodeId, PublicKeyShare) {
			match self.0.take_message() {
				Some((from, to, Message::Generation(GenerationMessage::PublicKeyShare(msg)))) =>
					(from, to, msg),
				_ => panic!("unexpected"),
			}
		}

		pub fn nodes_id_numbers(&self) -> Vec<Secret> {
			let session = self.session_at(0);
			let session_data = session.data.lock();
			session_data.nodes.values().map(|n| n.id_number.clone()).collect()
		}

		pub fn nodes_secret_shares(&self) -> Vec<Secret> {
			(0..self.0.nodes().len()).map(|i| {
				let session = self.session_at(i);
				let session_data = session.data.lock();
				session_data.secret_share.as_ref().unwrap().clone()
			}).collect()
		}

		pub fn compute_key_pair(&self) -> KeyPair {
			let t = self.0.key_storage(0).get(&Default::default()).unwrap().unwrap().threshold;
			let secret_shares = self.nodes_secret_shares();
			let id_numbers = self.nodes_id_numbers();
			let secret_shares = secret_shares.iter().take(t + 1).collect::<Vec<_>>();
			let id_numbers = id_numbers.iter().take(t + 1).collect::<Vec<_>>();
			let joint_secret = math::compute_joint_secret_from_shares(t, &secret_shares, &id_numbers).unwrap();
			KeyPair::from_secret(joint_secret).unwrap()
		}

		pub fn key_version(&self) -> H256 {
			self.0.key_storage(0).get(&Default::default())
				.unwrap().unwrap().versions.iter().last().unwrap().hash
		}
	}
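	// A minimal sketch of how these helpers compose (assumes this module's
	// `MessageLoop`): run a 1-of-3 generation session entirely in memory, then
	// check that the joint public key matches the key pair recombined from the
	// nodes' secret shares:
	//
	//	let ml = MessageLoop::new(3).init(1).unwrap();
	//	ml.0.loop_until(|| ml.0.is_empty());        // deliver every queued message
	//	let key_pair = ml.compute_key_pair();       // recombined from t + 1 shares
	//	let joint_public = ml.session_at(0).joint_public_and_secret().unwrap().unwrap().0;
	//	assert_eq!(joint_public, *key_pair.public());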
	#[test]
	fn initializes_in_cluster_of_single_node() {
		MessageLoop::new(1).init(0).unwrap();
	}

	#[test]
	fn fails_to_initialize_if_threshold_is_wrong() {
		assert_eq!(MessageLoop::new(2).init(2).unwrap_err(), Error::NotEnoughNodesForThreshold);
	}
	#[test]
	fn fails_to_initialize_when_already_initialized() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		assert_eq!(
			ml.session_at(0).initialize(Default::default(), Default::default(), false, 0, ml.0.nodes().into()),
			Err(Error::InvalidStateForRequest),
		);
	}

	#[test]
	fn fails_to_accept_initialization_when_already_initialized() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		let (from, to, msg) = ml.0.take_message().unwrap();
		ml.0.process_message(from, to, msg.clone());
		assert_eq!(
			ml.session_of(&to).on_message(&from, &msg),
			Err(Error::InvalidStateForRequest),
		);
	}
	#[test]
	fn slave_updates_derived_point_on_initialization() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		let original_point = match ml.0.take_message().unwrap() {
			(from, to, Message::Generation(GenerationMessage::InitializeSession(msg))) => {
				let original_point = msg.derived_point.clone();
				let msg = Message::Generation(GenerationMessage::InitializeSession(msg));
				ml.0.process_message(from, to, msg);
				original_point
			},
			_ => panic!("unexpected"),
		};

		match ml.0.take_message().unwrap() {
			(_, _, Message::Generation(GenerationMessage::ConfirmInitialization(msg))) =>
				assert!(original_point != msg.derived_point),
			_ => panic!("unexpected"),
		}
	}
	#[test]
	fn fails_to_accept_initialization_confirmation_if_already_accepted_from_the_same_node() {
		let ml = MessageLoop::new(3).init(0).unwrap();
		ml.0.take_and_process_message();

		let (from, to, msg) = ml.take_message_confirm_initialization();
		ml.0.process_message(from, to, Message::Generation(GenerationMessage::ConfirmInitialization(msg.clone())));
		assert_eq!(ml.session_of(&to).on_confirm_initialization(from, &msg), Err(Error::InvalidStateForRequest));
	}

	#[test]
	fn fails_to_accept_initialization_confirmation_if_initialization_already_completed() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		ml.0.take_and_process_message();
		ml.0.take_and_process_message();
		assert_eq!(ml.session_at(0).on_confirm_initialization(ml.0.node(1), &message::ConfirmInitialization {
			session: Default::default(),
			session_nonce: 0,
			derived_point: math::generate_random_point().unwrap().into(),
		}), Err(Error::InvalidStateForRequest));
	}
	#[test]
	fn master_updates_derived_point_on_initialization_completion() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		ml.0.take_and_process_message();
		let original_point = match ml.0.take_message().unwrap() {
			(from, to, Message::Generation(GenerationMessage::ConfirmInitialization(msg))) => {
				let original_point = msg.derived_point.clone();
				let msg = Message::Generation(GenerationMessage::ConfirmInitialization(msg));
				ml.session_of(&to).on_message(&from, &msg).unwrap();
				original_point
			},
			_ => panic!("unexpected"),
		};

		assert!(ml.session_at(0).derived_point().unwrap() != original_point.into());
	}
#[test]
fn fails_to_complete_initialization_if_threshold_is_wrong() {
let (sid, m, s, l) = make_simple_cluster(0, 2).unwrap();
let mut nodes = BTreeMap::new();
nodes.insert(m, math::generate_random_scalar().unwrap());
nodes.insert(s, math::generate_random_scalar().unwrap());
assert_eq!(l.first_slave().on_initialize_session(m, &message::InitializeSession {
session: sid.into(),
session_nonce: 0,
origin: None,
author: Address::default().into(),
nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
is_zero: false,
threshold: 2,
derived_point: math::generate_random_point().unwrap().into(),
}).unwrap_err(), Error::NotEnoughNodesForThreshold);
} }
	#[test]
	fn fails_to_complete_initialization_if_not_waiting_for_it() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		ml.0.take_and_process_message();
		assert_eq!(ml.session_at(0).on_complete_initialization(ml.0.node(1), &message::CompleteInitialization {
			session: Default::default(),
			session_nonce: 0,
			derived_point: math::generate_random_point().unwrap().into(),
		}), Err(Error::InvalidStateForRequest));
	}
	#[test]
	fn fails_to_complete_initialization_from_non_master_node() {
		let ml = MessageLoop::new(3).init(0).unwrap();
		ml.0.take_and_process_message();
		ml.0.take_and_process_message();
		ml.0.take_and_process_message();
		ml.0.take_and_process_message();
		assert_eq!(ml.session_at(1).on_complete_initialization(ml.0.node(2), &message::CompleteInitialization {
			session: Default::default(),
			session_nonce: 0,
			derived_point: math::generate_random_point().unwrap().into(),
		}), Err(Error::InvalidMessage));
	}
	#[test]
	fn fails_to_accept_keys_dissemination_if_not_waiting_for_it() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		assert_eq!(ml.session_at(0).on_keys_dissemination(ml.0.node(1), &message::KeysDissemination {
			session: Default::default(),
			session_nonce: 0,
			secret1: math::generate_random_scalar().unwrap().into(),
			secret2: math::generate_random_scalar().unwrap().into(),
			publics: vec![math::generate_random_point().unwrap().into()],
		}), Err(Error::TooEarlyForRequest));
	}
	#[test]
	fn fails_to_accept_keys_dissemination_if_wrong_number_of_publics_passed() {
		let ml = MessageLoop::new(3).init(0).unwrap();
		ml.0.take_and_process_message(); // m -> s1: InitializeSession
		ml.0.take_and_process_message(); // m -> s2: InitializeSession
		ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization
		ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization
		ml.0.take_and_process_message(); // m -> s1: CompleteInitialization
		ml.0.take_and_process_message(); // m -> s2: CompleteInitialization

		let (from, to, mut msg) = ml.take_message_keys_dissemination();
		msg.publics.clear();
		assert_eq!(ml.session_of(&to).on_keys_dissemination(from, &msg), Err(Error::InvalidMessage));
	}
	#[test]
	fn fails_to_accept_keys_dissemination_second_time_from_the_same_node() {
		let ml = MessageLoop::new(3).init(0).unwrap();
		ml.0.take_and_process_message(); // m -> s1: InitializeSession
		ml.0.take_and_process_message(); // m -> s2: InitializeSession
		ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization
		ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization
		ml.0.take_and_process_message(); // m -> s1: CompleteInitialization
		ml.0.take_and_process_message(); // m -> s2: CompleteInitialization

		let (from, to, msg) = ml.take_message_keys_dissemination();
		ml.0.process_message(from, to, Message::Generation(GenerationMessage::KeysDissemination(msg.clone())));
		assert_eq!(ml.session_of(&to).on_keys_dissemination(from, &msg), Err(Error::InvalidStateForRequest));
	}
	#[test]
	fn should_not_accept_public_key_share_when_is_not_waiting_for_it() {
		let ml = MessageLoop::new(3).init(1).unwrap();
		assert_eq!(ml.session_at(0).on_public_key_share(ml.0.node(1), &message::PublicKeyShare {
			session: Default::default(),
			session_nonce: 0,
			public_share: math::generate_random_point().unwrap().into(),
		}), Err(Error::InvalidStateForRequest));
	}
	#[test]
	fn should_not_accept_public_key_share_when_receiving_twice() {
		let ml = MessageLoop::new(3).init(0).unwrap();
		ml.0.take_and_process_message(); // m -> s1: InitializeSession
		ml.0.take_and_process_message(); // m -> s2: InitializeSession
		ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization
		ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization
		ml.0.take_and_process_message(); // m -> s1: CompleteInitialization
		ml.0.take_and_process_message(); // m -> s2: CompleteInitialization
		ml.0.take_and_process_message(); // m -> s1: KeysDissemination
		ml.0.take_and_process_message(); // m -> s2: KeysDissemination
		ml.0.take_and_process_message(); // s1 -> m: KeysDissemination
		ml.0.take_and_process_message(); // s1 -> s2: KeysDissemination
		ml.0.take_and_process_message(); // s2 -> m: KeysDissemination
		ml.0.take_and_process_message(); // s2 -> s1: KeysDissemination

		let (from, to, msg) = ml.take_message_public_key_share();
		ml.0.process_message(from, to, Message::Generation(GenerationMessage::PublicKeyShare(msg.clone())));
		assert_eq!(ml.session_of(&to).on_public_key_share(from, &msg), Err(Error::InvalidMessage));
	}
	#[test]
	fn encryption_fails_on_session_timeout() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		assert!(ml.session_at(0).joint_public_and_secret().is_none());
		ml.session_at(0).on_session_timeout();
		assert_eq!(ml.session_at(0).joint_public_and_secret().unwrap(), Err(Error::NodeDisconnected));
	}

	#[test]
	fn encryption_fails_on_node_timeout() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		assert!(ml.session_at(0).joint_public_and_secret().is_none());
		ml.session_at(0).on_node_timeout(&ml.0.node(1));
		assert_eq!(ml.session_at(0).joint_public_and_secret().unwrap(), Err(Error::NodeDisconnected));
	}
	#[test]
	fn complete_enc_dec_session() {
		let test_cases = [(0, 5), (2, 5), (3, 5)];
		for &(threshold, num_nodes) in &test_cases {
			let ml = MessageLoop::new(num_nodes).init(threshold).unwrap();
			ml.0.loop_until(|| ml.0.is_empty());

			// check that all nodes have finished joint public generation
			let joint_public_key = ml.session_at(0).joint_public_and_secret().unwrap().unwrap().0;
			for i in 0..num_nodes {
				let session = ml.session_at(i);
				assert_eq!(session.state(), SessionState::Finished);
				assert_eq!(session.joint_public_and_secret().map(|p| p.map(|p| p.0)), Some(Ok(joint_public_key)));
			}

			// now let's encrypt some secret (which is a point on EC)
			let document_secret_plain = Random.generate().unwrap().public().clone();
			let all_nodes_id_numbers = ml.nodes_id_numbers();
			let all_nodes_secret_shares = ml.nodes_secret_shares();
			let document_secret_decrypted = do_encryption_and_decryption(threshold, &joint_public_key,
				&all_nodes_id_numbers,
				&all_nodes_secret_shares,
@@ -1350,41 +1259,18 @@ pub mod tests {
		}
	}
#[test]
fn encryption_session_works_over_network() {
const CONN_TIMEOUT: Duration = Duration::from_millis(300);
const SESSION_TIMEOUT: Duration = Duration::from_millis(1000);
let test_cases = [(1, 3)];
for &(threshold, num_nodes) in &test_cases {
let mut core = new_runtime();
// prepare cluster objects for each node
let clusters = make_clusters(&core, 6031, num_nodes);
run_clusters(&clusters);
// `clusters` contains `Arc<ClusterCore>` and clones will refer to the same cores.
let clusters_clone = clusters.clone();
// establish connections
loop_until(&core.executor(), CONN_TIMEOUT, move || clusters_clone.iter().all(all_connections_established));
// run session to completion
let session_id = SessionId::default();
let session = clusters[0].client().new_generation_session(session_id, Default::default(), Default::default(), threshold).unwrap();
loop_until(&core.executor(), SESSION_TIMEOUT, move || session.joint_public_and_secret().is_some());
}
}
	#[test]
	fn generation_message_fails_when_nonce_is_wrong() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		ml.0.take_and_process_message();

		let msg = message::GenerationMessage::KeysDissemination(message::KeysDissemination {
			session: Default::default(),
			session_nonce: 10,
			secret1: math::generate_random_scalar().unwrap().into(),
			secret2: math::generate_random_scalar().unwrap().into(),
			publics: vec![math::generate_random_point().unwrap().into()],
		});
		assert_eq!(ml.session_at(1).process_message(&ml.0.node(0), &msg).unwrap_err(), Error::ReplayProtection);
	}
}

View File

@@ -1061,140 +1061,65 @@ impl JobTransport for SigningJobTransport
#[cfg(test)]
mod tests {
	use std::sync::Arc;
	use ethereum_types::H256;
	use ethkey::{self, Random, Generator, Public, verify_public, public_to_address};
	use key_server_cluster::{SessionId, Error, KeyStorage};
	use key_server_cluster::cluster::tests::MessageLoop as ClusterMessageLoop;
	use key_server_cluster::signing_session_ecdsa::SessionImpl;
	use key_server_cluster::generation_session::tests::MessageLoop as GenerationMessageLoop;

	#[derive(Debug)]
	pub struct MessageLoop(pub ClusterMessageLoop);

	impl MessageLoop {
		pub fn new(num_nodes: usize, threshold: usize) -> Result<Self, Error> {
			let ml = GenerationMessageLoop::new(num_nodes).init(threshold)?;
			ml.0.loop_until(|| ml.0.is_empty()); // complete generation session

			Ok(MessageLoop(ml.0))
		}

		pub fn init_with_version(self, key_version: Option<H256>) -> Result<(Self, Public, H256), Error> {
			let message_hash = H256::random();
			let requester = Random.generate().unwrap();
			let signature = ethkey::sign(requester.secret(), &SessionId::default()).unwrap();
			self.0.cluster(0).client()
				.new_ecdsa_signing_session(Default::default(), signature.into(), key_version, message_hash)
				.map(|_| (self, *requester.public(), message_hash))
		}

		pub fn init(self) -> Result<(Self, Public, H256), Error> {
			let key_version = self.0.key_storage(0).get(&Default::default())
				.unwrap().unwrap().versions.iter().last().unwrap().hash;
			self.init_with_version(Some(key_version))
		}

		pub fn init_delegated(self) -> Result<(Self, Public, H256), Error> {
			self.0.key_storage(0).remove(&Default::default()).unwrap();
			self.init_with_version(None)
		}

		pub fn init_with_isolated(self) -> Result<(Self, Public, H256), Error> {
			self.0.isolate(1);
			self.init()
		}

		pub fn session_at(&self, idx: usize) -> Arc<SessionImpl> {
			self.0.sessions(idx).ecdsa_signing_sessions.first().unwrap()
		}

		pub fn ensure_completed(&self) {
			self.0.loop_until(|| self.0.is_empty());
			assert!(self.session_at(0).wait().is_ok());
		}
	}
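	// A minimal sketch of a full ECDSA signing round over the in-memory cluster
	// (assumes this module's `MessageLoop`; cf. `complete_gen_ecdsa_sign_session`
	// below):
	//
	//	let (ml, _, message) = MessageLoop::new(5, 2).unwrap().init().unwrap();
	//	ml.0.loop_until(|| ml.0.is_empty());
	//	let signer_public = ml.0.key_storage(0).get(&Default::default()).unwrap().unwrap().public;
	//	assert!(verify_public(&signer_public, &ml.session_at(0).wait().unwrap(), &message).unwrap());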
	#[test]
	fn failed_gen_ecdsa_sign_session_when_threshold_is_too_low() {
		// ECDSA signing interpolates a product of two degree-t sharings, so it
		// needs at least 2*t+1 share owners; every case below has fewer than that
		let test_cases = [(1, 2), (2, 4), (3, 6), (4, 6)];
		for &(threshold, num_nodes) in &test_cases {
			assert_eq!(MessageLoop::new(num_nodes, threshold).unwrap().init().unwrap_err(),
				Error::ConsensusUnreachable);
		}
	}
@@ -1202,112 +1127,46 @@ mod tests {
	fn complete_gen_ecdsa_sign_session() {
		let test_cases = [(0, 1), (2, 5), (2, 6), (3, 11), (4, 11)];
		for &(threshold, num_nodes) in &test_cases {
			let (ml, _, message) = MessageLoop::new(num_nodes, threshold).unwrap().init().unwrap();
			ml.0.loop_until(|| ml.0.is_empty());

			let signer_public = ml.0.key_storage(0).get(&Default::default()).unwrap().unwrap().public;
			let signature = ml.session_at(0).wait().unwrap();
			assert!(verify_public(&signer_public, &signature, &message).unwrap());
		}
	}
	#[test]
	fn ecdsa_complete_signing_session_with_single_node_failing() {
		let (ml, requester, _) = MessageLoop::new(4, 1).unwrap().init().unwrap();

		// we need at least 3-of-4 nodes to agree to reach consensus
		// let's say 1 of 4 nodes disagrees
		ml.0.acl_storage(1).prohibit(public_to_address(&requester), Default::default());

		// then consensus is reachable, but a single node will disagree
		ml.ensure_completed();
	}
	#[test]
	fn ecdsa_complete_signing_session_with_acl_check_failed_on_master() {
		let (ml, requester, _) = MessageLoop::new(4, 1).unwrap().init().unwrap();

		// we need at least 3-of-4 nodes to agree to reach consensus
		// let's say 1 of 4 nodes (here: the master) disagrees
		ml.0.acl_storage(0).prohibit(public_to_address(&requester), Default::default());

		// then consensus is reachable, but a single node will disagree
		ml.ensure_completed();
	}
	#[test]
	fn ecdsa_signing_works_when_delegated_to_other_node() {
		MessageLoop::new(4, 1).unwrap().init_delegated().unwrap().0.ensure_completed();
	}

	#[test]
	fn ecdsa_signing_works_when_share_owners_are_isolated() {
		MessageLoop::new(6, 2).unwrap().init_with_isolated().unwrap().0.ensure_completed();
	}
}

View File

@@ -809,279 +809,150 @@ impl JobTransport for SigningJobTransport
mod tests {
	use std::sync::Arc;
	use std::str::FromStr;
	use std::collections::BTreeMap;
	use ethereum_types::{Address, H256};
	use ethkey::{self, Random, Generator, Public, Secret, public_to_address};
	use acl_storage::DummyAclStorage;
	use key_server_cluster::{SessionId, Requester, SessionMeta, Error, KeyStorage};
	use key_server_cluster::cluster::tests::MessageLoop as ClusterMessageLoop;
	use key_server_cluster::generation_session::tests::MessageLoop as GenerationMessageLoop;
	use key_server_cluster::math;
	use key_server_cluster::message::{SchnorrSigningMessage, SchnorrSigningConsensusMessage,
		ConsensusMessage, ConfirmConsensusInitialization, SchnorrSigningGenerationMessage, GenerationMessage,
		ConfirmInitialization, InitializeSession, SchnorrRequestPartialSignature};
	use key_server_cluster::signing_session_schnorr::{SessionImpl, SessionState, SessionParams};

	#[derive(Debug)]
	pub struct MessageLoop(pub ClusterMessageLoop);

	impl MessageLoop {
		pub fn new(num_nodes: usize, threshold: usize) -> Result<Self, Error> {
			let ml = GenerationMessageLoop::new(num_nodes).init(threshold)?;
			ml.0.loop_until(|| ml.0.is_empty()); // complete generation session

			Ok(MessageLoop(ml.0))
		}

		pub fn into_session(&self, at_node: usize) -> SessionImpl {
			let requester = Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(),
				&SessionId::default()).unwrap()));
			SessionImpl::new(SessionParams {
				meta: SessionMeta {
					id: SessionId::default(),
					self_node_id: self.0.node(at_node),
					master_node_id: self.0.node(0),
					threshold: self.0.key_storage(at_node).get(&Default::default()).unwrap().unwrap().threshold,
					configured_nodes_count: self.0.nodes().len(),
					connected_nodes_count: self.0.nodes().len(),
				},
				access_key: Random.generate().unwrap().secret().clone(),
				key_share: self.0.key_storage(at_node).get(&Default::default()).unwrap(),
				acl_storage: Arc::new(DummyAclStorage::default()),
				cluster: self.0.cluster(0).view().unwrap(),
				nonce: 0,
			}, requester).unwrap()
		}

		pub fn init_with_version(self, key_version: Option<H256>) -> Result<(Self, Public, H256), Error> {
			let message_hash = H256::random();
			let requester = Random.generate().unwrap();
			let signature = ethkey::sign(requester.secret(), &SessionId::default()).unwrap();
			self.0.cluster(0).client().new_schnorr_signing_session(
				Default::default(),
				signature.into(),
				key_version,
				message_hash).map(|_| (self, *requester.public(), message_hash))
		}

		pub fn init(self) -> Result<(Self, Public, H256), Error> {
			let key_version = self.key_version();
			self.init_with_version(Some(key_version))
		}

		pub fn init_delegated(self) -> Result<(Self, Public, H256), Error> {
			self.0.key_storage(0).remove(&Default::default()).unwrap();
			self.init_with_version(None)
		}

		pub fn init_with_isolated(self) -> Result<(Self, Public, H256), Error> {
			self.0.isolate(1);
			self.init()
		}

		pub fn init_without_share(self) -> Result<(Self, Public, H256), Error> {
			let key_version = self.key_version();
			self.0.key_storage(0).remove(&Default::default()).unwrap();
			self.init_with_version(Some(key_version))
		}

		pub fn session_at(&self, idx: usize) -> Arc<SessionImpl> {
			self.0.sessions(idx).schnorr_signing_sessions.first().unwrap()
		}

		pub fn ensure_completed(&self) {
			self.0.loop_until(|| self.0.is_empty());
			assert!(self.session_at(0).wait().is_ok());
		}

		pub fn key_version(&self) -> H256 {
			self.0.key_storage(0).get(&Default::default())
				.unwrap().unwrap().versions.iter().last().unwrap().hash
		}
	}
#[test] #[test]
fn schnorr_complete_gen_sign_session() { fn schnorr_complete_gen_sign_session() {
let test_cases = [(0, 1), (0, 5), (2, 5), (3, 5)]; let test_cases = [(0, 1), (0, 5), (2, 5), (3, 5)];
for &(threshold, num_nodes) in &test_cases { for &(threshold, num_nodes) in &test_cases {
let (gl, mut sl) = prepare_signing_sessions(threshold, num_nodes); let (ml, _, message) = MessageLoop::new(num_nodes, threshold).unwrap().init().unwrap();
ml.0.loop_until(|| ml.0.is_empty());
// run signing session let signer_public = ml.0.key_storage(0).get(&Default::default()).unwrap().unwrap().public;
let message_hash = H256::from(777); let signature = ml.session_at(0).wait().unwrap();
sl.master().initialize(sl.version.clone(), message_hash).unwrap(); assert!(math::verify_schnorr_signature(&signer_public, &signature, &message).unwrap());
while let Some((from, to, message)) = sl.take_message() {
sl.process_message((from, to, message)).unwrap();
}
// verify signature
let public = gl.master().joint_public_and_secret().unwrap().unwrap().0;
let signature = sl.master().wait().unwrap();
assert!(math::verify_schnorr_signature(&public, &signature, &message_hash).unwrap());
} }
} }
#[test] #[test]
fn schnorr_constructs_in_cluster_of_single_node() { fn schnorr_constructs_in_cluster_of_single_node() {
let mut nodes = BTreeMap::new(); MessageLoop::new(1, 0).unwrap().init().unwrap();
let self_node_id = Random.generate().unwrap().public().clone();
nodes.insert(self_node_id, Random.generate().unwrap().secret().clone());
match SessionImpl::new(SessionParams {
meta: SessionMeta {
id: SessionId::default(),
self_node_id: self_node_id.clone(),
master_node_id: self_node_id.clone(),
threshold: 0,
configured_nodes_count: 1,
connected_nodes_count: 1,
},
access_key: Random.generate().unwrap().secret().clone(),
key_share: Some(DocumentKeyShare {
author: Default::default(),
threshold: 0,
public: Default::default(),
common_point: Some(Random.generate().unwrap().public().clone()),
encrypted_point: Some(Random.generate().unwrap().public().clone()),
versions: vec![DocumentKeyShareVersion {
hash: Default::default(),
id_numbers: nodes,
secret_share: Random.generate().unwrap().secret().clone(),
}],
}),
acl_storage: Arc::new(DummyAclStorage::default()),
cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
nonce: 0,
}, Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()))) {
Ok(_) => (),
_ => panic!("unexpected"),
}
} }
#[test] #[test]
fn schnorr_fails_to_initialize_if_does_not_have_a_share() { fn schnorr_fails_to_initialize_if_does_not_have_a_share() {
let self_node_id = Random.generate().unwrap().public().clone(); assert!(MessageLoop::new(2, 1).unwrap().init_without_share().is_err());
let session = SessionImpl::new(SessionParams {
meta: SessionMeta {
id: SessionId::default(),
self_node_id: self_node_id.clone(),
master_node_id: self_node_id.clone(),
threshold: 0,
configured_nodes_count: 1,
connected_nodes_count: 1,
},
access_key: Random.generate().unwrap().secret().clone(),
key_share: None,
acl_storage: Arc::new(DummyAclStorage::default()),
cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
nonce: 0,
}, Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()))).unwrap();
assert_eq!(session.initialize(Default::default(), Default::default()), Err(Error::InvalidMessage));
} }
#[test] #[test]
fn schnorr_fails_to_initialize_if_threshold_is_wrong() { fn schnorr_fails_to_initialize_if_threshold_is_wrong() {
let mut nodes = BTreeMap::new(); let mut ml = MessageLoop::new(3, 2).unwrap();
let self_node_id = Random.generate().unwrap().public().clone(); ml.0.exclude(2);
nodes.insert(self_node_id.clone(), Random.generate().unwrap().secret().clone()); assert_eq!(ml.init().unwrap_err(), Error::ConsensusUnreachable);
nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
let session = SessionImpl::new(SessionParams {
meta: SessionMeta {
id: SessionId::default(),
self_node_id: self_node_id.clone(),
master_node_id: self_node_id.clone(),
threshold: 2,
configured_nodes_count: 1,
connected_nodes_count: 1,
},
access_key: Random.generate().unwrap().secret().clone(),
key_share: Some(DocumentKeyShare {
author: Default::default(),
threshold: 2,
public: Default::default(),
common_point: Some(Random.generate().unwrap().public().clone()),
encrypted_point: Some(Random.generate().unwrap().public().clone()),
versions: vec![DocumentKeyShareVersion {
hash: Default::default(),
id_numbers: nodes,
secret_share: Random.generate().unwrap().secret().clone(),
}],
}),
acl_storage: Arc::new(DummyAclStorage::default()),
cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
nonce: 0,
}, Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()))).unwrap();
assert_eq!(session.initialize(Default::default(), Default::default()), Err(Error::ConsensusUnreachable));
} }
#[test] #[test]
fn schnorr_fails_to_initialize_when_already_initialized() { fn schnorr_fails_to_initialize_when_already_initialized() {
let (_, sl) = prepare_signing_sessions(1, 3); let (ml, _, _) = MessageLoop::new(1, 0).unwrap().init().unwrap();
assert_eq!(sl.master().initialize(sl.version.clone(), 777.into()), Ok(())); assert_eq!(ml.session_at(0).initialize(ml.key_version(), 777.into()),
assert_eq!(sl.master().initialize(sl.version.clone(), 777.into()), Err(Error::InvalidStateForRequest)); Err(Error::InvalidStateForRequest));
} }
#[test] #[test]
fn schnorr_does_not_fail_when_consensus_message_received_after_consensus_established() { fn schnorr_does_not_fail_when_consensus_message_received_after_consensus_established() {
let (_, mut sl) = prepare_signing_sessions(1, 3); let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init().unwrap();
sl.master().initialize(sl.version.clone(), 777.into()).unwrap();
// consensus is established // consensus is established
sl.run_until(|sl| sl.master().state() == SessionState::SessionKeyGeneration).unwrap(); let session = ml.session_at(0);
ml.0.loop_until(|| session.state() == SessionState::SessionKeyGeneration);
// but 3rd node continues to send its messages // but 3rd node continues to send its messages
// this should not fail session // this should not fail session
let consensus_group = sl.master().data.lock().consensus_session.select_consensus_group().unwrap().clone(); let consensus_group = session.data.lock().consensus_session.select_consensus_group().unwrap().clone();
let mut had_3rd_message = false; let mut had_3rd_message = false;
while let Some((from, to, message)) = sl.take_message() { while let Some((from, to, message)) = ml.0.take_message() {
if !consensus_group.contains(&from) { if !consensus_group.contains(&from) {
had_3rd_message = true; had_3rd_message = true;
sl.process_message((from, to, message)).unwrap(); ml.0.process_message(from, to, message);
} }
} }
assert!(had_3rd_message); assert!(had_3rd_message);
@@ -1089,10 +960,11 @@ mod tests {
 	#[test]
 	fn schnorr_fails_when_consensus_message_is_received_when_not_initialized() {
-		let (_, sl) = prepare_signing_sessions(1, 3);
-		assert_eq!(sl.master().on_consensus_message(sl.nodes.keys().nth(1).unwrap(), &SchnorrSigningConsensusMessage {
+		let ml = MessageLoop::new(3, 1).unwrap();
+		let session = ml.into_session(0);
+		assert_eq!(session.on_consensus_message(&ml.0.node(1), &SchnorrSigningConsensusMessage {
 			session: SessionId::default().into(),
-			sub_session: sl.master().core.access_key.clone().into(),
+			sub_session: session.core.access_key.clone().into(),
 			session_nonce: 0,
 			message: ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
 				is_confirmed: true,
@@ -1102,10 +974,11 @@ mod tests {
 	#[test]
 	fn schnorr_fails_when_generation_message_is_received_when_not_initialized() {
-		let (_, sl) = prepare_signing_sessions(1, 3);
-		assert_eq!(sl.master().on_generation_message(sl.nodes.keys().nth(1).unwrap(), &SchnorrSigningGenerationMessage {
+		let ml = MessageLoop::new(3, 1).unwrap();
+		let session = ml.into_session(0);
+		assert_eq!(session.on_generation_message(&ml.0.node(1), &SchnorrSigningGenerationMessage {
			session: SessionId::default().into(),
-			sub_session: sl.master().core.access_key.clone().into(),
+			sub_session: session.core.access_key.clone().into(),
 			session_nonce: 0,
 			message: GenerationMessage::ConfirmInitialization(ConfirmInitialization {
 				session: SessionId::default().into(),
@@ -1117,16 +990,16 @@ mod tests {
 	#[test]
 	fn schnorr_fails_when_generation_sesson_is_initialized_by_slave_node() {
-		let (_, mut sl) = prepare_signing_sessions(1, 3);
-		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();
-		sl.run_until(|sl| sl.master().state() == SessionState::SessionKeyGeneration).unwrap();
+		let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init().unwrap();
+		let session = ml.session_at(0);
+		ml.0.loop_until(|| session.state() == SessionState::SessionKeyGeneration);
 
-		let slave2_id = sl.nodes.keys().nth(2).unwrap().clone();
-		let slave1 = &sl.nodes.values().nth(1).unwrap().session;
-		assert_eq!(slave1.on_generation_message(&slave2_id, &SchnorrSigningGenerationMessage {
+		let slave2_id = ml.0.node(2);
+		let slave1_session = ml.session_at(1);
+		assert_eq!(slave1_session.on_generation_message(&slave2_id, &SchnorrSigningGenerationMessage {
 			session: SessionId::default().into(),
-			sub_session: sl.master().core.access_key.clone().into(),
+			sub_session: session.core.access_key.clone().into(),
 			session_nonce: 0,
 			message: GenerationMessage::InitializeSession(InitializeSession {
 				session: SessionId::default().into(),
@@ -1143,11 +1016,11 @@ mod tests {
 	#[test]
 	fn schnorr_fails_when_signature_requested_when_not_initialized() {
-		let (_, sl) = prepare_signing_sessions(1, 3);
-		let slave1 = &sl.nodes.values().nth(1).unwrap().session;
-		assert_eq!(slave1.on_partial_signature_requested(sl.nodes.keys().nth(0).unwrap(), &SchnorrRequestPartialSignature {
+		let ml = MessageLoop::new(3, 1).unwrap();
+		let session = ml.into_session(1);
+		assert_eq!(session.on_partial_signature_requested(&ml.0.node(0), &SchnorrRequestPartialSignature {
 			session: SessionId::default().into(),
-			sub_session: sl.master().core.access_key.clone().into(),
+			sub_session: session.core.access_key.clone().into(),
 			session_nonce: 0,
 			request_id: Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap().into(),
 			message_hash: H256::default().into(),
@@ -1157,10 +1030,11 @@ mod tests {
 	#[test]
 	fn schnorr_fails_when_signature_requested_by_slave_node() {
-		let (_, sl) = prepare_signing_sessions(1, 3);
-		assert_eq!(sl.master().on_partial_signature_requested(sl.nodes.keys().nth(1).unwrap(), &SchnorrRequestPartialSignature {
+		let ml = MessageLoop::new(3, 1).unwrap();
+		let session = ml.into_session(0);
+		assert_eq!(session.on_partial_signature_requested(&ml.0.node(1), &SchnorrRequestPartialSignature {
 			session: SessionId::default().into(),
-			sub_session: sl.master().core.access_key.clone().into(),
+			sub_session: session.core.access_key.clone().into(),
 			session_nonce: 0,
 			request_id: Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap().into(),
 			message_hash: H256::default().into(),
@@ -1170,123 +1044,68 @@ mod tests {
 	#[test]
 	fn schnorr_failed_signing_session() {
-		let (_, mut sl) = prepare_signing_sessions(1, 3);
-		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();
+		let (ml, requester, _) = MessageLoop::new(3, 1).unwrap().init().unwrap();
 
 		// we need at least 2-of-3 nodes to agree to reach consensus
 		// let's say 2 of 3 nodes disagree
-		sl.acl_storages[1].prohibit(public_to_address(sl.requester.public()), SessionId::default());
-		sl.acl_storages[2].prohibit(public_to_address(sl.requester.public()), SessionId::default());
+		ml.0.acl_storage(1).prohibit(public_to_address(&requester), SessionId::default());
+		ml.0.acl_storage(2).prohibit(public_to_address(&requester), SessionId::default());
 
 		// then consensus is unreachable
-		assert_eq!(sl.run_until(|_| false), Err(Error::ConsensusUnreachable));
+		ml.0.loop_until(|| ml.0.is_empty());
+		assert_eq!(ml.session_at(0).wait().unwrap_err(), Error::ConsensusUnreachable);
 	}
 
 	#[test]
 	fn schnorr_complete_signing_session_with_single_node_failing() {
-		let (_, mut sl) = prepare_signing_sessions(1, 3);
-		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();
+		let (ml, requester, _) = MessageLoop::new(3, 1).unwrap().init().unwrap();
 
 		// we need at least 2-of-3 nodes to agree to reach consensus
 		// let's say 1 of 3 nodes disagrees
-		sl.acl_storages[1].prohibit(public_to_address(sl.requester.public()), SessionId::default());
+		ml.0.acl_storage(1).prohibit(public_to_address(&requester), SessionId::default());
 
 		// then consensus is reachable, but a single node will disagree
-		while let Some((from, to, message)) = sl.take_message() {
-			sl.process_message((from, to, message)).unwrap();
-		}
-
-		let data = sl.master().data.lock();
-		match data.result {
-			Some(Ok(_)) => (),
-			_ => unreachable!(),
-		}
+		ml.ensure_completed();
 	}
 
 	#[test]
 	fn schnorr_complete_signing_session_with_acl_check_failed_on_master() {
-		let (_, mut sl) = prepare_signing_sessions(1, 3);
-		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();
+		let (ml, requester, _) = MessageLoop::new(3, 1).unwrap().init().unwrap();
 
 		// we need at least 2-of-3 nodes to agree to reach consensus
 		// let's say 1 of 3 nodes disagrees
-		sl.acl_storages[0].prohibit(public_to_address(sl.requester.public()), SessionId::default());
+		ml.0.acl_storage(0).prohibit(public_to_address(&requester), SessionId::default());
 
 		// then consensus is reachable, but a single node will disagree
-		while let Some((from, to, message)) = sl.take_message() {
-			sl.process_message((from, to, message)).unwrap();
-		}
-
-		let data = sl.master().data.lock();
-		match data.result {
-			Some(Ok(_)) => (),
-			_ => unreachable!(),
-		}
+		ml.ensure_completed();
 	}
 
 	#[test]
 	fn schnorr_signing_message_fails_when_nonce_is_wrong() {
-		let (_, sl) = prepare_signing_sessions(1, 3);
-		assert_eq!(sl.master().process_message(sl.nodes.keys().nth(1).unwrap(), &SchnorrSigningMessage::SchnorrSigningGenerationMessage(SchnorrSigningGenerationMessage {
+		let ml = MessageLoop::new(3, 1).unwrap();
+		let session = ml.into_session(1);
+		let msg = SchnorrSigningMessage::SchnorrSigningGenerationMessage(SchnorrSigningGenerationMessage {
 			session: SessionId::default().into(),
-			sub_session: sl.master().core.access_key.clone().into(),
+			sub_session: session.core.access_key.clone().into(),
 			session_nonce: 10,
 			message: GenerationMessage::ConfirmInitialization(ConfirmInitialization {
 				session: SessionId::default().into(),
 				session_nonce: 0,
 				derived_point: Public::default().into(),
 			}),
-		})), Err(Error::ReplayProtection));
+		});
+		assert_eq!(session.process_message(&ml.0.node(1), &msg), Err(Error::ReplayProtection));
 	}
 
 	#[test]
 	fn schnorr_signing_works_when_delegated_to_other_node() {
-		let (_, mut sl) = prepare_signing_sessions(1, 3);
-
-		// let's say node1 doesn't have a share && delegates decryption request to node0
-		// initially session is created on node1 => node1 is master for itself, but for other nodes node0 is still master
-		let actual_master = sl.nodes.keys().nth(0).cloned().unwrap();
-		let requested_node = sl.nodes.keys().skip(1).nth(0).cloned().unwrap();
-		let version = sl.nodes[&actual_master].key_storage.get(&Default::default()).unwrap().unwrap().last_version().unwrap().hash.clone();
-		sl.nodes[&requested_node].key_storage.remove(&Default::default()).unwrap();
-		sl.nodes.get_mut(&requested_node).unwrap().session.core.key_share = None;
-		sl.nodes.get_mut(&requested_node).unwrap().session.core.meta.master_node_id = sl.nodes[&requested_node].session.core.meta.self_node_id.clone();
-		sl.nodes[&requested_node].session.data.lock().consensus_session.consensus_job_mut().executor_mut().set_requester(
-			sl.nodes[&actual_master].session.data.lock().consensus_session.consensus_job().executor().requester().unwrap().clone()
-		);
-
-		// now let's try to do a decryption
-		sl.nodes[&requested_node].session.delegate(actual_master, version, Default::default()).unwrap();
-
-		// then consensus is reachable, but a single node will disagree
-		while let Some((from, to, message)) = sl.take_message() {
-			sl.process_message((from, to, message)).unwrap();
-		}
+		let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init_delegated().unwrap();
+		ml.ensure_completed();
 	}
 
 	#[test]
 	fn schnorr_signing_works_when_share_owners_are_isolated() {
-		let (_, mut sl) = prepare_signing_sessions(1, 3);
-
-		// we need 2 out of 3 nodes to agree to do a decryption
-		// let's say that 1 of these nodes (master) is isolated
-		let isolated_node_id = sl.nodes.keys().skip(2).nth(0).cloned().unwrap();
-		for node in sl.nodes.values() {
-			node.cluster.remove_node(&isolated_node_id);
-		}
-
-		// now let's try to do a signing
-		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();
-
-		// then consensus is reachable, but a single node will disagree
-		while let Some((from, to, message)) = sl.take_message() {
-			sl.process_message((from, to, message)).unwrap();
-		}
-
-		let data = sl.master().data.lock();
-		match data.result {
-			Some(Ok(_)) => (),
-			_ => unreachable!(),
-		}
+		let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init_with_isolated().unwrap();
+		ml.ensure_completed();
 	}
 }

File diff suppressed because it is too large
@@ -0,0 +1,176 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::BTreeSet;
use std::sync::Arc;
use key_server_cluster::{Error, NodeId};
use key_server_cluster::message::Message;
/// Connection to the single node. Provides basic information about connected node and
/// allows sending messages to this node.
pub trait Connection: Send + Sync {
/// Is this inbound connection? This only matters when both nodes are simultaneously establishing
/// two connections to each other. The agreement is that the inbound connection from the node with
/// lower NodeId is used and the other connection is closed.
fn is_inbound(&self) -> bool;
/// Returns id of the connected node.
fn node_id(&self) -> &NodeId;
/// Returns 'address' of the node to use in traces.
fn node_address(&self) -> String;
/// Send message to the connected node.
fn send_message(&self, message: Message);
}
/// Connections manager. Responsible for keeping us connected to all required nodes.
pub trait ConnectionManager: 'static + Send + Sync {
/// Returns shared reference to connections provider.
fn provider(&self) -> Arc<ConnectionProvider>;
/// Try to reach all disconnected nodes immediately. This method is exposed mostly for
/// tests, where all 'nodes' first start listening for incoming connections and only
/// after that actually start connecting to each other.
fn connect(&self);
}
/// Connections provider. Holds all active connections and the set of nodes that we need to
/// connect to. At any moment a connection could be lost and the set of connected/disconnected
/// nodes could change (on behalf of the connection manager).
/// Clone operation should be cheap (Arc).
pub trait ConnectionProvider: Send + Sync {
/// Returns the set of currently connected nodes. Error is returned when our node is
/// not a part of the cluster ('isolated' node).
fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error>;
/// Returns the set of currently disconnected nodes.
fn disconnected_nodes(&self) -> BTreeSet<NodeId>;
/// Returns the reference to the active node connection or None if the node is not connected.
fn connection(&self, node: &NodeId) -> Option<Arc<Connection>>;
}
#[cfg(test)]
pub mod tests {
use std::collections::{BTreeSet, VecDeque};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use parking_lot::Mutex;
use key_server_cluster::{Error, NodeId};
use key_server_cluster::message::Message;
use super::{ConnectionManager, Connection, ConnectionProvider};
/// Shared messages queue.
pub type MessagesQueue = Arc<Mutex<VecDeque<(NodeId, NodeId, Message)>>>;
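// Each entry is a (from, to, message) triple: TestConnection::send_message pushes onto
// this shared queue and the test message loop pops entries off it to route them.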
/// Single node connections.
pub struct TestConnections {
node: NodeId,
is_isolated: AtomicBool,
connected_nodes: Mutex<BTreeSet<NodeId>>,
disconnected_nodes: Mutex<BTreeSet<NodeId>>,
messages: MessagesQueue,
}
/// Single connection.
pub struct TestConnection {
from: NodeId,
to: NodeId,
messages: MessagesQueue,
}
impl TestConnections {
pub fn isolate(&self) {
let connected_nodes = ::std::mem::replace(&mut *self.connected_nodes.lock(), Default::default());
self.is_isolated.store(true, Ordering::Relaxed);
self.disconnected_nodes.lock().extend(connected_nodes)
}
pub fn disconnect(&self, node: NodeId) {
self.connected_nodes.lock().remove(&node);
self.disconnected_nodes.lock().insert(node);
}
pub fn exclude(&self, node: NodeId) {
self.connected_nodes.lock().remove(&node);
self.disconnected_nodes.lock().remove(&node);
}
pub fn include(&self, node: NodeId) {
self.connected_nodes.lock().insert(node);
}
}
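// Note the semantics above: `disconnect` keeps the node among the known nodes (it moves
// to `disconnected_nodes` and may later be re-`include`d), while `exclude` drops the node
// from both sets, as if it were never part of the cluster.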
impl ConnectionManager for Arc<TestConnections> {
fn provider(&self) -> Arc<ConnectionProvider> {
self.clone()
}
fn connect(&self) {}
}
impl ConnectionProvider for TestConnections {
fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error> {
match self.is_isolated.load(Ordering::Relaxed) {
false => Ok(self.connected_nodes.lock().clone()),
true => Err(Error::NodeDisconnected),
}
}
fn disconnected_nodes(&self) -> BTreeSet<NodeId> {
self.disconnected_nodes.lock().clone()
}
fn connection(&self, node: &NodeId) -> Option<Arc<Connection>> {
match self.connected_nodes.lock().contains(node) {
true => Some(Arc::new(TestConnection {
from: self.node,
to: *node,
messages: self.messages.clone(),
})),
false => None,
}
}
}
impl Connection for TestConnection {
fn is_inbound(&self) -> bool {
false
}
fn node_id(&self) -> &NodeId {
&self.to
}
fn node_address(&self) -> String {
format!("{}", self.to)
}
fn send_message(&self, message: Message) {
self.messages.lock().push_back((self.from, self.to, message))
}
}
pub fn new_test_connections(
messages: MessagesQueue,
node: NodeId,
mut nodes: BTreeSet<NodeId>
) -> Arc<TestConnections> {
let is_isolated = !nodes.remove(&node);
Arc::new(TestConnections {
node,
is_isolated: AtomicBool::new(is_isolated),
connected_nodes: Mutex::new(nodes),
disconnected_nodes: Default::default(),
messages,
})
}
}
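// A minimal usage sketch for the in-memory primitives above. Everything below is
// illustrative: the node ids are freshly generated keys and this extra module is an
// example, not part of the transport API itself.
#[cfg(test)]
mod usage_sketch {
	use std::collections::{BTreeSet, VecDeque};
	use std::sync::Arc;
	use parking_lot::Mutex;
	use ethkey::{Generator, Random};
	use super::ConnectionProvider;
	use super::tests::{MessagesQueue, new_test_connections};

	#[test]
	fn wires_three_nodes_in_memory() {
		// three random node ids (a NodeId is a public key here)
		let nodes: BTreeSet<_> = (0..3)
			.map(|_| *Random.generate().unwrap().public())
			.collect();

		// one shared queue; every TestConnection pushes (from, to, message) into it
		let queue: MessagesQueue = Arc::new(Mutex::new(VecDeque::new()));

		// one TestConnections per node, all sharing the same queue
		let connections: Vec<_> = nodes.iter()
			.map(|node| new_test_connections(queue.clone(), *node, nodes.clone()))
			.collect();

		// every node sees the two others as connected and none as disconnected
		for c in &connections {
			assert_eq!(c.connected_nodes().unwrap().len(), 2);
			assert!(c.disconnected_nodes().is_empty());
		}
	}
}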

@@ -0,0 +1,539 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::{BTreeMap, BTreeSet};
use std::collections::btree_map::Entry;
use std::io;
use std::net::{SocketAddr, IpAddr};
use std::sync::Arc;
use std::time::{Duration, Instant};
use futures::{future, Future, Stream};
use parking_lot::{Mutex, RwLock};
use tokio::net::{TcpListener, TcpStream};
use tokio::timer::{Interval, timeout::Error as TimeoutError};
use tokio_io::IoFuture;
use ethkey::KeyPair;
use parity_runtime::Executor;
use key_server_cluster::{Error, NodeId, ClusterConfiguration, NodeKeyPair};
use key_server_cluster::cluster_connections::{ConnectionProvider, Connection, ConnectionManager};
use key_server_cluster::connection_trigger::{Maintain, ConnectionTrigger};
use key_server_cluster::cluster_message_processor::MessageProcessor;
use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream,
read_encrypted_message, WriteMessage, write_encrypted_message};
use key_server_cluster::message::{self, ClusterMessage, Message};
use key_server_cluster::net::{accept_connection as io_accept_connection,
connect as io_connect, Connection as IoConnection};
/// Empty future.
pub type BoxedEmptyFuture = Box<Future<Item = (), Error = ()> + Send>;
/// Maintain interval (seconds). Every MAINTAIN_INTERVAL seconds the node:
/// 1) checks if connected nodes are responding to KeepAlive messages
/// 2) tries to connect to disconnected nodes
/// 3) checks if enc/dec sessions have timed out
const MAINTAIN_INTERVAL: u64 = 10;
/// When no messages have been received from a node within KEEP_ALIVE_SEND_INTERVAL,
/// we must send KeepAlive message to the node to check if it still responds to messages.
const KEEP_ALIVE_SEND_INTERVAL: Duration = Duration::from_secs(30);
/// When no messages have been received from a node within KEEP_ALIVE_DISCONNECT_INTERVAL,
/// we must treat this node as non-responding && disconnect from it.
const KEEP_ALIVE_DISCONNECT_INTERVAL: Duration = Duration::from_secs(60);
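// With the values above: a node silent for more than 30 seconds gets a KeepAlive probe on
// the next maintain tick (every 10 seconds), and one silent for more than 60 seconds is
// treated as non-responding and disconnected (see net_keep_alive below).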
/// Network connection manager configuration.
pub struct NetConnectionsManagerConfig {
/// Allow connecting to 'higher' nodes.
pub allow_connecting_to_higher_nodes: bool,
/// Interface to listen to.
pub listen_address: (String, u16),
/// True if we should automatically start a servers set change session when the servers set changes.
/// This will only work when the servers set is configured using the KeyServerSet contract.
pub auto_migrate_enabled: bool,
}
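// An illustrative configuration (the values are examples only):
//
//   let net_config = NetConnectionsManagerConfig {
//       allow_connecting_to_higher_nodes: true,
//       listen_address: ("0.0.0.0".into(), 8083),
//       auto_migrate_enabled: false,
//   };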
/// Network connections manager.
pub struct NetConnectionsManager {
/// Address we're listening for incoming connections.
listen_address: SocketAddr,
/// Shared cluster connections data reference.
data: Arc<NetConnectionsData>,
}
/// Network connections data. Shared among NetConnectionsManager and spawned futures.
struct NetConnectionsData {
/// Allow connecting to 'higher' nodes.
allow_connecting_to_higher_nodes: bool,
/// Reference to tokio task executor.
executor: Executor,
/// Key pair of this node.
self_key_pair: Arc<NodeKeyPair>,
/// Network messages processor.
message_processor: Arc<MessageProcessor>,
/// Connections trigger.
trigger: Mutex<Box<ConnectionTrigger>>,
/// Mutable connection data.
container: Arc<RwLock<NetConnectionsContainer>>,
}
/// Network connections container. This is the only mutable data of NetConnectionsManager.
/// The set of nodes is mutated by the connection trigger and the connections set is also
/// mutated by spawned futures.
pub struct NetConnectionsContainer {
/// Is this node isolated from cluster?
pub is_isolated: bool,
/// Current key servers set.
pub nodes: BTreeMap<NodeId, SocketAddr>,
/// Active connections to key servers.
pub connections: BTreeMap<NodeId, Arc<NetConnection>>,
}
/// Network connection to single key server node.
pub struct NetConnection {
executor: Executor,
/// Id of the peer node.
node_id: NodeId,
/// Address of the peer node.
node_address: SocketAddr,
/// Is this inbound (true) or outbound (false) connection?
is_inbound: bool,
/// Key pair that is used to encrypt connection's messages.
key: KeyPair,
/// Last message time.
last_message_time: RwLock<Instant>,
/// Underlying TCP stream.
stream: SharedTcpStream,
}
impl NetConnectionsManager {
/// Create new network connections manager.
pub fn new(
executor: Executor,
message_processor: Arc<MessageProcessor>,
trigger: Box<ConnectionTrigger>,
container: Arc<RwLock<NetConnectionsContainer>>,
config: &ClusterConfiguration,
net_config: NetConnectionsManagerConfig,
) -> Result<Self, Error> {
let listen_address = make_socket_address(
&net_config.listen_address.0,
net_config.listen_address.1)?;
Ok(NetConnectionsManager {
listen_address,
data: Arc::new(NetConnectionsData {
allow_connecting_to_higher_nodes: net_config.allow_connecting_to_higher_nodes,
executor,
message_processor,
self_key_pair: config.self_key_pair.clone(),
trigger: Mutex::new(trigger),
container,
}),
})
}
/// Start listening for connections and schedule connections maintenance.
pub fn start(&self) -> Result<(), Error> {
net_listen(&self.listen_address, self.data.clone())?;
net_schedule_maintain(self.data.clone());
Ok(())
}
}
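// Typical wiring (a sketch; `executor`, `message_processor`, `trigger` and `container`
// are assumed to come from the cluster bootstrap code):
//
//   let manager = NetConnectionsManager::new(executor, message_processor, trigger,
//       container, &config, net_config)?;
//   manager.start()?;   // bind the listener and schedule periodic maintenance
//   manager.connect();  // optionally force-dial all disconnected nodes right away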
impl ConnectionManager for NetConnectionsManager {
fn provider(&self) -> Arc<ConnectionProvider> {
self.data.container.clone()
}
fn connect(&self) {
net_connect_disconnected(self.data.clone());
}
}
impl ConnectionProvider for RwLock<NetConnectionsContainer> {
fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error> {
let connections = self.read();
if connections.is_isolated {
return Err(Error::NodeDisconnected);
}
Ok(connections.connections.keys().cloned().collect())
}
fn disconnected_nodes(&self) -> BTreeSet<NodeId> {
let connections = self.read();
connections.nodes.keys()
.filter(|node_id| !connections.connections.contains_key(node_id))
.cloned()
.collect()
}
fn connection(&self, node: &NodeId) -> Option<Arc<Connection>> {
match self.read().connections.get(node).cloned() {
Some(connection) => Some(connection),
None => None,
}
}
}
impl NetConnection {
/// Create new connection.
pub fn new(executor: Executor, is_inbound: bool, connection: IoConnection) -> NetConnection {
NetConnection {
executor,
node_id: connection.node_id,
node_address: connection.address,
is_inbound: is_inbound,
stream: connection.stream,
key: connection.key,
last_message_time: RwLock::new(Instant::now()),
}
}
/// Get last message time.
pub fn last_message_time(&self) -> Instant {
*self.last_message_time.read()
}
/// Update last message time
pub fn set_last_message_time(&self, last_message_time: Instant) {
*self.last_message_time.write() = last_message_time
}
/// Returns future that sends encrypted message over this connection.
pub fn send_message_future(&self, message: Message) -> WriteMessage<SharedTcpStream> {
write_encrypted_message(self.stream.clone(), &self.key, message)
}
/// Returns future that reads encrypted message from this connection.
pub fn read_message_future(&self) -> ReadMessage<SharedTcpStream> {
read_encrypted_message(self.stream.clone(), self.key.clone())
}
}
impl Connection for NetConnection {
fn is_inbound(&self) -> bool {
self.is_inbound
}
fn node_id(&self) -> &NodeId {
&self.node_id
}
fn node_address(&self) -> String {
format!("{}", self.node_address)
}
fn send_message(&self, message: Message) {
execute(&self.executor, self.send_message_future(message).then(|_| Ok(())));
}
}
impl NetConnectionsData {
/// Returns the list of currently active connections.
pub fn active_connections(&self) -> Vec<Arc<NetConnection>> {
self.container.read().connections.values().cloned().collect()
}
/// Returns the list of disconnected nodes together with their addresses.
pub fn disconnected_nodes(&self) -> Vec<(NodeId, SocketAddr)> {
let container = self.container.read();
container.nodes.iter()
.filter(|(node_id, _)| !container.connections.contains_key(node_id))
.map(|(node_id, addr)| (*node_id, *addr))
.collect()
}
/// Try to insert new connection. Returns true if connection has been inserted.
/// Returns false (and ignores the connection) if:
/// - we do not expect a connection from this node
/// - we are already connected to the node and the existing connection 'supersedes'
/// the new connection by agreement
pub fn insert(&self, connection: Arc<NetConnection>) -> bool {
let node = *connection.node_id();
let mut container = self.container.write();
if !container.nodes.contains_key(&node) {
trace!(target: "secretstore_net", "{}: ignoring unknown connection from {} at {}",
self.self_key_pair.public(), node, connection.node_address());
return false;
}
if container.connections.contains_key(&node) {
// we have already connected to the same node
// the agreement is that node with lower id must establish connection to node with higher id
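// e.g. if our public key sorts below the peer's id, we are the dialer by agreement,
// so a duplicate inbound connection from that peer is rejected (and vice versa)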
if (*self.self_key_pair.public() < node && connection.is_inbound())
|| (*self.self_key_pair.public() > node && !connection.is_inbound()) {
return false;
}
}
trace!(target: "secretstore_net",
"{}: inserting connection to {} at {}. Connected to {} of {} nodes",
self.self_key_pair.public(), node, connection.node_address(),
container.connections.len() + 1, container.nodes.len());
container.connections.insert(node, connection);
true
}
/// Tries to remove connection. Returns true if connection has been removed.
/// Returns false if we do not know this connection.
pub fn remove(&self, connection: &NetConnection) -> bool {
let node_id = *connection.node_id();
let is_inbound = connection.is_inbound();
let mut container = self.container.write();
if let Entry::Occupied(entry) = container.connections.entry(node_id) {
if entry.get().is_inbound() != is_inbound {
return false;
}
trace!(target: "secretstore_net", "{}: removing connection to {} at {}",
self.self_key_pair.public(), node_id, entry.get().node_address());
entry.remove_entry();
true
} else {
false
}
}
}
/// Listen for incoming connections.
fn net_listen(
listen_address: &SocketAddr,
data: Arc<NetConnectionsData>,
) -> Result<(), Error> {
execute(&data.executor, net_listen_future(listen_address, data.clone())?);
Ok(())
}
/// Future that listens for incoming connections.
fn net_listen_future(
listen_address: &SocketAddr,
data: Arc<NetConnectionsData>,
) -> Result<BoxedEmptyFuture, Error> {
Ok(Box::new(TcpListener::bind(listen_address)?
.incoming()
.and_then(move |stream| {
net_accept_connection(data.clone(), stream);
Ok(())
})
.for_each(|_| Ok(()))
.then(|_| future::ok(()))))
}
/// Accept incoming connection.
fn net_accept_connection(
data: Arc<NetConnectionsData>,
stream: TcpStream,
) {
execute(&data.executor, net_accept_connection_future(data.clone(), stream));
}
/// Accept incoming connection future.
fn net_accept_connection_future(data: Arc<NetConnectionsData>, stream: TcpStream) -> BoxedEmptyFuture {
Box::new(io_accept_connection(stream, data.self_key_pair.clone())
.then(move |result| net_process_connection_result(data, None, result))
.then(|_| future::ok(())))
}
/// Connect to remote node.
fn net_connect(
data: Arc<NetConnectionsData>,
remote: SocketAddr,
) {
execute(&data.executor, net_connect_future(data.clone(), remote));
}
/// Connect to remote node future.
fn net_connect_future(
data: Arc<NetConnectionsData>,
remote: SocketAddr,
) -> BoxedEmptyFuture {
let disconnected_nodes = data.container.disconnected_nodes();
Box::new(io_connect(&remote, data.self_key_pair.clone(), disconnected_nodes)
.then(move |result| net_process_connection_result(data, Some(remote), result))
.then(|_| future::ok(())))
}
/// Process network connection result.
fn net_process_connection_result(
data: Arc<NetConnectionsData>,
outbound_addr: Option<SocketAddr>,
result: Result<DeadlineStatus<Result<IoConnection, Error>>, TimeoutError<io::Error>>,
) -> IoFuture<Result<(), Error>> {
match result {
Ok(DeadlineStatus::Meet(Ok(connection))) => {
let connection = Arc::new(NetConnection::new(data.executor.clone(), outbound_addr.is_none(), connection));
if data.insert(connection.clone()) {
let maintain_action = data.trigger.lock().on_connection_established(connection.node_id());
maintain_connection_trigger(data.clone(), maintain_action);
return net_process_connection_messages(data, connection);
}
},
Ok(DeadlineStatus::Meet(Err(err))) => {
warn!(target: "secretstore_net", "{}: protocol error '{}' when establishing {} connection{}",
data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
},
Ok(DeadlineStatus::Timeout) => {
warn!(target: "secretstore_net", "{}: timeout when establishing {} connection{}",
data.self_key_pair.public(), if outbound_addr.is_some() { "outbound" } else { "inbound" },
outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
},
Err(err) => {
warn!(target: "secretstore_net", "{}: network error '{}' when establishing {} connection{}",
data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
},
}
Box::new(future::ok(Ok(())))
}
/// Process connection messages.
fn net_process_connection_messages(
data: Arc<NetConnectionsData>,
connection: Arc<NetConnection>,
) -> IoFuture<Result<(), Error>> {
Box::new(connection
.read_message_future()
.then(move |result|
match result {
Ok((_, Ok(message))) => {
connection.set_last_message_time(Instant::now());
data.message_processor.process_connection_message(connection.clone(), message);
// continue serving connection
let process_messages_future = net_process_connection_messages(
data.clone(), connection).then(|_| Ok(()));
execute(&data.executor, process_messages_future);
Box::new(future::ok(Ok(())))
},
Ok((_, Err(err))) => {
warn!(target: "secretstore_net", "{}: protocol error '{}' when reading message from node {}",
data.self_key_pair.public(), err, connection.node_id());
// continue serving connection
let process_messages_future = net_process_connection_messages(
data.clone(), connection).then(|_| Ok(()));
execute(&data.executor, process_messages_future);
Box::new(future::ok(Err(err)))
},
Err(err) => {
let node_id = *connection.node_id();
warn!(target: "secretstore_net", "{}: network error '{}' when reading message from node {}",
data.self_key_pair.public(), err, node_id);
// close connection
if data.remove(&*connection) {
let maintain_action = data.trigger.lock().on_connection_closed(&node_id);
maintain_connection_trigger(data, maintain_action);
}
Box::new(future::err(err))
},
}
))
}
/// Schedule connections maintenance.
fn net_schedule_maintain(data: Arc<NetConnectionsData>) {
let closure_data = data.clone();
execute(&data.executor, Interval::new_interval(Duration::new(MAINTAIN_INTERVAL, 0))
.and_then(move |_| Ok(net_maintain(closure_data.clone())))
.for_each(|_| Ok(()))
.then(|_| future::ok(())));
}
/// Maintain network connections.
fn net_maintain(data: Arc<NetConnectionsData>) {
trace!(target: "secretstore_net", "{}: executing maintain procedures", data.self_key_pair.public());
update_nodes_set(data.clone());
data.message_processor.maintain_sessions();
net_keep_alive(data.clone());
net_connect_disconnected(data);
}
/// Send keep alive messages to remote nodes.
fn net_keep_alive(data: Arc<NetConnectionsData>) {
let now = Instant::now();
let active_connections = data.active_connections();
for connection in active_connections {
let last_message_diff = now - connection.last_message_time();
if last_message_diff > KEEP_ALIVE_DISCONNECT_INTERVAL {
warn!(target: "secretstore_net", "{}: keep alive timeout for node {}",
data.self_key_pair.public(), connection.node_id());
let node_id = *connection.node_id();
if data.remove(&*connection) {
let maintain_action = data.trigger.lock().on_connection_closed(&node_id);
maintain_connection_trigger(data.clone(), maintain_action);
}
data.message_processor.process_disconnect(&node_id);
}
else if last_message_diff > KEEP_ALIVE_SEND_INTERVAL {
connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {})));
}
}
}
/// Connect disconnected nodes.
fn net_connect_disconnected(data: Arc<NetConnectionsData>) {
let disconnected_nodes = data.disconnected_nodes();
for (node_id, address) in disconnected_nodes {
if data.allow_connecting_to_higher_nodes || *data.self_key_pair.public() < node_id {
net_connect(data.clone(), address);
}
}
}
/// Schedule future execution.
fn execute<F: Future<Item = (), Error = ()> + Send + 'static>(executor: &Executor, f: F) {
if let Err(err) = future::Executor::execute(executor, Box::new(f)) {
error!("Secret store runtime unable to spawn task. Runtime is shutting down. ({:?})", err);
}
}
/// Try to update active nodes set from connection trigger.
fn update_nodes_set(data: Arc<NetConnectionsData>) {
let maintain_action = data.trigger.lock().on_maintain();
maintain_connection_trigger(data, maintain_action);
}
/// Execute maintain procedures of connections trigger.
fn maintain_connection_trigger(data: Arc<NetConnectionsData>, maintain_action: Option<Maintain>) {
if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Session) {
let session_params = data.trigger.lock().maintain_session();
if let Some(session_params) = session_params {
let session = data.message_processor.start_servers_set_change_session(session_params);
match session {
Ok(_) => trace!(target: "secretstore_net", "{}: started auto-migrate session",
data.self_key_pair.public()),
Err(err) => trace!(target: "secretstore_net", "{}: failed to start auto-migrate session with: {}",
data.self_key_pair.public(), err),
}
}
}
if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Connections) {
let mut trigger = data.trigger.lock();
let mut data = data.container.write();
trigger.maintain_connections(&mut *data);
}
}
/// Compose SocketAddr from configuration's address and port.
fn make_socket_address(address: &str, port: u16) -> Result<SocketAddr, Error> {
let ip_address: IpAddr = address.parse().map_err(|_| Error::InvalidNodeAddress)?;
Ok(SocketAddr::new(ip_address, port))
}
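// e.g. make_socket_address("127.0.0.1", 8083) yields Ok(127.0.0.1:8083), while a host
// name such as "localhost" fails with Error::InvalidNodeAddress: only literal IP
// addresses are accepted, no DNS resolution is attempted.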

@@ -0,0 +1,357 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use key_server_cluster::{Error, NodeId, NodeKeyPair};
use key_server_cluster::cluster::{ServersSetChangeParams, new_servers_set_change_session};
use key_server_cluster::cluster_sessions::{AdminSession};
use key_server_cluster::cluster_connections::{ConnectionProvider, Connection};
use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, ClusterSessionsContainer,
create_cluster_view};
use key_server_cluster::cluster_sessions_creator::{ClusterSessionCreator, IntoSessionId};
use key_server_cluster::message::{self, Message, ClusterMessage};
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession,
IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction};
use key_server_cluster::connection_trigger::ServersSetChangeSessionCreatorConnector;
/// Something that is able to process signals/messages from other nodes.
pub trait MessageProcessor: Send + Sync {
/// Process disconnect from the remote node.
fn process_disconnect(&self, node: &NodeId);
/// Process single message from the connection.
fn process_connection_message(&self, connection: Arc<Connection>, message: Message);
/// Start servers set change session. This is typically used by ConnectionManager when
/// it detects that auto-migration session needs to be started.
fn start_servers_set_change_session(&self, params: ServersSetChangeParams) -> Result<Arc<AdminSession>, Error>;
/// Try to continue session after key version negotiation session is completed.
fn try_continue_session(
&self,
session: Option<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>
);
/// Maintain active sessions. Typically called by the ConnectionManager at some intervals.
/// Should cancel stalled sessions and send keep-alive messages for sessions that support it.
fn maintain_sessions(&self);
}
/// Bridge between ConnectionManager and ClusterSessions.
pub struct SessionsMessageProcessor {
self_key_pair: Arc<NodeKeyPair>,
servers_set_change_creator_connector: Arc<ServersSetChangeSessionCreatorConnector>,
sessions: Arc<ClusterSessions>,
connections: Arc<ConnectionProvider>,
}
impl SessionsMessageProcessor {
/// Create new instance of SessionsMessageProcessor.
pub fn new(
self_key_pair: Arc<NodeKeyPair>,
servers_set_change_creator_connector: Arc<ServersSetChangeSessionCreatorConnector>,
sessions: Arc<ClusterSessions>,
connections: Arc<ConnectionProvider>,
) -> Self {
SessionsMessageProcessor {
self_key_pair,
servers_set_change_creator_connector,
sessions,
connections,
}
}
/// Process single session message from connection.
fn process_message<S: ClusterSession, SC: ClusterSessionCreator<S, D>, D>(
&self,
sessions: &ClusterSessionsContainer<S, SC, D>,
connection: Arc<Connection>,
mut message: Message,
) -> Option<Arc<S>>
where
Message: IntoSessionId<S::Id>
{
// get or create new session, if required
let mut sender = *connection.node_id();
let session = self.prepare_session(sessions, &sender, &message);
// send error if session is not found, or failed to create
let session = match session {
Ok(session) => session,
Err(error) => {
// this is new session => it is not yet in container
warn!(target: "secretstore_net",
"{}: {} session read error '{}' when requested for session from node {}",
self.self_key_pair.public(), S::type_name(), error, sender);
if !message.is_error_message() {
let qed = "session_id only fails for cluster messages;
only session messages are passed to process_message;
qed";
let session_id = message.into_session_id().expect(qed);
let session_nonce = message.session_nonce().expect(qed);
connection.send_message(SC::make_error_message(session_id, session_nonce, error));
}
return None;
},
};
let session_id = session.id();
let mut is_queued_message = false;
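// pump the session: feed the incoming message first; each time the session accepts a
// message, try to drain messages that were queued earlier as TooEarlyForRequest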
loop {
let message_result = session.on_message(&sender, &message);
match message_result {
Ok(_) => {
// if session is completed => stop
if session.is_finished() {
info!(target: "secretstore_net",
"{}: {} session completed", self.self_key_pair.public(), S::type_name());
sessions.remove(&session_id);
return Some(session);
}
// try to dequeue message
match sessions.dequeue_message(&session_id) {
Some((msg_sender, msg)) => {
is_queued_message = true;
sender = msg_sender;
message = msg;
},
None => return Some(session),
}
},
Err(Error::TooEarlyForRequest) => {
sessions.enqueue_message(&session_id, sender, message, is_queued_message);
return Some(session);
},
Err(err) => {
warn!(
target: "secretstore_net",
"{}: {} session error '{}' when processing message {} from node {}",
self.self_key_pair.public(),
S::type_name(),
err,
message,
sender);
session.on_session_error(self.self_key_pair.public(), err);
sessions.remove(&session_id);
return Some(session);
},
}
}
}
/// Get or insert new session.
fn prepare_session<S: ClusterSession, SC: ClusterSessionCreator<S, D>, D>(
&self,
sessions: &ClusterSessionsContainer<S, SC, D>,
sender: &NodeId,
message: &Message
) -> Result<Arc<S>, Error>
where
Message: IntoSessionId<S::Id>
{
fn requires_all_connections(message: &Message) -> bool {
match *message {
Message::Generation(_) => true,
Message::ShareAdd(_) => true,
Message::ServersSetChange(_) => true,
_ => false,
}
}
// get or create new session, if required
let session_id = message.into_session_id()
.expect("into_session_id fails for cluster messages only;
only session messages are passed to prepare_session;
qed");
let is_initialization_message = message.is_initialization_message();
let is_delegation_message = message.is_delegation_message();
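// initialization/delegation messages may legitimately arrive before the session exists
// (they create it); any other message must match an already active session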
match is_initialization_message || is_delegation_message {
false => sessions.get(&session_id, true).ok_or(Error::NoActiveSessionWithId),
true => {
let creation_data = SC::creation_data_from_message(&message)?;
let master = if is_initialization_message {
*sender
} else {
*self.self_key_pair.public()
};
let cluster = create_cluster_view(
self.self_key_pair.clone(),
self.connections.clone(),
requires_all_connections(&message))?;
let nonce = Some(message.session_nonce().ok_or(Error::InvalidMessage)?);
let exclusive = message.is_exclusive_session_message();
sessions.insert(cluster, master, session_id, nonce, exclusive, creation_data)
},
}
}
/// Process single cluster message from the connection.
fn process_cluster_message(&self, connection: Arc<Connection>, message: ClusterMessage) {
match message {
ClusterMessage::KeepAlive(_) => {
let msg = Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse {
session_id: None,
}));
connection.send_message(msg)
},
ClusterMessage::KeepAliveResponse(msg) => if let Some(session_id) = msg.session_id {
self.sessions.on_session_keep_alive(connection.node_id(), session_id.into());
},
_ => warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}",
self.self_key_pair.public(), message, connection.node_id(), connection.node_address()),
}
}
}
impl MessageProcessor for SessionsMessageProcessor {
fn process_disconnect(&self, node: &NodeId) {
self.sessions.on_connection_timeout(node);
}
fn process_connection_message(&self, connection: Arc<Connection>, message: Message) {
trace!(target: "secretstore_net", "{}: received message {} from {}",
self.self_key_pair.public(), message, connection.node_id());
// error is ignored as we only process errors on session level
match message {
Message::Generation(message) => self
.process_message(&self.sessions.generation_sessions, connection, Message::Generation(message))
.map(|_| ()).unwrap_or_default(),
Message::Encryption(message) => self
.process_message(&self.sessions.encryption_sessions, connection, Message::Encryption(message))
.map(|_| ()).unwrap_or_default(),
Message::Decryption(message) => self
.process_message(&self.sessions.decryption_sessions, connection, Message::Decryption(message))
.map(|_| ()).unwrap_or_default(),
Message::SchnorrSigning(message) => self
.process_message(&self.sessions.schnorr_signing_sessions, connection, Message::SchnorrSigning(message))
.map(|_| ()).unwrap_or_default(),
Message::EcdsaSigning(message) => self
.process_message(&self.sessions.ecdsa_signing_sessions, connection, Message::EcdsaSigning(message))
.map(|_| ()).unwrap_or_default(),
Message::ServersSetChange(message) => {
let message = Message::ServersSetChange(message);
let is_initialization_message = message.is_initialization_message();
let session = self.process_message(&self.sessions.admin_sessions, connection, message);
if is_initialization_message {
if let Some(session) = session {
self.servers_set_change_creator_connector
.set_key_servers_set_change_session(session.clone());
}
}
},
Message::KeyVersionNegotiation(message) => {
let session = self.process_message(
&self.sessions.negotiation_sessions, connection, Message::KeyVersionNegotiation(message));
self.try_continue_session(session);
},
Message::ShareAdd(message) => self.process_message(
&self.sessions.admin_sessions, connection, Message::ShareAdd(message))
.map(|_| ()).unwrap_or_default(),
Message::Cluster(message) => self.process_cluster_message(connection, message),
}
}
fn try_continue_session(
&self,
session: Option<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>
) {
if let Some(session) = session {
let meta = session.meta();
let is_master_node = meta.self_node_id == meta.master_node_id;
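// only the master drives continuation: negotiation always finishes with a result on
// the master, which then either starts the follow-up session itself or delegates it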
if is_master_node && session.is_finished() {
self.sessions.negotiation_sessions.remove(&session.id());
match session.wait() {
Ok(Some((version, master))) => match session.take_continue_action() {
Some(ContinueAction::Decrypt(
session, origin, is_shadow_decryption, is_broadcast_decryption
)) => {
let initialization_error = if self.self_key_pair.public() == &master {
session.initialize(
origin, version, is_shadow_decryption, is_broadcast_decryption)
} else {
session.delegate(
master, origin, version, is_shadow_decryption, is_broadcast_decryption)
};
if let Err(error) = initialization_error {
session.on_session_error(&meta.self_node_id, error);
self.sessions.decryption_sessions.remove(&session.id());
}
},
Some(ContinueAction::SchnorrSign(session, message_hash)) => {
let initialization_error = if self.self_key_pair.public() == &master {
session.initialize(version, message_hash)
} else {
session.delegate(master, version, message_hash)
};
if let Err(error) = initialization_error {
session.on_session_error(&meta.self_node_id, error);
self.sessions.schnorr_signing_sessions.remove(&session.id());
}
},
Some(ContinueAction::EcdsaSign(session, message_hash)) => {
let initialization_error = if self.self_key_pair.public() == &master {
session.initialize(version, message_hash)
} else {
session.delegate(master, version, message_hash)
};
if let Err(error) = initialization_error {
session.on_session_error(&meta.self_node_id, error);
self.sessions.ecdsa_signing_sessions.remove(&session.id());
}
},
None => (),
},
Ok(None) => unreachable!("is_master_node; session is finished;
negotiation version always finished with result on master;
qed"),
Err(error) => match session.take_continue_action() {
Some(ContinueAction::Decrypt(session, _, _, _)) => {
session.on_session_error(&meta.self_node_id, error);
self.sessions.decryption_sessions.remove(&session.id());
},
Some(ContinueAction::SchnorrSign(session, _)) => {
session.on_session_error(&meta.self_node_id, error);
self.sessions.schnorr_signing_sessions.remove(&session.id());
},
Some(ContinueAction::EcdsaSign(session, _)) => {
session.on_session_error(&meta.self_node_id, error);
self.sessions.ecdsa_signing_sessions.remove(&session.id());
},
None => (),
},
}
}
}
}
fn maintain_sessions(&self) {
self.sessions.stop_stalled_sessions();
self.sessions.sessions_keep_alive();
}
fn start_servers_set_change_session(&self, params: ServersSetChangeParams) -> Result<Arc<AdminSession>, Error> {
new_servers_set_change_session(
self.self_key_pair.clone(),
&*self.sessions,
self.connections.clone(),
self.servers_set_change_creator_connector.clone(),
params,
)
}
}
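
The continue-action handling above reduces to one rule: once a key-version negotiation finishes on the master node, the follow-up session is initialized locally when this node holds the negotiated version's master role, and delegated to that master otherwise; any initialization error tears the follow-up session down. A minimal standalone sketch of just that decision, with u64 ids standing in for NodeId and a stub in place of the real decryption/signing sessions (all names here are illustrative, not the crate's API):

// u64 stands in for NodeId; StubSession for the decryption/signing sessions.
type NodeId = u64;

struct StubSession {
	log: Vec<String>,
}

impl StubSession {
	fn initialize(&mut self, version: u32) -> Result<(), String> {
		self.log.push(format!("initialized with version {}", version));
		Ok(())
	}

	fn delegate(&mut self, master: NodeId, version: u32) -> Result<(), String> {
		self.log.push(format!("delegated to {} with version {}", master, version));
		Ok(())
	}
}

// If we are the master that owns the negotiated key version, start the
// session locally; otherwise hand it over to the master node.
fn continue_session(self_node: NodeId, master: NodeId, version: u32, session: &mut StubSession) -> Result<(), String> {
	if self_node == master {
		session.initialize(version)
	} else {
		session.delegate(master, version)
	}
}

fn main() {
	let mut session = StubSession { log: Vec::new() };
	continue_session(1, 2, 0, &mut session).unwrap();
	assert_eq!(session.log, vec!["delegated to 2 with version 0".to_string()]);
}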

View File

@@ -21,8 +21,9 @@ use std::collections::{VecDeque, BTreeMap, BTreeSet};
 use parking_lot::{Mutex, RwLock, Condvar};
 use ethereum_types::H256;
 use ethkey::Secret;
-use key_server_cluster::{Error, NodeId, SessionId, Requester};
+use key_server_cluster::{Error, NodeId, SessionId, Requester, NodeKeyPair};
-use key_server_cluster::cluster::{Cluster, ClusterData, ClusterConfiguration, ClusterView};
+use key_server_cluster::cluster::{Cluster, ClusterConfiguration, ClusterView};
+use key_server_cluster::cluster_connections::ConnectionProvider;
 use key_server_cluster::connection_trigger::ServersSetChangeSessionCreatorConnector;
 use key_server_cluster::message::{self, Message};
 use key_server_cluster::generation_session::{SessionImpl as GenerationSessionImpl};
@@ -158,6 +159,8 @@ pub struct ClusterSessionsContainer<S: ClusterSession, SC: ClusterSessionCreator
 	listeners: Mutex<Vec<Weak<ClusterSessionsListener<S>>>>,
 	/// Sessions container state.
 	container_state: Arc<Mutex<ClusterSessionsContainerState>>,
+	/// Do not actually remove sessions.
+	preserve_sessions: bool,
 	/// Phantom data.
 	_pd: ::std::marker::PhantomData<D>,
 }
@@ -229,6 +232,17 @@ impl ClusterSessions {
 		self.generation_sessions.creator.make_faulty_generation_sessions();
 	}

+	#[cfg(test)]
+	pub fn preserve_sessions(&mut self) {
+		self.generation_sessions.preserve_sessions = true;
+		self.encryption_sessions.preserve_sessions = true;
+		self.decryption_sessions.preserve_sessions = true;
+		self.schnorr_signing_sessions.preserve_sessions = true;
+		self.ecdsa_signing_sessions.preserve_sessions = true;
+		self.negotiation_sessions.preserve_sessions = true;
+		self.admin_sessions.preserve_sessions = true;
+	}
+
 	/// Send session-level keep-alive messages.
 	pub fn sessions_keep_alive(&self) {
 		self.admin_sessions.send_keep_alive(&*SERVERS_SET_CHANGE_SESSION_ID, &self.self_node_id);
@@ -272,6 +286,7 @@ impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: C
 			sessions: RwLock::new(BTreeMap::new()),
 			listeners: Mutex::new(Vec::new()),
 			container_state: container_state,
+			preserve_sessions: false,
 			_pd: Default::default(),
 		}
 	}
@@ -379,9 +394,11 @@ impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: C
 	}

 	fn do_remove(&self, session_id: &S::Id, sessions: &mut BTreeMap<S::Id, QueuedSession<S>>) {
-		if let Some(session) = sessions.remove(session_id) {
-			self.container_state.lock().on_session_completed();
-			self.notify_listeners(|l| l.on_session_removed(session.session.clone()));
+		if !self.preserve_sessions {
+			if let Some(session) = sessions.remove(session_id) {
+				self.container_state.lock().on_session_completed();
+				self.notify_listeners(|l| l.on_session_removed(session.session.clone()));
+			}
 		}
 	}
@@ -551,19 +568,22 @@ impl ClusterSession for AdminSession {
 		}
 	}
 }

-pub fn create_cluster_view(data: &Arc<ClusterData>, requires_all_connections: bool) -> Result<Arc<Cluster>, Error> {
-	let disconnected_nodes_count = data.connections.disconnected_nodes().len();
+pub fn create_cluster_view(self_key_pair: Arc<NodeKeyPair>, connections: Arc<ConnectionProvider>, requires_all_connections: bool) -> Result<Arc<Cluster>, Error> {
+	let mut connected_nodes = connections.connected_nodes()?;
+	let disconnected_nodes = connections.disconnected_nodes();
+	let disconnected_nodes_count = disconnected_nodes.len();
 	if requires_all_connections {
 		if disconnected_nodes_count != 0 {
 			return Err(Error::NodeDisconnected);
 		}
 	}

-	let mut connected_nodes = data.connections.connected_nodes()?;
-	connected_nodes.insert(data.self_key_pair.public().clone());
+	connected_nodes.insert(self_key_pair.public().clone());

 	let connected_nodes_count = connected_nodes.len();
-	Ok(Arc::new(ClusterView::new(data.clone(), connected_nodes, connected_nodes_count + disconnected_nodes_count)))
+	Ok(Arc::new(ClusterView::new(self_key_pair, connections, connected_nodes, connected_nodes_count + disconnected_nodes_count)))
 }

 #[cfg(test)]
@@ -583,13 +603,11 @@ mod tests {
 		let key_pair = Random.generate().unwrap();
 		let config = ClusterConfiguration {
 			self_key_pair: Arc::new(PlainNodeKeyPair::new(key_pair.clone())),
-			listen_address: ("127.0.0.1".to_owned(), 100_u16),
 			key_server_set: Arc::new(MapKeyServerSet::new(false, vec![(key_pair.public().clone(), format!("127.0.0.1:{}", 100).parse().unwrap())].into_iter().collect())),
-			allow_connecting_to_higher_nodes: false,
 			key_storage: Arc::new(DummyKeyStorage::default()),
 			acl_storage: Arc::new(DummyAclStorage::default()),
 			admin_public: Some(Random.generate().unwrap().public().clone()),
-			auto_migrate_enabled: false,
+			preserve_sessions: false,
 		};
 		ClusterSessions::new(&config, Arc::new(SimpleServersSetChangeSessionCreatorConnector {
 			admin_public: Some(Random.generate().unwrap().public().clone()),

View File

@@ -21,10 +21,12 @@ use std::sync::Arc;
 use ethereum_types::H256;
 use ethkey::Public;
 use key_server_cluster::{KeyServerSet, KeyServerSetSnapshot};
-use key_server_cluster::cluster::{ClusterClient, ClusterConnectionsData};
+use key_server_cluster::cluster::{ClusterConfiguration, ServersSetChangeParams};
 use key_server_cluster::cluster_sessions::AdminSession;
+use key_server_cluster::cluster_connections::{Connection};
+use key_server_cluster::cluster_connections_net::{NetConnectionsContainer};
 use types::{Error, NodeId};
-use {NodeKeyPair};
+use NodeKeyPair;

 #[derive(Debug, Clone, Copy, PartialEq)]
 /// Describes which maintain() call is required.
@@ -45,10 +47,10 @@ pub trait ConnectionTrigger: Send + Sync {
 	fn on_connection_established(&mut self, node: &NodeId) -> Option<Maintain>;
 	/// When connection is closed.
 	fn on_connection_closed(&mut self, node: &NodeId) -> Option<Maintain>;
-	/// Maintain active sessions.
-	fn maintain_session(&mut self, sessions: &ClusterClient);
+	/// Maintain active sessions. Returns Some if servers set session creation required.
+	fn maintain_session(&mut self) -> Option<ServersSetChangeParams>;
 	/// Maintain active connections.
-	fn maintain_connections(&mut self, connections: &mut ClusterConnectionsData);
+	fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer);
 	/// Return connector for the servers set change session creator.
 	fn servers_set_change_creator_connector(&self) -> Arc<ServersSetChangeSessionCreatorConnector>;
 }
@@ -95,6 +97,11 @@ pub struct TriggerConnections {
 }

 impl SimpleConnectionTrigger {
+	/// Create a new simple connection trigger from cluster configuration.
+	pub fn with_config(config: &ClusterConfiguration) -> Self {
+		Self::new(config.key_server_set.clone(), config.self_key_pair.clone(), config.admin_public)
+	}
+
 	/// Create new simple connection trigger.
 	pub fn new(key_server_set: Arc<KeyServerSet>, self_key_pair: Arc<NodeKeyPair>, admin_public: Option<Public>) -> Self {
 		SimpleConnectionTrigger {
@@ -124,10 +131,11 @@ impl ConnectionTrigger for SimpleConnectionTrigger {
 		None
 	}

-	fn maintain_session(&mut self, _sessions: &ClusterClient) {
+	fn maintain_session(&mut self) -> Option<ServersSetChangeParams> {
+		None
 	}

-	fn maintain_connections(&mut self, connections: &mut ClusterConnectionsData) {
+	fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer) {
 		self.connections.maintain(ConnectionsAction::ConnectToCurrentSet, connections, &self.key_server_set.snapshot())
 	}
@@ -146,7 +154,7 @@ impl ServersSetChangeSessionCreatorConnector for SimpleServersSetChangeSessionCr
 }

 impl TriggerConnections {
-	pub fn maintain(&self, action: ConnectionsAction, data: &mut ClusterConnectionsData, server_set: &KeyServerSetSnapshot) {
+	pub fn maintain(&self, action: ConnectionsAction, data: &mut NetConnectionsContainer, server_set: &KeyServerSetSnapshot) {
 		match action {
 			ConnectionsAction::ConnectToCurrentSet => {
 				adjust_connections(self.self_key_pair.public(), data, &server_set.current_set);
@@ -159,7 +167,11 @@ impl TriggerConnections {
 	}
 }

-fn adjust_connections(self_node_id: &NodeId, data: &mut ClusterConnectionsData, required_set: &BTreeMap<NodeId, SocketAddr>) {
+fn adjust_connections(
+	self_node_id: &NodeId,
+	data: &mut NetConnectionsContainer,
+	required_set: &BTreeMap<NodeId, SocketAddr>
+) {
 	if !required_set.contains_key(self_node_id) {
 		if !data.is_isolated {
 			trace!(target: "secretstore_net", "{}: isolated from cluster", self_node_id);
@@ -204,13 +216,13 @@ mod tests {
 	use std::collections::BTreeSet;
 	use std::sync::Arc;
 	use ethkey::{Random, Generator};
-	use key_server_cluster::cluster::ClusterConnectionsData;
 	use key_server_cluster::{MapKeyServerSet, PlainNodeKeyPair, KeyServerSetSnapshot, KeyServerSetMigration};
+	use key_server_cluster::cluster_connections_net::NetConnectionsContainer;
 	use super::{Maintain, TriggerConnections, ConnectionsAction, ConnectionTrigger, SimpleConnectionTrigger,
 		select_nodes_to_disconnect, adjust_connections};

-	fn default_connection_data() -> ClusterConnectionsData {
-		ClusterConnectionsData {
+	fn default_connection_data() -> NetConnectionsContainer {
+		NetConnectionsContainer {
 			is_isolated: false,
 			nodes: Default::default(),
 			connections: Default::default(),
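
The `adjust_connections` change above is mostly mechanical (new container type, multi-line signature); the underlying idea is set reconciliation: be connected to exactly the required node set, dropping everything else. A standalone sketch of that reconciliation over plain ids (the function and types here are stand-ins for illustration, not the crate's code):

use std::collections::{BTreeMap, BTreeSet};

type NodeId = u64;

// Reconcile the current nodes map with the required set: returns (to_add, to_remove).
// The real adjust_connections performs the same walk, mutating NetConnectionsContainer in place.
fn reconcile(current: &BTreeMap<NodeId, &'static str>, required: &BTreeSet<NodeId>) -> (BTreeSet<NodeId>, BTreeSet<NodeId>) {
	let to_add = required.iter().filter(|n| !current.contains_key(n)).cloned().collect();
	let to_remove = current.keys().filter(|n| !required.contains(n)).cloned().collect();
	(to_add, to_remove)
}

fn main() {
	let current: BTreeMap<_, _> = vec![(1, "addr1"), (2, "addr2")].into_iter().collect();
	let required: BTreeSet<_> = vec![2, 3].into_iter().collect();
	let (to_add, to_remove) = reconcile(&current, &required);
	assert_eq!(to_add.into_iter().collect::<Vec<_>>(), vec![3]);   // connect to 3
	assert_eq!(to_remove.into_iter().collect::<Vec<_>>(), vec![1]); // drop 1
}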

View File

@@ -21,7 +21,8 @@ use ethereum_types::H256;
 use ethkey::Public;
 use parking_lot::Mutex;
 use key_server_cluster::{KeyServerSet, KeyServerSetSnapshot, KeyServerSetMigration, is_migration_required};
-use key_server_cluster::cluster::{ClusterClient, ClusterConnectionsData};
+use key_server_cluster::cluster::{ClusterConfiguration, ServersSetChangeParams};
+use key_server_cluster::cluster_connections_net::NetConnectionsContainer;
 use key_server_cluster::cluster_sessions::{AdminSession, ClusterSession};
 use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash;
 use key_server_cluster::connection_trigger::{Maintain, ConnectionsAction, ConnectionTrigger,
@@ -110,6 +111,11 @@ struct TriggerSession {
 }

 impl ConnectionTriggerWithMigration {
+	/// Create a new trigger with migration from cluster configuration.
+	pub fn with_config(config: &ClusterConfiguration) -> Self {
+		Self::new(config.key_server_set.clone(), config.self_key_pair.clone())
+	}
+
 	/// Create new trigger with migration.
 	pub fn new(key_server_set: Arc<KeyServerSet>, self_key_pair: Arc<NodeKeyPair>) -> Self {
 		let snapshot = key_server_set.snapshot();
@@ -187,13 +193,11 @@ impl ConnectionTrigger for ConnectionTriggerWithMigration {
 		self.do_maintain()
 	}

-	fn maintain_session(&mut self, sessions: &ClusterClient) {
-		if let Some(action) = self.session_action {
-			self.session.maintain(action, sessions, &self.snapshot);
-		}
+	fn maintain_session(&mut self) -> Option<ServersSetChangeParams> {
+		self.session_action.and_then(|action| self.session.maintain(action, &self.snapshot))
 	}

-	fn maintain_connections(&mut self, connections: &mut ClusterConnectionsData) {
+	fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer) {
 		if let Some(action) = self.connections_action {
 			self.connections.maintain(action, connections, &self.snapshot);
 		}
@@ -255,30 +259,42 @@ impl TriggerSession {
 	}

 	/// Maintain session.
-	pub fn maintain(&mut self, action: SessionAction, sessions: &ClusterClient, server_set: &KeyServerSetSnapshot) {
-		if action == SessionAction::Start { // all other actions are processed in maintain
-			let migration = server_set.migration.as_ref()
-				.expect("action is Start only when migration is started (see maintain_session); qed");
+	pub fn maintain(
+		&mut self,
+		action: SessionAction,
+		server_set: &KeyServerSetSnapshot
+	) -> Option<ServersSetChangeParams> {
+		if action != SessionAction::Start { // all other actions are processed in maintain
+			return None;
+		}
+
+		let migration = server_set.migration.as_ref()
+			.expect("action is Start only when migration is started (see maintain_session); qed");

 		// we assume that authorities that are removed from the servers set are either offline, or malicious
 		// => they're not involved in ServersSetChangeSession
 		// => both sets are the same
 		let old_set: BTreeSet<_> = migration.set.keys().cloned().collect();
 		let new_set = old_set.clone();

 		let signatures = self.self_key_pair.sign(&ordered_nodes_hash(&old_set))
 			.and_then(|old_set_signature| self.self_key_pair.sign(&ordered_nodes_hash(&new_set))
-				.map(|new_set_signature| (old_set_signature, new_set_signature)))
-			.map_err(Into::into);
-		let session = signatures.and_then(|(old_set_signature, new_set_signature)|
-			sessions.new_servers_set_change_session(None, Some(migration.id.clone()), new_set, old_set_signature, new_set_signature));
+				.map(|new_set_signature| (old_set_signature, new_set_signature)));

-		match session {
-			Ok(_) => trace!(target: "secretstore_net", "{}: started auto-migrate session",
-				self.self_key_pair.public()),
-			Err(err) => trace!(target: "secretstore_net", "{}: failed to start auto-migrate session with: {}",
-				self.self_key_pair.public(), err),
-		}
+		match signatures {
+			Ok((old_set_signature, new_set_signature)) => Some(ServersSetChangeParams {
+				session_id: None,
+				migration_id: Some(migration.id),
+				new_nodes_set: new_set,
+				old_set_signature,
+				new_set_signature,
+			}),
+			Err(err) => {
+				trace!(
+					target: "secretstore_net",
+					"{}: failed to sign servers set for auto-migrate session with: {}",
+					self.self_key_pair.public(), err);
+				None
+			},
 		}
 	}
 }
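
The essential change in this file is an inversion of control: `TriggerSession::maintain` no longer needs a live `&ClusterClient` to start the session itself; it just computes the would-be `ServersSetChangeParams` and lets the caller decide how to run them. That is what makes the trigger usable with the in-memory transport in tests. A compact model of the before/after shape (all names here are stand-ins, not the crate's types):

// After the change: maintain is a pure-ish function returning parameters,
// easy to unit-test with no cluster client at all.
#[derive(Debug, PartialEq)]
struct Params { new_nodes: Vec<u64> }

#[derive(PartialEq)]
enum Action { Start, Idle }

fn maintain(action: Action, migration_set: &[u64]) -> Option<Params> {
	if action != Action::Start {
		return None;
	}
	Some(Params { new_nodes: migration_set.to_vec() })
}

fn main() {
	assert_eq!(maintain(Action::Idle, &[1, 2]), None);
	let params = maintain(Action::Start, &[1, 2]).expect("start produces params");
	// the connection manager would now pass params to start_servers_set_change_session
	assert_eq!(params.new_nodes, vec![1, 2]);
}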

View File

@@ -23,7 +23,8 @@ pub use super::key_storage::{KeyStorage, DocumentKeyShare, DocumentKeyShareVersi
 pub use super::key_server_set::{is_migration_required, KeyServerSet, KeyServerSetSnapshot, KeyServerSetMigration};
 pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic,
 	SerializableRequester, SerializableMessageHash, SerializableAddress};
-pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient};
+pub use self::cluster::{new_network_cluster, ClusterCore, ClusterConfiguration, ClusterClient};
+pub use self::cluster_connections_net::NetConnectionsManagerConfig;
 pub use self::cluster_sessions::{ClusterSession, ClusterSessionsListener};
 #[cfg(test)]
 pub use self::cluster::tests::DummyClusterClient;
@@ -70,6 +71,9 @@ pub use self::client_sessions::signing_session_ecdsa;
 pub use self::client_sessions::signing_session_schnorr;

 mod cluster;
+mod cluster_connections;
+mod cluster_connections_net;
+mod cluster_message_processor;
 mod cluster_sessions;
 mod cluster_sessions_creator;
 mod connection_trigger;

View File

@@ -29,6 +29,11 @@ impl PlainNodeKeyPair {
 			key_pair: key_pair,
 		}
 	}
+
+	#[cfg(test)]
+	pub fn key_pair(&self) -> &KeyPair {
+		&self.key_pair
+	}
 }

 impl NodeKeyPair for PlainNodeKeyPair {

View File

@@ -0,0 +1,176 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::BTreeSet;
use std::sync::Arc;
use key_server_cluster::{Error, NodeId};
use key_server_cluster::message::Message;
/// Connection to a single node. Provides basic information about the connected node and
/// allows sending messages to this node.
pub trait Connection: Send + Sync {
/// Is this inbound connection? This only matters when both nodes are simultaneously establishing
/// two connections to each other. The agreement is that the inbound connection from the node with
/// lower NodeId is used and the other connection is closed.
fn is_inbound(&self) -> bool;
/// Returns id of the connected node.
fn node_id(&self) -> &NodeId;
/// Returns 'address' of the node to use in traces.
fn node_address(&self) -> String;
/// Send message to the connected node.
fn send_message(&self, message: Message);
}
/// Connections manager. Responsible for keeping us connected to all required nodes.
pub trait ConnectionManager: 'static + Send + Sync {
/// Returns shared reference to connections provider.
fn provider(&self) -> Arc<ConnectionProvider>;
/// Try to reach all disconnected nodes immediately. This method is exposed mostly for
/// tests, where all 'nodes' first start listening for incoming connections and only
/// after that actually start connecting to each other.
fn connect(&self);
}
/// Connections provider. Holds all active connections and the set of nodes that we need to
/// connect to. At any moment a connection could be lost and the set of connected/disconnected
/// nodes could change (on behalf of the connection manager).
/// Clone operation should be cheap (Arc).
pub trait ConnectionProvider: Send + Sync {
/// Returns the set of currently connected nodes. Error is returned when our node is
/// not a part of the cluster ('isolated' node).
fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error>;
/// Returns the set of currently disconnected nodes.
fn disconnected_nodes(&self) -> BTreeSet<NodeId>;
/// Returns the reference to the active node connection or None if the node is not connected.
fn connection(&self, node: &NodeId) -> Option<Arc<Connection>>;
}
#[cfg(test)]
pub mod tests {
use std::collections::{BTreeSet, VecDeque};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use parking_lot::Mutex;
use key_server_cluster::{Error, NodeId};
use key_server_cluster::message::Message;
use super::{ConnectionManager, Connection, ConnectionProvider};
/// Shared messages queue.
pub type MessagesQueue = Arc<Mutex<VecDeque<(NodeId, NodeId, Message)>>>;
/// Single node connections.
pub struct TestConnections {
node: NodeId,
is_isolated: AtomicBool,
connected_nodes: Mutex<BTreeSet<NodeId>>,
disconnected_nodes: Mutex<BTreeSet<NodeId>>,
messages: MessagesQueue,
}
/// Single connection.
pub struct TestConnection {
from: NodeId,
to: NodeId,
messages: MessagesQueue,
}
impl TestConnections {
pub fn isolate(&self) {
let connected_nodes = ::std::mem::replace(&mut *self.connected_nodes.lock(), Default::default());
self.is_isolated.store(true, Ordering::Relaxed);
self.disconnected_nodes.lock().extend(connected_nodes)
}
pub fn disconnect(&self, node: NodeId) {
self.connected_nodes.lock().remove(&node);
self.disconnected_nodes.lock().insert(node);
}
pub fn exclude(&self, node: NodeId) {
self.connected_nodes.lock().remove(&node);
self.disconnected_nodes.lock().remove(&node);
}
pub fn include(&self, node: NodeId) {
self.connected_nodes.lock().insert(node);
}
}
impl ConnectionManager for Arc<TestConnections> {
fn provider(&self) -> Arc<ConnectionProvider> {
self.clone()
}
fn connect(&self) {}
}
impl ConnectionProvider for TestConnections {
fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error> {
match self.is_isolated.load(Ordering::Relaxed) {
false => Ok(self.connected_nodes.lock().clone()),
true => Err(Error::NodeDisconnected),
}
}
fn disconnected_nodes(&self) -> BTreeSet<NodeId> {
self.disconnected_nodes.lock().clone()
}
fn connection(&self, node: &NodeId) -> Option<Arc<Connection>> {
match self.connected_nodes.lock().contains(node) {
true => Some(Arc::new(TestConnection {
from: self.node,
to: *node,
messages: self.messages.clone(),
})),
false => None,
}
}
}
impl Connection for TestConnection {
fn is_inbound(&self) -> bool {
false
}
fn node_id(&self) -> &NodeId {
&self.to
}
fn node_address(&self) -> String {
format!("{}", self.to)
}
fn send_message(&self, message: Message) {
self.messages.lock().push_back((self.from, self.to, message))
}
}
pub fn new_test_connections(
messages: MessagesQueue,
node: NodeId,
mut nodes: BTreeSet<NodeId>
) -> Arc<TestConnections> {
let is_isolated = !nodes.remove(&node);
Arc::new(TestConnections {
node,
is_isolated: AtomicBool::new(is_isolated),
connected_nodes: Mutex::new(nodes),
disconnected_nodes: Default::default(),
messages,
})
}
}
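
The shared `MessagesQueue` above is what replaces TCP in the cluster tests: every `send_message` just pushes a `(from, to, message)` triple onto a queue that the test loop later drains and feeds into the recipient's message processor. A standalone miniature of that pattern, using std's Mutex, u64 ids and string messages instead of parking_lot, NodeId and Message (illustrative stand-ins only):

use std::collections::VecDeque;
use std::sync::{Arc, Mutex};

type NodeId = u64;
type Msg = &'static str;
// shared queue standing in for MessagesQueue
type Queue = Arc<Mutex<VecDeque<(NodeId, NodeId, Msg)>>>;

struct TestConnection { from: NodeId, to: NodeId, queue: Queue }

impl TestConnection {
	// mirror of Connection::send_message: no sockets, just a queue push
	fn send_message(&self, message: Msg) {
		self.queue.lock().unwrap().push_back((self.from, self.to, message));
	}
}

fn main() {
	let queue: Queue = Arc::new(Mutex::new(VecDeque::new()));
	let conn_1_to_2 = TestConnection { from: 1, to: 2, queue: queue.clone() };
	conn_1_to_2.send_message("KeepAlive");

	// the test's dispatch loop: pop a message and 'deliver' it to node `to`
	while let Some((from, to, message)) = queue.lock().unwrap().pop_front() {
		println!("deliver {} -> {}: {}", from, to, message);
	}
}

Because delivery is an explicit loop, a test can drop, reorder, or count messages deterministically, which is exactly what the `isolate`/`disconnect` helpers above exploit.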

View File

@@ -0,0 +1,539 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::{BTreeMap, BTreeSet};
use std::collections::btree_map::Entry;
use std::io;
use std::net::{SocketAddr, IpAddr};
use std::sync::Arc;
use std::time::{Duration, Instant};
use futures::{future, Future, Stream};
use parking_lot::{Mutex, RwLock};
use tokio::net::{TcpListener, TcpStream};
use tokio::timer::{Interval, timeout::Error as TimeoutError};
use tokio_io::IoFuture;
use ethkey::KeyPair;
use parity_runtime::Executor;
use key_server_cluster::{Error, NodeId, ClusterConfiguration, NodeKeyPair};
use key_server_cluster::cluster_connections::{ConnectionProvider, Connection, ConnectionManager};
use key_server_cluster::connection_trigger::{Maintain, ConnectionTrigger};
use key_server_cluster::cluster_message_processor::MessageProcessor;
use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream,
read_encrypted_message, WriteMessage, write_encrypted_message};
use key_server_cluster::message::{self, ClusterMessage, Message};
use key_server_cluster::net::{accept_connection as io_accept_connection,
connect as io_connect, Connection as IoConnection};
/// Empty future.
pub type BoxedEmptyFuture = Box<Future<Item = (), Error = ()> + Send>;
/// Maintain interval (seconds). Every MAINTAIN_INTERVAL seconds the node:
/// 1) checks if connected nodes are responding to KeepAlive messages
/// 2) tries to connect to disconnected nodes
/// 3) checks if enc/dec sessions have timed out
const MAINTAIN_INTERVAL: u64 = 10;
/// When no messages have been received from a node within KEEP_ALIVE_SEND_INTERVAL,
/// we must send a KeepAlive message to the node to check if it still responds to messages.
const KEEP_ALIVE_SEND_INTERVAL: Duration = Duration::from_secs(30);
/// When no messages have been received from a node within KEEP_ALIVE_DISCONNECT_INTERVAL,
/// we must treat this node as non-responding && disconnect from it.
const KEEP_ALIVE_DISCONNECT_INTERVAL: Duration = Duration::from_secs(60);
/// Network connection manager configuration.
pub struct NetConnectionsManagerConfig {
/// Allow connecting to 'higher' nodes.
pub allow_connecting_to_higher_nodes: bool,
/// Interface to listen to.
pub listen_address: (String, u16),
/// True if we should autostart a key servers set change session when the servers set changes.
/// This will only work when the servers set is configured using the KeyServerSet contract.
pub auto_migrate_enabled: bool,
}
/// Network connections manager.
pub struct NetConnectionsManager {
/// Address we're listening on for incoming connections.
listen_address: SocketAddr,
/// Shared cluster connections data reference.
data: Arc<NetConnectionsData>,
}
/// Network connections data. Shared among NetConnectionsManager and spawned futures.
struct NetConnectionsData {
/// Allow connecting to 'higher' nodes.
allow_connecting_to_higher_nodes: bool,
/// Reference to tokio task executor.
executor: Executor,
/// Key pair of this node.
self_key_pair: Arc<NodeKeyPair>,
/// Network messages processor.
message_processor: Arc<MessageProcessor>,
/// Connections trigger.
trigger: Mutex<Box<ConnectionTrigger>>,
/// Mutable connection data.
container: Arc<RwLock<NetConnectionsContainer>>,
}
/// Network connections container. This is the only mutable data of NetConnectionsManager.
/// The set of nodes is mutated by the connection trigger and the connections set is also
/// mutated by spawned futures.
pub struct NetConnectionsContainer {
/// Is this node isolated from cluster?
pub is_isolated: bool,
/// Current key servers set.
pub nodes: BTreeMap<NodeId, SocketAddr>,
/// Active connections to key servers.
pub connections: BTreeMap<NodeId, Arc<NetConnection>>,
}
/// Network connection to single key server node.
pub struct NetConnection {
executor: Executor,
/// Id of the peer node.
node_id: NodeId,
/// Address of the peer node.
node_address: SocketAddr,
/// Is this inbound (true) or outbound (false) connection?
is_inbound: bool,
/// Key pair that is used to encrypt this connection's messages.
key: KeyPair,
/// Last message time.
last_message_time: RwLock<Instant>,
/// Underlying TCP stream.
stream: SharedTcpStream,
}
impl NetConnectionsManager {
/// Create new network connections manager.
pub fn new(
executor: Executor,
message_processor: Arc<MessageProcessor>,
trigger: Box<ConnectionTrigger>,
container: Arc<RwLock<NetConnectionsContainer>>,
config: &ClusterConfiguration,
net_config: NetConnectionsManagerConfig,
) -> Result<Self, Error> {
let listen_address = make_socket_address(
&net_config.listen_address.0,
net_config.listen_address.1)?;
Ok(NetConnectionsManager {
listen_address,
data: Arc::new(NetConnectionsData {
allow_connecting_to_higher_nodes: net_config.allow_connecting_to_higher_nodes,
executor,
message_processor,
self_key_pair: config.self_key_pair.clone(),
trigger: Mutex::new(trigger),
container,
}),
})
}
/// Start listening for connections and schedule connections maintenance.
pub fn start(&self) -> Result<(), Error> {
net_listen(&self.listen_address, self.data.clone())?;
net_schedule_maintain(self.data.clone());
Ok(())
}
}
impl ConnectionManager for NetConnectionsManager {
fn provider(&self) -> Arc<ConnectionProvider> {
self.data.container.clone()
}
fn connect(&self) {
net_connect_disconnected(self.data.clone());
}
}
impl ConnectionProvider for RwLock<NetConnectionsContainer> {
fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error> {
let connections = self.read();
if connections.is_isolated {
return Err(Error::NodeDisconnected);
}
Ok(connections.connections.keys().cloned().collect())
}
fn disconnected_nodes(&self) -> BTreeSet<NodeId> {
let connections = self.read();
connections.nodes.keys()
.filter(|node_id| !connections.connections.contains_key(node_id))
.cloned()
.collect()
}
fn connection(&self, node: &NodeId) -> Option<Arc<Connection>> {
match self.read().connections.get(node).cloned() {
Some(connection) => Some(connection),
None => None,
}
}
}
impl NetConnection {
/// Create new connection.
pub fn new(executor: Executor, is_inbound: bool, connection: IoConnection) -> NetConnection {
NetConnection {
executor,
node_id: connection.node_id,
node_address: connection.address,
is_inbound: is_inbound,
stream: connection.stream,
key: connection.key,
last_message_time: RwLock::new(Instant::now()),
}
}
/// Get last message time.
pub fn last_message_time(&self) -> Instant {
*self.last_message_time.read()
}
/// Update last message time
pub fn set_last_message_time(&self, last_message_time: Instant) {
*self.last_message_time.write() = last_message_time
}
/// Returns future that sends encrypted message over this connection.
pub fn send_message_future(&self, message: Message) -> WriteMessage<SharedTcpStream> {
write_encrypted_message(self.stream.clone(), &self.key, message)
}
/// Returns future that reads encrypted message from this connection.
pub fn read_message_future(&self) -> ReadMessage<SharedTcpStream> {
read_encrypted_message(self.stream.clone(), self.key.clone())
}
}
impl Connection for NetConnection {
fn is_inbound(&self) -> bool {
self.is_inbound
}
fn node_id(&self) -> &NodeId {
&self.node_id
}
fn node_address(&self) -> String {
format!("{}", self.node_address)
}
fn send_message(&self, message: Message) {
execute(&self.executor, self.send_message_future(message).then(|_| Ok(())));
}
}
impl NetConnectionsData {
/// Returns all active connections.
pub fn active_connections(&self) -> Vec<Arc<NetConnection>> {
self.container.read().connections.values().cloned().collect()
}
/// Returns all disconnected nodes along with their addresses.
pub fn disconnected_nodes(&self) -> Vec<(NodeId, SocketAddr)> {
let container = self.container.read();
container.nodes.iter()
.filter(|(node_id, _)| !container.connections.contains_key(node_id))
.map(|(node_id, addr)| (*node_id, *addr))
.collect()
}
/// Try to insert new connection. Returns true if connection has been inserted.
/// Returns false (and ignores the connection) if:
/// - we do not expect a connection from this node
/// - we are already connected to the node and the existing connection 'supersedes'
/// the new connection by agreement
pub fn insert(&self, connection: Arc<NetConnection>) -> bool {
let node = *connection.node_id();
let mut container = self.container.write();
if !container.nodes.contains_key(&node) {
trace!(target: "secretstore_net", "{}: ignoring unknown connection from {} at {}",
self.self_key_pair.public(), node, connection.node_address());
return false;
}
if container.connections.contains_key(&node) {
// we have already connected to the same node
// the agreement is that node with lower id must establish connection to node with higher id
if (*self.self_key_pair.public() < node && connection.is_inbound())
|| (*self.self_key_pair.public() > node && !connection.is_inbound()) {
return false;
}
}
trace!(target: "secretstore_net",
"{}: inserting connection to {} at {}. Connected to {} of {} nodes",
self.self_key_pair.public(), node, connection.node_address(),
container.connections.len() + 1, container.nodes.len());
container.connections.insert(node, connection);
true
}
/// Tries to remove connection. Returns true if connection has been removed.
/// Returns false if we do not know this connection.
pub fn remove(&self, connection: &NetConnection) -> bool {
let node_id = *connection.node_id();
let is_inbound = connection.is_inbound();
let mut container = self.container.write();
if let Entry::Occupied(entry) = container.connections.entry(node_id) {
if entry.get().is_inbound() != is_inbound {
return false;
}
trace!(target: "secretstore_net", "{}: removing connection to {} at {}",
self.self_key_pair.public(), node_id, entry.get().node_address());
entry.remove_entry();
true
} else {
false
}
}
}
/// Listen for incoming connections.
fn net_listen(
listen_address: &SocketAddr,
data: Arc<NetConnectionsData>,
) -> Result<(), Error> {
execute(&data.executor, net_listen_future(listen_address, data.clone())?);
Ok(())
}
/// Future that listens for incoming connections.
fn net_listen_future(
listen_address: &SocketAddr,
data: Arc<NetConnectionsData>,
) -> Result<BoxedEmptyFuture, Error> {
Ok(Box::new(TcpListener::bind(listen_address)?
.incoming()
.and_then(move |stream| {
net_accept_connection(data.clone(), stream);
Ok(())
})
.for_each(|_| Ok(()))
.then(|_| future::ok(()))))
}
/// Accept incoming connection.
fn net_accept_connection(
data: Arc<NetConnectionsData>,
stream: TcpStream,
) {
execute(&data.executor, net_accept_connection_future(data.clone(), stream));
}
/// Accept incoming connection future.
fn net_accept_connection_future(data: Arc<NetConnectionsData>, stream: TcpStream) -> BoxedEmptyFuture {
Box::new(io_accept_connection(stream, data.self_key_pair.clone())
.then(move |result| net_process_connection_result(data, None, result))
.then(|_| future::ok(())))
}
/// Connect to remote node.
fn net_connect(
data: Arc<NetConnectionsData>,
remote: SocketAddr,
) {
execute(&data.executor, net_connect_future(data.clone(), remote));
}
/// Connect to remote node future.
fn net_connect_future(
data: Arc<NetConnectionsData>,
remote: SocketAddr,
) -> BoxedEmptyFuture {
let disconnected_nodes = data.container.disconnected_nodes();
Box::new(io_connect(&remote, data.self_key_pair.clone(), disconnected_nodes)
.then(move |result| net_process_connection_result(data, Some(remote), result))
.then(|_| future::ok(())))
}
/// Process network connection result.
fn net_process_connection_result(
data: Arc<NetConnectionsData>,
outbound_addr: Option<SocketAddr>,
result: Result<DeadlineStatus<Result<IoConnection, Error>>, TimeoutError<io::Error>>,
) -> IoFuture<Result<(), Error>> {
match result {
Ok(DeadlineStatus::Meet(Ok(connection))) => {
let connection = Arc::new(NetConnection::new(data.executor.clone(), outbound_addr.is_none(), connection));
if data.insert(connection.clone()) {
let maintain_action = data.trigger.lock().on_connection_established(connection.node_id());
maintain_connection_trigger(data.clone(), maintain_action);
return net_process_connection_messages(data, connection);
}
},
Ok(DeadlineStatus::Meet(Err(err))) => {
warn!(target: "secretstore_net", "{}: protocol error '{}' when establishing {} connection{}",
data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
},
Ok(DeadlineStatus::Timeout) => {
warn!(target: "secretstore_net", "{}: timeout when establishing {} connection{}",
data.self_key_pair.public(), if outbound_addr.is_some() { "outbound" } else { "inbound" },
outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
},
Err(err) => {
warn!(target: "secretstore_net", "{}: network error '{}' when establishing {} connection{}",
data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
},
}
Box::new(future::ok(Ok(())))
}
/// Process connection messages.
fn net_process_connection_messages(
data: Arc<NetConnectionsData>,
connection: Arc<NetConnection>,
) -> IoFuture<Result<(), Error>> {
Box::new(connection
.read_message_future()
.then(move |result|
match result {
Ok((_, Ok(message))) => {
connection.set_last_message_time(Instant::now());
data.message_processor.process_connection_message(connection.clone(), message);
// continue serving connection
let process_messages_future = net_process_connection_messages(
data.clone(), connection).then(|_| Ok(()));
execute(&data.executor, process_messages_future);
Box::new(future::ok(Ok(())))
},
Ok((_, Err(err))) => {
warn!(target: "secretstore_net", "{}: protocol error '{}' when reading message from node {}",
data.self_key_pair.public(), err, connection.node_id());
// continue serving connection
let process_messages_future = net_process_connection_messages(
data.clone(), connection).then(|_| Ok(()));
execute(&data.executor, process_messages_future);
Box::new(future::ok(Err(err)))
},
Err(err) => {
let node_id = *connection.node_id();
warn!(target: "secretstore_net", "{}: network error '{}' when reading message from node {}",
data.self_key_pair.public(), err, node_id);
// close connection
if data.remove(&*connection) {
let maintain_action = data.trigger.lock().on_connection_closed(&node_id);
maintain_connection_trigger(data, maintain_action);
}
Box::new(future::err(err))
},
}
))
}
/// Schedule connections maintenance.
fn net_schedule_maintain(data: Arc<NetConnectionsData>) {
let closure_data = data.clone();
execute(&data.executor, Interval::new_interval(Duration::new(MAINTAIN_INTERVAL, 0))
.and_then(move |_| Ok(net_maintain(closure_data.clone())))
.for_each(|_| Ok(()))
.then(|_| future::ok(())));
}
/// Maintain network connections.
fn net_maintain(data: Arc<NetConnectionsData>) {
trace!(target: "secretstore_net", "{}: executing maintain procedures", data.self_key_pair.public());
update_nodes_set(data.clone());
data.message_processor.maintain_sessions();
net_keep_alive(data.clone());
net_connect_disconnected(data);
}
/// Send keep alive messages to remote nodes.
fn net_keep_alive(data: Arc<NetConnectionsData>) {
let now = Instant::now();
let active_connections = data.active_connections();
for connection in active_connections {
let last_message_diff = now - connection.last_message_time();
if last_message_diff > KEEP_ALIVE_DISCONNECT_INTERVAL {
warn!(target: "secretstore_net", "{}: keep alive timeout for node {}",
data.self_key_pair.public(), connection.node_id());
let node_id = *connection.node_id();
if data.remove(&*connection) {
let maintain_action = data.trigger.lock().on_connection_closed(&node_id);
maintain_connection_trigger(data.clone(), maintain_action);
}
data.message_processor.process_disconnect(&node_id);
}
else if last_message_diff > KEEP_ALIVE_SEND_INTERVAL {
connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {})));
}
}
}
/// Connect disconnected nodes.
fn net_connect_disconnected(data: Arc<NetConnectionsData>) {
let disconnected_nodes = data.disconnected_nodes();
for (node_id, address) in disconnected_nodes {
if data.allow_connecting_to_higher_nodes || *data.self_key_pair.public() < node_id {
net_connect(data.clone(), address);
}
}
}
/// Schedule future execution.
fn execute<F: Future<Item = (), Error = ()> + Send + 'static>(executor: &Executor, f: F) {
if let Err(err) = future::Executor::execute(executor, Box::new(f)) {
error!("Secret store runtime unable to spawn task. Runtime is shutting down. ({:?})", err);
}
}
/// Try to update active nodes set from connection trigger.
fn update_nodes_set(data: Arc<NetConnectionsData>) {
let maintain_action = data.trigger.lock().on_maintain();
maintain_connection_trigger(data, maintain_action);
}
/// Execute maintain procedures of connections trigger.
fn maintain_connection_trigger(data: Arc<NetConnectionsData>, maintain_action: Option<Maintain>) {
if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Session) {
let session_params = data.trigger.lock().maintain_session();
if let Some(session_params) = session_params {
let session = data.message_processor.start_servers_set_change_session(session_params);
match session {
Ok(_) => trace!(target: "secretstore_net", "{}: started auto-migrate session",
data.self_key_pair.public()),
Err(err) => trace!(target: "secretstore_net", "{}: failed to start auto-migrate session with: {}",
data.self_key_pair.public(), err),
}
}
}
if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Connections) {
let mut trigger = data.trigger.lock();
let mut data = data.container.write();
trigger.maintain_connections(&mut *data);
}
}
/// Compose SocketAddr from the configuration's address and port.
fn make_socket_address(address: &str, port: u16) -> Result<SocketAddr, Error> {
let ip_address: IpAddr = address.parse().map_err(|_| Error::InvalidNodeAddress)?;
Ok(SocketAddr::new(ip_address, port))
}
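
net_keep_alive above implements a two-threshold policy: past KEEP_ALIVE_SEND_INTERVAL of silence a KeepAlive probe is sent; past KEEP_ALIVE_DISCONNECT_INTERVAL the peer is dropped and the disconnect is propagated to the message processor. A minimal sketch of just that decision, with the constants mirroring the ones above and everything else a stand-in:

use std::time::{Duration, Instant};

const KEEP_ALIVE_SEND_INTERVAL: Duration = Duration::from_secs(30);
const KEEP_ALIVE_DISCONNECT_INTERVAL: Duration = Duration::from_secs(60);

#[derive(Debug, PartialEq)]
enum KeepAliveAction { Nothing, SendKeepAlive, Disconnect }

// Same ordering of checks as net_keep_alive: the disconnect threshold first,
// then the probe threshold, otherwise leave the connection alone.
fn keep_alive_action(now: Instant, last_message_time: Instant) -> KeepAliveAction {
	let silence = now - last_message_time;
	if silence > KEEP_ALIVE_DISCONNECT_INTERVAL {
		KeepAliveAction::Disconnect
	} else if silence > KEEP_ALIVE_SEND_INTERVAL {
		KeepAliveAction::SendKeepAlive
	} else {
		KeepAliveAction::Nothing
	}
}

fn main() {
	let now = Instant::now();
	assert_eq!(keep_alive_action(now, now), KeepAliveAction::Nothing);
	assert_eq!(keep_alive_action(now + Duration::from_secs(31), now), KeepAliveAction::SendKeepAlive);
	assert_eq!(keep_alive_action(now + Duration::from_secs(61), now), KeepAliveAction::Disconnect);
}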

View File

@@ -0,0 +1,357 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use key_server_cluster::{Error, NodeId, NodeKeyPair};
use key_server_cluster::cluster::{ServersSetChangeParams, new_servers_set_change_session};
use key_server_cluster::cluster_sessions::{AdminSession};
use key_server_cluster::cluster_connections::{ConnectionProvider, Connection};
use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, ClusterSessionsContainer,
create_cluster_view};
use key_server_cluster::cluster_sessions_creator::{ClusterSessionCreator, IntoSessionId};
use key_server_cluster::message::{self, Message, ClusterMessage};
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession,
IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction};
use key_server_cluster::connection_trigger::ServersSetChangeSessionCreatorConnector;
/// Something that is able to process signals/messages from other nodes.
pub trait MessageProcessor: Send + Sync {
/// Process disconnect from the remote node.
fn process_disconnect(&self, node: &NodeId);
/// Process single message from the connection.
fn process_connection_message(&self, connection: Arc<Connection>, message: Message);
/// Start servers set change session. This is typically used by ConnectionManager when
/// it detects that auto-migration session needs to be started.
fn start_servers_set_change_session(&self, params: ServersSetChangeParams) -> Result<Arc<AdminSession>, Error>;
/// Try to continue session after key version negotiation session is completed.
fn try_continue_session(
&self,
session: Option<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>
);
/// Maintain active sessions. Typically called by the ConnectionManager at some intervals.
/// Should cancel stalled sessions and send keep-alive messages for sessions that support it.
fn maintain_sessions(&self);
}
/// Bridge between ConnectionManager and ClusterSessions.
pub struct SessionsMessageProcessor {
self_key_pair: Arc<NodeKeyPair>,
servers_set_change_creator_connector: Arc<ServersSetChangeSessionCreatorConnector>,
sessions: Arc<ClusterSessions>,
connections: Arc<ConnectionProvider>,
}
impl SessionsMessageProcessor {
/// Create new instance of SessionsMessageProcessor.
pub fn new(
self_key_pair: Arc<NodeKeyPair>,
servers_set_change_creator_connector: Arc<ServersSetChangeSessionCreatorConnector>,
sessions: Arc<ClusterSessions>,
connections: Arc<ConnectionProvider>,
) -> Self {
SessionsMessageProcessor {
self_key_pair,
servers_set_change_creator_connector,
sessions,
connections,
}
}
/// Process single session message from connection.
fn process_message<S: ClusterSession, SC: ClusterSessionCreator<S, D>, D>(
&self,
sessions: &ClusterSessionsContainer<S, SC, D>,
connection: Arc<Connection>,
mut message: Message,
) -> Option<Arc<S>>
where
Message: IntoSessionId<S::Id>
{
// get or create new session, if required
let mut sender = *connection.node_id();
let session = self.prepare_session(sessions, &sender, &message);
// send error if session is not found, or failed to create
let session = match session {
Ok(session) => session,
Err(error) => {
// this is new session => it is not yet in container
warn!(target: "secretstore_net",
"{}: {} session read error '{}' when requested for session from node {}",
self.self_key_pair.public(), S::type_name(), error, sender);
if !message.is_error_message() {
let qed = "session_id only fails for cluster messages;
only session messages are passed to process_message;
qed";
let session_id = message.into_session_id().expect(qed);
let session_nonce = message.session_nonce().expect(qed);
connection.send_message(SC::make_error_message(session_id, session_nonce, error));
}
return None;
},
};
let session_id = session.id();
let mut is_queued_message = false;
loop {
let message_result = session.on_message(&sender, &message);
match message_result {
Ok(_) => {
// if session is completed => stop
if session.is_finished() {
info!(target: "secretstore_net",
"{}: {} session completed", self.self_key_pair.public(), S::type_name());
sessions.remove(&session_id);
return Some(session);
}
// try to dequeue message
match sessions.dequeue_message(&session_id) {
Some((msg_sender, msg)) => {
is_queued_message = true;
sender = msg_sender;
message = msg;
},
None => return Some(session),
}
},
Err(Error::TooEarlyForRequest) => {
sessions.enqueue_message(&session_id, sender, message, is_queued_message);
return Some(session);
},
Err(err) => {
warn!(
target: "secretstore_net",
"{}: {} session error '{}' when processing message {} from node {}",
self.self_key_pair.public(),
S::type_name(),
err,
message,
sender);
session.on_session_error(self.self_key_pair.public(), err);
sessions.remove(&session_id);
return Some(session);
},
}
}
}
/// Get or insert new session.
fn prepare_session<S: ClusterSession, SC: ClusterSessionCreator<S, D>, D>(
&self,
sessions: &ClusterSessionsContainer<S, SC, D>,
sender: &NodeId,
message: &Message
) -> Result<Arc<S>, Error>
where
Message: IntoSessionId<S::Id>
{
fn requires_all_connections(message: &Message) -> bool {
match *message {
Message::Generation(_) => true,
Message::ShareAdd(_) => true,
Message::ServersSetChange(_) => true,
_ => false,
}
}
// get or create new session, if required
let session_id = message.into_session_id()
.expect("into_session_id fails for cluster messages only;
only session messages are passed to prepare_session;
qed");
let is_initialization_message = message.is_initialization_message();
let is_delegation_message = message.is_delegation_message();
match is_initialization_message || is_delegation_message {
false => sessions.get(&session_id, true).ok_or(Error::NoActiveSessionWithId),
true => {
let creation_data = SC::creation_data_from_message(&message)?;
let master = if is_initialization_message {
*sender
} else {
*self.self_key_pair.public()
};
let cluster = create_cluster_view(
self.self_key_pair.clone(),
self.connections.clone(),
requires_all_connections(&message))?;
let nonce = Some(message.session_nonce().ok_or(Error::InvalidMessage)?);
let exclusive = message.is_exclusive_session_message();
sessions.insert(cluster, master, session_id, nonce, exclusive, creation_data)
},
}
}
/// Process single cluster message from the connection.
fn process_cluster_message(&self, connection: Arc<Connection>, message: ClusterMessage) {
match message {
ClusterMessage::KeepAlive(_) => {
let msg = Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse {
session_id: None,
}));
connection.send_message(msg)
},
ClusterMessage::KeepAliveResponse(msg) => if let Some(session_id) = msg.session_id {
self.sessions.on_session_keep_alive(connection.node_id(), session_id.into());
},
_ => warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}",
self.self_key_pair.public(), message, connection.node_id(), connection.node_address()),
}
}
}
impl MessageProcessor for SessionsMessageProcessor {
fn process_disconnect(&self, node: &NodeId) {
self.sessions.on_connection_timeout(node);
}
fn process_connection_message(&self, connection: Arc<Connection>, message: Message) {
trace!(target: "secretstore_net", "{}: received message {} from {}",
self.self_key_pair.public(), message, connection.node_id());
// error is ignored as we only process errors on session level
match message {
Message::Generation(message) => self
.process_message(&self.sessions.generation_sessions, connection, Message::Generation(message))
.map(|_| ()).unwrap_or_default(),
Message::Encryption(message) => self
.process_message(&self.sessions.encryption_sessions, connection, Message::Encryption(message))
.map(|_| ()).unwrap_or_default(),
Message::Decryption(message) => self
.process_message(&self.sessions.decryption_sessions, connection, Message::Decryption(message))
.map(|_| ()).unwrap_or_default(),
Message::SchnorrSigning(message) => self
.process_message(&self.sessions.schnorr_signing_sessions, connection, Message::SchnorrSigning(message))
.map(|_| ()).unwrap_or_default(),
Message::EcdsaSigning(message) => self
.process_message(&self.sessions.ecdsa_signing_sessions, connection, Message::EcdsaSigning(message))
.map(|_| ()).unwrap_or_default(),
Message::ServersSetChange(message) => {
let message = Message::ServersSetChange(message);
let is_initialization_message = message.is_initialization_message();
let session = self.process_message(&self.sessions.admin_sessions, connection, message);
if is_initialization_message {
if let Some(session) = session {
self.servers_set_change_creator_connector
.set_key_servers_set_change_session(session.clone());
}
}
},
Message::KeyVersionNegotiation(message) => {
let session = self.process_message(
&self.sessions.negotiation_sessions, connection, Message::KeyVersionNegotiation(message));
self.try_continue_session(session);
},
Message::ShareAdd(message) => self.process_message(
&self.sessions.admin_sessions, connection, Message::ShareAdd(message))
.map(|_| ()).unwrap_or_default(),
Message::Cluster(message) => self.process_cluster_message(connection, message),
}
}
fn try_continue_session(
&self,
session: Option<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>
) {
if let Some(session) = session {
let meta = session.meta();
let is_master_node = meta.self_node_id == meta.master_node_id;
if is_master_node && session.is_finished() {
self.sessions.negotiation_sessions.remove(&session.id());
match session.wait() {
Ok(Some((version, master))) => match session.take_continue_action() {
Some(ContinueAction::Decrypt(
session, origin, is_shadow_decryption, is_broadcast_decryption
)) => {
let initialization_error = if self.self_key_pair.public() == &master {
session.initialize(
origin, version, is_shadow_decryption, is_broadcast_decryption)
} else {
session.delegate(
master, origin, version, is_shadow_decryption, is_broadcast_decryption)
};
if let Err(error) = initialization_error {
session.on_session_error(&meta.self_node_id, error);
self.sessions.decryption_sessions.remove(&session.id());
}
},
Some(ContinueAction::SchnorrSign(session, message_hash)) => {
let initialization_error = if self.self_key_pair.public() == &master {
session.initialize(version, message_hash)
} else {
session.delegate(master, version, message_hash)
};
if let Err(error) = initialization_error {
session.on_session_error(&meta.self_node_id, error);
self.sessions.schnorr_signing_sessions.remove(&session.id());
}
},
Some(ContinueAction::EcdsaSign(session, message_hash)) => {
let initialization_error = if self.self_key_pair.public() == &master {
session.initialize(version, message_hash)
} else {
session.delegate(master, version, message_hash)
};
if let Err(error) = initialization_error {
session.on_session_error(&meta.self_node_id, error);
self.sessions.ecdsa_signing_sessions.remove(&session.id());
}
},
None => (),
},
Ok(None) => unreachable!("is_master_node; session is finished;
negotiation version always finished with result on master;
qed"),
Err(error) => match session.take_continue_action() {
Some(ContinueAction::Decrypt(session, _, _, _)) => {
session.on_session_error(&meta.self_node_id, error);
self.sessions.decryption_sessions.remove(&session.id());
},
Some(ContinueAction::SchnorrSign(session, _)) => {
session.on_session_error(&meta.self_node_id, error);
self.sessions.schnorr_signing_sessions.remove(&session.id());
},
Some(ContinueAction::EcdsaSign(session, _)) => {
session.on_session_error(&meta.self_node_id, error);
self.sessions.ecdsa_signing_sessions.remove(&session.id());
},
None => (),
},
}
}
}
}
fn maintain_sessions(&self) {
self.sessions.stop_stalled_sessions();
self.sessions.sessions_keep_alive();
}
fn start_servers_set_change_session(&self, params: ServersSetChangeParams) -> Result<Arc<AdminSession>, Error> {
new_servers_set_change_session(
self.self_key_pair.clone(),
&*self.sessions,
self.connections.clone(),
self.servers_set_change_creator_connector.clone(),
params,
)
}
}
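
The loop in process_message above has one subtle branch: a message that arrives before its session is ready to consume it (Error::TooEarlyForRequest) is parked with enqueue_message, and after any successful on_message the queue is drained and replayed. A standalone model of that replay loop, with a stub session and error type in place of the crate's (names here are illustrative only):

use std::collections::VecDeque;

enum SessionError { TooEarly, Fatal }

struct StubSession { ready: bool, processed: Vec<&'static str> }

impl StubSession {
	fn on_message(&mut self, message: &'static str) -> Result<(), SessionError> {
		// until the init message arrives, everything else is premature
		if !self.ready && message != "init" {
			return Err(SessionError::TooEarly);
		}
		self.ready = true;
		self.processed.push(message);
		Ok(())
	}
}

// Feed one message; on TooEarly park it, after any success replay the queue.
fn process(session: &mut StubSession, queue: &mut VecDeque<&'static str>, message: &'static str) {
	match session.on_message(message) {
		Ok(()) => {
			while let Some(queued) = queue.pop_front() {
				if session.on_message(queued).is_err() { break; }
			}
		}
		Err(SessionError::TooEarly) => queue.push_back(message),
		Err(SessionError::Fatal) => { /* the real code removes the session here */ }
	}
}

fn main() {
	let mut session = StubSession { ready: false, processed: Vec::new() };
	let mut queue = VecDeque::new();
	process(&mut session, &mut queue, "round1"); // too early, parked
	process(&mut session, &mut queue, "init");   // initializes, then replays round1
	assert_eq!(session.processed, vec!["init", "round1"]);
}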