diff --git a/secret-store/src/key_server.rs b/secret-store/src/key_server.rs index 24d8c8f9e..3f03e7e25 100644 --- a/secret-store/src/key_server.rs +++ b/secret-store/src/key_server.rs @@ -23,11 +23,11 @@ use parity_runtime::Executor; use super::acl_storage::AclStorage; use super::key_storage::KeyStorage; use super::key_server_set::KeyServerSet; -use key_server_cluster::{math, ClusterCore}; +use key_server_cluster::{math, new_network_cluster}; use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer, NodeKeyPair}; use types::{Error, Public, RequestSignature, Requester, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow, ClusterConfiguration, MessageHash, EncryptedMessageSignature, NodeId}; -use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration}; +use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration, NetConnectionsManagerConfig}; /// Secret store key server implementation pub struct KeyServerImpl { @@ -175,20 +175,23 @@ impl KeyServerCore { pub fn new(config: &ClusterConfiguration, key_server_set: Arc, self_key_pair: Arc, acl_storage: Arc, key_storage: Arc, executor: Executor) -> Result { - let config = NetClusterConfiguration { + let cconfig = NetClusterConfiguration { self_key_pair: self_key_pair.clone(), - listen_address: (config.listener_address.address.clone(), config.listener_address.port), key_server_set: key_server_set, - allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes, acl_storage: acl_storage, key_storage: key_storage, - admin_public: config.admin_public.clone(), + admin_public: config.admin_public, + preserve_sessions: false, + }; + let net_config = NetConnectionsManagerConfig { + listen_address: (config.listener_address.address.clone(), config.listener_address.port), + allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes, auto_migrate_enabled: config.auto_migrate_enabled, }; - let cluster = ClusterCore::new(executor, config) - .and_then(|c| c.run().map(|_| c.client())) - .map_err(|err| Error::from(err))?; + let core = new_network_cluster(executor, cconfig, net_config)?; + let cluster = core.client(); + core.run()?; Ok(KeyServerCore { cluster, @@ -297,14 +300,14 @@ pub mod tests { let start = time::Instant::now(); let mut tried_reconnections = false; loop { - if key_servers.iter().all(|ks| ks.cluster().cluster_state().connected.len() == num_nodes - 1) { + if key_servers.iter().all(|ks| ks.cluster().is_fully_connected()) { break; } let old_tried_reconnections = tried_reconnections; let mut fully_connected = true; for key_server in &key_servers { - if key_server.cluster().cluster_state().connected.len() != num_nodes - 1 { + if !key_server.cluster().is_fully_connected() { fully_connected = false; if !old_tried_reconnections { tried_reconnections = true; @@ -434,7 +437,7 @@ pub mod tests { #[test] fn decryption_session_is_delegated_when_node_does_not_have_key_share() { let _ = ::env_logger::try_init(); - let (key_servers, _, runtime) = make_key_servers(6110, 3); + let (key_servers, key_storages, runtime) = make_key_servers(6110, 3); // generate document key let threshold = 0; @@ -445,7 +448,7 @@ pub mod tests { let generated_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap(); // remove key from node0 - key_servers[0].cluster().key_storage().remove(&document).unwrap(); + key_storages[0].remove(&document).unwrap(); // now let's try to retrieve key back by requesting it from node0, so that 
session must be delegated let retrieved_key = key_servers[0].restore_document_key(&document, &signature.into()).unwrap(); @@ -457,7 +460,7 @@ pub mod tests { #[test] fn schnorr_signing_session_is_delegated_when_node_does_not_have_key_share() { let _ = ::env_logger::try_init(); - let (key_servers, _, runtime) = make_key_servers(6114, 3); + let (key_servers, key_storages, runtime) = make_key_servers(6114, 3); let threshold = 1; // generate server key @@ -467,7 +470,7 @@ pub mod tests { let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), threshold).unwrap(); // remove key from node0 - key_servers[0].cluster().key_storage().remove(&server_key_id).unwrap(); + key_storages[0].remove(&server_key_id).unwrap(); // sign message let message_hash = H256::from(42); @@ -484,7 +487,7 @@ pub mod tests { #[test] fn ecdsa_signing_session_is_delegated_when_node_does_not_have_key_share() { let _ = ::env_logger::try_init(); - let (key_servers, _, runtime) = make_key_servers(6117, 4); + let (key_servers, key_storages, runtime) = make_key_servers(6117, 4); let threshold = 1; // generate server key @@ -494,7 +497,7 @@ pub mod tests { let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), threshold).unwrap(); // remove key from node0 - key_servers[0].cluster().key_storage().remove(&server_key_id).unwrap(); + key_storages[0].remove(&server_key_id).unwrap(); // sign message let message_hash = H256::random(); diff --git a/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs b/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs index 4d3de9500..18c635879 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs @@ -1045,148 +1045,204 @@ fn check_nodes_set(all_nodes_set: &BTreeSet, new_nodes_set: &BTreeSet, - pub key_storage: Arc, - pub session: SessionImpl, + pub trait AdminSessionAdapter { + const SIGN_NEW_NODES: bool; + + fn create( + meta: ShareChangeSessionMeta, + admin_public: Public, + all_nodes_set: BTreeSet, + ml: &ClusterMessageLoop, + idx: usize + ) -> S; } - struct MessageLoop { + pub struct MessageLoop { + pub ml: ClusterMessageLoop, pub admin_key_pair: KeyPair, pub original_key_pair: KeyPair, + pub original_key_version: H256, pub all_nodes_set: BTreeSet, pub new_nodes_set: BTreeSet, pub all_set_signature: Signature, pub new_set_signature: Signature, - pub nodes: BTreeMap, + pub sessions: BTreeMap, pub queue: VecDeque<(NodeId, NodeId, Message)>, } - fn create_session(mut meta: ShareChangeSessionMeta, self_node_id: NodeId, admin_public: Public, all_nodes_set: BTreeSet, cluster: Arc, key_storage: Arc) -> SessionImpl { - meta.self_node_id = self_node_id; - SessionImpl::new(SessionParams { - meta: meta, - all_nodes_set: all_nodes_set, - cluster: cluster, - key_storage: key_storage, - nonce: 1, - admin_public: admin_public, - migration_id: None, - }).unwrap() - } - - fn create_node(meta: ShareChangeSessionMeta, admin_public: Public, all_nodes_set: BTreeSet, node: &GenerationNode) -> Node { - for n in &all_nodes_set { - node.cluster.add_node(n.clone()); - } - - Node { - cluster: node.cluster.clone(), - key_storage: node.key_storage.clone(), - session: create_session(meta, node.session.node().clone(), admin_public, all_nodes_set, node.cluster.clone(), node.key_storage.clone()), + impl ::std::fmt::Debug for MessageLoop { + fn fmt(&self, f: &mut 
::std::fmt::Formatter) -> ::std::fmt::Result { + write!(f, "{:?}", self.ml) } } - impl MessageLoop { - pub fn new(gml: &GenerationMessageLoop, master_node_id: NodeId, original_key_pair: Option, new_nodes_ids: BTreeSet, removed_nodes_ids: BTreeSet, isolated_nodes_ids: BTreeSet) -> Self { + struct Adapter; + + impl AdminSessionAdapter for Adapter { + const SIGN_NEW_NODES: bool = true; + + fn create( + mut meta: ShareChangeSessionMeta, + admin_public: Public, + all_nodes_set: BTreeSet, + ml: &ClusterMessageLoop, + idx: usize + ) -> SessionImpl { + meta.self_node_id = *ml.node_key_pair(idx).public(); + SessionImpl::new(SessionParams { + meta: meta, + all_nodes_set: all_nodes_set, + cluster: ml.cluster(idx).view().unwrap(), + key_storage: ml.key_storage(idx).clone(), + nonce: 1, + admin_public: admin_public, + migration_id: None, + }).unwrap() + } + } + + impl MessageLoop { + pub fn with_gml>( + gml: GenerationMessageLoop, + master: NodeId, + add: Option>, + removed_nodes_ids: Option>, + isolated_nodes_ids: Option>, + ) -> Self { + // read generated key data + let original_key_pair = gml.compute_key_pair(); + let original_key_version = gml.key_version(); + Self::with_ml::( + gml.0, + original_key_pair, + original_key_version, + master, + add, + removed_nodes_ids, + isolated_nodes_ids) + } + + pub fn and_then>( + self, + master: NodeId, + add: Option>, + removed_nodes_ids: Option>, + isolated_nodes_ids: Option>, + ) -> Self { + Self::with_ml::( + self.ml, + self.original_key_pair, + self.original_key_version, + master, + add, + removed_nodes_ids, + isolated_nodes_ids, + ) + } + + pub fn with_ml>( + mut ml: ClusterMessageLoop, + original_key_pair: KeyPair, + original_key_version: H256, + master: NodeId, + add: Option>, + removed_nodes_ids: Option>, + isolated_nodes_ids: Option>, + ) -> Self { + let add = add.unwrap_or_default(); + let removed_nodes_ids = removed_nodes_ids.unwrap_or_default(); + let isolated_nodes_ids = isolated_nodes_ids.unwrap_or_default(); + // generate admin key pair let admin_key_pair = Random.generate().unwrap(); let admin_public = admin_key_pair.public().clone(); - // compute original secret key - let original_key_pair = original_key_pair.unwrap_or_else(|| gml.compute_key_pair(1)); - // all active nodes set - let mut all_nodes_set: BTreeSet<_> = gml.nodes.keys() + let mut all_nodes_set: BTreeSet<_> = ml.nodes().into_iter() .filter(|n| !isolated_nodes_ids.contains(n)) - .cloned() .collect(); // new nodes set includes all old nodes, except nodes being removed + all nodes being added let new_nodes_set: BTreeSet = all_nodes_set.iter().cloned() - .chain(new_nodes_ids.iter().cloned()) + .chain(add.iter().map(|kp| *kp.public())) .filter(|n| !removed_nodes_ids.contains(n)) .collect(); - all_nodes_set.extend(new_nodes_ids.iter().cloned()); + let mut old_set_to_sign = all_nodes_set.clone(); + all_nodes_set.extend(add.iter().map(|kp| *kp.public())); + if C::SIGN_NEW_NODES { + old_set_to_sign.extend(add.iter().map(|kp| *kp.public())); + } for isolated_node_id in &isolated_nodes_ids { all_nodes_set.remove(isolated_node_id); } let meta = ShareChangeSessionMeta { - self_node_id: master_node_id.clone(), - master_node_id: master_node_id.clone(), + self_node_id: master, + master_node_id: master, id: SessionId::default(), configured_nodes_count: all_nodes_set.len(), connected_nodes_count: all_nodes_set.len(), }; - let old_nodes = gml.nodes.iter().map(|n| create_node(meta.clone(), admin_public.clone(), all_nodes_set.clone(), n.1)); - let new_nodes = new_nodes_ids.into_iter().map(|new_node_id| 
{ - let new_node_cluster = Arc::new(DummyCluster::new(new_node_id.clone())); - for node in &all_nodes_set { - new_node_cluster.add_node(node.clone()); - } - - let new_node_key_storage = Arc::new(DummyKeyStorage::default()); - let new_node_session = create_session(meta.clone(), new_node_id, admin_public.clone(), all_nodes_set.clone(), new_node_cluster.clone(), new_node_key_storage.clone()); - Node { - cluster: new_node_cluster, - key_storage: new_node_key_storage, - session: new_node_session, - } - }); - let nodes: BTreeMap<_, _> = old_nodes.chain(new_nodes).map(|n| (n.session.core.meta.self_node_id.clone(), n)).collect(); - - for node in nodes.values() { - for isolated_node_id in &isolated_nodes_ids { - node.cluster.remove_node(isolated_node_id); - } + // include new nodes in the cluster + for node_key_pair in &add { + ml.include(Arc::new(PlainNodeKeyPair::new(node_key_pair.clone()))); + } + // isolate nodes from the cluster + for isolated_node_id in &isolated_nodes_ids { + let idx = ml.nodes().iter().position(|n| n == isolated_node_id).unwrap(); + ml.exclude(idx); } - let all_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&all_nodes_set)).unwrap(); + // prepare set of nodes + let sessions: BTreeMap<_, _> = (0..ml.nodes().len()) + .map(|idx| (ml.node(idx), C::create(meta.clone(), admin_public, all_nodes_set.clone(), &ml, idx))) + .collect(); + + let all_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&old_set_to_sign)).unwrap(); let new_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap(); MessageLoop { + ml, admin_key_pair: admin_key_pair, - original_key_pair: original_key_pair, + original_key_pair, + original_key_version, all_nodes_set: all_nodes_set.clone(), new_nodes_set: new_nodes_set, all_set_signature: all_set_signature, new_set_signature: new_set_signature, - nodes: nodes, + sessions, queue: Default::default(), } } pub fn run(&mut self) { + // run session until completion while let Some((from, to, message)) = self.take_message() { self.process_message((from, to, message)).unwrap(); } + + // check that all sessions have finished + assert!(self.sessions.values().all(|s| s.is_finished())); } pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { - self.nodes.values() - .filter_map(|n| n.cluster.take_message().map(|m| (n.session.core.meta.self_node_id.clone(), m.0, m.1))) - .nth(0) - .or_else(|| self.queue.pop_front()) + self.ml.take_message().or_else(|| self.queue.pop_front()) } pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { - match { match msg.2 { - Message::ServersSetChange(ref message) => self.nodes[&msg.1].session.process_message(&msg.0, message), - _ => unreachable!("only servers set change messages are expected"), - } } { + match self.sessions[&msg.1].on_message(&msg.0, &msg.2) { Ok(_) => Ok(()), Err(Error::TooEarlyForRequest) => { self.queue.push_back(msg); @@ -1195,213 +1251,201 @@ pub mod tests { Err(err) => Err(err), } } + + /// This only works for schemes where threshold = 1 + pub fn check_secret_is_preserved<'a, I: IntoIterator>(&self, nodes: I) { + let nodes: Vec<_> = nodes.into_iter().collect(); + let key_storages: Vec<_> = nodes.iter().map(|n| self.ml.key_storage_of(n)).collect(); + let n = nodes.len(); + let document_secret_plain = math::generate_random_point().unwrap(); + for n1 in 0..n { + for n2 in n1+1..n { + let share1 = key_storages[n1].get(&SessionId::default()).unwrap(); + let share2 = 
key_storages[n2].get(&SessionId::default()).unwrap(); + + let id_number1 = share1.as_ref().unwrap().last_version().unwrap().id_numbers[nodes[n1]].clone(); + let id_number2 = share1.as_ref().unwrap().last_version().unwrap().id_numbers[nodes[n2]].clone(); + // now encrypt and decrypt data + let (document_secret_decrypted, document_secret_decrypted_test) = + math::tests::do_encryption_and_decryption(1, + self.original_key_pair.public(), + &[id_number1, id_number2], + &[share1.unwrap().last_version().unwrap().secret_share.clone(), + share2.unwrap().last_version().unwrap().secret_share.clone()], + Some(self.original_key_pair.secret()), + document_secret_plain.clone()); + + assert_eq!(document_secret_plain, document_secret_decrypted_test); + assert_eq!(document_secret_plain, document_secret_decrypted); + } + } + } } - pub fn generate_key(threshold: usize, nodes_ids: BTreeSet) -> GenerationMessageLoop { - let mut gml = GenerationMessageLoop::with_nodes_ids(nodes_ids); - gml.master().initialize(Default::default(), Default::default(), false, threshold, gml.nodes.keys().cloned().collect::>().into()).unwrap(); - while let Some((from, to, message)) = gml.take_message() { - gml.process_message((from, to, message)).unwrap(); + impl MessageLoop { + pub fn run_at(mut self, master: NodeId) -> Self { + self.sessions[&master].initialize( + self.new_nodes_set.clone(), + self.all_set_signature.clone(), + self.new_set_signature.clone()).unwrap(); + self.run(); + self } + } + + pub fn generate_key(num_nodes: usize, threshold: usize) -> GenerationMessageLoop { + let gml = GenerationMessageLoop::new(num_nodes).init(threshold).unwrap(); + gml.0.loop_until(|| gml.0.is_empty()); gml } #[test] fn node_added_using_servers_set_change() { // initial 2-of-3 session - let gml = generate_key(1, generate_nodes_ids(3)); - let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap(); + let gml = generate_key(3, 1); - // insert 1 node so that it becames 2-of-4 session - let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect(); - let mut ml = MessageLoop::new(&gml, master_node_id, None, nodes_to_add, BTreeSet::new(), BTreeSet::new()); - ml.nodes[&master_node_id].session.initialize(ml.nodes.keys().cloned().collect(), ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); - ml.run(); + // add 1 node so that it becomes a 2-of-4 session + let add = vec![Random.generate().unwrap()]; + let master = gml.0.node(0); + let ml = MessageLoop::with_gml::(gml, master, Some(add), None, None).run_at(master); // try to recover secret for every possible combination of nodes && check that secret is the same - check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter().map(|(k, v)| (k.clone(), v.key_storage.clone())).collect()); - - // check that all sessions have finished - assert!(ml.nodes.values().all(|n| n.session.is_finished())); + ml.check_secret_is_preserved(ml.sessions.keys()); } #[test] fn node_added_using_server_set_change_from_this_node() { // initial 2-of-3 session - let gml = generate_key(1, generate_nodes_ids(3)); + let gml = generate_key(3, 1); // insert 1 node so that it becames 2-of-4 session // master node is the node we are adding => // 1) add session is delegated to one of old nodes // 2) key share is pushed to new node // 3) delegated session is returned back to added node - let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect(); - let master_node_id = nodes_to_add.iter().cloned().nth(0).unwrap(); - let mut ml
= MessageLoop::new(&gml, master_node_id, None, nodes_to_add, BTreeSet::new(), BTreeSet::new()); - ml.nodes[&master_node_id].session.initialize(ml.nodes.keys().cloned().collect(), ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); - ml.run(); + let add = vec![Random.generate().unwrap()]; + let master = add[0].public().clone(); + let ml = MessageLoop::with_gml::(gml, master, Some(add), None, None).run_at(master); - // check that all sessions have finished - assert!(ml.nodes.values().all(|n| n.session.is_finished())); + // try to recover secret for every possible combination of nodes && check that secret is the same + ml.check_secret_is_preserved(ml.sessions.keys()); } #[test] fn node_moved_using_servers_set_change() { // initial 2-of-3 session - let gml = generate_key(1, generate_nodes_ids(3)); - let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap(); + let gml = generate_key(3, 1); // remove 1 node && insert 1 node so that one share is moved - let nodes_to_remove: BTreeSet<_> = gml.nodes.keys().cloned().skip(1).take(1).collect(); - let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect(); - let mut ml = MessageLoop::new(&gml, master_node_id, None, nodes_to_add.clone(), nodes_to_remove.clone(), BTreeSet::new()); - let new_nodes_set = ml.nodes.keys().cloned().filter(|n| !nodes_to_remove.contains(n)).collect(); - ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); - ml.run(); + let master = gml.0.node(0); + let remove: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect(); + let add = vec![Random.generate().unwrap()]; + let ml = MessageLoop::with_gml::(gml, master, Some(add), Some(remove.clone()), None).run_at(master); // check that secret is still the same as before moving the share - check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter() - .filter(|&(k, _)| !nodes_to_remove.contains(k)) - .map(|(k, v)| (k.clone(), v.key_storage.clone())) - .collect()); + ml.check_secret_is_preserved(ml.sessions.keys() + .filter(|k| !remove.contains(k))); // check that all removed nodes do not own key share - assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_none())); - - // check that all sessions have finished - assert!(ml.nodes.values().all(|n| n.session.is_finished())); + assert!(ml.sessions.keys().filter(|k| remove.contains(k)) + .all(|k| ml.ml.key_storage_of(k).get(&SessionId::default()).unwrap().is_none())); } #[test] fn node_removed_using_servers_set_change() { // initial 2-of-3 session - let gml = generate_key(1, generate_nodes_ids(3)); - let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap(); + let gml = generate_key(3, 1); // remove 1 node so that session becames 2-of-2 - let nodes_to_remove: BTreeSet<_> = gml.nodes.keys().cloned().skip(1).take(1).collect(); - let new_nodes_set: BTreeSet<_> = gml.nodes.keys().cloned().filter(|n| !nodes_to_remove.contains(&n)).collect(); - let mut ml = MessageLoop::new(&gml, master_node_id, None, BTreeSet::new(), nodes_to_remove.clone(), BTreeSet::new()); - ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); - ml.run(); + let remove: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect(); + let master = gml.0.node(0); + let ml = MessageLoop::with_gml::(gml, master, None, Some(remove.clone()), None).run_at(master); // try to
recover secret for every possible combination of nodes && check that secret is the same - check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter() - .filter(|&(k, _)| !nodes_to_remove.contains(k)) - .map(|(k, v)| (k.clone(), v.key_storage.clone())) - .collect()); + ml.check_secret_is_preserved(ml.sessions.keys() + .filter(|k| !remove.contains(k))); // check that all removed nodes do not own key share - assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_none())); - - // check that all sessions have finished - assert!(ml.nodes.values().all(|n| n.session.is_finished())); + assert!(ml.sessions.keys().filter(|k| remove.contains(k)) + .all(|k| ml.ml.key_storage_of(k).get(&SessionId::default()).unwrap().is_none())); } #[test] fn isolated_node_removed_using_servers_set_change() { // initial 2-of-3 session - let gml = generate_key(1, generate_nodes_ids(3)); - let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap(); + let gml = generate_key(3, 1); // remove 1 node so that session becames 2-of-2 - let nodes_to_isolate: BTreeSet<_> = gml.nodes.keys().cloned().skip(1).take(1).collect(); - let new_nodes_set: BTreeSet<_> = gml.nodes.keys().cloned().filter(|n| !nodes_to_isolate.contains(&n)).collect(); - let mut ml = MessageLoop::new(&gml, master_node_id, None, BTreeSet::new(), BTreeSet::new(), nodes_to_isolate.clone()); - ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); - ml.run(); + let isolate: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect(); + let master = gml.0.node(0); + let ml = MessageLoop::with_gml::(gml, master, None, None, Some(isolate.clone())) + .run_at(master); // try to recover secret for every possible combination of nodes && check that secret is the same - check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter() - .filter(|&(k, _)| !nodes_to_isolate.contains(k)) - .map(|(k, v)| (k.clone(), v.key_storage.clone())) - .collect()); + ml.check_secret_is_preserved(ml.sessions.keys() + .filter(|k| !isolate.contains(k))); // check that all isolated nodes still OWN key share - assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_isolate.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_some())); - - // check that all sessions have finished - assert!(ml.nodes.iter().filter(|&(k, _)| !nodes_to_isolate.contains(k)).all(|(_, v)| v.session.is_finished())); + assert!(ml.sessions.keys().filter(|k| isolate.contains(k)) + .all(|k| ml.ml.key_storage_of(k).get(&SessionId::default()).unwrap().is_some())); } #[test] fn having_less_than_required_nodes_after_change_does_not_fail_change_session() { // initial 2-of-3 session - let gml = generate_key(1, generate_nodes_ids(3)); - let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap(); + let gml = generate_key(3, 1); - // remove 2 nodes so that key becomes irrecoverable (make sure the session is completed, even though key is irrecoverable) - let nodes_to_remove: BTreeSet<_> = gml.nodes.keys().cloned().skip(1).take(2).collect(); - let new_nodes_set: BTreeSet<_> = gml.nodes.keys().cloned().filter(|n| !nodes_to_remove.contains(&n)).collect(); - let mut ml = MessageLoop::new(&gml, master_node_id, None, BTreeSet::new(), nodes_to_remove.clone(), BTreeSet::new()); - ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); - ml.run(); + // remove 2 nodes so that 
key becomes irrecoverable (make sure the session is completed + // even though key is irrecoverable) + let remove: BTreeSet<_> = gml.0.nodes().into_iter().skip(1).take(2).collect(); + let master = gml.0.node(0); + let ml = MessageLoop::with_gml::(gml, master, None, Some(remove.clone()), None).run_at(master); // check that all removed nodes do not own key share - assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_none())); - - // check that all sessions have finished - assert!(ml.nodes.values().all(|n| n.session.is_finished())); + assert!(ml.sessions.keys().filter(|k| remove.contains(k)) + .all(|k| ml.ml.key_storage_of(k).get(&SessionId::default()).unwrap().is_none())); // and now let's add new node (make sure the session is completed, even though key is still irrecoverable) // isolated here are not actually isolated, but removed on the previous step - let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect(); - let new_nodes_set: BTreeSet<_> = gml.nodes.keys().cloned().filter(|n| !nodes_to_remove.contains(&n)) - .chain(nodes_to_add.iter().cloned()) - .collect(); - let master_node_id = nodes_to_add.iter().cloned().nth(0).unwrap(); - let mut ml = MessageLoop::new(&gml, master_node_id, Some(ml.original_key_pair.clone()), nodes_to_add.clone(), BTreeSet::new(), nodes_to_remove.clone()); - ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); - ml.run(); + let add = vec![Random.generate().unwrap()]; + let master = add[0].public().clone(); + let ml = ml.and_then::(master, Some(add.clone()), None, Some(remove)).run_at(master); // check that all added nodes do not own key share (there's not enough nodes to run share add session) - assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_add.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_none())); - - // check that all sessions have finished - assert!(ml.nodes.iter().filter(|&(k, _)| !nodes_to_remove.contains(k)).all(|(_, n)| n.session.is_finished())); + assert!(ml.sessions.keys().filter(|k| add.iter().any(|n| n.public() == *k)) + .all(|k| ml.ml.key_storage_of(k).get(&SessionId::default()).unwrap().is_none())); } #[test] fn removing_node_from_cluster_of_2_works() { // initial 2-of-2 session - let gml = generate_key(1, generate_nodes_ids(2)); - let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap(); + let gml = generate_key(2, 1); - // make 2nd node isolated so that key becomes irrecoverable (make sure the session is completed, even though key is irrecoverable) - let nodes_to_isolate: BTreeSet<_> = gml.nodes.keys().cloned().skip(1).take(1).collect(); - let new_nodes_set: BTreeSet<_> = gml.nodes.keys().cloned().filter(|n| !nodes_to_isolate.contains(&n)).collect(); - let mut ml = MessageLoop::new(&gml, master_node_id, None, BTreeSet::new(), BTreeSet::new(), nodes_to_isolate.clone()); - ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); - ml.run(); - - // check that session on master node has completed (session on 2nd node is not even started in network mode) - assert!(ml.nodes.values().take(1).all(|n| n.session.is_finished())); + // make 2nd node isolated so that key becomes irrecoverable (make sure the session is completed, + // even though key is irrecoverable) + let isolate: BTreeSet<_> = 
gml.0.nodes().into_iter().skip(1).take(1).collect(); + let master = gml.0.node(0); + MessageLoop::with_gml::(gml, master, None, None, Some(isolate)).run_at(master); } #[test] fn adding_node_that_has_lost_its_database_works() { // initial 2-of-2 session - let gml = generate_key(1, generate_nodes_ids(2)); - let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap(); + let gml = generate_key(2, 1); // insert 1 node so that it becames 2-of-3 session - let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect(); - let mut ml = MessageLoop::new(&gml, master_node_id, None, nodes_to_add.clone(), BTreeSet::new(), BTreeSet::new()); - ml.nodes[&master_node_id].session.initialize(ml.nodes.keys().cloned().collect(), ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); - ml.run(); + let add = vec![Random.generate().unwrap()]; + let master = gml.0.node(0); + let ml = MessageLoop::with_gml::(gml, master, Some(add.clone()), None, None) + .run_at(master); // now let's say new node has lost its db and we're trying to join it again - ml.nodes[nodes_to_add.iter().nth(0).unwrap()].key_storage.clear().unwrap(); + ml.ml.key_storage_of(add[0].public()).clear().unwrap(); // this time old nodes have version, where new node is mentioned, but it doesn't report it when negotiating - let mut ml = MessageLoop::new(&gml, master_node_id, None, nodes_to_add, BTreeSet::new(), BTreeSet::new()); - ml.nodes[&master_node_id].session.initialize(ml.nodes.keys().cloned().collect(), ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); - ml.run(); + let ml = ml.and_then::(master, Some(add), None, None).run_at(master); // try to recover secret for every possible combination of nodes && check that secret is the same - check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter().map(|(k, v)| (k.clone(), v.key_storage.clone())).collect()); - - // check that all sessions have finished - assert!(ml.nodes.values().all(|n| n.session.is_finished())); + ml.check_secret_is_preserved(ml.sessions.keys()); } } diff --git a/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs b/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs index 0c771006a..e2af7bc7f 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs @@ -318,6 +318,7 @@ impl SessionImpl where T: SessionTransport { new_set_signature), consensus_transport: consensus_transport, })?; + consensus_session.initialize(new_nodes_map.keys().cloned().collect())?; // update data @@ -881,405 +882,197 @@ impl SessionTransport for IsolatedSessionTransport { #[cfg(test)] pub mod tests { - use std::sync::Arc; - use std::collections::{VecDeque, BTreeMap, BTreeSet, HashSet}; - use ethkey::{Random, Generator, Public, KeyPair, Signature, sign}; - use ethereum_types::H256; - use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage}; - use key_server_cluster::cluster::Cluster; - use key_server_cluster::cluster::tests::DummyCluster; - use key_server_cluster::cluster_sessions::ClusterSession; - use key_server_cluster::generation_session::tests::{Node as GenerationNode, generate_nodes_ids}; - use key_server_cluster::math; - use key_server_cluster::message::Message; - use key_server_cluster::servers_set_change_session::tests::generate_key; - use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash; + use std::collections::BTreeSet; + use 
ethkey::{Random, Generator, Public}; + use key_server_cluster::{NodeId, Error, KeyStorage, NodeKeyPair}; + use key_server_cluster::cluster::tests::MessageLoop as ClusterMessageLoop; + use key_server_cluster::servers_set_change_session::tests::{MessageLoop, AdminSessionAdapter, generate_key}; use key_server_cluster::admin_sessions::ShareChangeSessionMeta; use super::{SessionImpl, SessionParams, IsolatedSessionTransport}; - struct Node { - pub cluster: Arc, - pub key_storage: Arc, - pub session: SessionImpl, - } + struct Adapter; - struct MessageLoop { - pub admin_key_pair: KeyPair, - pub original_key_pair: KeyPair, - pub old_nodes_set: BTreeSet, - pub new_nodes_set: BTreeSet, - pub old_set_signature: Signature, - pub new_set_signature: Signature, - pub nodes: BTreeMap, - pub queue: VecDeque<(NodeId, NodeId, Message)>, - pub version: H256, - } + impl AdminSessionAdapter> for Adapter { + const SIGN_NEW_NODES: bool = false; - fn create_session(mut meta: ShareChangeSessionMeta, admin_public: Public, self_node_id: NodeId, cluster: Arc, key_storage: Arc) -> SessionImpl { - let session_id = meta.id.clone(); - meta.self_node_id = self_node_id; - let key_version = key_storage.get(&session_id).unwrap().map(|ks| ks.versions.iter().last().unwrap().hash.clone()); + fn create( + mut meta: ShareChangeSessionMeta, + admin_public: Public, + _: BTreeSet, + ml: &ClusterMessageLoop, + idx: usize + ) -> SessionImpl { + let key_storage = ml.key_storage(idx).clone(); + let key_version = key_storage.get(&meta.id).unwrap().map(|ks| ks.last_version().unwrap().hash); - SessionImpl::new(SessionParams { - meta: meta.clone(), - transport: IsolatedSessionTransport::new(session_id, key_version, 1, cluster), - key_storage: key_storage, - admin_public: Some(admin_public), - nonce: 1, - }).unwrap() - } - - fn create_node(meta: ShareChangeSessionMeta, admin_public: Public, node: GenerationNode, added_nodes: &BTreeSet) -> Node { - node.cluster.add_nodes(added_nodes.iter().cloned()); - Node { - cluster: node.cluster.clone(), - key_storage: node.key_storage.clone(), - session: create_session(meta, admin_public, node.session.node().clone(), node.cluster, node.key_storage), + meta.self_node_id = *ml.node_key_pair(idx).public(); + SessionImpl::new(SessionParams { + meta: meta.clone(), + transport: IsolatedSessionTransport::new(meta.id, key_version, 1, ml.cluster(idx).view().unwrap()), + key_storage, + admin_public: Some(admin_public), + nonce: 1, + }).unwrap() } } - /// This only works for schemes where threshold = 1 - pub fn check_secret_is_preserved(joint_key_pair: KeyPair, nodes: BTreeMap>) { - let n = nodes.len(); - let document_secret_plain = math::generate_random_point().unwrap(); - for n1 in 0..n { - for n2 in n1+1..n { - let share1 = nodes.values().nth(n1).unwrap().get(&SessionId::default()).unwrap(); - let share2 = nodes.values().nth(n2).unwrap().get(&SessionId::default()).unwrap(); - let id_number1 = share1.as_ref().unwrap().last_version().unwrap().id_numbers[nodes.keys().nth(n1).unwrap()].clone(); - let id_number2 = share1.as_ref().unwrap().last_version().unwrap().id_numbers[nodes.keys().nth(n2).unwrap()].clone(); - - // now encrypt and decrypt data - let (document_secret_decrypted, document_secret_decrypted_test) = - math::tests::do_encryption_and_decryption(1, - joint_key_pair.public(), - &[id_number1, id_number2], - &[share1.unwrap().last_version().unwrap().secret_share.clone(), - share2.unwrap().last_version().unwrap().secret_share.clone()], - Some(joint_key_pair.secret()), - document_secret_plain.clone()); - - 
assert_eq!(document_secret_plain, document_secret_decrypted_test); - assert_eq!(document_secret_plain, document_secret_decrypted); - } - } - } - - impl MessageLoop { - pub fn new(t: usize, master_node_id: NodeId, old_nodes_set: BTreeSet, new_nodes_set: BTreeSet) -> Self { - // generate admin key pair - let admin_key_pair = Random.generate().unwrap(); - let admin_public = admin_key_pair.public().clone(); - - // run initial generation session - let gml = generate_key(t, old_nodes_set.clone()); - - // compute original secret key - let version = gml.nodes.values().nth(0).unwrap().key_storage.get(&Default::default()).unwrap().unwrap().versions[0].hash.clone(); - let original_key_pair = gml.compute_key_pair(t); - - // prepare sessions on all nodes - let meta = ShareChangeSessionMeta { - id: SessionId::default(), - self_node_id: NodeId::default(), - master_node_id: master_node_id, - configured_nodes_count: new_nodes_set.iter().chain(old_nodes_set.iter()).collect::>().len(), - connected_nodes_count: new_nodes_set.iter().chain(old_nodes_set.iter()).collect::>().len(), - }; - let new_nodes = new_nodes_set.iter() - .filter(|n| !old_nodes_set.contains(&n)) - .map(|new_node_id| { - let new_node_cluster = Arc::new(DummyCluster::new(new_node_id.clone())); - let new_node_key_storage = Arc::new(DummyKeyStorage::default()); - let new_node_session = create_session(meta.clone(), admin_public.clone(), new_node_id.clone(), new_node_cluster.clone(), new_node_key_storage.clone()); - new_node_cluster.add_nodes(new_nodes_set.iter().cloned()); - Node { - cluster: new_node_cluster, - key_storage: new_node_key_storage, - session: new_node_session, - } - }); - let old_nodes = gml.nodes.into_iter().map(|gn| create_node(meta.clone(), admin_public.clone(), gn.1, &new_nodes_set)); - let nodes = old_nodes.chain(new_nodes).map(|n| (n.session.core.meta.self_node_id.clone(), n)).collect(); - - let old_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&old_nodes_set)).unwrap(); - let new_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap(); - MessageLoop { - admin_key_pair: admin_key_pair, - original_key_pair: original_key_pair, - version: version, - old_nodes_set: old_nodes_set.clone(), - new_nodes_set: new_nodes_set.clone(), - old_set_signature: old_set_signature, - new_set_signature: new_set_signature, - nodes: nodes, - queue: Default::default(), - } + impl MessageLoop> { + pub fn init_at(self, master: NodeId) -> Result { + self.sessions[&master].initialize( + Some(self.original_key_version), + Some(self.new_nodes_set.clone()), + Some(self.all_set_signature.clone()), + Some(self.new_set_signature.clone()))?; + Ok(self) } - pub fn new_additional(master_node_id: NodeId, ml: MessageLoop, new_nodes_set: BTreeSet) -> Self { - let version = ml.nodes.values().nth(0).unwrap().key_storage.get(&Default::default()).unwrap().unwrap().versions.last().unwrap().hash.clone(); - - // prepare sessions on all nodes - let meta = ShareChangeSessionMeta { - id: SessionId::default(), - self_node_id: NodeId::default(), - master_node_id: master_node_id, - configured_nodes_count: new_nodes_set.iter().chain(ml.nodes.keys()).collect::>().len(), - connected_nodes_count: new_nodes_set.iter().chain(ml.nodes.keys()).collect::>().len(), - }; - let old_nodes_set = ml.nodes.keys().cloned().collect(); - let nodes = ml.nodes.iter() - .map(|(n, nd)| { - let node_cluster = nd.cluster.clone(); - let node_key_storage = nd.key_storage.clone(); - let node_session = create_session(meta.clone(), 
ml.admin_key_pair.public().clone(), n.clone(), node_cluster.clone(), node_key_storage.clone()); - node_cluster.add_nodes(new_nodes_set.iter().cloned()); - (n.clone(), Node { - cluster: node_cluster, - key_storage: node_key_storage, - session: node_session, - }) - }).chain(new_nodes_set.difference(&old_nodes_set).map(|n| { - let new_node_cluster = Arc::new(DummyCluster::new(n.clone())); - let new_node_key_storage = Arc::new(DummyKeyStorage::default()); - let new_node_session = create_session(meta.clone(), ml.admin_key_pair.public().clone(), n.clone(), new_node_cluster.clone(), new_node_key_storage.clone()); - new_node_cluster.add_nodes(new_nodes_set.iter().cloned()); - (n.clone(), Node { - cluster: new_node_cluster, - key_storage: new_node_key_storage, - session: new_node_session, - }) - })).collect(); - - let old_set_signature = sign(ml.admin_key_pair.secret(), &ordered_nodes_hash(&old_nodes_set)).unwrap(); - let new_set_signature = sign(ml.admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap(); - MessageLoop { - admin_key_pair: ml.admin_key_pair, - original_key_pair: ml.original_key_pair, - version: version, - old_nodes_set: old_nodes_set.clone(), - new_nodes_set: new_nodes_set.clone(), - old_set_signature: old_set_signature, - new_set_signature: new_set_signature, - nodes: nodes, - queue: Default::default(), - } - } - - pub fn update_signature(&mut self) { - self.old_set_signature = sign(self.admin_key_pair.secret(), &ordered_nodes_hash(&self.old_nodes_set)).unwrap(); - self.new_set_signature = sign(self.admin_key_pair.secret(), &ordered_nodes_hash(&self.new_nodes_set)).unwrap(); - } - - pub fn run(&mut self) { - while let Some((from, to, message)) = self.take_message() { - self.process_message((from, to, message)).unwrap(); - } - } - - pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { - self.nodes.values() - .filter_map(|n| n.cluster.take_message().map(|m| (n.session.core.meta.self_node_id.clone(), m.0, m.1))) - .nth(0) - .or_else(|| self.queue.pop_front()) - } - - pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { - match { match msg.2 { - Message::ShareAdd(ref message) => - self.nodes[&msg.1].session.process_message(&msg.0, message), - _ => unreachable!("only servers set change messages are expected"), - } } { - Ok(_) => Ok(()), - Err(Error::TooEarlyForRequest) => { - self.queue.push_back(msg); - Ok(()) - }, - Err(err) => Err(err), - } + pub fn run_at(self, master: NodeId) -> Result { + let mut ml = self.init_at(master)?; + ml.run(); + Ok(ml) } } #[test] fn node_add_fails_if_nodes_removed() { - let old_nodes_set = generate_nodes_ids(3); - let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); - let node_to_remove_id = old_nodes_set.iter().cloned().nth(1).unwrap(); - let mut new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(generate_nodes_ids(1)).collect(); - new_nodes_set.remove(&node_to_remove_id); - let ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone()); - assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set), - Some(ml.old_set_signature.clone()), - Some(ml.new_set_signature.clone()) - ).unwrap_err(), Error::ConsensusUnreachable); + // initial 2-of-3 session + let gml = generate_key(3, 1); + + // try to remove 1 node + let add = vec![Random.generate().unwrap()]; + let remove: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect(); + let master = gml.0.node(0); + assert_eq!(MessageLoop::with_gml::(gml, 
master, Some(add), Some(remove), None) + .run_at(master).unwrap_err(), Error::ConsensusUnreachable); } #[test] fn node_add_fails_if_no_nodes_added() { - let old_nodes_set = generate_nodes_ids(3); - let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); - let new_nodes_set = old_nodes_set.clone(); - let ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone()); - assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set), - Some(ml.old_set_signature.clone()), - Some(ml.new_set_signature.clone()) - ).unwrap_err(), Error::ConsensusUnreachable); + // initial 2-of-3 session + let gml = generate_key(3, 1); + + // try to add 0 nodes + let add = vec![]; + let master = gml.0.node(0); + assert_eq!(MessageLoop::with_gml::(gml, master, Some(add), None, None) + .run_at(master).unwrap_err(), Error::ConsensusUnreachable); } #[test] fn node_add_fails_if_started_on_adding_node() { - let old_nodes_set = generate_nodes_ids(3); - let nodes_to_add_set = generate_nodes_ids(1); - let master_node_id = nodes_to_add_set.iter().cloned().nth(0).unwrap(); - let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(nodes_to_add_set.into_iter()).collect(); - let ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone()); - assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set), - Some(ml.old_set_signature.clone()), - Some(ml.new_set_signature.clone()) - ).unwrap_err(), Error::ServerKeyIsNotFound); + // initial 2-of-3 session + let gml = generate_key(3, 1); + + // try to add 1 node using this node as a master node + let add = vec![Random.generate().unwrap()]; + let master = *add[0].public(); + assert_eq!(MessageLoop::with_gml::(gml, master, Some(add), None, None) + .run_at(master).unwrap_err(), Error::ServerKeyIsNotFound); } #[test] fn node_add_fails_if_initialized_twice() { - let old_nodes_set = generate_nodes_ids(3); - let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); - let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(generate_nodes_ids(1)).collect(); - let ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone()); - assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set.clone()), - Some(ml.old_set_signature.clone()), - Some(ml.new_set_signature.clone()) - ), Ok(())); - assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set), - Some(ml.old_set_signature.clone()), - Some(ml.new_set_signature.clone()) - ), Err(Error::InvalidStateForRequest)); + // initial 2-of-3 session + let gml = generate_key(3, 1); + + // try to initialize the session twice + let add = vec![Random.generate().unwrap()]; + let master = gml.0.node(0); + assert_eq!(MessageLoop::with_gml::(gml, master, Some(add), None, None) + .init_at(master).unwrap() + .init_at(master).unwrap_err(), Error::InvalidStateForRequest); } #[test] fn node_add_fails_if_started_without_signatures() { - let old_nodes_set = generate_nodes_ids(3); - let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); - let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(generate_nodes_ids(1)).collect(); - let ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone()); - assert_eq!(ml.nodes[&master_node_id].session.initialize(None, None, None, None), Err(Error::InvalidMessage)); + // initial 2-of-3 session + let gml =
generate_key(3, 1); + + // try to initialize the session without signatures + let add = vec![Random.generate().unwrap()]; + let master = gml.0.node(0); + assert_eq!(MessageLoop::with_gml::(gml, master, Some(add), None, None) + .sessions[&master] + .initialize(None, None, None, None).unwrap_err(), Error::InvalidMessage); } #[test] fn nodes_added_using_share_add() { let test_cases = vec![(3, 1), (3, 3)]; - for (n, nodes_to_add) in test_cases { - // generate key && prepare ShareAdd sessions - let old_nodes_set = generate_nodes_ids(n); - let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(generate_nodes_ids(nodes_to_add)).collect(); - let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); - let mut ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone()); + for (n, add) in test_cases { + // generate key + let gml = generate_key(n, 1); - // initialize session on master node && run to completion - ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set), - Some(ml.old_set_signature.clone()), - Some(ml.new_set_signature.clone())).unwrap(); - ml.run(); - - // check that session has completed on all nodes - assert!(ml.nodes.values().all(|n| n.session.is_finished())); + // run share add session + let add = (0..add).map(|_| Random.generate().unwrap()).collect(); + let master = gml.0.node(0); + let ml = MessageLoop::with_gml::(gml, master, Some(add), None, None) + .run_at(master).unwrap(); // check that secret is still the same as before adding the share - check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter().map(|(k, v)| (k.clone(), v.key_storage.clone())).collect()); + ml.check_secret_is_preserved(ml.sessions.keys()); } } #[test] fn nodes_added_using_share_add_with_isolated_nodes() { let (n, add) = (3, 3); - // generate key && prepare ShareAdd sessions - let old_nodes_set = generate_nodes_ids(n); - let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(generate_nodes_ids(nodes_to_add)).collect(); - let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); - let isolated_node_id = old_nodes_set.iter().cloned().nth(1).unwrap(); - let mut ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone()); + // generate key + let gml = generate_key(n, 1); - // now let's isolate 1 of 3 nodes owning key share - ml.nodes.remove(&isolated_node_id); - ml.old_nodes_set.remove(&isolated_node_id); - ml.new_nodes_set.remove(&isolated_node_id); - for (_, node) in ml.nodes.iter_mut() { - node.cluster.remove_node(&isolated_node_id); - } - ml.update_signature(); - - // initialize session on master node && run to completion - ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set), - Some(ml.old_set_signature.clone()), - Some(ml.new_set_signature.clone())).unwrap(); - ml.run(); - - // check that session has completed on all nodes - assert!(ml.nodes.values().all(|n| n.session.is_finished())); + // run share add session + let master = gml.0.node(0); + let node_to_isolate = gml.0.node(1); + let add = (0..add).map(|_| Random.generate().unwrap()).collect(); + let isolate = ::std::iter::once(node_to_isolate).collect(); + let ml = MessageLoop::with_gml::(gml, master, Some(add), None, Some(isolate)) + .run_at(master).unwrap(); // check that secret is still the same as before adding the share - check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes - .iter() - .map(|(k, v)| (k.clone(),
v.key_storage.clone())) - .collect()); + ml.check_secret_is_preserved(ml.sessions.keys()); } #[test] fn nodes_add_to_the_node_with_obsolete_version() { - let (n, nodes_to_add) = (3, 3); + let (n, add) = (3, 3); - // generate key (2-of-3) && prepare ShareAdd sessions - let old_nodes_set = generate_nodes_ids(n); - let newest_nodes_set = generate_nodes_ids(nodes_to_add); - let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(newest_nodes_set.clone()).collect(); - let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); - let isolated_node_id = old_nodes_set.iter().cloned().nth(1).unwrap(); - let oldest_nodes_set: BTreeSet<_> = old_nodes_set.iter().filter(|n| **n != isolated_node_id).cloned().collect(); - let mut ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set.clone(), new_nodes_set.clone()); - let isolated_key_storage = ml.nodes[&isolated_node_id].key_storage.clone(); + // generate key + let gml = generate_key(n, 1); - // now let's isolate 1 of 3 nodes owning key share - ml.nodes.remove(&isolated_node_id); - ml.old_nodes_set.remove(&isolated_node_id); - ml.new_nodes_set.remove(&isolated_node_id); - for (_, node) in ml.nodes.iter_mut() { - node.cluster.remove_node(&isolated_node_id); - } - ml.update_signature(); - - // initialize session on master node && run to completion (2-of-5) - ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set), - Some(ml.old_set_signature.clone()), - Some(ml.new_set_signature.clone())).unwrap(); - ml.run(); + // run share add session + let master = gml.0.node(0); + let node_to_isolate_key_pair = gml.0.node_key_pair(1).clone(); + let node_to_isolate = gml.0.node(1); + let isolated_key_storage = gml.0.key_storage(1).clone(); + let mut oldest_nodes_set = gml.0.nodes(); + oldest_nodes_set.remove(&node_to_isolate); + let add = (0..add).map(|_| Random.generate().unwrap()).collect::>(); + let newest_nodes_set = add.iter().map(|kp| *kp.public()).collect::>(); + let isolate = ::std::iter::once(node_to_isolate).collect(); + let ml = MessageLoop::with_gml::(gml, master, Some(add), None, Some(isolate)) + .run_at(master).unwrap(); + let new_key_version = ml.ml.key_storage(0).get(&Default::default()) + .unwrap().unwrap().last_version().unwrap().hash; // now let's add back old node so that key becames 2-of-6 - let new_nodes_set: BTreeSet<_> = ml.nodes.keys().cloned().chain(::std::iter::once(isolated_node_id.clone())).collect(); - let mut ml = MessageLoop::new_additional(master_node_id.clone(), ml, new_nodes_set.clone()); - ml.nodes.get_mut(&isolated_node_id).unwrap().key_storage = isolated_key_storage.clone(); - ml.nodes.get_mut(&isolated_node_id).unwrap().session.core.key_share = isolated_key_storage.get(&Default::default()).unwrap(); - ml.nodes.get_mut(&isolated_node_id).unwrap().session.core.key_storage = isolated_key_storage; - - // initialize session on master node && run to completion (2-of65) - ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set), - Some(ml.old_set_signature.clone()), - Some(ml.new_set_signature.clone())).unwrap(); - ml.run(); - - // check that session has completed on all nodes - assert!(ml.nodes.values().all(|n| n.session.is_finished())); + let add = vec![node_to_isolate_key_pair.key_pair().clone()]; + let mut ml = ml.and_then::(master.clone(), Some(add), None, None); + ml.original_key_version = new_key_version; + ml.ml.replace_key_storage_of(&node_to_isolate, isolated_key_storage.clone()); + ml.sessions.get_mut(&node_to_isolate).unwrap().core.key_share 
= + isolated_key_storage.get(&Default::default()).unwrap(); + ml.sessions.get_mut(&node_to_isolate).unwrap().core.key_storage = isolated_key_storage; + let ml = ml.run_at(master).unwrap(); // check that secret is still the same as before adding the share - check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes - .iter() - .map(|(k, v)| (k.clone(), v.key_storage.clone())) - .collect()); + ml.check_secret_is_preserved(ml.sessions.keys()); // check that all oldest nodes have versions A, B, C // isolated node has version A, C // new nodes have versions B, C - let oldest_key_share = ml.nodes[oldest_nodes_set.iter().nth(0).unwrap()].key_storage.get(&Default::default()).unwrap().unwrap(); + let oldest_key_share = ml.ml.key_storage_of(oldest_nodes_set.iter().nth(0).unwrap()) + .get(&Default::default()).unwrap().unwrap(); debug_assert_eq!(oldest_key_share.versions.len(), 3); let version_a = oldest_key_share.versions[0].hash.clone(); let version_b = oldest_key_share.versions[1].hash.clone(); @@ -1287,41 +1080,28 @@ pub mod tests { debug_assert!(version_a != version_b && version_b != version_c); debug_assert!(oldest_nodes_set.iter().all(|n| vec![version_a.clone(), version_b.clone(), version_c.clone()] == - ml.nodes[n].key_storage.get(&Default::default()).unwrap().unwrap().versions.iter().map(|v| v.hash.clone()).collect::>())); - debug_assert!(::std::iter::once(&isolated_node_id).all(|n| vec![version_a.clone(), version_c.clone()] == - ml.nodes[n].key_storage.get(&Default::default()).unwrap().unwrap().versions.iter().map(|v| v.hash.clone()).collect::>())); + ml.ml.key_storage_of(n).get(&Default::default()).unwrap().unwrap() + .versions.iter().map(|v| v.hash).collect::>())); + debug_assert!(::std::iter::once(&node_to_isolate).all(|n| vec![version_a.clone(), version_c.clone()] == + ml.ml.key_storage_of(n).get(&Default::default()).unwrap().unwrap() + .versions.iter().map(|v| v.hash).collect::>())); debug_assert!(newest_nodes_set.iter().all(|n| vec![version_b.clone(), version_c.clone()] == - ml.nodes[n].key_storage.get(&Default::default()).unwrap().unwrap().versions.iter().map(|v| v.hash.clone()).collect::>())); + ml.ml.key_storage_of(n).get(&Default::default()).unwrap().unwrap() + .versions.iter().map(|v| v.hash).collect::>())); } #[test] fn nodes_add_fails_when_not_enough_share_owners_are_connected() { - let (n, nodes_to_add) = (3, 3); + let (n, add) = (3, 3); - // generate key (2-of-3) && prepare ShareAdd sessions - let old_nodes_set = generate_nodes_ids(n); - let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(generate_nodes_ids(nodes_to_add)).collect(); - let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); - let isolated_node_id1 = old_nodes_set.iter().cloned().nth(1).unwrap(); - let isolated_node_id2 = old_nodes_set.iter().cloned().nth(2).unwrap(); - let mut ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set.clone(), new_nodes_set.clone()); + // generate key + let gml = generate_key(n, 1); - // now let's isolate 2 of 3 nodes owning key share - ml.nodes.remove(&isolated_node_id1); - ml.nodes.remove(&isolated_node_id2); - ml.old_nodes_set.remove(&isolated_node_id1); - ml.new_nodes_set.remove(&isolated_node_id1); - ml.old_nodes_set.remove(&isolated_node_id2); - ml.new_nodes_set.remove(&isolated_node_id2); - for (_, node) in ml.nodes.iter_mut() { - node.cluster.remove_node(&isolated_node_id1); - node.cluster.remove_node(&isolated_node_id2); - } - ml.update_signature(); - - // initialize session on master node && run to completion (2-of-5) - 
assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(ml.version), Some(new_nodes_set), - Some(ml.old_set_signature.clone()), - Some(ml.new_set_signature.clone())).map(|_| ()), Err(Error::ConsensusUnreachable)); + // run share add session + let master = gml.0.node(0); + let add = (0..add).map(|_| Random.generate().unwrap()).collect::>(); + let isolate = vec![gml.0.node(1), gml.0.node(2)].into_iter().collect(); + assert_eq!(MessageLoop::with_gml::(gml, master, Some(add), None, Some(isolate)) + .run_at(master).unwrap_err(), Error::ConsensusUnreachable); } } diff --git a/secret-store/src/key_server_cluster/client_sessions/generation_session.rs b/secret-store/src/key_server_cluster/client_sessions/generation_session.rs index da2ffebc7..0fa805f57 100644 --- a/secret-store/src/key_server_cluster/client_sessions/generation_session.rs +++ b/secret-store/src/key_server_cluster/client_sessions/generation_session.rs @@ -940,406 +940,315 @@ fn check_threshold(threshold: usize, nodes: &BTreeSet) -> Result<(), Err #[cfg(test)] pub mod tests { use std::sync::Arc; - use std::collections::{BTreeSet, BTreeMap, VecDeque}; - use std::time::Duration; - use ethereum_types::Address; - use ethkey::{Random, Generator, KeyPair}; - use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage}; - use key_server_cluster::message::{self, Message, GenerationMessage}; - use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until, - all_connections_established, new_runtime}; + use ethereum_types::H256; + use ethkey::{Random, Generator, KeyPair, Secret}; + use key_server_cluster::{NodeId, Error, KeyStorage}; + use key_server_cluster::message::{self, Message, GenerationMessage, KeysDissemination, + PublicKeyShare, ConfirmInitialization}; + use key_server_cluster::cluster::tests::{MessageLoop as ClusterMessageLoop, make_clusters_and_preserve_sessions}; use key_server_cluster::cluster_sessions::ClusterSession; - use key_server_cluster::generation_session::{SessionImpl, SessionState, SessionParams}; + use key_server_cluster::generation_session::{SessionImpl, SessionState}; use key_server_cluster::math; use key_server_cluster::math::tests::do_encryption_and_decryption; - pub struct Node { - pub cluster: Arc, - pub key_storage: Arc, - pub session: SessionImpl, - } - - pub struct MessageLoop { - pub session_id: SessionId, - pub nodes: BTreeMap, - pub queue: VecDeque<(NodeId, NodeId, Message)>, - } - - pub fn generate_nodes_ids(n: usize) -> BTreeSet { - (0..n).map(|_| math::generate_random_point().unwrap()).collect() - } + #[derive(Debug)] + pub struct MessageLoop(pub ClusterMessageLoop); impl MessageLoop { - pub fn new(nodes_num: usize) -> Self { - Self::with_nodes_ids(generate_nodes_ids(nodes_num)) + pub fn new(num_nodes: usize) -> Self { + MessageLoop(make_clusters_and_preserve_sessions(num_nodes)) } - pub fn with_nodes_ids(nodes_ids: BTreeSet) -> Self { - let mut nodes = BTreeMap::new(); - let session_id = SessionId::default(); - for node_id in nodes_ids { - let cluster = Arc::new(DummyCluster::new(node_id.clone())); - let key_storage = Arc::new(DummyKeyStorage::default()); - let session = SessionImpl::new(SessionParams { - id: session_id.clone(), - self_node_id: node_id.clone(), - key_storage: Some(key_storage.clone()), - cluster: cluster.clone(), - nonce: Some(0), - }); - nodes.insert(node_id, Node { cluster: cluster, key_storage: key_storage, session: session }); - } + pub fn init(self, threshold: usize) -> Result { + 
self.0.cluster(0).client().new_generation_session(Default::default(), None, Default::default(), threshold) + .map(|_| self) + } - let nodes_ids: Vec<_> = nodes.keys().cloned().collect(); - for node in nodes.values() { - for node_id in &nodes_ids { - node.cluster.add_node(node_id.clone()); - } - } + pub fn session_at(&self, idx: usize) -> Arc<SessionImpl> { + self.0.sessions(idx).generation_sessions.first().unwrap() + } - MessageLoop { - session_id: session_id, - nodes: nodes, - queue: VecDeque::new(), + pub fn session_of(&self, node: &NodeId) -> Arc<SessionImpl> { + self.0.sessions_of(node).generation_sessions.first().unwrap() + } + + pub fn take_message_confirm_initialization(&self) -> (NodeId, NodeId, ConfirmInitialization) { + match self.0.take_message() { + Some((from, to, Message::Generation(GenerationMessage::ConfirmInitialization(msg)))) => + (from, to, msg), + _ => panic!("unexpected"), } } - pub fn master(&self) -> &SessionImpl { - &self.nodes.values().nth(0).unwrap().session - } - - pub fn first_slave(&self) -> &SessionImpl { - &self.nodes.values().nth(1).unwrap().session - } - - pub fn second_slave(&self) -> &SessionImpl { - &self.nodes.values().nth(2).unwrap().session - } - - pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { - self.nodes.values() - .filter_map(|n| n.cluster.take_message().map(|m| (n.session.node().clone(), m.0, m.1))) - .nth(0) - .or_else(|| self.queue.pop_front()) - } - - pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { - match { - match msg.2 { - Message::Generation(GenerationMessage::InitializeSession(ref message)) => self.nodes[&msg.1].session.on_initialize_session(msg.0.clone(), &message), - Message::Generation(GenerationMessage::ConfirmInitialization(ref message)) => self.nodes[&msg.1].session.on_confirm_initialization(msg.0.clone(), &message), - Message::Generation(GenerationMessage::CompleteInitialization(ref message)) => self.nodes[&msg.1].session.on_complete_initialization(msg.0.clone(), &message), - Message::Generation(GenerationMessage::KeysDissemination(ref message)) => self.nodes[&msg.1].session.on_keys_dissemination(msg.0.clone(), &message), - Message::Generation(GenerationMessage::PublicKeyShare(ref message)) => self.nodes[&msg.1].session.on_public_key_share(msg.0.clone(), &message), - Message::Generation(GenerationMessage::SessionCompleted(ref message)) => self.nodes[&msg.1].session.on_session_completed(msg.0.clone(), &message), - _ => panic!("unexpected"), - } - } { - Ok(_) => Ok(()), - Err(Error::TooEarlyForRequest) => { - self.queue.push_back(msg); - Ok(()) - }, - Err(err) => Err(err), + pub fn take_message_keys_dissemination(&self) -> (NodeId, NodeId, KeysDissemination) { + match self.0.take_message() { + Some((from, to, Message::Generation(GenerationMessage::KeysDissemination(msg)))) => + (from, to, msg), + _ => panic!("unexpected"), } } - pub fn take_and_process_message(&mut self) -> Result<(), Error> { - let msg = self.take_message().unwrap(); - self.process_message(msg) + pub fn take_message_public_key_share(&self) -> (NodeId, NodeId, PublicKeyShare) { + match self.0.take_message() { + Some((from, to, Message::Generation(GenerationMessage::PublicKeyShare(msg)))) => + (from, to, msg), + _ => panic!("unexpected"), + } } - pub fn compute_key_pair(&self, t: usize) -> KeyPair { - let secret_shares = self.nodes.values() - .map(|nd| nd.key_storage.get(&SessionId::default()).unwrap().unwrap().last_version().unwrap().secret_share.clone()) - .take(t + 1) - .collect::<Vec<_>>(); - let secret_shares = 
secret_shares.iter().collect::<Vec<_>>(); - let id_numbers = self.nodes.iter() - .map(|(n, nd)| nd.key_storage.get(&SessionId::default()).unwrap().unwrap().last_version().unwrap().id_numbers[n].clone()) - .take(t + 1) - .collect::<Vec<_>>(); - let id_numbers = id_numbers.iter().collect::<Vec<_>>(); - let joint_secret1 = math::compute_joint_secret_from_shares(t, &secret_shares, &id_numbers).unwrap(); - - let secret_values: Vec<_> = self.nodes.values().map(|s| s.session.joint_public_and_secret().unwrap().unwrap().1).collect(); - let joint_secret2 = math::compute_joint_secret(secret_values.iter()).unwrap(); - assert_eq!(joint_secret1, joint_secret2); - - KeyPair::from_secret(joint_secret1).unwrap() + pub fn nodes_id_numbers(&self) -> Vec<Secret> { + let session = self.session_at(0); + let session_data = session.data.lock(); + session_data.nodes.values().map(|n| n.id_number.clone()).collect() } - } - fn make_simple_cluster(threshold: usize, num_nodes: usize) -> Result<(SessionId, NodeId, NodeId, MessageLoop), Error> { - let l = MessageLoop::new(num_nodes); - l.master().initialize(Default::default(), Default::default(), false, threshold, l.nodes.keys().cloned().collect::<BTreeSet<_>>().into())?; + pub fn nodes_secret_shares(&self) -> Vec<Secret> { + (0..self.0.nodes().len()).map(|i| { + let session = self.session_at(i); + let session_data = session.data.lock(); + session_data.secret_share.as_ref().unwrap().clone() + }).collect() + } - let session_id = l.session_id.clone(); - let master_id = l.master().node().clone(); - let slave_id = l.first_slave().node().clone(); - Ok((session_id, master_id, slave_id, l)) + pub fn compute_key_pair(&self) -> KeyPair { + let t = self.0.key_storage(0).get(&Default::default()).unwrap().unwrap().threshold; + let secret_shares = self.nodes_secret_shares(); + let id_numbers = self.nodes_id_numbers(); + let secret_shares = secret_shares.iter().take(t + 1).collect::<Vec<_>>(); + let id_numbers = id_numbers.iter().take(t + 1).collect::<Vec<_>>(); + let joint_secret = math::compute_joint_secret_from_shares(t, &secret_shares, &id_numbers).unwrap(); + + KeyPair::from_secret(joint_secret).unwrap() + } + + pub fn key_version(&self) -> H256 { + self.0.key_storage(0).get(&Default::default()) + .unwrap().unwrap().versions.iter().last().unwrap().hash + } } #[test] fn initializes_in_cluster_of_single_node() { - let l = MessageLoop::new(1); - assert!(l.master().initialize(Default::default(), Default::default(), false, 0, l.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).is_ok()); + MessageLoop::new(1).init(0).unwrap(); } #[test] fn fails_to_initialize_if_threshold_is_wrong() { - match make_simple_cluster(2, 2) { - Err(Error::NotEnoughNodesForThreshold) => (), - _ => panic!("unexpected"), - } + assert_eq!(MessageLoop::new(2).init(2).unwrap_err(), Error::NotEnoughNodesForThreshold); } #[test] fn fails_to_initialize_when_already_initialized() { - let (_, _, _, l) = make_simple_cluster(0, 2).unwrap(); - assert_eq!(l.master().initialize(Default::default(), Default::default(), false, 0, l.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).unwrap_err(), - Error::InvalidStateForRequest); + let ml = MessageLoop::new(2).init(0).unwrap(); + assert_eq!( + ml.session_at(0).initialize(Default::default(), Default::default(), false, 0, ml.0.nodes().into()), + Err(Error::InvalidStateForRequest), + ); } #[test] fn fails_to_accept_initialization_when_already_initialized() { - let (_, _, _, mut l) = make_simple_cluster(0, 2).unwrap(); - let message = l.take_message().unwrap(); - l.process_message(message.clone()).unwrap(); - 
assert_eq!(l.process_message(message.clone()).unwrap_err(), Error::InvalidStateForRequest); + let ml = MessageLoop::new(2).init(0).unwrap(); + let (from, to, msg) = ml.0.take_message().unwrap(); + ml.0.process_message(from, to, msg.clone()); + assert_eq!( + ml.session_of(&to).on_message(&from, &msg), + Err(Error::InvalidStateForRequest), + ); } #[test] fn slave_updates_derived_point_on_initialization() { - let (_, _, _, mut l) = make_simple_cluster(0, 2).unwrap(); - let passed_point = match l.take_message().unwrap() { - (f, t, Message::Generation(GenerationMessage::InitializeSession(message))) => { - let point = message.derived_point.clone(); - l.process_message((f, t, Message::Generation(GenerationMessage::InitializeSession(message)))).unwrap(); - point + let ml = MessageLoop::new(2).init(0).unwrap(); + let original_point = match ml.0.take_message().unwrap() { + (from, to, Message::Generation(GenerationMessage::InitializeSession(msg))) => { + let original_point = msg.derived_point.clone(); + let msg = Message::Generation(GenerationMessage::InitializeSession(msg)); + ml.0.process_message(from, to, msg); + original_point }, _ => panic!("unexpected"), }; - match l.take_message().unwrap() { - (_, _, Message::Generation(GenerationMessage::ConfirmInitialization(message))) => assert!(passed_point != message.derived_point), + match ml.0.take_message().unwrap() { + (_, _, Message::Generation(GenerationMessage::ConfirmInitialization(msg))) => + assert!(original_point != msg.derived_point), _ => panic!("unexpected"), } } #[test] fn fails_to_accept_initialization_confirmation_if_already_accepted_from_the_same_node() { - let (sid, _, s, mut l) = make_simple_cluster(0, 3).unwrap(); - l.take_and_process_message().unwrap(); - l.take_and_process_message().unwrap(); - l.take_and_process_message().unwrap(); - assert_eq!(l.master().on_confirm_initialization(s, &message::ConfirmInitialization { - session: sid.into(), - session_nonce: 0, - derived_point: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::InvalidStateForRequest); + let ml = MessageLoop::new(3).init(0).unwrap(); + ml.0.take_and_process_message(); + + let (from, to, msg) = ml.take_message_confirm_initialization(); + ml.0.process_message(from, to, Message::Generation(GenerationMessage::ConfirmInitialization(msg.clone()))); + assert_eq!(ml.session_of(&to).on_confirm_initialization(from, &msg), Err(Error::InvalidStateForRequest)); } #[test] fn fails_to_accept_initialization_confirmation_if_initialization_already_completed() { - let (sid, _, s, mut l) = make_simple_cluster(0, 2).unwrap(); - l.take_and_process_message().unwrap(); - l.take_and_process_message().unwrap(); - assert_eq!(l.master().on_confirm_initialization(s, &message::ConfirmInitialization { - session: sid.into(), + let ml = MessageLoop::new(2).init(0).unwrap(); + ml.0.take_and_process_message(); + ml.0.take_and_process_message(); + assert_eq!(ml.session_at(0).on_confirm_initialization(ml.0.node(1), &message::ConfirmInitialization { + session: Default::default(), session_nonce: 0, derived_point: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::InvalidStateForRequest); + }), Err(Error::InvalidStateForRequest)); } #[test] fn master_updates_derived_point_on_initialization_completion() { - let (_, _, _, mut l) = make_simple_cluster(0, 2).unwrap(); - l.take_and_process_message().unwrap(); - let passed_point = match l.take_message().unwrap() { - (f, t, Message::Generation(GenerationMessage::ConfirmInitialization(message))) => { - let point = 
message.derived_point.clone(); - l.process_message((f, t, Message::Generation(GenerationMessage::ConfirmInitialization(message)))).unwrap(); - point + let ml = MessageLoop::new(2).init(0).unwrap(); + ml.0.take_and_process_message(); + let original_point = match ml.0.take_message().unwrap() { + (from, to, Message::Generation(GenerationMessage::ConfirmInitialization(msg))) => { + let original_point = msg.derived_point.clone(); + let msg = Message::Generation(GenerationMessage::ConfirmInitialization(msg)); + ml.session_of(&to).on_message(&from, &msg).unwrap(); + original_point }, _ => panic!("unexpected"), }; - assert!(l.master().derived_point().unwrap() != passed_point.into()); - } - - #[test] - fn fails_to_complete_initialization_if_threshold_is_wrong() { - let (sid, m, s, l) = make_simple_cluster(0, 2).unwrap(); - let mut nodes = BTreeMap::new(); - nodes.insert(m, math::generate_random_scalar().unwrap()); - nodes.insert(s, math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_initialize_session(m, &message::InitializeSession { - session: sid.into(), - session_nonce: 0, - origin: None, - author: Address::default().into(), - nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), - is_zero: false, - threshold: 2, - derived_point: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::NotEnoughNodesForThreshold); + assert!(ml.session_at(0).derived_point().unwrap() != original_point.into()); } #[test] fn fails_to_complete_initialization_if_not_waiting_for_it() { - let (sid, m, _, l) = make_simple_cluster(0, 2).unwrap(); - assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { - session: sid.into(), + let ml = MessageLoop::new(2).init(0).unwrap(); + ml.0.take_and_process_message(); + assert_eq!(ml.session_at(0).on_complete_initialization(ml.0.node(1), &message::CompleteInitialization { + session: Default::default(), session_nonce: 0, derived_point: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::InvalidStateForRequest); + }), Err(Error::InvalidStateForRequest)); } #[test] fn fails_to_complete_initialization_from_non_master_node() { - let (sid, _, _, mut l) = make_simple_cluster(0, 3).unwrap(); - l.take_and_process_message().unwrap(); - l.take_and_process_message().unwrap(); - l.take_and_process_message().unwrap(); - l.take_and_process_message().unwrap(); - assert_eq!(l.first_slave().on_complete_initialization(l.second_slave().node().clone(), &message::CompleteInitialization { - session: sid.into(), + let ml = MessageLoop::new(3).init(0).unwrap(); + ml.0.take_and_process_message(); + ml.0.take_and_process_message(); + ml.0.take_and_process_message(); + ml.0.take_and_process_message(); + assert_eq!(ml.session_at(1).on_complete_initialization(ml.0.node(2), &message::CompleteInitialization { + session: Default::default(), session_nonce: 0, derived_point: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::InvalidMessage); + }), Err(Error::InvalidMessage)); } #[test] fn fails_to_accept_keys_dissemination_if_not_waiting_for_it() { - let (sid, _, s, l) = make_simple_cluster(0, 2).unwrap(); - assert_eq!(l.master().on_keys_dissemination(s, &message::KeysDissemination { - session: sid.into(), + let ml = MessageLoop::new(2).init(0).unwrap(); + assert_eq!(ml.session_at(0).on_keys_dissemination(ml.0.node(1), &message::KeysDissemination { + session: Default::default(), session_nonce: 0, secret1: math::generate_random_scalar().unwrap().into(), secret2: 
math::generate_random_scalar().unwrap().into(), publics: vec![math::generate_random_point().unwrap().into()], - }).unwrap_err(), Error::TooEarlyForRequest); + }), Err(Error::TooEarlyForRequest)); } #[test] fn fails_to_accept_keys_dissemination_if_wrong_number_of_publics_passed() { - let (sid, m, _, mut l) = make_simple_cluster(0, 3).unwrap(); - l.take_and_process_message().unwrap(); // m -> s1: InitializeSession - l.take_and_process_message().unwrap(); // m -> s2: InitializeSession - l.take_and_process_message().unwrap(); // s1 -> m: ConfirmInitialization - l.take_and_process_message().unwrap(); // s2 -> m: ConfirmInitialization - l.take_and_process_message().unwrap(); // m -> s1: CompleteInitialization - l.take_and_process_message().unwrap(); // m -> s2: CompleteInitialization - l.take_and_process_message().unwrap(); // m -> s1: KeysDissemination - assert_eq!(l.first_slave().on_keys_dissemination(m, &message::KeysDissemination { - session: sid.into(), - session_nonce: 0, - secret1: math::generate_random_scalar().unwrap().into(), - secret2: math::generate_random_scalar().unwrap().into(), - publics: vec![math::generate_random_point().unwrap().into(), math::generate_random_point().unwrap().into()], - }).unwrap_err(), Error::InvalidMessage); + let ml = MessageLoop::new(3).init(0).unwrap(); + ml.0.take_and_process_message(); // m -> s1: InitializeSession + ml.0.take_and_process_message(); // m -> s2: InitializeSession + ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization + ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization + ml.0.take_and_process_message(); // m -> s1: CompleteInitialization + ml.0.take_and_process_message(); // m -> s2: CompleteInitialization + + let (from, to, mut msg) = ml.take_message_keys_dissemination(); + msg.publics.clear(); + assert_eq!(ml.session_of(&to).on_keys_dissemination(from, &msg), Err(Error::InvalidMessage)); } #[test] fn fails_to_accept_keys_dissemination_second_time_from_the_same_node() { - let (sid, m, _, mut l) = make_simple_cluster(0, 3).unwrap(); - l.take_and_process_message().unwrap(); // m -> s1: InitializeSession - l.take_and_process_message().unwrap(); // m -> s2: InitializeSession - l.take_and_process_message().unwrap(); // s1 -> m: ConfirmInitialization - l.take_and_process_message().unwrap(); // s2 -> m: ConfirmInitialization - l.take_and_process_message().unwrap(); // m -> s1: CompleteInitialization - l.take_and_process_message().unwrap(); // m -> s2: CompleteInitialization - l.take_and_process_message().unwrap(); // m -> s1: KeysDissemination - assert_eq!(l.first_slave().on_keys_dissemination(m, &message::KeysDissemination { - session: sid.into(), - session_nonce: 0, - secret1: math::generate_random_scalar().unwrap().into(), - secret2: math::generate_random_scalar().unwrap().into(), - publics: vec![math::generate_random_point().unwrap().into()], - }).unwrap_err(), Error::InvalidStateForRequest); + let ml = MessageLoop::new(3).init(0).unwrap(); + ml.0.take_and_process_message(); // m -> s1: InitializeSession + ml.0.take_and_process_message(); // m -> s2: InitializeSession + ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization + ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization + ml.0.take_and_process_message(); // m -> s1: CompleteInitialization + ml.0.take_and_process_message(); // m -> s2: CompleteInitialization + + let (from, to, msg) = ml.take_message_keys_dissemination(); + ml.0.process_message(from, to, Message::Generation(GenerationMessage::KeysDissemination(msg.clone()))); + 
assert_eq!(ml.session_of(&to).on_keys_dissemination(from, &msg), Err(Error::InvalidStateForRequest)); } #[test] fn should_not_accept_public_key_share_when_is_not_waiting_for_it() { - let (sid, _, s, l) = make_simple_cluster(1, 3).unwrap(); - assert_eq!(l.master().on_public_key_share(s, &message::PublicKeyShare { - session: sid.into(), + let ml = MessageLoop::new(3).init(1).unwrap(); + assert_eq!(ml.session_at(0).on_public_key_share(ml.0.node(1), &message::PublicKeyShare { + session: Default::default(), session_nonce: 0, public_share: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::InvalidStateForRequest); + }), Err(Error::InvalidStateForRequest)); } #[test] fn should_not_accept_public_key_share_when_receiving_twice() { - let (sid, m, _, mut l) = make_simple_cluster(0, 3).unwrap(); - l.take_and_process_message().unwrap(); // m -> s1: InitializeSession - l.take_and_process_message().unwrap(); // m -> s2: InitializeSession - l.take_and_process_message().unwrap(); // s1 -> m: ConfirmInitialization - l.take_and_process_message().unwrap(); // s2 -> m: ConfirmInitialization - l.take_and_process_message().unwrap(); // m -> s1: CompleteInitialization - l.take_and_process_message().unwrap(); // m -> s2: CompleteInitialization - l.take_and_process_message().unwrap(); // m -> s1: KeysDissemination - l.take_and_process_message().unwrap(); // m -> s2: KeysDissemination - l.take_and_process_message().unwrap(); // s1 -> m: KeysDissemination - l.take_and_process_message().unwrap(); // s1 -> s2: KeysDissemination - l.take_and_process_message().unwrap(); // s2 -> m: KeysDissemination - l.take_and_process_message().unwrap(); // s2 -> s1: KeysDissemination - let (f, t, msg) = match l.take_message() { - Some((f, t, Message::Generation(GenerationMessage::PublicKeyShare(msg)))) => (f, t, msg), - _ => panic!("unexpected"), - }; - assert_eq!(&f, l.master().node()); - assert_eq!(&t, l.second_slave().node()); - l.process_message((f, t, Message::Generation(GenerationMessage::PublicKeyShare(msg.clone())))).unwrap(); - assert_eq!(l.second_slave().on_public_key_share(m, &message::PublicKeyShare { - session: sid.into(), - session_nonce: 0, - public_share: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::InvalidMessage); + let ml = MessageLoop::new(3).init(0).unwrap(); + ml.0.take_and_process_message(); // m -> s1: InitializeSession + ml.0.take_and_process_message(); // m -> s2: InitializeSession + ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization + ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization + ml.0.take_and_process_message(); // m -> s1: CompleteInitialization + ml.0.take_and_process_message(); // m -> s2: CompleteInitialization + ml.0.take_and_process_message(); // m -> s1: KeysDissemination + ml.0.take_and_process_message(); // m -> s2: KeysDissemination + ml.0.take_and_process_message(); // s1 -> m: KeysDissemination + ml.0.take_and_process_message(); // s1 -> s2: KeysDissemination + ml.0.take_and_process_message(); // s2 -> m: KeysDissemination + ml.0.take_and_process_message(); // s2 -> s1: KeysDissemination + + let (from, to, msg) = ml.take_message_public_key_share(); + ml.0.process_message(from, to, Message::Generation(GenerationMessage::PublicKeyShare(msg.clone()))); + assert_eq!(ml.session_of(&to).on_public_key_share(from, &msg), Err(Error::InvalidMessage)); } #[test] fn encryption_fails_on_session_timeout() { - let (_, _, _, l) = make_simple_cluster(0, 2).unwrap(); - assert!(l.master().joint_public_and_secret().is_none()); - 
l.master().on_session_timeout(); - assert!(l.master().joint_public_and_secret().unwrap().unwrap_err() == Error::NodeDisconnected); + let ml = MessageLoop::new(2).init(0).unwrap(); + assert!(ml.session_at(0).joint_public_and_secret().is_none()); + ml.session_at(0).on_session_timeout(); + assert_eq!(ml.session_at(0).joint_public_and_secret().unwrap(), Err(Error::NodeDisconnected)); } #[test] fn encryption_fails_on_node_timeout() { - let (_, _, _, l) = make_simple_cluster(0, 2).unwrap(); - assert!(l.master().joint_public_and_secret().is_none()); - l.master().on_node_timeout(l.first_slave().node()); - assert!(l.master().joint_public_and_secret().unwrap().unwrap_err() == Error::NodeDisconnected); + let ml = MessageLoop::new(2).init(0).unwrap(); + assert!(ml.session_at(0).joint_public_and_secret().is_none()); + ml.session_at(0).on_node_timeout(&ml.0.node(1)); + assert_eq!(ml.session_at(0).joint_public_and_secret().unwrap(), Err(Error::NodeDisconnected)); } #[test] fn complete_enc_dec_session() { let test_cases = [(0, 5), (2, 5), (3, 5)]; for &(threshold, num_nodes) in &test_cases { - let mut l = MessageLoop::new(num_nodes); - l.master().initialize(Default::default(), Default::default(), false, threshold, l.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).unwrap(); - assert_eq!(l.nodes.len(), num_nodes); - - // let nodes do initialization + keys dissemination - while let Some((from, to, message)) = l.take_message() { - l.process_message((from, to, message)).unwrap(); - } + let ml = MessageLoop::new(num_nodes).init(threshold).unwrap(); + ml.0.loop_until(|| ml.0.is_empty()); // check that all nodes have finished joint public generation - let joint_public_key = l.master().joint_public_and_secret().unwrap().unwrap().0; - for node in l.nodes.values() { - let state = node.session.state(); - assert_eq!(state, SessionState::Finished); - assert_eq!(node.session.joint_public_and_secret().map(|p| p.map(|p| p.0)), Some(Ok(joint_public_key))); + let joint_public_key = ml.session_at(0).joint_public_and_secret().unwrap().unwrap().0; + for i in 0..num_nodes { + let session = ml.session_at(i); + assert_eq!(session.state(), SessionState::Finished); + assert_eq!(session.joint_public_and_secret().map(|p| p.map(|p| p.0)), Some(Ok(joint_public_key))); } // now let's encrypt some secret (which is a point on EC) let document_secret_plain = Random.generate().unwrap().public().clone(); - let all_nodes_id_numbers: Vec<_> = l.master().data.lock().nodes.values().map(|n| n.id_number.clone()).collect(); - let all_nodes_secret_shares: Vec<_> = l.nodes.values().map(|n| n.session.data.lock().secret_share.as_ref().unwrap().clone()).collect(); + let all_nodes_id_numbers = ml.nodes_id_numbers(); + let all_nodes_secret_shares = ml.nodes_secret_shares(); let document_secret_decrypted = do_encryption_and_decryption(threshold, &joint_public_key, &all_nodes_id_numbers, &all_nodes_secret_shares, @@ -1350,41 +1259,18 @@ pub mod tests { } } - #[test] - fn encryption_session_works_over_network() { - const CONN_TIMEOUT: Duration = Duration::from_millis(300); - const SESSION_TIMEOUT: Duration = Duration::from_millis(1000); - - let test_cases = [(1, 3)]; - for &(threshold, num_nodes) in &test_cases { - let mut core = new_runtime(); - - // prepare cluster objects for each node - let clusters = make_clusters(&core, 6031, num_nodes); - run_clusters(&clusters); - - // `clusters` contains `Arc<ClusterCore>` and clones will refer to the same cores. 
- let clusters_clone = clusters.clone(); - - // establish connections - loop_until(&core.executor(), CONN_TIMEOUT, move || clusters_clone.iter().all(all_connections_established)); - - // run session to completion - let session_id = SessionId::default(); - let session = clusters[0].client().new_generation_session(session_id, Default::default(), Default::default(), threshold).unwrap(); - loop_until(&core.executor(), SESSION_TIMEOUT, move || session.joint_public_and_secret().is_some()); - } - } - #[test] fn generation_message_fails_when_nonce_is_wrong() { - let (sid, m, _, l) = make_simple_cluster(0, 2).unwrap(); - assert_eq!(l.first_slave().process_message(&m, &message::GenerationMessage::KeysDissemination(message::KeysDissemination { - session: sid.into(), + let ml = MessageLoop::new(2).init(0).unwrap(); + ml.0.take_and_process_message(); + + let msg = message::GenerationMessage::KeysDissemination(message::KeysDissemination { + session: Default::default(), session_nonce: 10, secret1: math::generate_random_scalar().unwrap().into(), secret2: math::generate_random_scalar().unwrap().into(), publics: vec![math::generate_random_point().unwrap().into()], - })).unwrap_err(), Error::ReplayProtection); + }); + assert_eq!(ml.session_at(1).process_message(&ml.0.node(0), &msg).unwrap_err(), Error::ReplayProtection); } } diff --git a/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs b/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs index 3744f6c20..fe3bd4f11 100644 --- a/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs +++ b/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs @@ -1061,140 +1061,65 @@ impl JobTransport for SigningJobTransport { #[cfg(test)] mod tests { use std::sync::Arc; - use std::collections::{BTreeSet, BTreeMap, VecDeque}; use ethereum_types::H256; - use ethkey::{self, Random, Generator, KeyPair, verify_public, public_to_address}; - use acl_storage::DummyAclStorage; - use key_server_cluster::{NodeId, DummyKeyStorage, SessionId, SessionMeta, Error, KeyStorage}; - use key_server_cluster::cluster_sessions::ClusterSession; - use key_server_cluster::cluster::tests::DummyCluster; - use key_server_cluster::generation_session::tests::MessageLoop as KeyGenerationMessageLoop; - use key_server_cluster::message::Message; - use key_server_cluster::signing_session_ecdsa::{SessionImpl, SessionParams}; + use ethkey::{self, Random, Generator, Public, verify_public, public_to_address}; + use key_server_cluster::{SessionId, Error, KeyStorage}; + use key_server_cluster::cluster::tests::{MessageLoop as ClusterMessageLoop}; + use key_server_cluster::signing_session_ecdsa::SessionImpl; + use key_server_cluster::generation_session::tests::MessageLoop as GenerationMessageLoop; - struct Node { - pub node_id: NodeId, - pub cluster: Arc<DummyCluster>, - pub key_storage: Arc<DummyKeyStorage>, - pub session: SessionImpl, - } - - struct MessageLoop { - pub session_id: SessionId, - pub requester: KeyPair, - pub nodes: BTreeMap<NodeId, Node>, - pub queue: VecDeque<(NodeId, NodeId, Message)>, - pub acl_storages: Vec<Arc<DummyAclStorage>>, - pub version: H256, - } + #[derive(Debug)] + pub struct MessageLoop(pub ClusterMessageLoop); impl MessageLoop { - pub fn new(gl: &KeyGenerationMessageLoop) -> Self { - let version = gl.nodes.values().nth(0).unwrap().key_storage.get(&Default::default()).unwrap().unwrap().versions.iter().last().unwrap().hash; - let mut nodes = BTreeMap::new(); - let session_id = gl.session_id.clone(); + pub fn new(num_nodes: usize, threshold: usize) -> Result<Self, Error> 
{ + let ml = GenerationMessageLoop::new(num_nodes).init(threshold)?; + ml.0.loop_until(|| ml.0.is_empty()); // complete generation session + + Ok(MessageLoop(ml.0)) + } + + pub fn init_with_version(self, key_version: Option<H256>) -> Result<(Self, Public, H256), Error> { + let message_hash = H256::random(); let requester = Random.generate().unwrap(); - let signature = Some(ethkey::sign(requester.secret(), &SessionId::default()).unwrap()); - let master_node_id = gl.nodes.keys().nth(0).unwrap().clone(); - let mut acl_storages = Vec::new(); - for (i, (gl_node_id, gl_node)) in gl.nodes.iter().enumerate() { - let acl_storage = Arc::new(DummyAclStorage::default()); - acl_storages.push(acl_storage.clone()); - let cluster = Arc::new(DummyCluster::new(gl_node_id.clone())); - let session = SessionImpl::new(SessionParams { - meta: SessionMeta { - id: session_id.clone(), - self_node_id: gl_node_id.clone(), - master_node_id: master_node_id.clone(), - threshold: gl_node.key_storage.get(&session_id).unwrap().unwrap().threshold, - configured_nodes_count: gl.nodes.len(), - connected_nodes_count: gl.nodes.len(), - }, - access_key: "834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec".parse().unwrap(), - key_share: Some(gl_node.key_storage.get(&session_id).unwrap().unwrap()), - acl_storage: acl_storage, - cluster: cluster.clone(), - nonce: 0, - }, if i == 0 { signature.clone().map(Into::into) } else { None }).unwrap(); - nodes.insert(gl_node_id.clone(), Node { node_id: gl_node_id.clone(), cluster: cluster, key_storage: gl_node.key_storage.clone(), session: session }); - } - - let nodes_ids: Vec<_> = nodes.keys().cloned().collect(); - for node in nodes.values() { - for node_id in &nodes_ids { - node.cluster.add_node(node_id.clone()); - } - } - - MessageLoop { - session_id: session_id, - requester: requester, - nodes: nodes, - queue: VecDeque::new(), - acl_storages: acl_storages, - version: version, - } + let signature = ethkey::sign(requester.secret(), &SessionId::default()).unwrap(); + self.0.cluster(0).client() + .new_ecdsa_signing_session(Default::default(), signature.into(), key_version, message_hash) + .map(|_| (self, *requester.public(), message_hash)) } - pub fn master(&self) -> &SessionImpl { - &self.nodes.values().nth(0).unwrap().session + pub fn init(self) -> Result<(Self, Public, H256), Error> { + let key_version = self.0.key_storage(0).get(&Default::default()) + .unwrap().unwrap().versions.iter().last().unwrap().hash; + self.init_with_version(Some(key_version)) } - pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { - self.nodes.values() - .filter_map(|n| n.cluster.take_message().map(|m| (n.node_id.clone(), m.0, m.1))) - .nth(0) - .or_else(|| self.queue.pop_front()) + pub fn init_delegated(self) -> Result<(Self, Public, H256), Error> { + self.0.key_storage(0).remove(&Default::default()).unwrap(); + self.init_with_version(None) } - pub fn process_message(&mut self, mut msg: (NodeId, NodeId, Message)) -> Result<(), Error> { - let mut is_queued_message = false; - loop { - match self.nodes[&msg.1].session.on_message(&msg.0, &msg.2) { - Ok(_) => { - if let Some(message) = self.queue.pop_front() { - msg = message; - is_queued_message = true; - continue; - } - return Ok(()); - }, - Err(Error::TooEarlyForRequest) => { - if is_queued_message { - self.queue.push_front(msg); - } else { - self.queue.push_back(msg); - } - return Ok(()); - }, - Err(err) => return Err(err), - } - } - } - } - - fn prepare_signing_sessions(threshold: usize, num_nodes: usize) -> (KeyGenerationMessageLoop, 
MessageLoop) { - // run key generation sessions - let mut gl = KeyGenerationMessageLoop::new(num_nodes); - gl.master().initialize(Default::default(), Default::default(), false, threshold, gl.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).unwrap(); - while let Some((from, to, message)) = gl.take_message() { - gl.process_message((from, to, message)).unwrap(); + pub fn init_with_isolated(self) -> Result<(Self, Public, H256), Error> { + self.0.isolate(1); + self.init() } - // run signing session - let sl = MessageLoop::new(&gl); - (gl, sl) + pub fn session_at(&self, idx: usize) -> Arc<SessionImpl> { + self.0.sessions(idx).ecdsa_signing_sessions.first().unwrap() + } + + pub fn ensure_completed(&self) { + self.0.loop_until(|| self.0.is_empty()); + assert!(self.session_at(0).wait().is_ok()); + } } #[test] fn failed_gen_ecdsa_sign_session_when_threshold_is_too_low() { let test_cases = [(1, 2), (2, 4), (3, 6), (4, 6)]; for &(threshold, num_nodes) in &test_cases { - let (_, sl) = prepare_signing_sessions(threshold, num_nodes); - - // run signing session - let message_hash = H256::random(); - assert_eq!(sl.master().initialize(sl.version.clone(), message_hash).unwrap_err(), Error::ConsensusUnreachable); + assert_eq!(MessageLoop::new(num_nodes, threshold).unwrap().init().unwrap_err(), + Error::ConsensusUnreachable); } } @@ -1202,112 +1127,46 @@ mod tests { fn complete_gen_ecdsa_sign_session() { let test_cases = [(0, 1), (2, 5), (2, 6), (3, 11), (4, 11)]; for &(threshold, num_nodes) in &test_cases { - let (gl, mut sl) = prepare_signing_sessions(threshold, num_nodes); - let key_pair = gl.compute_key_pair(threshold); + let (ml, _, message) = MessageLoop::new(num_nodes, threshold).unwrap().init().unwrap(); + ml.0.loop_until(|| ml.0.is_empty()); - // run signing session - let message_hash = H256::random(); - sl.master().initialize(sl.version.clone(), message_hash).unwrap(); - while let Some((from, to, message)) = sl.take_message() { - sl.process_message((from, to, message)).unwrap(); - } - - // verify signature - let signature = sl.master().wait().unwrap(); - assert!(verify_public(key_pair.public(), &signature, &message_hash).unwrap()); + let signer_public = ml.0.key_storage(0).get(&Default::default()).unwrap().unwrap().public; + let signature = ml.session_at(0).wait().unwrap(); + assert!(verify_public(&signer_public, &signature, &message).unwrap()); } } #[test] fn ecdsa_complete_signing_session_with_single_node_failing() { - let (_, mut sl) = prepare_signing_sessions(1, 4); - sl.master().initialize(sl.version.clone(), 777.into()).unwrap(); + let (ml, requester, _) = MessageLoop::new(4, 1).unwrap().init().unwrap(); // we need at least 3-of-4 nodes to agree to reach consensus // let's say 1 of 4 nodes disagree - sl.acl_storages[1].prohibit(public_to_address(sl.requester.public()), SessionId::default()); + ml.0.acl_storage(1).prohibit(public_to_address(&requester), Default::default()); // then consensus reachable, but single node will disagree - while let Some((from, to, message)) = sl.take_message() { - sl.process_message((from, to, message)).unwrap(); - } - - let data = sl.master().data.lock(); - match data.result { - Some(Ok(_)) => (), - _ => unreachable!(), - } + ml.ensure_completed(); } #[test] fn ecdsa_complete_signing_session_with_acl_check_failed_on_master() { - let (_, mut sl) = prepare_signing_sessions(1, 4); - sl.master().initialize(sl.version.clone(), 777.into()).unwrap(); + let (ml, requester, _) = MessageLoop::new(4, 1).unwrap().init().unwrap(); // we need at least 3-of-4 nodes to agree to reach consensus - // 
let's say 1 of 4 nodes disagree - sl.acl_storages[0].prohibit(public_to_address(sl.requester.public()), SessionId::default()); + // let's say 1 of 4 nodes (here: master) disagree + ml.0.acl_storage(0).prohibit(public_to_address(&requester), Default::default()); // then consensus reachable, but single node will disagree - while let Some((from, to, message)) = sl.take_message() { - sl.process_message((from, to, message)).unwrap(); - } - - let data = sl.master().data.lock(); - match data.result { - Some(Ok(_)) => (), - _ => unreachable!(), - } + ml.ensure_completed(); } #[test] fn ecdsa_signing_works_when_delegated_to_other_node() { - let (_, mut sl) = prepare_signing_sessions(1, 4); - - // let's say node1 doesn't have a share && delegates decryption request to node0 - // initially session is created on node1 => node1 is master for itself, but for other nodes node0 is still master - let actual_master = sl.nodes.keys().nth(0).cloned().unwrap(); - let requested_node = sl.nodes.keys().skip(1).nth(0).cloned().unwrap(); - let version = sl.nodes[&actual_master].key_storage.get(&Default::default()).unwrap().unwrap().last_version().unwrap().hash.clone(); - sl.nodes[&requested_node].key_storage.remove(&Default::default()).unwrap(); - sl.nodes.get_mut(&requested_node).unwrap().session.core.key_share = None; - sl.nodes.get_mut(&requested_node).unwrap().session.core.meta.master_node_id = sl.nodes[&requested_node].session.core.meta.self_node_id.clone(); - sl.nodes[&requested_node].session.data.lock().consensus_session.consensus_job_mut().executor_mut().set_requester( - sl.nodes[&actual_master].session.data.lock().consensus_session.consensus_job().executor().requester().unwrap().clone() - ); - - // now let's try to do a decryption - sl.nodes[&requested_node].session.delegate(actual_master, version, H256::random()).unwrap(); - - // then consensus reachable, but single node will disagree - while let Some((from, to, message)) = sl.take_message() { - sl.process_message((from, to, message)).unwrap(); - } + MessageLoop::new(4, 1).unwrap().init_delegated().unwrap().0.ensure_completed(); } #[test] fn ecdsa_signing_works_when_share_owners_are_isolated() { - let (_, mut sl) = prepare_signing_sessions(2, 6); - - // we need 5 out of 6 nodes to agree to do a decryption - // let's say that 1 of these nodes (master) is isolated - let isolated_node_id = sl.nodes.keys().skip(2).nth(0).cloned().unwrap(); - for node in sl.nodes.values() { - node.cluster.remove_node(&isolated_node_id); - } - - // now let's try to do a signing - sl.master().initialize(sl.version.clone(), H256::random()).unwrap(); - - // then consensus reachable, but single node will disagree - while let Some((from, to, message)) = sl.take_message() { - sl.process_message((from, to, message)).unwrap(); - } - - let data = sl.master().data.lock(); - match data.result { - Some(Ok(_)) => (), - _ => unreachable!(), - } + MessageLoop::new(6, 2).unwrap().init_with_isolated().unwrap().0.ensure_completed(); } } diff --git a/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs b/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs index 075e456fd..0b0619f96 100644 --- a/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs +++ b/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs @@ -809,279 +809,150 @@ impl JobTransport for SigningJobTransport { mod tests { use std::sync::Arc; use std::str::FromStr; - use std::collections::{BTreeSet, BTreeMap, VecDeque}; + use 
std::collections::BTreeMap; use ethereum_types::{Address, H256}; - use ethkey::{self, Random, Generator, Public, Secret, KeyPair, public_to_address}; + use ethkey::{self, Random, Generator, Public, Secret, public_to_address}; use acl_storage::DummyAclStorage; - use key_server_cluster::{NodeId, DummyKeyStorage, DocumentKeyShare, DocumentKeyShareVersion, SessionId, - Requester, SessionMeta, Error, KeyStorage}; - use key_server_cluster::cluster_sessions::ClusterSession; - use key_server_cluster::cluster::tests::DummyCluster; - use key_server_cluster::generation_session::tests::MessageLoop as KeyGenerationMessageLoop; + use key_server_cluster::{SessionId, Requester, SessionMeta, Error, KeyStorage}; + use key_server_cluster::cluster::tests::MessageLoop as ClusterMessageLoop; + use key_server_cluster::generation_session::tests::MessageLoop as GenerationMessageLoop; use key_server_cluster::math; - use key_server_cluster::message::{Message, SchnorrSigningMessage, SchnorrSigningConsensusMessage, ConsensusMessage, ConfirmConsensusInitialization, - SchnorrSigningGenerationMessage, GenerationMessage, ConfirmInitialization, InitializeSession, SchnorrRequestPartialSignature}; + use key_server_cluster::message::{SchnorrSigningMessage, SchnorrSigningConsensusMessage, + ConsensusMessage, ConfirmConsensusInitialization, SchnorrSigningGenerationMessage, GenerationMessage, + ConfirmInitialization, InitializeSession, SchnorrRequestPartialSignature}; use key_server_cluster::signing_session_schnorr::{SessionImpl, SessionState, SessionParams}; - struct Node { - pub node_id: NodeId, - pub cluster: Arc<DummyCluster>, - pub key_storage: Arc<DummyKeyStorage>, - pub session: SessionImpl, - } - - struct MessageLoop { - pub session_id: SessionId, - pub requester: KeyPair, - pub nodes: BTreeMap<NodeId, Node>, - pub queue: VecDeque<(NodeId, NodeId, Message)>, - pub acl_storages: Vec<Arc<DummyAclStorage>>, - pub version: H256, - } + #[derive(Debug)] + pub struct MessageLoop(pub ClusterMessageLoop); impl MessageLoop { - pub fn new(gl: &KeyGenerationMessageLoop) -> Self { - let version = gl.nodes.values().nth(0).unwrap().key_storage.get(&Default::default()).unwrap().unwrap().versions.iter().last().unwrap().hash; - let mut nodes = BTreeMap::new(); - let session_id = gl.session_id.clone(); + pub fn new(num_nodes: usize, threshold: usize) -> Result<Self, Error> { + let ml = GenerationMessageLoop::new(num_nodes).init(threshold)?; + ml.0.loop_until(|| ml.0.is_empty()); // complete generation session + + Ok(MessageLoop(ml.0)) + } + + pub fn into_session(&self, at_node: usize) -> SessionImpl { + let requester = Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), + &SessionId::default()).unwrap())); + SessionImpl::new(SessionParams { + meta: SessionMeta { + id: SessionId::default(), + self_node_id: self.0.node(at_node), + master_node_id: self.0.node(0), + threshold: self.0.key_storage(at_node).get(&Default::default()).unwrap().unwrap().threshold, + configured_nodes_count: self.0.nodes().len(), + connected_nodes_count: self.0.nodes().len(), + }, + access_key: Random.generate().unwrap().secret().clone(), + key_share: self.0.key_storage(at_node).get(&Default::default()).unwrap(), + acl_storage: Arc::new(DummyAclStorage::default()), + cluster: self.0.cluster(0).view().unwrap(), + nonce: 0, + }, requester).unwrap() + } + + pub fn init_with_version(self, key_version: Option<H256>) -> Result<(Self, Public, H256), Error> { + let message_hash = H256::random(); let requester = Random.generate().unwrap(); - let signature = Some(ethkey::sign(requester.secret(), &SessionId::default()).unwrap()); - let 
master_node_id = gl.nodes.keys().nth(0).unwrap().clone(); - let mut acl_storages = Vec::new(); - for (i, (gl_node_id, gl_node)) in gl.nodes.iter().enumerate() { - let acl_storage = Arc::new(DummyAclStorage::default()); - acl_storages.push(acl_storage.clone()); - let cluster = Arc::new(DummyCluster::new(gl_node_id.clone())); - let session = SessionImpl::new(SessionParams { - meta: SessionMeta { - id: session_id.clone(), - self_node_id: gl_node_id.clone(), - master_node_id: master_node_id.clone(), - threshold: gl_node.key_storage.get(&session_id).unwrap().unwrap().threshold, - configured_nodes_count: gl.nodes.len(), - connected_nodes_count: gl.nodes.len(), - }, - access_key: "834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec".parse().unwrap(), - key_share: Some(gl_node.key_storage.get(&session_id).unwrap().unwrap()), - acl_storage: acl_storage, - cluster: cluster.clone(), - nonce: 0, - }, if i == 0 { signature.clone().map(Into::into) } else { None }).unwrap(); - nodes.insert(gl_node_id.clone(), Node { node_id: gl_node_id.clone(), cluster: cluster, key_storage: gl_node.key_storage.clone(), session: session }); - } - - let nodes_ids: Vec<_> = nodes.keys().cloned().collect(); - for node in nodes.values() { - for node_id in &nodes_ids { - node.cluster.add_node(node_id.clone()); - } - } - - MessageLoop { - session_id: session_id, - requester: requester, - nodes: nodes, - queue: VecDeque::new(), - acl_storages: acl_storages, - version: version, - } + let signature = ethkey::sign(requester.secret(), &SessionId::default()).unwrap(); + self.0.cluster(0).client().new_schnorr_signing_session( + Default::default(), + signature.into(), + key_version, + message_hash).map(|_| (self, *requester.public(), message_hash)) } - pub fn master(&self) -> &SessionImpl { - &self.nodes.values().nth(0).unwrap().session + pub fn init(self) -> Result<(Self, Public, H256), Error> { + let key_version = self.key_version(); + self.init_with_version(Some(key_version)) } - pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { - self.nodes.values() - .filter_map(|n| n.cluster.take_message().map(|m| (n.node_id.clone(), m.0, m.1))) - .nth(0) - .or_else(|| self.queue.pop_front()) + pub fn init_delegated(self) -> Result<(Self, Public, H256), Error> { + self.0.key_storage(0).remove(&Default::default()).unwrap(); + self.init_with_version(None) } - pub fn process_message(&mut self, mut msg: (NodeId, NodeId, Message)) -> Result<(), Error> { - let mut is_queued_message = false; - loop { - match self.nodes[&msg.1].session.on_message(&msg.0, &msg.2) { - Ok(_) => { - if let Some(message) = self.queue.pop_front() { - msg = message; - is_queued_message = true; - continue; - } - return Ok(()); - }, - Err(Error::TooEarlyForRequest) => { - if is_queued_message { - self.queue.push_front(msg); - } else { - self.queue.push_back(msg); - } - return Ok(()); - }, - Err(err) => return Err(err), - } - } + pub fn init_with_isolated(self) -> Result<(Self, Public, H256), Error> { + self.0.isolate(1); + self.init() } - pub fn run_until<F: Fn(&MessageLoop) -> bool>(&mut self, predicate: F) -> Result<(), Error> { - while let Some((from, to, message)) = self.take_message() { - if predicate(self) { - return Ok(()); - } - - self.process_message((from, to, message))?; - } - - unreachable!("either wrong predicate, or failing test") - } - } - - fn prepare_signing_sessions(threshold: usize, num_nodes: usize) -> (KeyGenerationMessageLoop, MessageLoop) { - // run key generation sessions - let mut gl = KeyGenerationMessageLoop::new(num_nodes); - 
gl.master().initialize(Default::default(), Default::default(), false, threshold, gl.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).unwrap(); - while let Some((from, to, message)) = gl.take_message() { - gl.process_message((from, to, message)).unwrap(); + pub fn init_without_share(self) -> Result<(Self, Public, H256), Error> { + let key_version = self.key_version(); + self.0.key_storage(0).remove(&Default::default()).unwrap(); + self.init_with_version(Some(key_version)) } - // run signing session - let sl = MessageLoop::new(&gl); - (gl, sl) + pub fn session_at(&self, idx: usize) -> Arc<SessionImpl> { + self.0.sessions(idx).schnorr_signing_sessions.first().unwrap() + } + + pub fn ensure_completed(&self) { + self.0.loop_until(|| self.0.is_empty()); + assert!(self.session_at(0).wait().is_ok()); + } + + pub fn key_version(&self) -> H256 { + self.0.key_storage(0).get(&Default::default()) .unwrap().unwrap().versions.iter().last().unwrap().hash + } } #[test] fn schnorr_complete_gen_sign_session() { let test_cases = [(0, 1), (0, 5), (2, 5), (3, 5)]; for &(threshold, num_nodes) in &test_cases { - let (gl, mut sl) = prepare_signing_sessions(threshold, num_nodes); + let (ml, _, message) = MessageLoop::new(num_nodes, threshold).unwrap().init().unwrap(); + ml.0.loop_until(|| ml.0.is_empty()); - // run signing session - let message_hash = H256::from(777); - sl.master().initialize(sl.version.clone(), message_hash).unwrap(); - while let Some((from, to, message)) = sl.take_message() { - sl.process_message((from, to, message)).unwrap(); - } - - // verify signature - let public = gl.master().joint_public_and_secret().unwrap().unwrap().0; - let signature = sl.master().wait().unwrap(); - assert!(math::verify_schnorr_signature(&public, &signature, &message_hash).unwrap()); + let signer_public = ml.0.key_storage(0).get(&Default::default()).unwrap().unwrap().public; + let signature = ml.session_at(0).wait().unwrap(); + assert!(math::verify_schnorr_signature(&signer_public, &signature, &message).unwrap()); } } #[test] fn schnorr_constructs_in_cluster_of_single_node() { - let mut nodes = BTreeMap::new(); - let self_node_id = Random.generate().unwrap().public().clone(); - nodes.insert(self_node_id, Random.generate().unwrap().secret().clone()); - match SessionImpl::new(SessionParams { - meta: SessionMeta { - id: SessionId::default(), - self_node_id: self_node_id.clone(), - master_node_id: self_node_id.clone(), - threshold: 0, - configured_nodes_count: 1, - connected_nodes_count: 1, - }, - access_key: Random.generate().unwrap().secret().clone(), - key_share: Some(DocumentKeyShare { - author: Default::default(), - threshold: 0, - public: Default::default(), - common_point: Some(Random.generate().unwrap().public().clone()), - encrypted_point: Some(Random.generate().unwrap().public().clone()), - versions: vec![DocumentKeyShareVersion { - hash: Default::default(), - id_numbers: nodes, - secret_share: Random.generate().unwrap().secret().clone(), - }], - }), - acl_storage: Arc::new(DummyAclStorage::default()), - cluster: Arc::new(DummyCluster::new(self_node_id.clone())), - nonce: 0, - }, Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()))) { - Ok(_) => (), - _ => panic!("unexpected"), - } + MessageLoop::new(1, 0).unwrap().init().unwrap(); } #[test] fn schnorr_fails_to_initialize_if_does_not_have_a_share() { - let self_node_id = Random.generate().unwrap().public().clone(); - let session = SessionImpl::new(SessionParams { - meta: SessionMeta { - id: SessionId::default(), - 
self_node_id: self_node_id.clone(), - master_node_id: self_node_id.clone(), - threshold: 0, - configured_nodes_count: 1, - connected_nodes_count: 1, - }, - access_key: Random.generate().unwrap().secret().clone(), - key_share: None, - acl_storage: Arc::new(DummyAclStorage::default()), - cluster: Arc::new(DummyCluster::new(self_node_id.clone())), - nonce: 0, - }, Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()))).unwrap(); - assert_eq!(session.initialize(Default::default(), Default::default()), Err(Error::InvalidMessage)); + assert!(MessageLoop::new(2, 1).unwrap().init_without_share().is_err()); } #[test] fn schnorr_fails_to_initialize_if_threshold_is_wrong() { - let mut nodes = BTreeMap::new(); - let self_node_id = Random.generate().unwrap().public().clone(); - nodes.insert(self_node_id.clone(), Random.generate().unwrap().secret().clone()); - nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); - let session = SessionImpl::new(SessionParams { - meta: SessionMeta { - id: SessionId::default(), - self_node_id: self_node_id.clone(), - master_node_id: self_node_id.clone(), - threshold: 2, - configured_nodes_count: 1, - connected_nodes_count: 1, - }, - access_key: Random.generate().unwrap().secret().clone(), - key_share: Some(DocumentKeyShare { - author: Default::default(), - threshold: 2, - public: Default::default(), - common_point: Some(Random.generate().unwrap().public().clone()), - encrypted_point: Some(Random.generate().unwrap().public().clone()), - versions: vec![DocumentKeyShareVersion { - hash: Default::default(), - id_numbers: nodes, - secret_share: Random.generate().unwrap().secret().clone(), - }], - }), - acl_storage: Arc::new(DummyAclStorage::default()), - cluster: Arc::new(DummyCluster::new(self_node_id.clone())), - nonce: 0, - }, Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()))).unwrap(); - assert_eq!(session.initialize(Default::default(), Default::default()), Err(Error::ConsensusUnreachable)); + let mut ml = MessageLoop::new(3, 2).unwrap(); + ml.0.exclude(2); + assert_eq!(ml.init().unwrap_err(), Error::ConsensusUnreachable); } #[test] fn schnorr_fails_to_initialize_when_already_initialized() { - let (_, sl) = prepare_signing_sessions(1, 3); - assert_eq!(sl.master().initialize(sl.version.clone(), 777.into()), Ok(())); - assert_eq!(sl.master().initialize(sl.version.clone(), 777.into()), Err(Error::InvalidStateForRequest)); + let (ml, _, _) = MessageLoop::new(1, 0).unwrap().init().unwrap(); + assert_eq!(ml.session_at(0).initialize(ml.key_version(), 777.into()), + Err(Error::InvalidStateForRequest)); } #[test] fn schnorr_does_not_fail_when_consensus_message_received_after_consensus_established() { - let (_, mut sl) = prepare_signing_sessions(1, 3); - sl.master().initialize(sl.version.clone(), 777.into()).unwrap(); + let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); + // consensus is established - sl.run_until(|sl| sl.master().state() == SessionState::SessionKeyGeneration).unwrap(); + let session = ml.session_at(0); + ml.0.loop_until(|| session.state() == SessionState::SessionKeyGeneration); + // but 3rd node continues to send its messages // this should not fail session - let consensus_group = sl.master().data.lock().consensus_session.select_consensus_group().unwrap().clone(); + let consensus_group = session.data.lock().consensus_session.select_consensus_group().unwrap().clone(); let mut 
had_3rd_message = false; - while let Some((from, to, message)) = sl.take_message() { + while let Some((from, to, message)) = ml.0.take_message() { if !consensus_group.contains(&from) { had_3rd_message = true; - sl.process_message((from, to, message)).unwrap(); + ml.0.process_message(from, to, message); } } assert!(had_3rd_message); @@ -1089,10 +960,11 @@ mod tests { #[test] fn schnorr_fails_when_consensus_message_is_received_when_not_initialized() { - let (_, sl) = prepare_signing_sessions(1, 3); - assert_eq!(sl.master().on_consensus_message(sl.nodes.keys().nth(1).unwrap(), &SchnorrSigningConsensusMessage { + let ml = MessageLoop::new(3, 1).unwrap(); + let session = ml.into_session(0); + assert_eq!(session.on_consensus_message(&ml.0.node(1), &SchnorrSigningConsensusMessage { session: SessionId::default().into(), - sub_session: sl.master().core.access_key.clone().into(), + sub_session: session.core.access_key.clone().into(), session_nonce: 0, message: ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { is_confirmed: true, @@ -1102,10 +974,11 @@ mod tests { #[test] fn schnorr_fails_when_generation_message_is_received_when_not_initialized() { - let (_, sl) = prepare_signing_sessions(1, 3); - assert_eq!(sl.master().on_generation_message(sl.nodes.keys().nth(1).unwrap(), &SchnorrSigningGenerationMessage { + let ml = MessageLoop::new(3, 1).unwrap(); + let session = ml.into_session(0); + assert_eq!(session.on_generation_message(&ml.0.node(1), &SchnorrSigningGenerationMessage { session: SessionId::default().into(), - sub_session: sl.master().core.access_key.clone().into(), + sub_session: session.core.access_key.clone().into(), session_nonce: 0, message: GenerationMessage::ConfirmInitialization(ConfirmInitialization { session: SessionId::default().into(), @@ -1117,16 +990,16 @@ mod tests { #[test] fn schnorr_fails_when_generation_sesson_is_initialized_by_slave_node() { - let (_, mut sl) = prepare_signing_sessions(1, 3); - sl.master().initialize(sl.version.clone(), 777.into()).unwrap(); - sl.run_until(|sl| sl.master().state() == SessionState::SessionKeyGeneration).unwrap(); + let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); + let session = ml.session_at(0); + ml.0.loop_until(|| session.state() == SessionState::SessionKeyGeneration); - let slave2_id = sl.nodes.keys().nth(2).unwrap().clone(); - let slave1 = &sl.nodes.values().nth(1).unwrap().session; + let slave2_id = ml.0.node(2); + let slave1_session = ml.session_at(1); - assert_eq!(slave1.on_generation_message(&slave2_id, &SchnorrSigningGenerationMessage { + assert_eq!(slave1_session.on_generation_message(&slave2_id, &SchnorrSigningGenerationMessage { session: SessionId::default().into(), - sub_session: sl.master().core.access_key.clone().into(), + sub_session: session.core.access_key.clone().into(), session_nonce: 0, message: GenerationMessage::InitializeSession(InitializeSession { session: SessionId::default().into(), @@ -1143,11 +1016,11 @@ mod tests { #[test] fn schnorr_fails_when_signature_requested_when_not_initialized() { - let (_, sl) = prepare_signing_sessions(1, 3); - let slave1 = &sl.nodes.values().nth(1).unwrap().session; - assert_eq!(slave1.on_partial_signature_requested(sl.nodes.keys().nth(0).unwrap(), &SchnorrRequestPartialSignature { + let ml = MessageLoop::new(3, 1).unwrap(); + let session = ml.into_session(1); + assert_eq!(session.on_partial_signature_requested(&ml.0.node(0), &SchnorrRequestPartialSignature { session: SessionId::default().into(), - sub_session: 
sl.master().core.access_key.clone().into(), + sub_session: session.core.access_key.clone().into(), session_nonce: 0, request_id: Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap().into(), message_hash: H256::default().into(), @@ -1157,10 +1030,11 @@ mod tests { #[test] fn schnorr_fails_when_signature_requested_by_slave_node() { - let (_, sl) = prepare_signing_sessions(1, 3); - assert_eq!(sl.master().on_partial_signature_requested(sl.nodes.keys().nth(1).unwrap(), &SchnorrRequestPartialSignature { + let ml = MessageLoop::new(3, 1).unwrap(); + let session = ml.into_session(0); + assert_eq!(session.on_partial_signature_requested(&ml.0.node(1), &SchnorrRequestPartialSignature { session: SessionId::default().into(), - sub_session: sl.master().core.access_key.clone().into(), + sub_session: session.core.access_key.clone().into(), session_nonce: 0, request_id: Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap().into(), message_hash: H256::default().into(), @@ -1170,123 +1044,68 @@ mod tests { #[test] fn schnorr_failed_signing_session() { - let (_, mut sl) = prepare_signing_sessions(1, 3); - sl.master().initialize(sl.version.clone(), 777.into()).unwrap(); + let (ml, requester, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); // we need at least 2-of-3 nodes to agree to reach consensus // let's say 2 of 3 nodes disagree - sl.acl_storages[1].prohibit(public_to_address(sl.requester.public()), SessionId::default()); - sl.acl_storages[2].prohibit(public_to_address(sl.requester.public()), SessionId::default()); + ml.0.acl_storage(1).prohibit(public_to_address(&requester), SessionId::default()); + ml.0.acl_storage(2).prohibit(public_to_address(&requester), SessionId::default()); // then consensus is unreachable - assert_eq!(sl.run_until(|_| false), Err(Error::ConsensusUnreachable)); + ml.0.loop_until(|| ml.0.is_empty()); + assert_eq!(ml.session_at(0).wait().unwrap_err(), Error::ConsensusUnreachable); } #[test] fn schnorr_complete_signing_session_with_single_node_failing() { - let (_, mut sl) = prepare_signing_sessions(1, 3); - sl.master().initialize(sl.version.clone(), 777.into()).unwrap(); + let (ml, requester, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); // we need at least 2-of-3 nodes to agree to reach consensus // let's say 1 of 3 nodes disagree - sl.acl_storages[1].prohibit(public_to_address(sl.requester.public()), SessionId::default()); + ml.0.acl_storage(1).prohibit(public_to_address(&requester), SessionId::default()); // then consensus reachable, but single node will disagree - while let Some((from, to, message)) = sl.take_message() { - sl.process_message((from, to, message)).unwrap(); - } - - let data = sl.master().data.lock(); - match data.result { - Some(Ok(_)) => (), - _ => unreachable!(), - } + ml.ensure_completed(); } #[test] fn schnorr_complete_signing_session_with_acl_check_failed_on_master() { - let (_, mut sl) = prepare_signing_sessions(1, 3); - sl.master().initialize(sl.version.clone(), 777.into()).unwrap(); + let (ml, requester, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); // we need at least 2-of-3 nodes to agree to reach consensus // let's say 1 of 3 nodes disagree - sl.acl_storages[0].prohibit(public_to_address(sl.requester.public()), SessionId::default()); + ml.0.acl_storage(0).prohibit(public_to_address(&requester), SessionId::default()); // then consensus reachable, but single node will disagree - while let Some((from, to, message)) = sl.take_message() { - 
sl.process_message((from, to, message)).unwrap(); - } - - let data = sl.master().data.lock(); - match data.result { - Some(Ok(_)) => (), - _ => unreachable!(), - } + ml.ensure_completed(); } #[test] fn schnorr_signing_message_fails_when_nonce_is_wrong() { - let (_, sl) = prepare_signing_sessions(1, 3); - assert_eq!(sl.master().process_message(sl.nodes.keys().nth(1).unwrap(), &SchnorrSigningMessage::SchnorrSigningGenerationMessage(SchnorrSigningGenerationMessage { + let ml = MessageLoop::new(3, 1).unwrap(); + let session = ml.into_session(1); + let msg = SchnorrSigningMessage::SchnorrSigningGenerationMessage(SchnorrSigningGenerationMessage { session: SessionId::default().into(), - sub_session: sl.master().core.access_key.clone().into(), + sub_session: session.core.access_key.clone().into(), session_nonce: 10, message: GenerationMessage::ConfirmInitialization(ConfirmInitialization { session: SessionId::default().into(), session_nonce: 0, derived_point: Public::default().into(), }), - })), Err(Error::ReplayProtection)); + }); + assert_eq!(session.process_message(&ml.0.node(1), &msg), Err(Error::ReplayProtection)); } #[test] fn schnorr_signing_works_when_delegated_to_other_node() { - let (_, mut sl) = prepare_signing_sessions(1, 3); - - // let's say node1 doesn't have a share && delegates decryption request to node0 - // initially session is created on node1 => node1 is master for itself, but for other nodes node0 is still master - let actual_master = sl.nodes.keys().nth(0).cloned().unwrap(); - let requested_node = sl.nodes.keys().skip(1).nth(0).cloned().unwrap(); - let version = sl.nodes[&actual_master].key_storage.get(&Default::default()).unwrap().unwrap().last_version().unwrap().hash.clone(); - sl.nodes[&requested_node].key_storage.remove(&Default::default()).unwrap(); - sl.nodes.get_mut(&requested_node).unwrap().session.core.key_share = None; - sl.nodes.get_mut(&requested_node).unwrap().session.core.meta.master_node_id = sl.nodes[&requested_node].session.core.meta.self_node_id.clone(); - sl.nodes[&requested_node].session.data.lock().consensus_session.consensus_job_mut().executor_mut().set_requester( - sl.nodes[&actual_master].session.data.lock().consensus_session.consensus_job().executor().requester().unwrap().clone() - ); - - // now let's try to do a decryption - sl.nodes[&requested_node].session.delegate(actual_master, version, Default::default()).unwrap(); - - // then consensus reachable, but single node will disagree - while let Some((from, to, message)) = sl.take_message() { - sl.process_message((from, to, message)).unwrap(); - } + let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init_delegated().unwrap(); + ml.ensure_completed(); } #[test] fn schnorr_signing_works_when_share_owners_are_isolated() { - let (_, mut sl) = prepare_signing_sessions(1, 3); - - // we need 2 out of 3 nodes to agree to do a decryption - // let's say that 1 of these nodes (master) is isolated - let isolated_node_id = sl.nodes.keys().skip(2).nth(0).cloned().unwrap(); - for node in sl.nodes.values() { - node.cluster.remove_node(&isolated_node_id); - } - - // now let's try to do a signing - sl.master().initialize(sl.version.clone(), 777.into()).unwrap(); - - // then consensus reachable, but single node will disagree - while let Some((from, to, message)) = sl.take_message() { - sl.process_message((from, to, message)).unwrap(); - } - - let data = sl.master().data.lock(); - match data.result { - Some(Ok(_)) => (), - _ => unreachable!(), - } + let (ml, _, _) = MessageLoop::new(3, 
1).unwrap().init_with_isolated().unwrap(); + ml.ensure_completed(); } } diff --git a/secret-store/src/key_server_cluster/cluster.rs b/secret-store/src/key_server_cluster/cluster.rs index 09a1a6a5a..a8416a8f7 100644 --- a/secret-store/src/key_server_cluster/cluster.rs +++ b/secret-store/src/key_server_cluster/cluster.rs @@ -14,26 +14,22 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use std::io; -use std::time::{Duration, Instant}; use std::sync::Arc; -use std::sync::atomic::{AtomicBool, Ordering}; use std::collections::{BTreeMap, BTreeSet}; -use std::collections::btree_map::Entry; -use std::net::{SocketAddr, IpAddr}; -use futures::{future, Future, Stream}; -use parking_lot::{Mutex, RwLock}; -use tokio_io::IoFuture; -use tokio::timer::{Interval, timeout::Error as TimeoutError}; -use tokio::net::{TcpListener, TcpStream}; -use ethkey::{Public, KeyPair, Signature, Random, Generator}; +use parking_lot::RwLock; +use ethkey::{Public, Signature, Random, Generator}; use ethereum_types::{Address, H256}; use parity_runtime::Executor; use key_server_cluster::{Error, NodeId, SessionId, Requester, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair}; use key_server_cluster::cluster_sessions::{ClusterSession, AdminSession, ClusterSessions, SessionIdWithSubSession, - ClusterSessionsContainer, SERVERS_SET_CHANGE_SESSION_ID, create_cluster_view, AdminSessionCreationData, ClusterSessionsListener}; -use key_server_cluster::cluster_sessions_creator::{ClusterSessionCreator, IntoSessionId}; -use key_server_cluster::message::{self, Message, ClusterMessage}; + ClusterSessionsContainer, SERVERS_SET_CHANGE_SESSION_ID, create_cluster_view, + AdminSessionCreationData, ClusterSessionsListener}; +use key_server_cluster::cluster_sessions_creator::ClusterSessionCreator; +use key_server_cluster::cluster_connections::{ConnectionProvider, ConnectionManager}; +use key_server_cluster::cluster_connections_net::{NetConnectionsManager, + NetConnectionsContainer, NetConnectionsManagerConfig}; +use key_server_cluster::cluster_message_processor::{MessageProcessor, SessionsMessageProcessor}; +use key_server_cluster::message::Message; use key_server_cluster::generation_session::{SessionImpl as GenerationSession}; use key_server_cluster::decryption_session::{SessionImpl as DecryptionSession}; use key_server_cluster::encryption_session::{SessionImpl as EncryptionSession}; @@ -41,31 +37,15 @@ use key_server_cluster::signing_session_ecdsa::{SessionImpl as EcdsaSigningSessi use key_server_cluster::signing_session_schnorr::{SessionImpl as SchnorrSigningSession}; use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession, IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction}; -use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream, read_encrypted_message, WriteMessage, write_encrypted_message}; -use key_server_cluster::net::{accept_connection as net_accept_connection, connect as net_connect, Connection as NetConnection}; -use key_server_cluster::connection_trigger::{Maintain, ConnectionTrigger, SimpleConnectionTrigger, ServersSetChangeSessionCreatorConnector}; +use key_server_cluster::connection_trigger::{ConnectionTrigger, + SimpleConnectionTrigger, ServersSetChangeSessionCreatorConnector}; use key_server_cluster::connection_trigger_with_migration::ConnectionTriggerWithMigration; -/// Maintain interval (seconds). 
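The rewritten signing tests above, and the cluster code that follows, lean on a new connection abstraction pulled in by these imports. A rough sketch of the two traits, with method sets inferred from their call sites later in this diff (the exact signatures, bounds, and the Connection item are assumptions, not quotes):

use std::collections::BTreeSet;
use std::sync::Arc;

/// Read-only view of the current connections (assumed shape).
pub trait ConnectionProvider: Send + Sync {
	/// Connected nodes, or an error when this node is isolated.
	fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error>;
	/// Known nodes without an established connection.
	fn disconnected_nodes(&self) -> BTreeSet<NodeId>;
	/// Established connection to the given node, if any.
	fn connection(&self, node: &NodeId) -> Option<Arc<dyn Connection>>;
}

/// Owner of the connections, driving (re)connection attempts (assumed shape).
pub trait ConnectionManager: Send + Sync {
	/// Read-only provider handle shared with sessions.
	fn provider(&self) -> Arc<dyn ConnectionProvider>;
	/// Ask the manager to establish missing connections.
	fn connect(&self);
}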
Every MAINTAIN_INTERVAL seconds node: -/// 1) checks if connected nodes are responding to KeepAlive messages -/// 2) tries to connect to disconnected nodes -/// 3) checks if enc/dec sessions are time-outed -const MAINTAIN_INTERVAL: u64 = 10; - -/// When no messages have been received from node within KEEP_ALIVE_SEND_INTERVAL seconds, -/// we must send KeepAlive message to the node to check if it still responds to messages. -const KEEP_ALIVE_SEND_INTERVAL: Duration = Duration::from_secs(30); -/// When no messages have been received from node within KEEP_ALIVE_DISCONNECT_INTERVAL seconds, -/// we must treat this node as non-responding && disconnect from it. -const KEEP_ALIVE_DISCONNECT_INTERVAL: Duration = Duration::from_secs(60); - -/// Empty future. -pub type BoxedEmptyFuture = Box + Send>; +#[cfg(test)] +use key_server_cluster::cluster_connections::tests::{MessagesQueue, TestConnections, new_test_connections}; /// Cluster interface for external clients. pub trait ClusterClient: Send + Sync { - /// Get cluster state. - fn cluster_state(&self) -> ClusterState; /// Start new generation session. fn new_generation_session(&self, session_id: SessionId, origin: Option
, author: Address, threshold: usize) -> Result, Error>; /// Start new encryption session. @@ -94,12 +74,11 @@ pub trait ClusterClient: Send + Sync { /// Get active generation session with given id. #[cfg(test)] fn generation_session(&self, session_id: &SessionId) -> Option>; + #[cfg(test)] + fn is_fully_connected(&self) -> bool; /// Try connect to disconnected nodes. #[cfg(test)] fn connect(&self); - /// Get key storage. - #[cfg(test)] - fn key_storage(&self) -> Arc; } /// Cluster access for single session participant. @@ -121,12 +100,8 @@ pub trait Cluster: Send + Sync { /// Cluster initialization parameters. #[derive(Clone)] pub struct ClusterConfiguration { - /// Allow connecting to 'higher' nodes. - pub allow_connecting_to_higher_nodes: bool, /// KeyPair this node holds. pub self_key_pair: Arc, - /// Interface to listen to. - pub listen_address: (String, u16), /// Cluster nodes set. pub key_server_set: Arc, /// Reference to key storage @@ -135,114 +110,132 @@ pub struct ClusterConfiguration { pub acl_storage: Arc, /// Administrator public key. pub admin_public: Option, - /// Should key servers set change session when servers set changes? This - /// will only work when servers set is configured using KeyServerSet - /// contract. - pub auto_migrate_enabled: bool, -} - -/// Cluster state. -pub struct ClusterState { - /// Nodes, to which connections are established. - pub connected: BTreeSet, + /// Do not remove sessions from container. + pub preserve_sessions: bool, } /// Network cluster implementation. -pub struct ClusterCore { - /// Listen address. - listen_address: SocketAddr, +pub struct ClusterCore { /// Cluster data. - data: Arc, + data: Arc>, } /// Network cluster client interface implementation. -pub struct ClusterClientImpl { +pub struct ClusterClientImpl { /// Cluster data. - data: Arc, + data: Arc>, } /// Network cluster view. It is a communication channel, required in single session. pub struct ClusterView { - core: Arc>, configured_nodes_count: usize, - connected_nodes_count: usize, + connected_nodes: BTreeSet, + connections: Arc, + self_key_pair: Arc, } /// Cross-thread shareable cluster data. -pub struct ClusterData { +pub struct ClusterData { /// Cluster configuration. pub config: ClusterConfiguration, - /// Handle to the event loop. - pub executor: Executor, /// KeyPair this node holds. pub self_key_pair: Arc, /// Connections data. - pub connections: ClusterConnections, + pub connections: C, /// Active sessions data. - pub sessions: ClusterSessions, - /// A shutdown flag. - pub is_shutdown: Arc, + pub sessions: Arc, + /// Messages processor. + pub message_processor: Arc, + /// Link between servers set change session and the connections manager. + pub servers_set_change_creator_connector: Arc, } -/// Connections that are forming the cluster. Lock order: trigger.lock() -> data.lock(). -pub struct ClusterConnections { - /// Self node id. - pub self_node_id: NodeId, - /// All known other key servers. - pub key_server_set: Arc, - /// Connections trigger. - pub trigger: Mutex>, - /// Servers set change session creator connector. - pub connector: Arc, - /// Connections data. - pub data: RwLock, +/// Create new network-backed cluster.
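Note that type parameters inside angle brackets do not survive in the rendering above, so the generic dimension of this hunk is easy to miss: ClusterCore and ClusterData are now parameterized over the connection backend. A reconstruction of the intended shapes (the <C: ConnectionManager> bounds and dyn types below are inferred, not quoted):

/// Network cluster implementation (reconstructed generics).
pub struct ClusterCore<C: ConnectionManager> {
	/// Cluster data.
	data: Arc<ClusterData<C>>,
}

/// Cross-thread shareable cluster data (same reconstruction).
pub struct ClusterData<C: ConnectionManager> {
	pub config: ClusterConfiguration,
	pub self_key_pair: Arc<dyn NodeKeyPair>,
	pub connections: C,
	pub sessions: Arc<ClusterSessions>,
	pub message_processor: Arc<dyn MessageProcessor>,
	pub servers_set_change_creator_connector: Arc<dyn ServersSetChangeSessionCreatorConnector>,
}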
+pub fn new_network_cluster( + executor: Executor, + config: ClusterConfiguration, + net_config: NetConnectionsManagerConfig +) -> Result>, Error> { + let mut nodes = config.key_server_set.snapshot().current_set; + let is_isolated = nodes.remove(config.self_key_pair.public()).is_none(); + let connections_data = Arc::new(RwLock::new(NetConnectionsContainer { + is_isolated, + nodes, + connections: BTreeMap::new(), + })); + + let connection_trigger: Box = match net_config.auto_migrate_enabled { + false => Box::new(SimpleConnectionTrigger::with_config(&config)), + true if config.admin_public.is_none() => Box::new(ConnectionTriggerWithMigration::with_config(&config)), + true => return Err(Error::Internal( + "secret store administrator public key is specified with auto-migration enabled".into() + )), + }; + + let servers_set_change_creator_connector = connection_trigger.servers_set_change_creator_connector(); + let sessions = Arc::new(ClusterSessions::new(&config, servers_set_change_creator_connector.clone())); + let message_processor = Arc::new(SessionsMessageProcessor::new( + config.self_key_pair.clone(), + servers_set_change_creator_connector.clone(), + sessions.clone(), + connections_data.clone())); + + let connections = NetConnectionsManager::new( + executor, + message_processor.clone(), + connection_trigger, + connections_data, + &config, + net_config)?; + connections.start()?; + + ClusterCore::new(sessions, message_processor, connections, servers_set_change_creator_connector, config) } -/// Cluster connections data. -pub struct ClusterConnectionsData { - /// Is this node isolated from cluster? - pub is_isolated: bool, - /// Active key servers set. - pub nodes: BTreeMap, - /// Active connections to key servers. - pub connections: BTreeMap>, +/// Create new in-memory backed cluster. +#[cfg(test)] +pub fn new_test_cluster( + messages: MessagesQueue, + config: ClusterConfiguration, +) -> Result>>, Error> { + let nodes = config.key_server_set.snapshot().current_set; + let connections = new_test_connections(messages, *config.self_key_pair.public(), nodes.keys().cloned().collect()); + + let connection_trigger = Box::new(SimpleConnectionTrigger::with_config(&config)); + let servers_set_change_creator_connector = connection_trigger.servers_set_change_creator_connector(); + let mut sessions = ClusterSessions::new(&config, servers_set_change_creator_connector.clone()); + if config.preserve_sessions { + sessions.preserve_sessions(); + } + let sessions = Arc::new(sessions); + + let message_processor = Arc::new(SessionsMessageProcessor::new( + config.self_key_pair.clone(), + servers_set_change_creator_connector.clone(), + sessions.clone(), + connections.provider(), + )); + + ClusterCore::new(sessions, message_processor, connections, servers_set_change_creator_connector, config) } -/// Cluster view core. -struct ClusterViewCore { - /// Cluster reference. - cluster: Arc, - /// Subset of nodes, required for this session. - nodes: BTreeSet, -} - -/// Connection to single node. -pub struct Connection { - /// Node id. - node_id: NodeId, - /// Node address. - node_address: SocketAddr, - /// Is inbound connection? - is_inbound: bool, - /// Tcp stream. - stream: SharedTcpStream, - /// Connection key. - key: KeyPair, - /// Last message time.
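new_test_cluster swaps TCP for a queue shared by every node in the test. A minimal sketch of standing up one node, assuming MessagesQueue is the shared Arc<Mutex<VecDeque<(NodeId, NodeId, Message)>>> used by the tests at the end of this diff, and that self_key_pair and key_server_set are prepared by the caller:

use std::collections::VecDeque;
use std::sync::Arc;
use parking_lot::Mutex;

// One queue is shared by all clusters in the test; it stands in for the network.
let messages: MessagesQueue = Arc::new(Mutex::new(VecDeque::new()));
let config = ClusterConfiguration {
	self_key_pair,      // Arc<PlainNodeKeyPair>, prepared by the caller
	key_server_set,     // MapKeyServerSet listing every test node
	key_storage: Arc::new(DummyKeyStorage::default()),
	acl_storage: Arc::new(DummyAclStorage::default()),
	admin_public: None,
	preserve_sessions: false,
};
let cluster = new_test_cluster(messages.clone(), config)?;
let client = cluster.client(); // same ClusterClient API as the networked backend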
- last_message_time: RwLock, -} - -impl ClusterCore { - pub fn new(executor: Executor, config: ClusterConfiguration) -> Result, Error> { - let listen_address = make_socket_address(&config.listen_address.0, config.listen_address.1)?; - let connections = ClusterConnections::new(&config)?; - let servers_set_change_creator_connector = connections.connector.clone(); - let sessions = ClusterSessions::new(&config, servers_set_change_creator_connector); - let data = ClusterData::new(&executor, config, connections, sessions); - +impl ClusterCore { + pub fn new( + sessions: Arc, + message_processor: Arc, + connections: C, + servers_set_change_creator_connector: Arc, + config: ClusterConfiguration, + ) -> Result, Error> { Ok(Arc::new(ClusterCore { - listen_address: listen_address, - data: data, + data: Arc::new(ClusterData { + self_key_pair: config.self_key_pair.clone(), + connections, + sessions: sessions.clone(), + config, + message_processor, + servers_set_change_creator_connector + }), })) } @@ -251,657 +244,68 @@ impl ClusterCore { Arc::new(ClusterClientImpl::new(self.data.clone())) } - /// Get cluster configuration. - #[cfg(test)] - pub fn config(&self) -> &ClusterConfiguration { - &self.data.config - } - - /// Get connection to given node. - #[cfg(test)] - pub fn connection(&self, node: &NodeId) -> Option> { - self.data.connection(node) - } - /// Run cluster. pub fn run(&self) -> Result<(), Error> { - self.run_listener() - .and_then(|_| self.run_connections())?; - - // schedule maintain procedures - ClusterCore::schedule_maintain(self.data.clone()); - + self.data.connections.connect(); Ok(()) } - /// Start listening for incoming connections. - pub fn run_listener(&self) -> Result<(), Error> { - // start listeining for incoming connections - self.data.spawn(ClusterCore::listen(self.data.clone(), self.listen_address.clone())?); - Ok(()) - } - - /// Start connecting to other nodes. - pub fn run_connections(&self) -> Result<(), Error> { - // try to connect to every other peer - ClusterCore::connect_disconnected_nodes(self.data.clone()); - Ok(()) - } - - /// Connect to peer. - fn connect(data: Arc, node_address: SocketAddr) { - data.clone().spawn(ClusterCore::connect_future(data, node_address)); - } - - /// Connect to socket using given context and executor. - fn connect_future(data: Arc, node_address: SocketAddr) -> BoxedEmptyFuture { - let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect(); - Box::new(net_connect(&node_address, data.self_key_pair.clone(), disconnected_nodes) - .then(move |result| ClusterCore::process_connection_result(data, Some(node_address), result)) - .then(|_| future::ok(()))) - } - - /// Start listening for incoming connections. - fn listen(data: Arc, listen_address: SocketAddr) -> Result { - Ok(Box::new(TcpListener::bind(&listen_address)? - .incoming() - .and_then(move |stream| { - ClusterCore::accept_connection(data.clone(), stream); - Ok(()) - }) - .for_each(|_| Ok(())) - .then(|_| future::ok(())))) - } - - /// Accept connection. - fn accept_connection(data: Arc, stream: TcpStream) { - data.clone().spawn(ClusterCore::accept_connection_future(data, stream)) - } - - /// Accept connection future. - fn accept_connection_future(data: Arc, stream: TcpStream) -> BoxedEmptyFuture { - Box::new(net_accept_connection(stream, data.self_key_pair.clone()) - .then(move |result| ClusterCore::process_connection_result(data, None, result)) - .then(|_| future::ok(()))) - } - - /// Schedule mainatain procedures. 
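Startup is now two calls on the returned core; the listener, dialer, and maintain futures that previously lived here belong to NetConnectionsManager. A sketch, assuming cconfig and net_config were built to match new_network_cluster's signature above:

// Build the networked core, take a client handle, then start connecting.
let core = new_network_cluster(executor, cconfig, net_config)?;
let client = core.client(); // ClusterClient handle for external callers
core.run()?;                // merely delegates to connections.connect() now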
- fn schedule_maintain(data: Arc) { - let d = data.clone(); - - let interval = Interval::new_interval(Duration::new(MAINTAIN_INTERVAL, 0)) - .and_then(move |_| Ok(ClusterCore::maintain(data.clone()))) - .for_each(|_| Ok(())) - .then(|_| future::ok(())); - - d.spawn(interval); - } - - /// Execute maintain procedures. - fn maintain(data: Arc) { - trace!(target: "secretstore_net", "{}: executing maintain procedures", data.self_key_pair.public()); - - ClusterCore::keep_alive(data.clone()); - ClusterCore::connect_disconnected_nodes(data.clone()); - data.sessions.stop_stalled_sessions(); - } - - /// Called for every incomming mesage. - fn process_connection_messages(data: Arc, connection: Arc) -> IoFuture> { - Box::new(connection - .read_message() - .then(move |result| - match result { - Ok((_, Ok(message))) => { - ClusterCore::process_connection_message(data.clone(), connection.clone(), message); - // continue serving connection - data.spawn(ClusterCore::process_connection_messages(data.clone(), connection).then(|_| Ok(()))); - Box::new(future::ok(Ok(()))) - }, - Ok((_, Err(err))) => { - warn!(target: "secretstore_net", "{}: protocol error '{}' when reading message from node {}", data.self_key_pair.public(), err, connection.node_id()); - // continue serving connection - data.spawn(ClusterCore::process_connection_messages(data.clone(), connection).then(|_| Ok(()))); - Box::new(future::ok(Err(err))) - }, - Err(err) => { - warn!(target: "secretstore_net", "{}: network error '{}' when reading message from node {}", data.self_key_pair.public(), err, connection.node_id()); - // close connection - data.connections.remove(data.clone(), connection.node_id(), connection.is_inbound()); - Box::new(future::err(err)) - }, - } - )) - } - - /// Send keepalive messages to every othe node. - fn keep_alive(data: Arc) { - data.sessions.sessions_keep_alive(); - for connection in data.connections.active_connections() { - let last_message_diff = Instant::now() - connection.last_message_time(); - if last_message_diff > KEEP_ALIVE_DISCONNECT_INTERVAL { - warn!(target: "secretstore_net", "{}: keep alive timeout for node {}", - data.self_key_pair.public(), connection.node_id()); - - data.connections.remove(data.clone(), connection.node_id(), connection.is_inbound()); - data.sessions.on_connection_timeout(connection.node_id()); - } - else if last_message_diff > KEEP_ALIVE_SEND_INTERVAL { - data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {}))).then(|_| Ok(()))); - } - } - } - - /// Try to connect to every disconnected node. - fn connect_disconnected_nodes(data: Arc) { - let r = data.connections.update_nodes_set(data.clone()); - if let Some(r) = r { - data.spawn(r); - } - - // connect to disconnected nodes - for (node_id, node_address) in data.connections.disconnected_nodes() { - if data.config.allow_connecting_to_higher_nodes || data.self_key_pair.public() < &node_id { - ClusterCore::connect(data.clone(), node_address); - } - } - } - - /// Process connection future result. 
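The deleted maintain loop ran every MAINTAIN_INTERVAL seconds and enforced the two keep-alive thresholds removed at the top of this file. Condensed from the removed keep_alive above, the per-connection policy was as follows (the equivalent logic presumably now lives with the net connections manager; receivers are simplified here):

let idle = Instant::now() - connection.last_message_time();
if idle > KEEP_ALIVE_DISCONNECT_INTERVAL {
	// Peer stopped answering: drop the connection and time out its sessions.
	connections.remove(connection.node_id(), connection.is_inbound());
	sessions.on_connection_timeout(connection.node_id());
} else if idle > KEEP_ALIVE_SEND_INTERVAL {
	// Quiet but not yet suspect: probe it with a KeepAlive message.
	connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {})));
}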
- fn process_connection_result(data: Arc, outbound_addr: Option, - result: Result>, TimeoutError>) -> IoFuture> - { - match result { - Ok(DeadlineStatus::Meet(Ok(connection))) => { - let connection = Connection::new(outbound_addr.is_none(), connection); - if data.connections.insert(data.clone(), connection.clone()) { - ClusterCore::process_connection_messages(data.clone(), connection) - } else { - Box::new(future::ok(Ok(()))) - } - }, - Ok(DeadlineStatus::Meet(Err(err))) => { - warn!(target: "secretstore_net", "{}: protocol error '{}' when establishing {} connection{}", - data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" }, - outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); - Box::new(future::ok(Ok(()))) - }, - Ok(DeadlineStatus::Timeout) => { - warn!(target: "secretstore_net", "{}: timeout when establishing {} connection{}", - data.self_key_pair.public(), if outbound_addr.is_some() { "outbound" } else { "inbound" }, - outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); - Box::new(future::ok(Ok(()))) - }, - Err(err) => { - warn!(target: "secretstore_net", "{}: network error '{}' when establishing {} connection{}", - data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" }, - outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); - Box::new(future::ok(Ok(()))) - }, - } - } - - /// Process single message from the connection. - fn process_connection_message(data: Arc, connection: Arc, message: Message) { - connection.set_last_message_time(Instant::now()); - trace!(target: "secretstore_net", "{}: received message {} from {}", data.self_key_pair.public(), message, connection.node_id()); - // error is ignored as we only process errors on session level - match message { - Message::Generation(message) => Self::process_message(&data, &data.sessions.generation_sessions, connection, Message::Generation(message)) - .map(|_| ()).unwrap_or_default(), - Message::Encryption(message) => Self::process_message(&data, &data.sessions.encryption_sessions, connection, Message::Encryption(message)) - .map(|_| ()).unwrap_or_default(), - Message::Decryption(message) => Self::process_message(&data, &data.sessions.decryption_sessions, connection, Message::Decryption(message)) - .map(|_| ()).unwrap_or_default(), - Message::SchnorrSigning(message) => Self::process_message(&data, &data.sessions.schnorr_signing_sessions, connection, Message::SchnorrSigning(message)) - .map(|_| ()).unwrap_or_default(), - Message::EcdsaSigning(message) => Self::process_message(&data, &data.sessions.ecdsa_signing_sessions, connection, Message::EcdsaSigning(message)) - .map(|_| ()).unwrap_or_default(), - Message::ServersSetChange(message) => { - let message = Message::ServersSetChange(message); - let is_initialization_message = message.is_initialization_message(); - let session = Self::process_message(&data, &data.sessions.admin_sessions, connection, message); - if is_initialization_message { - if let Some(session) = session { - data.connections.servers_set_change_creator_connector().set_key_servers_set_change_session(session.clone()); - } - } - } - Message::KeyVersionNegotiation(message) => { - let session = Self::process_message(&data, &data.sessions.negotiation_sessions, connection, Message::KeyVersionNegotiation(message)); - Self::try_continue_session(&data, session); - }, - Message::ShareAdd(message) => Self::process_message(&data, &data.sessions.admin_sessions, connection, Message::ShareAdd(message)) - 
.map(|_| ()).unwrap_or_default(), - Message::Cluster(message) => ClusterCore::process_cluster_message(data, connection, message), - } - } - - /// Try to contnue session. - fn try_continue_session(data: &Arc, session: Option>>) { - if let Some(session) = session { - let meta = session.meta(); - let is_master_node = meta.self_node_id == meta.master_node_id; - if is_master_node && session.is_finished() { - data.sessions.negotiation_sessions.remove(&session.id()); - match session.wait() { - Ok(Some((version, master))) => match session.take_continue_action() { - Some(ContinueAction::Decrypt(session, origin, is_shadow_decryption, is_broadcast_decryption)) => { - let initialization_error = if data.self_key_pair.public() == &master { - session.initialize(origin, version, is_shadow_decryption, is_broadcast_decryption) - } else { - session.delegate(master, origin, version, is_shadow_decryption, is_broadcast_decryption) - }; - - if let Err(error) = initialization_error { - session.on_session_error(&meta.self_node_id, error); - data.sessions.decryption_sessions.remove(&session.id()); - } - }, - Some(ContinueAction::SchnorrSign(session, message_hash)) => { - let initialization_error = if data.self_key_pair.public() == &master { - session.initialize(version, message_hash) - } else { - session.delegate(master, version, message_hash) - }; - - if let Err(error) = initialization_error { - session.on_session_error(&meta.self_node_id, error); - data.sessions.schnorr_signing_sessions.remove(&session.id()); - } - }, - Some(ContinueAction::EcdsaSign(session, message_hash)) => { - let initialization_error = if data.self_key_pair.public() == &master { - session.initialize(version, message_hash) - } else { - session.delegate(master, version, message_hash) - }; - - if let Err(error) = initialization_error { - session.on_session_error(&meta.self_node_id, error); - data.sessions.ecdsa_signing_sessions.remove(&session.id()); - } - }, - None => (), - }, - Ok(None) => unreachable!("is_master_node; session is finished; negotiation version always finished with result on master; qed"), - Err(error) => match session.take_continue_action() { - Some(ContinueAction::Decrypt(session, _, _, _)) => { - session.on_session_error(&meta.self_node_id, error); - data.sessions.decryption_sessions.remove(&session.id()); - }, - Some(ContinueAction::SchnorrSign(session, _)) => { - session.on_session_error(&meta.self_node_id, error); - data.sessions.schnorr_signing_sessions.remove(&session.id()); - }, - Some(ContinueAction::EcdsaSign(session, _)) => { - session.on_session_error(&meta.self_node_id, error); - data.sessions.ecdsa_signing_sessions.remove(&session.id()); - }, - None => (), - }, - } - } - } - } - - /// Get or insert new session. 
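try_continue_session, shown above in its removed form and now owned by the message processor, turns a finished key-version negotiation into the follow-up session. Its core decision, condensed from the removed code (only the Schnorr arm is shown; names are simplified):

match session.take_continue_action() {
	Some(ContinueAction::SchnorrSign(sign_session, message_hash)) => {
		// The master for the negotiated version runs the session locally;
		// every other node delegates to that master.
		let result = if self_public == &master {
			sign_session.initialize(version, message_hash)
		} else {
			sign_session.delegate(master, version, message_hash)
		};
		if let Err(error) = result {
			sign_session.on_session_error(&meta.self_node_id, error);
			sessions.schnorr_signing_sessions.remove(&sign_session.id());
		}
	},
	// ContinueAction::Decrypt and ContinueAction::EcdsaSign follow the same shape.
	_ => (),
}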
- fn prepare_session, D>(data: &Arc, sessions: &ClusterSessionsContainer, sender: &NodeId, message: &Message) -> Result, Error> - where Message: IntoSessionId { - fn requires_all_connections(message: &Message) -> bool { - match *message { - Message::Generation(_) => true, - Message::ShareAdd(_) => true, - Message::ServersSetChange(_) => true, - _ => false, - } - } - - // get or create new session, if required - let session_id = message.into_session_id().expect("into_session_id fails for cluster messages only; only session messages are passed to prepare_session; qed"); - let is_initialization_message = message.is_initialization_message(); - let is_delegation_message = message.is_delegation_message(); - match is_initialization_message || is_delegation_message { - false => sessions.get(&session_id, true).ok_or(Error::NoActiveSessionWithId), - true => { - let creation_data = SC::creation_data_from_message(&message)?; - let master = if is_initialization_message { sender.clone() } else { data.self_key_pair.public().clone() }; - let cluster = create_cluster_view(data, requires_all_connections(&message))?; - - sessions.insert(cluster, master, session_id, Some(message.session_nonce().ok_or(Error::InvalidMessage)?), message.is_exclusive_session_message(), creation_data) - }, - } - } - - /// Process single session message from connection. - fn process_message, D>(data: &Arc, sessions: &ClusterSessionsContainer, connection: Arc, mut message: Message) -> Option> - where Message: IntoSessionId { - - // get or create new session, if required - let mut sender = connection.node_id().clone(); - let session = Self::prepare_session(data, sessions, &sender, &message); - // send error if session is not found, or failed to create - let session = match session { - Ok(session) => session, - Err(error) => { - // this is new session => it is not yet in container - warn!(target: "secretstore_net", "{}: {} session read error '{}' when requested for session from node {}", - data.self_key_pair.public(), S::type_name(), error, sender); - if !message.is_error_message() { - let session_id = message.into_session_id().expect("session_id only fails for cluster messages; only session messages are passed to process_message; qed"); - let session_nonce = message.session_nonce().expect("session_nonce only fails for cluster messages; only session messages are passed to process_message; qed"); - data.spawn(connection.send_message(SC::make_error_message(session_id, session_nonce, error)).then(|_| Ok(()))); - } - return None; - }, - }; - - let session_id = session.id(); - let mut is_queued_message = false; - loop { - let message_result = session.on_message(&sender, &message); - match message_result { - Ok(_) => { - // if session is completed => stop - if session.is_finished() { - info!(target: "secretstore_net", "{}: {} session completed", data.self_key_pair.public(), S::type_name()); - sessions.remove(&session_id); - return Some(session); - } - - // try to dequeue message - match sessions.dequeue_message(&session_id) { - Some((msg_sender, msg)) => { - is_queued_message = true; - sender = msg_sender; - message = msg; - }, - None => return Some(session), - } - }, - Err(Error::TooEarlyForRequest) => { - sessions.enqueue_message(&session_id, sender, message, is_queued_message); - return Some(session); - }, - Err(err) => { - warn!(target: "secretstore_net", "{}: {} session error '{}' when processing message {} from node {}", - data.self_key_pair.public(), - S::type_name(), - err, - message, - sender); - 
session.on_session_error(data.self_key_pair.public(), err); - sessions.remove(&session_id); - return Some(session); - }, - } - } - } - - /// Process single cluster message from the connection. - fn process_cluster_message(data: Arc, connection: Arc, message: ClusterMessage) { - match message { - ClusterMessage::KeepAlive(_) => data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse { - session_id: None, - }))).then(|_| Ok(()))), - ClusterMessage::KeepAliveResponse(msg) => if let Some(session_id) = msg.session_id { - data.sessions.on_session_keep_alive(connection.node_id(), session_id.into()); - }, - _ => warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}", data.self_key_pair.public(), message, connection.node_id(), connection.node_address()), - } - } - - /// Prevents new tasks from being spawned. #[cfg(test)] - pub fn shutdown(&self) { - self.data.shutdown() - } -} + pub fn view(&self) -> Result, Error> { + let connections = self.data.connections.provider(); + let mut connected_nodes = connections.connected_nodes()?; + let disconnected_nodes = connections.disconnected_nodes(); + connected_nodes.insert(self.data.self_key_pair.public().clone()); -impl ClusterConnections { - pub fn new(config: &ClusterConfiguration) -> Result { - let mut nodes = config.key_server_set.snapshot().current_set; - let is_isolated = nodes.remove(config.self_key_pair.public()).is_none(); - - let trigger: Box = match config.auto_migrate_enabled { - false => Box::new(SimpleConnectionTrigger::new(config.key_server_set.clone(), config.self_key_pair.clone(), config.admin_public.clone())), - true if config.admin_public.is_none() => Box::new(ConnectionTriggerWithMigration::new(config.key_server_set.clone(), config.self_key_pair.clone())), - true => return Err(Error::Internal("secret store admininstrator public key is specified with auto-migration enabled".into())), - }; - let connector = trigger.servers_set_change_creator_connector(); - - Ok(ClusterConnections { - self_node_id: config.self_key_pair.public().clone(), - key_server_set: config.key_server_set.clone(), - trigger: Mutex::new(trigger), - connector: connector, - data: RwLock::new(ClusterConnectionsData { - is_isolated: is_isolated, - nodes: nodes, - connections: BTreeMap::new(), - }), - }) - } - - pub fn cluster_state(&self) -> ClusterState { - ClusterState { - connected: self.data.read().connections.keys().cloned().collect(), - } - } - - pub fn get(&self, node: &NodeId) -> Option> { - self.data.read().connections.get(node).cloned() - } - - pub fn insert(&self, data: Arc, connection: Arc) -> bool { - { - let mut data = self.data.write(); - if !data.nodes.contains_key(connection.node_id()) { - // incoming connections are checked here - trace!(target: "secretstore_net", "{}: ignoring unknown connection from {} at {}", self.self_node_id, connection.node_id(), connection.node_address()); - debug_assert!(connection.is_inbound()); - return false; - } - - if data.connections.contains_key(connection.node_id()) { - // we have already connected to the same node - // the agreement is that node with lower id must establish connection to node with higher id - if (&self.self_node_id < connection.node_id() && connection.is_inbound()) - || (&self.self_node_id > connection.node_id() && !connection.is_inbound()) { - return false; - } - } - - let node = connection.node_id().clone(); - trace!(target: "secretstore_net", "{}: inserting connection to {} at {}. 
Connected to {} of {} nodes", - self.self_node_id, node, connection.node_address(), data.connections.len() + 1, data.nodes.len()); - data.connections.insert(node.clone(), connection.clone()); - } - - let maintain_action = self.trigger.lock().on_connection_established(connection.node_id()); - self.maintain_connection_trigger(maintain_action, data); - - true - } - - pub fn remove(&self, data: Arc, node: &NodeId, is_inbound: bool) { - { - let mut data = self.data.write(); - if let Entry::Occupied(entry) = data.connections.entry(node.clone()) { - if entry.get().is_inbound() != is_inbound { - return; - } - - trace!(target: "secretstore_net", "{}: removing connection to {} at {}", self.self_node_id, entry.get().node_id(), entry.get().node_address()); - entry.remove_entry(); - } else { - return; - } - } - - let maintain_action = self.trigger.lock().on_connection_closed(node); - self.maintain_connection_trigger(maintain_action, data); - } - - pub fn connected_nodes(&self) -> Result, Error> { - let data = self.data.read(); - if data.is_isolated { - return Err(Error::NodeDisconnected); - } - - Ok(data.connections.keys().cloned().collect()) - } - - pub fn active_connections(&self)-> Vec> { - self.data.read().connections.values().cloned().collect() - } - - pub fn disconnected_nodes(&self) -> BTreeMap { - let data = self.data.read(); - data.nodes.iter() - .filter(|&(node_id, _)| !data.connections.contains_key(node_id)) - .map(|(node_id, node_address)| (node_id.clone(), node_address.clone())) - .collect() - } - - pub fn servers_set_change_creator_connector(&self) -> Arc { - self.connector.clone() - } - - pub fn update_nodes_set(&self, data: Arc) -> Option { - let maintain_action = self.trigger.lock().on_maintain(); - self.maintain_connection_trigger(maintain_action, data); - None - } - - fn maintain_connection_trigger(&self, maintain_action: Option, data: Arc) { - if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Session) { - let client = ClusterClientImpl::new(data); - self.trigger.lock().maintain_session(&client); - } - if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Connections) { - let mut trigger = self.trigger.lock(); - let mut data = self.data.write(); - trigger.maintain_connections(&mut *data); - } - } -} - -impl ClusterData { - pub fn new(executor: &Executor, config: ClusterConfiguration, connections: ClusterConnections, sessions: ClusterSessions) -> Arc { - Arc::new(ClusterData { - executor: executor.clone(), - self_key_pair: config.self_key_pair.clone(), - connections: connections, - sessions: sessions, - config: config, - is_shutdown: Arc::new(AtomicBool::new(false)), - }) - } - - /// Get connection to given node. - pub fn connection(&self, node: &NodeId) -> Option> { - self.connections.get(node) - } - - /// Spawns a future on the runtime. - pub fn spawn(&self, f: F) where F: Future + Send + 'static { - if self.is_shutdown.load(Ordering::Acquire) == false { - if let Err(err) = future::Executor::execute(&self.executor, Box::new(f)) { - error!("Secret store runtime unable to spawn task. Runtime is shutting down. ({:?})", err); - } - } else { - error!("Secret store runtime unable to spawn task. Shutdown has been started."); - } - } - - /// Sets the `is_shutdown` flag which prevents future tasks from being - /// spawned via `::spawn`. 
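The removed ClusterConnections::insert deduplicated simultaneous dials between two peers with the agreement quoted above: the node with the lower id establishes the connection to the node with the higher id. The same rule as a standalone predicate (a sketch, mirroring the removed insert()):

/// True if a newly established connection should be kept when a connection
/// to the same peer already exists.
fn keep_duplicate_connection(self_id: &NodeId, peer_id: &NodeId, is_inbound: bool) -> bool {
	if self_id < peer_id {
		// We are the lower id: only our outbound dial survives.
		!is_inbound
	} else {
		// We are the higher id: only the peer's dial (inbound to us) survives.
		is_inbound
	}
}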
- #[cfg(test)] - pub fn shutdown(&self) { - self.is_shutdown.store(true, Ordering::Release); - } -} - -impl Connection { - pub fn new(is_inbound: bool, connection: NetConnection) -> Arc { - Arc::new(Connection { - node_id: connection.node_id, - node_address: connection.address, - is_inbound: is_inbound, - stream: connection.stream, - key: connection.key, - last_message_time: RwLock::new(Instant::now()), - }) - } - - pub fn is_inbound(&self) -> bool { - self.is_inbound - } - - pub fn node_id(&self) -> &NodeId { - &self.node_id - } - - pub fn last_message_time(&self) -> Instant { - *self.last_message_time.read() - } - - pub fn set_last_message_time(&self, last_message_time: Instant) { - *self.last_message_time.write() = last_message_time; - } - - pub fn node_address(&self) -> &SocketAddr { - &self.node_address - } - - pub fn send_message(&self, message: Message) -> WriteMessage { - write_encrypted_message(self.stream.clone(), &self.key, message) - } - - pub fn read_message(&self) -> ReadMessage { - read_encrypted_message(self.stream.clone(), self.key.clone()) + let connected_nodes_count = connected_nodes.len(); + let disconnected_nodes_count = disconnected_nodes.len(); + Ok(Arc::new(ClusterView::new( + self.data.self_key_pair.clone(), + connections, + connected_nodes, + connected_nodes_count + disconnected_nodes_count))) } } impl ClusterView { - pub fn new(cluster: Arc, nodes: BTreeSet, configured_nodes_count: usize) -> Self { + pub fn new( + self_key_pair: Arc, + connections: Arc, + nodes: BTreeSet, + configured_nodes_count: usize + ) -> Self { ClusterView { configured_nodes_count: configured_nodes_count, - connected_nodes_count: nodes.len(), - core: Arc::new(RwLock::new(ClusterViewCore { - cluster: cluster, - nodes: nodes, - })), + connected_nodes: nodes, + connections, + self_key_pair, } } } impl Cluster for ClusterView { fn broadcast(&self, message: Message) -> Result<(), Error> { - let core = self.core.read(); - for node in core.nodes.iter().filter(|n| *n != core.cluster.self_key_pair.public()) { - trace!(target: "secretstore_net", "{}: sent message {} to {}", core.cluster.self_key_pair.public(), message, node); - let connection = core.cluster.connection(node).ok_or(Error::NodeDisconnected)?; - core.cluster.spawn(connection.send_message(message.clone()).then(|_| Ok(()))) + for node in self.connected_nodes.iter().filter(|n| *n != self.self_key_pair.public()) { + trace!(target: "secretstore_net", "{}: sent message {} to {}", self.self_key_pair.public(), message, node); + let connection = self.connections.connection(node).ok_or(Error::NodeDisconnected)?; + connection.send_message(message.clone()); } Ok(()) } fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { - let core = self.core.read(); - trace!(target: "secretstore_net", "{}: sent message {} to {}", core.cluster.self_key_pair.public(), message, to); - let connection = core.cluster.connection(to).ok_or(Error::NodeDisconnected)?; - core.cluster.spawn(connection.send_message(message).then(|_| Ok(()))); + trace!(target: "secretstore_net", "{}: sent message {} to {}", self.self_key_pair.public(), message, to); + let connection = self.connections.connection(to).ok_or(Error::NodeDisconnected)?; + connection.send_message(message); Ok(()) } fn is_connected(&self, node: &NodeId) -> bool { - self.core.read().nodes.contains(node) + self.connected_nodes.contains(node) } fn nodes(&self) -> BTreeSet { - self.core.read().nodes.clone() + self.connected_nodes.clone() } fn configured_nodes_count(&self) -> usize { @@ -909,24 +313,24 @@ 
impl Cluster for ClusterView { } fn connected_nodes_count(&self) -> usize { - self.connected_nodes_count + self.connected_nodes.len() } } -impl ClusterClientImpl { - pub fn new(data: Arc) -> Self { +impl ClusterClientImpl { + pub fn new(data: Arc>) -> Self { ClusterClientImpl { data: data, } } fn create_key_version_negotiation_session(&self, session_id: SessionId) -> Result>, Error> { - let mut connected_nodes = self.data.connections.connected_nodes()?; + let mut connected_nodes = self.data.connections.provider().connected_nodes()?; connected_nodes.insert(self.data.self_key_pair.public().clone()); let access_key = Random.generate()?.secret().clone(); let session_id = SessionIdWithSubSession::new(session_id, access_key); - let cluster = create_cluster_view(&self.data, false)?; + let cluster = create_cluster_view(self.data.self_key_pair.clone(), self.data.connections.provider(), false)?; let session = self.data.sessions.negotiation_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id.clone(), None, false, None)?; match session.initialize(connected_nodes) { Ok(()) => Ok(session), @@ -936,56 +340,38 @@ impl ClusterClientImpl { } } } - - fn process_initialization_result, D>(result: Result<(), Error>, session: Arc, sessions: &ClusterSessionsContainer) -> Result, Error> { - match result { - Ok(()) if session.is_finished() => { - sessions.remove(&session.id()); - Ok(session) - }, - Ok(()) => Ok(session), - Err(error) => { - sessions.remove(&session.id()); - Err(error) - }, - } - } } -impl ClusterClient for ClusterClientImpl { - fn cluster_state(&self) -> ClusterState { - self.data.connections.cluster_state() - } - +impl ClusterClient for ClusterClientImpl { fn new_generation_session(&self, session_id: SessionId, origin: Option
, author: Address, threshold: usize) -> Result, Error> { - let mut connected_nodes = self.data.connections.connected_nodes()?; + let mut connected_nodes = self.data.connections.provider().connected_nodes()?; connected_nodes.insert(self.data.self_key_pair.public().clone()); - let cluster = create_cluster_view(&self.data, true)?; + let cluster = create_cluster_view(self.data.self_key_pair.clone(), self.data.connections.provider(), true)?; let session = self.data.sessions.generation_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?; - Self::process_initialization_result( + process_initialization_result( session.initialize(origin, author, false, threshold, connected_nodes.into()), session, &self.data.sessions.generation_sessions) } fn new_encryption_session(&self, session_id: SessionId, requester: Requester, common_point: Public, encrypted_point: Public) -> Result, Error> { - let mut connected_nodes = self.data.connections.connected_nodes()?; + let mut connected_nodes = self.data.connections.provider().connected_nodes()?; connected_nodes.insert(self.data.self_key_pair.public().clone()); - let cluster = create_cluster_view(&self.data, true)?; + let cluster = create_cluster_view(self.data.self_key_pair.clone(), self.data.connections.provider(), true)?; let session = self.data.sessions.encryption_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?; - Self::process_initialization_result( + process_initialization_result( session.initialize(requester, common_point, encrypted_point), session, &self.data.sessions.encryption_sessions) } fn new_decryption_session(&self, session_id: SessionId, origin: Option
, requester: Requester, version: Option, is_shadow_decryption: bool, is_broadcast_decryption: bool) -> Result, Error> { - let mut connected_nodes = self.data.connections.connected_nodes()?; + let mut connected_nodes = self.data.connections.provider().connected_nodes()?; connected_nodes.insert(self.data.self_key_pair.public().clone()); let access_key = Random.generate()?.secret().clone(); let session_id = SessionIdWithSubSession::new(session_id, access_key); - let cluster = create_cluster_view(&self.data, false)?; + let cluster = create_cluster_view(self.data.self_key_pair.clone(), self.data.connections.provider(), false)?; let session = self.data.sessions.decryption_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id.clone(), None, false, Some(requester))?; @@ -995,23 +381,23 @@ impl ClusterClient for ClusterClientImpl { self.create_key_version_negotiation_session(session_id.id.clone()) .map(|version_session| { version_session.set_continue_action(ContinueAction::Decrypt(session.clone(), origin, is_shadow_decryption, is_broadcast_decryption)); - ClusterCore::try_continue_session(&self.data, Some(version_session)); + self.data.message_processor.try_continue_session(Some(version_session)); }) }, }; - Self::process_initialization_result( + process_initialization_result( initialization_result, session, &self.data.sessions.decryption_sessions) } fn new_schnorr_signing_session(&self, session_id: SessionId, requester: Requester, version: Option, message_hash: H256) -> Result, Error> { - let mut connected_nodes = self.data.connections.connected_nodes()?; + let mut connected_nodes = self.data.connections.provider().connected_nodes()?; connected_nodes.insert(self.data.self_key_pair.public().clone()); let access_key = Random.generate()?.secret().clone(); let session_id = SessionIdWithSubSession::new(session_id, access_key); - let cluster = create_cluster_view(&self.data, false)?; + let cluster = create_cluster_view(self.data.self_key_pair.clone(), self.data.connections.provider(), false)?; let session = self.data.sessions.schnorr_signing_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id.clone(), None, false, Some(requester))?; let initialization_result = match version { @@ -1020,23 +406,23 @@ impl ClusterClient for ClusterClientImpl { self.create_key_version_negotiation_session(session_id.id.clone()) .map(|version_session| { version_session.set_continue_action(ContinueAction::SchnorrSign(session.clone(), message_hash)); - ClusterCore::try_continue_session(&self.data, Some(version_session)); + self.data.message_processor.try_continue_session(Some(version_session)); }) }, }; - Self::process_initialization_result( + process_initialization_result( initialization_result, session, &self.data.sessions.schnorr_signing_sessions) } fn new_ecdsa_signing_session(&self, session_id: SessionId, requester: Requester, version: Option, message_hash: H256) -> Result, Error> { - let mut connected_nodes = self.data.connections.connected_nodes()?; + let mut connected_nodes = self.data.connections.provider().connected_nodes()?; connected_nodes.insert(self.data.self_key_pair.public().clone()); let access_key = Random.generate()?.secret().clone(); let session_id = SessionIdWithSubSession::new(session_id, access_key); - let cluster = create_cluster_view(&self.data, false)?; + let cluster = create_cluster_view(self.data.self_key_pair.clone(), self.data.connections.provider(), false)?; let session = self.data.sessions.ecdsa_signing_sessions.insert(cluster, 
self.data.self_key_pair.public().clone(), session_id.clone(), None, false, Some(requester))?; let initialization_result = match version { @@ -1045,12 +431,12 @@ impl ClusterClient for ClusterClientImpl { self.create_key_version_negotiation_session(session_id.id.clone()) .map(|version_session| { version_session.set_continue_action(ContinueAction::EcdsaSign(session.clone(), message_hash)); - ClusterCore::try_continue_session(&self.data, Some(version_session)); + self.data.message_processor.try_continue_session(Some(version_session)); }) }, }; - Self::process_initialization_result( + process_initialization_result( initialization_result, session, &self.data.sessions.ecdsa_signing_sessions) } @@ -1061,28 +447,18 @@ impl ClusterClient for ClusterClientImpl { } fn new_servers_set_change_session(&self, session_id: Option, migration_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error> { - let mut connected_nodes = self.data.connections.connected_nodes()?; - connected_nodes.insert(self.data.self_key_pair.public().clone()); - - let session_id = match session_id { - Some(session_id) if session_id == *SERVERS_SET_CHANGE_SESSION_ID => session_id, - Some(_) => return Err(Error::InvalidMessage), - None => *SERVERS_SET_CHANGE_SESSION_ID, - }; - - let cluster = create_cluster_view(&self.data, true)?; - let creation_data = Some(AdminSessionCreationData::ServersSetChange(migration_id, new_nodes_set.clone())); - let session = self.data.sessions.admin_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, true, creation_data)?; - let initialization_result = session.as_servers_set_change().expect("servers set change session is created; qed") - .initialize(new_nodes_set, old_set_signature, new_set_signature); - - if initialization_result.is_ok() { - self.data.connections.servers_set_change_creator_connector().set_key_servers_set_change_session(session.clone()); - } - - Self::process_initialization_result( - initialization_result, - session, &self.data.sessions.admin_sessions) + new_servers_set_change_session( + self.data.self_key_pair.clone(), + &self.data.sessions, + self.data.connections.provider(), + self.data.servers_set_change_creator_connector.clone(), + ServersSetChangeParams { + session_id, + migration_id, + new_nodes_set, + old_set_signature, + new_set_signature, + }) } fn add_generation_listener(&self, listener: Arc>) { @@ -1097,11 +473,6 @@ impl ClusterClient for ClusterClientImpl { self.data.sessions.negotiation_sessions.add_listener(listener); } - #[cfg(test)] - fn connect(&self) { - ClusterCore::connect_disconnected_nodes(self.data.clone()); - } - #[cfg(test)] fn make_faulty_generation_sessions(&self) { self.data.sessions.make_faulty_generation_sessions(); @@ -1113,38 +484,92 @@ impl ClusterClient for ClusterClientImpl { } #[cfg(test)] - fn key_storage(&self) -> Arc { - self.data.config.key_storage.clone() + fn is_fully_connected(&self) -> bool { + self.data.connections.provider().disconnected_nodes().is_empty() + } + + #[cfg(test)] + fn connect(&self) { + self.data.connections.connect() } } -fn make_socket_address(address: &str, port: u16) -> Result { - let ip_address: IpAddr = address.parse().map_err(|_| Error::InvalidNodeAddress)?; - Ok(SocketAddr::new(ip_address, port)) +pub struct ServersSetChangeParams { + pub session_id: Option, + pub migration_id: Option, + pub new_nodes_set: BTreeSet, + pub old_set_signature: Signature, + pub new_set_signature: Signature, +} + +pub fn new_servers_set_change_session( 
+ self_key_pair: Arc, + sessions: &ClusterSessions, + connections: Arc, + servers_set_change_creator_connector: Arc, + params: ServersSetChangeParams, +) -> Result, Error> { + let session_id = match params.session_id { + Some(session_id) if session_id == *SERVERS_SET_CHANGE_SESSION_ID => session_id, + Some(_) => return Err(Error::InvalidMessage), + None => *SERVERS_SET_CHANGE_SESSION_ID, + }; + + let cluster = create_cluster_view(self_key_pair.clone(), connections, true)?; + let creation_data = AdminSessionCreationData::ServersSetChange(params.migration_id, params.new_nodes_set.clone()); + let session = sessions.admin_sessions + .insert(cluster, *self_key_pair.public(), session_id, None, true, Some(creation_data))?; + let initialization_result = session.as_servers_set_change().expect("servers set change session is created; qed") + .initialize(params.new_nodes_set, params.old_set_signature, params.new_set_signature); + + if initialization_result.is_ok() { + servers_set_change_creator_connector.set_key_servers_set_change_session(session.clone()); + } + + process_initialization_result( + initialization_result, + session, &sessions.admin_sessions) +} + +fn process_initialization_result( + result: Result<(), Error>, + session: Arc, + sessions: &ClusterSessionsContainer +) -> Result, Error> + where + S: ClusterSession, + SC: ClusterSessionCreator +{ + match result { + Ok(()) if session.is_finished() => { + sessions.remove(&session.id()); + Ok(session) + }, + Ok(()) => Ok(session), + Err(error) => { + sessions.remove(&session.id()); + Err(error) + }, + } } #[cfg(test)] pub mod tests { use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; - use std::time::{Duration, Instant}; - use std::collections::{BTreeSet, VecDeque}; - use parking_lot::RwLock; - use tokio::{ - prelude::{future, Future}, - }; - use parity_runtime::{ - futures::sync::oneshot, - Runtime, Executor, - }; + use std::collections::{BTreeMap, BTreeSet, VecDeque}; + use parking_lot::{Mutex, RwLock}; use ethereum_types::{Address, H256}; use ethkey::{Random, Generator, Public, Signature, sign}; use key_server_cluster::{NodeId, SessionId, Requester, Error, DummyAclStorage, DummyKeyStorage, - MapKeyServerSet, PlainNodeKeyPair, KeyStorage}; + MapKeyServerSet, PlainNodeKeyPair, NodeKeyPair}; use key_server_cluster::message::Message; - use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration, ClusterClient, ClusterState}; - use key_server_cluster::cluster_sessions::{ClusterSession, AdminSession, ClusterSessionsListener}; - use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionState as GenerationSessionState}; + use key_server_cluster::cluster::{new_test_cluster, Cluster, ClusterCore, ClusterConfiguration, ClusterClient}; + use key_server_cluster::cluster_connections::ConnectionManager; + use key_server_cluster::cluster_connections::tests::{MessagesQueue, TestConnections}; + use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, AdminSession, ClusterSessionsListener}; + use key_server_cluster::generation_session::{SessionImpl as GenerationSession, + SessionState as GenerationSessionState}; use key_server_cluster::decryption_session::{SessionImpl as DecryptionSession}; use key_server_cluster::encryption_session::{SessionImpl as EncryptionSession}; use key_server_cluster::signing_session_ecdsa::{SessionImpl as EcdsaSigningSession}; @@ -1152,8 +577,6 @@ pub mod tests { use key_server_cluster::key_version_negotiation_session::{SessionImpl as 
KeyVersionNegotiationSession, IsolatedSessionTransport as KeyVersionNegotiationSessionTransport}; - const TIMEOUT: Duration = Duration::from_millis(1000); - #[derive(Default)] pub struct DummyClusterClient { pub generation_requests_count: AtomicUsize, @@ -1172,7 +595,6 @@ pub mod tests { } impl ClusterClient for DummyClusterClient { - fn cluster_state(&self) -> ClusterState { unimplemented!("test-only") } fn new_generation_session(&self, _session_id: SessionId, _origin: Option
, _author: Address, _threshold: usize) -> Result, Error> { self.generation_requests_count.fetch_add(1, Ordering::Relaxed); Err(Error::Internal("test-error".into())) } @@ -1191,8 +613,8 @@ pub mod tests { fn make_faulty_generation_sessions(&self) { unimplemented!("test-only") } fn generation_session(&self, _session_id: &SessionId) -> Option> { unimplemented!("test-only") } - fn connect(&self) { unimplemented!("test-only") } - fn key_storage(&self) -> Arc { unimplemented!("test-only") } + fn is_fully_connected(&self) -> bool { true } + fn connect(&self) {} } impl DummyCluster { @@ -1258,366 +680,431 @@ pub mod tests { } } - /// Blocks the calling thread, looping until `predicate` returns `true` or - /// `timeout` has elapsed. - pub fn loop_until(executor: &Executor, timeout: Duration, predicate: F) - where F: Send + 'static + Fn() -> bool - { - use futures::Stream; - use tokio::timer::Interval; + /// Test message loop. + pub struct MessageLoop { + messages: MessagesQueue, + preserve_sessions: bool, + key_pairs_map: BTreeMap>, + acl_storages_map: BTreeMap>, + key_storages_map: BTreeMap>, + clusters_map: BTreeMap>>>, + } - let start = Instant::now(); - let (complete_tx, complete_rx) = oneshot::channel(); + impl ::std::fmt::Debug for MessageLoop { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + write!(f, "MessageLoop({})", self.clusters_map.len()) + } + } - executor.spawn(Interval::new_interval(Duration::from_millis(1)) - .and_then(move |_| { - if Instant::now() - start > timeout { - panic!("no result in {:?}", timeout); + impl MessageLoop { + /// Returns the set of all node ids. + pub fn nodes(&self) -> BTreeSet { + self.clusters_map.keys().cloned().collect() + } + + /// Returns node id by its index. + pub fn node(&self, idx: usize) -> NodeId { + *self.clusters_map.keys().nth(idx).unwrap() + } + + /// Returns key pair of the node by its index. + pub fn node_key_pair(&self, idx: usize) -> &Arc { + self.key_pairs_map.values().nth(idx).unwrap() + } + + /// Get cluster reference by its index. + pub fn cluster(&self, idx: usize) -> &Arc>> { + self.clusters_map.values().nth(idx).unwrap() + } + + /// Get key storage reference by its index. + pub fn key_storage(&self, idx: usize) -> &Arc { + self.key_storages_map.values().nth(idx).unwrap() + } + + /// Get key storage reference by node id. + pub fn key_storage_of(&self, node: &NodeId) -> &Arc { + &self.key_storages_map[node] + } + + /// Replace key storage of the node by its id. + pub fn replace_key_storage_of(&mut self, node: &NodeId, key_storage: Arc) { + *self.key_storages_map.get_mut(node).unwrap() = key_storage; + } + + /// Get ACL storage reference by its index. + pub fn acl_storage(&self, idx: usize) -> &Arc { + self.acl_storages_map.values().nth(idx).unwrap() + } + + /// Get sessions container reference by its index. + pub fn sessions(&self, idx: usize) -> &Arc { + &self.cluster(idx).data.sessions + } + + /// Get sessions container reference by node id. + pub fn sessions_of(&self, node: &NodeId) -> &Arc { + &self.clusters_map[node].data.sessions + } + + /// Isolate node from others.
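The membership helpers that follow (isolate, exclude, include) let a test mutate cluster topology without touching sockets. For example (APIs from this diff; make_clusters is defined at the end of this module):

let mut ml = make_clusters(4);
ml.isolate(1);   // node 1 keeps running but is cut off from every peer
ml.exclude(3);   // node 3 is dropped from all other nodes' connection sets
// admit a brand-new node and get its index back
let new_idx = ml.include(Arc::new(PlainNodeKeyPair::new(Random.generate().unwrap())));
ml.loop_until(|| ml.is_empty()); // drain whatever messages this produced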
+		/// Isolate node from others.
+		pub fn isolate(&self, idx: usize) {
+			let node = self.node(idx);
+			for (i, cluster) in self.clusters_map.values().enumerate() {
+				if i == idx {
+					cluster.data.connections.isolate();
+				} else {
+					cluster.data.connections.disconnect(node);
+				}
+			}
+		}
 
-				Ok(())
-			})
-			.take_while(move |_| future::ok(!predicate()))
-			.for_each(|_| Ok(()))
-			.then(|_| {
-				complete_tx.send(()).expect("receiver dropped");
-				future::ok::<(), ()>(())
-			})
-		);
+		/// Exclude node from cluster.
+		pub fn exclude(&mut self, idx: usize) {
+			let node = self.node(idx);
+			for (i, cluster) in self.clusters_map.values().enumerate() {
+				if i != idx {
+					cluster.data.connections.exclude(node);
+				}
+			}
+			self.key_storages_map.remove(&node);
+			self.acl_storages_map.remove(&node);
+			self.key_pairs_map.remove(&node);
+			self.clusters_map.remove(&node);
+		}
 
-		complete_rx.wait().unwrap();
+		/// Include new node to the cluster.
+		pub fn include(&mut self, node_key_pair: Arc<PlainNodeKeyPair>) -> usize {
+			let key_storage = Arc::new(DummyKeyStorage::default());
+			let acl_storage = Arc::new(DummyAclStorage::default());
+			let cluster_params = ClusterConfiguration {
+				self_key_pair: node_key_pair.clone(),
+				key_server_set: Arc::new(MapKeyServerSet::new(false, self.nodes().iter()
+					.chain(::std::iter::once(node_key_pair.public()))
+					.map(|n| (*n, format!("127.0.0.1:{}", 13).parse().unwrap()))
+					.collect())),
+				key_storage: key_storage.clone(),
+				acl_storage: acl_storage.clone(),
+				admin_public: None,
+				preserve_sessions: self.preserve_sessions,
+			};
+			let cluster = new_test_cluster(self.messages.clone(), cluster_params).unwrap();
+
+			for cluster in self.clusters_map.values() {
+				cluster.data.connections.include(node_key_pair.public().clone());
+			}
+			self.acl_storages_map.insert(*node_key_pair.public(), acl_storage);
+			self.key_storages_map.insert(*node_key_pair.public(), key_storage);
+			self.clusters_map.insert(*node_key_pair.public(), cluster);
+			self.key_pairs_map.insert(*node_key_pair.public(), node_key_pair.clone());
+			self.clusters_map.keys().position(|k| k == node_key_pair.public()).unwrap()
+		}
+
+		/// Returns true if the message queue is empty.
+		pub fn is_empty(&self) -> bool {
+			self.messages.lock().is_empty()
+		}
+
+		/// Takes next message from the queue.
+		pub fn take_message(&self) -> Option<(NodeId, NodeId, Message)> {
+			self.messages.lock().pop_front()
+		}
+
+		/// Process single message.
+		pub fn process_message(&self, from: NodeId, to: NodeId, message: Message) {
+			let cluster_data = &self.clusters_map[&to].data;
+			let connection = cluster_data.connections.provider().connection(&from).unwrap();
+			cluster_data.message_processor.process_connection_message(connection, message);
+		}
+
+		/// Take next message and process it.
+		pub fn take_and_process_message(&self) -> bool {
+			let (from, to, message) = match self.take_message() {
+				Some((from, to, message)) => (from, to, message),
+				None => return false,
+			};
+
+			self.process_message(from, to, message);
+			true
+		}
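		// NOTE: a hedged sketch, not part of this patch: tests that need to observe
		// or filter traffic can bypass `loop_until` and pump the queue manually with
		// `take_message`/`process_message` above. `pump_skipping` is a hypothetical
		// standalone helper, not a method of this type.
		//
		//	fn pump_skipping<F>(ml: &MessageLoop, skip: F) where F: Fn(&Message) -> bool {
		//		while let Some((from, to, message)) = ml.take_message() {
		//			// deliver everything except the messages the test suppresses
		//			if !skip(&message) {
		//				ml.process_message(from, to, message);
		//			}
		//		}
		//	}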
+		/// Loops until `predicate` returns `true`, panicking if the message queue
+		/// is drained before that happens.
+		pub fn loop_until<F>(&self, predicate: F) where F: Fn() -> bool {
+			while !predicate() {
+				if !self.take_and_process_message() {
+					panic!("message queue is empty but goal is not achieved");
+				}
+			}
+		}
 	}
 
-	pub fn all_connections_established(cluster: &Arc<ClusterCore>) -> bool {
-		cluster.config().key_server_set.snapshot().new_set.keys()
-			.filter(|p| *p != cluster.config().self_key_pair.public())
-			.all(|p| cluster.connection(p).is_some())
+	pub fn make_clusters(num_nodes: usize) -> MessageLoop {
+		do_make_clusters(num_nodes, false)
 	}
 
-	pub fn make_clusters(runtime: &Runtime, ports_begin: u16, num_nodes: usize) -> Vec<Arc<ClusterCore>> {
-		let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect();
+	pub fn make_clusters_and_preserve_sessions(num_nodes: usize) -> MessageLoop {
+		do_make_clusters(num_nodes, true)
+	}
+
+	fn do_make_clusters(num_nodes: usize, preserve_sessions: bool) -> MessageLoop {
+		let ports_begin = 0;
+		let messages = Arc::new(Mutex::new(VecDeque::new()));
+		let key_pairs: Vec<_> = (0..num_nodes)
+			.map(|_| Arc::new(PlainNodeKeyPair::new(Random.generate().unwrap()))).collect();
+		let key_storages: Vec<_> = (0..num_nodes).map(|_| Arc::new(DummyKeyStorage::default())).collect();
+		let acl_storages: Vec<_> = (0..num_nodes).map(|_| Arc::new(DummyAclStorage::default())).collect();
 		let cluster_params: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration {
-			self_key_pair: Arc::new(PlainNodeKeyPair::new(key_pairs[i].clone())),
-			listen_address: ("127.0.0.1".to_owned(), ports_begin + i as u16),
+			self_key_pair: key_pairs[i].clone(),
 			key_server_set: Arc::new(MapKeyServerSet::new(false, key_pairs.iter().enumerate()
-				.map(|(j, kp)| (kp.public().clone(), format!("127.0.0.1:{}", ports_begin + j as u16).parse().unwrap()))
+				.map(|(j, kp)| (*kp.public(), format!("127.0.0.1:{}", ports_begin + j as u16).parse().unwrap()))
 				.collect())),
-			allow_connecting_to_higher_nodes: false,
-			key_storage: Arc::new(DummyKeyStorage::default()),
-			acl_storage: Arc::new(DummyAclStorage::default()),
+			key_storage: key_storages[i].clone(),
+			acl_storage: acl_storages[i].clone(),
 			admin_public: None,
-			auto_migrate_enabled: false,
+			preserve_sessions,
 		}).collect();
-		let clusters: Vec<_> = cluster_params.into_iter().enumerate()
-			.map(|(_, params)| ClusterCore::new(runtime.executor(), params).unwrap())
+		let clusters: Vec<_> = cluster_params.into_iter()
+			.map(|params| new_test_cluster(messages.clone(), params).unwrap())
 			.collect();
-
-		clusters
-	}
-
-	pub fn run_clusters(clusters: &[Arc<ClusterCore>]) {
-		for cluster in clusters {
-			cluster.run_listener().unwrap();
-		}
-		for cluster in clusters {
-			cluster.run_connections().unwrap();
-		}
-	}
-
-	pub fn shutdown_clusters(clusters: &[Arc<ClusterCore>]) {
-		for cluster in clusters {
-			cluster.shutdown()
-		}
-	}
-
-	/// Returns a new runtime with a static number of threads.
- pub fn new_runtime() -> Runtime { - Runtime::with_thread_count(4) - } - - #[test] - fn cluster_connects_to_other_nodes() { - let runtime = new_runtime(); - let clusters = make_clusters(&runtime, 6010, 3); - run_clusters(&clusters); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || clusters_clone.iter().all(all_connections_established)); - shutdown_clusters(&clusters); + let clusters_map = clusters.iter().map(|c| (*c.data.config.self_key_pair.public(), c.clone())).collect(); + let key_pairs_map = key_pairs.into_iter().map(|kp| (*kp.public(), kp)).collect(); + let key_storages_map = clusters.iter().zip(key_storages.into_iter()) + .map(|(c, ks)| (*c.data.config.self_key_pair.public(), ks)).collect(); + let acl_storages_map = clusters.iter().zip(acl_storages.into_iter()) + .map(|(c, acls)| (*c.data.config.self_key_pair.public(), acls)).collect(); + MessageLoop { preserve_sessions, messages, key_pairs_map, acl_storages_map, key_storages_map, clusters_map } } #[test] fn cluster_wont_start_generation_session_if_not_fully_connected() { - let runtime = new_runtime(); - let clusters = make_clusters(&runtime, 6013, 3); - clusters[0].run().unwrap(); - match clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1) { + let ml = make_clusters(3); + ml.cluster(0).data.connections.disconnect(*ml.cluster(0).data.self_key_pair.public()); + match ml.cluster(0).client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1) { Err(Error::NodeDisconnected) => (), Err(e) => panic!("unexpected error {:?}", e), _ => panic!("unexpected success"), } - shutdown_clusters(&clusters); } #[test] fn error_in_generation_session_broadcasted_to_all_other_nodes() { let _ = ::env_logger::try_init(); - let runtime = new_runtime(); - let clusters = make_clusters(&runtime, 6016, 3); - run_clusters(&clusters); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || clusters_clone.iter().all(all_connections_established)); + let ml = make_clusters(3); // ask one of nodes to produce faulty generation sessions - clusters[1].client().make_faulty_generation_sessions(); + ml.cluster(1).client().make_faulty_generation_sessions(); // start && wait for generation session to fail - let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); - let session_clone = session.clone(); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || session_clone.joint_public_and_secret().is_some() - && clusters_clone[0].client().generation_session(&SessionId::default()).is_none()); + let session = ml.cluster(0).client() + .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); + ml.loop_until(|| session.joint_public_and_secret().is_some() + && ml.cluster(0).client().generation_session(&SessionId::default()).is_none()); assert!(session.joint_public_and_secret().unwrap().is_err()); // check that faulty session is either removed from all nodes, or nonexistent (already removed) for i in 1..3 { - if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) { - let session_clone = session.clone(); - let clusters_clone = clusters.clone(); + if let Some(session) = ml.cluster(i).client().generation_session(&SessionId::default()) { // wait for both session completion && session removal (session completion event is fired // before 
session is removed from its own container by cluster) - loop_until(&runtime.executor(), TIMEOUT, move || session_clone.joint_public_and_secret().is_some() - && clusters_clone[i].client().generation_session(&SessionId::default()).is_none()); + ml.loop_until(|| session.joint_public_and_secret().is_some() + && ml.cluster(i).client().generation_session(&SessionId::default()).is_none()); assert!(session.joint_public_and_secret().unwrap().is_err()); } } - shutdown_clusters(&clusters); } #[test] fn generation_session_completion_signalled_if_failed_on_master() { let _ = ::env_logger::try_init(); - let runtime = new_runtime(); - let clusters = make_clusters(&runtime, 6025, 3); - run_clusters(&clusters); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || clusters_clone.iter().all(all_connections_established)); + let ml = make_clusters(3); // ask one of nodes to produce faulty generation sessions - clusters[0].client().make_faulty_generation_sessions(); + ml.cluster(0).client().make_faulty_generation_sessions(); // start && wait for generation session to fail - let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); - let session_clone = session.clone(); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || session_clone.joint_public_and_secret().is_some() - && clusters_clone[0].client().generation_session(&SessionId::default()).is_none()); + let session = ml.cluster(0).client() + .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); + ml.loop_until(|| session.joint_public_and_secret().is_some() + && ml.cluster(0).client().generation_session(&SessionId::default()).is_none()); assert!(session.joint_public_and_secret().unwrap().is_err()); // check that faulty session is either removed from all nodes, or nonexistent (already removed) for i in 1..3 { - if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) { - let session_clone = session.clone(); - let clusters_clone = clusters.clone(); + if let Some(session) = ml.cluster(i).client().generation_session(&SessionId::default()) { + let session = session.clone(); // wait for both session completion && session removal (session completion event is fired // before session is removed from its own container by cluster) - loop_until(&runtime.executor(), TIMEOUT, move || session_clone.joint_public_and_secret().is_some() - && clusters_clone[i].client().generation_session(&SessionId::default()).is_none()); + ml.loop_until(|| session.joint_public_and_secret().is_some() + && ml.cluster(i).client().generation_session(&SessionId::default()).is_none()); assert!(session.joint_public_and_secret().unwrap().is_err()); } } - shutdown_clusters(&clusters); } #[test] fn generation_session_is_removed_when_succeeded() { let _ = ::env_logger::try_init(); - let runtime = new_runtime(); - let clusters = make_clusters(&runtime, 6019, 3); - run_clusters(&clusters); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || clusters_clone.iter().all(all_connections_established)); + let ml = make_clusters(3); // start && wait for generation session to complete - let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); - let session_clone = session.clone(); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || 
(session_clone.state() == GenerationSessionState::Finished - || session_clone.state() == GenerationSessionState::Failed) - && clusters_clone[0].client().generation_session(&SessionId::default()).is_none()); + let session = ml.cluster(0).client() + .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); + ml.loop_until(|| (session.state() == GenerationSessionState::Finished + || session.state() == GenerationSessionState::Failed) + && ml.cluster(0).client().generation_session(&SessionId::default()).is_none()); assert!(session.joint_public_and_secret().unwrap().is_ok()); // check that on non-master nodes session is either: // already removed // or it is removed right after completion for i in 1..3 { - if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) { + if let Some(session) = ml.cluster(i).client().generation_session(&SessionId::default()) { // run to completion if completion message is still on the way // AND check that it is actually removed from cluster sessions - let session_clone = session.clone(); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished - || session_clone.state() == GenerationSessionState::Failed) - && clusters_clone[i].client().generation_session(&SessionId::default()).is_none()); + ml.loop_until(|| (session.state() == GenerationSessionState::Finished + || session.state() == GenerationSessionState::Failed) + && ml.cluster(i).client().generation_session(&SessionId::default()).is_none()); } } - shutdown_clusters(&clusters); } #[test] fn sessions_are_removed_when_initialization_fails() { - let runtime = new_runtime(); - let clusters = make_clusters(&runtime, 6022, 3); - run_clusters(&clusters); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || clusters_clone.iter().all(all_connections_established)); + let ml = make_clusters(3); + let client = ml.cluster(0).client(); // generation session { // try to start generation session => fail in initialization - assert_eq!(clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 100).map(|_| ()), + assert_eq!( + client.new_generation_session(SessionId::default(), None, Default::default(), 100).map(|_| ()), Err(Error::NotEnoughNodesForThreshold)); // try to start generation session => fails in initialization - assert_eq!(clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 100).map(|_| ()), + assert_eq!( + client.new_generation_session(SessionId::default(), None, Default::default(), 100).map(|_| ()), Err(Error::NotEnoughNodesForThreshold)); - assert!(clusters[0].data.sessions.generation_sessions.is_empty()); + assert!(ml.cluster(0).data.sessions.generation_sessions.is_empty()); } // decryption session { // try to start decryption session => fails in initialization - assert_eq!(clusters[0].client().new_decryption_session(Default::default(), Default::default(), Default::default(), Some(Default::default()), false, false).map(|_| ()), + assert_eq!( + client.new_decryption_session( + Default::default(), Default::default(), Default::default(), Some(Default::default()), false, false + ).map(|_| ()), Err(Error::InvalidMessage)); // try to start generation session => fails in initialization - assert_eq!(clusters[0].client().new_decryption_session(Default::default(), Default::default(), Default::default(), Some(Default::default()), 
false, false).map(|_| ()), + assert_eq!( + client.new_decryption_session( + Default::default(), Default::default(), Default::default(), Some(Default::default()), false, false + ).map(|_| ()), Err(Error::InvalidMessage)); - assert!(clusters[0].data.sessions.decryption_sessions.is_empty()); - assert!(clusters[0].data.sessions.negotiation_sessions.is_empty()); + assert!(ml.cluster(0).data.sessions.decryption_sessions.is_empty()); + assert!(ml.cluster(0).data.sessions.negotiation_sessions.is_empty()); } - shutdown_clusters(&clusters); } - // test ignored because of - // - // https://github.com/paritytech/parity-ethereum/issues/9635 #[test] - #[ignore] fn schnorr_signing_session_completes_if_node_does_not_have_a_share() { let _ = ::env_logger::try_init(); - let runtime = new_runtime(); - let clusters = make_clusters(&runtime, 6028, 3); - run_clusters(&clusters); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || clusters_clone.iter().all(all_connections_established)); + let ml = make_clusters(3); // start && wait for generation session to complete - let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); - let session_clone = session.clone(); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished - || session_clone.state() == GenerationSessionState::Failed) - && clusters_clone[0].client().generation_session(&SessionId::default()).is_none()); + let session = ml.cluster(0).client(). + new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); + ml.loop_until(|| (session.state() == GenerationSessionState::Finished + || session.state() == GenerationSessionState::Failed) + && ml.cluster(0).client().generation_session(&SessionId::default()).is_none()); assert!(session.joint_public_and_secret().unwrap().is_ok()); // now remove share from node2 - assert!((0..3).all(|i| clusters[i].data.sessions.generation_sessions.is_empty())); - clusters[2].data.config.key_storage.remove(&Default::default()).unwrap(); + assert!((0..3).all(|i| ml.cluster(i).data.sessions.generation_sessions.is_empty())); + ml.cluster(2).data.config.key_storage.remove(&Default::default()).unwrap(); // and try to sign message with generated key let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session0 = clusters[0].client().new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap(); - let session = clusters[0].data.sessions.schnorr_signing_sessions.first().unwrap(); + let session0 = ml.cluster(0).client() + .new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap(); + let session = ml.cluster(0).data.sessions.schnorr_signing_sessions.first().unwrap(); - let session_clone = session.clone(); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || session_clone.is_finished() && (0..3).all(|i| - clusters_clone[i].data.sessions.schnorr_signing_sessions.is_empty())); + ml.loop_until(|| session.is_finished() && (0..3).all(|i| + ml.cluster(i).data.sessions.schnorr_signing_sessions.is_empty())); session0.wait().unwrap(); // and try to sign message with generated key using node that has no key share let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session2 = 
clusters[2].client().new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap(); - let session = clusters[2].data.sessions.schnorr_signing_sessions.first().unwrap(); + let session2 = ml.cluster(2).client() + .new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap(); + let session = ml.cluster(2).data.sessions.schnorr_signing_sessions.first().unwrap(); - let session_clone = session.clone(); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || session_clone.is_finished() && (0..3).all(|i| - clusters_clone[i].data.sessions.schnorr_signing_sessions.is_empty())); + ml.loop_until(|| session.is_finished() && (0..3).all(|i| + ml.cluster(i).data.sessions.schnorr_signing_sessions.is_empty())); session2.wait().unwrap(); // now remove share from node1 - clusters[1].data.config.key_storage.remove(&Default::default()).unwrap(); + ml.cluster(1).data.config.key_storage.remove(&Default::default()).unwrap(); // and try to sign message with generated key let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session1 = clusters[0].client().new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap(); - let session = clusters[0].data.sessions.schnorr_signing_sessions.first().unwrap(); + let session1 = ml.cluster(0).client() + .new_schnorr_signing_session(Default::default(), signature.into(), None, Default::default()).unwrap(); + let session = ml.cluster(0).data.sessions.schnorr_signing_sessions.first().unwrap(); - let session = session.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || session.is_finished()); + ml.loop_until(|| session.is_finished()); session1.wait().unwrap_err(); - shutdown_clusters(&clusters); } - // test ignored because of - // - // https://github.com/paritytech/parity-ethereum/issues/9635 #[test] - #[ignore] fn ecdsa_signing_session_completes_if_node_does_not_have_a_share() { let _ = ::env_logger::try_init(); - let runtime = new_runtime(); - let clusters = make_clusters(&runtime, 6041, 4); - run_clusters(&clusters); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || clusters_clone.iter().all(all_connections_established)); + let ml = make_clusters(4); // start && wait for generation session to complete - let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); - let session_clone = session.clone(); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), TIMEOUT, move || (session_clone.state() == GenerationSessionState::Finished - || session_clone.state() == GenerationSessionState::Failed) - && clusters_clone[0].client().generation_session(&SessionId::default()).is_none()); + let session = ml.cluster(0).client() + .new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap(); + ml.loop_until(|| (session.state() == GenerationSessionState::Finished + || session.state() == GenerationSessionState::Failed) + && ml.cluster(0).client().generation_session(&SessionId::default()).is_none()); assert!(session.joint_public_and_secret().unwrap().is_ok()); // now remove share from node2 - assert!((0..3).all(|i| clusters[i].data.sessions.generation_sessions.is_empty())); - clusters[2].data.config.key_storage.remove(&Default::default()).unwrap(); + assert!((0..3).all(|i| 
ml.cluster(i).data.sessions.generation_sessions.is_empty())); + ml.cluster(2).data.config.key_storage.remove(&Default::default()).unwrap(); // and try to sign message with generated key let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session0 = clusters[0].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap(); - let session = clusters[0].data.sessions.ecdsa_signing_sessions.first().unwrap(); + let session0 = ml.cluster(0).client() + .new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap(); + let session = ml.cluster(0).data.sessions.ecdsa_signing_sessions.first().unwrap(); - let session_clone = session.clone(); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), Duration::from_millis(1000), move || session_clone.is_finished() && (0..3).all(|i| - clusters_clone[i].data.sessions.ecdsa_signing_sessions.is_empty())); + ml.loop_until(|| session.is_finished() && (0..3).all(|i| + ml.cluster(i).data.sessions.ecdsa_signing_sessions.is_empty())); session0.wait().unwrap(); // and try to sign message with generated key using node that has no key share let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session2 = clusters[2].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap(); - let session = clusters[2].data.sessions.ecdsa_signing_sessions.first().unwrap(); - let session_clone = session.clone(); - let clusters_clone = clusters.clone(); - loop_until(&runtime.executor(), Duration::from_millis(1000), move || session_clone.is_finished() && (0..3).all(|i| - clusters_clone[i].data.sessions.ecdsa_signing_sessions.is_empty())); + let session2 = ml.cluster(2).client() + .new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap(); + let session = ml.cluster(2).data.sessions.ecdsa_signing_sessions.first().unwrap(); + ml.loop_until(|| session.is_finished() && (0..3).all(|i| + ml.cluster(i).data.sessions.ecdsa_signing_sessions.is_empty())); session2.wait().unwrap(); // now remove share from node1 - clusters[1].data.config.key_storage.remove(&Default::default()).unwrap(); + ml.cluster(1).data.config.key_storage.remove(&Default::default()).unwrap(); // and try to sign message with generated key let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session1 = clusters[0].client().new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap(); - let session = clusters[0].data.sessions.ecdsa_signing_sessions.first().unwrap(); - loop_until(&runtime.executor(), Duration::from_millis(1000), move || session.is_finished()); + let session1 = ml.cluster(0).client() + .new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()).unwrap(); + let session = ml.cluster(0).data.sessions.ecdsa_signing_sessions.first().unwrap(); + ml.loop_until(|| session.is_finished()); session1.wait().unwrap_err(); - shutdown_clusters(&clusters); } } diff --git a/secret-store/src/key_server_cluster/cluster_connections.rs b/secret-store/src/key_server_cluster/cluster_connections.rs new file mode 100644 index 000000000..b484e6d8e --- /dev/null +++ b/secret-store/src/key_server_cluster/cluster_connections.rs @@ -0,0 +1,176 @@ +// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::BTreeSet;
+use std::sync::Arc;
+use key_server_cluster::{Error, NodeId};
+use key_server_cluster::message::Message;
+
+/// Connection to the single node. Provides basic information about connected node and
+/// allows sending messages to this node.
+pub trait Connection: Send + Sync {
+	/// Is this inbound connection? This only matters when both nodes are simultaneously establishing
+	/// two connections to each other. The agreement is that the inbound connection from the node with
+	/// lower NodeId is used and the other connection is closed.
+	fn is_inbound(&self) -> bool;
+	/// Returns id of the connected node.
+	fn node_id(&self) -> &NodeId;
+	/// Returns 'address' of the node to use in traces.
+	fn node_address(&self) -> String;
+	/// Send message to the connected node.
+	fn send_message(&self, message: Message);
+}
+
+/// Connections manager. Responsible for keeping us connected to all required nodes.
+pub trait ConnectionManager: 'static + Send + Sync {
+	/// Returns shared reference to connections provider.
+	fn provider(&self) -> Arc<ConnectionProvider>;
+	/// Try to reach all disconnected nodes immediately. This method is exposed mostly for
+	/// tests, where all 'nodes' first start listening for incoming connections and only
+	/// after that actually start connecting to each other.
+	fn connect(&self);
+}
+
+/// Connections provider. Holds all active connections and the set of nodes that we need to
+/// connect to. At any moment connection could be lost and the set of connected/disconnected
+/// nodes could change (on behalf of the connection manager).
+/// Clone operation should be cheap (Arc).
+pub trait ConnectionProvider: Send + Sync {
+	/// Returns the set of currently connected nodes. Error is returned when our node is
+	/// not a part of the cluster ('isolated' node).
+	fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error>;
+	/// Returns the set of currently disconnected nodes.
+	fn disconnected_nodes(&self) -> BTreeSet<NodeId>;
+	/// Returns the reference to the active node connection or None if the node is not connected.
+	fn connection(&self, node: &NodeId) -> Option<Arc<Connection>>;
+}
+
+#[cfg(test)]
+pub mod tests {
+	use std::collections::{BTreeSet, VecDeque};
+	use std::sync::Arc;
+	use std::sync::atomic::{AtomicBool, Ordering};
+	use parking_lot::Mutex;
+	use key_server_cluster::{Error, NodeId};
+	use key_server_cluster::message::Message;
+	use super::{ConnectionManager, Connection, ConnectionProvider};
+
+	/// Shared messages queue.
+	pub type MessagesQueue = Arc<Mutex<VecDeque<(NodeId, NodeId, Message)>>>;
+
+	/// Single node connections.
+	pub struct TestConnections {
+		node: NodeId,
+		is_isolated: AtomicBool,
+		connected_nodes: Mutex<BTreeSet<NodeId>>,
+		disconnected_nodes: Mutex<BTreeSet<NodeId>>,
+		messages: MessagesQueue,
+	}
+
+	/// Single connection.
+	pub struct TestConnection {
+		from: NodeId,
+		to: NodeId,
+		messages: MessagesQueue,
+	}
+
+	impl TestConnections {
+		pub fn isolate(&self) {
+			let connected_nodes = ::std::mem::replace(&mut *self.connected_nodes.lock(), Default::default());
+			self.is_isolated.store(true, Ordering::Relaxed);
+			self.disconnected_nodes.lock().extend(connected_nodes)
+		}
+
+		pub fn disconnect(&self, node: NodeId) {
+			self.connected_nodes.lock().remove(&node);
+			self.disconnected_nodes.lock().insert(node);
+		}
+
+		pub fn exclude(&self, node: NodeId) {
+			self.connected_nodes.lock().remove(&node);
+			self.disconnected_nodes.lock().remove(&node);
+		}
+
+		pub fn include(&self, node: NodeId) {
+			self.connected_nodes.lock().insert(node);
+		}
+	}
+
+	impl ConnectionManager for Arc<TestConnections> {
+		fn provider(&self) -> Arc<ConnectionProvider> {
+			self.clone()
+		}
+
+		fn connect(&self) {}
+	}
+
+	impl ConnectionProvider for TestConnections {
+		fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error> {
+			match self.is_isolated.load(Ordering::Relaxed) {
+				false => Ok(self.connected_nodes.lock().clone()),
+				true => Err(Error::NodeDisconnected),
+			}
+		}
+
+		fn disconnected_nodes(&self) -> BTreeSet<NodeId> {
+			self.disconnected_nodes.lock().clone()
+		}
+
+		fn connection(&self, node: &NodeId) -> Option<Arc<Connection>> {
+			match self.connected_nodes.lock().contains(node) {
+				true => Some(Arc::new(TestConnection {
+					from: self.node,
+					to: *node,
+					messages: self.messages.clone(),
+				})),
+				false => None,
+			}
+		}
+	}
+
+	impl Connection for TestConnection {
+		fn is_inbound(&self) -> bool {
+			false
+		}
+
+		fn node_id(&self) -> &NodeId {
+			&self.to
+		}
+
+		fn node_address(&self) -> String {
+			format!("{}", self.to)
+		}
+
+		fn send_message(&self, message: Message) {
+			self.messages.lock().push_back((self.from, self.to, message))
+		}
+	}
+
+	pub fn new_test_connections(
+		messages: MessagesQueue,
+		node: NodeId,
+		mut nodes: BTreeSet<NodeId>
+	) -> Arc<TestConnections> {
+		let is_isolated = !nodes.remove(&node);
+		Arc::new(TestConnections {
+			node,
+			is_isolated: AtomicBool::new(is_isolated),
+			connected_nodes: Mutex::new(nodes),
+			disconnected_nodes: Default::default(),
+			messages,
+		})
+	}
+}
diff --git a/secret-store/src/key_server_cluster/cluster_connections_net.rs b/secret-store/src/key_server_cluster/cluster_connections_net.rs
new file mode 100644
index 000000000..bda7f7dd2
--- /dev/null
+++ b/secret-store/src/key_server_cluster/cluster_connections_net.rs
@@ -0,0 +1,539 @@
+// Copyright 2015-2018 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
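// NOTE: the module below is the TCP-backed implementation of the
// `ConnectionManager`/`ConnectionProvider`/`Connection` traits from
// cluster_connections.rs. Code written against those traits works unchanged
// over both this transport and the in-memory test transport above, e.g. this
// hedged broadcast sketch (`broadcast` is a hypothetical helper, not part of
// this patch):
//
//	fn broadcast(provider: &Arc<ConnectionProvider>, message: &Message) -> Result<(), Error> {
//		// connected_nodes() fails for isolated nodes, surfacing isolation early
//		for node in provider.connected_nodes()? {
//			if let Some(connection) = provider.connection(&node) {
//				connection.send_message(message.clone());
//			}
//		}
//		Ok(())
//	}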
+
+use std::collections::{BTreeMap, BTreeSet};
+use std::collections::btree_map::Entry;
+use std::io;
+use std::net::{SocketAddr, IpAddr};
+use std::sync::Arc;
+use std::time::{Duration, Instant};
+use futures::{future, Future, Stream};
+use parking_lot::{Mutex, RwLock};
+use tokio::net::{TcpListener, TcpStream};
+use tokio::timer::{Interval, timeout::Error as TimeoutError};
+use tokio_io::IoFuture;
+use ethkey::KeyPair;
+use parity_runtime::Executor;
+use key_server_cluster::{Error, NodeId, ClusterConfiguration, NodeKeyPair};
+use key_server_cluster::cluster_connections::{ConnectionProvider, Connection, ConnectionManager};
+use key_server_cluster::connection_trigger::{Maintain, ConnectionTrigger};
+use key_server_cluster::cluster_message_processor::MessageProcessor;
+use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream,
+	read_encrypted_message, WriteMessage, write_encrypted_message};
+use key_server_cluster::message::{self, ClusterMessage, Message};
+use key_server_cluster::net::{accept_connection as io_accept_connection,
+	connect as io_connect, Connection as IoConnection};
+
+/// Empty future.
+pub type BoxedEmptyFuture = Box<Future<Item = (), Error = ()> + Send>;
+
+/// Maintain interval (seconds). Every MAINTAIN_INTERVAL seconds node:
+/// 1) checks if connected nodes are responding to KeepAlive messages
+/// 2) tries to connect to disconnected nodes
+/// 3) checks if enc/dec sessions have timed out
+const MAINTAIN_INTERVAL: u64 = 10;
+
+/// When no messages have been received from node within KEEP_ALIVE_SEND_INTERVAL,
+/// we must send KeepAlive message to the node to check if it still responds to messages.
+const KEEP_ALIVE_SEND_INTERVAL: Duration = Duration::from_secs(30);
+/// When no messages have been received from node within KEEP_ALIVE_DISCONNECT_INTERVAL,
+/// we must treat this node as non-responding && disconnect from it.
+const KEEP_ALIVE_DISCONNECT_INTERVAL: Duration = Duration::from_secs(60);
+
+/// Network connection manager configuration.
+pub struct NetConnectionsManagerConfig {
+	/// Allow connecting to 'higher' nodes.
+	pub allow_connecting_to_higher_nodes: bool,
+	/// Interface to listen to.
+	pub listen_address: (String, u16),
+	/// True if we should auto-start the servers set change session when the servers set changes.
+	/// This will only work when the servers set is configured using the KeyServerSet contract.
+	pub auto_migrate_enabled: bool,
+}
+
+/// Network connections manager.
+pub struct NetConnectionsManager {
+	/// Address we're listening on for incoming connections.
+	listen_address: SocketAddr,
+	/// Shared cluster connections data reference.
+	data: Arc<NetConnectionsData>,
+}
+
+/// Network connections data. Shared among NetConnectionsManager and spawned futures.
+struct NetConnectionsData {
+	/// Allow connecting to 'higher' nodes.
+	allow_connecting_to_higher_nodes: bool,
+	/// Reference to tokio task executor.
+	executor: Executor,
+	/// Key pair of this node.
+	self_key_pair: Arc<NodeKeyPair>,
+	/// Network messages processor.
+	message_processor: Arc<MessageProcessor>,
+	/// Connections trigger.
+	trigger: Mutex<Box<ConnectionTrigger>>,
+	/// Mutable connection data.
+	container: Arc<RwLock<NetConnectionsContainer>>,
+}
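// NOTE: a hedged wiring sketch, not part of this patch, mirroring what
// `new_network_cluster` in cluster.rs does with the types above; `executor`,
// `message_processor`, `trigger`, `container` and `cluster_config` stand in
// for values the caller already owns.
//
//	let net_config = NetConnectionsManagerConfig {
//		allow_connecting_to_higher_nodes: true,
//		listen_address: ("0.0.0.0".into(), 8083),
//		auto_migrate_enabled: false,
//	};
//	let manager = NetConnectionsManager::new(
//		executor, message_processor, trigger, container, &cluster_config, net_config)?;
//	manager.start()?; // bind the listener and schedule periodic maintenance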
+
+/// Network connections container. This is the only mutable data of NetConnectionsManager.
+/// The set of nodes is mutated by the connection trigger and the connections set is also
+/// mutated by spawned futures.
+pub struct NetConnectionsContainer {
+	/// Is this node isolated from cluster?
+	pub is_isolated: bool,
+	/// Current key servers set.
+	pub nodes: BTreeMap<NodeId, SocketAddr>,
+	/// Active connections to key servers.
+	pub connections: BTreeMap<NodeId, Arc<NetConnection>>,
+}
+
+/// Network connection to single key server node.
+pub struct NetConnection {
+	executor: Executor,
+	/// Id of the peer node.
+	node_id: NodeId,
+	/// Address of the peer node.
+	node_address: SocketAddr,
+	/// Is this inbound (true) or outbound (false) connection?
+	is_inbound: bool,
+	/// Key pair that is used to encrypt connection's messages.
+	key: KeyPair,
+	/// Last message time.
+	last_message_time: RwLock<Instant>,
+	/// Underlying TCP stream.
+	stream: SharedTcpStream,
+}
+
+impl NetConnectionsManager {
+	/// Create new network connections manager.
+	pub fn new(
+		executor: Executor,
+		message_processor: Arc<MessageProcessor>,
+		trigger: Box<ConnectionTrigger>,
+		container: Arc<RwLock<NetConnectionsContainer>>,
+		config: &ClusterConfiguration,
+		net_config: NetConnectionsManagerConfig,
+	) -> Result<Self, Error> {
+		let listen_address = make_socket_address(
+			&net_config.listen_address.0,
+			net_config.listen_address.1)?;
+
+		Ok(NetConnectionsManager {
+			listen_address,
+			data: Arc::new(NetConnectionsData {
+				allow_connecting_to_higher_nodes: net_config.allow_connecting_to_higher_nodes,
+				executor,
+				message_processor,
+				self_key_pair: config.self_key_pair.clone(),
+				trigger: Mutex::new(trigger),
+				container,
+			}),
+		})
+	}
+
+	/// Start listening for connections and schedule connections maintenance.
+	pub fn start(&self) -> Result<(), Error> {
+		net_listen(&self.listen_address, self.data.clone())?;
+		net_schedule_maintain(self.data.clone());
+		Ok(())
+	}
+}
+
+impl ConnectionManager for NetConnectionsManager {
+	fn provider(&self) -> Arc<ConnectionProvider> {
+		self.data.container.clone()
+	}
+
+	fn connect(&self) {
+		net_connect_disconnected(self.data.clone());
+	}
+}
+
+impl ConnectionProvider for RwLock<NetConnectionsContainer> {
+	fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error> {
+		let connections = self.read();
+		if connections.is_isolated {
+			return Err(Error::NodeDisconnected);
+		}
+
+		Ok(connections.connections.keys().cloned().collect())
+	}
+
+	fn disconnected_nodes(&self) -> BTreeSet<NodeId> {
+		let connections = self.read();
+		connections.nodes.keys()
+			.filter(|node_id| !connections.connections.contains_key(node_id))
+			.cloned()
+			.collect()
+	}
+
+	fn connection(&self, node: &NodeId) -> Option<Arc<Connection>> {
+		match self.read().connections.get(node).cloned() {
+			Some(connection) => Some(connection),
+			None => None,
+		}
+	}
+}
+
+impl NetConnection {
+	/// Create new connection.
+	pub fn new(executor: Executor, is_inbound: bool, connection: IoConnection) -> NetConnection {
+		NetConnection {
+			executor,
+			node_id: connection.node_id,
+			node_address: connection.address,
+			is_inbound: is_inbound,
+			stream: connection.stream,
+			key: connection.key,
+			last_message_time: RwLock::new(Instant::now()),
+		}
+	}
+
+	/// Get last message time.
+	pub fn last_message_time(&self) -> Instant {
+		*self.last_message_time.read()
+	}
+
+	/// Update last message time.
+	pub fn set_last_message_time(&self, last_message_time: Instant) {
+		*self.last_message_time.write() = last_message_time
+	}
+
+	/// Returns future that sends encrypted message over this connection.
+	pub fn send_message_future(&self, message: Message) -> WriteMessage<SharedTcpStream> {
+		write_encrypted_message(self.stream.clone(), &self.key, message)
+	}
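	// NOTE: when two nodes connect to each other simultaneously, each side ends
	// up with a duplicate pair of connections. The agreement enforced by
	// `NetConnectionsData::insert` below is that the connection initiated by the
	// node with the lower NodeId wins. Restated as a hedged standalone predicate
	// (hypothetical, not part of this patch):
	//
	//	fn keep_duplicate(self_id: &NodeId, peer_id: &NodeId, is_inbound: bool) -> bool {
	//		// an inbound duplicate survives only if the initiating peer has the lower id
	//		if is_inbound { peer_id < self_id } else { self_id < peer_id }
	//	}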
+
+	/// Returns future that reads encrypted message from this connection.
+	pub fn read_message_future(&self) -> ReadMessage<SharedTcpStream> {
+		read_encrypted_message(self.stream.clone(), self.key.clone())
+	}
+}
+
+impl Connection for NetConnection {
+	fn is_inbound(&self) -> bool {
+		self.is_inbound
+	}
+
+	fn node_id(&self) -> &NodeId {
+		&self.node_id
+	}
+
+	fn node_address(&self) -> String {
+		format!("{}", self.node_address)
+	}
+
+	fn send_message(&self, message: Message) {
+		execute(&self.executor, self.send_message_future(message).then(|_| Ok(())));
+	}
+}
+
+impl NetConnectionsData {
+	/// Returns all active connections.
+	pub fn active_connections(&self) -> Vec<Arc<NetConnection>> {
+		self.container.read().connections.values().cloned().collect()
+	}
+
+	/// Returns all disconnected nodes along with their addresses.
+	pub fn disconnected_nodes(&self) -> Vec<(NodeId, SocketAddr)> {
+		let container = self.container.read();
+		container.nodes.iter()
+			.filter(|(node_id, _)| !container.connections.contains_key(node_id))
+			.map(|(node_id, addr)| (*node_id, *addr))
+			.collect()
+	}
+
+	/// Try to insert new connection. Returns true if connection has been inserted.
+	/// Returns false (and ignores the connection) if:
+	/// - we do not expect connection from this node
+	/// - we are already connected to the node and the existing connection 'supersedes'
+	///   the new connection by agreement
+	pub fn insert(&self, connection: Arc<NetConnection>) -> bool {
+		let node = *connection.node_id();
+		let mut container = self.container.write();
+		if !container.nodes.contains_key(&node) {
+			trace!(target: "secretstore_net", "{}: ignoring unknown connection from {} at {}",
+				self.self_key_pair.public(), node, connection.node_address());
+			return false;
+		}
+
+		if container.connections.contains_key(&node) {
+			// we have already connected to the same node
+			// the agreement is that node with lower id must establish connection to node with higher id
+			if (*self.self_key_pair.public() < node && connection.is_inbound())
+				|| (*self.self_key_pair.public() > node && !connection.is_inbound()) {
+				return false;
+			}
+		}
+
+		trace!(target: "secretstore_net",
+			"{}: inserting connection to {} at {}. Connected to {} of {} nodes",
+			self.self_key_pair.public(), node, connection.node_address(),
+			container.connections.len() + 1, container.nodes.len());
+		container.connections.insert(node, connection);
+
+		true
+	}
+
+	/// Tries to remove connection. Returns true if connection has been removed.
+	/// Returns false if we do not know this connection.
+	pub fn remove(&self, connection: &NetConnection) -> bool {
+		let node_id = *connection.node_id();
+		let is_inbound = connection.is_inbound();
+		let mut container = self.container.write();
+		if let Entry::Occupied(entry) = container.connections.entry(node_id) {
+			if entry.get().is_inbound() != is_inbound {
+				return false;
+			}
+
+			trace!(target: "secretstore_net", "{}: removing connection to {} at {}",
+				self.self_key_pair.public(), node_id, entry.get().node_address());
+			entry.remove_entry();
+
+			true
+		} else {
+			false
+		}
+	}
+}
+
+/// Listen for incoming connections.
+fn net_listen(
+	listen_address: &SocketAddr,
+	data: Arc<NetConnectionsData>,
+) -> Result<(), Error> {
+	execute(&data.executor, net_listen_future(listen_address, data.clone())?);
+	Ok(())
+}
+
+/// Future that listens for incoming connections.
+fn net_listen_future(
+	listen_address: &SocketAddr,
+	data: Arc<NetConnectionsData>,
+) -> Result<BoxedEmptyFuture, Error> {
+	Ok(Box::new(TcpListener::bind(listen_address)?
+		.incoming()
+		.and_then(move |stream| {
+			net_accept_connection(data.clone(), stream);
+			Ok(())
+		})
+		.for_each(|_| Ok(()))
+		.then(|_| future::ok(()))))
+}
+
+/// Accept incoming connection.
+fn net_accept_connection(
+	data: Arc<NetConnectionsData>,
+	stream: TcpStream,
+) {
+	execute(&data.executor, net_accept_connection_future(data.clone(), stream));
+}
+
+/// Accept incoming connection future.
+fn net_accept_connection_future(data: Arc<NetConnectionsData>, stream: TcpStream) -> BoxedEmptyFuture {
+	Box::new(io_accept_connection(stream, data.self_key_pair.clone())
+		.then(move |result| net_process_connection_result(data, None, result))
+		.then(|_| future::ok(())))
+}
+
+/// Connect to remote node.
+fn net_connect(
+	data: Arc<NetConnectionsData>,
+	remote: SocketAddr,
+) {
+	execute(&data.executor, net_connect_future(data.clone(), remote));
+}
+
+/// Connect to remote node future.
+fn net_connect_future(
+	data: Arc<NetConnectionsData>,
+	remote: SocketAddr,
+) -> BoxedEmptyFuture {
+	let disconnected_nodes = data.container.disconnected_nodes();
+	Box::new(io_connect(&remote, data.self_key_pair.clone(), disconnected_nodes)
+		.then(move |result| net_process_connection_result(data, Some(remote), result))
+		.then(|_| future::ok(())))
+}
+
+/// Process network connection result.
+fn net_process_connection_result(
+	data: Arc<NetConnectionsData>,
+	outbound_addr: Option<SocketAddr>,
+	result: Result<DeadlineStatus<Result<IoConnection, Error>>, TimeoutError<io::Error>>,
+) -> IoFuture<Result<(), Error>> {
+	match result {
+		Ok(DeadlineStatus::Meet(Ok(connection))) => {
+			let connection = Arc::new(NetConnection::new(data.executor.clone(), outbound_addr.is_none(), connection));
+			if data.insert(connection.clone()) {
+				let maintain_action = data.trigger.lock().on_connection_established(connection.node_id());
+				maintain_connection_trigger(data.clone(), maintain_action);
+
+				return net_process_connection_messages(data, connection);
+			}
+		},
+		Ok(DeadlineStatus::Meet(Err(err))) => {
+			warn!(target: "secretstore_net", "{}: protocol error '{}' when establishing {} connection{}",
+				data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
+				outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
+		},
+		Ok(DeadlineStatus::Timeout) => {
+			warn!(target: "secretstore_net", "{}: timeout when establishing {} connection{}",
+				data.self_key_pair.public(), if outbound_addr.is_some() { "outbound" } else { "inbound" },
+				outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
+		},
+		Err(err) => {
+			warn!(target: "secretstore_net", "{}: network error '{}' when establishing {} connection{}",
+				data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
+				outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
+		},
+	}
+
+	Box::new(future::ok(Ok(())))
+}
+
+/// Process connection messages.
+fn net_process_connection_messages(
+	data: Arc<NetConnectionsData>,
+	connection: Arc<NetConnection>,
+) -> IoFuture<Result<(), Error>> {
+	Box::new(connection
+		.read_message_future()
+		.then(move |result|
+			match result {
+				Ok((_, Ok(message))) => {
+					connection.set_last_message_time(Instant::now());
+					data.message_processor.process_connection_message(connection.clone(), message);
+					// continue serving connection
+					let process_messages_future = net_process_connection_messages(
+						data.clone(), connection).then(|_| Ok(()));
+					execute(&data.executor, process_messages_future);
+					Box::new(future::ok(Ok(())))
+				},
+				Ok((_, Err(err))) => {
+					warn!(target: "secretstore_net", "{}: protocol error '{}' when reading message from node {}",
+						data.self_key_pair.public(), err, connection.node_id());
+					// continue serving connection
+					let process_messages_future = net_process_connection_messages(
+						data.clone(), connection).then(|_| Ok(()));
+					execute(&data.executor, process_messages_future);
+					Box::new(future::ok(Err(err)))
+				},
+				Err(err) => {
+					let node_id = *connection.node_id();
+					warn!(target: "secretstore_net", "{}: network error '{}' when reading message from node {}",
+						data.self_key_pair.public(), err, node_id);
+					// close connection
+					if data.remove(&*connection) {
+						let maintain_action = data.trigger.lock().on_connection_closed(&node_id);
+						maintain_connection_trigger(data, maintain_action);
+					}
+					Box::new(future::err(err))
+				},
+			}
+		))
+}
+
+/// Schedule connections maintenance.
+fn net_schedule_maintain(data: Arc<NetConnectionsData>) {
+	let closure_data = data.clone();
+	execute(&data.executor, Interval::new_interval(Duration::new(MAINTAIN_INTERVAL, 0))
+		.and_then(move |_| Ok(net_maintain(closure_data.clone())))
+		.for_each(|_| Ok(()))
+		.then(|_| future::ok(())));
+}
+
+/// Maintain network connections.
+fn net_maintain(data: Arc<NetConnectionsData>) {
+	trace!(target: "secretstore_net", "{}: executing maintain procedures", data.self_key_pair.public());
+
+	update_nodes_set(data.clone());
+	data.message_processor.maintain_sessions();
+	net_keep_alive(data.clone());
+	net_connect_disconnected(data);
+}
+
+/// Send keep alive messages to remote nodes.
+fn net_keep_alive(data: Arc<NetConnectionsData>) {
+	let now = Instant::now();
+	let active_connections = data.active_connections();
+	for connection in active_connections {
+		let last_message_diff = now - connection.last_message_time();
+		if last_message_diff > KEEP_ALIVE_DISCONNECT_INTERVAL {
+			warn!(target: "secretstore_net", "{}: keep alive timeout for node {}",
+				data.self_key_pair.public(), connection.node_id());
+
+			let node_id = *connection.node_id();
+			if data.remove(&*connection) {
+				let maintain_action = data.trigger.lock().on_connection_closed(&node_id);
+				maintain_connection_trigger(data.clone(), maintain_action);
+			}
+			data.message_processor.process_disconnect(&node_id);
+		}
+		else if last_message_diff > KEEP_ALIVE_SEND_INTERVAL {
+			connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {})));
+		}
+	}
+}
+
+/// Connect disconnected nodes.
+fn net_connect_disconnected(data: Arc<NetConnectionsData>) {
+	let disconnected_nodes = data.disconnected_nodes();
+	for (node_id, address) in disconnected_nodes {
+		if data.allow_connecting_to_higher_nodes || *data.self_key_pair.public() < node_id {
+			net_connect(data.clone(), address);
+		}
+	}
+}
+
+/// Schedule future execution.
+fn execute<F: Future<Item = (), Error = ()> + Send + 'static>(executor: &Executor, f: F) {
+	if let Err(err) = future::Executor::execute(executor, Box::new(f)) {
+		error!("Secret store runtime unable to spawn task. Runtime is shutting down. ({:?})", err);
+	}
+}
+
+/// Try to update active nodes set from connection trigger.
+fn update_nodes_set(data: Arc<NetConnectionsData>) {
+	let maintain_action = data.trigger.lock().on_maintain();
+	maintain_connection_trigger(data, maintain_action);
+}
+
+/// Execute maintain procedures of connections trigger.
+fn maintain_connection_trigger(data: Arc<NetConnectionsData>, maintain_action: Option<Maintain>) {
+	if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Session) {
+		let session_params = data.trigger.lock().maintain_session();
+		if let Some(session_params) = session_params {
+			let session = data.message_processor.start_servers_set_change_session(session_params);
+			match session {
+				Ok(_) => trace!(target: "secretstore_net", "{}: started auto-migrate session",
+					data.self_key_pair.public()),
+				Err(err) => trace!(target: "secretstore_net", "{}: failed to start auto-migrate session with: {}",
+					data.self_key_pair.public(), err),
+			}
+		}
+	}
+	if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Connections) {
+		let mut trigger = data.trigger.lock();
+		let mut data = data.container.write();
+		trigger.maintain_connections(&mut *data);
+	}
+}
+
+/// Compose SocketAddr from configuration's address and port.
+fn make_socket_address(address: &str, port: u16) -> Result<SocketAddr, Error> {
+	let ip_address: IpAddr = address.parse().map_err(|_| Error::InvalidNodeAddress)?;
+	Ok(SocketAddr::new(ip_address, port))
+}
diff --git a/secret-store/src/key_server_cluster/cluster_message_processor.rs b/secret-store/src/key_server_cluster/cluster_message_processor.rs
new file mode 100644
index 000000000..b4ba5ef03
--- /dev/null
+++ b/secret-store/src/key_server_cluster/cluster_message_processor.rs
@@ -0,0 +1,357 @@
+// Copyright 2015-2018 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::sync::Arc;
+use key_server_cluster::{Error, NodeId, NodeKeyPair};
+use key_server_cluster::cluster::{ServersSetChangeParams, new_servers_set_change_session};
+use key_server_cluster::cluster_sessions::{AdminSession};
+use key_server_cluster::cluster_connections::{ConnectionProvider, Connection};
+use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, ClusterSessionsContainer,
+	create_cluster_view};
+use key_server_cluster::cluster_sessions_creator::{ClusterSessionCreator, IntoSessionId};
+use key_server_cluster::message::{self, Message, ClusterMessage};
+use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession,
+	IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction};
+use key_server_cluster::connection_trigger::ServersSetChangeSessionCreatorConnector;
+
+/// Something that is able to process signals/messages from other nodes.
+pub trait MessageProcessor: Send + Sync {
+	/// Process disconnect from the remote node.
+	fn process_disconnect(&self, node: &NodeId);
+	/// Process single message from the connection.
+	fn process_connection_message(&self, connection: Arc<Connection>, message: Message);
+
+	/// Start servers set change session. This is typically used by ConnectionManager when
+	/// it detects that auto-migration session needs to be started.
+	fn start_servers_set_change_session(&self, params: ServersSetChangeParams) -> Result<Arc<AdminSession>, Error>;
+	/// Try to continue session after key version negotiation session is completed.
+	fn try_continue_session(
+		&self,
+		session: Option<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>
+	);
+	/// Maintain active sessions. Typically called by the ConnectionManager at some intervals.
+	/// Should cancel stalled sessions and send keep-alive messages for sessions that support it.
+	fn maintain_sessions(&self);
+}
+
+/// Bridge between ConnectionManager and ClusterSessions.
+pub struct SessionsMessageProcessor {
+	self_key_pair: Arc<NodeKeyPair>,
+	servers_set_change_creator_connector: Arc<ServersSetChangeSessionCreatorConnector>,
+	sessions: Arc<ClusterSessions>,
+	connections: Arc<ConnectionProvider>,
+}
+
+impl SessionsMessageProcessor {
+	/// Create new instance of SessionsMessageProcessor.
+	pub fn new(
+		self_key_pair: Arc<NodeKeyPair>,
+		servers_set_change_creator_connector: Arc<ServersSetChangeSessionCreatorConnector>,
+		sessions: Arc<ClusterSessions>,
+		connections: Arc<ConnectionProvider>,
+	) -> Self {
+		SessionsMessageProcessor {
+			self_key_pair,
+			servers_set_change_creator_connector,
+			sessions,
+			connections,
+		}
+	}
+
+	/// Process single session message from connection.
+	fn process_message<S: ClusterSession, SC: ClusterSessionCreator<S, D>, D>(
+		&self,
+		sessions: &ClusterSessionsContainer<S, SC, D>,
+		connection: Arc<Connection>,
+		mut message: Message,
+	) -> Option<Arc<S>>
+		where
+			Message: IntoSessionId<S::Id>
+	{
+		// get or create new session, if required
+		let mut sender = *connection.node_id();
+		let session = self.prepare_session(sessions, &sender, &message);
+		// send error if session is not found, or failed to create
+		let session = match session {
+			Ok(session) => session,
+			Err(error) => {
+				// this is new session => it is not yet in container
+				warn!(target: "secretstore_net",
+					"{}: {} session read error '{}' when requested for session from node {}",
+					self.self_key_pair.public(), S::type_name(), error, sender);
+				if !message.is_error_message() {
+					let qed = "session_id only fails for cluster messages;
+						only session messages are passed to process_message;
+						qed";
+					let session_id = message.into_session_id().expect(qed);
+					let session_nonce = message.session_nonce().expect(qed);
+
+					connection.send_message(SC::make_error_message(session_id, session_nonce, error));
+				}
+				return None;
+			},
+		};
+
+		let session_id = session.id();
+		let mut is_queued_message = false;
+		loop {
+			let message_result = session.on_message(&sender, &message);
+			match message_result {
+				Ok(_) => {
+					// if session is completed => stop
+					if session.is_finished() {
+						info!(target: "secretstore_net",
+							"{}: {} session completed", self.self_key_pair.public(), S::type_name());
+						sessions.remove(&session_id);
+						return Some(session);
+					}
+
+					// try to dequeue message
+					match sessions.dequeue_message(&session_id) {
+						Some((msg_sender, msg)) => {
+							is_queued_message = true;
+							sender = msg_sender;
+							message = msg;
+						},
+						None => return Some(session),
+					}
+				},
+				Err(Error::TooEarlyForRequest) => {
+					sessions.enqueue_message(&session_id, sender, message, is_queued_message);
+					return Some(session);
+				},
+				Err(err) => {
+					warn!(
+						target: "secretstore_net",
+						"{}: {} session error '{}' when processing message {} from node {}",
+						self.self_key_pair.public(),
+						S::type_name(),
+						err,
+						message,
+						sender);
+					session.on_session_error(self.self_key_pair.public(), err);
+					sessions.remove(&session_id);
+					return Some(session);
+				},
+			}
+		}
+	}
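	// NOTE: a hedged restatement, not part of this patch, of the buffering
	// pattern in `process_message` above: a message rejected with
	// `Error::TooEarlyForRequest` is parked in the per-session queue, and every
	// successfully applied message triggers replay of one queued message.
	// `TooEarly` and `apply` are illustrative stand-ins, not types of this module.
	//
	//	struct TooEarly<T>(T);
	//
	//	fn apply_with_requeue<T, F>(queue: &mut ::std::collections::VecDeque<T>, first: T, mut apply: F)
	//		where F: FnMut(T) -> Result<(), TooEarly<T>>
	//	{
	//		let mut next = Some(first);
	//		while let Some(message) = next.take() {
	//			match apply(message) {
	//				Ok(()) => next = queue.pop_front(), // success => replay a queued message
	//				Err(TooEarly(message)) => { queue.push_back(message); return; }, // park for later
	//			}
	//		}
	//	}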
+
+	/// Get or insert new session.
+	fn prepare_session<S: ClusterSession, SC: ClusterSessionCreator<S, D>, D>(
+		&self,
+		sessions: &ClusterSessionsContainer<S, SC, D>,
+		sender: &NodeId,
+		message: &Message
+	) -> Result<Arc<S>, Error>
+		where
+			Message: IntoSessionId<S::Id>
+	{
+		fn requires_all_connections(message: &Message) -> bool {
+			match *message {
+				Message::Generation(_) => true,
+				Message::ShareAdd(_) => true,
+				Message::ServersSetChange(_) => true,
+				_ => false,
+			}
+		}
+
+		// get or create new session, if required
+		let session_id = message.into_session_id()
+			.expect("into_session_id fails for cluster messages only;
+				only session messages are passed to prepare_session;
+				qed");
+		let is_initialization_message = message.is_initialization_message();
+		let is_delegation_message = message.is_delegation_message();
+		match is_initialization_message || is_delegation_message {
+			false => sessions.get(&session_id, true).ok_or(Error::NoActiveSessionWithId),
+			true => {
+				let creation_data = SC::creation_data_from_message(&message)?;
+				let master = if is_initialization_message {
+					*sender
+				} else {
+					*self.self_key_pair.public()
+				};
+				let cluster = create_cluster_view(
+					self.self_key_pair.clone(),
+					self.connections.clone(),
+					requires_all_connections(&message))?;
+
+				let nonce = Some(message.session_nonce().ok_or(Error::InvalidMessage)?);
+				let exclusive = message.is_exclusive_session_message();
+				sessions.insert(cluster, master, session_id, nonce, exclusive, creation_data)
+			},
+		}
+	}
+
+	/// Process single cluster message from the connection.
+	fn process_cluster_message(&self, connection: Arc<Connection>, message: ClusterMessage) {
+		match message {
+			ClusterMessage::KeepAlive(_) => {
+				let msg = Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse {
+					session_id: None,
+				}));
+				connection.send_message(msg)
+			},
+			ClusterMessage::KeepAliveResponse(msg) => if let Some(session_id) = msg.session_id {
+				self.sessions.on_session_keep_alive(connection.node_id(), session_id.into());
+			},
+			_ => warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}",
+				self.self_key_pair.public(), message, connection.node_id(), connection.node_address()),
+		}
+	}
+}
+
+impl MessageProcessor for SessionsMessageProcessor {
+	fn process_disconnect(&self, node: &NodeId) {
+		self.sessions.on_connection_timeout(node);
+	}
+
+	fn process_connection_message(&self, connection: Arc<Connection>, message: Message) {
+		trace!(target: "secretstore_net", "{}: received message {} from {}",
+			self.self_key_pair.public(), message, connection.node_id());
+
+		// error is ignored as we only process errors on session level
+		match message {
+			Message::Generation(message) => self
+				.process_message(&self.sessions.generation_sessions, connection, Message::Generation(message))
+				.map(|_| ()).unwrap_or_default(),
+			Message::Encryption(message) => self
+				.process_message(&self.sessions.encryption_sessions, connection, Message::Encryption(message))
+				.map(|_| ()).unwrap_or_default(),
+			Message::Decryption(message) => self
+				.process_message(&self.sessions.decryption_sessions, connection, Message::Decryption(message))
+				.map(|_| ()).unwrap_or_default(),
+			Message::SchnorrSigning(message) => self
+				.process_message(&self.sessions.schnorr_signing_sessions, connection, Message::SchnorrSigning(message))
+				.map(|_| ()).unwrap_or_default(),
+			Message::EcdsaSigning(message) => self
+				.process_message(&self.sessions.ecdsa_signing_sessions, connection, Message::EcdsaSigning(message))
+				.map(|_| ()).unwrap_or_default(),
Message::ServersSetChange(message) => { + let message = Message::ServersSetChange(message); + let is_initialization_message = message.is_initialization_message(); + let session = self.process_message(&self.sessions.admin_sessions, connection, message); + if is_initialization_message { + if let Some(session) = session { + self.servers_set_change_creator_connector + .set_key_servers_set_change_session(session.clone()); + } + } + }, + Message::KeyVersionNegotiation(message) => { + let session = self.process_message( + &self.sessions.negotiation_sessions, connection, Message::KeyVersionNegotiation(message)); + self.try_continue_session(session); + }, + Message::ShareAdd(message) => self.process_message( + &self.sessions.admin_sessions, connection, Message::ShareAdd(message)) + .map(|_| ()).unwrap_or_default(), + Message::Cluster(message) => self.process_cluster_message(connection, message), + } + } + + fn try_continue_session( + &self, + session: Option>> + ) { + if let Some(session) = session { + let meta = session.meta(); + let is_master_node = meta.self_node_id == meta.master_node_id; + if is_master_node && session.is_finished() { + self.sessions.negotiation_sessions.remove(&session.id()); + match session.wait() { + Ok(Some((version, master))) => match session.take_continue_action() { + Some(ContinueAction::Decrypt( + session, origin, is_shadow_decryption, is_broadcast_decryption + )) => { + let initialization_error = if self.self_key_pair.public() == &master { + session.initialize( + origin, version, is_shadow_decryption, is_broadcast_decryption) + } else { + session.delegate( + master, origin, version, is_shadow_decryption, is_broadcast_decryption) + }; + + if let Err(error) = initialization_error { + session.on_session_error(&meta.self_node_id, error); + self.sessions.decryption_sessions.remove(&session.id()); + } + }, + Some(ContinueAction::SchnorrSign(session, message_hash)) => { + let initialization_error = if self.self_key_pair.public() == &master { + session.initialize(version, message_hash) + } else { + session.delegate(master, version, message_hash) + }; + + if let Err(error) = initialization_error { + session.on_session_error(&meta.self_node_id, error); + self.sessions.schnorr_signing_sessions.remove(&session.id()); + } + }, + Some(ContinueAction::EcdsaSign(session, message_hash)) => { + let initialization_error = if self.self_key_pair.public() == &master { + session.initialize(version, message_hash) + } else { + session.delegate(master, version, message_hash) + }; + + if let Err(error) = initialization_error { + session.on_session_error(&meta.self_node_id, error); + self.sessions.ecdsa_signing_sessions.remove(&session.id()); + } + }, + None => (), + }, + Ok(None) => unreachable!("is_master_node; session is finished; + negotiation version always finished with result on master; + qed"), + Err(error) => match session.take_continue_action() { + Some(ContinueAction::Decrypt(session, _, _, _)) => { + session.on_session_error(&meta.self_node_id, error); + self.sessions.decryption_sessions.remove(&session.id()); + }, + Some(ContinueAction::SchnorrSign(session, _)) => { + session.on_session_error(&meta.self_node_id, error); + self.sessions.schnorr_signing_sessions.remove(&session.id()); + }, + Some(ContinueAction::EcdsaSign(session, _)) => { + session.on_session_error(&meta.self_node_id, error); + self.sessions.ecdsa_signing_sessions.remove(&session.id()); + }, + None => (), + }, + } + } + } + } + + fn maintain_sessions(&self) { + self.sessions.stop_stalled_sessions(); + 
diff --git a/secret-store/src/key_server_cluster/cluster_sessions.rs b/secret-store/src/key_server_cluster/cluster_sessions.rs
index cce4b18c3..53eec1334 100644
--- a/secret-store/src/key_server_cluster/cluster_sessions.rs
+++ b/secret-store/src/key_server_cluster/cluster_sessions.rs
@@ -21,8 +21,9 @@ use std::collections::{VecDeque, BTreeMap, BTreeSet};
 use parking_lot::{Mutex, RwLock, Condvar};
 use ethereum_types::H256;
 use ethkey::Secret;
-use key_server_cluster::{Error, NodeId, SessionId, Requester};
-use key_server_cluster::cluster::{Cluster, ClusterData, ClusterConfiguration, ClusterView};
+use key_server_cluster::{Error, NodeId, SessionId, Requester, NodeKeyPair};
+use key_server_cluster::cluster::{Cluster, ClusterConfiguration, ClusterView};
+use key_server_cluster::cluster_connections::ConnectionProvider;
 use key_server_cluster::connection_trigger::ServersSetChangeSessionCreatorConnector;
 use key_server_cluster::message::{self, Message};
 use key_server_cluster::generation_session::{SessionImpl as GenerationSessionImpl};
@@ -158,6 +159,8 @@ pub struct ClusterSessionsContainer<S: ClusterSession, SC: ClusterSessionCreator<S, D>, D> {
 	listeners: Mutex<Vec<Weak<ClusterSessionsListener<S>>>>,
 	/// Sessions container state.
 	container_state: Arc<Mutex<ClusterSessionsContainerState>>,
+	/// Do not actually remove sessions.
+	preserve_sessions: bool,
 	/// Phantom data.
 	_pd: ::std::marker::PhantomData<D>,
 }
@@ -229,6 +232,17 @@ impl ClusterSessions {
 		self.generation_sessions.creator.make_faulty_generation_sessions();
 	}
 
+	#[cfg(test)]
+	pub fn preserve_sessions(&mut self) {
+		self.generation_sessions.preserve_sessions = true;
+		self.encryption_sessions.preserve_sessions = true;
+		self.decryption_sessions.preserve_sessions = true;
+		self.schnorr_signing_sessions.preserve_sessions = true;
+		self.ecdsa_signing_sessions.preserve_sessions = true;
+		self.negotiation_sessions.preserve_sessions = true;
+		self.admin_sessions.preserve_sessions = true;
+	}
+
 	/// Send session-level keep-alive messages.
 	pub fn sessions_keep_alive(&self) {
 		self.admin_sessions.send_keep_alive(&*SERVERS_SET_CHANGE_SESSION_ID, &self.self_node_id);
@@ -272,6 +286,7 @@ impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: ClusterSessionCreator<S, D> {
 			sessions: RwLock::new(BTreeMap::new()),
 			listeners: Mutex::new(Vec::new()),
 			container_state: container_state,
+			preserve_sessions: false,
 			_pd: Default::default(),
 		}
 	}
@@ -379,9 +394,11 @@ impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: ClusterSessionCreator<S, D> {
 	}
 
 	fn do_remove(&self, session_id: &S::Id, sessions: &mut BTreeMap<S::Id, QueuedSession<S>>) {
-		if let Some(session) = sessions.remove(session_id) {
-			self.container_state.lock().on_session_completed();
-			self.notify_listeners(|l| l.on_session_removed(session.session.clone()));
+		if !self.preserve_sessions {
+			if let Some(session) = sessions.remove(session_id) {
+				self.container_state.lock().on_session_completed();
+				self.notify_listeners(|l| l.on_session_removed(session.session.clone()));
+			}
 		}
 	}
@@ -551,19 +568,22 @@ impl ClusterSession for AdminSession {
 		}
 	}
 }
-pub fn create_cluster_view(data: &Arc<ClusterData>, requires_all_connections: bool) -> Result<Arc<Cluster>, Error> {
-	let disconnected_nodes_count = data.connections.disconnected_nodes().len();
+
+pub fn create_cluster_view(self_key_pair: Arc<NodeKeyPair>, connections: Arc<ConnectionProvider>, requires_all_connections: bool) -> Result<Arc<Cluster>, Error> {
+	let mut connected_nodes = connections.connected_nodes()?;
+	let disconnected_nodes = connections.disconnected_nodes();
+
+	let disconnected_nodes_count = disconnected_nodes.len();
 	if requires_all_connections {
 		if disconnected_nodes_count != 0 {
 			return Err(Error::NodeDisconnected);
 		}
 	}
 
-	let mut connected_nodes = data.connections.connected_nodes()?;
-	connected_nodes.insert(data.self_key_pair.public().clone());
+	connected_nodes.insert(self_key_pair.public().clone());
 
 	let connected_nodes_count = connected_nodes.len();
-	Ok(Arc::new(ClusterView::new(data.clone(), connected_nodes, connected_nodes_count + disconnected_nodes_count)))
+	Ok(Arc::new(ClusterView::new(self_key_pair, connections, connected_nodes, connected_nodes_count + disconnected_nodes_count)))
 }
 
 #[cfg(test)]
@@ -583,13 +603,11 @@ mod tests {
 		let key_pair = Random.generate().unwrap();
 		let config = ClusterConfiguration {
 			self_key_pair: Arc::new(PlainNodeKeyPair::new(key_pair.clone())),
-			listen_address: ("127.0.0.1".to_owned(), 100_u16),
 			key_server_set: Arc::new(MapKeyServerSet::new(false, vec![(key_pair.public().clone(), format!("127.0.0.1:{}", 100).parse().unwrap())].into_iter().collect())),
-			allow_connecting_to_higher_nodes: false,
 			key_storage: Arc::new(DummyKeyStorage::default()),
 			acl_storage: Arc::new(DummyAclStorage::default()),
 			admin_public: Some(Random.generate().unwrap().public().clone()),
-			auto_migrate_enabled: false,
+			preserve_sessions: false,
 		};
 		ClusterSessions::new(&config, Arc::new(SimpleServersSetChangeSessionCreatorConnector {
			admin_public: Some(Random.generate().unwrap().public().clone()),
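The `preserve_sessions` switch above exists so tests can still inspect a session after it completes; outside of tests, `do_remove` drops finished sessions immediately. A minimal standalone sketch of the pattern (the `Session`/`Container` types here are illustrative stand-ins, not the crate's own):

```rust
use std::collections::BTreeMap;

struct Session { is_finished: bool }

struct Container {
    sessions: BTreeMap<u64, Session>,
    // Test-only switch: when set, completed sessions stay queryable.
    preserve_sessions: bool,
}

impl Container {
    // Mirrors ClusterSessionsContainer::do_remove: removal is skipped entirely
    // when the container was asked to preserve sessions.
    fn remove(&mut self, id: u64) {
        if !self.preserve_sessions {
            self.sessions.remove(&id);
        }
    }
}

fn main() {
    let mut container = Container { sessions: BTreeMap::new(), preserve_sessions: true };
    container.sessions.insert(1, Session { is_finished: true });
    container.remove(1);
    // The finished session is still visible to assertions.
    assert!(container.sessions[&1].is_finished);
}
```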
diff --git a/secret-store/src/key_server_cluster/connection_trigger.rs b/secret-store/src/key_server_cluster/connection_trigger.rs
index 3ea1a0a30..7b3649861 100644
--- a/secret-store/src/key_server_cluster/connection_trigger.rs
+++ b/secret-store/src/key_server_cluster/connection_trigger.rs
@@ -21,10 +21,12 @@ use std::sync::Arc;
 use ethereum_types::H256;
 use ethkey::Public;
 use key_server_cluster::{KeyServerSet, KeyServerSetSnapshot};
-use key_server_cluster::cluster::{ClusterClient, ClusterConnectionsData};
+use key_server_cluster::cluster::{ClusterConfiguration, ServersSetChangeParams};
 use key_server_cluster::cluster_sessions::AdminSession;
+use key_server_cluster::cluster_connections::{Connection};
+use key_server_cluster::cluster_connections_net::{NetConnectionsContainer};
 use types::{Error, NodeId};
-use {NodeKeyPair};
+use NodeKeyPair;
 
 #[derive(Debug, Clone, Copy, PartialEq)]
 /// Describes which maintain() call is required.
@@ -45,10 +47,10 @@ pub trait ConnectionTrigger: Send + Sync {
 	fn on_connection_established(&mut self, node: &NodeId) -> Option<Maintain>;
 	/// When connection is closed.
 	fn on_connection_closed(&mut self, node: &NodeId) -> Option<Maintain>;
-	/// Maintain active sessions.
-	fn maintain_session(&mut self, sessions: &ClusterClient);
+	/// Maintain active sessions. Returns Some if a servers set change session needs to be started.
+	fn maintain_session(&mut self) -> Option<ServersSetChangeParams>;
 	/// Maintain active connections.
-	fn maintain_connections(&mut self, connections: &mut ClusterConnectionsData);
+	fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer);
 	/// Return connector for the servers set change session creator.
 	fn servers_set_change_creator_connector(&self) -> Arc<ServersSetChangeSessionCreatorConnector>;
 }
@@ -95,6 +97,11 @@ pub struct TriggerConnections {
 }
 
 impl SimpleConnectionTrigger {
+	/// Create new simple connection trigger from cluster configuration.
+	pub fn with_config(config: &ClusterConfiguration) -> Self {
+		Self::new(config.key_server_set.clone(), config.self_key_pair.clone(), config.admin_public)
+	}
+
 	/// Create new simple connection trigger.
 	pub fn new(key_server_set: Arc<KeyServerSet>, self_key_pair: Arc<NodeKeyPair>, admin_public: Option<Public>) -> Self {
 		SimpleConnectionTrigger {
@@ -124,10 +131,11 @@ impl ConnectionTrigger for SimpleConnectionTrigger {
 		None
 	}
 
-	fn maintain_session(&mut self, _sessions: &ClusterClient) {
+	fn maintain_session(&mut self) -> Option<ServersSetChangeParams> {
+		None
 	}
 
-	fn maintain_connections(&mut self, connections: &mut ClusterConnectionsData) {
+	fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer) {
 		self.connections.maintain(ConnectionsAction::ConnectToCurrentSet, connections, &self.key_server_set.snapshot())
 	}
 
@@ -146,7 +154,7 @@ impl ServersSetChangeSessionCreatorConnector for SimpleServersSetChangeSessionCreatorConnector {
 }
 
 impl TriggerConnections {
-	pub fn maintain(&self, action: ConnectionsAction, data: &mut ClusterConnectionsData, server_set: &KeyServerSetSnapshot) {
+	pub fn maintain(&self, action: ConnectionsAction, data: &mut NetConnectionsContainer, server_set: &KeyServerSetSnapshot) {
 		match action {
 			ConnectionsAction::ConnectToCurrentSet => {
 				adjust_connections(self.self_key_pair.public(), data, &server_set.current_set);
@@ -159,7 +167,11 @@ impl TriggerConnections {
 	}
 }
 
-fn adjust_connections(self_node_id: &NodeId, data: &mut ClusterConnectionsData, required_set: &BTreeMap<NodeId, SocketAddr>) {
+fn adjust_connections(
+	self_node_id: &NodeId,
+	data: &mut NetConnectionsContainer,
+	required_set: &BTreeMap<NodeId, SocketAddr>
+) {
 	if !required_set.contains_key(self_node_id) {
 		if !data.is_isolated {
 			trace!(target: "secretstore_net", "{}: isolated from cluster", self_node_id);
@@ -204,13 +216,13 @@ mod tests {
 	use std::collections::BTreeSet;
 	use std::sync::Arc;
 	use ethkey::{Random, Generator};
-	use key_server_cluster::cluster::ClusterConnectionsData;
 	use key_server_cluster::{MapKeyServerSet, PlainNodeKeyPair, KeyServerSetSnapshot, KeyServerSetMigration};
+	use key_server_cluster::cluster_connections_net::NetConnectionsContainer;
 	use super::{Maintain, TriggerConnections, ConnectionsAction, ConnectionTrigger, SimpleConnectionTrigger,
 		select_nodes_to_disconnect, adjust_connections};
 
-	fn default_connection_data() -> ClusterConnectionsData {
-		ClusterConnectionsData {
+	fn default_connection_data() -> NetConnectionsContainer {
+		NetConnectionsContainer {
 			is_isolated: false,
 			nodes: Default::default(),
 			connections: Default::default(),
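The reworked `adjust_connections` keeps the node set in sync with the required set: nodes that appear in the required set are added and dialed, nodes that left it are dropped. A standalone sketch of that reconciliation, with `NodeId` simplified to `u64` (all names here are illustrative):

```rust
use std::collections::{BTreeMap, BTreeSet};

// Which nodes must be dialed and which dropped, given the currently known set
// and the required set (node id -> address).
fn connection_delta(
    self_node: u64,
    known: &BTreeSet<u64>,
    required: &BTreeMap<u64, String>,
) -> (Vec<u64>, Vec<u64>) {
    let to_connect = required.keys()
        .filter(|id| **id != self_node && !known.contains(id))
        .cloned()
        .collect();
    let to_disconnect = known.iter()
        .filter(|id| !required.contains_key(id))
        .cloned()
        .collect();
    (to_connect, to_disconnect)
}

fn main() {
    let known: BTreeSet<u64> = vec![2, 3].into_iter().collect();
    let required: BTreeMap<u64, String> = vec![
        (1, "10.0.0.1:8083".to_owned()),
        (2, "10.0.0.2:8083".to_owned()),
    ].into_iter().collect();

    // Node 1 is new and must be connected; node 3 left the set and is dropped.
    let (connect, disconnect) = connection_delta(0, &known, &required);
    assert_eq!(connect, vec![1]);
    assert_eq!(disconnect, vec![3]);
}
```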
diff --git a/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs b/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs
index 9607949c5..559bab18c 100644
--- a/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs
+++ b/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs
@@ -21,7 +21,8 @@ use ethereum_types::H256;
 use ethkey::Public;
 use parking_lot::Mutex;
 use key_server_cluster::{KeyServerSet, KeyServerSetSnapshot, KeyServerSetMigration, is_migration_required};
-use key_server_cluster::cluster::{ClusterClient, ClusterConnectionsData};
+use key_server_cluster::cluster::{ClusterConfiguration, ServersSetChangeParams};
+use key_server_cluster::cluster_connections_net::NetConnectionsContainer;
 use key_server_cluster::cluster_sessions::{AdminSession, ClusterSession};
 use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash;
 use key_server_cluster::connection_trigger::{Maintain, ConnectionsAction, ConnectionTrigger,
@@ -110,6 +111,11 @@ struct TriggerSession {
 }
 
 impl ConnectionTriggerWithMigration {
+	/// Create new connection trigger with migration from cluster configuration.
+	pub fn with_config(config: &ClusterConfiguration) -> Self {
+		Self::new(config.key_server_set.clone(), config.self_key_pair.clone())
+	}
+
 	/// Create new trigger with migration.
 	pub fn new(key_server_set: Arc<KeyServerSet>, self_key_pair: Arc<NodeKeyPair>) -> Self {
 		let snapshot = key_server_set.snapshot();
@@ -187,13 +193,11 @@ impl ConnectionTrigger for ConnectionTriggerWithMigration {
 		self.do_maintain()
 	}
 
-	fn maintain_session(&mut self, sessions: &ClusterClient) {
-		if let Some(action) = self.session_action {
-			self.session.maintain(action, sessions, &self.snapshot);
-		}
+	fn maintain_session(&mut self) -> Option<ServersSetChangeParams> {
+		self.session_action.and_then(|action| self.session.maintain(action, &self.snapshot))
 	}
 
-	fn maintain_connections(&mut self, connections: &mut ClusterConnectionsData) {
+	fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer) {
 		if let Some(action) = self.connections_action {
 			self.connections.maintain(action, connections, &self.snapshot);
 		}
@@ -255,30 +259,42 @@ impl TriggerSession {
 	}
 
 	/// Maintain session.
-	pub fn maintain(&mut self, action: SessionAction, sessions: &ClusterClient, server_set: &KeyServerSetSnapshot) {
-		if action == SessionAction::Start { // all other actions are processed in maintain
-			let migration = server_set.migration.as_ref()
-				.expect("action is Start only when migration is started (see maintain_session); qed");
+	pub fn maintain(
+		&mut self,
+		action: SessionAction,
+		server_set: &KeyServerSetSnapshot
+	) -> Option<ServersSetChangeParams> {
+		if action != SessionAction::Start { // all other actions are processed in maintain
+			return None;
+		}
+		let migration = server_set.migration.as_ref()
+			.expect("action is Start only when migration is started (see maintain_session); qed");
 
-			// we assume that authorities that are removed from the servers set are either offline, or malicious
-			// => they're not involved in ServersSetChangeSession
-			// => both sets are the same
-			let old_set: BTreeSet<_> = migration.set.keys().cloned().collect();
-			let new_set = old_set.clone();
+		// we assume that authorities that are removed from the servers set are either offline, or malicious
+		// => they're not involved in ServersSetChangeSession
+		// => both sets are the same
+		let old_set: BTreeSet<_> = migration.set.keys().cloned().collect();
+		let new_set = old_set.clone();
 
-			let signatures = self.self_key_pair.sign(&ordered_nodes_hash(&old_set))
-				.and_then(|old_set_signature| self.self_key_pair.sign(&ordered_nodes_hash(&new_set))
-					.map(|new_set_signature| (old_set_signature, new_set_signature)))
-				.map_err(Into::into);
-			let session = signatures.and_then(|(old_set_signature, new_set_signature)|
-				sessions.new_servers_set_change_session(None, Some(migration.id.clone()), new_set, old_set_signature, new_set_signature));
+		let signatures = self.self_key_pair.sign(&ordered_nodes_hash(&old_set))
+			.and_then(|old_set_signature| self.self_key_pair.sign(&ordered_nodes_hash(&new_set))
+				.map(|new_set_signature| (old_set_signature, new_set_signature)));
 
-			match session {
-				Ok(_) => trace!(target: "secretstore_net", "{}: started auto-migrate session",
-					self.self_key_pair.public()),
-				Err(err) => trace!(target: "secretstore_net", "{}: failed to start auto-migrate session with: {}",
-					self.self_key_pair.public(), err),
-			}
+		match signatures {
+			Ok((old_set_signature, new_set_signature)) => Some(ServersSetChangeParams {
+				session_id: None,
+				migration_id: Some(migration.id),
+				new_nodes_set: new_set,
+				old_set_signature,
+				new_set_signature,
+			}),
+			Err(err) => {
+				trace!(
+					target: "secretstore_net",
+					"{}: failed to sign servers set for auto-migrate session with: {}",
+					self.self_key_pair.public(), err);
+				None
+			},
 		}
 	}
 }
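The signature change above is the heart of this refactoring: `maintain_session` no longer needs a `ClusterClient`, it merely reports that a servers set change session should be started. A minimal sketch of how a caller consumes that return value (all types here are simplified stand-ins for the crate's own):

```rust
// Stand-in; the real ServersSetChangeParams carries the session id, migration
// id, new nodes set and both set signatures.
struct ServersSetChangeParams;

trait ConnectionTrigger {
    // Returns Some when a servers set change session must be started.
    fn maintain_session(&mut self) -> Option<ServersSetChangeParams>;
}

trait MessageProcessor {
    fn start_servers_set_change_session(&self, params: ServersSetChangeParams) -> Result<(), String>;
}

// Mirrors maintain_connection_trigger: the trigger reports the parameters and
// the maintenance loop forwards them to the message processor.
fn maintain(trigger: &mut dyn ConnectionTrigger, processor: &dyn MessageProcessor) {
    if let Some(params) = trigger.maintain_session() {
        if let Err(err) = processor.start_servers_set_change_session(params) {
            eprintln!("failed to start auto-migrate session: {}", err);
        }
    }
}
```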
diff --git a/secret-store/src/key_server_cluster/mod.rs b/secret-store/src/key_server_cluster/mod.rs
index 3db29ba7e..fc46e1031 100644
--- a/secret-store/src/key_server_cluster/mod.rs
+++ b/secret-store/src/key_server_cluster/mod.rs
@@ -23,7 +23,8 @@ pub use super::key_storage::{KeyStorage, DocumentKeyShare, DocumentKeyShareVersion};
 pub use super::key_server_set::{is_migration_required, KeyServerSet, KeyServerSetSnapshot, KeyServerSetMigration};
 pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic,
 	SerializableRequester, SerializableMessageHash, SerializableAddress};
-pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient};
+pub use self::cluster::{new_network_cluster, ClusterCore, ClusterConfiguration, ClusterClient};
+pub use self::cluster_connections_net::NetConnectionsManagerConfig;
 pub use self::cluster_sessions::{ClusterSession, ClusterSessionsListener};
 #[cfg(test)]
 pub use self::cluster::tests::DummyClusterClient;
@@ -70,6 +71,9 @@ pub use self::client_sessions::signing_session_ecdsa;
 pub use self::client_sessions::signing_session_schnorr;
 
 mod cluster;
+mod cluster_connections;
+mod cluster_connections_net;
+mod cluster_message_processor;
 mod cluster_sessions;
 mod cluster_sessions_creator;
 mod connection_trigger;
diff --git a/secret-store/src/node_key_pair.rs b/secret-store/src/node_key_pair.rs
index e7227d754..f50f75ad1 100644
--- a/secret-store/src/node_key_pair.rs
+++ b/secret-store/src/node_key_pair.rs
@@ -29,6 +29,11 @@ impl PlainNodeKeyPair {
 			key_pair: key_pair,
 		}
 	}
+
+	#[cfg(test)]
+	pub fn key_pair(&self) -> &KeyPair {
+		&self.key_pair
+	}
 }
 
 impl NodeKeyPair for PlainNodeKeyPair {
diff --git a/secret-store/src/key_server_cluster/cluster_connections.rs b/secret-store/src/key_server_cluster/cluster_connections.rs
new file mode 100644
index 000000000..b484e6d8e
--- /dev/null
+++ b/secret-store/src/key_server_cluster/cluster_connections.rs
@@ -0,0 +1,176 @@
+// Copyright 2015-2018 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::BTreeSet;
+use std::sync::Arc;
+use key_server_cluster::{Error, NodeId};
+use key_server_cluster::message::Message;
+
+/// Connection to the single node. Provides basic information about the connected node and
+/// allows sending messages to this node.
+pub trait Connection: Send + Sync {
+	/// Is this an inbound connection? This only matters when both nodes are simultaneously establishing
+	/// two connections to each other. The agreement is that the inbound connection from the node with
+	/// lower NodeId is used and the other connection is closed.
+	fn is_inbound(&self) -> bool;
+	/// Returns id of the connected node.
+	fn node_id(&self) -> &NodeId;
+	/// Returns 'address' of the node to use in traces.
+	fn node_address(&self) -> String;
+	/// Send message to the connected node.
+	fn send_message(&self, message: Message);
+}
+
+/// Connections manager. Responsible for keeping us connected to all required nodes.
+pub trait ConnectionManager: 'static + Send + Sync {
+	/// Returns shared reference to connections provider.
+	fn provider(&self) -> Arc<ConnectionProvider>;
+	/// Try to reach all disconnected nodes immediately. This method is exposed mostly for
+	/// tests, where all 'nodes' first start listening for incoming connections and only
+	/// then actually connect to each other.
+	fn connect(&self);
+}
+
+/// Connections provider. Holds all active connections and the set of nodes that we need to
+/// connect to. At any moment a connection could be lost and the set of connected/disconnected
+/// nodes could change (on behalf of the connection manager).
+/// Clone operation should be cheap (Arc).
+pub trait ConnectionProvider: Send + Sync {
+	/// Returns the set of currently connected nodes. Error is returned when our node is
+	/// not a part of the cluster ('isolated' node).
+	fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error>;
+	/// Returns the set of currently disconnected nodes.
+	fn disconnected_nodes(&self) -> BTreeSet<NodeId>;
+	/// Returns the reference to the active node connection or None if the node is not connected.
+	fn connection(&self, node: &NodeId) -> Option<Arc<Connection>>;
+}
+
+#[cfg(test)]
+pub mod tests {
+	use std::collections::{BTreeSet, VecDeque};
+	use std::sync::Arc;
+	use std::sync::atomic::{AtomicBool, Ordering};
+	use parking_lot::Mutex;
+	use key_server_cluster::{Error, NodeId};
+	use key_server_cluster::message::Message;
+	use super::{ConnectionManager, Connection, ConnectionProvider};
+
+	/// Shared messages queue.
+	pub type MessagesQueue = Arc<Mutex<VecDeque<(NodeId, NodeId, Message)>>>;
+
+	/// Single node connections.
+	pub struct TestConnections {
+		node: NodeId,
+		is_isolated: AtomicBool,
+		connected_nodes: Mutex<BTreeSet<NodeId>>,
+		disconnected_nodes: Mutex<BTreeSet<NodeId>>,
+		messages: MessagesQueue,
+	}
+
+	/// Single connection.
+	pub struct TestConnection {
+		from: NodeId,
+		to: NodeId,
+		messages: MessagesQueue,
+	}
+
+	impl TestConnections {
+		pub fn isolate(&self) {
+			let connected_nodes = ::std::mem::replace(&mut *self.connected_nodes.lock(), Default::default());
+			self.is_isolated.store(true, Ordering::Relaxed);
+			self.disconnected_nodes.lock().extend(connected_nodes)
+		}
+
+		pub fn disconnect(&self, node: NodeId) {
+			self.connected_nodes.lock().remove(&node);
+			self.disconnected_nodes.lock().insert(node);
+		}
+
+		pub fn exclude(&self, node: NodeId) {
+			self.connected_nodes.lock().remove(&node);
+			self.disconnected_nodes.lock().remove(&node);
+		}
+
+		pub fn include(&self, node: NodeId) {
+			self.connected_nodes.lock().insert(node);
+		}
+	}
+
+	impl ConnectionManager for Arc<TestConnections> {
+		fn provider(&self) -> Arc<ConnectionProvider> {
+			self.clone()
+		}
+
+		fn connect(&self) {}
+	}
+
+	impl ConnectionProvider for TestConnections {
+		fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error> {
+			match self.is_isolated.load(Ordering::Relaxed) {
+				false => Ok(self.connected_nodes.lock().clone()),
+				true => Err(Error::NodeDisconnected),
+			}
+		}
+
+		fn disconnected_nodes(&self) -> BTreeSet<NodeId> {
+			self.disconnected_nodes.lock().clone()
+		}
+
+		fn connection(&self, node: &NodeId) -> Option<Arc<Connection>> {
+			match self.connected_nodes.lock().contains(node) {
+				true => Some(Arc::new(TestConnection {
+					from: self.node,
+					to: *node,
+					messages: self.messages.clone(),
+				})),
+				false => None,
+			}
+		}
+	}
+
+	impl Connection for TestConnection {
+		fn is_inbound(&self) -> bool {
+			false
+		}
+
+		fn node_id(&self) -> &NodeId {
+			&self.to
+		}
+
+		fn node_address(&self) -> String {
+			format!("{}", self.to)
+		}
+
+		fn send_message(&self, message: Message) {
+			self.messages.lock().push_back((self.from, self.to, message))
+		}
+	}
+
+	pub fn new_test_connections(
+		messages: MessagesQueue,
+		node: NodeId,
+		mut nodes: BTreeSet<NodeId>
+	) -> Arc<TestConnections> {
+		let is_isolated = !nodes.remove(&node);
+		Arc::new(TestConnections {
+			node,
+			is_isolated: AtomicBool::new(is_isolated),
+			connected_nodes: Mutex::new(nodes),
+			disconnected_nodes: Default::default(),
+			messages,
+		})
+	}
+}
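The `TestConnections` double above makes connectivity scenarios scriptable. A sketch of how a crate-internal test might drive it, with `n0`/`n1`/`n2` standing in for real `NodeId` values (hypothetical test, not part of this change):

```rust
// Hypothetical crate-internal test; n0, n1, n2 are placeholder NodeId values.
#[test]
fn test_connections_track_connectivity() {
    let messages = MessagesQueue::default();
    let nodes: BTreeSet<NodeId> = vec![n0, n1, n2].into_iter().collect();
    let connections = new_test_connections(messages, n0, nodes);

    // n1 and n2 start out connected.
    assert_eq!(connections.connected_nodes().unwrap().len(), 2);

    // A lost peer moves from the connected set to the disconnected set.
    connections.disconnect(n1);
    assert_eq!(connections.connected_nodes().unwrap().len(), 1);
    assert_eq!(connections.disconnected_nodes().len(), 1);

    // Isolation empties the connected set; connected_nodes() then errors.
    connections.isolate();
    assert!(connections.connected_nodes().is_err());
}
```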
diff --git a/secret-store/src/key_server_cluster/cluster_connections_net.rs b/secret-store/src/key_server_cluster/cluster_connections_net.rs
new file mode 100644
index 000000000..bda7f7dd2
--- /dev/null
+++ b/secret-store/src/key_server_cluster/cluster_connections_net.rs
@@ -0,0 +1,539 @@
+// Copyright 2015-2018 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::{BTreeMap, BTreeSet};
+use std::collections::btree_map::Entry;
+use std::io;
+use std::net::{SocketAddr, IpAddr};
+use std::sync::Arc;
+use std::time::{Duration, Instant};
+use futures::{future, Future, Stream};
+use parking_lot::{Mutex, RwLock};
+use tokio::net::{TcpListener, TcpStream};
+use tokio::timer::{Interval, timeout::Error as TimeoutError};
+use tokio_io::IoFuture;
+use ethkey::KeyPair;
+use parity_runtime::Executor;
+use key_server_cluster::{Error, NodeId, ClusterConfiguration, NodeKeyPair};
+use key_server_cluster::cluster_connections::{ConnectionProvider, Connection, ConnectionManager};
+use key_server_cluster::connection_trigger::{Maintain, ConnectionTrigger};
+use key_server_cluster::cluster_message_processor::MessageProcessor;
+use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream,
+	read_encrypted_message, WriteMessage, write_encrypted_message};
+use key_server_cluster::message::{self, ClusterMessage, Message};
+use key_server_cluster::net::{accept_connection as io_accept_connection,
+	connect as io_connect, Connection as IoConnection};
+
+/// Empty future.
+pub type BoxedEmptyFuture = Box<Future<Item = (), Error = ()> + Send>;
+
+/// Maintain interval (seconds). Every MAINTAIN_INTERVAL seconds node:
+/// 1) checks if connected nodes are responding to KeepAlive messages
+/// 2) tries to connect to disconnected nodes
+/// 3) checks if enc/dec sessions are timed out
+const MAINTAIN_INTERVAL: u64 = 10;
+
+/// When no messages have been received from node within KEEP_ALIVE_SEND_INTERVAL,
+/// we must send KeepAlive message to the node to check if it still responds to messages.
+const KEEP_ALIVE_SEND_INTERVAL: Duration = Duration::from_secs(30);
+/// When no messages have been received from node within KEEP_ALIVE_DISCONNECT_INTERVAL,
+/// we must treat this node as non-responding && disconnect from it.
+const KEEP_ALIVE_DISCONNECT_INTERVAL: Duration = Duration::from_secs(60);
+
+/// Network connection manager configuration.
+pub struct NetConnectionsManagerConfig {
+	/// Allow connecting to 'higher' nodes.
+	pub allow_connecting_to_higher_nodes: bool,
+	/// Interface to listen to.
+	pub listen_address: (String, u16),
+	/// True if we should autostart key servers set change session when servers set changes.
+	/// This will only work when servers set is configured using KeyServerSet contract.
+	pub auto_migrate_enabled: bool,
+}
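A plausible construction of this config, inside whatever function builds the cluster (the values below are illustrative, not defaults):

```rust
// Illustrative values only; listen on all interfaces, port 8083.
let net_config = NetConnectionsManagerConfig {
    allow_connecting_to_higher_nodes: true,
    listen_address: ("0.0.0.0".to_owned(), 8083),
    auto_migrate_enabled: false,
};
```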
+
+/// Network connections manager.
+pub struct NetConnectionsManager {
+	/// Address we're listening on for incoming connections.
+	listen_address: SocketAddr,
+	/// Shared cluster connections data reference.
+	data: Arc<NetConnectionsData>,
+}
+
+/// Network connections data. Shared among NetConnectionsManager and spawned futures.
+struct NetConnectionsData {
+	/// Allow connecting to 'higher' nodes.
+	allow_connecting_to_higher_nodes: bool,
+	/// Reference to tokio task executor.
+	executor: Executor,
+	/// Key pair of this node.
+	self_key_pair: Arc<NodeKeyPair>,
+	/// Network messages processor.
+	message_processor: Arc<MessageProcessor>,
+	/// Connections trigger.
+	trigger: Mutex<Box<ConnectionTrigger>>,
+	/// Mutable connection data.
+	container: Arc<RwLock<NetConnectionsContainer>>,
+}
+
+/// Network connections container. This is the only mutable data of NetConnectionsManager.
+/// The set of nodes is mutated by the connection trigger and the connections set is also
+/// mutated by spawned futures.
+pub struct NetConnectionsContainer {
+	/// Is this node isolated from cluster?
+	pub is_isolated: bool,
+	/// Current key servers set.
+	pub nodes: BTreeMap<NodeId, SocketAddr>,
+	/// Active connections to key servers.
+	pub connections: BTreeMap<NodeId, Arc<NetConnection>>,
+}
+
+/// Network connection to single key server node.
+pub struct NetConnection {
+	executor: Executor,
+	/// Id of the peer node.
+	node_id: NodeId,
+	/// Address of the peer node.
+	node_address: SocketAddr,
+	/// Is this inbound (true) or outbound (false) connection?
+	is_inbound: bool,
+	/// Key pair that is used to encrypt connection's messages.
+	key: KeyPair,
+	/// Last message time.
+	last_message_time: RwLock<Instant>,
+	/// Underlying TCP stream.
+	stream: SharedTcpStream,
+}
+
+impl NetConnectionsManager {
+	/// Create new network connections manager.
+	pub fn new(
+		executor: Executor,
+		message_processor: Arc<MessageProcessor>,
+		trigger: Box<ConnectionTrigger>,
+		container: Arc<RwLock<NetConnectionsContainer>>,
+		config: &ClusterConfiguration,
+		net_config: NetConnectionsManagerConfig,
+	) -> Result<Self, Error> {
+		let listen_address = make_socket_address(
+			&net_config.listen_address.0,
+			net_config.listen_address.1)?;
+
+		Ok(NetConnectionsManager {
+			listen_address,
+			data: Arc::new(NetConnectionsData {
+				allow_connecting_to_higher_nodes: net_config.allow_connecting_to_higher_nodes,
+				executor,
+				message_processor,
+				self_key_pair: config.self_key_pair.clone(),
+				trigger: Mutex::new(trigger),
+				container,
+			}),
+		})
+	}
+
+	/// Start listening for connections and schedule connections maintenance.
+	pub fn start(&self) -> Result<(), Error> {
+		net_listen(&self.listen_address, self.data.clone())?;
+		net_schedule_maintain(self.data.clone());
+		Ok(())
+	}
+}
+
+impl ConnectionManager for NetConnectionsManager {
+	fn provider(&self) -> Arc<ConnectionProvider> {
+		self.data.container.clone()
+	}
+
+	fn connect(&self) {
+		net_connect_disconnected(self.data.clone());
+	}
+}
+
+impl ConnectionProvider for RwLock<NetConnectionsContainer> {
+	fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error> {
+		let connections = self.read();
+		if connections.is_isolated {
+			return Err(Error::NodeDisconnected);
+		}
+
+		Ok(connections.connections.keys().cloned().collect())
+	}
+
+	fn disconnected_nodes(&self) -> BTreeSet<NodeId> {
+		let connections = self.read();
+		connections.nodes.keys()
+			.filter(|node_id| !connections.connections.contains_key(node_id))
+			.cloned()
+			.collect()
+	}
+
+	fn connection(&self, node: &NodeId) -> Option<Arc<Connection>> {
+		match self.read().connections.get(node).cloned() {
+			Some(connection) => Some(connection),
+			None => None,
+		}
+	}
+}
+
+impl NetConnection {
+	/// Create new connection.
+	pub fn new(executor: Executor, is_inbound: bool, connection: IoConnection) -> NetConnection {
+		NetConnection {
+			executor,
+			node_id: connection.node_id,
+			node_address: connection.address,
+			is_inbound: is_inbound,
+			stream: connection.stream,
+			key: connection.key,
+			last_message_time: RwLock::new(Instant::now()),
+		}
+	}
+
+	/// Get last message time.
+	pub fn last_message_time(&self) -> Instant {
+		*self.last_message_time.read()
+	}
+
+	/// Update last message time.
+	pub fn set_last_message_time(&self, last_message_time: Instant) {
+		*self.last_message_time.write() = last_message_time
+	}
+
+	/// Returns future that sends encrypted message over this connection.
+	pub fn send_message_future(&self, message: Message) -> WriteMessage<SharedTcpStream> {
+		write_encrypted_message(self.stream.clone(), &self.key, message)
+	}
+
+	/// Returns future that reads encrypted message from this connection.
+	pub fn read_message_future(&self) -> ReadMessage<SharedTcpStream> {
+		read_encrypted_message(self.stream.clone(), self.key.clone())
+	}
+}
+
+impl Connection for NetConnection {
+	fn is_inbound(&self) -> bool {
+		self.is_inbound
+	}
+
+	fn node_id(&self) -> &NodeId {
+		&self.node_id
+	}
+
+	fn node_address(&self) -> String {
+		format!("{}", self.node_address)
+	}
+
+	fn send_message(&self, message: Message) {
+		execute(&self.executor, self.send_message_future(message).then(|_| Ok(())));
+	}
+}
+
+impl NetConnectionsData {
+	/// Returns all active connections.
+	pub fn active_connections(&self) -> Vec<Arc<NetConnection>> {
+		self.container.read().connections.values().cloned().collect()
+	}
+
+	/// Returns all nodes we're currently disconnected from, along with their addresses.
+	pub fn disconnected_nodes(&self) -> Vec<(NodeId, SocketAddr)> {
+		let container = self.container.read();
+		container.nodes.iter()
+			.filter(|(node_id, _)| !container.connections.contains_key(node_id))
+			.map(|(node_id, addr)| (*node_id, *addr))
+			.collect()
+	}
+
+	/// Try to insert new connection. Returns true if the connection has been inserted.
+	/// Returns false (and ignores the connection) if:
+	/// - we do not expect connection from this node
+	/// - we are already connected to the node and the existing connection 'supersedes'
+	///   the new connection by agreement
+	pub fn insert(&self, connection: Arc<NetConnection>) -> bool {
+		let node = *connection.node_id();
+		let mut container = self.container.write();
+		if !container.nodes.contains_key(&node) {
+			trace!(target: "secretstore_net", "{}: ignoring unknown connection from {} at {}",
+				self.self_key_pair.public(), node, connection.node_address());
+			return false;
+		}
+
+		if container.connections.contains_key(&node) {
+			// we have already connected to the same node
+			// the agreement is that node with lower id must establish connection to node with higher id
+			if (*self.self_key_pair.public() < node && connection.is_inbound())
+				|| (*self.self_key_pair.public() > node && !connection.is_inbound()) {
+				return false;
+			}
+		}
+
+		trace!(target: "secretstore_net",
+			"{}: inserting connection to {} at {}. Connected to {} of {} nodes",
+			self.self_key_pair.public(), node, connection.node_address(),
+			container.connections.len() + 1, container.nodes.len());
+		container.connections.insert(node, connection);
+
+		true
+	}
+
+	/// Tries to remove connection. Returns true if connection has been removed.
+	/// Returns false if we do not know this connection.
+	pub fn remove(&self, connection: &NetConnection) -> bool {
+		let node_id = *connection.node_id();
+		let is_inbound = connection.is_inbound();
+		let mut container = self.container.write();
+		if let Entry::Occupied(entry) = container.connections.entry(node_id) {
+			if entry.get().is_inbound() != is_inbound {
+				return false;
+			}
+
+			trace!(target: "secretstore_net", "{}: removing connection to {} at {}",
+				self.self_key_pair.public(), node_id, entry.get().node_address());
+			entry.remove_entry();
+
+			true
+		} else {
+			false
+		}
+	}
+}
+
+/// Listen for incoming connections.
+fn net_listen(
+	listen_address: &SocketAddr,
+	data: Arc<NetConnectionsData>,
+) -> Result<(), Error> {
+	execute(&data.executor, net_listen_future(listen_address, data.clone())?);
+	Ok(())
+}
+
+/// Future that listens for incoming connections.
+fn net_listen_future(
+	listen_address: &SocketAddr,
+	data: Arc<NetConnectionsData>,
+) -> Result<BoxedEmptyFuture, Error> {
+	Ok(Box::new(TcpListener::bind(listen_address)?
+		.incoming()
+		.and_then(move |stream| {
+			net_accept_connection(data.clone(), stream);
+			Ok(())
+		})
+		.for_each(|_| Ok(()))
+		.then(|_| future::ok(()))))
+}
+
+/// Accept incoming connection.
+fn net_accept_connection(
+	data: Arc<NetConnectionsData>,
+	stream: TcpStream,
+) {
+	execute(&data.executor, net_accept_connection_future(data.clone(), stream));
+}
+
+/// Future that accepts incoming connection.
+fn net_accept_connection_future(data: Arc<NetConnectionsData>, stream: TcpStream) -> BoxedEmptyFuture {
+	Box::new(io_accept_connection(stream, data.self_key_pair.clone())
+		.then(move |result| net_process_connection_result(data, None, result))
+		.then(|_| future::ok(())))
+}
+
+/// Connect to remote node.
+fn net_connect(
+	data: Arc<NetConnectionsData>,
+	remote: SocketAddr,
+) {
+	execute(&data.executor, net_connect_future(data.clone(), remote));
+}
+
+/// Future that connects to remote node.
+fn net_connect_future(
+	data: Arc<NetConnectionsData>,
+	remote: SocketAddr,
+) -> BoxedEmptyFuture {
+	let disconnected_nodes = data.container.disconnected_nodes();
+	Box::new(io_connect(&remote, data.self_key_pair.clone(), disconnected_nodes)
+		.then(move |result| net_process_connection_result(data, Some(remote), result))
+		.then(|_| future::ok(())))
+}
+
+/// Process network connection result.
+fn net_process_connection_result(
+	data: Arc<NetConnectionsData>,
+	outbound_addr: Option<SocketAddr>,
+	result: Result<DeadlineStatus<Result<IoConnection, Error>>, TimeoutError<io::Error>>,
+) -> IoFuture<Result<(), Error>> {
+	match result {
+		Ok(DeadlineStatus::Meet(Ok(connection))) => {
+			let connection = Arc::new(NetConnection::new(data.executor.clone(), outbound_addr.is_none(), connection));
+			if data.insert(connection.clone()) {
+				let maintain_action = data.trigger.lock().on_connection_established(connection.node_id());
+				maintain_connection_trigger(data.clone(), maintain_action);
+
+				return net_process_connection_messages(data, connection);
+			}
+		},
+		Ok(DeadlineStatus::Meet(Err(err))) => {
+			warn!(target: "secretstore_net", "{}: protocol error '{}' when establishing {} connection{}",
+				data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
+				outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
+		},
+		Ok(DeadlineStatus::Timeout) => {
+			warn!(target: "secretstore_net", "{}: timeout when establishing {} connection{}",
+				data.self_key_pair.public(), if outbound_addr.is_some() { "outbound" } else { "inbound" },
+				outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
+		},
+		Err(err) => {
+			warn!(target: "secretstore_net", "{}: network error '{}' when establishing {} connection{}",
+				data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
+				outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
+		},
+	}
+
+	Box::new(future::ok(Ok(())))
+}
+
+/// Process connection messages.
+fn net_process_connection_messages(
+	data: Arc<NetConnectionsData>,
+	connection: Arc<NetConnection>,
+) -> IoFuture<Result<(), Error>> {
+	Box::new(connection
+		.read_message_future()
+		.then(move |result|
+			match result {
+				Ok((_, Ok(message))) => {
+					connection.set_last_message_time(Instant::now());
+					data.message_processor.process_connection_message(connection.clone(), message);
+					// continue serving connection
+					let process_messages_future = net_process_connection_messages(
+						data.clone(), connection).then(|_| Ok(()));
+					execute(&data.executor, process_messages_future);
+					Box::new(future::ok(Ok(())))
+				},
+				Ok((_, Err(err))) => {
+					warn!(target: "secretstore_net", "{}: protocol error '{}' when reading message from node {}",
+						data.self_key_pair.public(), err, connection.node_id());
+					// continue serving connection
+					let process_messages_future = net_process_connection_messages(
+						data.clone(), connection).then(|_| Ok(()));
+					execute(&data.executor, process_messages_future);
+					Box::new(future::ok(Err(err)))
+				},
+				Err(err) => {
+					let node_id = *connection.node_id();
+					warn!(target: "secretstore_net", "{}: network error '{}' when reading message from node {}",
+						data.self_key_pair.public(), err, node_id);
+					// close connection
+					if data.remove(&*connection) {
+						let maintain_action = data.trigger.lock().on_connection_closed(&node_id);
+						maintain_connection_trigger(data, maintain_action);
+					}
+					Box::new(future::err(err))
+				},
+			}
+		))
+}
+
+/// Schedule connections maintenance.
+fn net_schedule_maintain(data: Arc<NetConnectionsData>) {
+	let closure_data = data.clone();
+	execute(&data.executor, Interval::new_interval(Duration::new(MAINTAIN_INTERVAL, 0))
+		.and_then(move |_| Ok(net_maintain(closure_data.clone())))
+		.for_each(|_| Ok(()))
+		.then(|_| future::ok(())));
+}
+
+/// Maintain network connections.
+fn net_maintain(data: Arc<NetConnectionsData>) {
+	trace!(target: "secretstore_net", "{}: executing maintain procedures", data.self_key_pair.public());
+
+	update_nodes_set(data.clone());
+	data.message_processor.maintain_sessions();
+	net_keep_alive(data.clone());
+	net_connect_disconnected(data);
+}
+
+/// Send keep alive messages to remote nodes.
+fn net_keep_alive(data: Arc<NetConnectionsData>) {
+	let now = Instant::now();
+	let active_connections = data.active_connections();
+	for connection in active_connections {
+		let last_message_diff = now - connection.last_message_time();
+		if last_message_diff > KEEP_ALIVE_DISCONNECT_INTERVAL {
+			warn!(target: "secretstore_net", "{}: keep alive timeout for node {}",
+				data.self_key_pair.public(), connection.node_id());
+
+			let node_id = *connection.node_id();
+			if data.remove(&*connection) {
+				let maintain_action = data.trigger.lock().on_connection_closed(&node_id);
+				maintain_connection_trigger(data.clone(), maintain_action);
+			}
+			data.message_processor.process_disconnect(&node_id);
+		}
+		else if last_message_diff > KEEP_ALIVE_SEND_INTERVAL {
+			connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {})));
+		}
+	}
+}
+
+/// Connect disconnected nodes.
+fn net_connect_disconnected(data: Arc<NetConnectionsData>) {
+	let disconnected_nodes = data.disconnected_nodes();
+	for (node_id, address) in disconnected_nodes {
+		if data.allow_connecting_to_higher_nodes || *data.self_key_pair.public() < node_id {
+			net_connect(data.clone(), address);
+		}
+	}
+}
+
+/// Schedule future execution.
+fn execute<F: Future<Item = (), Error = ()> + Send + 'static>(executor: &Executor, f: F) {
+	if let Err(err) = future::Executor::execute(executor, Box::new(f)) {
+		error!("Secret store runtime unable to spawn task. Runtime is shutting down. ({:?})", err);
+	}
+}
+
+/// Try to update active nodes set from connection trigger.
+fn update_nodes_set(data: Arc<NetConnectionsData>) {
+	let maintain_action = data.trigger.lock().on_maintain();
+	maintain_connection_trigger(data, maintain_action);
+}
+
+/// Execute maintain procedures of connections trigger.
+fn maintain_connection_trigger(data: Arc<NetConnectionsData>, maintain_action: Option<Maintain>) {
+	if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Session) {
+		let session_params = data.trigger.lock().maintain_session();
+		if let Some(session_params) = session_params {
+			let session = data.message_processor.start_servers_set_change_session(session_params);
+			match session {
+				Ok(_) => trace!(target: "secretstore_net", "{}: started auto-migrate session",
+					data.self_key_pair.public()),
+				Err(err) => trace!(target: "secretstore_net", "{}: failed to start auto-migrate session with: {}",
+					data.self_key_pair.public(), err),
+			}
+		}
+	}
+	if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Connections) {
+		let mut trigger = data.trigger.lock();
+		let mut data = data.container.write();
+		trigger.maintain_connections(&mut *data);
+	}
+}
+
+/// Compose SocketAddr from configuration's address and port.
+fn make_socket_address(address: &str, port: u16) -> Result<SocketAddr, Error> {
+	let ip_address: IpAddr = address.parse().map_err(|_| Error::InvalidNodeAddress)?;
+	Ok(SocketAddr::new(ip_address, port))
+}
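The `insert` logic above resolves simultaneous connects deterministically: the lower-id node dials out, so a duplicate inbound connection is only kept on the higher-id side. A standalone restatement of that rule (node ids simplified to `u64`):

```rust
// Mirrors NetConnectionsData::insert: reject the new connection when
// (self < peer && inbound) || (self > peer && outbound).
fn keep_new_connection(self_id: u64, peer_id: u64, new_is_inbound: bool) -> bool {
    !((self_id < peer_id && new_is_inbound) || (self_id > peer_id && !new_is_inbound))
}

fn main() {
    // Lower-id side keeps its outbound dial and rejects the duplicate inbound one.
    assert!(keep_new_connection(1, 2, false));
    assert!(!keep_new_connection(1, 2, true));
    // Higher-id side keeps the inbound connection and rejects its own outbound dial.
    assert!(keep_new_connection(2, 1, true));
    assert!(!keep_new_connection(2, 1, false));
}
```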
({:?})", err); + } +} + +/// Try to update active nodes set from connection trigger. +fn update_nodes_set(data: Arc) { + let maintain_action = data.trigger.lock().on_maintain(); + maintain_connection_trigger(data, maintain_action); +} + +/// Execute maintain procedures of connections trigger. +fn maintain_connection_trigger(data: Arc, maintain_action: Option) { + if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Session) { + let session_params = data.trigger.lock().maintain_session(); + if let Some(session_params) = session_params { + let session = data.message_processor.start_servers_set_change_session(session_params); + match session { + Ok(_) => trace!(target: "secretstore_net", "{}: started auto-migrate session", + data.self_key_pair.public()), + Err(err) => trace!(target: "secretstore_net", "{}: failed to start auto-migrate session with: {}", + data.self_key_pair.public(), err), + } + } + } + if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Connections) { + let mut trigger = data.trigger.lock(); + let mut data = data.container.write(); + trigger.maintain_connections(&mut *data); + } +} + +/// Compose SocketAddr from configuration' address and port. +fn make_socket_address(address: &str, port: u16) -> Result { + let ip_address: IpAddr = address.parse().map_err(|_| Error::InvalidNodeAddress)?; + Ok(SocketAddr::new(ip_address, port)) +} diff --git a/secret_store/src/key_server_cluster/cluster_message_processor.rs b/secret_store/src/key_server_cluster/cluster_message_processor.rs new file mode 100644 index 000000000..b4ba5ef03 --- /dev/null +++ b/secret_store/src/key_server_cluster/cluster_message_processor.rs @@ -0,0 +1,357 @@ +// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use key_server_cluster::{Error, NodeId, NodeKeyPair}; +use key_server_cluster::cluster::{ServersSetChangeParams, new_servers_set_change_session}; +use key_server_cluster::cluster_sessions::{AdminSession}; +use key_server_cluster::cluster_connections::{ConnectionProvider, Connection}; +use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, ClusterSessionsContainer, + create_cluster_view}; +use key_server_cluster::cluster_sessions_creator::{ClusterSessionCreator, IntoSessionId}; +use key_server_cluster::message::{self, Message, ClusterMessage}; +use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession, + IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction}; +use key_server_cluster::connection_trigger::ServersSetChangeSessionCreatorConnector; + +/// Something that is able to process signals/messages from other nodes. +pub trait MessageProcessor: Send + Sync { + /// Process disconnect from the remote node. 
+
+/// Bridge between ConnectionManager and ClusterSessions.
+pub struct SessionsMessageProcessor {
+	self_key_pair: Arc<NodeKeyPair>,
+	servers_set_change_creator_connector: Arc<ServersSetChangeSessionCreatorConnector>,
+	sessions: Arc<ClusterSessions>,
+	connections: Arc<ConnectionProvider>,
+}
+
+impl SessionsMessageProcessor {
+	/// Create new instance of SessionsMessageProcessor.
+	pub fn new(
+		self_key_pair: Arc<NodeKeyPair>,
+		servers_set_change_creator_connector: Arc<ServersSetChangeSessionCreatorConnector>,
+		sessions: Arc<ClusterSessions>,
+		connections: Arc<ConnectionProvider>,
+	) -> Self {
+		SessionsMessageProcessor {
+			self_key_pair,
+			servers_set_change_creator_connector,
+			sessions,
+			connections,
+		}
+	}
+
+	/// Process single session message from connection.
+	fn process_message<S: ClusterSession, SC: ClusterSessionCreator<S, D>, D>(
+		&self,
+		sessions: &ClusterSessionsContainer<S, SC, D>,
+		connection: Arc<Connection>,
+		mut message: Message,
+	) -> Option<Arc<S>>
+		where
+			Message: IntoSessionId<S::Id>
+	{
+		// get or create new session, if required
+		let mut sender = *connection.node_id();
+		let session = self.prepare_session(sessions, &sender, &message);
+		// send error if session is not found, or failed to create
+		let session = match session {
+			Ok(session) => session,
+			Err(error) => {
+				// this is new session => it is not yet in container
+				warn!(target: "secretstore_net",
+					"{}: {} session read error '{}' when requested for session from node {}",
+					self.self_key_pair.public(), S::type_name(), error, sender);
+				if !message.is_error_message() {
+					let qed = "session_id only fails for cluster messages;
+						only session messages are passed to process_message;
+						qed";
+					let session_id = message.into_session_id().expect(qed);
+					let session_nonce = message.session_nonce().expect(qed);
+
+					connection.send_message(SC::make_error_message(session_id, session_nonce, error));
+				}
+				return None;
+			},
+		};
+
+		let session_id = session.id();
+		let mut is_queued_message = false;
+		loop {
+			let message_result = session.on_message(&sender, &message);
+			match message_result {
+				Ok(_) => {
+					// if session is completed => stop
+					if session.is_finished() {
+						info!(target: "secretstore_net",
+							"{}: {} session completed", self.self_key_pair.public(), S::type_name());
+						sessions.remove(&session_id);
+						return Some(session);
+					}
+
+					// try to dequeue message
+					match sessions.dequeue_message(&session_id) {
+						Some((msg_sender, msg)) => {
+							is_queued_message = true;
+							sender = msg_sender;
+							message = msg;
+						},
+						None => return Some(session),
+					}
+				},
+				Err(Error::TooEarlyForRequest) => {
+					sessions.enqueue_message(&session_id, sender, message, is_queued_message);
+					return Some(session);
+				},
+				Err(err) => {
+					warn!(
+						target: "secretstore_net",
+						"{}: {} session error '{}' when processing message {} from node {}",
+						self.self_key_pair.public(),
+						S::type_name(),
+						err,
+						message,
+						sender);
+					session.on_session_error(self.self_key_pair.public(), err);
+					sessions.remove(&session_id);
+					return Some(session);
+				},
+			}
+		}
+	}
+
+	/// Get or insert new session.
+	fn prepare_session<S: ClusterSession, SC: ClusterSessionCreator<S, D>, D>(
+		&self,
+		sessions: &ClusterSessionsContainer<S, SC, D>,
+		sender: &NodeId,
+		message: &Message
+	) -> Result<Arc<S>, Error>
+		where
+			Message: IntoSessionId<S::Id>
+	{
+		fn requires_all_connections(message: &Message) -> bool {
+			match *message {
+				Message::Generation(_) => true,
+				Message::ShareAdd(_) => true,
+				Message::ServersSetChange(_) => true,
+				_ => false,
+			}
+		}
+
+		// get or create new session, if required
+		let session_id = message.into_session_id()
+			.expect("into_session_id fails for cluster messages only;
+				only session messages are passed to prepare_session;
+				qed");
+		let is_initialization_message = message.is_initialization_message();
+		let is_delegation_message = message.is_delegation_message();
+		match is_initialization_message || is_delegation_message {
+			false => sessions.get(&session_id, true).ok_or(Error::NoActiveSessionWithId),
+			true => {
+				let creation_data = SC::creation_data_from_message(&message)?;
+				let master = if is_initialization_message {
+					*sender
+				} else {
+					*self.self_key_pair.public()
+				};
+				let cluster = create_cluster_view(
+					self.self_key_pair.clone(),
+					self.connections.clone(),
+					requires_all_connections(&message))?;
+
+				let nonce = Some(message.session_nonce().ok_or(Error::InvalidMessage)?);
+				let exclusive = message.is_exclusive_session_message();
+				sessions.insert(cluster, master, session_id, nonce, exclusive, creation_data)
+			},
+		}
+	}
+
+	/// Process single cluster message from the connection.
+	fn process_cluster_message(&self, connection: Arc<Connection>, message: ClusterMessage) {
+		match message {
+			ClusterMessage::KeepAlive(_) => {
+				let msg = Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse {
+					session_id: None,
+				}));
+				connection.send_message(msg)
+			},
+			ClusterMessage::KeepAliveResponse(msg) => if let Some(session_id) = msg.session_id {
+				self.sessions.on_session_keep_alive(connection.node_id(), session_id.into());
+			},
+			_ => warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}",
+				self.self_key_pair.public(), message, connection.node_id(), connection.node_address()),
+		}
+	}
+}
+
+impl MessageProcessor for SessionsMessageProcessor {
+	fn process_disconnect(&self, node: &NodeId) {
+		self.sessions.on_connection_timeout(node);
+	}
+
+	fn process_connection_message(&self, connection: Arc<Connection>, message: Message) {
+		trace!(target: "secretstore_net", "{}: received message {} from {}",
+			self.self_key_pair.public(), message, connection.node_id());
+
+		// error is ignored as we only process errors on session level
+		match message {
+			Message::Generation(message) => self
+				.process_message(&self.sessions.generation_sessions, connection, Message::Generation(message))
+				.map(|_| ()).unwrap_or_default(),
+			Message::Encryption(message) => self
+				.process_message(&self.sessions.encryption_sessions, connection, Message::Encryption(message))
+				.map(|_| ()).unwrap_or_default(),
+			Message::Decryption(message) => self
+				.process_message(&self.sessions.decryption_sessions, connection, Message::Decryption(message))
+				.map(|_| ()).unwrap_or_default(),
+			Message::SchnorrSigning(message) => self
+				.process_message(&self.sessions.schnorr_signing_sessions, connection, Message::SchnorrSigning(message))
+				.map(|_| ()).unwrap_or_default(),
+			Message::EcdsaSigning(message) => self
+				.process_message(&self.sessions.ecdsa_signing_sessions, connection, Message::EcdsaSigning(message))
+				.map(|_| ()).unwrap_or_default(),
+			Message::ServersSetChange(message) => {
+				let message = Message::ServersSetChange(message);
+				let is_initialization_message = message.is_initialization_message();
+				let session = self.process_message(&self.sessions.admin_sessions, connection, message);
+				if is_initialization_message {
+					if let Some(session) = session {
+						self.servers_set_change_creator_connector
+							.set_key_servers_set_change_session(session.clone());
+					}
+				}
+			},
+			Message::KeyVersionNegotiation(message) => {
+				let session = self.process_message(
+					&self.sessions.negotiation_sessions, connection, Message::KeyVersionNegotiation(message));
+				self.try_continue_session(session);
+			},
+			Message::ShareAdd(message) => self.process_message(
+				&self.sessions.admin_sessions, connection, Message::ShareAdd(message))
+				.map(|_| ()).unwrap_or_default(),
+			Message::Cluster(message) => self.process_cluster_message(connection, message),
+		}
+	}
+
+	fn try_continue_session(
+		&self,
+		session: Option<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>
+	) {
+		if let Some(session) = session {
+			let meta = session.meta();
+			let is_master_node = meta.self_node_id == meta.master_node_id;
+			if is_master_node && session.is_finished() {
+				self.sessions.negotiation_sessions.remove(&session.id());
+				match session.wait() {
+					Ok(Some((version, master))) => match session.take_continue_action() {
+						Some(ContinueAction::Decrypt(
+							session, origin, is_shadow_decryption, is_broadcast_decryption
+						)) => {
+							let initialization_error = if self.self_key_pair.public() == &master {
+								session.initialize(
+									origin, version, is_shadow_decryption, is_broadcast_decryption)
+							} else {
+								session.delegate(
+									master, origin, version, is_shadow_decryption, is_broadcast_decryption)
+							};
+
+							if let Err(error) = initialization_error {
+								session.on_session_error(&meta.self_node_id, error);
+								self.sessions.decryption_sessions.remove(&session.id());
+							}
+						},
+						Some(ContinueAction::SchnorrSign(session, message_hash)) => {
+							let initialization_error = if self.self_key_pair.public() == &master {
+								session.initialize(version, message_hash)
+							} else {
+								session.delegate(master, version, message_hash)
+							};
+
+							if let Err(error) = initialization_error {
+								session.on_session_error(&meta.self_node_id, error);
+								self.sessions.schnorr_signing_sessions.remove(&session.id());
+							}
+						},
+						Some(ContinueAction::EcdsaSign(session, message_hash)) => {
+							let initialization_error = if self.self_key_pair.public() == &master {
+								session.initialize(version, message_hash)
+							} else {
+								session.delegate(master, version, message_hash)
+							};
+
+							if let Err(error) = initialization_error {
+								session.on_session_error(&meta.self_node_id, error);
+								self.sessions.ecdsa_signing_sessions.remove(&session.id());
+							}
+						},
+						None => (),
+					},
+					Ok(None) => unreachable!("is_master_node; session is finished;
+						negotiation version always finished with result on master;
+						qed"),
+					Err(error) => match session.take_continue_action() {
+						Some(ContinueAction::Decrypt(session, _, _, _)) => {
+							session.on_session_error(&meta.self_node_id, error);
+							self.sessions.decryption_sessions.remove(&session.id());
+						},
+						Some(ContinueAction::SchnorrSign(session, _)) => {
+							session.on_session_error(&meta.self_node_id, error);
+							self.sessions.schnorr_signing_sessions.remove(&session.id());
+						},
+						Some(ContinueAction::EcdsaSign(session, _)) => {
+							session.on_session_error(&meta.self_node_id, error);
+							self.sessions.ecdsa_signing_sessions.remove(&session.id());
+						},
+						None => (),
+					},
+				}
+			}
+		}
+	}
+
+	fn maintain_sessions(&self) {
+		self.sessions.stop_stalled_sessions();
+		self.sessions.sessions_keep_alive();
+	}
+
+	fn start_servers_set_change_session(&self, params: ServersSetChangeParams) -> Result<Arc<AdminSession>, Error> {
+		new_servers_set_change_session(
+			self.self_key_pair.clone(),
+			&*self.sessions,
+			self.connections.clone(),
+			self.servers_set_change_creator_connector.clone(),
+			params,
+		)
+	}
+}
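`process_message` above retries out-of-order traffic instead of failing: a message that arrives `TooEarlyForRequest` is queued and replayed once the session makes progress. A standalone miniature of that deliver-then-drain loop (the real code additionally tracks re-queued messages via `is_queued_message`; the types here are illustrative):

```rust
use std::collections::VecDeque;

#[derive(Debug, PartialEq)]
enum DeliveryError { TooEarly }

struct Session {
    ready: bool,
    processed: Vec<u32>,
    queue: VecDeque<u32>,
}

impl Session {
    fn on_message(&mut self, msg: u32) -> Result<(), DeliveryError> {
        if !self.ready && msg != 0 {
            // Out-of-order message: initialization (msg 0) not seen yet.
            return Err(DeliveryError::TooEarly);
        }
        if msg == 0 { self.ready = true; }
        self.processed.push(msg);
        Ok(())
    }

    fn deliver(&mut self, mut msg: u32) {
        loop {
            match self.on_message(msg) {
                Ok(()) => match self.queue.pop_front() {
                    Some(queued) => msg = queued, // drain queued messages next
                    None => return,
                },
                Err(DeliveryError::TooEarly) => {
                    self.queue.push_back(msg); // retry once the session is ready
                    return;
                },
            }
        }
    }
}

fn main() {
    let mut session = Session { ready: false, processed: vec![], queue: VecDeque::new() };
    session.deliver(5); // arrives before initialization => queued
    session.deliver(0); // initialization; the queued message is drained after it
    assert_eq!(session.processed, vec![0, 5]);
}
```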