From ebda6d391601a5e80e1e33700c4c4ff44a5e7d9b Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 5 Feb 2018 22:59:14 +0300 Subject: [PATCH] SecretStore: ignore removed authorities when running auto-migration (#7674) --- .../key_server_cluster/connection_trigger.rs | 27 +++++------- .../connection_trigger_with_migration.rs | 43 ++++--------------- 2 files changed, 20 insertions(+), 50 deletions(-) diff --git a/secret_store/src/key_server_cluster/connection_trigger.rs b/secret_store/src/key_server_cluster/connection_trigger.rs index 208700c03..e0172b89a 100644 --- a/secret_store/src/key_server_cluster/connection_trigger.rs +++ b/secret_store/src/key_server_cluster/connection_trigger.rs @@ -84,8 +84,8 @@ pub struct SimpleServersSetChangeSessionCreatorConnector { pub enum ConnectionsAction { /// Connect to nodes from old set only. ConnectToCurrentSet, - /// Connect to nodes from both old and migration sets. - ConnectToCurrentAndMigrationSet, + /// Connect to nodes from migration set. + ConnectToMigrationSet, } /// Trigger connections. 
@@ -151,14 +151,9 @@ impl TriggerConnections { ConnectionsAction::ConnectToCurrentSet => { adjust_connections(self.self_key_pair.public(), data, &server_set.current_set); }, - ConnectionsAction::ConnectToCurrentAndMigrationSet => { - let mut old_and_migration_set = BTreeMap::new(); - if let Some(migration) = server_set.migration.as_ref() { - old_and_migration_set.extend(migration.set.iter().map(|(node_id, node_addr)| (node_id.clone(), node_addr.clone()))); - } - old_and_migration_set.extend(server_set.current_set.iter().map(|(node_id, node_addr)| (node_id.clone(), node_addr.clone()))); - - adjust_connections(self.self_key_pair.public(), data, &old_and_migration_set); + ConnectionsAction::ConnectToMigrationSet => { + let migration_set = server_set.migration.as_ref().map(|s| s.set.clone()).unwrap_or_default(); + adjust_connections(self.self_key_pair.public(), data, &migration_set); }, } } @@ -335,7 +330,7 @@ mod tests { } #[test] - fn maintain_connects_to_current_and_migration_set_works() { + fn maintain_connects_to_migration_set_works() { let connections = create_connections(); let self_node_id = connections.self_key_pair.public().clone(); let current_node_id = Random.generate().unwrap().public().clone(); @@ -343,17 +338,17 @@ mod tests { let new_node_id = Random.generate().unwrap().public().clone(); let mut connections_data: ClusterConnectionsData = Default::default(); - connections.maintain(ConnectionsAction::ConnectToCurrentAndMigrationSet, &mut connections_data, &KeyServerSetSnapshot { - current_set: vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap()), - (current_node_id.clone(), "127.0.0.1:8082".parse().unwrap())].into_iter().collect(), + connections.maintain(ConnectionsAction::ConnectToMigrationSet, &mut connections_data, &KeyServerSetSnapshot { + current_set: vec![(current_node_id.clone(), "127.0.0.1:8082".parse().unwrap())].into_iter().collect(), new_set: vec![(new_node_id.clone(), "127.0.0.1:8083".parse().unwrap())].into_iter().collect(), 
migration: Some(KeyServerSetMigration { - set: vec![(migration_node_id.clone(), "127.0.0.1:8084".parse().unwrap())].into_iter().collect(), + set: vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap()), + (migration_node_id.clone(), "127.0.0.1:8084".parse().unwrap())].into_iter().collect(), ..Default::default() }), }); - assert_eq!(vec![current_node_id, migration_node_id].into_iter().collect::<BTreeSet<_>>(), + assert_eq!(vec![migration_node_id].into_iter().collect::<BTreeSet<_>>(), connections_data.nodes.keys().cloned().collect::<BTreeSet<_>>()); } diff --git a/secret_store/src/key_server_cluster/connection_trigger_with_migration.rs b/secret_store/src/key_server_cluster/connection_trigger_with_migration.rs index 42dfb0574..91679928f 100644 --- a/secret_store/src/key_server_cluster/connection_trigger_with_migration.rs +++ b/secret_store/src/key_server_cluster/connection_trigger_with_migration.rs @@ -260,12 +260,11 @@ impl TriggerSession { let migration = server_set.migration.as_ref() .expect("action is Start only when migration is started (see maintain_session); qed"); - let old_set: BTreeSet<_> = server_set.current_set.keys() - .chain(migration.set.keys()) - .cloned().collect(); - let new_set: BTreeSet<_> = migration.set.keys() - .cloned() - .collect(); + // we assume that authorities that are removed from the servers set are either offline, or malicious + // => they're not involved in ServersSetChangeSession + // => both sets are the same + let old_set: BTreeSet<_> = migration.set.keys().cloned().collect(); + let new_set = old_set.clone(); let signatures = self.self_key_pair.sign(&ordered_nodes_hash(&old_set)) .and_then(|old_set_signature| self.self_key_pair.sign(&ordered_nodes_hash(&new_set)) @@ -336,8 +335,7 @@ fn maintain_session(self_node_id: &NodeId, connected: &BTreeSet<NodeId>, snapsho
is_connected_to_all_nodes(self_node_id, &snapshot.migration.as_ref().expect(migration_data_proof).set, connected) && + match is_connected_to_all_nodes(self_node_id, &snapshot.migration.as_ref().expect(migration_data_proof).set, connected) && select_master_node(snapshot) == self_node_id { true => Some(SessionAction::Start), // we are not connected to all required nodes yet or we are not on master node => wait for it @@ -406,7 +404,7 @@ fn maintain_connections(migration_state: MigrationState, session_state: SessionS // but it participates in new key generation session // it is ok, since 'officialy' here means that this node is a owner of all old shares (MigrationState::Required, _) | - (MigrationState::Started, _) => Some(ConnectionsAction::ConnectToCurrentAndMigrationSet), + (MigrationState::Started, _) => Some(ConnectionsAction::ConnectToMigrationSet), } } @@ -430,15 +428,6 @@ fn select_master_node(snapshot: &KeyServerSetSnapshot) -> &NodeId { when Started: migration.is_some() && we return migration.master; qed;\ when Required: current_set != new_set; this means that at least one set is non-empty; we try to take node from each set; qed")) } - /*server_set_state.migration.as_ref() - .map(|m| &m.master) - .unwrap_or_else(|| server_set_state.current_set.keys() - .filter(|n| server_set_state.new_set.contains_key(n)) - .nth(0) - .or_else(|| server_set_state.new_set.keys().nth(0))) - .expect("select_master_node is only called when migration is Required or Started;" - "when Started: migration.is_some() && we have migration.master; qed" - "when Required: current_set != migration_set; this means that at least one set is non-empty; we select")*/ } #[cfg(test)] @@ -558,13 +547,13 @@ mod tests { #[test] fn maintain_connections_connects_to_current_and_old_set_when_migration_is_required() { assert_eq!(maintain_connections(MigrationState::Required, - SessionState::Idle), Some(ConnectionsAction::ConnectToCurrentAndMigrationSet)); + SessionState::Idle), 
Some(ConnectionsAction::ConnectToMigrationSet)); } #[test] fn maintain_connections_connects_to_current_and_old_set_when_migration_is_started() { assert_eq!(maintain_connections(MigrationState::Started, - SessionState::Idle), Some(ConnectionsAction::ConnectToCurrentAndMigrationSet)); + SessionState::Idle), Some(ConnectionsAction::ConnectToMigrationSet)); } #[test] @@ -597,20 +586,6 @@ mod tests { }, MigrationState::Started, SessionState::Idle), None); } - #[test] - fn maintain_session_does_nothing_when_migration_started_on_master_node_and_no_session_and_not_connected_to_current_nodes() { - assert_eq!(maintain_session(&1.into(), &Default::default(), &KeyServerSetSnapshot { - current_set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap()), - (2.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - new_set: Default::default(), - migration: Some(KeyServerSetMigration { - master: 1.into(), - set: vec![(1.into(), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(), - ..Default::default() - }), - }, MigrationState::Started, SessionState::Idle), None); - } - #[test] fn maintain_session_does_nothing_when_migration_started_on_master_node_and_no_session_and_not_connected_to_migration_nodes() { assert_eq!(maintain_session(&1.into(), &Default::default(), &KeyServerSetSnapshot {