diff --git a/Cargo.lock b/Cargo.lock index f8e7c89eb..413ae6362 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -832,6 +832,7 @@ dependencies = [ "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-proto 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index e181bf88d..4c0a05fd9 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -591,6 +591,10 @@ usage! { "--secretstore-secret=[SECRET]", "Hex-encoded secret key of this node.", + ARG arg_secretstore_admin_public: (Option) = None, or |c: &Config| otry!(c.secretstore).admin_public.clone(), + "--secretstore-admin-public=[PUBLIC]", + "Hex-encoded public key of secret store administrator.", + ["Sealing/Mining options"] FLAG flag_force_sealing: (bool) = false, or |c: &Config| otry!(c.mining).force_sealing.clone(), "--force-sealing", @@ -1089,6 +1093,7 @@ struct SecretStore { disable_http: Option, disable_acl_check: Option, self_secret: Option, + admin_public: Option, nodes: Option>, interface: Option, port: Option, @@ -1445,6 +1450,7 @@ mod tests { flag_no_secretstore_http: false, flag_no_secretstore_acl_check: false, arg_secretstore_secret: None, + arg_secretstore_admin_public: None, arg_secretstore_nodes: "".into(), arg_secretstore_interface: "local".into(), arg_secretstore_port: 8083u16, @@ -1684,6 +1690,7 @@ mod tests { disable_http: None, disable_acl_check: None, self_secret: None, + admin_public: None, nodes: None, interface: None, port: Some(8083), diff --git a/parity/configuration.rs b/parity/configuration.rs index 08df1d6a6..1c751ab64 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -626,6 +626,7 @@ impl Configuration { http_interface: self.secretstore_http_interface(), http_port: self.args.arg_ports_shift + self.args.arg_secretstore_http_port, data_path: self.directories().secretstore, + admin_public: self.secretstore_admin_public()?, }) } @@ -1037,6 +1038,13 @@ impl Configuration { } } + fn secretstore_admin_public(&self) -> Result, String> { + match self.args.arg_secretstore_admin_public.as_ref() { + Some(admin_public) => Ok(Some(admin_public.parse().map_err(|e| format!("Invalid secret store admin public: {}", e))?)), + None => Ok(None), + } + } + fn secretstore_nodes(&self) -> Result, String> { let mut nodes = BTreeMap::new(); for node in self.args.arg_secretstore_nodes.split(',').filter(|n| n != &"") { diff --git a/parity/secretstore.rs b/parity/secretstore.rs index eb5922540..416c9d547 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -55,6 +55,8 @@ pub struct Configuration { pub http_port: u16, /// Data directory path for secret store pub data_path: String, + /// Administrator public key. 
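The new `--secretstore-admin-public` flag is parsed by `admin_public.parse()` inside `secretstore_admin_public()` above. A minimal standalone sketch of the expected input shape, assuming the usual `Public` encoding (hex of a 64-byte uncompressed key); the helper name and error text here are illustrative, not part of the patch or of the ethkey API:

use std::num::ParseIntError;

// Assumption-based sketch: `--secretstore-admin-public` carries 128 hex characters
// (a 64-byte public key, no 0x04 prefix); the real parsing is done by `Public::from_str`.
fn decode_admin_public(hex: &str) -> Result<Vec<u8>, String> {
    let hex = hex.trim_start_matches("0x");
    if hex.len() != 128 {
        return Err(format!("Invalid secret store admin public: expected 128 hex characters, got {}", hex.len()));
    }
    (0..hex.len())
        .step_by(2)
        .map(|i| u8::from_str_radix(&hex[i..i + 2], 16))
        .collect::<Result<Vec<u8>, ParseIntError>>()
        .map_err(|e| format!("Invalid secret store admin public: {}", e))
}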
+ pub admin_public: Option, } /// Secret store dependencies @@ -145,6 +147,7 @@ mod server { port: port, })).collect(), allow_connecting_to_higher_nodes: true, + admin_public: conf.admin_public, }, }; @@ -170,6 +173,7 @@ impl Default for Configuration { http_enabled: true, acl_check_enabled: true, self_secret: None, + admin_public: None, nodes: BTreeMap::new(), interface: "127.0.0.1".to_owned(), port: 8083, diff --git a/secret_store/Cargo.toml b/secret_store/Cargo.toml index ab13b9caa..4354ea2e3 100644 --- a/secret_store/Cargo.toml +++ b/secret_store/Cargo.toml @@ -20,6 +20,7 @@ serde_derive = "1.0" futures = "0.1" futures-cpupool = "0.1" rustc-hex = "1.0" +tiny-keccak = "1.3" tokio-core = "0.1.6" tokio-io = "0.1.0" tokio-service = "0.1" diff --git a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index 550983eb8..29798e503 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -153,6 +153,7 @@ impl KeyServerCore { allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes, acl_storage: acl_storage, key_storage: key_storage, + admin_public: None, }; let (stop, stopped) = futures::oneshot(); @@ -255,6 +256,7 @@ pub mod tests { port: start_port + (j as u16), })).collect(), allow_connecting_to_higher_nodes: false, + admin_public: None, }).collect(); let key_servers_set: BTreeMap = configs[0].nodes.iter() .map(|(k, a)| (k.clone(), format!("{}:{}", a.address, a.port).parse().unwrap())) diff --git a/secret_store/src/key_server_cluster/admin_sessions/mod.rs b/secret_store/src/key_server_cluster/admin_sessions/mod.rs new file mode 100644 index 000000000..68fddf6fa --- /dev/null +++ b/secret_store/src/key_server_cluster/admin_sessions/mod.rs @@ -0,0 +1,48 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +pub mod servers_set_change_session; +pub mod share_add_session; +pub mod share_change_session; +pub mod share_move_session; +pub mod share_remove_session; + +mod sessions_queue; + +use key_server_cluster::{SessionId, NodeId, SessionMeta}; + +/// Share change session metadata. +#[derive(Debug, Clone)] +pub struct ShareChangeSessionMeta { + /// Key id. + pub id: SessionId, + /// Id of node, which has started this session. + pub master_node_id: NodeId, + /// Id of node, on which this session is running. + pub self_node_id: NodeId, +} + +impl ShareChangeSessionMeta { + /// Convert to consensus session meta. `all_nodes_set` is the union of `old_nodes_set` && `new_nodes_set`. 
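Administrative sessions need agreement from every node, so the conversion that follows sets `threshold` to `all_nodes_set_len - 1`. Assuming the consensus job keeps the usual "threshold + 1 confirmations" acceptance rule, that means the whole node set must respond. A toy check of that arithmetic with a stubbed meta type (not the real `SessionMeta`):

// Stub for illustration only; the real SessionMeta also carries session and node ids.
struct ToyConsensusMeta { threshold: usize }

fn required_responses(meta: &ToyConsensusMeta) -> usize {
    meta.threshold + 1 // assumption: consensus succeeds after threshold + 1 confirmations
}

fn main() {
    let all_nodes_set_len = 5;
    let meta = ToyConsensusMeta { threshold: all_nodes_set_len - 1 };
    assert_eq!(required_responses(&meta), all_nodes_set_len); // every node must confirm
}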
+ pub fn into_consensus_meta(self, all_nodes_set_len: usize) -> SessionMeta { + SessionMeta { + id: self.id, + master_node_id: self.master_node_id, + self_node_id: self.self_node_id, + threshold: all_nodes_set_len - 1, + } + } +} diff --git a/secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs b/secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs new file mode 100644 index 000000000..651fd8a4c --- /dev/null +++ b/secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs @@ -0,0 +1,1104 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use std::collections::{BTreeSet, BTreeMap}; +use std::collections::btree_map::Entry; +use parking_lot::{Mutex, Condvar}; +use ethkey::{Public, Signature}; +use key_server_cluster::{Error, NodeId, SessionId, KeyStorage}; +use key_server_cluster::cluster::Cluster; +use key_server_cluster::cluster_sessions::ClusterSession; +use key_server_cluster::message::{Message, ServersSetChangeMessage, + ConsensusMessageWithServersSet, InitializeConsensusSessionWithServersSet, + ServersSetChangeConsensusMessage, ConfirmConsensusInitialization, UnknownSessionsRequest, UnknownSessions, + ServersSetChangeShareAddMessage, ServersSetChangeError, ServersSetChangeCompleted, + ServersSetChangeShareMoveMessage, ServersSetChangeShareRemoveMessage, + ServersSetChangeDelegate, ServersSetChangeDelegateResponse, InitializeShareChangeSession, + ConfirmShareChangeSessionInitialization}; +use key_server_cluster::share_change_session::{ShareChangeSession, ShareChangeSessionParams, ShareChangeSessionPlan, + prepare_share_change_session_plan}; +use key_server_cluster::jobs::job_session::JobTransport; +use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest}; +use key_server_cluster::jobs::unknown_sessions_job::{UnknownSessionsJob}; +use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession}; +use key_server_cluster::admin_sessions::sessions_queue::{SessionsQueue, QueuedSession}; +use key_server_cluster::admin_sessions::ShareChangeSessionMeta; + +/// Maximal number of active share change sessions. +const MAX_ACTIVE_KEY_SESSIONS: usize = 64; + +/// Servers set change session API. +pub trait Session: Send + Sync + 'static { + /// Wait until session is completed. + fn wait(&self) -> Result<(), Error>; +} + +/// Servers set change session. +/// Brief overview: +/// 1) consensus establishing +/// 2) master node requests all other nodes for sessions he is not participating (aka unknown sessions) +/// 3) every slave node responds with sessions id => we are able to collect Map of unknown sessions on master +/// 4) for every known session (i.e. 
session that master participates in): +/// 4.1) share change plan is created = nodes to add shares for, nodes to move shares from/to, nodes to remove shares from +/// 4.2) share change session is started. Share change session = sequential execution of ShareAdd, then ShareMove && then ShareRemove sessions (order matters here) for single key +/// 5) for every unknown session: +/// 5.1) sub_master is selected from sessions participants +/// 5.2) share change session is delegated from master to this sub_master +/// 5.3) share change session is executed by this sub_master +/// 5.4) share change confirm is sent from sub_master to master +/// 6) upon completing all known share change sessions && receiving confirmations for all unknown share change sessions, session completion signal is sent to all slave nodes && session is completed +pub struct SessionImpl { + /// Session core. + core: SessionCore, + /// Session data. + data: Mutex, +} + +/// Session state. +#[derive(PartialEq)] +enum SessionState { + /// Establishing consensus. + EstablishingConsensus, + /// Running share change sessions. + RunningShareChangeSessions, + /// Session is completed. + Finished, +} + +/// Immutable session data. +struct SessionCore { + /// Servers set change session meta (id is computed from new_nodes_set). + pub meta: ShareChangeSessionMeta, + /// Cluster which allows this node to send messages to other nodes in the cluster. + pub cluster: Arc, + /// Keys storage. + pub key_storage: Arc, + /// Session-level nonce. + pub nonce: u64, + /// All known nodes. + pub all_nodes_set: BTreeSet, + /// Administrator public key. + pub admin_public: Public, + /// SessionImpl completion condvar. + pub completed: Condvar, +} + +/// Servers set change consensus session type. +type ServersSetChangeConsensusSession = ConsensusSession; + +/// Mutable session data. +struct SessionData { + /// Session state. + pub state: SessionState, + /// Consensus-based servers set change session. + pub consensus_session: Option, + /// New nodes set. + pub new_nodes_set: Option>, + /// Share change sessions queue (valid on master nodes only). + pub sessions_queue: Option, + /// Share change sessions initialization state (valid on master nodes only). + pub sessions_initialization_state: BTreeMap, + /// Sessions delegated to other nodes (valid on master node only). + pub delegated_key_sessions: BTreeMap, + /// Active share change sessions. + pub active_key_sessions: BTreeMap, + /// Servers set change result. + pub result: Option>, +} + +/// Session initialization data. +struct SessionInitializationData { + /// Master node id. + pub master: NodeId, + /// Nodes that have confirmed session initialization request. + pub confirmations: BTreeSet, +} + +/// SessionImpl creation parameters +pub struct SessionParams { + /// Session meta (artificial). + pub meta: ShareChangeSessionMeta, + /// Cluster. + pub cluster: Arc, + /// Keys storage. + pub key_storage: Arc, + /// Session nonce. + pub nonce: u64, + /// All known nodes. + pub all_nodes_set: BTreeSet, + /// Administrator public key. + pub admin_public: Public, +} + +/// Servers set change consensus transport. +struct ServersSetChangeConsensusTransport { + /// Session id. + id: SessionId, + /// Session-level nonce. + nonce: u64, + /// Cluster. + cluster: Arc, +} + +/// Unknown sessions job transport. +struct UnknownSessionsJobTransport { + /// Session id. + id: SessionId, + /// Session-level nonce. + nonce: u64, + /// Cluster. 
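The numbered overview maps onto the `SessionState` enum above. A compressed sketch of those transitions, paraphrased from the handlers rather than extracted verbatim:

#[derive(Debug, PartialEq)]
enum State { EstablishingConsensus, RunningShareChangeSessions, Finished }

// Coarse state transitions of a servers-set-change session.
fn advance(state: State, consensus_established: bool, all_key_sessions_done: bool) -> State {
    match state {
        State::EstablishingConsensus if consensus_established => State::RunningShareChangeSessions,
        State::RunningShareChangeSessions if all_key_sessions_done => State::Finished,
        other => other, // errors and timeouts also jump straight to Finished
    }
}

fn main() {
    let s = advance(State::EstablishingConsensus, true, false);
    assert_eq!(advance(s, true, true), State::Finished);
}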
+ cluster: Arc, +} + +impl SessionImpl { + /// Create new servers set change session. + pub fn new(params: SessionParams) -> Result { + Ok(SessionImpl { + core: SessionCore { + meta: params.meta, + cluster: params.cluster, + key_storage: params.key_storage, + nonce: params.nonce, + all_nodes_set: params.all_nodes_set, + admin_public: params.admin_public, + completed: Condvar::new(), + }, + data: Mutex::new(SessionData { + state: SessionState::EstablishingConsensus, + consensus_session: None, + new_nodes_set: None, + sessions_queue: None, + sessions_initialization_state: BTreeMap::new(), + delegated_key_sessions: BTreeMap::new(), + active_key_sessions: BTreeMap::new(), + result: None, + }), + }) + } + + /// Get session id. + pub fn id(&self) -> &SessionId { + &self.core.meta.id + } + + /// Initialize servers set change session on master node. + pub fn initialize(&self, new_nodes_set: BTreeSet, all_set_signature: Signature, new_set_signature: Signature) -> Result<(), Error> { + check_nodes_set(&self.core.all_nodes_set, &new_nodes_set)?; + + let mut data = self.data.lock(); + if data.state != SessionState::EstablishingConsensus || data.consensus_session.is_some() { + return Err(Error::InvalidStateForRequest); + } + + let mut consensus_session = ConsensusSession::new(ConsensusSessionParams { + meta: self.core.meta.clone().into_consensus_meta(self.core.all_nodes_set.len()), + consensus_executor: ServersSetChangeAccessJob::new_on_master(self.core.admin_public.clone(), + self.core.all_nodes_set.clone(), + self.core.all_nodes_set.clone(), + new_nodes_set.clone(), + all_set_signature, + new_set_signature), + consensus_transport: ServersSetChangeConsensusTransport { + id: self.core.meta.id.clone(), + nonce: self.core.nonce, + cluster: self.core.cluster.clone(), + }, + })?; + + consensus_session.initialize(self.core.all_nodes_set.clone())?; + debug_assert!(consensus_session.state() != ConsensusSessionState::ConsensusEstablished); + data.consensus_session = Some(consensus_session); + data.new_nodes_set = Some(new_nodes_set); + + Ok(()) + } + + /// Process servers set change message. 
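`initialize` above takes two administrator signatures over the old and new node sets; the tests further down build them with `sign(admin_key_pair.secret(), &ordered_nodes_hash(..))`. The property that matters is that every node derives the same digest for the same set, which a `BTreeSet` guarantees by iterating in sorted order. A dependency-free sketch of that idea; the real digest is presumably keccak-256 (hence the new tiny-keccak dependency) and the signature is secp256k1:

use std::collections::BTreeSet;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// DefaultHasher stands in for keccak-256 purely to keep the sketch self-contained.
fn ordered_nodes_digest(nodes: &BTreeSet<Vec<u8>>) -> u64 {
    let mut hasher = DefaultHasher::new();
    for node in nodes {
        node.hash(&mut hasher); // sorted iteration => identical digest on every node
    }
    hasher.finish()
}

fn main() {
    let a: BTreeSet<_> = vec![vec![2u8; 64], vec![1u8; 64]].into_iter().collect();
    let b: BTreeSet<_> = vec![vec![1u8; 64], vec![2u8; 64]].into_iter().collect();
    assert_eq!(ordered_nodes_digest(&a), ordered_nodes_digest(&b));
}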
+ pub fn process_message(&self, sender: &NodeId, message: &ServersSetChangeMessage) -> Result<(), Error> { + if self.core.nonce != message.session_nonce() { + return Err(Error::ReplayProtection); + } + + match message { + &ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref message) => + self.on_consensus_message(sender, message), + &ServersSetChangeMessage::UnknownSessionsRequest(ref message) => + self.on_unknown_sessions_requested(sender, message), + &ServersSetChangeMessage::UnknownSessions(ref message) => + self.on_unknown_sessions(sender, message), + &ServersSetChangeMessage::InitializeShareChangeSession(ref message) => + self.on_initialize_share_change_session(sender, message), + &ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref message) => + self.on_share_change_session_confirmation(sender, message), + &ServersSetChangeMessage::ServersSetChangeDelegate(ref message) => + self.on_sessions_delegation(sender, message), + &ServersSetChangeMessage::ServersSetChangeDelegateResponse(ref message) => + self.on_delegated_session_completed(sender, message), + &ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref message) => + self.on_share_add_message(sender, message), + &ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ref message) => + self.on_share_move_message(sender, message), + &ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ref message) => + self.on_share_remove_message(sender, message), + &ServersSetChangeMessage::ServersSetChangeError(ref message) => + self.on_session_error(sender, message), + &ServersSetChangeMessage::ServersSetChangeCompleted(ref message) => + self.on_session_completed(sender, message), + } + } + + /// When consensus-related message is received. + pub fn on_consensus_message(&self, sender: &NodeId, message: &ServersSetChangeConsensusMessage) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + + // check state + let mut data = self.data.lock(); + if data.state != SessionState::EstablishingConsensus { + return Err(Error::InvalidStateForRequest); + } + + // start slave consensus session if needed + if self.core.meta.self_node_id != self.core.meta.master_node_id { + if data.consensus_session.is_none() { + match &message.message { + &ConsensusMessageWithServersSet::InitializeConsensusSession(_) => { + data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams { + meta: self.core.meta.clone().into_consensus_meta(self.core.all_nodes_set.len()), + consensus_executor: ServersSetChangeAccessJob::new_on_slave(self.core.admin_public.clone(), + self.core.all_nodes_set.clone(), + ), + consensus_transport: ServersSetChangeConsensusTransport { + id: self.core.meta.id.clone(), + nonce: self.core.nonce, + cluster: self.core.cluster.clone(), + }, + })?); + }, + _ => return Err(Error::InvalidStateForRequest), + } + } + } + + // process consensus message + let consensus_session = data.consensus_session.as_mut().ok_or(Error::InvalidMessage)?; + let is_establishing_consensus = consensus_session.state() == ConsensusSessionState::EstablishingConsensus; + match &message.message { + &ConsensusMessageWithServersSet::InitializeConsensusSession(ref message) => + consensus_session.on_consensus_partial_request(sender, ServersSetChangeAccessRequest::from(message))?, + &ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ref message) => + consensus_session.on_consensus_partial_response(sender, message.is_confirmed)?, + } + + // when consensus is established => request unknown sessions + 
let is_consensus_established = consensus_session.state() == ConsensusSessionState::ConsensusEstablished; + if self.core.meta.self_node_id != self.core.meta.master_node_id || !is_establishing_consensus || !is_consensus_established { + return Ok(()); + } + + let unknown_sessions_job = UnknownSessionsJob::new_on_master(self.core.key_storage.clone(), self.core.meta.self_node_id.clone()); + consensus_session.disseminate_jobs(unknown_sessions_job, self.unknown_sessions_transport()) + } + + /// When unknown sessions are requested. + pub fn on_unknown_sessions_requested(&self, sender: &NodeId, message: &UnknownSessionsRequest) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + let mut data = self.data.lock(); + + let new_nodes_set = { + let consensus_session = data.consensus_session.as_mut().ok_or(Error::InvalidMessage)?; + let unknown_sessions_job = UnknownSessionsJob::new_on_slave(self.core.key_storage.clone()); + let unknown_sessions_transport = self.unknown_sessions_transport(); + + // and respond with unknown sessions + consensus_session.on_job_request(&sender, sender.clone(), unknown_sessions_job, unknown_sessions_transport)?; + + consensus_session.consensus_job().executor() + .new_servers_set() + .expect("consensus session is now completed; new_servers_set is intermediate result of consensus session; qed") + .clone() + }; + + // update state + data.state = SessionState::RunningShareChangeSessions; + data.new_nodes_set = Some(new_nodes_set); + + Ok(()) + } + + /// When unknown sessions are received. + pub fn on_unknown_sessions(&self, sender: &NodeId, message: &UnknownSessions) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + // check state + let mut data = self.data.lock(); + if data.state != SessionState::EstablishingConsensus { + return Err(Error::InvalidStateForRequest); + } + + // process message + let unknown_sessions = { + let consensus_session = data.consensus_session.as_mut().ok_or(Error::InvalidMessage)?; + consensus_session.on_job_response(sender, message.unknown_sessions.iter().cloned().map(Into::into).collect())?; + if consensus_session.state() != ConsensusSessionState::Finished { + return Ok(()); + } + + // all nodes have reported their unknown sessions + // => we are ready to start adding/moving/removing shares + consensus_session.result()? + }; + + // initialize sessions queue + data.state = SessionState::RunningShareChangeSessions; + data.sessions_queue = Some(SessionsQueue::new(self.core.key_storage.clone(), unknown_sessions)); + + // and disseminate session initialization requests + Self::disseminate_session_initialization_requests(&self.core, &mut *data) + } + + /// When share change session initialization is requested. 
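Steps 2-3 of the overview boil down to folding the slaves' replies into a map from each key the master does not hold to the nodes that reported owning a share of it. A simplified version with string ids; the real request/response types live in the unknown_sessions_job module:

use std::collections::{BTreeMap, BTreeSet};

// replies: node id -> key ids that node reported as unknown to the master
// result:  key id  -> nodes that own a share of that key
fn collect_unknown_sessions(replies: &BTreeMap<String, BTreeSet<String>>) -> BTreeMap<String, BTreeSet<String>> {
    let mut unknown: BTreeMap<String, BTreeSet<String>> = BTreeMap::new();
    for (node, keys) in replies {
        for key in keys {
            unknown.entry(key.clone()).or_insert_with(BTreeSet::new).insert(node.clone());
        }
    }
    unknown
}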
+ pub fn on_initialize_share_change_session(&self, sender: &NodeId, message: &InitializeShareChangeSession) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + // we only accept delegation requests from master node + if sender != &self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } + + // check state + let mut data = self.data.lock(); + if data.state != SessionState::RunningShareChangeSessions { + return Err(Error::InvalidStateForRequest); + } + + // insert new session + let key_id = message.key_id.clone().into(); + match data.active_key_sessions.contains_key(&key_id) { + true => return Err(Error::InvalidMessage), + false => { + let master_plan = ShareChangeSessionPlan { + nodes_to_add: message.shares_to_add.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(), + nodes_to_move: message.shares_to_move.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(), + nodes_to_remove: message.shares_to_remove.iter().cloned().map(Into::into).collect(), + }; + + // if master plan is empty, it is cheating + if master_plan.is_empty() { + return Err(Error::InvalidMessage); + } + + // on nodes, which have their own key share, we could check if master node plan is correct + if let Ok(key_share) = self.core.key_storage.get(&key_id) { + let new_nodes_set = data.new_nodes_set.as_ref() + .expect("new_nodes_set is filled during consensus establishing; change sessions are running after this; qed"); + let local_plan = prepare_share_change_session_plan(&key_share.id_numbers.keys().cloned().collect(), new_nodes_set)?; + if local_plan.nodes_to_add.keys().any(|n| !local_plan.nodes_to_add.contains_key(n)) + || local_plan.nodes_to_add.keys().any(|n| !master_plan.nodes_to_add.contains_key(n)) + || local_plan.nodes_to_move != master_plan.nodes_to_move + || local_plan.nodes_to_remove != master_plan.nodes_to_remove { + return Err(Error::InvalidMessage); + } + } + + data.active_key_sessions.insert(key_id.clone(), Self::create_share_change_session(&self.core, key_id, + message.master_node_id.clone().into(), + message.old_shares_set.iter().cloned().map(Into::into).collect(), + master_plan)?); + }, + }; + + // send confirmation + self.core.cluster.send(sender, Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ConfirmShareChangeSessionInitialization { + session: message.session.clone(), + session_nonce: message.session_nonce.clone(), + key_id: message.key_id.clone(), + }))) + } + + /// When share change session initialization is confirmed. 
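The plan the master sends (and that share-owning nodes re-derive locally above) is essentially a set difference between the key's current owners and the target server set, with some additions paired against removals as moves. The pairing is omitted in this simplified sketch, so treat it as an approximation of `prepare_share_change_session_plan` rather than its exact output:

use std::collections::BTreeSet;

struct PlanSketch {
    nodes_to_add: BTreeSet<String>,
    nodes_to_remove: BTreeSet<String>,
}

impl PlanSketch {
    // Analogous to ShareChangeSessionPlan::is_empty: an empty plan leaves this key untouched.
    fn is_empty(&self) -> bool {
        self.nodes_to_add.is_empty() && self.nodes_to_remove.is_empty()
    }
}

fn plan_sketch(old_owners: &BTreeSet<String>, new_server_set: &BTreeSet<String>) -> PlanSketch {
    PlanSketch {
        nodes_to_add: new_server_set.difference(old_owners).cloned().collect(),
        nodes_to_remove: old_owners.difference(new_server_set).cloned().collect(),
    }
}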
+ pub fn on_share_change_session_confirmation(&self, sender: &NodeId, message: &ConfirmShareChangeSessionInitialization) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + // we only accept delegation requests from master node + if self.core.meta.self_node_id != self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } + + // check state + let mut data = self.data.lock(); + if data.state != SessionState::RunningShareChangeSessions { + return Err(Error::InvalidStateForRequest); + } + + // add confirmation + let key_id = message.key_id.clone().into(); + let session_master = { + let session_init_data = data.sessions_initialization_state.get_mut(&key_id).ok_or(Error::InvalidMessage)?; + if !session_init_data.confirmations.remove(sender) { + return Err(Error::InvalidMessage); + } + + if !session_init_data.confirmations.is_empty() { + return Ok(()); + } + + session_init_data.master.clone() + }; + + // and start/delegate session if required + data.sessions_initialization_state.remove(&key_id); + if self.core.meta.self_node_id != session_master { + data.delegated_key_sessions.insert(key_id, session_master.clone()); + return self.core.cluster.send(&session_master, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(ServersSetChangeDelegate { + session: self.core.meta.id.clone().into(), + session_nonce: self.core.nonce, + key_id: key_id.into(), + }))); + } + + let key_session = data.active_key_sessions.get_mut(&key_id).ok_or(Error::InvalidMessage)?; + key_session.initialize() + } + + /// When sessions execution is delegated to this node. + pub fn on_sessions_delegation(&self, sender: &NodeId, message: &ServersSetChangeDelegate) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + // we only accept delegation requests from master node + if sender != &self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } + + // check state + let mut data = self.data.lock(); + if data.state != SessionState::RunningShareChangeSessions { + return Err(Error::InvalidStateForRequest); + } + + // start session + let key_session = data.active_key_sessions.get_mut(&message.key_id.clone().into()).ok_or(Error::InvalidMessage)?; + key_session.initialize() + } + + /// When delegated session execution is completed. + pub fn on_delegated_session_completed(&self, sender: &NodeId, message: &ServersSetChangeDelegateResponse) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + // we only accept delegation requests on master node + if self.core.meta.self_node_id != self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } + + // check state + let mut data = self.data.lock(); + if data.state != SessionState::RunningShareChangeSessions { + return Err(Error::InvalidStateForRequest); + } + + // forget delegated session + let key_id = message.key_id.clone().into(); + match data.delegated_key_sessions.entry(key_id) { + Entry::Occupied(entry) => if entry.get() == sender { + entry.remove() + } else { + return Err(Error::InvalidMessage); + }, + _ => return Err(Error::InvalidMessage), + }; + + // check if we need to complete the whole change session + Self::disseminate_session_initialization_requests(&self.core, &mut *data) + } + + /// When share add message is received. 
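The initialization handshake in `on_share_change_session_confirmation` above is plain set bookkeeping: drop the sender from the expected set, reject senders that were never asked, and start (or delegate) the key session once the set drains. Reduced to its core, with illustrative names:

use std::collections::BTreeSet;

struct PendingInit {
    confirmations: BTreeSet<String>, // nodes that still have to confirm
}

impl PendingInit {
    /// Returns Ok(true) once every expected confirmation has arrived.
    fn confirm(&mut self, node: &str) -> Result<bool, &'static str> {
        if !self.confirmations.remove(node) {
            return Err("unexpected confirmation"); // corresponds to the Error::InvalidMessage path above
        }
        Ok(self.confirmations.is_empty())
    }
}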
+ pub fn on_share_add_message(&self, sender: &NodeId, message: &ServersSetChangeShareAddMessage) -> Result<(), Error> { + self.on_share_change_message(message.message.session_id().clone().into(), |session| + session.on_share_add_message(sender, &message.message)) + } + + /// When share move message is received. + pub fn on_share_move_message(&self, sender: &NodeId, message: &ServersSetChangeShareMoveMessage) -> Result<(), Error> { + self.on_share_change_message(message.message.session_id().clone().into(), |session| + session.on_share_move_message(sender, &message.message)) + } + + /// When share remove message is received. + pub fn on_share_remove_message(&self, sender: &NodeId, message: &ServersSetChangeShareRemoveMessage) -> Result<(), Error> { + self.on_share_change_message(message.message.session_id().clone().into(), |session| + session.on_share_remove_message(sender, &message.message)) + } + + /// When error has occured on another node. + pub fn on_session_error(&self, sender: &NodeId, message: &ServersSetChangeError) -> Result<(), Error> { + let mut data = self.data.lock(); + + warn!("{}: servers set change session failed with error: {} from {}", self.core.meta.self_node_id, message.error, sender); + + data.state = SessionState::Finished; + data.result = Some(Err(Error::Io(message.error.clone()))); + self.core.completed.notify_all(); + + Ok(()) + } + + /// When session completion message is received. + pub fn on_session_completed(&self, sender: &NodeId, message: &ServersSetChangeCompleted) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + if sender != &self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } + + let mut data = self.data.lock(); + data.result = Some(Ok(())); + if data.active_key_sessions.len() != 0 { + return Err(Error::TooEarlyForRequest); + } + + data.state = SessionState::Finished; + self.core.completed.notify_all(); + + Ok(()) + } + + /// Create unknown sessions transport. + fn unknown_sessions_transport(&self) -> UnknownSessionsJobTransport { + UnknownSessionsJobTransport { + id: self.core.meta.id.clone(), + nonce: self.core.nonce, + cluster: self.core.cluster.clone(), + } + } + + /// When share change message is received. + fn on_share_change_message Result<(), Error>>(&self, session_id: SessionId, message_processor: F) -> Result<(), Error> { + // check state + let mut data = self.data.lock(); + if data.state != SessionState::RunningShareChangeSessions { + return Err(Error::InvalidStateForRequest); + } + + // process message + let (is_finished, is_master) = { + let key_session = data.active_key_sessions.get_mut(&session_id).ok_or(Error::InvalidMessage)?; + message_processor(key_session)?; + (key_session.is_finished(), key_session.is_master()) + }; + + if is_finished { + data.active_key_sessions.remove(&session_id); + let is_general_master = self.core.meta.self_node_id == self.core.meta.master_node_id; + if is_master && !is_general_master { + Self::return_delegated_session(&self.core, &session_id)?; + } + if is_general_master { + Self::disseminate_session_initialization_requests(&self.core, &mut *data)?; + } + + if data.result.is_some() && data.active_key_sessions.len() == 0 { + data.state = SessionState::Finished; + self.core.completed.notify_all(); + } + } + + Ok(()) + } + + /// Create share change session. 
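`on_session_completed` above deliberately answers `Error::TooEarlyForRequest` while key sessions are still running; the caller (see the test `MessageLoop` further down) treats that as "retry later" rather than as a failure. The same pattern in isolation, with hypothetical names:

use std::collections::VecDeque;

#[derive(Debug)]
enum ProcessError { TooEarly, Fatal(&'static str) }

// Drain a message queue, pushing "too early" messages to the back for a later retry.
// In the real loop other cluster messages are interleaved, so a retried message
// eventually finds the session in the right state.
fn drive<M, F>(mut queue: VecDeque<M>, mut process: F) -> Result<(), ProcessError>
where F: FnMut(&M) -> Result<(), ProcessError> {
    while let Some(message) = queue.pop_front() {
        match process(&message) {
            Ok(()) => (),
            Err(ProcessError::TooEarly) => queue.push_back(message),
            Err(fatal) => return Err(fatal),
        }
    }
    Ok(())
}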
+ fn create_share_change_session(core: &SessionCore, key_id: SessionId, master_node_id: NodeId, old_nodes_set: BTreeSet, session_plan: ShareChangeSessionPlan) -> Result { + ShareChangeSession::new(ShareChangeSessionParams { + session_id: key_id.clone(), + nonce: core.nonce, + meta: ShareChangeSessionMeta { + id: key_id, + self_node_id: core.meta.self_node_id.clone(), + master_node_id: master_node_id, + }, + cluster: core.cluster.clone(), + key_storage: core.key_storage.clone(), + old_nodes_set: old_nodes_set, + plan: session_plan, + }) + } + + /// Disseminate session initialization requests. + fn disseminate_session_initialization_requests(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + debug_assert_eq!(core.meta.self_node_id, core.meta.master_node_id); + if let Some(sessions_queue) = data.sessions_queue.as_mut() { + let mut number_of_sessions_to_start = MAX_ACTIVE_KEY_SESSIONS.saturating_sub(data.active_key_sessions.len() + data.delegated_key_sessions.len()); + let new_nodes_set = data.new_nodes_set.as_ref() + .expect("this method is called after consensus estabished; new_nodes_set is a result of consensus session; qed"); + while number_of_sessions_to_start > 0 { + let queued_session = match sessions_queue.next() { + None => break, // complete session + Some(Err(e)) => return Err(e), + Some(Ok(session)) => session, + }; + + // prepare session change plan && check if something needs to be changed + let old_nodes_set = queued_session.nodes(); + let session_plan = prepare_share_change_session_plan(&old_nodes_set, new_nodes_set)?; + if session_plan.is_empty() { + continue; + } + + // select master for this session + let session_master = match &queued_session { + &QueuedSession::Known(_, _) => core.meta.self_node_id.clone(), + &QueuedSession::Unknown(_, ref nodes) => nodes.iter().cloned().nth(0) + .expect("unknown session is received is reported by at least one node; qed"), + }; + + // send key session initialization requests + let key_id = queued_session.id().clone(); + let mut confirmations: BTreeSet<_> = old_nodes_set.iter().cloned() + .chain(session_plan.nodes_to_add.keys().cloned()) + .chain(session_plan.nodes_to_move.keys().cloned()) + .collect(); + let need_create_session = confirmations.remove(&core.meta.self_node_id); + let initialization_message = Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(InitializeShareChangeSession { + session: core.meta.id.clone().into(), + session_nonce: core.nonce, + key_id: key_id.clone().into(), + master_node_id: session_master.clone().into(), + old_shares_set: old_nodes_set.iter().cloned().map(Into::into).collect(), + shares_to_add: session_plan.nodes_to_add.iter() + .map(|(n, nid)| (n.clone().into(), nid.clone().into())) + .collect(), + shares_to_move: session_plan.nodes_to_move.iter() + .map(|(source, target)| (source.clone().into(), target.clone().into())) + .collect(), + shares_to_remove: session_plan.nodes_to_remove.iter().cloned().map(Into::into).collect(), + })); + for node in &confirmations { + core.cluster.send(&node, initialization_message.clone())?; + } + + // create session on this node if required + if need_create_session { + data.active_key_sessions.insert(key_id.clone(), Self::create_share_change_session(core, key_id, + session_master.clone(), + queued_session.nodes(), + session_plan)?); + } + + // initialize session if required + let wait_for_confirmations = !confirmations.is_empty(); + if !wait_for_confirmations { + data.active_key_sessions.get_mut(&key_id) + 
.expect("!wait_for_confirmations is true only if this is the only session participant; if this is session participant, session is created above; qed") + .initialize()?; + } else { + data.sessions_initialization_state.insert(key_id, SessionInitializationData { + master: session_master, + confirmations: confirmations, + }); + } + + number_of_sessions_to_start = number_of_sessions_to_start - 1; + } + + // if iteration is not yet finished => return + if number_of_sessions_to_start == 0 { + return Ok(()); + } + } + + // iteration is finished => complete session + if data.state != SessionState::Finished { + data.sessions_queue = None; + if data.active_key_sessions.len() == 0 && data.delegated_key_sessions.len() == 0 { + Self::complete_session(core, data)?; + } + } + + Ok(()) + } + + /// Return delegated session to master. + fn return_delegated_session(core: &SessionCore, key_id: &SessionId) -> Result<(), Error> { + assert!(core.meta.self_node_id != core.meta.master_node_id); + core.cluster.send(&core.meta.master_node_id, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(ServersSetChangeDelegateResponse { + session: core.meta.id.clone().into(), + session_nonce: core.nonce, + key_id: key_id.clone().into(), + }))) + } + + /// Complete servers set change session. + fn complete_session(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + debug_assert_eq!(core.meta.self_node_id, core.meta.master_node_id); + core.cluster.broadcast(Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(ServersSetChangeCompleted { + session: core.meta.id.clone().into(), + session_nonce: core.nonce, + })))?; + + data.state = SessionState::Finished; + data.result = Some(Ok(())); + core.completed.notify_all(); + + Ok(()) + } +} + +impl Session for SessionImpl { + fn wait(&self) -> Result<(), Error> { + let mut data = self.data.lock(); + if !data.result.is_some() { + self.core.completed.wait(&mut data); + } + + data.result.clone() + .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") + } +} + +impl ClusterSession for SessionImpl { + fn is_finished(&self) -> bool { + self.data.lock().state == SessionState::Finished + } + + fn on_session_timeout(&self) { + let mut data = self.data.lock(); + + warn!("{}: servers set change session failed with timeout", self.core.meta.self_node_id); + + data.state = SessionState::Finished; + data.result = Some(Err(Error::NodeDisconnected)); + self.core.completed.notify_all(); + } + + fn on_node_timeout(&self, node: &NodeId) { + let mut data = self.data.lock(); + + warn!("{}: servers set change session failed because {} connection has timeouted", self.core.meta.self_node_id, node); + + data.state = SessionState::Finished; + data.result = Some(Err(Error::NodeDisconnected)); + self.core.completed.notify_all(); + } +} + +impl JobTransport for ServersSetChangeConsensusTransport { + type PartialJobRequest=ServersSetChangeAccessRequest; + type PartialJobResponse=bool; + + fn send_partial_request(&self, node: &NodeId, request: ServersSetChangeAccessRequest) -> Result<(), Error> { + self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(ServersSetChangeConsensusMessage { + session: self.id.clone().into(), + session_nonce: self.nonce, + message: ConsensusMessageWithServersSet::InitializeConsensusSession(InitializeConsensusSessionWithServersSet { + old_nodes_set: request.old_servers_set.into_iter().map(Into::into).collect(), + 
new_nodes_set: request.new_servers_set.into_iter().map(Into::into).collect(), + old_set_signature: request.old_set_signature.into(), + new_set_signature: request.new_set_signature.into(), + }), + }))) + } + + fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { + self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(ServersSetChangeConsensusMessage { + session: self.id.clone().into(), + session_nonce: self.nonce, + message: ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: response, + }), + }))) + } +} + +impl JobTransport for UnknownSessionsJobTransport { + type PartialJobRequest=NodeId; + type PartialJobResponse=BTreeSet; + + fn send_partial_request(&self, node: &NodeId, _request: NodeId) -> Result<(), Error> { + self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest(UnknownSessionsRequest { + session: self.id.clone().into(), + session_nonce: self.nonce, + }))) + } + + fn send_partial_response(&self, node: &NodeId, response: BTreeSet) -> Result<(), Error> { + self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(UnknownSessions { + session: self.id.clone().into(), + session_nonce: self.nonce, + unknown_sessions: response.into_iter().map(Into::into).collect(), + }))) + } +} + +fn check_nodes_set(all_nodes_set: &BTreeSet, new_nodes_set: &BTreeSet) -> Result<(), Error> { + // all new nodes must be a part of all nodes set + match new_nodes_set.iter().any(|n| !all_nodes_set.contains(n)) { + true => Err(Error::InvalidNodesConfiguration), + false => Ok(()) + } +} + +#[cfg(test)] +pub mod tests { + use std::sync::Arc; + use std::collections::{VecDeque, BTreeMap, BTreeSet}; + use ethkey::{Random, Generator, Public, Signature, KeyPair, sign}; + use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage}; + use key_server_cluster::cluster::Cluster; + use key_server_cluster::cluster_sessions::ClusterSession; + use key_server_cluster::cluster::tests::DummyCluster; + use key_server_cluster::generation_session::tests::{MessageLoop as GenerationMessageLoop, Node as GenerationNode, generate_nodes_ids}; + use key_server_cluster::math; + use key_server_cluster::message::Message; + use key_server_cluster::admin_sessions::ShareChangeSessionMeta; + use key_server_cluster::admin_sessions::share_add_session::tests::check_secret_is_preserved; + use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash; + use super::{SessionImpl, SessionParams}; + + struct Node { + pub cluster: Arc, + pub key_storage: Arc, + pub session: SessionImpl, + } + + struct MessageLoop { + pub admin_key_pair: KeyPair, + pub original_key_pair: KeyPair, + pub all_nodes_set: BTreeSet, + pub new_nodes_set: BTreeSet, + pub all_set_signature: Signature, + pub new_set_signature: Signature, + pub nodes: BTreeMap, + pub queue: VecDeque<(NodeId, NodeId, Message)>, + } + + fn create_session(mut meta: ShareChangeSessionMeta, self_node_id: NodeId, admin_public: Public, all_nodes_set: BTreeSet, cluster: Arc, key_storage: Arc) -> SessionImpl { + meta.self_node_id = self_node_id; + SessionImpl::new(SessionParams { + meta: meta, + all_nodes_set: all_nodes_set, + cluster: cluster, + key_storage: key_storage, + nonce: 1, + admin_public: admin_public, + }).unwrap() + } + + fn create_node(meta: ShareChangeSessionMeta, admin_public: Public, all_nodes_set: BTreeSet, node: GenerationNode) -> Node 
{ + for n in &all_nodes_set { + node.cluster.add_node(n.clone()); + } + + Node { + cluster: node.cluster.clone(), + key_storage: node.key_storage.clone(), + session: create_session(meta, node.session.node().clone(), admin_public, all_nodes_set, node.cluster, node.key_storage), + } + } + + impl MessageLoop { + pub fn new(gml: GenerationMessageLoop, master_node_id: NodeId, new_nodes_ids: BTreeSet, removed_nodes_ids: BTreeSet) -> Self { + // generate admin key pair + let admin_key_pair = Random.generate().unwrap(); + let admin_public = admin_key_pair.public().clone(); + + // compute original secret key + let original_secret = math::compute_joint_secret(gml.nodes.values() + .map(|nd| nd.key_storage.get(&SessionId::default()).unwrap().polynom1[0].clone()) + .collect::>() + .iter()).unwrap(); + let original_key_pair = KeyPair::from_secret(original_secret).unwrap(); + + let mut all_nodes_set: BTreeSet<_> = gml.nodes.keys().cloned().collect(); + let new_nodes_set: BTreeSet = all_nodes_set.iter().cloned() + .chain(new_nodes_ids.iter().cloned()) + .filter(|n| !removed_nodes_ids.contains(n)) + .collect(); + all_nodes_set.extend(new_nodes_ids.iter().cloned()); + + let meta = ShareChangeSessionMeta { + self_node_id: master_node_id.clone(), + master_node_id: master_node_id.clone(), + id: SessionId::default(), + }; + + let old_nodes = gml.nodes.into_iter().map(|n| create_node(meta.clone(), admin_public.clone(), all_nodes_set.clone(), n.1)); + let new_nodes = new_nodes_ids.into_iter().map(|new_node_id| { + let new_node_cluster = Arc::new(DummyCluster::new(new_node_id.clone())); + for node in &all_nodes_set { + new_node_cluster.add_node(node.clone()); + } + + let new_node_key_storage = Arc::new(DummyKeyStorage::default()); + let new_node_session = create_session(meta.clone(), new_node_id, admin_public.clone(), all_nodes_set.clone(), new_node_cluster.clone(), new_node_key_storage.clone()); + Node { + cluster: new_node_cluster, + key_storage: new_node_key_storage, + session: new_node_session, + } + }); + let nodes: BTreeMap<_, _> = old_nodes.chain(new_nodes).map(|n| (n.session.core.meta.self_node_id.clone(), n)).collect(); + + let all_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&all_nodes_set)).unwrap(); + let new_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap(); + + MessageLoop { + admin_key_pair: admin_key_pair, + original_key_pair: original_key_pair, + all_nodes_set: all_nodes_set.clone(), + new_nodes_set: new_nodes_set, + all_set_signature: all_set_signature, + new_set_signature: new_set_signature, + nodes: nodes, + queue: Default::default(), + } + } + + pub fn run(&mut self) { + while let Some((from, to, message)) = self.take_message() { + self.process_message((from, to, message)).unwrap(); + } + } + + pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { + self.nodes.values() + .filter_map(|n| n.cluster.take_message().map(|m| (n.session.core.meta.self_node_id.clone(), m.0, m.1))) + .nth(0) + .or_else(|| self.queue.pop_front()) + } + + pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { + match { match msg.2 { + Message::ServersSetChange(ref message) => self.nodes[&msg.1].session.process_message(&msg.0, message), + _ => unreachable!("only servers set change messages are expected"), + } } { + Ok(_) => Ok(()), + Err(Error::TooEarlyForRequest) => { + self.queue.push_back(msg); + Ok(()) + }, + Err(err) => Err(err), + } + } + } + + pub fn generate_key(threshold: usize, nodes_ids: BTreeSet) -> 
GenerationMessageLoop { + let mut gml = GenerationMessageLoop::with_nodes_ids(nodes_ids); + gml.master().initialize(Public::default(), threshold, gml.nodes.keys().cloned().collect()).unwrap(); + while let Some((from, to, message)) = gml.take_message() { + gml.process_message((from, to, message)).unwrap(); + } + gml + } + + #[test] + fn node_added_using_servers_set_change() { + // initial 2-of-3 session + let gml = generate_key(1, generate_nodes_ids(3)); + let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap(); + + // insert 1 node so that it becames 2-of-4 session + let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect(); + let mut ml = MessageLoop::new(gml, master_node_id, nodes_to_add, BTreeSet::new()); + ml.nodes[&master_node_id].session.initialize(ml.nodes.keys().cloned().collect(), ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); + ml.run(); + + // try to recover secret for every possible combination of nodes && check that secret is the same + check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter().map(|(k, v)| (k.clone(), v.key_storage.clone())).collect()); + + // check that all sessions have finished + assert!(ml.nodes.values().all(|n| n.session.is_finished())); + } + + #[test] + fn node_added_using_server_set_change_from_this_node() { + // initial 2-of-3 session + let gml = generate_key(1, generate_nodes_ids(3)); + + // insert 1 node so that it becames 2-of-4 session + // master node is the node we are adding => + // 1) add session is delegated to one of old nodes + // 2) key share is pushed to new node + // 3) delegated session is returned back to added node + let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect(); + let master_node_id = nodes_to_add.iter().cloned().nth(0).unwrap(); + let mut ml = MessageLoop::new(gml, master_node_id, nodes_to_add, BTreeSet::new()); + ml.nodes[&master_node_id].session.initialize(ml.nodes.keys().cloned().collect(), ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); + ml.run(); + + // check that all sessions have finished + assert!(ml.nodes.values().all(|n| n.session.is_finished())); + } + + #[test] + fn node_moved_using_servers_set_change() { + // initial 2-of-3 session + let gml = generate_key(1, generate_nodes_ids(3)); + let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap(); + + // remove 1 node && insert 1 node so that one share is moved + let nodes_to_remove: BTreeSet<_> = gml.nodes.keys().cloned().skip(1).take(1).collect(); + let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect(); + let mut ml = MessageLoop::new(gml, master_node_id, nodes_to_add.clone(), nodes_to_remove.clone()); + let new_nodes_set = ml.nodes.keys().cloned().filter(|n| !nodes_to_remove.contains(n)).collect(); + ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); + ml.run(); + + // check that secret is still the same as before moving the share + check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter() + .filter(|&(k, _)| !nodes_to_remove.contains(k)) + .map(|(k, v)| (k.clone(), v.key_storage.clone())) + .collect()); + + // check that all removed nodes do not own key share + assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).is_err())); + + // check that all sessions have finished + 
assert!(ml.nodes.values().all(|n| n.session.is_finished())); + } + + #[test] + fn node_removed_using_servers_set_change() { + // initial 2-of-3 session + let gml = generate_key(1, generate_nodes_ids(3)); + let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap(); + + // remove 1 node so that session becames 2-of-2 + let nodes_to_remove: BTreeSet<_> = gml.nodes.keys().cloned().skip(1).take(1).collect(); + let new_nodes_set: BTreeSet<_> = gml.nodes.keys().cloned().filter(|n| !nodes_to_remove.contains(&n)).collect(); + let mut ml = MessageLoop::new(gml, master_node_id, BTreeSet::new(), nodes_to_remove.clone()); + ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); + ml.run(); + + // try to recover secret for every possible combination of nodes && check that secret is the same + check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter() + .filter(|&(k, _)| !nodes_to_remove.contains(k)) + .map(|(k, v)| (k.clone(), v.key_storage.clone())) + .collect()); + + // check that all removed nodes do not own key share + assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).is_err())); + + // check that all sessions have finished + assert!(ml.nodes.values().all(|n| n.session.is_finished())); + } +} diff --git a/secret_store/src/key_server_cluster/admin_sessions/sessions_queue.rs b/secret_store/src/key_server_cluster/admin_sessions/sessions_queue.rs new file mode 100644 index 000000000..8a311c91b --- /dev/null +++ b/secret_store/src/key_server_cluster/admin_sessions/sessions_queue.rs @@ -0,0 +1,87 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use std::collections::{VecDeque, BTreeSet, BTreeMap}; +use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare}; + +/// Session, queued for change. +pub enum QueuedSession { + /// Session is known on this node. + Known(SessionId, DocumentKeyShare), + /// Session is unknown on this node. + Unknown(SessionId, BTreeSet), +} + +/// Queue of share change sessions. +pub struct SessionsQueue { + /// Key storage. + key_storage: Arc, + /// Sessions, known on this node. + known_sessions: VecDeque, + /// Unknown sessions. + unknown_sessions: VecDeque<(SessionId, BTreeSet)>, +} + +impl SessionsQueue { + /// Create new sessions queue. 
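The queue built just below yields locally known keys first (read back from key storage) and only then the keys other nodes reported, with each item wrapped in a Result because the storage read can fail. A toy rendering of that ordering:

use std::collections::VecDeque;

// Known sessions are served before unknown ones; in the real iterator each known
// session is re-read from storage and may yield an error instead of a value.
fn drain_in_order(mut known: VecDeque<&'static str>, mut unknown: VecDeque<&'static str>) -> Vec<&'static str> {
    let mut order = Vec::new();
    while let Some(id) = known.pop_front() { order.push(id); }
    while let Some(id) = unknown.pop_front() { order.push(id); }
    order
}

fn main() {
    let order = drain_in_order(vec!["k1", "k2"].into(), vec!["u1"].into());
    assert_eq!(order, vec!["k1", "k2", "u1"]);
}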
+ pub fn new(key_storage: Arc, unknown_sessions: BTreeMap>) -> Self { + // TODO: optimizations: + // 1) known sessions - change to iter + // 2) unknown sesions - request chunk-by-chunk + SessionsQueue { + key_storage: key_storage.clone(), + known_sessions: key_storage.iter().map(|(k, _)| k).collect(), + unknown_sessions: unknown_sessions.into_iter().collect(), + } + } +} + +impl Iterator for SessionsQueue { + type Item = Result; + + fn next(&mut self) -> Option { + if let Some(known_session) = self.known_sessions.pop_front() { + return Some(self.key_storage.get(&known_session) + .map(|session| QueuedSession::Known(known_session, session)) + .map_err(|e| Error::KeyStorage(e.into()))); + } + + if let Some(unknown_session) = self.unknown_sessions.pop_front() { + return Some(Ok(QueuedSession::Unknown(unknown_session.0, unknown_session.1))); + } + + None + } +} + +impl QueuedSession { + /// Queued session (key) id. + pub fn id(&self) -> &SessionId { + match *self { + QueuedSession::Known(ref session_id, _) => session_id, + QueuedSession::Unknown(ref session_id, _) => session_id, + } + } + + /// OWners of key shares (aka session nodes). + pub fn nodes(&self) -> BTreeSet { + match *self { + QueuedSession::Known(_, ref key_share) => key_share.id_numbers.keys().cloned().collect(), + QueuedSession::Unknown(_, ref nodes) => nodes.clone(), + } + } +} diff --git a/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs b/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs new file mode 100644 index 000000000..bee88891e --- /dev/null +++ b/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs @@ -0,0 +1,1109 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use std::collections::{BTreeSet, BTreeMap}; +use ethkey::{Public, Secret, Signature}; +use parking_lot::{Mutex, Condvar}; +use key_server_cluster::{Error, SessionId, NodeId, DocumentKeyShare, KeyStorage}; +use key_server_cluster::cluster::Cluster; +use key_server_cluster::cluster_sessions::ClusterSession; +use key_server_cluster::math; +use key_server_cluster::message::{Message, ShareAddMessage, ShareAddConsensusMessage, ConsensusMessageWithServersSecretMap, + InitializeConsensusSessionWithServersSecretMap, KeyShareCommon, NewAbsoluteTermShare, NewKeysDissemination, ShareAddError, + ConfirmConsensusInitialization}; +use key_server_cluster::jobs::job_session::JobTransport; +use key_server_cluster::jobs::dummy_job::{DummyJob, DummyJobTransport}; +use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest}; +use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession}; +use key_server_cluster::admin_sessions::ShareChangeSessionMeta; + +/// Share addition session API. 
+pub trait Session: Send + Sync + 'static { + /// Wait until session is completed. + fn wait(&self) -> Result<(), Error>; +} + +/// Share addition session transport. +pub trait SessionTransport: Clone + JobTransport { + /// Send message to given node. + fn send(&self, node: &NodeId, message: ShareAddMessage) -> Result<(), Error>; + /// Set all nodes id numbers (required for consensus messages). + fn set_id_numbers(&mut self, id_numbers: BTreeMap); +} + +/// Share addition session. +/// Based on "Efficient Multi-Party Digital Signature using Adaptive Secret Sharing for Low-Power Devices in Wireless Networks" paper: +/// http://www.wu.ece.ufl.edu/mypapers/msig.pdf +/// Brief overview: +/// 1) initialization: master node (which has received request for shares addition the message) asks all other nodes to support addition +/// 2) key refreshing distribution (KRD): node generates new random polynom && sends required data to all other nodes +/// 3) key refreshing verification (KRV): node verifies received data +/// 4) node updates its own key share using generated (&& received) data +pub struct SessionImpl { + /// Session core. + core: SessionCore, + /// Session data. + data: Mutex>, +} + +/// Immutable session data. +struct SessionCore { + /// Session metadata. + pub meta: ShareChangeSessionMeta, + /// Session-level nonce. + pub nonce: u64, + /// Original key share (for old nodes only). + pub key_share: Option, + /// Session transport to communicate to other cluster nodes. + pub transport: T, + /// Key storage. + pub key_storage: Arc, + /// Administrator public key. + pub admin_public: Option, + /// SessionImpl completion condvar. + pub completed: Condvar, +} + +/// Share add consensus session type. +type ShareAddChangeConsensusSession = ConsensusSession; + +/// Mutable session data. +struct SessionData { + /// Session state. + pub state: SessionState, + /// Consensus session. + pub consensus_session: Option>, + /// Consensus result: nodes-specific data. + pub nodes: Option>, + /// Sum of old polynom1 and new polynom1. + pub refreshed_polynom1_sum: Option>, + /// NewKeyShare: threshold. + pub key_share_threshold: Option, + /// NewKeyShare: author. + pub key_share_author: Option, + /// NewKeyShare: Common (shared) encryption point. + pub key_share_common_point: Option, + /// NewKeyShare: Encrypted point. + pub key_share_encrypted_point: Option, + /// Share add change result. + pub result: Option>, +} + +/// Single node data. +#[derive(Debug)] +struct NodeData { + // === Values, filled during initialization phase === + /// Random unique scalar. Persistent. + pub id_number: Option, + /// Has node confirmed session initialization? + pub is_initialization_confirmed: bool, + /// Is this a new node? + pub is_new_node: bool, + + // === Values, filled during KRD phase === + /// Absolute term share, received from this node (for new nodes only). + pub absolute_term_share: Option, + /// Refreshed secret value, which has been received from this node. + pub refreshed_secret1: Option, + /// Refreshed Public values, which have been received from this node. + pub refreshed_publics: Option>, +} + +/// Session state. +#[derive(Debug, PartialEq)] +enum SessionState { + /// State when consensus is establishing. + ConsensusEstablishing, + /// Waiting for absolute term share. + WaitingForAbsoluteTermShare, + /// Waiting for keys dissemination. + WaitingForKeysDissemination, + /// Session is completed. + Finished, +} + +/// SessionImpl creation parameters +pub struct SessionParams { + /// Session metadata. 
+ pub meta: ShareChangeSessionMeta, + /// Session transport. + pub transport: T, + /// Key storage. + pub key_storage: Arc, + /// Administrator public key. + pub admin_public: Option, + /// Session nonce. + pub nonce: u64, +} + +/// Isolated ShareAdd session transport. +#[derive(Clone)] +pub struct IsolatedSessionTransport { + /// Key id. + session: SessionId, + /// Session-level nonce. + nonce: u64, + /// ID numbers of all participating nodes. + id_numbers: Option>, + /// Cluster. + cluster: Arc, +} + +impl SessionImpl where T: SessionTransport { + /// Create new share addition session. + pub fn new(params: SessionParams) -> Result { + let key_id = params.meta.id.clone(); + // it is ok for new nodes not to have key shares => ignore here + let key_share = params.key_storage.get(&key_id).ok(); + if key_share.as_ref().map(|ks| ks.polynom1.len() != ks.threshold + 1).unwrap_or_default() { + return Err(Error::KeyStorage("unsupported key share in storage".into())); + } + + Ok(SessionImpl { + core: SessionCore { + meta: params.meta, + nonce: params.nonce, + key_share: key_share, + transport: params.transport, + key_storage: params.key_storage, + admin_public: params.admin_public, + completed: Condvar::new(), + }, + data: Mutex::new(SessionData { + consensus_session: None, + state: SessionState::ConsensusEstablishing, + nodes: None, + refreshed_polynom1_sum: None, + key_share_threshold: None, + key_share_author: None, + key_share_common_point: None, + key_share_encrypted_point: None, + result: None, + }), + }) + } + + /// Set pre-established consensus data. + pub fn set_consensus_output(&self, old_nodes_set: BTreeSet, mut new_nodes_set: BTreeMap>) -> Result<(), Error> { + let mut data = self.data.lock(); + + // check state + if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() || data.nodes.is_some() { + return Err(Error::InvalidStateForRequest); + } + + // check && update passed data + match self.core.key_share.as_ref() { + Some(key_share) => { + // old_nodes_set should be exactly the same as when key was generated + if old_nodes_set.symmetric_difference(&key_share.id_numbers.keys().cloned().collect()).nth(0).is_some() { + return Err(Error::InvalidNodesConfiguration); + } + // update id_numbers for old nodes + for (new_node, new_node_id) in new_nodes_set.iter_mut().filter(|&(_, ref v)| v.is_none()) { + match key_share.id_numbers.get(new_node) { + Some(old_node_id) => *new_node_id = Some(old_node_id.clone()), + None => return Err(Error::InvalidNodesConfiguration), + } + } + }, + None => { + if old_nodes_set.contains(&self.core.meta.self_node_id) + || !new_nodes_set.contains_key(&self.core.meta.self_node_id) { + return Err(Error::InvalidNodesConfiguration); + } + }, + } + + check_nodes_set(&old_nodes_set, &new_nodes_set)?; + data.nodes = Some(new_nodes_set.into_iter() + .map(|(n, nn)| (n, NodeData::new(nn, !old_nodes_set.contains(&n)))) + .collect()); + + Ok(()) + } + + /// Initialize share add session on master node. 
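+ ///
+ /// Illustrative master-node call (a sketch mirroring the tests at the end of this file; `session`, the key pair and the node sets are placeholders):
+ /// ```ignore
+ /// let old_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&old_nodes_set))?;
+ /// let new_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set))?;
+ /// session.initialize(Some(new_nodes_set), Some(old_set_signature), Some(new_set_signature))?;
+ /// ```
+ /// When consensus has been pre-established via `set_consensus_output`, all three arguments are `None`.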
+ pub fn initialize(&self, new_nodes_set: Option>, old_set_signature: Option, new_set_signature: Option) -> Result<(), Error> { + debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id); + + let mut data = self.data.lock(); + + // check state + if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() { + return Err(Error::InvalidStateForRequest); + } + + // if consensus is not yet established => start consensus session + let is_consensus_pre_established = data.nodes.is_some(); + if !is_consensus_pre_established { + // TODO: when session is started on the node, which doesn't have share, it must be delegated to another node + // this is also true for other sessions (signing, decryption, ...) + let key_share = self.core.key_share.as_ref().ok_or(Error::KeyStorage("key share is not found on master node".into()))?; + let new_nodes_set = new_nodes_set.ok_or(Error::InvalidMessage)?; + let old_nodes_set: BTreeSet<_> = key_share.id_numbers.keys().cloned().collect(); + let new_nodes_map = new_nodes_set.iter() + .map(|n| key_share.id_numbers.get(n) + .cloned() + .map(Ok) + .unwrap_or_else(|| math::generate_random_scalar()) + .map(|nn| (n.clone(), Some(nn)))) + .collect::, _>>()?; + check_nodes_set(&old_nodes_set, &new_nodes_map)?; + + let old_set_signature = old_set_signature.ok_or(Error::InvalidMessage)?; + let new_set_signature = new_set_signature.ok_or(Error::InvalidMessage)?; + let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?; + let mut consensus_transport = self.core.transport.clone(); + consensus_transport.set_id_numbers(new_nodes_map.iter() + .map(|(k, v)| (k.clone(), v.clone().expect("new_nodes_map is updated above so that every value is_some; qed"))) + .collect()); + let mut consensus_session = ConsensusSession::new(ConsensusSessionParams { + meta: self.core.meta.clone().into_consensus_meta(new_nodes_set.len()), + consensus_executor: ServersSetChangeAccessJob::new_on_master(admin_public, + old_nodes_set.clone(), + old_nodes_set.clone(), + new_nodes_set.clone(), + old_set_signature, + new_set_signature), + consensus_transport: consensus_transport, + })?; + consensus_session.initialize(new_nodes_set)?; + data.consensus_session = Some(consensus_session); + data.nodes = Some(new_nodes_map.into_iter() + .map(|(n, nn)| (n, NodeData::new(nn, !old_nodes_set.contains(&n)))) + .collect()); + return Ok(()); + } + + // otherwise => start sending ShareAdd-specific messages + Self::on_consensus_established(&self.core, &mut *data) + } + + /// Process single message. + pub fn process_message(&self, sender: &NodeId, message: &ShareAddMessage) -> Result<(), Error> { + if self.core.nonce != message.session_nonce() { + return Err(Error::ReplayProtection); + } + + match message { + &ShareAddMessage::ShareAddConsensusMessage(ref message) => + self.on_consensus_message(sender, message), + &ShareAddMessage::KeyShareCommon(ref message) => + self.on_common_key_share_data(sender, message), + &ShareAddMessage::NewAbsoluteTermShare(ref message) => + self.on_new_absolute_term(sender, message), + &ShareAddMessage::NewKeysDissemination(ref message) => + self.on_new_keys_dissemination(sender, message), + &ShareAddMessage::ShareAddError(ref message) => + self.on_session_error(sender, message), + } + } + + /// When consensus-related message is received. 
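+ ///
+ /// Non-master nodes create their consensus session lazily here, on the first
+ /// `InitializeConsensusSession` received from the master; the master starts the
+ /// ShareAdd-specific phase once the final confirmation makes consensus established.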
+ pub fn on_consensus_message(&self, sender: &NodeId, message: &ShareAddConsensusMessage) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + // start slave consensus session if needed + let mut data = self.data.lock(); + if data.consensus_session.is_none() && sender == &self.core.meta.master_node_id { + match &message.message { + &ConsensusMessageWithServersSecretMap::InitializeConsensusSession(ref message) => { + let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?; + let current_nodes_set = self.core.key_share.as_ref() + .map(|ks| ks.id_numbers.keys().cloned().collect()) + .unwrap_or_else(|| message.old_nodes_set.clone().into_iter().map(Into::into).collect()); + data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams { + meta: self.core.meta.clone().into_consensus_meta(message.new_nodes_set.len()), + consensus_executor: ServersSetChangeAccessJob::new_on_slave(admin_public, current_nodes_set), + consensus_transport: self.core.transport.clone(), + })?); + }, + _ => return Err(Error::InvalidStateForRequest), + } + } + + let (is_establishing_consensus, is_consensus_established, new_nodes_set) = { + let consensus_session = data.consensus_session.as_mut().ok_or(Error::InvalidMessage)?; + let is_establishing_consensus = consensus_session.state() == ConsensusSessionState::EstablishingConsensus; + let new_nodes_set = match &message.message { + &ConsensusMessageWithServersSecretMap::InitializeConsensusSession(ref message) => { + consensus_session.on_consensus_partial_request(sender, ServersSetChangeAccessRequest::from(message))?; + let new_nodes_set = message.new_nodes_set.iter() + .map(|(n, nn)| (n.clone().into(), Some(nn.clone().into()))) + .collect(); + // check nodes set on old nodes + if let Some(key_share) = self.core.key_share.as_ref() { + check_nodes_set(&key_share.id_numbers.keys().cloned().collect(), &new_nodes_set)?; + } + Some(new_nodes_set.into_iter() + .map(|(n, nn)| (n, NodeData::new(nn, !message.old_nodes_set.contains(&n.clone().into())))) + .collect()) + }, + &ConsensusMessageWithServersSecretMap::ConfirmConsensusInitialization(ref message) => { + consensus_session.on_consensus_partial_response(sender, message.is_confirmed)?; + None + }, + }; + + ( + is_establishing_consensus, + consensus_session.state() == ConsensusSessionState::ConsensusEstablished, + new_nodes_set + ) + }; + if let Some(new_nodes_set) = new_nodes_set { + data.nodes = Some(new_nodes_set); + } + if self.core.meta.self_node_id != self.core.meta.master_node_id || !is_establishing_consensus || !is_consensus_established { + return Ok(()); + } + + Self::on_consensus_established(&self.core, &mut *data) + } + + /// When common key share data is received by new node. 
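+ ///
+ /// Only the master node may send `KeyShareCommon`, and only new nodes accept it: it carries the
+ /// unchanged part of the key share (threshold, author, common/encrypted points) that a new node
+ /// must know before absolute term shares and disseminated keys can be processed.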
+ pub fn on_common_key_share_data(&self, sender: &NodeId, message: &KeyShareCommon) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + // only master can send this message + if sender != &self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } + + let mut data = self.data.lock(); + + // check state + if data.state == SessionState::ConsensusEstablishing && data.nodes.is_some() { + data.state = SessionState::WaitingForAbsoluteTermShare; + } else if data.state != SessionState::WaitingForAbsoluteTermShare { + return Err(Error::InvalidStateForRequest); + } + + { + // only new nodes are waiting for absolute term share + let nodes = data.nodes.as_ref() + .expect("nodes are filled during consensus establishing; WaitingForAbsoluteTermShare starts after consensus is established; qed"); + if !nodes[&self.core.meta.self_node_id].is_new_node { + return Err(Error::InvalidMessage); + } + + // we only expect this message once + if data.key_share_threshold.is_some() || data.key_share_author.is_some() || data.key_share_common_point.is_some() || data.key_share_encrypted_point.is_some() { + return Err(Error::InvalidStateForRequest); + } + } + + data.key_share_threshold = Some(message.threshold); + data.key_share_author = Some(message.author.clone().into()); + data.key_share_common_point = message.common_point.clone().map(Into::into); + data.key_share_encrypted_point = message.encrypted_point.clone().map(Into::into); + + Ok(()) + } + + /// When absolute term share is received. + pub fn on_new_absolute_term(&self, sender: &NodeId, message: &NewAbsoluteTermShare) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + let mut data = self.data.lock(); + + // check state + if data.state == SessionState::ConsensusEstablishing && data.nodes.is_some() { + data.state = SessionState::WaitingForAbsoluteTermShare; + } else if data.state != SessionState::WaitingForAbsoluteTermShare { + return Err(Error::InvalidStateForRequest); + } + + let refreshed_polynom1_sum = { + // only new nodes are waiting for absolute term share + let threshold = data.key_share_threshold.clone().ok_or(Error::InvalidMessage)?; + let nodes = data.nodes.as_mut() + .expect("nodes are filled during consensus establishing; WaitingForAbsoluteTermShare starts after consensus is established; qed"); + if !nodes[&self.core.meta.self_node_id].is_new_node { + return Err(Error::InvalidMessage); + } + + // update node data + { + let node_data = nodes.get_mut(sender).ok_or(Error::InvalidMessage)?; + if node_data.absolute_term_share.is_some() { + return Err(Error::InvalidStateForRequest); + } + if node_data.id_number.is_some() { + if node_data.id_number != Some(message.sender_id.clone().into()) { + return Err(Error::InvalidMessage); + } + } + + node_data.id_number = Some(message.sender_id.clone().into()); + node_data.absolute_term_share = Some(message.absolute_term_share.clone().into()); + } + + // if we haven't received shares from all old nodes => wait for more + if nodes.values().any(|nd| !nd.is_new_node && nd.absolute_term_share.is_none()) { + return Ok(()); + } + + // all old nodes have sent us their shares => generate/calculate secret polynom + { + let absolute_term_shares = nodes.values().filter_map(|nd| nd.absolute_term_share.as_ref()); + generate_refreshed_polynoms_for_new_nodes(absolute_term_shares, threshold)?
+ } + }; + data.refreshed_polynom1_sum = Some(refreshed_polynom1_sum); + + // now it is time to disseminate keys to all other nodes + data.state = SessionState::WaitingForKeysDissemination; + Self::disseminate_keys(&self.core, &mut *data) + } + + /// When keys dissemination message is received. + pub fn on_new_keys_dissemination(&self, sender: &NodeId, message: &NewKeysDissemination) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + let mut data = self.data.lock(); + + // check state + if data.state == SessionState::ConsensusEstablishing && data.nodes.is_some() { + data.state = SessionState::WaitingForKeysDissemination; + } else if data.state == SessionState::WaitingForAbsoluteTermShare { + return Err(Error::TooEarlyForRequest); + } else if data.state != SessionState::WaitingForKeysDissemination { + return Err(Error::InvalidStateForRequest); + } + + // check message + let threshold = self.core.key_share.as_ref().map(|ks| ks.threshold) + .unwrap_or_else(|| data.key_share_threshold.clone() + .expect("on old nodes key_share is_some; on new nodes key_share_threshold is_some after common share data is received; qed")); + if message.refreshed_publics.len() != threshold + 1 { + return Err(Error::InvalidMessage); + } + + // update node data + let is_new_node = { + let nodes = data.nodes.as_mut() + .expect("nodes are filled during consensus establishing; WaitingForKeysDissemination starts after consensus is established; qed"); + { + let node_data = nodes.get_mut(&sender).ok_or(Error::InvalidMessage)?; + if node_data.refreshed_secret1.is_some() || node_data.refreshed_publics.is_some() { + return Err(Error::InvalidStateForRequest); + } + + node_data.refreshed_secret1 = Some(message.refreshed_secret1.clone().into()); + node_data.refreshed_publics = Some(message.refreshed_publics.iter().cloned().map(Into::into).collect()); + } + + nodes[&self.core.meta.self_node_id].is_new_node + }; + + // receiving this message from master node on old node means that initialization has completed => disseminate our own keys + if !is_new_node && sender == &self.core.meta.master_node_id { + Self::disseminate_absolute_term_shares(&self.core, &mut *data)?; + Self::disseminate_keys(&self.core, &mut *data)?; + } + + // check if we have received keys from every other node + if data.nodes.as_ref() + .expect("nodes are filled during consensus establishing; WaitingForKeysDissemination starts after consensus is established; qed") + .iter().any(|(node_id, node_data)| node_id != &self.core.meta.self_node_id && + (node_data.refreshed_publics.is_none() || node_data.refreshed_secret1.is_none())) { + return Ok(()) + } + + // verify keys && complete session if keys are ok + Self::verify_keys(&self.core, &mut *data)?; + Self::complete_session(&self.core, &mut *data) + } + + /// When error has occurred on another node. + pub fn on_session_error(&self, sender: &NodeId, message: &ShareAddError) -> Result<(), Error> { + let mut data = self.data.lock(); + + warn!("{}: share add session failed with error: {} from {}", self.core.meta.self_node_id, message.error, sender); + + data.state = SessionState::Finished; + data.result = Some(Err(Error::Io(message.error.clone()))); + self.core.completed.notify_all(); + + Ok(()) + } + + /// Start sending ShareAdd-specific messages, when consensus is established.
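+ ///
+ /// Old nodes proceed straight to key dissemination (the master additionally sends the common
+ /// share data to new nodes), while new nodes first wait for absolute term shares.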
+ fn on_consensus_established(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + // update state + let is_old_node = !data.nodes.as_ref() + .expect("consensus is about nodes set; consensus is established; qed") + [&core.meta.self_node_id].is_new_node; + data.state = if is_old_node { SessionState::WaitingForKeysDissemination } else { SessionState::WaitingForAbsoluteTermShare }; + + // if on master node, send common shared data to every new node + let is_master_node = core.meta.self_node_id == core.meta.master_node_id; + if is_master_node { + Self::disseminate_common_share_data(core, data)?; + } + + // if on old node, send absolute term shares to every new node + if is_old_node { + Self::disseminate_absolute_term_shares(core, data)?; + } + + // if on old node, send keys to every node + if is_old_node { + Self::disseminate_keys(core, data)?; + } + + Ok(()) + } + + /// Disseminate absolute term of polynom1 data. + fn disseminate_absolute_term_shares(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + // compute/generate refreshed polynom1 + let old_key_share = core.key_share.as_ref() + .expect("disseminate_absolute_term_shares is only called on old nodes; key_share is filled in initialization phase on old nodes; qed"); + let nodes = data.nodes.as_ref() + .expect("nodes are filled during consensus establishing; absolute term shares are sent after consensus is established; qed"); + let num_new_nodes = nodes.values().filter(|nd| nd.is_new_node).count(); + let (absolute_term_shares, refreshed_polynom1_sum) = generate_refreshed_polynoms_for_existing_nodes( + num_new_nodes, old_key_share.threshold, &old_key_share.polynom1)?; + data.refreshed_polynom1_sum = Some(refreshed_polynom1_sum); + + // send absolute term share to every new node + let sender_id: &Secret = nodes[&core.meta.self_node_id].id_number.as_ref() + .expect("id_numbers are filled during consensus establishing; this method is called after consensus establishing; qed"); + for (i, new_node) in nodes.iter().filter(|&(_, nd)| nd.is_new_node).map(|(n, _)| n).enumerate() { + core.transport.send(new_node, ShareAddMessage::NewAbsoluteTermShare(NewAbsoluteTermShare { + session: core.meta.id.clone().into(), + session_nonce: core.nonce, + sender_id: sender_id.clone().into(), + absolute_term_share: absolute_term_shares[i].clone().into(), + }))?; + } + + Ok(()) + } + + /// Send common share data to every new node. + fn disseminate_common_share_data(core: &SessionCore, data: &SessionData) -> Result<(), Error> { + let old_key_share = core.key_share.as_ref() + .expect("disseminate_common_share_data is only called on master node; key_share is filled in initialization phase on master node; qed"); + let nodes = data.nodes.as_ref() + .expect("nodes are filled during consensus establishing; common share data sent after consensus is established; qed"); + for new_node in nodes.iter().filter(|&(_, nd)| nd.is_new_node).map(|(n, _)| n) { + core.transport.send(new_node, ShareAddMessage::KeyShareCommon(KeyShareCommon { + session: core.meta.id.clone().into(), + session_nonce: core.nonce, + threshold: old_key_share.threshold, + author: old_key_share.author.clone().into(), + common_point: old_key_share.common_point.clone().map(Into::into), + encrypted_point: old_key_share.encrypted_point.clone().map(Into::into), + }))?; + } + + Ok(()) + } + + /// Disseminate key refreshing data.
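+ ///
+ /// Roughly: with `f` denoting this node's `refreshed_polynom1_sum`, every other node `j` privately
+ /// receives `refreshed_secret1 = f(id_number_j)`, while the same `threshold + 1` refreshed public
+ /// values go to all nodes so receivers can run `refreshed_keys_verification` during KRV.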
+ fn disseminate_keys(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + // send required messages + let threshold = core.key_share.as_ref().map(|ks| ks.threshold) + .unwrap_or_else(|| data.key_share_threshold.clone() + .expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed")); + let refreshed_polynom1_sum = data.refreshed_polynom1_sum.as_ref() + .expect("disseminate_keys is only called after generating refreshed_polynom1_sum; qed"); + let refreshed_publics = math::refreshed_public_values_generation(threshold, &refreshed_polynom1_sum)?; + + // send calculated values + let nodes = data.nodes.as_mut() + .expect("nodes are filled during consensus establishing; keys are disseminated after consensus is established; qed"); + for (node, node_number) in nodes.iter().filter(|&(n, _)| n != &core.meta.self_node_id).map(|(n, nd)| (n, &nd.id_number)) { + // also send keys to every other node + let refreshed_secret1 = math::compute_polynom(refreshed_polynom1_sum, node_number.as_ref() + .expect("id_numbers are filled during consensus establishing; keys are disseminated after consensus is established; qed"))?; + core.transport.send(node, ShareAddMessage::NewKeysDissemination(NewKeysDissemination { + session: core.meta.id.clone().into(), + session_nonce: core.nonce, + refreshed_secret1: refreshed_secret1.into(), + refreshed_publics: refreshed_publics.iter().cloned().map(Into::into).collect(), + }))?; + } + + // 'receive' data from self + let self_node_data = nodes.get_mut(&core.meta.self_node_id) + .expect("data.nodes contains entry for every session node; this node is a part of the session; qed"); + self_node_data.refreshed_secret1 = Some(math::compute_polynom(refreshed_polynom1_sum, &self_node_data.id_number.as_ref() + .expect("id_numbers are filled during consensus establishing; keys are disseminated after consensus is established; qed"))?); + self_node_data.refreshed_publics = Some(refreshed_publics); + + Ok(()) + } + + /// Verify received keys values. + fn verify_keys(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + let threshold = core.key_share.as_ref().map(|ks| ks.threshold) + .unwrap_or_else(|| data.key_share_threshold.clone() + .expect("on old nodes key_share is_some; on new nodes key_share_threshold is_some after common share data is received; qed")); + let nodes = data.nodes.as_ref() + .expect("nodes are filled during consensus establishing; keys are verified after consensus is established; qed"); + let number_id = nodes[&core.meta.self_node_id].id_number.as_ref() + .expect("id_numbers are filled during consensus establishing; keys are verified after consensus is established; qed"); + for node_data in nodes.iter().filter(|&(n, _)| n != &core.meta.self_node_id).map(|(_, nd)| nd) { + let refreshed_secret1 = node_data.refreshed_secret1.as_ref().expect("keys received on KRD phase; KRV phase follows KRD phase; qed"); + let refreshed_publics = node_data.refreshed_publics.as_ref().expect("keys received on KRD phase; KRV phase follows KRD phase; qed"); + let is_key_verification_ok = math::refreshed_keys_verification(threshold, &number_id, refreshed_secret1, refreshed_publics)?; + + if !is_key_verification_ok { + // node has sent us incorrect values. In original ECDKG protocol we should have sent complaint here. + return Err(Error::InvalidMessage); + } + } + + Ok(()) + } + + /// Complete session. 
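+ ///
+ /// The refreshed `DocumentKeyShare` carries over threshold, author and the common/encrypted points,
+ /// but replaces `id_numbers`, `polynom1` (the refreshed sum) and `secret_share` (recomputed from the
+ /// received `refreshed_secret1` values); new nodes insert it, old nodes update their existing entry.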
+ fn complete_session(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + // compose updated key share + let nodes = data.nodes.as_ref() + .expect("nodes are filled during consensus establishing; session is completed after consensus is established; qed"); + let refreshed_key_share = DocumentKeyShare { + // values with the same value as before beginning of the session + threshold: core.key_share.as_ref().map(|ks| ks.threshold) + .unwrap_or_else(|| data.key_share_threshold.clone() + .expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed")), + author: core.key_share.as_ref().map(|ks| ks.author.clone()) + .unwrap_or_else(|| data.key_share_author.clone() + .expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed")), + common_point: core.key_share.as_ref().map(|ks| ks.common_point.clone()) + .unwrap_or_else(|| data.key_share_common_point.clone()), + encrypted_point: core.key_share.as_ref().map(|ks| ks.encrypted_point.clone()) + .unwrap_or_else(|| data.key_share_encrypted_point.clone()), + // below are updated values + id_numbers: nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.as_ref() + .expect("id_numbers are filled during consensus establishing; session is completed after consensus is established; qed").clone())).collect(), + polynom1: data.refreshed_polynom1_sum.clone().expect("this field is filled during KRD; session is completed after KRD; qed"), + secret_share: math::compute_secret_share(nodes.values() + .filter_map(|nd| nd.refreshed_secret1.as_ref()))?, + }; + + // save encrypted data to the key storage + data.state = SessionState::Finished; + if core.key_share.is_some() { + // TODO: if db was updated on some nodes && wasn't updated on others, this could lead to secret loss + // => need mechanism to confirm insert/update OR store all versions of shares and negotiate version on session start (part of consensus) + core.key_storage.update(core.meta.id.clone(), refreshed_key_share.clone()) + } else { + core.key_storage.insert(core.meta.id.clone(), refreshed_key_share.clone()) + }.map_err(|e| Error::KeyStorage(e.into()))?; + + // signal session completion + data.state = SessionState::Finished; + data.result = Some(Ok(())); + core.completed.notify_all(); + + Ok(()) + } +} + +impl Session for SessionImpl where T: SessionTransport + Send + Sync + 'static { + fn wait(&self) -> Result<(), Error> { + let mut data = self.data.lock(); + if !data.result.is_some() { + self.core.completed.wait(&mut data); + } + + data.result.clone() + .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") + } +} + +impl ClusterSession for SessionImpl where T: SessionTransport { + fn is_finished(&self) -> bool { + self.data.lock().state == SessionState::Finished + } + + fn on_session_timeout(&self) { + let mut data = self.data.lock(); + + warn!("{}: share add session failed with timeout", self.core.meta.self_node_id); + + data.state = SessionState::Finished; + data.result = Some(Err(Error::NodeDisconnected)); + self.core.completed.notify_all(); + } + + fn on_node_timeout(&self, node: &NodeId) { + let mut data = self.data.lock(); + + warn!("{}: share add session failed because {} connection has timed out", self.core.meta.self_node_id, node); + + data.state = SessionState::Finished; + data.result = Some(Err(Error::NodeDisconnected)); + self.core.completed.notify_all(); + } +} + +impl NodeData { + /// Create new node data.
+ pub fn new(id_number: Option, is_new_node: bool) -> Self { + NodeData { + id_number: id_number, + is_initialization_confirmed: false, + is_new_node: is_new_node, + absolute_term_share: None, + refreshed_secret1: None, + refreshed_publics: None, + } + } +} + +impl IsolatedSessionTransport { + pub fn new(session_id: SessionId, nonce: u64, cluster: Arc) -> Self { + IsolatedSessionTransport { + session: session_id, + nonce: nonce, + cluster: cluster, + id_numbers: None, + } + } +} + +impl JobTransport for IsolatedSessionTransport { + type PartialJobRequest = ServersSetChangeAccessRequest; + type PartialJobResponse = bool; + + fn send_partial_request(&self, node: &NodeId, request: ServersSetChangeAccessRequest) -> Result<(), Error> { + let id_numbers = self.id_numbers.as_ref() + .expect("partial requests are sent from master node only; on master node id_numers are filled during creation; qed"); + self.cluster.send(node, Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(ShareAddConsensusMessage { + session: self.session.clone().into(), + session_nonce: self.nonce, + message: ConsensusMessageWithServersSecretMap::InitializeConsensusSession(InitializeConsensusSessionWithServersSecretMap { + old_nodes_set: request.old_servers_set.into_iter().map(Into::into).collect(), + new_nodes_set: request.new_servers_set.into_iter().map(|n| (n.into(), id_numbers[&n].clone().into())).collect(), + old_set_signature: request.old_set_signature.into(), + new_set_signature: request.new_set_signature.into(), + }), + }))) + } + + fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { + self.cluster.send(node, Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(ShareAddConsensusMessage { + session: self.session.clone().into(), + session_nonce: self.nonce, + message: ConsensusMessageWithServersSecretMap::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: response, + }), + }))) + } +} + +impl SessionTransport for IsolatedSessionTransport { + fn set_id_numbers(&mut self, id_numbers: BTreeMap) { + self.id_numbers = Some(id_numbers); + } + + fn send(&self, node: &NodeId, message: ShareAddMessage) -> Result<(), Error> { + self.cluster.send(node, Message::ShareAdd(message)) + } +} + +fn check_nodes_set(old_nodes_set: &BTreeSet, new_nodes_set: &BTreeMap>) -> Result<(), Error> { + // it is impossible to remove nodes using share add session + if old_nodes_set.iter().any(|n| !new_nodes_set.contains_key(n)) { + return Err(Error::InvalidNodesConfiguration); + } + // it is impossible to not to add any nodes using share add session + if new_nodes_set.len() == old_nodes_set.len() { + return Err(Error::InvalidNodesConfiguration); + } + + Ok(()) +} + +fn generate_refreshed_polynoms_for_existing_nodes(new_nodes: usize, threshold: usize, existing_polynom1: &Vec) -> Result<(Vec, Vec), Error> { + // TODO: optimization: could add secrets instead of polynoms + let refreshed_polynoms1 = (0..new_nodes).map(|_| math::generate_random_polynom(threshold)).collect::, _>>()?; + let mut refreshed_polynom1_sum = existing_polynom1.clone(); + for refreshed_polynom1 in &refreshed_polynoms1 { + refreshed_polynom1_sum = math::add_polynoms(&refreshed_polynom1_sum, refreshed_polynom1, false)?; + } + + Ok(( + refreshed_polynoms1.into_iter().map(|p| p[0].clone()).collect(), + refreshed_polynom1_sum, + )) +} + +fn generate_refreshed_polynoms_for_new_nodes<'a, I>(absolute_term_shares: I, threshold: usize) -> Result, Error> where I: Iterator { + let mut new_polynom1 = 
math::generate_random_polynom(threshold)?; + let new_polynom_absolute_term = math::compute_additional_polynom1_absolute_term(absolute_term_shares)?; + new_polynom1[0] = new_polynom_absolute_term; + Ok(new_polynom1) +} + +#[cfg(test)] +pub mod tests { + use std::sync::Arc; + use std::collections::{VecDeque, BTreeMap, BTreeSet}; + use ethkey::{Random, Generator, Public, KeyPair, Signature, sign}; + use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage}; + use key_server_cluster::cluster::Cluster; + use key_server_cluster::cluster::tests::DummyCluster; + use key_server_cluster::cluster_sessions::ClusterSession; + use key_server_cluster::generation_session::tests::{Node as GenerationNode, generate_nodes_ids}; + use key_server_cluster::math; + use key_server_cluster::message::Message; + use key_server_cluster::servers_set_change_session::tests::generate_key; + use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash; + use key_server_cluster::admin_sessions::ShareChangeSessionMeta; + use super::{SessionImpl, SessionParams, IsolatedSessionTransport}; + + struct Node { + pub cluster: Arc, + pub key_storage: Arc, + pub session: SessionImpl, + } + + struct MessageLoop { + pub admin_key_pair: KeyPair, + pub original_key_pair: KeyPair, + pub old_nodes_set: BTreeSet, + pub new_nodes_set: BTreeSet, + pub old_set_signature: Signature, + pub new_set_signature: Signature, + pub nodes: BTreeMap, + pub queue: VecDeque<(NodeId, NodeId, Message)>, + } + + fn create_session(mut meta: ShareChangeSessionMeta, admin_public: Public, self_node_id: NodeId, cluster: Arc, key_storage: Arc) -> SessionImpl { + let session_id = meta.id.clone(); + meta.self_node_id = self_node_id; + SessionImpl::new(SessionParams { + meta: meta.clone(), + transport: IsolatedSessionTransport::new(session_id, 1, cluster), + key_storage: key_storage, + admin_public: Some(admin_public), + nonce: 1, + }).unwrap() + } + + fn create_node(meta: ShareChangeSessionMeta, admin_public: Public, node: GenerationNode) -> Node { + Node { + cluster: node.cluster.clone(), + key_storage: node.key_storage.clone(), + session: create_session(meta, admin_public, node.session.node().clone(), node.cluster, node.key_storage), + } + } + + /// This only works for schemes where threshold = 1 + pub fn check_secret_is_preserved(joint_key_pair: KeyPair, nodes: BTreeMap>) { + let n = nodes.len(); + let document_secret_plain = math::generate_random_point().unwrap(); + for n1 in 0..n { + for n2 in n1+1..n { + let share1 = nodes.values().nth(n1).unwrap().get(&SessionId::default()).unwrap(); + let share2 = nodes.values().nth(n2).unwrap().get(&SessionId::default()).unwrap(); + let id_number1 = share1.id_numbers[nodes.keys().nth(n1).unwrap()].clone(); + let id_number2 = share1.id_numbers[nodes.keys().nth(n2).unwrap()].clone(); + + // now encrypt and decrypt data + let (document_secret_decrypted, document_secret_decrypted_test) = + math::tests::do_encryption_and_decryption(1, + joint_key_pair.public(), + &[id_number1, id_number2], + &[share1.secret_share, share2.secret_share], + Some(joint_key_pair.secret()), + document_secret_plain.clone()); + + assert_eq!(document_secret_plain, document_secret_decrypted_test); + assert_eq!(document_secret_plain, document_secret_decrypted); + } + } + } + + impl MessageLoop { + pub fn new(t: usize, master_node_id: NodeId, old_nodes_set: BTreeSet, new_nodes_set: BTreeSet) -> Self { + // generate admin key pair + let admin_key_pair = Random.generate().unwrap(); + let admin_public = 
admin_key_pair.public().clone(); + + // run initial generation session + let gml = generate_key(t, old_nodes_set.clone()); + let original_secret = math::compute_joint_secret(gml.nodes.values() + .map(|nd| nd.key_storage.get(&SessionId::default()).unwrap().polynom1[0].clone()) + .collect::>() + .iter()).unwrap(); + let original_key_pair = KeyPair::from_secret(original_secret).unwrap(); + + // prepare sessions on all nodes + let meta = ShareChangeSessionMeta { + id: SessionId::default(), + self_node_id: NodeId::default(), + master_node_id: master_node_id, + }; + let new_nodes = new_nodes_set.iter() + .filter(|n| !old_nodes_set.contains(&n)) + .map(|new_node_id| { + let new_node_cluster = Arc::new(DummyCluster::new(new_node_id.clone())); + let new_node_key_storage = Arc::new(DummyKeyStorage::default()); + let new_node_session = create_session(meta.clone(), admin_public.clone(), new_node_id.clone(), new_node_cluster.clone(), new_node_key_storage.clone()); + Node { + cluster: new_node_cluster, + key_storage: new_node_key_storage, + session: new_node_session, + } + }); + let old_nodes = gml.nodes.into_iter().map(|gn| create_node(meta.clone(), admin_public.clone(), gn.1)); + let nodes = old_nodes.chain(new_nodes).map(|n| (n.session.core.meta.self_node_id.clone(), n)).collect(); + + let old_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&old_nodes_set)).unwrap(); + let new_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap(); + MessageLoop { + admin_key_pair: admin_key_pair, + original_key_pair: original_key_pair, + old_nodes_set: old_nodes_set.clone(), + new_nodes_set: new_nodes_set.clone(), + old_set_signature: old_set_signature, + new_set_signature: new_set_signature, + nodes: nodes, + queue: Default::default(), + } + } + + pub fn run(&mut self) { + while let Some((from, to, message)) = self.take_message() { + self.process_message((from, to, message)).unwrap(); + } + } + + pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { + self.nodes.values() + .filter_map(|n| n.cluster.take_message().map(|m| (n.session.core.meta.self_node_id.clone(), m.0, m.1))) + .nth(0) + .or_else(|| self.queue.pop_front()) + } + + pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { + match { match msg.2 { + Message::ShareAdd(ref message) => + self.nodes[&msg.1].session.process_message(&msg.0, message), + _ => unreachable!("only servers set change messages are expected"), + } } { + Ok(_) => Ok(()), + Err(Error::TooEarlyForRequest) => { + self.queue.push_back(msg); + Ok(()) + }, + Err(err) => Err(err), + } + } + } + + #[test] + fn node_add_fails_if_nodes_removed() { + let old_nodes_set = generate_nodes_ids(3); + let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); + let node_to_remove_id = old_nodes_set.iter().cloned().nth(1).unwrap(); + let mut new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(generate_nodes_ids(1)).collect(); + new_nodes_set.remove(&node_to_remove_id); + let ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone()); + assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(new_nodes_set), + Some(ml.old_set_signature.clone()), + Some(ml.new_set_signature.clone()) + ).unwrap_err(), Error::InvalidNodesConfiguration); + } + + #[test] + fn node_add_fails_if_no_nodes_added() { + let old_nodes_set = generate_nodes_ids(3); + let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); + let new_nodes_set = 
old_nodes_set.clone(); + let ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone()); + assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(new_nodes_set), + Some(ml.old_set_signature.clone()), + Some(ml.new_set_signature.clone()) + ).unwrap_err(), Error::InvalidNodesConfiguration); + } + + #[test] + fn node_add_fails_if_started_on_adding_node() { + let old_nodes_set = generate_nodes_ids(3); + let nodes_to_add_set = generate_nodes_ids(1); + let master_node_id = nodes_to_add_set.iter().cloned().nth(0).unwrap(); + let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(nodes_to_add_set.into_iter()).collect(); + let ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone()); + assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(new_nodes_set), + Some(ml.old_set_signature.clone()), + Some(ml.new_set_signature.clone()) + ).unwrap_err(), Error::KeyStorage("key share is not found on master node".into())); + } + + #[test] + fn node_add_fails_if_initialized_twice() { + let old_nodes_set = generate_nodes_ids(3); + let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); + let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(generate_nodes_ids(1)).collect(); + let ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone()); + assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(new_nodes_set.clone()), + Some(ml.old_set_signature.clone()), + Some(ml.new_set_signature.clone()) + ), Ok(())); + assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(new_nodes_set), + Some(ml.old_set_signature.clone()), + Some(ml.new_set_signature.clone()) + ), Err(Error::InvalidStateForRequest)); + } + + #[test] + fn node_add_fails_if_started_without_signatures() { + let old_nodes_set = generate_nodes_ids(3); + let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); + let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(generate_nodes_ids(1)).collect(); + let ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone()); + assert_eq!(ml.nodes[&master_node_id].session.initialize(None, None, None), Err(Error::InvalidMessage)); + } + + #[test] + fn nodes_added_using_share_add() { + let test_cases = vec![(3, 1), (3, 3)]; + for (n, nodes_to_add) in test_cases { + // generate key && prepare ShareAdd sessions + let old_nodes_set = generate_nodes_ids(n); + let new_nodes_set: BTreeSet<_> = old_nodes_set.clone().into_iter().chain(generate_nodes_ids(nodes_to_add)).collect(); + let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); + let mut ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, new_nodes_set.clone()); + + // initialize session on master node && run to completion + ml.nodes[&master_node_id].session.initialize(Some(new_nodes_set), + Some(ml.old_set_signature.clone()), + Some(ml.new_set_signature.clone())).unwrap(); + ml.run(); + + // check that session has completed on all nodes + assert!(ml.nodes.values().all(|n| n.session.is_finished())); + + // check that secret is still the same as before adding the share + check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter().map(|(k, v)| (k.clone(), v.key_storage.clone())).collect()); + } + } +} diff --git a/secret_store/src/key_server_cluster/admin_sessions/share_change_session.rs b/secret_store/src/key_server_cluster/admin_sessions/share_change_session.rs new file mode 100644 index 000000000..5cf9da377 --- /dev/null 
+++ b/secret_store/src/key_server_cluster/admin_sessions/share_change_session.rs @@ -0,0 +1,384 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use std::collections::{BTreeSet, BTreeMap}; +use ethkey::Secret; +use key_server_cluster::{Error, NodeId, SessionId, KeyStorage}; +use key_server_cluster::cluster::Cluster; +use key_server_cluster::cluster_sessions::ClusterSession; +use key_server_cluster::math; +use key_server_cluster::jobs::servers_set_change_access_job::ServersSetChangeAccessRequest; +use key_server_cluster::jobs::job_session::JobTransport; +use key_server_cluster::message::{Message, ServersSetChangeMessage, ServersSetChangeShareAddMessage, ServersSetChangeShareMoveMessage, + ServersSetChangeShareRemoveMessage}; +use key_server_cluster::share_add_session::{SessionTransport as ShareAddSessionTransport, + SessionImpl as ShareAddSessionImpl, SessionParams as ShareAddSessionParams}; +use key_server_cluster::share_move_session::{SessionTransport as ShareMoveSessionTransport, + SessionImpl as ShareMoveSessionImpl, SessionParams as ShareMoveSessionParams}; +use key_server_cluster::share_remove_session::{SessionTransport as ShareRemoveSessionTransport, + SessionImpl as ShareRemoveSessionImpl, SessionParams as ShareRemoveSessionParams}; +use key_server_cluster::message::{ShareAddMessage, ShareMoveMessage, ShareRemoveMessage}; +use key_server_cluster::admin_sessions::ShareChangeSessionMeta; + +/// Single session meta-change session. Brief overview: +/// 1) new shares are added to the session +/// 2) shares are moved between nodes +/// 3) shares are removed from nodes +pub struct ShareChangeSession { + /// Servers set change session id. + session_id: SessionId, + /// Session nonce. + nonce: u64, + /// Share change session meta. + meta: ShareChangeSessionMeta, + /// Cluster. + cluster: Arc, + /// Key storage. + key_storage: Arc, + /// Old nodes set. + old_nodes_set: BTreeSet, + /// Nodes to add shares for. + nodes_to_add: Option>, + /// Nodes to move shares from/to. + nodes_to_move: Option>, + /// Nodes to remove shares from. + nodes_to_remove: Option>, + /// Share add session. + share_add_session: Option>, + /// Share move session. + share_move_session: Option>, + /// Share remove session. + share_remove_session: Option>, + /// Is finished. + is_finished: bool, +} + +/// Share change session plan. +pub struct ShareChangeSessionPlan { + /// Nodes to add shares for. + pub nodes_to_add: BTreeMap, + /// Nodes to move shares from/to (keys = target nodes, values = source nodes). + pub nodes_to_move: BTreeMap, + /// Nodes to remove shares from. + pub nodes_to_remove: BTreeSet, +} + +/// Session parameters. +pub struct ShareChangeSessionParams { + /// Servers set change session id. + pub session_id: SessionId, + /// Session nonce. + pub nonce: u64, + /// Share change session meta. + pub meta: ShareChangeSessionMeta, + /// Cluster. 
+ pub cluster: Arc, + /// Keys storage. + pub key_storage: Arc, + /// Old nodes set. + pub old_nodes_set: BTreeSet, + /// Session plan. + pub plan: ShareChangeSessionPlan, +} + +/// Share add session transport. +#[derive(Clone)] +pub struct ShareChangeTransport { + /// Servers set change session id. + session_id: SessionId, + /// Session nonce. + nonce: u64, + /// Cluster. + cluster: Arc, +} + +impl ShareChangeSession { + /// Create new share change session. + pub fn new(params: ShareChangeSessionParams) -> Result { + // we can't create sessions right now, because key share is read when session is created, but it can change in previous session + let nodes_to_add = if !params.plan.nodes_to_add.is_empty() { Some(params.plan.nodes_to_add) } else { None }; + let nodes_to_remove = if !params.plan.nodes_to_remove.is_empty() { Some(params.plan.nodes_to_remove) } else { None }; + let nodes_to_move = if !params.plan.nodes_to_move.is_empty() { Some(params.plan.nodes_to_move) } else { None }; + debug_assert!(nodes_to_add.is_some() || nodes_to_move.is_some() || nodes_to_remove.is_some()); + + Ok(ShareChangeSession { + session_id: params.session_id, + nonce: params.nonce, + meta: params.meta, + cluster: params.cluster, + key_storage: params.key_storage, + old_nodes_set: params.old_nodes_set, + nodes_to_add: nodes_to_add, + nodes_to_remove: nodes_to_remove, + nodes_to_move: nodes_to_move, + share_add_session: None, + share_move_session: None, + share_remove_session: None, + is_finished: false, + }) + } + + /// Is finished?. + pub fn is_finished(&self) -> bool { + self.is_finished + } + + /// Is master node?. + pub fn is_master(&self) -> bool { + self.meta.self_node_id == self.meta.master_node_id + } + + /// Initialize session (on master node). + pub fn initialize(&mut self) -> Result<(), Error> { + self.proceed_to_next_state() + } + + /// When share-add message is received. + pub fn on_share_add_message(&mut self, sender: &NodeId, message: &ShareAddMessage) -> Result<(), Error> { + if self.share_add_session.is_none() { + self.create_share_add_session()?; + } + + let change_state_needed = self.share_add_session.as_ref() + .map(|share_add_session| { + let was_finished = share_add_session.is_finished(); + share_add_session.process_message(sender, message) + .map(|_| share_add_session.is_finished() && !was_finished) + }) + .unwrap_or(Err(Error::InvalidMessage))?; + if change_state_needed { + self.proceed_to_next_state()?; + } + + Ok(()) + } + + /// When share-move message is received. + pub fn on_share_move_message(&mut self, sender: &NodeId, message: &ShareMoveMessage) -> Result<(), Error> { + if self.share_move_session.is_none() { + self.create_share_move_session()?; + } + + let change_state_needed = self.share_move_session.as_ref() + .map(|share_move_session| { + let was_finished = share_move_session.is_finished(); + share_move_session.process_message(sender, message) + .map(|_| share_move_session.is_finished() && !was_finished) + }) + .unwrap_or(Err(Error::InvalidMessage))?; + if change_state_needed { + self.proceed_to_next_state()?; + } + + Ok(()) + } + + /// When share-remove message is received. 
+ pub fn on_share_remove_message(&mut self, sender: &NodeId, message: &ShareRemoveMessage) -> Result<(), Error> { + if self.share_remove_session.is_none() { + self.create_share_remove_session()?; + } + + let change_state_needed = self.share_remove_session.as_ref() + .map(|share_remove_session| { + let was_finished = share_remove_session.is_finished(); + share_remove_session.process_message(sender, message) + .map(|_| share_remove_session.is_finished() && !was_finished) + }) + .unwrap_or(Err(Error::InvalidMessage))?; + if change_state_needed { + self.proceed_to_next_state()?; + } + + Ok(()) + } + + /// Create new share add session. + fn create_share_add_session(&mut self) -> Result<(), Error> { + let nodes_to_add = self.nodes_to_add.take().ok_or(Error::InvalidStateForRequest)?; + let new_nodes_set = self.old_nodes_set.iter().map(|n| (n.clone(), None)) + .chain(nodes_to_add.clone().into_iter().map(|(k, v)| (k, Some(v)))) + .collect(); + let share_add_session = ShareAddSessionImpl::new(ShareAddSessionParams { + meta: self.meta.clone(), + nonce: self.nonce, + transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()), + key_storage: self.key_storage.clone(), + admin_public: None, + })?; + share_add_session.set_consensus_output(self.old_nodes_set.clone(), new_nodes_set)?; + self.share_add_session = Some(share_add_session); + Ok(()) + } + + /// Create new share move session. + fn create_share_move_session(&mut self) -> Result<(), Error> { + let nodes_to_move = self.nodes_to_move.take().ok_or(Error::InvalidStateForRequest)?; + let share_move_session = ShareMoveSessionImpl::new(ShareMoveSessionParams { + meta: self.meta.clone(), + nonce: self.nonce, + transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()), + key_storage: self.key_storage.clone(), + admin_public: None, + })?; + share_move_session.set_consensus_output(nodes_to_move)?; + self.share_move_session = Some(share_move_session); + Ok(()) + } + + /// Create new share remove session. + fn create_share_remove_session(&mut self) -> Result<(), Error> { + let nodes_to_remove = self.nodes_to_remove.take().ok_or(Error::InvalidStateForRequest)?; + let share_remove_session = ShareRemoveSessionImpl::new(ShareRemoveSessionParams { + meta: self.meta.clone(), + nonce: self.nonce, + transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()), + key_storage: self.key_storage.clone(), + admin_public: None, + })?; + share_remove_session.set_consensus_output(nodes_to_remove)?; + self.share_remove_session = Some(share_remove_session); + Ok(()) + } + + /// Proceed to the next state. 
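+ ///
+ /// On the master node the pending sub-sessions are started in a fixed order: share add, then share
+ /// move, then share remove; non-master nodes only mark the session finished once no sub-session
+ /// remains pending.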
+ fn proceed_to_next_state(&mut self) -> Result<(), Error> { + if self.meta.self_node_id != self.meta.master_node_id { + if self.nodes_to_add.is_none() && self.nodes_to_move.is_none() && self.nodes_to_remove.is_none() { + self.is_finished = true; + } + return Ok(()); + } + + if self.nodes_to_add.is_some() { + self.create_share_add_session()?; + return self.share_add_session.as_ref() + .expect("either create_share_add_session fails, or session is created; qed") + .initialize(None, None, None); + } + + if self.nodes_to_move.is_some() { + self.create_share_move_session()?; + return self.share_move_session.as_ref() + .expect("either create_share_move_session fails, or session is created; qed") + .initialize(None, None, None); + } + + if self.nodes_to_remove.is_some() { + self.create_share_remove_session()?; + return self.share_remove_session.as_ref() + .expect("either create_share_remove_session fails, or session is created; qed") + .initialize(None, None, None); + } + + self.is_finished = true; + + Ok(()) + } +} + +impl ShareChangeTransport { + pub fn new(session_id: SessionId, nonce: u64, cluster: Arc) -> Self { + ShareChangeTransport { + session_id: session_id, + nonce: nonce, + cluster: cluster, + } + } +} + +impl JobTransport for ShareChangeTransport { + type PartialJobRequest = ServersSetChangeAccessRequest; + type PartialJobResponse = bool; + + fn send_partial_request(&self, _node: &NodeId, _request: ServersSetChangeAccessRequest) -> Result<(), Error> { + unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed") + } + + fn send_partial_response(&self, _node: &NodeId, _response: bool) -> Result<(), Error> { + unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed") + } +} + +impl ShareAddSessionTransport for ShareChangeTransport { + fn set_id_numbers(&mut self, _id_numbers: BTreeMap) { + unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed") + } + + fn send(&self, node: &NodeId, message: ShareAddMessage) -> Result<(), Error> { + self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(ServersSetChangeShareAddMessage { + session: self.session_id.clone().into(), + session_nonce: self.nonce, + message: message, + }))) + } +} + +impl ShareMoveSessionTransport for ShareChangeTransport { + fn set_shares_to_move_reversed(&mut self, _shares_to_move: BTreeMap) { + unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed") + } + + fn send(&self, node: &NodeId, message: ShareMoveMessage) -> Result<(), Error> { + self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ServersSetChangeShareMoveMessage { + session: self.session_id.clone().into(), + session_nonce: self.nonce, + message: message, + }))) + } +} + +impl ShareRemoveSessionTransport for ShareChangeTransport { + fn send(&self, node: &NodeId, message: ShareRemoveMessage) -> Result<(), Error> { + self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ServersSetChangeShareRemoveMessage { + session: self.session_id.clone().into(), + session_nonce: self.nonce, + message: message, + }))) + } +} + +/// Prepare share change plan for moving from old `session_nodes` to `new_nodes_set`. 
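+///
+/// For example (a sketch, with letters standing in for node ids): going from `{A, B, C}` to
+/// `{B, C, D, E}` pairs the single removal `A` with one of the additions, producing one move of
+/// `A`'s share to that node plus one plain addition for the remaining new node; only the leftovers
+/// after pairing stay in `nodes_to_add` / `nodes_to_remove`.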
+pub fn prepare_share_change_session_plan(session_nodes: &BTreeSet, new_nodes_set: &BTreeSet) -> Result { + let mut nodes_to_add: BTreeSet<_> = new_nodes_set.difference(&session_nodes).cloned().collect(); + let mut nodes_to_move = BTreeMap::new(); + let mut nodes_to_remove: BTreeSet<_> = session_nodes.difference(&new_nodes_set).cloned().collect(); + while !nodes_to_remove.is_empty() && !nodes_to_add.is_empty() { + let source_node = nodes_to_remove.iter().cloned().nth(0).expect("nodes_to_remove.is_empty is checked in while condition; qed"); + let target_node = nodes_to_add.iter().cloned().nth(0).expect("nodes_to_add.is_empty is checked in while condition; qed"); + nodes_to_remove.remove(&source_node); + nodes_to_add.remove(&target_node); + nodes_to_move.insert(target_node, source_node); + } + + Ok(ShareChangeSessionPlan { + nodes_to_add: nodes_to_add.into_iter() + .map(|n| math::generate_random_scalar().map(|s| (n, s))) + .collect::, _>>()?, + nodes_to_move: nodes_to_move, + nodes_to_remove: nodes_to_remove, + }) +} + +impl ShareChangeSessionPlan { + /// Is empty (nothing-to-do) plan? + pub fn is_empty(&self) -> bool { + self.nodes_to_add.is_empty() + && self.nodes_to_move.is_empty() + && self.nodes_to_remove.is_empty() + } +} diff --git a/secret_store/src/key_server_cluster/admin_sessions/share_move_session.rs b/secret_store/src/key_server_cluster/admin_sessions/share_move_session.rs new file mode 100644 index 000000000..0dc3cce84 --- /dev/null +++ b/secret_store/src/key_server_cluster/admin_sessions/share_move_session.rs @@ -0,0 +1,829 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use std::collections::{BTreeMap, BTreeSet}; +use parking_lot::{Mutex, Condvar}; +use ethkey::{Public, Secret, Signature}; +use key_server_cluster::{Error, NodeId, SessionId, DocumentKeyShare, KeyStorage}; +use key_server_cluster::cluster::Cluster; +use key_server_cluster::cluster_sessions::ClusterSession; +use key_server_cluster::message::{Message, ShareMoveMessage, ShareMoveConsensusMessage, + ShareMoveRequest, ShareMove, ShareMoveConfirm, ShareMoveError, ConsensusMessageWithServersMap, + InitializeConsensusSessionWithServersMap, ConfirmConsensusInitialization}; +use key_server_cluster::jobs::job_session::JobTransport; +use key_server_cluster::jobs::dummy_job::{DummyJob, DummyJobTransport}; +use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest}; +use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession}; +use key_server_cluster::admin_sessions::ShareChangeSessionMeta; + +/// Share move session API. +pub trait Session: Send + Sync + 'static { + /// Wait until session is completed. + fn wait(&self) -> Result<(), Error>; +} + +/// Share move session transport. 
+pub trait SessionTransport: Clone + JobTransport { + /// Send message to given node. + fn send(&self, node: &NodeId, message: ShareMoveMessage) -> Result<(), Error>; + /// Set share destinations. + fn set_shares_to_move_reversed(&mut self, shares_to_move_reversed: BTreeMap); +} + +/// Share move session. +pub struct SessionImpl { + /// Session core. + core: SessionCore, + /// Session data. + data: Mutex>, +} + +/// Immutable session data. +struct SessionCore { + /// Session metadata. + pub meta: ShareChangeSessionMeta, + /// Session-level nonce. + pub nonce: u64, + /// Original key share (for old nodes only). + pub key_share: Option, + /// Session transport to communicate to other cluster nodes. + pub transport: T, + /// Key storage. + pub key_storage: Arc, + /// Administrator public key. + pub admin_public: Option, + /// SessionImpl completion condvar. + pub completed: Condvar, +} + +/// Share move consensus session type. +type ShareMoveChangeConsensusSession = ConsensusSession; + +/// Mutable session data. +struct SessionData { + /// Session state. + pub state: SessionState, + /// Consensus session. + pub consensus_session: Option>, + /// Shares to move. Keys = new nodes, Values = old nodes. + pub shares_to_move_reversed: Option>, + /// Reversed shares to move. Keys = old nodes, Values = new nodes. + pub shares_to_move: Option>, + /// Move confirmations to receive. + pub move_confirmations_to_receive: Option>, + /// Received key share (filled on destination nodes only). + pub received_key_share: Option, + /// Share move change result. + pub result: Option>, +} + +/// SessionImpl creation parameters +pub struct SessionParams { + /// Session meta. + pub meta: ShareChangeSessionMeta, + /// Session nonce. + pub nonce: u64, + /// Session transport to communicate to other cluster nodes. + pub transport: T, + /// Key storage. + pub key_storage: Arc, + /// Administrator public key. + pub admin_public: Option, +} + +/// Share move session state. +#[derive(Debug, PartialEq)] +enum SessionState { + /// State when consensus is establishing. + ConsensusEstablishing, + /// Waiting for move confirmation. + WaitingForMoveConfirmation, + /// Session is completed. + Finished, +} + +/// Isolated ShareMove session transport. +#[derive(Clone)] +pub struct IsolatedSessionTransport { + /// Key id. + session: SessionId, + /// Session-level nonce. + nonce: u64, + /// Shares to move between. Keys = new nodes, values = old nodes. + shares_to_move_reversed: Option>, + /// Cluster. + cluster: Arc, +} + +impl SessionImpl where T: SessionTransport { + /// Create new share move session. + pub fn new(params: SessionParams) -> Result { + Ok(SessionImpl { + core: SessionCore { + meta: params.meta.clone(), + nonce: params.nonce, + key_share: params.key_storage.get(¶ms.meta.id).ok(), // ignore error, it will be checked later + transport: params.transport, + key_storage: params.key_storage, + admin_public: params.admin_public, + completed: Condvar::new(), + }, + data: Mutex::new(SessionData { + state: SessionState::ConsensusEstablishing, + consensus_session: None, + shares_to_move_reversed: None, + shares_to_move: None, + move_confirmations_to_receive: None, + received_key_share: None, + result: None, + }), + }) + } + + /// Set pre-established consensus data. 
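+ /// Typically called before `initialize` when consensus has already been established by an
+ /// enclosing servers set change session, so that `initialize` can skip consensus establishment.
+ /// `shares_to_move_reversed` maps destination (new) nodes to the source (old) nodes whose shares
+ /// they receive.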
+ pub fn set_consensus_output(&self, shares_to_move_reversed: BTreeMap) -> Result<(), Error> { + let mut data = self.data.lock(); + + // check state + if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() { + return Err(Error::InvalidStateForRequest); + } + + let old_id_numbers = self.core.key_share.as_ref().map(|ks| &ks.id_numbers); + check_shares_to_move(&self.core.meta.self_node_id, &shares_to_move_reversed, old_id_numbers)?; + + data.move_confirmations_to_receive = Some(shares_to_move_reversed.keys().cloned().collect()); + data.shares_to_move = Some(shares_to_move_reversed.iter().map(|(k, v)| (v.clone(), k.clone())).collect()); + data.shares_to_move_reversed = Some(shares_to_move_reversed); + + Ok(()) + } + + /// Initialize share add session on master node. + pub fn initialize(&self, shares_to_move_reversed: Option>, old_set_signature: Option, new_set_signature: Option) -> Result<(), Error> { + debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id); + + let mut data = self.data.lock(); + + // check state + if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() { + return Err(Error::InvalidStateForRequest); + } + + // if consensus is not yet established => start consensus session + let is_consensus_pre_established = data.shares_to_move.is_some(); + if !is_consensus_pre_established { + let shares_to_move_reversed = shares_to_move_reversed.ok_or(Error::InvalidMessage)?; + let key_share = self.core.key_share.as_ref().ok_or(Error::KeyStorage("key share is not found on master node".into()))?; + check_shares_to_move(&self.core.meta.self_node_id, &shares_to_move_reversed, Some(&key_share.id_numbers))?; + + let old_set_signature = old_set_signature.ok_or(Error::InvalidMessage)?; + let new_set_signature = new_set_signature.ok_or(Error::InvalidMessage)?; + let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?; + let old_nodes_set: BTreeSet<_> = key_share.id_numbers.keys().cloned().collect(); + let mut all_nodes_set = old_nodes_set.clone(); + let mut new_nodes_set = all_nodes_set.clone(); + for (target, source) in &shares_to_move_reversed { + new_nodes_set.remove(source); + new_nodes_set.insert(target.clone()); + all_nodes_set.insert(target.clone()); + } + let mut consensus_transport = self.core.transport.clone(); + consensus_transport.set_shares_to_move_reversed(shares_to_move_reversed.clone()); + + let mut consensus_session = ConsensusSession::new(ConsensusSessionParams { + meta: self.core.meta.clone().into_consensus_meta(all_nodes_set.len()), + consensus_executor: ServersSetChangeAccessJob::new_on_master(admin_public, + old_nodes_set.clone(), + old_nodes_set.clone(), + new_nodes_set, + old_set_signature, + new_set_signature), + consensus_transport: consensus_transport, + })?; + consensus_session.initialize(all_nodes_set)?; + data.consensus_session = Some(consensus_session); + data.move_confirmations_to_receive = Some(shares_to_move_reversed.keys().cloned().collect()); + data.shares_to_move = Some(shares_to_move_reversed.iter().map(|(k, v)| (v.clone(), k.clone())).collect()); + data.shares_to_move_reversed = Some(shares_to_move_reversed); + return Ok(()); + } + + // otherwise => start sending ShareMove-specific messages + Self::on_consensus_established(&self.core, &mut *data) + } + + /// Process single message. 
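+ /// Messages carrying a session nonce different from ours are rejected with
+ /// `Error::ReplayProtection` before being dispatched to the handler for their variant.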
+ pub fn process_message(&self, sender: &NodeId, message: &ShareMoveMessage) -> Result<(), Error> { + if self.core.nonce != message.session_nonce() { + return Err(Error::ReplayProtection); + } + + match message { + &ShareMoveMessage::ShareMoveConsensusMessage(ref message) => + self.on_consensus_message(sender, message), + &ShareMoveMessage::ShareMoveRequest(ref message) => + self.on_share_move_request(sender, message), + &ShareMoveMessage::ShareMove(ref message) => + self.on_share_move(sender, message), + &ShareMoveMessage::ShareMoveConfirm(ref message) => + self.on_share_move_confirmation(sender, message), + &ShareMoveMessage::ShareMoveError(ref message) => + self.on_session_error(sender, message), + } + } + + /// When consensus-related message is received. + pub fn on_consensus_message(&self, sender: &NodeId, message: &ShareMoveConsensusMessage) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + // start slave consensus session if needed + let mut data = self.data.lock(); + if data.consensus_session.is_none() && sender == &self.core.meta.master_node_id { + match &message.message { + &ConsensusMessageWithServersMap::InitializeConsensusSession(ref message) => { + let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?; + let current_nodes_set = self.core.key_share.as_ref() + .map(|ks| ks.id_numbers.keys().cloned().collect()) + .unwrap_or_else(|| message.old_nodes_set.clone().into_iter().map(Into::into).collect()); + let all_nodes_set_len = message.new_nodes_set.keys().chain(message.old_nodes_set.iter()).collect::>().len(); + data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams { + meta: self.core.meta.clone().into_consensus_meta(all_nodes_set_len), + consensus_executor: ServersSetChangeAccessJob::new_on_slave(admin_public, current_nodes_set), + consensus_transport: self.core.transport.clone(), + })?); + }, + _ => return Err(Error::InvalidStateForRequest), + } + } + + let (is_establishing_consensus, is_consensus_established, shares_to_move_reversed) = { + let consensus_session = data.consensus_session.as_mut().ok_or(Error::InvalidMessage)?; + let is_establishing_consensus = consensus_session.state() == ConsensusSessionState::EstablishingConsensus; + let shares_to_move_reversed = match &message.message { + &ConsensusMessageWithServersMap::InitializeConsensusSession(ref message) => { + consensus_session.on_consensus_partial_request(sender, ServersSetChangeAccessRequest::from(message))?; + let shares_to_move_reversed = message.new_nodes_set.iter() + .filter(|&(old, new)| old != new) + .map(|(old, new)| (old.clone().into(), new.clone().into())) + .collect::>(); + check_shares_to_move(&self.core.meta.self_node_id, &shares_to_move_reversed, self.core.key_share.as_ref().map(|ks| &ks.id_numbers))?; + Some(shares_to_move_reversed) + }, + &ConsensusMessageWithServersMap::ConfirmConsensusInitialization(ref message) => { + consensus_session.on_consensus_partial_response(sender, message.is_confirmed)?; + None + }, + }; + + ( + is_establishing_consensus, + consensus_session.state() == ConsensusSessionState::ConsensusEstablished, + shares_to_move_reversed + ) + }; + + if let Some(shares_to_move_reversed) = shares_to_move_reversed { + data.move_confirmations_to_receive = Some(shares_to_move_reversed.keys().cloned().collect()); + data.shares_to_move = Some(shares_to_move_reversed.iter().map(|(k, v)| (v.clone(), k.clone())).collect()); + data.shares_to_move_reversed = 
Some(shares_to_move_reversed); + } + if self.core.meta.self_node_id != self.core.meta.master_node_id || !is_establishing_consensus || !is_consensus_established { + return Ok(()); + } + + Self::on_consensus_established(&self.core, &mut *data) + } + + /// When share move request is received. + pub fn on_share_move_request(&self, sender: &NodeId, message: &ShareMoveRequest) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + // awaiting this message from master node only + if sender != &self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } + + // check state + let mut data = self.data.lock(); + if data.state == SessionState::ConsensusEstablishing && data.shares_to_move.is_some() { + data.state = SessionState::WaitingForMoveConfirmation; + } else if data.state != SessionState::WaitingForMoveConfirmation { + return Err(Error::InvalidStateForRequest); + } + + // move share + { + let shares_to_move = data.shares_to_move.as_ref() + .expect("shares_to_move are filled during consensus establishing; share move requests are processed after this; qed"); + if let Some(share_destination) = shares_to_move.get(&self.core.meta.self_node_id) { + Self::move_share(&self.core, share_destination)?; + } else { + return Err(Error::InvalidMessage); + } + } + + // and complete session + Self::complete_session(&self.core, &mut *data) + } + + /// When moving share is received. + pub fn on_share_move(&self, sender: &NodeId, message: &ShareMove) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + // check state + let mut data = self.data.lock(); + if data.state == SessionState::ConsensusEstablishing && data.shares_to_move.is_some() { + data.state = SessionState::WaitingForMoveConfirmation; + } else if data.state != SessionState::WaitingForMoveConfirmation { + return Err(Error::InvalidStateForRequest); + } + + // check that we are expecting this share + if data.shares_to_move_reversed.as_ref() + .expect("shares_to_move are filled during consensus establishing; share moves are processed after this; qed") + .get(&self.core.meta.self_node_id) != Some(sender) { + return Err(Error::InvalidMessage); + } + + // update state + let is_last_confirmation = { + let move_confirmations_to_receive = data.move_confirmations_to_receive.as_mut() + .expect("move_confirmations_to_receive are filled during consensus establishing; share moves are processed after this; qed"); + move_confirmations_to_receive.remove(&self.core.meta.self_node_id); + move_confirmations_to_receive.is_empty() + }; + data.received_key_share = Some(DocumentKeyShare { + author: message.author.clone().into(), + threshold: message.threshold, + id_numbers: message.id_numbers.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(), + polynom1: message.polynom1.iter().cloned().map(Into::into).collect(), + secret_share: message.secret_share.clone().into(), + common_point: message.common_point.clone().map(Into::into), + encrypted_point: message.encrypted_point.clone().map(Into::into), + }); + + // send confirmation to all other nodes + { + let shares_to_move = data.shares_to_move.as_ref() + .expect("shares_to_move are filled during consensus establishing; share moves are processed after this; qed"); + let new_nodes_set: BTreeSet<_> = shares_to_move.values().cloned() + .chain(message.id_numbers.keys().filter(|n| !shares_to_move.contains_key(n)).cloned().map(Into::into)) + 
.collect(); + + for node in new_nodes_set.into_iter().filter(|n| n != &self.core.meta.self_node_id) { + self.core.transport.send(&node, ShareMoveMessage::ShareMoveConfirm(ShareMoveConfirm { + session: self.core.meta.id.clone().into(), + session_nonce: self.core.nonce, + }))?; + } + } + + // complete session if this was last share + if is_last_confirmation { + Self::complete_session(&self.core, &mut *data)?; + } + + Ok(()) + } + + /// When share is received from destination node. + pub fn on_share_move_confirmation(&self, sender: &NodeId, message: &ShareMoveConfirm) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + // check state + let mut data = self.data.lock(); + if data.state == SessionState::ConsensusEstablishing && data.shares_to_move.is_some() { + data.state = SessionState::WaitingForMoveConfirmation; + } else if data.state != SessionState::WaitingForMoveConfirmation { + return Err(Error::InvalidStateForRequest); + } + + // find share source + { + let mut move_confirmations_to_receive = data.move_confirmations_to_receive.as_mut() + .expect("move_confirmations_to_receive are filled during consensus establishing; move confirmations are processed after this; qed"); + if !move_confirmations_to_receive.remove(sender) { + return Err(Error::InvalidMessage); + } + + if !move_confirmations_to_receive.is_empty() { + return Ok(()); + } + } + + Self::complete_session(&self.core, &mut *data) + } + + /// When error has occured on another node. + pub fn on_session_error(&self, sender: &NodeId, message: &ShareMoveError) -> Result<(), Error> { + let mut data = self.data.lock(); + + warn!("{}: share move session failed with error: {} from {}", self.core.meta.self_node_id, message.error, sender); + + data.state = SessionState::Finished; + + Ok(()) + } + + /// Start sending ShareMove-specific messages, when consensus is established. + fn on_consensus_established(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + // update state + data.state = SessionState::WaitingForMoveConfirmation; + + // send share move requests to every required node + Self::disseminate_share_move_requests(core, data)?; + + { + let shares_to_move = data.shares_to_move.as_ref() + .expect("shares_to_move are filled during consensus establishing; this method is called after consensus established; qed"); + if let Some(share_destination) = shares_to_move.get(&core.meta.self_node_id) { + // move share + Self::move_share(core, share_destination)?; + } else { + // remember move confirmations to receive + data.move_confirmations_to_receive = Some(shares_to_move.values().cloned().collect()); + return Ok(()); + } + } + + // complete session if share is lost + Self::complete_session(core, data) + } + + /// Disseminate share move requests. + fn disseminate_share_move_requests(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + let shares_to_move = data.shares_to_move.as_ref() + .expect("shares_to_move are filled during consensus establishing; this method is called after consensus established; qed"); + for share_source in shares_to_move.keys().filter(|n| **n != core.meta.self_node_id) { + core.transport.send(share_source, ShareMoveMessage::ShareMoveRequest(ShareMoveRequest { + session: core.meta.id.clone().into(), + session_nonce: core.nonce, + }))?; + } + + Ok(()) + } + + /// Send share move message. 
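+ /// The source node ships its complete `DocumentKeyShare` (author, threshold, id numbers,
+ /// `polynom1`, secret share and the optional common/encrypted points) to the destination node.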
+ fn move_share(core: &SessionCore, share_destination: &NodeId) -> Result<(), Error> { + let key_share = core.key_share.as_ref() + .expect("move_share is called on nodes from shares_to_move.values(); all 'values' nodes have shares; qed"); + core.transport.send(share_destination, ShareMoveMessage::ShareMove(ShareMove { + session: core.meta.id.clone().into(), + session_nonce: core.nonce, + author: key_share.author.clone().into(), + threshold: key_share.threshold, + id_numbers: key_share.id_numbers.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(), + polynom1: key_share.polynom1.iter().cloned().map(Into::into).collect(), + secret_share: key_share.secret_share.clone().into(), + common_point: key_share.common_point.clone().map(Into::into), + encrypted_point: key_share.encrypted_point.clone().map(Into::into), + })) + } + + /// Complete session on this node. + fn complete_session(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + // update state + data.state = SessionState::Finished; + + // if we are source node => remove share from storage + let shares_to_move = data.shares_to_move.as_ref() + .expect("shares_to_move are filled during consensus establishing; this method is called after consensus established; qed"); + if shares_to_move.contains_key(&core.meta.self_node_id) { + return core.key_storage.remove(&core.meta.id) + .map_err(|e| Error::KeyStorage(e.into())); + } + + // else we need to update key_share.id_numbers.keys() + let is_old_node = data.received_key_share.is_none(); + let mut key_share = data.received_key_share.take() + .unwrap_or_else(|| core.key_share.as_ref() + .expect("on target nodes received_key_share is non-empty; on old nodes key_share is not empty; qed") + .clone()); + for (source_node, target_node) in shares_to_move { + let id_number = key_share.id_numbers.remove(source_node) + .expect("source_node is old node; there's entry in id_numbers for each old node; qed"); + key_share.id_numbers.insert(target_node.clone(), id_number); + } + + // ... 
and update key share in storage + if is_old_node { + core.key_storage.update(core.meta.id.clone(), key_share) + } else { + core.key_storage.insert(core.meta.id.clone(), key_share) + }.map_err(|e| Error::KeyStorage(e.into())) + } +} + +impl Session for SessionImpl where T: SessionTransport + Send + Sync + 'static { + fn wait(&self) -> Result<(), Error> { + let mut data = self.data.lock(); + if !data.result.is_some() { + self.core.completed.wait(&mut data); + } + + data.result.clone() + .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") + } +} + +impl ClusterSession for SessionImpl where T: SessionTransport { + fn is_finished(&self) -> bool { + self.data.lock().state == SessionState::Finished + } + + fn on_session_timeout(&self) { + let mut data = self.data.lock(); + + warn!("{}: share move session failed with timeout", self.core.meta.self_node_id); + + data.state = SessionState::Finished; + data.result = Some(Err(Error::NodeDisconnected)); + self.core.completed.notify_all(); + } + + fn on_node_timeout(&self, node: &NodeId) { + let mut data = self.data.lock(); + + warn!("{}: share move session failed because {} connection has timeouted", self.core.meta.self_node_id, node); + + data.state = SessionState::Finished; + data.result = Some(Err(Error::NodeDisconnected)); + self.core.completed.notify_all(); + } +} + +impl IsolatedSessionTransport { + pub fn new(session_id: SessionId, nonce: u64, cluster: Arc) -> Self { + IsolatedSessionTransport { + session: session_id, + nonce: nonce, + cluster: cluster, + shares_to_move_reversed: None, + } + } +} + +impl JobTransport for IsolatedSessionTransport { + type PartialJobRequest = ServersSetChangeAccessRequest; + type PartialJobResponse = bool; + + fn send_partial_request(&self, node: &NodeId, request: ServersSetChangeAccessRequest) -> Result<(), Error> { + let shares_to_move_reversed = self.shares_to_move_reversed.as_ref() + .expect("partial requests are sent from master node only; on master node shares_to_move_reversed are filled during creation; qed"); + self.cluster.send(node, Message::ShareMove(ShareMoveMessage::ShareMoveConsensusMessage(ShareMoveConsensusMessage { + session: self.session.clone().into(), + session_nonce: self.nonce, + message: ConsensusMessageWithServersMap::InitializeConsensusSession(InitializeConsensusSessionWithServersMap { + old_nodes_set: request.old_servers_set.into_iter().map(Into::into).collect(), + new_nodes_set: request.new_servers_set.into_iter().map(|n| (n.into(), + shares_to_move_reversed.get(&n).cloned().unwrap_or_else(|| n.clone()).into())).collect(), + old_set_signature: request.old_set_signature.into(), + new_set_signature: request.new_set_signature.into(), + }), + }))) + } + + fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { + self.cluster.send(node, Message::ShareMove(ShareMoveMessage::ShareMoveConsensusMessage(ShareMoveConsensusMessage { + session: self.session.clone().into(), + session_nonce: self.nonce, + message: ConsensusMessageWithServersMap::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: response, + }), + }))) + } +} + +impl SessionTransport for IsolatedSessionTransport { + fn set_shares_to_move_reversed(&mut self, shares_to_move_reversed: BTreeMap) { + self.shares_to_move_reversed = Some(shares_to_move_reversed); + } + + fn send(&self, node: &NodeId, message: ShareMoveMessage) -> Result<(), Error> { + self.cluster.send(node, Message::ShareMove(message)) + } +} + +fn 
check_shares_to_move(self_node_id: &NodeId, shares_to_move_reversed: &BTreeMap, id_numbers: Option<&BTreeMap>) -> Result<(), Error> { + // shares to move must not be empty + if shares_to_move_reversed.is_empty() { + return Err(Error::InvalidMessage); + } + + if let Some(id_numbers) = id_numbers { + // all values in share_to_move_reversed must be old nodes of the session + if shares_to_move_reversed.values().any(|n| !id_numbers.contains_key(n)) { + return Err(Error::InvalidNodesConfiguration); + } + // all keys in share_to_move_reversed must be new nodes for the session + if shares_to_move_reversed.keys().any(|n| id_numbers.contains_key(n)) { + return Err(Error::InvalidNodesConfiguration); + } + } else { + // this node must NOT in values of share_to_move_reversed + if shares_to_move_reversed.values().any(|n| n == self_node_id) { + return Err(Error::InvalidMessage); + } + // this node must be in keys of share_to_move_reversed + if !shares_to_move_reversed.contains_key(self_node_id) { + return Err(Error::InvalidMessage); + } + } + + // all values of the shares_to_move must be distinct + if shares_to_move_reversed.values().collect::>().len() != shares_to_move_reversed.len() { + return Err(Error::InvalidNodesConfiguration); + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + use std::collections::{VecDeque, BTreeMap, BTreeSet}; + use ethkey::{Random, Generator, Public, Signature, KeyPair, sign}; + use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage}; + use key_server_cluster::cluster::Cluster; + use key_server_cluster::cluster_sessions::ClusterSession; + use key_server_cluster::cluster::tests::DummyCluster; + use key_server_cluster::generation_session::tests::{Node as GenerationNode, generate_nodes_ids}; + use key_server_cluster::math; + use key_server_cluster::message::Message; + use key_server_cluster::servers_set_change_session::tests::generate_key; + use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash; + use key_server_cluster::admin_sessions::ShareChangeSessionMeta; + use key_server_cluster::admin_sessions::share_add_session::tests::check_secret_is_preserved; + use super::{SessionImpl, SessionParams, IsolatedSessionTransport}; + + struct Node { + pub cluster: Arc, + pub key_storage: Arc, + pub session: SessionImpl, + } + + struct MessageLoop { + pub admin_key_pair: KeyPair, + pub original_key_pair: KeyPair, + pub old_nodes_set: BTreeSet, + pub new_nodes_set: BTreeSet, + pub old_set_signature: Signature, + pub new_set_signature: Signature, + pub nodes: BTreeMap, + pub queue: VecDeque<(NodeId, NodeId, Message)>, + } + + fn create_session(mut meta: ShareChangeSessionMeta, admin_public: Public, self_node_id: NodeId, cluster: Arc, key_storage: Arc) -> SessionImpl { + let session_id = meta.id.clone(); + meta.self_node_id = self_node_id; + SessionImpl::new(SessionParams { + meta: meta.clone(), + transport: IsolatedSessionTransport::new(session_id, 1, cluster), + key_storage: key_storage, + admin_public: Some(admin_public), + nonce: 1, + }).unwrap() + } + + fn create_node(meta: ShareChangeSessionMeta, admin_public: Public, node: GenerationNode) -> Node { + Node { + cluster: node.cluster.clone(), + key_storage: node.key_storage.clone(), + session: create_session(meta, admin_public, node.session.node().clone(), node.cluster, node.key_storage), + } + } + + impl MessageLoop { + pub fn new(t: usize, master_node_id: NodeId, old_nodes_set: BTreeSet, shares_to_move: BTreeMap) -> Self { + // generate admin key pair + let 
admin_key_pair = Random.generate().unwrap(); + let admin_public = admin_key_pair.public().clone(); + + // run initial generation session + let gml = generate_key(t, old_nodes_set.clone()); + let original_secret = math::compute_joint_secret(gml.nodes.values() + .map(|nd| nd.key_storage.get(&SessionId::default()).unwrap().polynom1[0].clone()) + .collect::>() + .iter()).unwrap(); + let original_key_pair = KeyPair::from_secret(original_secret).unwrap(); + + // prepare sessions on all nodes + let meta = ShareChangeSessionMeta { + id: SessionId::default(), + self_node_id: NodeId::default(), + master_node_id: master_node_id, + }; + let new_nodes_set: BTreeSet<_> = old_nodes_set.iter() + .filter(|n| !shares_to_move.values().any(|n2| *n == n2)) + .cloned() + .chain(shares_to_move.keys().cloned()) + .collect(); + let new_nodes = new_nodes_set.iter() + .filter(|n| !old_nodes_set.contains(&n)) + .map(|new_node_id| { + let new_node_cluster = Arc::new(DummyCluster::new(new_node_id.clone())); + let new_node_key_storage = Arc::new(DummyKeyStorage::default()); + let new_node_session = create_session(meta.clone(), admin_public.clone(), new_node_id.clone(), new_node_cluster.clone(), new_node_key_storage.clone()); + Node { + cluster: new_node_cluster, + key_storage: new_node_key_storage, + session: new_node_session, + } + }); + let old_nodes = gml.nodes.into_iter().map(|gn| create_node(meta.clone(), admin_public.clone(), gn.1)); + let nodes = old_nodes.chain(new_nodes).map(|n| (n.session.core.meta.self_node_id.clone(), n)).collect(); + + let old_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&old_nodes_set)).unwrap(); + let new_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap(); + MessageLoop { + admin_key_pair: admin_key_pair, + original_key_pair: original_key_pair, + old_nodes_set: old_nodes_set.clone(), + new_nodes_set: new_nodes_set.clone(), + old_set_signature: old_set_signature, + new_set_signature: new_set_signature, + nodes: nodes, + queue: Default::default(), + } + } + + pub fn run(&mut self) { + while let Some((from, to, message)) = self.take_message() { + self.process_message((from, to, message)).unwrap(); + } + } + + pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { + self.nodes.values() + .filter_map(|n| n.cluster.take_message().map(|m| (n.session.core.meta.self_node_id.clone(), m.0, m.1))) + .nth(0) + .or_else(|| self.queue.pop_front()) + } + + pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { + match { match msg.2 { + Message::ShareMove(ref message) => + self.nodes[&msg.1].session.process_message(&msg.0, message), + _ => unreachable!("only servers set change messages are expected"), + } } { + Ok(_) => Ok(()), + Err(Error::TooEarlyForRequest) => { + self.queue.push_back(msg); + Ok(()) + }, + Err(err) => Err(err), + } + } + } + + #[test] + fn nodes_moved_using_share_move_from_master_node() { + let test_cases = vec![(3, 1), (3, 3)]; + for (n, nodes_to_add) in test_cases { + // generate key && prepare ShareAdd sessions + let old_nodes_set = generate_nodes_ids(n); + let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); + let nodes_to_add = generate_nodes_ids(nodes_to_add); + let mut shares_to_move = BTreeMap::new(); + for (source, target) in old_nodes_set.iter().zip(nodes_to_add.iter()) { + shares_to_move.insert(target.clone(), source.clone()); + } + let mut ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, shares_to_move.clone()); + + // initialize 
session on master node && run to completion + ml.nodes[&master_node_id].session.initialize(Some(shares_to_move.clone()), + Some(ml.old_set_signature.clone()), + Some(ml.new_set_signature.clone())).unwrap(); + ml.run(); + + // check that session has completed on all nodes + assert!(ml.nodes.values().all(|n| n.session.is_finished())); + + // check that secret is still the same as before adding the share + check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter() + .filter(|&(k, _)| !shares_to_move.values().any(|v| v == k)) + .map(|(k, v)| (k.clone(), v.key_storage.clone())) + .collect()); + } + } +} diff --git a/secret_store/src/key_server_cluster/admin_sessions/share_remove_session.rs b/secret_store/src/key_server_cluster/admin_sessions/share_remove_session.rs new file mode 100644 index 000000000..83824fe93 --- /dev/null +++ b/secret_store/src/key_server_cluster/admin_sessions/share_remove_session.rs @@ -0,0 +1,740 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use std::collections::BTreeSet; +use parking_lot::{Mutex, Condvar}; +use ethkey::{Public, Signature}; +use key_server_cluster::{Error, NodeId, SessionId, DocumentKeyShare, KeyStorage}; +use key_server_cluster::cluster::Cluster; +use key_server_cluster::cluster_sessions::ClusterSession; +use key_server_cluster::message::{Message, ShareRemoveMessage, ShareRemoveConsensusMessage, ConsensusMessageWithServersSet, + ShareRemoveRequest, ShareRemoveConfirm, ShareRemoveError, InitializeConsensusSessionWithServersSet, + ConfirmConsensusInitialization}; +use key_server_cluster::jobs::job_session::JobTransport; +use key_server_cluster::jobs::dummy_job::{DummyJob, DummyJobTransport}; +use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest}; +use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession}; +use key_server_cluster::admin_sessions::ShareChangeSessionMeta; + +/// Share remove session API. +pub trait Session: Send + Sync + 'static { + /// Wait until session is completed. + fn wait(&self) -> Result<(), Error>; +} + +/// Share remove session transport. +pub trait SessionTransport: Clone + JobTransport { + /// Send message to given node. + fn send(&self, node: &NodeId, message: ShareRemoveMessage) -> Result<(), Error>; +} + +/// Share remove session. +pub struct SessionImpl { + /// Session core. + core: SessionCore, + /// Session data. + data: Mutex>, +} + +/// Immutable session data. +struct SessionCore { + /// Session metadata. + pub meta: ShareChangeSessionMeta, + /// Session-level nonce. + pub nonce: u64, + /// Original key share. + pub key_share: DocumentKeyShare, + /// Session transport to communicate to other cluster nodes. + pub transport: T, + /// Key storage. + pub key_storage: Arc, + /// Administrator public key. 
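+ /// Used by `ServersSetChangeAccessJob` to verify the signed old/new server sets. It is required
+ /// whenever this session establishes its own consensus, and may be `None` when consensus data is
+ /// supplied externally via `set_consensus_output`.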
+ pub admin_public: Option, + /// SessionImpl completion condvar. + pub completed: Condvar, +} + +/// Share remove consensus session type. +type ShareRemoveChangeConsensusSession = ConsensusSession; + +/// Mutable session data. +struct SessionData { + /// Session state. + pub state: SessionState, + /// Consensus session. + pub consensus_session: Option>, + /// Shares to remove. + pub shares_to_remove: Option>, + /// Remove confirmations to receive. + pub remove_confirmations_to_receive: Option>, + /// Share remove change result. + pub result: Option>, +} + +/// SessionImpl creation parameters +pub struct SessionParams { + /// Session meta. + pub meta: ShareChangeSessionMeta, + /// Session nonce. + pub nonce: u64, + /// Session transport to communicate to other cluster nodes. + pub transport: T, + /// Key storage. + pub key_storage: Arc, + /// Administrator public key. + pub admin_public: Option, +} + +/// Share move session state. +#[derive(Debug, PartialEq)] +enum SessionState { + /// State when consensus is establishing. + ConsensusEstablishing, + /// Waiting for remove confirmation. + WaitingForRemoveConfirmation, + /// Session is finished. + Finished, +} + +/// Isolated ShareRemove session transport. +#[derive(Clone)] +pub struct IsolatedSessionTransport { + /// Key id. + session: SessionId, + /// Session-level nonce. + nonce: u64, + /// Cluster. + cluster: Arc, +} + +impl SessionImpl where T: SessionTransport { + /// Create new share remove session. + pub fn new(params: SessionParams) -> Result { + Ok(SessionImpl { + core: SessionCore { + meta: params.meta.clone(), + nonce: params.nonce, + key_share: params.key_storage.get(¶ms.meta.id).map_err(|e| Error::KeyStorage(e.into()))?, + transport: params.transport, + key_storage: params.key_storage, + admin_public: params.admin_public, + completed: Condvar::new(), + }, + data: Mutex::new(SessionData { + state: SessionState::ConsensusEstablishing, + consensus_session: None, + shares_to_remove: None, + remove_confirmations_to_receive: None, + result: None, + }), + }) + } + + /// Set pre-established consensus data. + pub fn set_consensus_output(&self, shares_to_remove: BTreeSet) -> Result<(), Error> { + let mut data = self.data.lock(); + + // check state + if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() { + return Err(Error::InvalidStateForRequest); + } + + check_shares_to_remove(&self.core, &shares_to_remove)?; + + data.remove_confirmations_to_receive = Some(shares_to_remove.clone()); + data.shares_to_remove = Some(shares_to_remove); + + Ok(()) + } + + /// Initialize share remove session on master node. + pub fn initialize(&self, shares_to_remove: Option>, old_set_signature: Option, new_set_signature: Option) -> Result<(), Error> { + debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id); + + let mut data = self.data.lock(); + // check state + if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() { + return Err(Error::InvalidStateForRequest); + } + + // if consensus is not yet established => start consensus session + let is_consensus_pre_established = data.shares_to_remove.is_some(); + if !is_consensus_pre_established { + // TODO: even if node was lost, it is still required for ShareRemove session to complete. + // It is wrong - if node is not in all_nodes_set, it must be excluded from consensus. 
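+ // Note: consensus below is run over all current share holders (`all_nodes_set`); nodes whose
+ // shares are being removed are simply absent from `new_nodes_set`.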
+ let shares_to_remove = shares_to_remove.ok_or(Error::InvalidMessage)?; + check_shares_to_remove(&self.core, &shares_to_remove)?; + + let old_set_signature = old_set_signature.ok_or(Error::InvalidMessage)?; + let new_set_signature = new_set_signature.ok_or(Error::InvalidMessage)?; + let all_nodes_set: BTreeSet<_> = self.core.key_share.id_numbers.keys().cloned().collect(); + let new_nodes_set: BTreeSet<_> = all_nodes_set.iter().cloned().filter(|n| !shares_to_remove.contains(&n)).collect(); + let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?; + + let mut consensus_session = ConsensusSession::new(ConsensusSessionParams { + meta: self.core.meta.clone().into_consensus_meta(all_nodes_set.len()), + consensus_executor: ServersSetChangeAccessJob::new_on_master(admin_public, + all_nodes_set.clone(), + all_nodes_set.clone(), + new_nodes_set, + old_set_signature, + new_set_signature), + consensus_transport: self.core.transport.clone(), + })?; + consensus_session.initialize(all_nodes_set)?; + data.consensus_session = Some(consensus_session); + data.remove_confirmations_to_receive = Some(shares_to_remove.clone()); + data.shares_to_remove = Some(shares_to_remove); + return Ok(()); + } + + // otherwise => start sending ShareRemove-specific messages + Self::on_consensus_established(&self.core, &mut *data) + } + + /// Process single message. + pub fn process_message(&self, sender: &NodeId, message: &ShareRemoveMessage) -> Result<(), Error> { + if self.core.nonce != message.session_nonce() { + return Err(Error::ReplayProtection); + } + + match message { + &ShareRemoveMessage::ShareRemoveConsensusMessage(ref message) => + self.on_consensus_message(sender, message), + &ShareRemoveMessage::ShareRemoveRequest(ref message) => + self.on_share_remove_request(sender, message), + &ShareRemoveMessage::ShareRemoveConfirm(ref message) => + self.on_share_remove_confirmation(sender, message), + &ShareRemoveMessage::ShareRemoveError(ref message) => + self.on_session_error(sender, message), + } + } + + /// When consensus-related message is received. 
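+ /// On non-master nodes the consensus session is created lazily on the first
+ /// `InitializeConsensusSession` received from the master; the set of shares to remove is then
+ /// derived as the difference `old_nodes_set - new_nodes_set` announced in that message.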
+ pub fn on_consensus_message(&self, sender: &NodeId, message: &ShareRemoveConsensusMessage) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + // start slave consensus session if needed + let mut data = self.data.lock(); + if data.consensus_session.is_none() && sender == &self.core.meta.master_node_id { + match &message.message { + &ConsensusMessageWithServersSet::InitializeConsensusSession(ref message) => { + let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?; + let current_nodes_set = self.core.key_share.id_numbers.keys().cloned().collect(); + data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams { + meta: self.core.meta.clone().into_consensus_meta(message.old_nodes_set.len()), + consensus_executor: ServersSetChangeAccessJob::new_on_slave(admin_public, current_nodes_set), + consensus_transport: self.core.transport.clone(), + })?); + }, + _ => return Err(Error::InvalidStateForRequest), + } + } + + let (is_establishing_consensus, is_consensus_established, shares_to_remove) = { + let consensus_session = data.consensus_session.as_mut().ok_or(Error::InvalidMessage)?; + let is_establishing_consensus = consensus_session.state() == ConsensusSessionState::EstablishingConsensus; + let shares_to_remove = match &message.message { + &ConsensusMessageWithServersSet::InitializeConsensusSession(ref message) => { + consensus_session.on_consensus_partial_request(sender, ServersSetChangeAccessRequest::from(message))?; + let shares_to_remove = message.old_nodes_set.difference(&message.new_nodes_set).cloned().map(Into::into).collect::>(); + check_shares_to_remove(&self.core, &shares_to_remove)?; + Some(shares_to_remove) + }, + &ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ref message) => { + consensus_session.on_consensus_partial_response(sender, message.is_confirmed)?; + None + }, + }; + + ( + is_establishing_consensus, + consensus_session.state() == ConsensusSessionState::ConsensusEstablished, + shares_to_remove + ) + }; + + if let Some(shares_to_remove) = shares_to_remove { + data.remove_confirmations_to_receive = Some(shares_to_remove.clone()); + data.shares_to_remove = Some(shares_to_remove); + } + if self.core.meta.self_node_id != self.core.meta.master_node_id || !is_establishing_consensus || !is_consensus_established { + return Ok(()); + } + + Self::on_consensus_established(&self.core, &mut *data) + } + + /// When share remove request is received. 
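+ /// Sent by the master node to the nodes whose shares are being removed; such a node responds by
+ /// confirming to the remaining nodes and erasing its own share (see `complete_session`).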
+ pub fn on_share_remove_request(&self, sender: &NodeId, message: &ShareRemoveRequest) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + // awaiting this message from master node only + if sender != &self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } + + // check state + let mut data = self.data.lock(); + if data.state == SessionState::ConsensusEstablishing && data.shares_to_remove.is_some() { + data.state = SessionState::WaitingForRemoveConfirmation; + } else if data.state != SessionState::WaitingForRemoveConfirmation { + return Err(Error::InvalidStateForRequest); + } + // only process if we are waiting for this request + { + let shares_to_remove = data.shares_to_remove.as_ref() + .expect("shares_to_remove is filled when consensus is established; we only process share move request after consensus is established; qed"); + if !shares_to_remove.contains(&self.core.meta.self_node_id) { + return Err(Error::InvalidMessage); + } + } + + // remove share + Self::complete_session(&self.core, &mut *data) + } + + /// When share is received from destination node. + pub fn on_share_remove_confirmation(&self, sender: &NodeId, message: &ShareRemoveConfirm) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(sender != &self.core.meta.self_node_id); + + // check state + let mut data = self.data.lock(); + if data.state == SessionState::ConsensusEstablishing && data.shares_to_remove.is_some() { + data.state = SessionState::WaitingForRemoveConfirmation; + } else if data.state != SessionState::WaitingForRemoveConfirmation { + return Err(Error::InvalidStateForRequest); + } + // find share source + { + let remove_confirmations_to_receive = data.remove_confirmations_to_receive.as_mut() + .expect("remove_confirmations_to_receive is filled when consensus is established; we only process share move confirmations after consensus is established; qed"); + if !remove_confirmations_to_receive.remove(sender) { + return Err(Error::InvalidMessage); + } + + if !remove_confirmations_to_receive.is_empty() { + return Ok(()); + } + } + + Self::complete_session(&self.core, &mut *data) + } + + /// When error has occured on another node. + pub fn on_session_error(&self, sender: &NodeId, message: &ShareRemoveError) -> Result<(), Error> { + let mut data = self.data.lock(); + + warn!("{}: share remove session failed with error: {} from {}", self.core.meta.self_node_id, message.error, sender); + + data.state = SessionState::Finished; + + Ok(()) + } + + /// Start sending ShareMove-specific messages, when consensus is established. + fn on_consensus_established(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + // update state + data.state = SessionState::WaitingForRemoveConfirmation; + + // send share remove requests to every required node + Self::disseminate_share_remove_requests(core, data)?; + + { + let shares_to_remove = data.shares_to_remove.as_ref() + .expect("shares_to_remove is filled when consensus is established; on_consensus_established is called after consensus is established; qed"); + if !shares_to_remove.contains(&core.meta.self_node_id) { + // remember remove confirmations to receive + data.remove_confirmations_to_receive = Some(shares_to_remove.iter().cloned().collect()); + return Ok(()); + } + } + + // complete session if share is lost + Self::complete_session(core, data) + } + + /// Disseminate share remove requests. 
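+ /// Requests are sent to every node being removed except this node itself; this node's own
+ /// removal, if any, is handled locally via `complete_session`.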
+ fn disseminate_share_remove_requests(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + let shares_to_remove = data.shares_to_remove.as_ref() + .expect("shares_to_remove is filled when consensus is established; disseminate_share_remove_requests is called after consensus is established; qed"); + for node in shares_to_remove.iter().filter(|n| **n != core.meta.self_node_id) { + core.transport.send(node, ShareRemoveMessage::ShareRemoveRequest(ShareRemoveRequest { + session: core.meta.id.clone().into(), + session_nonce: core.nonce, + }))?; + } + + Ok(()) + } + + /// Complete session on this node. + fn complete_session(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { + // update state + data.state = SessionState::Finished; + + // if we are 'removing' node => remove share from storage + let shares_to_remove = data.shares_to_remove.as_ref() + .expect("shares_to_remove is filled when consensus is established; complete_session is called after consensus is established; qed"); + if shares_to_remove.contains(&core.meta.self_node_id) { + // send confirmation to all other nodes + let new_nodes_set = core.key_share.id_numbers.keys().filter(|n| !shares_to_remove.contains(n)).collect::>(); + for node in new_nodes_set.into_iter().filter(|n| **n != core.meta.self_node_id) { + core.transport.send(&node, ShareRemoveMessage::ShareRemoveConfirm(ShareRemoveConfirm { + session: core.meta.id.clone().into(), + session_nonce: core.nonce, + }))?; + } + + return core.key_storage.remove(&core.meta.id) + .map_err(|e| Error::KeyStorage(e.into())); + } + + // else we need to update key_share.id_numbers.keys() + let mut key_share = core.key_share.clone(); + for share_to_remove in shares_to_remove { + key_share.id_numbers.remove(share_to_remove); + } + + // ... 
and update key share in storage + core.key_storage.update(core.meta.id.clone(), key_share) + .map_err(|e| Error::KeyStorage(e.into())) + } +} + +impl Session for SessionImpl where T: SessionTransport + Send + Sync + 'static { + fn wait(&self) -> Result<(), Error> { + let mut data = self.data.lock(); + if !data.result.is_some() { + self.core.completed.wait(&mut data); + } + + data.result.clone() + .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") + } +} + +impl ClusterSession for SessionImpl where T: SessionTransport { + fn is_finished(&self) -> bool { + self.data.lock().state == SessionState::Finished + } + + fn on_session_timeout(&self) { + let mut data = self.data.lock(); + + warn!("{}: share remove session failed with timeout", self.core.meta.self_node_id); + + data.state = SessionState::Finished; + data.result = Some(Err(Error::NodeDisconnected)); + self.core.completed.notify_all(); + } + + fn on_node_timeout(&self, node: &NodeId) { + let mut data = self.data.lock(); + + warn!("{}: share remove session failed because {} connection has timeouted", self.core.meta.self_node_id, node); + + data.state = SessionState::Finished; + data.result = Some(Err(Error::NodeDisconnected)); + self.core.completed.notify_all(); + } +} + +impl IsolatedSessionTransport { + pub fn new(session_id: SessionId, nonce: u64, cluster: Arc) -> Self { + IsolatedSessionTransport { + session: session_id, + nonce: nonce, + cluster: cluster, + } + } +} + +impl JobTransport for IsolatedSessionTransport { + type PartialJobRequest = ServersSetChangeAccessRequest; + type PartialJobResponse = bool; + + fn send_partial_request(&self, node: &NodeId, request: ServersSetChangeAccessRequest) -> Result<(), Error> { + self.cluster.send(node, Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(ShareRemoveConsensusMessage { + session: self.session.clone().into(), + session_nonce: self.nonce, + message: ConsensusMessageWithServersSet::InitializeConsensusSession(InitializeConsensusSessionWithServersSet { + old_nodes_set: request.old_servers_set.into_iter().map(Into::into).collect(), + new_nodes_set: request.new_servers_set.into_iter().map(Into::into).collect(), + old_set_signature: request.old_set_signature.into(), + new_set_signature: request.new_set_signature.into(), + }), + }))) + } + + fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { + self.cluster.send(node, Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(ShareRemoveConsensusMessage { + session: self.session.clone().into(), + session_nonce: self.nonce, + message: ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: response, + }), + }))) + } +} + +impl SessionTransport for IsolatedSessionTransport { + fn send(&self, node: &NodeId, message: ShareRemoveMessage) -> Result<(), Error> { + self.cluster.send(node, Message::ShareRemove(message)) + } +} + +fn check_shares_to_remove(core: &SessionCore, shares_to_remove: &BTreeSet) -> Result<(), Error> { + // shares to remove must not be empty + if shares_to_remove.is_empty() { + return Err(Error::InvalidMessage); + } + + // all shares_to_remove nodes must be old nodes of the session + if shares_to_remove.iter().any(|n| !core.key_share.id_numbers.contains_key(n)) { + return Err(Error::InvalidNodesConfiguration); + } + + // do not allow removing more shares than possible + let nodes_left = core.key_share.id_numbers.len() - shares_to_remove.len(); + if 
core.key_share.threshold + 1 > nodes_left { + return Err(Error::InvalidNodesConfiguration); + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + use std::collections::{VecDeque, BTreeMap, BTreeSet}; + use ethkey::{Random, Generator, Public, Signature, KeyPair, sign}; + use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage}; + use key_server_cluster::cluster::Cluster; + use key_server_cluster::cluster_sessions::ClusterSession; + use key_server_cluster::cluster::tests::DummyCluster; + use key_server_cluster::generation_session::tests::{Node as GenerationNode, generate_nodes_ids}; + use key_server_cluster::math; + use key_server_cluster::message::Message; + use key_server_cluster::servers_set_change_session::tests::generate_key; + use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash; + use key_server_cluster::admin_sessions::ShareChangeSessionMeta; + use key_server_cluster::admin_sessions::share_add_session::tests::check_secret_is_preserved; + use super::{SessionImpl, SessionParams, IsolatedSessionTransport}; + + struct Node { + pub cluster: Arc, + pub key_storage: Arc, + pub session: SessionImpl, + } + + struct MessageLoop { + pub admin_key_pair: KeyPair, + pub original_key_pair: KeyPair, + pub old_nodes_set: BTreeSet, + pub new_nodes_set: BTreeSet, + pub old_set_signature: Signature, + pub new_set_signature: Signature, + pub nodes: BTreeMap, + pub queue: VecDeque<(NodeId, NodeId, Message)>, + } + + fn create_session(mut meta: ShareChangeSessionMeta, admin_public: Public, self_node_id: NodeId, cluster: Arc, key_storage: Arc) -> SessionImpl { + let session_id = meta.id.clone(); + meta.self_node_id = self_node_id; + SessionImpl::new(SessionParams { + meta: meta.clone(), + transport: IsolatedSessionTransport::new(session_id, 1, cluster), + key_storage: key_storage, + admin_public: Some(admin_public), + nonce: 1, + }).unwrap() + } + + fn create_node(meta: ShareChangeSessionMeta, admin_public: Public, node: GenerationNode) -> Node { + Node { + cluster: node.cluster.clone(), + key_storage: node.key_storage.clone(), + session: create_session(meta, admin_public, node.session.node().clone(), node.cluster, node.key_storage), + } + } + + impl MessageLoop { + pub fn new(t: usize, master_node_id: NodeId, old_nodes_set: BTreeSet, shares_to_remove: BTreeSet) -> Self { + // generate admin key pair + let admin_key_pair = Random.generate().unwrap(); + let admin_public = admin_key_pair.public().clone(); + + // run initial generation session + let gml = generate_key(t, old_nodes_set.clone()); + let original_secret = math::compute_joint_secret(gml.nodes.values() + .map(|nd| nd.key_storage.get(&SessionId::default()).unwrap().polynom1[0].clone()) + .collect::>() + .iter()).unwrap(); + let original_key_pair = KeyPair::from_secret(original_secret).unwrap(); + + // prepare sessions on all nodes + let meta = ShareChangeSessionMeta { + id: SessionId::default(), + self_node_id: NodeId::default(), + master_node_id: master_node_id, + }; + let new_nodes_set: BTreeSet<_> = old_nodes_set.iter() + .filter(|n| !shares_to_remove.contains(n)) + .cloned() + .collect(); + let nodes = gml.nodes.into_iter().map(|gn| create_node(meta.clone(), admin_public.clone(), gn.1)); + let nodes = nodes.map(|n| (n.session.core.meta.self_node_id.clone(), n)).collect(); + + let old_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&old_nodes_set)).unwrap(); + let new_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap(); + 
MessageLoop { + admin_key_pair: admin_key_pair, + original_key_pair: original_key_pair, + old_nodes_set: old_nodes_set.clone(), + new_nodes_set: new_nodes_set.clone(), + old_set_signature: old_set_signature, + new_set_signature: new_set_signature, + nodes: nodes, + queue: Default::default(), + } + } + + pub fn run(&mut self) { + while let Some((from, to, message)) = self.take_message() { + self.process_message((from, to, message)).unwrap(); + } + } + + pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { + self.nodes.values() + .filter_map(|n| n.cluster.take_message().map(|m| (n.session.core.meta.self_node_id.clone(), m.0, m.1))) + .nth(0) + .or_else(|| self.queue.pop_front()) + } + + pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { + match { match msg.2 { + Message::ShareRemove(ref message) => + self.nodes[&msg.1].session.process_message(&msg.0, message), + _ => unreachable!("only servers set change messages are expected"), + } } { + Ok(_) => Ok(()), + Err(Error::TooEarlyForRequest) => { + self.queue.push_back(msg); + Ok(()) + }, + Err(err) => Err(err), + } + } + } + + #[test] + fn remove_session_fails_if_no_nodes_are_removed() { + let (t, n) = (1, 3); + let old_nodes_set = generate_nodes_ids(n); + let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); + let nodes_to_remove = BTreeSet::new(); + let ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone()); + assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()), + Some(ml.old_set_signature.clone()), + Some(ml.new_set_signature.clone())), Err(Error::InvalidMessage)); + } + + #[test] + fn remove_session_fails_if_foreign_nodes_are_removed() { + let (t, n) = (1, 3); + let old_nodes_set = generate_nodes_ids(n); + let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); + let nodes_to_remove: BTreeSet<_> = vec![math::generate_random_point().unwrap()].into_iter().collect(); + let ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone()); + assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()), + Some(ml.old_set_signature.clone()), + Some(ml.new_set_signature.clone())), Err(Error::InvalidNodesConfiguration)); + } + + #[test] + fn remove_session_fails_if_too_many_nodes_are_removed() { + let (t, n) = (1, 3); + let old_nodes_set = generate_nodes_ids(n); + let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); + let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().take(2).collect(); + let ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone()); + assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()), + Some(ml.old_set_signature.clone()), + Some(ml.new_set_signature.clone())), Err(Error::InvalidNodesConfiguration)); + } + + #[test] + fn nodes_removed_using_share_remove_from_master_node() { + let t = 1; + let test_cases = vec![(3, 1), (5, 3)]; + for (n, nodes_to_remove) in test_cases { + // generate key && prepare ShareMove sessions + let old_nodes_set = generate_nodes_ids(n); + let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); + let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().take(nodes_to_remove).collect(); + let mut ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone()); + + // initialize session on master node && run to completion + 
ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()), + Some(ml.old_set_signature.clone()), + Some(ml.new_set_signature.clone())).unwrap(); + ml.run(); + + // check that session has completed on all nodes + assert!(ml.nodes.values().all(|n| n.session.is_finished())); + + // check that secret is still the same as before adding the share + check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter() + .filter(|&(k, _)| !nodes_to_remove.contains(k)) + .map(|(k, v)| (k.clone(), v.key_storage.clone())) + .collect()); + } + } + + #[test] + fn nodes_removed_using_share_remove_from_non_master_node() { + let t = 1; + let test_cases = vec![(3, 1), (5, 3)]; + for (n, nodes_to_remove) in test_cases { + // generate key && prepare ShareMove sessions + let old_nodes_set = generate_nodes_ids(n); + let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); + let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().skip(1).take(nodes_to_remove).collect(); + let mut ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone()); + + // initialize session on master node && run to completion + ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()), + Some(ml.old_set_signature.clone()), + Some(ml.new_set_signature.clone())).unwrap(); + ml.run(); + + // check that session has completed on all nodes + assert!(ml.nodes.values().all(|n| n.session.is_finished())); + + // check that secret is still the same as before adding the share + check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter() + .filter(|&(k, _)| !nodes_to_remove.contains(k)) + .map(|(k, v)| (k.clone(), v.key_storage.clone())) + .collect()); + } + } +} diff --git a/secret_store/src/key_server_cluster/decryption_session.rs b/secret_store/src/key_server_cluster/client_sessions/decryption_session.rs similarity index 97% rename from secret_store/src/key_server_cluster/decryption_session.rs rename to secret_store/src/key_server_cluster/client_sessions/decryption_session.rs index 3f7bdba04..a52a354fe 100644 --- a/secret_store/src/key_server_cluster/decryption_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/decryption_session.rs @@ -25,6 +25,7 @@ use key_server_cluster::message::{Message, DecryptionMessage, DecryptionConsensu PartialDecryption, DecryptionSessionError, DecryptionSessionCompleted, ConsensusMessage, InitializeConsensusSession, ConfirmConsensusInitialization}; use key_server_cluster::jobs::job_session::JobTransport; +use key_server_cluster::jobs::key_access_job::KeyAccessJob; use key_server_cluster::jobs::decryption_job::{PartialDecryptionRequest, PartialDecryptionResponse, DecryptionJob}; use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession}; @@ -66,7 +67,7 @@ struct SessionCore { } /// Decryption consensus session type. -type DecryptionConsensusSession = ConsensusSession; +type DecryptionConsensusSession = ConsensusSession; /// Mutable session data. 
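The failing-initialization tests above reduce to a simple share-count rule checked before any share is removed: the removal set must be non-empty, must name only current share holders, and must leave at least threshold + 1 holders behind so the secret stays recoverable. A minimal sketch of the counting part of that rule (the function is an illustrative stand-in, though the error variants mirror the ones used above):

#[derive(Debug, PartialEq)]
enum Error { InvalidMessage, InvalidNodesConfiguration }

// `total_nodes` currently hold shares; removing `shares_to_remove` of them must
// still leave `threshold + 1` holders so the secret stays recoverable.
fn check_shares_to_remove(total_nodes: usize, shares_to_remove: usize, threshold: usize) -> Result<(), Error> {
    if shares_to_remove == 0 {
        return Err(Error::InvalidMessage);
    }
    let nodes_left = total_nodes.saturating_sub(shares_to_remove);
    if threshold + 1 > nodes_left {
        return Err(Error::InvalidNodesConfiguration);
    }
    Ok(())
}

fn main() {
    assert_eq!(check_shares_to_remove(3, 1, 1), Ok(()));                                // 2 holders remain, t + 1 = 2
    assert_eq!(check_shares_to_remove(3, 2, 1), Err(Error::InvalidNodesConfiguration)); // only 1 holder would remain
    assert_eq!(check_shares_to_remove(3, 0, 1), Err(Error::InvalidMessage));            // nothing to remove
}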
struct SessionData { @@ -151,10 +152,18 @@ impl SessionImpl { nonce: params.nonce, cluster: params.cluster.clone(), }; + let consensus_session = ConsensusSession::new(ConsensusSessionParams { + meta: params.meta.clone(), + consensus_executor: match requester_signature { + Some(requester_signature) => KeyAccessJob::new_on_master(params.meta.id.clone(), params.acl_storage.clone(), requester_signature), + None => KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()), + }, + consensus_transport: consensus_transport, + })?; Ok(SessionImpl { core: SessionCore { - meta: params.meta.clone(), + meta: params.meta, access_key: params.access_key, key_share: params.key_share, cluster: params.cluster, @@ -162,18 +171,7 @@ impl SessionImpl { completed: Condvar::new(), }, data: Mutex::new(SessionData { - consensus_session: match requester_signature { - Some(requester_signature) => ConsensusSession::new_on_master(ConsensusSessionParams { - meta: params.meta, - acl_storage: params.acl_storage.clone(), - consensus_transport: consensus_transport, - }, requester_signature)?, - None => ConsensusSession::new_on_slave(ConsensusSessionParams { - meta: params.meta, - acl_storage: params.acl_storage.clone(), - consensus_transport: consensus_transport, - })?, - }, + consensus_session: consensus_session, is_shadow_decryption: None, result: None, }), @@ -267,7 +265,7 @@ impl SessionImpl { debug_assert!(sender != &self.core.meta.self_node_id); let mut data = self.data.lock(); - let requester = data.consensus_session.requester()?.clone(); + let requester = data.consensus_session.consensus_job().executor().requester()?.ok_or(Error::InvalidStateForRequest)?.clone(); let decryption_job = DecryptionJob::new_on_slave(self.core.meta.self_node_id.clone(), self.core.access_key.clone(), requester, self.core.key_share.clone())?; let decryption_transport = self.core.decryption_transport(); @@ -401,7 +399,7 @@ impl SessionCore { } pub fn disseminate_jobs(&self, consensus_session: &mut DecryptionConsensusSession, is_shadow_decryption: bool) -> Result<(), Error> { - let requester = consensus_session.requester()?.clone(); + let requester = consensus_session.consensus_job().executor().requester()?.ok_or(Error::InvalidStateForRequest)?.clone(); let decryption_job = DecryptionJob::new_on_master(self.meta.self_node_id.clone(), self.access_key.clone(), requester, self.key_share.clone(), is_shadow_decryption)?; consensus_session.disseminate_jobs(decryption_job, self.decryption_transport()) } @@ -532,6 +530,7 @@ mod tests { threshold: 3, id_numbers: id_numbers.clone().into_iter().collect(), secret_share: secret_shares[i].clone(), + polynom1: Vec::new(), common_point: Some(common_point.clone()), encrypted_point: Some(encrypted_point.clone()), }).collect(); @@ -600,6 +599,7 @@ mod tests { threshold: 0, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), + polynom1: Vec::new(), common_point: Some(Random.generate().unwrap().public().clone()), encrypted_point: Some(Random.generate().unwrap().public().clone()), }, @@ -631,6 +631,7 @@ mod tests { threshold: 0, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), + polynom1: Vec::new(), common_point: Some(Random.generate().unwrap().public().clone()), encrypted_point: Some(Random.generate().unwrap().public().clone()), }, @@ -662,6 +663,7 @@ mod tests { threshold: 2, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), + polynom1: Vec::new(), common_point: 
Some(Random.generate().unwrap().public().clone()), encrypted_point: Some(Random.generate().unwrap().public().clone()), }, diff --git a/secret_store/src/key_server_cluster/encryption_session.rs b/secret_store/src/key_server_cluster/client_sessions/encryption_session.rs similarity index 99% rename from secret_store/src/key_server_cluster/encryption_session.rs rename to secret_store/src/key_server_cluster/client_sessions/encryption_session.rs index 1a1304a53..b49c391e9 100644 --- a/secret_store/src/key_server_cluster/encryption_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/encryption_session.rs @@ -252,7 +252,7 @@ impl SessionImpl { } /// When error has occured on another node. - pub fn on_session_error(&self, sender: NodeId, message: &EncryptionSessionError) -> Result<(), Error> { + pub fn on_session_error(&self, sender: &NodeId, message: &EncryptionSessionError) -> Result<(), Error> { self.check_nonce(message.session_nonce)?; let mut data = self.data.lock(); diff --git a/secret_store/src/key_server_cluster/generation_session.rs b/secret_store/src/key_server_cluster/client_sessions/generation_session.rs similarity index 98% rename from secret_store/src/key_server_cluster/generation_session.rs rename to secret_store/src/key_server_cluster/client_sessions/generation_session.rs index 68ecb3519..bdbf02eb9 100644 --- a/secret_store/src/key_server_cluster/generation_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/generation_session.rs @@ -100,6 +100,8 @@ struct SessionData { nodes: BTreeMap, // === Values, filled during KD phase === + /// Polynom1. + polynom1: Option>, /// Value of polynom1[0], generated by this node. secret_coeff: Option, @@ -121,10 +123,6 @@ struct NodeData { pub id_number: Secret, // === Values, filled during KD phase === - /// Secret value1, which has been sent to this node. - pub secret1_sent: Option, - /// Secret value2, which has been sent to this node. - pub secret2_sent: Option, /// Secret value1, which has been received from this node. pub secret1: Option, /// Secret value2, which has been received from this node. 
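These generation-session changes retain the randomly drawn polynom1 so it can be persisted with the key share (see the surrounding hunks), which is what lets the share-change tests earlier recompute the joint secret: it is simply the sum of every node's polynom1[0] coefficient in the field. A toy sketch of that reconstruction over a small prime field, standing in for the real secp256k1 arithmetic behind math::compute_joint_secret:

const FIELD: u64 = 2_147_483_647; // toy prime modulus, stand-in for the secp256k1 group order

// Each node's key share stores its own polynom1; polynom1[0] is that node's secret coefficient.
fn compute_joint_secret(secret_coefficients: &[u64]) -> u64 {
    secret_coefficients.iter().fold(0, |sum, c| (sum + c) % FIELD)
}

fn main() {
    let coefficients = [17, 429, 90_001]; // one polynom1[0] per node, illustrative values
    println!("joint secret = {}", compute_joint_secret(&coefficients));
}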
@@ -203,6 +201,7 @@ impl SessionImpl { threshold: None, derived_point: None, nodes: BTreeMap::new(), + polynom1: None, secret_coeff: None, secret_share: None, key_share: None, @@ -293,7 +292,7 @@ impl SessionImpl { &GenerationMessage::PublicKeyShare(ref message) => self.on_public_key_share(sender.clone(), message), &GenerationMessage::SessionError(ref message) => - self.on_session_error(sender.clone(), message), + self.on_session_error(sender, message), &GenerationMessage::SessionCompleted(ref message) => self.on_session_completed(sender.clone(), message), } @@ -462,7 +461,7 @@ impl SessionImpl { // update node data with received public share { let node_data = &mut data.nodes.get_mut(&sender).ok_or(Error::InvalidMessage)?; - if node_data.public_share.is_some() { + if node_data.public_share.is_some() { return Err(Error::InvalidMessage); } @@ -507,6 +506,7 @@ impl SessionImpl { threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(), secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(), + polynom1: data.polynom1.as_ref().expect("polynom1 is filled in KG phase; we are at the end of KG phase; qed").clone(), common_point: None, encrypted_point: None, }; @@ -547,7 +547,7 @@ impl SessionImpl { } /// When error has occured on another node. - pub fn on_session_error(&self, sender: NodeId, message: &SessionError) -> Result<(), Error> { + pub fn on_session_error(&self, sender: &NodeId, message: &SessionError) -> Result<(), Error> { let mut data = self.data.lock(); warn!("{}: generation session failed with error: {} from {}", self.node(), message.error, sender); @@ -585,8 +585,9 @@ impl SessionImpl { let threshold = data.threshold.expect("threshold is filled on initialization phase; KD phase follows initialization phase; qed"); let polynom1 = math::generate_random_polynom(threshold)?; let polynom2 = math::generate_random_polynom(threshold)?; + data.polynom1 = Some(polynom1.clone()); data.secret_coeff = Some(polynom1[0].clone()); - + // compute t+1 public values let publics = math::public_values_generation(threshold, data.derived_point.as_ref().expect("keys dissemination occurs after derived point is agreed; qed"), @@ -600,9 +601,6 @@ impl SessionImpl { // send a message containing secret1 && secret2 to other node if node != self.node() { - node_data.secret1_sent = Some(secret1.clone()); - node_data.secret2_sent = Some(secret2.clone()); - self.cluster.send(&node, Message::Generation(GenerationMessage::KeysDissemination(KeysDissemination { session: self.id.clone().into(), session_nonce: self.nonce, @@ -687,6 +685,7 @@ impl SessionImpl { threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(), secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(), + polynom1: data.polynom1.as_ref().expect("polynom1 is filled in KG phase; we are at the end of KG phase; qed").clone(), common_point: None, encrypted_point: None, }; @@ -810,8 +809,6 @@ impl NodeData { fn with_id_number(node_id_number: Secret) -> Self { NodeData { id_number: node_id_number, - secret1_sent: None, - secret2_sent: None, secret1: None, 
secret2: None, publics: None, @@ -876,13 +873,19 @@ pub mod tests { pub queue: VecDeque<(NodeId, NodeId, Message)>, } + pub fn generate_nodes_ids(n: usize) -> BTreeSet { + (0..n).map(|_| math::generate_random_point().unwrap()).collect() + } + impl MessageLoop { pub fn new(nodes_num: usize) -> Self { + Self::with_nodes_ids(generate_nodes_ids(nodes_num)) + } + + pub fn with_nodes_ids(nodes_ids: BTreeSet) -> Self { let mut nodes = BTreeMap::new(); let session_id = SessionId::default(); - for _ in 0..nodes_num { - let key_pair = Random.generate().unwrap(); - let node_id = key_pair.public().clone(); + for node_id in nodes_ids { let cluster = Arc::new(DummyCluster::new(node_id.clone())); let key_storage = Arc::new(DummyKeyStorage::default()); let session = SessionImpl::new(SessionParams { diff --git a/secret_store/src/key_server_cluster/client_sessions/mod.rs b/secret_store/src/key_server_cluster/client_sessions/mod.rs new file mode 100644 index 000000000..0ee5f5841 --- /dev/null +++ b/secret_store/src/key_server_cluster/client_sessions/mod.rs @@ -0,0 +1,20 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +pub mod decryption_session; +pub mod encryption_session; +pub mod generation_session; +pub mod signing_session; diff --git a/secret_store/src/key_server_cluster/signing_session.rs b/secret_store/src/key_server_cluster/client_sessions/signing_session.rs similarity index 98% rename from secret_store/src/key_server_cluster/signing_session.rs rename to secret_store/src/key_server_cluster/client_sessions/signing_session.rs index 822d8228b..d094e6516 100644 --- a/secret_store/src/key_server_cluster/signing_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/signing_session.rs @@ -28,6 +28,7 @@ use key_server_cluster::message::{Message, SigningMessage, SigningConsensusMessa RequestPartialSignature, PartialSignature, SigningSessionCompleted, GenerationMessage, ConsensusMessage, SigningSessionError, InitializeConsensusSession, ConfirmConsensusInitialization}; use key_server_cluster::jobs::job_session::JobTransport; +use key_server_cluster::jobs::key_access_job::KeyAccessJob; use key_server_cluster::jobs::signing_job::{PartialSigningRequest, PartialSigningResponse, SigningJob}; use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession}; @@ -70,7 +71,7 @@ struct SessionCore { } /// Signing consensus session type. -type SigningConsensusSession = ConsensusSession; +type SigningConsensusSession = ConsensusSession; /// Mutable session data. 
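The signing session below receives the same refactoring as the decryption session above: the dedicated new_on_master/new_on_slave constructors disappear, and a single ConsensusSession is built with an explicit consensus executor, a KeyAccessJob in master mode when the requester's signature is available locally and in slave mode otherwise. A reduced sketch of that selection with stand-in types:

#[derive(Debug)]
enum KeyAccessJob {
    // the initiating node carries the requester's signature and drives consensus
    Master { requester_signature: String },
    // every other node only answers access-check requests
    Slave,
}

fn consensus_executor(requester_signature: Option<String>) -> KeyAccessJob {
    match requester_signature {
        Some(signature) => KeyAccessJob::Master { requester_signature: signature },
        None => KeyAccessJob::Slave,
    }
}

fn main() {
    println!("{:?}", consensus_executor(Some("requester-signature".to_string())));
    println!("{:?}", consensus_executor(None));
}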
struct SessionData { @@ -169,10 +170,18 @@ impl SessionImpl { nonce: params.nonce, cluster: params.cluster.clone(), }; + let consensus_session = ConsensusSession::new(ConsensusSessionParams { + meta: params.meta.clone(), + consensus_executor: match requester_signature { + Some(requester_signature) => KeyAccessJob::new_on_master(params.meta.id.clone(), params.acl_storage.clone(), requester_signature), + None => KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()), + }, + consensus_transport: consensus_transport, + })?; Ok(SessionImpl { core: SessionCore { - meta: params.meta.clone(), + meta: params.meta, access_key: params.access_key, key_share: params.key_share, cluster: params.cluster, @@ -182,18 +191,7 @@ impl SessionImpl { data: Mutex::new(SessionData { state: SessionState::ConsensusEstablishing, message_hash: None, - consensus_session: match requester_signature { - Some(requester_signature) => ConsensusSession::new_on_master(ConsensusSessionParams { - meta: params.meta, - acl_storage: params.acl_storage.clone(), - consensus_transport: consensus_transport, - }, requester_signature)?, - None => ConsensusSession::new_on_slave(ConsensusSessionParams { - meta: params.meta, - acl_storage: params.acl_storage.clone(), - consensus_transport: consensus_transport, - })?, - }, + consensus_session: consensus_session, generation_session: None, result: None, }), @@ -789,6 +787,7 @@ mod tests { threshold: 0, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), + polynom1: Vec::new(), common_point: Some(Random.generate().unwrap().public().clone()), encrypted_point: Some(Random.generate().unwrap().public().clone()), }, @@ -820,6 +819,7 @@ mod tests { threshold: 0, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), + polynom1: Vec::new(), common_point: Some(Random.generate().unwrap().public().clone()), encrypted_point: Some(Random.generate().unwrap().public().clone()), }, @@ -851,6 +851,7 @@ mod tests { threshold: 2, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), + polynom1: Vec::new(), common_point: Some(Random.generate().unwrap().public().clone()), encrypted_point: Some(Random.generate().unwrap().public().clone()), }, diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 4deeb1244..b6107bd8d 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -20,7 +20,7 @@ use std::sync::Arc; use std::collections::{BTreeMap, BTreeSet}; use std::collections::btree_map::Entry; use std::net::{SocketAddr, IpAddr}; -use futures::{finished, failed, Future, Stream, BoxFuture}; +use futures::{finished, failed, Future, Stream}; use futures_cpupool::CpuPool; use parking_lot::{RwLock, Mutex}; use tokio_io::IoFuture; @@ -30,9 +30,10 @@ use ethkey::{Public, KeyPair, Signature, Random, Generator}; use bigint::hash::H256; use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair}; use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, GenerationSessionWrapper, EncryptionSessionWrapper, - DecryptionSessionWrapper, SigningSessionWrapper}; + DecryptionSessionWrapper, SigningSessionWrapper, AdminSessionWrapper}; use key_server_cluster::message::{self, Message, ClusterMessage, GenerationMessage, EncryptionMessage, DecryptionMessage, - SigningMessage, ConsensusMessage}; + SigningMessage, ServersSetChangeMessage, ConsensusMessage, ShareAddMessage, 
ShareMoveMessage, ShareRemoveMessage, + ConsensusMessageWithServersSecretMap, ConsensusMessageWithServersMap, ConsensusMessageWithServersSet}; use key_server_cluster::generation_session::{Session as GenerationSession, SessionState as GenerationSessionState}; #[cfg(test)] use key_server_cluster::generation_session::SessionImpl as GenerationSessionImpl; @@ -55,9 +56,8 @@ const KEEP_ALIVE_SEND_INTERVAL: u64 = 30; /// we must treat this node as non-responding && disconnect from it. const KEEP_ALIVE_DISCONNECT_INTERVAL: u64 = 60; -/// Encryption sesion timeout interval. It works /// Empty future. -type BoxedEmptyFuture = BoxFuture<(), ()>; +type BoxedEmptyFuture = ::std::boxed::Box + Send>; /// Cluster interface for external clients. pub trait ClusterClient: Send + Sync { @@ -71,6 +71,14 @@ pub trait ClusterClient: Send + Sync { fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature, is_shadow_decryption: bool) -> Result, Error>; /// Start new signing session. fn new_signing_session(&self, session_id: SessionId, requestor_signature: Signature, message_hash: H256) -> Result, Error>; + /// Start new share add session. + fn new_share_add_session(&self, session_id: SessionId, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error>; + /// Start new share move session. + fn new_share_move_session(&self, session_id: SessionId, shares_to_move: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error>; + /// Start new share remove session. + fn new_share_remove_session(&self, session_id: SessionId, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error>; + /// Start new servers set change session. + fn new_servers_set_change_session(&self, session_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error>; /// Ask node to make 'faulty' generation sessions. #[cfg(test)] @@ -83,7 +91,7 @@ pub trait ClusterClient: Send + Sync { fn connect(&self); } -/// Cluster access for single encryption/decryption/signing participant. +/// Cluster access for single session participant. pub trait Cluster: Send + Sync { /// Broadcast message to all other nodes. fn broadcast(&self, message: Message) -> Result<(), Error>; @@ -108,6 +116,8 @@ pub struct ClusterConfiguration { pub key_storage: Arc, /// Reference to ACL storage pub acl_storage: Arc, + /// Administrator public key. + pub admin_public: Option, } /// Cluster state. @@ -261,23 +271,21 @@ impl ClusterCore { /// Connect to socket using given context and handle. fn connect_future(handle: &Handle, data: Arc, node_address: SocketAddr) -> BoxedEmptyFuture { let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect(); - net_connect(&node_address, handle, data.self_key_pair.clone(), disconnected_nodes) + Box::new(net_connect(&node_address, handle, data.self_key_pair.clone(), disconnected_nodes) .then(move |result| ClusterCore::process_connection_result(data, Some(node_address), result)) - .then(|_| finished(())) - .boxed() + .then(|_| finished(()))) } /// Start listening for incoming connections. fn listen(handle: &Handle, data: Arc, listen_address: SocketAddr) -> Result { - Ok(TcpListener::bind(&listen_address, &handle)? + Ok(Box::new(TcpListener::bind(&listen_address, &handle)? 
.incoming() .and_then(move |(stream, node_address)| { ClusterCore::accept_connection(data.clone(), stream, node_address); Ok(()) }) .for_each(|_| Ok(())) - .then(|_| finished(())) - .boxed()) + .then(|_| finished(())))) } /// Accept connection. @@ -289,21 +297,19 @@ impl ClusterCore { /// Accept connection future. fn accept_connection_future(handle: &Handle, data: Arc, stream: TcpStream, node_address: SocketAddr) -> BoxedEmptyFuture { - net_accept_connection(node_address, stream, handle, data.self_key_pair.clone()) + Box::new(net_accept_connection(node_address, stream, handle, data.self_key_pair.clone()) .then(move |result| ClusterCore::process_connection_result(data, None, result)) - .then(|_| finished(())) - .boxed() + .then(|_| finished(()))) } /// Schedule mainatain procedures. fn schedule_maintain(handle: &Handle, data: Arc) { let d = data.clone(); - let interval: BoxedEmptyFuture = Interval::new(time::Duration::new(MAINTAIN_INTERVAL, 0), handle) + let interval: BoxedEmptyFuture = Box::new(Interval::new(time::Duration::new(MAINTAIN_INTERVAL, 0), handle) .expect("failed to create interval") .and_then(move |_| Ok(ClusterCore::maintain(data.clone()))) .for_each(|_| Ok(())) - .then(|_| finished(())) - .boxed(); + .then(|_| finished(()))); d.spawn(interval); } @@ -319,7 +325,7 @@ impl ClusterCore { /// Called for every incomming mesage. fn process_connection_messages(data: Arc, connection: Arc) -> IoFuture> { - connection + Box::new(connection .read_message() .then(move |result| match result { @@ -327,22 +333,22 @@ impl ClusterCore { ClusterCore::process_connection_message(data.clone(), connection.clone(), message); // continue serving connection data.spawn(ClusterCore::process_connection_messages(data.clone(), connection)); - finished(Ok(())).boxed() + Box::new(finished(Ok(()))) }, Ok((_, Err(err))) => { warn!(target: "secretstore_net", "{}: protocol error '{}' when reading message from node {}", data.self_key_pair.public(), err, connection.node_id()); // continue serving connection data.spawn(ClusterCore::process_connection_messages(data.clone(), connection)); - finished(Err(err)).boxed() + Box::new(finished(Err(err))) }, Err(err) => { warn!(target: "secretstore_net", "{}: network error '{}' when reading message from node {}", data.self_key_pair.public(), err, connection.node_id()); // close connection data.connections.remove(connection.node_id(), connection.is_inbound()); - failed(err).boxed() + Box::new(failed(err)) }, } - ).boxed() + )) } /// Send keepalive messages to every othe node. @@ -361,7 +367,13 @@ impl ClusterCore { /// Try to connect to every disconnected node. 
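Most of the churn in cluster.rs here is a mechanical move away from futures' BoxFuture and the .boxed() combinator: each future is wrapped explicitly in Box::new(..) and BoxedEmptyFuture becomes an ordinary boxed trait object. A minimal sketch of the pattern, assuming the futures 0.1 API used by this crate:

extern crate futures; // futures = "0.1"

use futures::{future, Future};

// boxed trait object replacing the old BoxFuture<(), ()> alias
type BoxedEmptyFuture = Box<Future<Item = (), Error = ()> + Send>;

fn empty_future() -> BoxedEmptyFuture {
    // explicit Box::new(..) instead of the `.boxed()` combinator
    Box::new(future::ok(()))
}

fn main() {
    assert_eq!(empty_future().wait(), Ok(()));
}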
fn connect_disconnected_nodes(data: Arc) { - data.connections.update_nodes_set(); + // do not update nodes set if any admin session is active + // this could happen, but will possibly lead to admin session error + // => should be performed later + if data.sessions.admin_sessions.is_empty() { + data.connections.update_nodes_set(); + } + for (node_id, node_address) in data.connections.disconnected_nodes() { if data.config.allow_connecting_to_higher_nodes || data.self_key_pair.public() < &node_id { ClusterCore::connect(data.clone(), node_address); @@ -377,26 +389,26 @@ impl ClusterCore { if data.connections.insert(connection.clone()) { ClusterCore::process_connection_messages(data.clone(), connection) } else { - finished(Ok(())).boxed() + Box::new(finished(Ok(()))) } }, Ok(DeadlineStatus::Meet(Err(err))) => { warn!(target: "secretstore_net", "{}: protocol error '{}' when establishing {} connection{}", data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" }, outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); - finished(Ok(())).boxed() + Box::new(finished(Ok(()))) }, Ok(DeadlineStatus::Timeout) => { warn!(target: "secretstore_net", "{}: timeout when establishing {} connection{}", data.self_key_pair.public(), if outbound_addr.is_some() { "outbound" } else { "inbound" }, outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); - finished(Ok(())).boxed() + Box::new(finished(Ok(()))) }, Err(err) => { warn!(target: "secretstore_net", "{}: network error '{}' when establishing {} connection{}", data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" }, outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); - finished(Ok(())).boxed() + Box::new(finished(Ok(()))) }, } } @@ -410,6 +422,10 @@ impl ClusterCore { Message::Encryption(message) => ClusterCore::process_encryption_message(data, connection, message), Message::Decryption(message) => ClusterCore::process_decryption_message(data, connection, message), Message::Signing(message) => ClusterCore::process_signing_message(data, connection, message), + Message::ServersSetChange(message) => ClusterCore::process_servers_set_change_message(data, connection, message), + Message::ShareAdd(message) => ClusterCore::process_share_add_message(data, connection, message), + Message::ShareMove(message) => ClusterCore::process_share_move_message(data, connection, message), + Message::ShareRemove(message) => ClusterCore::process_share_remove_message(data, connection, message), Message::Cluster(message) => ClusterCore::process_cluster_message(data, connection, message), } } @@ -476,11 +492,13 @@ impl ClusterCore { }, Err(err) => { warn!(target: "secretstore_net", "{}: generation session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); - data.sessions.respond_with_generation_error(&session_id, message::SessionError { + let error_message = message::SessionError { session: session_id.clone().into(), session_nonce: session_nonce, error: format!("{:?}", err), - }); + }; + let _ = session.and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message)); // processing error => ignore error + data.sessions.respond_with_generation_error(&session_id, error_message); if err != Error::InvalidSessionId { data.sessions.generation_sessions.remove(&session_id); } @@ -529,7 +547,7 @@ impl ClusterCore { EncryptionMessage::ConfirmEncryptionInitialization(ref message) => 
session.on_confirm_initialization(sender.clone(), message), EncryptionMessage::EncryptionSessionError(ref message) => - session.on_session_error(sender.clone(), message), + session.on_session_error(&sender, message), }) { Ok(_) => { // if session is completed => stop @@ -559,11 +577,13 @@ impl ClusterCore { }, Err(err) => { warn!(target: "secretstore_net", "{}: encryption session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); - data.sessions.respond_with_encryption_error(&session_id, message::EncryptionSessionError { + let error_message = message::EncryptionSessionError { session: session_id.clone().into(), session_nonce: session_nonce, error: format!("{:?}", err), - }); + }; + let _ = session.and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message)); // processing error => ignore error + data.sessions.respond_with_encryption_error(&session_id, error_message); if err != Error::InvalidSessionId { data.sessions.encryption_sessions.remove(&session_id); } @@ -632,12 +652,14 @@ impl ClusterCore { }, Err(err) => { warn!(target: "secretstore_net", "{}: decryption session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); - data.sessions.respond_with_decryption_error(&session_id, &sub_session_id, &sender, message::DecryptionSessionError { + let error_message = message::DecryptionSessionError { session: session_id.clone().into(), sub_session: sub_session_id.clone().into(), session_nonce: session_nonce, error: format!("{:?}", err), - }); + }; + let _ = session.and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message)); // processing error => ignore error + data.sessions.respond_with_decryption_error(&session_id, &sub_session_id, &sender, error_message); if err != Error::InvalidSessionId { data.sessions.decryption_sessions.remove(&decryption_session_id); } @@ -647,7 +669,7 @@ impl ClusterCore { } } - /// Process singlesigning message from the connection. + /// Process single signing message from the connection. fn process_signing_message(data: Arc, connection: Arc, mut message: SigningMessage) { let session_id = message.session_id().clone(); let sub_session_id = message.sub_session_id().clone(); @@ -712,12 +734,14 @@ impl ClusterCore { }, Err(err) => { warn!(target: "secretstore_net", "{}: signing session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); - data.sessions.respond_with_signing_error(&session_id, &sub_session_id, &sender, message::SigningSessionError { + let error_message = message::SigningSessionError { session: session_id.clone().into(), sub_session: sub_session_id.clone().into(), session_nonce: session_nonce, error: format!("{:?}", err), - }); + }; + let _ = session.and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message)); // processing error => ignore error + data.sessions.respond_with_signing_error(&session_id, &sub_session_id, &sender, error_message); if err != Error::InvalidSessionId { data.sessions.signing_sessions.remove(&signing_session_id); } @@ -727,6 +751,330 @@ impl ClusterCore { } } + /// Process single servers set change message from the connection. 
+ fn process_servers_set_change_message(data: Arc, connection: Arc, mut message: ServersSetChangeMessage) { + let session_id = message.session_id().clone(); + let session_nonce = message.session_nonce(); + let mut sender = connection.node_id().clone(); + let session = match message { + ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref message) if match message.message { + ConsensusMessageWithServersSet::InitializeConsensusSession(_) => true, + _ => false, + } => { + let mut connected_nodes = data.connections.connected_nodes(); + connected_nodes.insert(data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes.clone())); + match data.sessions.new_servers_set_change_session(sender.clone(), Some(session_id.clone()), Some(session_nonce), cluster, connected_nodes) { + Ok(session) => Ok(session), + Err(err) => { + // this is new session => it is not yet in container + warn!(target: "secretstore_net", "{}: servers set change session initialization error '{}' when requested for new session from node {}", data.self_key_pair.public(), err, sender); + data.spawn(connection.send_message(Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(message::ServersSetChangeError { + session: session_id.into(), + session_nonce: session_nonce, + error: format!("{:?}", err), + })))); + return; + }, + } + }, + _ => { + data.sessions.admin_sessions.get(&session_id) + .ok_or(Error::InvalidSessionId) + }, + }; + + let mut is_queued_message = false; + loop { + match session.clone().and_then(|session| session.as_servers_set_change().ok_or(Error::InvalidMessage)?.process_message(&sender, &message)) { + Ok(_) => { + // if session is completed => stop + let session = session.clone().expect("session.method() call finished with success; session exists; qed"); + if session.is_finished() { + info!(target: "secretstore_net", "{}: servers set change session completed", data.self_key_pair.public()); + data.sessions.admin_sessions.remove(&session_id); + break; + } + + // try to dequeue message + match data.sessions.admin_sessions.dequeue_message(&session_id) { + Some((msg_sender, Message::ServersSetChange(msg))) => { + is_queued_message = true; + sender = msg_sender; + message = msg; + }, + Some(_) => unreachable!("we only queue message of the same type; qed"), + None => break, + } + }, + Err(Error::TooEarlyForRequest) => { + data.sessions.admin_sessions.enqueue_message(&session_id, sender, Message::ServersSetChange(message), is_queued_message); + break; + }, + Err(err) => { + warn!(target: "secretstore_net", "{}: servers set change session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); + let error_message = message::ServersSetChangeError { + session: session_id.clone().into(), + session_nonce: session_nonce, + error: format!("{:?}", err), + }; + let _ = session.and_then(|s| s.as_servers_set_change() + .ok_or(Error::InvalidMessage) + .and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message))); // processing error => ignore error + data.sessions.respond_with_servers_set_change_error(&session_id, error_message); + if err != Error::InvalidSessionId { + data.sessions.admin_sessions.remove(&session_id); + } + break; + }, + } + } + } + + /// Process single share add message from the connection. 
+ fn process_share_add_message(data: Arc, connection: Arc, mut message: ShareAddMessage) { + let session_id = message.session_id().clone(); + let session_nonce = message.session_nonce(); + let mut sender = connection.node_id().clone(); + let session = match message { + ShareAddMessage::ShareAddConsensusMessage(ref message) if match message.message { + ConsensusMessageWithServersSecretMap::InitializeConsensusSession(_) => true, + _ => false, + } => { + let mut connected_nodes = data.connections.connected_nodes(); + connected_nodes.insert(data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); + match data.sessions.new_share_add_session(sender.clone(), session_id.clone(), Some(session_nonce), cluster) { + Ok(session) => Ok(session), + Err(err) => { + // this is new session => it is not yet in container + warn!(target: "secretstore_net", "{}: share add session initialization error '{}' when requested for new session from node {}", data.self_key_pair.public(), err, sender); + data.spawn(connection.send_message(Message::ShareAdd(ShareAddMessage::ShareAddError(message::ShareAddError { + session: session_id.into(), + session_nonce: session_nonce, + error: format!("{:?}", err), + })))); + return; + }, + } + }, + _ => { + data.sessions.admin_sessions.get(&session_id) + .ok_or(Error::InvalidSessionId) + }, + }; + + let mut is_queued_message = false; + loop { + match session.clone().and_then(|session| session.as_share_add().ok_or(Error::InvalidMessage)?.process_message(&sender, &message)) { + Ok(_) => { + // if session is completed => stop + let session = session.clone().expect("session.method() call finished with success; session exists; qed"); + if session.is_finished() { + info!(target: "secretstore_net", "{}: share add session completed", data.self_key_pair.public()); + data.sessions.admin_sessions.remove(&session_id); + break; + } + + // try to dequeue message + match data.sessions.admin_sessions.dequeue_message(&session_id) { + Some((msg_sender, Message::ShareAdd(msg))) => { + is_queued_message = true; + sender = msg_sender; + message = msg; + }, + Some(_) => unreachable!("we only queue message of the same type; qed"), + None => break, + } + }, + Err(Error::TooEarlyForRequest) => { + data.sessions.admin_sessions.enqueue_message(&session_id, sender, Message::ShareAdd(message), is_queued_message); + break; + }, + Err(err) => { + warn!(target: "secretstore_net", "{}: share add session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); + let error_message = message::ShareAddError { + session: session_id.clone().into(), + session_nonce: session_nonce, + error: format!("{:?}", err), + }; + let _ = session.and_then(|s| s.as_share_add() + .ok_or(Error::InvalidMessage) + .and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message))); // processing error => ignore error + data.sessions.respond_with_share_add_error(&session_id, error_message); + if err != Error::InvalidSessionId { + data.sessions.admin_sessions.remove(&session_id); + } + break; + }, + } + } + } + + /// Process single share move message from the connection. 
+ fn process_share_move_message(data: Arc, connection: Arc, mut message: ShareMoveMessage) { + let session_id = message.session_id().clone(); + let session_nonce = message.session_nonce(); + let mut sender = connection.node_id().clone(); + let session = match message { + ShareMoveMessage::ShareMoveConsensusMessage(ref message) if match message.message { + ConsensusMessageWithServersMap::InitializeConsensusSession(_) => true, + _ => false, + } => { + let mut connected_nodes = data.connections.connected_nodes(); + connected_nodes.insert(data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); + match data.sessions.new_share_move_session(sender.clone(), session_id.clone(), Some(session_nonce), cluster) { + Ok(session) => Ok(session), + Err(err) => { + // this is new session => it is not yet in container + warn!(target: "secretstore_net", "{}: share move session initialization error '{}' when requested for new session from node {}", data.self_key_pair.public(), err, sender); + data.spawn(connection.send_message(Message::ShareMove(ShareMoveMessage::ShareMoveError(message::ShareMoveError { + session: session_id.into(), + session_nonce: session_nonce, + error: format!("{:?}", err), + })))); + return; + }, + } + }, + _ => { + data.sessions.admin_sessions.get(&session_id) + .ok_or(Error::InvalidSessionId) + }, + }; + + let mut is_queued_message = false; + loop { + match session.clone().and_then(|session| session.as_share_move().ok_or(Error::InvalidMessage)?.process_message(&sender, &message)) { + Ok(_) => { + // if session is completed => stop + let session = session.clone().expect("session.method() call finished with success; session exists; qed"); + if session.is_finished() { + info!(target: "secretstore_net", "{}: share move session completed", data.self_key_pair.public()); + data.sessions.admin_sessions.remove(&session_id); + break; + } + + // try to dequeue message + match data.sessions.admin_sessions.dequeue_message(&session_id) { + Some((msg_sender, Message::ShareMove(msg))) => { + is_queued_message = true; + sender = msg_sender; + message = msg; + }, + Some(_) => unreachable!("we only queue message of the same type; qed"), + None => break, + } + }, + Err(Error::TooEarlyForRequest) => { + data.sessions.admin_sessions.enqueue_message(&session_id, sender, Message::ShareMove(message), is_queued_message); + break; + }, + Err(err) => { + warn!(target: "secretstore_net", "{}: share move session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); + let error_message = message::ShareMoveError { + session: session_id.clone().into(), + session_nonce: session_nonce, + error: format!("{:?}", err), + }; + let _ = session.and_then(|s| s.as_share_move() + .ok_or(Error::InvalidMessage) + .and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message))); // processing error => ignore error + data.sessions.respond_with_share_move_error(&session_id, error_message); + if err != Error::InvalidSessionId { + data.sessions.admin_sessions.remove(&session_id); + } + break; + }, + } + } + } + + /// Process single share remove message from the connection. 
+ fn process_share_remove_message(data: Arc, connection: Arc, mut message: ShareRemoveMessage) { + let session_id = message.session_id().clone(); + let session_nonce = message.session_nonce(); + let mut sender = connection.node_id().clone(); + let session = match message { + ShareRemoveMessage::ShareRemoveConsensusMessage(ref message) if match message.message { + ConsensusMessageWithServersSet::InitializeConsensusSession(_) => true, + _ => false, + } => { + let mut connected_nodes = data.connections.connected_nodes(); + connected_nodes.insert(data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); + match data.sessions.new_share_remove_session(sender.clone(), session_id.clone(), Some(session_nonce), cluster) { + Ok(session) => Ok(session), + Err(err) => { + // this is new session => it is not yet in container + warn!(target: "secretstore_net", "{}: share remove session initialization error '{}' when requested for new session from node {}", data.self_key_pair.public(), err, sender); + data.spawn(connection.send_message(Message::ShareRemove(ShareRemoveMessage::ShareRemoveError(message::ShareRemoveError { + session: session_id.into(), + session_nonce: session_nonce, + error: format!("{:?}", err), + })))); + return; + }, + } + }, + _ => { + data.sessions.admin_sessions.get(&session_id) + .ok_or(Error::InvalidSessionId) + }, + }; + + let mut is_queued_message = false; + loop { + match session.clone().and_then(|session| session.as_share_remove().ok_or(Error::InvalidMessage)?.process_message(&sender, &message)) { + Ok(_) => { + // if session is completed => stop + let session = session.clone().expect("session.method() call finished with success; session exists; qed"); + if session.is_finished() { + info!(target: "secretstore_net", "{}: share remove session completed", data.self_key_pair.public()); + data.sessions.admin_sessions.remove(&session_id); + break; + } + + // try to dequeue message + match data.sessions.admin_sessions.dequeue_message(&session_id) { + Some((msg_sender, Message::ShareRemove(msg))) => { + is_queued_message = true; + sender = msg_sender; + message = msg; + }, + Some(_) => unreachable!("we only queue message of the same type; qed"), + None => break, + } + }, + Err(Error::TooEarlyForRequest) => { + data.sessions.admin_sessions.enqueue_message(&session_id, sender, Message::ShareRemove(message), is_queued_message); + break; + }, + Err(err) => { + warn!(target: "secretstore_net", "{}: share remove session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); + let error_message = message::ShareRemoveError { + session: session_id.clone().into(), + session_nonce: session_nonce, + error: format!("{:?}", err), + }; + let _ = session.and_then(|s| s.as_share_remove() + .ok_or(Error::InvalidMessage) + .and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message))); // processing error => ignore error + data.sessions.respond_with_share_remove_error(&session_id, error_message); + if err != Error::InvalidSessionId { + data.sessions.admin_sessions.remove(&session_id); + } + break; + }, + } + } + } + /// Process single cluster message from the connection. 
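The four handlers above (servers set change, share add, share move, share remove) share one dispatch skeleton: resolve or create the admin session, feed it the message, drain any queued messages, park messages that arrive too early, and on a fatal error broadcast it and drop the session. A condensed stand-alone sketch of that loop with placeholder types rather than the cluster's real session API:

use std::collections::VecDeque;

#[derive(Debug, PartialEq)]
enum Error { TooEarlyForRequest, Fatal }

struct AdminSession {
    queue: VecDeque<String>, // messages that arrived before the session was ready
    finished: bool,
}

impl AdminSession {
    fn process_message(&mut self, message: &str) -> Result<(), Error> {
        // placeholder for the real per-session state machine
        if message.starts_with("early:") {
            Err(Error::TooEarlyForRequest)
        } else {
            self.finished = true;
            Ok(())
        }
    }
}

fn dispatch(session: &mut AdminSession, mut message: String) -> Result<(), Error> {
    loop {
        match session.process_message(&message) {
            Ok(()) => {
                if session.finished {
                    return Ok(()); // session completed => remove it from the container
                }
                match session.queue.pop_front() {
                    Some(queued) => message = queued, // retry a message that arrived early
                    None => return Ok(()),
                }
            },
            Err(Error::TooEarlyForRequest) => {
                session.queue.push_back(message); // park the message until the session catches up
                return Ok(());
            },
            Err(err) => return Err(err), // fatal => respond with an error message and drop the session
        }
    }
}

fn main() {
    let mut session = AdminSession { queue: VecDeque::new(), finished: false };
    dispatch(&mut session, "early: consensus".to_string()).unwrap();
    assert_eq!(session.queue.len(), 1); // the early message waits in the queue
}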
fn process_cluster_message(data: Arc, connection: Arc, message: ClusterMessage) { match message { @@ -996,7 +1344,7 @@ impl ClusterClient for ClusterClientImpl { let mut connected_nodes = self.data.connections.connected_nodes(); connected_nodes.insert(self.data.self_key_pair.public().clone()); - let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); + let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes)); let session = self.data.sessions.new_encryption_session(self.data.self_key_pair.public().clone(), session_id, None, cluster)?; session.initialize(requestor_signature, common_point, encrypted_point)?; Ok(EncryptionSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)) @@ -1007,7 +1355,7 @@ impl ClusterClient for ClusterClientImpl { connected_nodes.insert(self.data.self_key_pair.public().clone()); let access_key = Random.generate()?.secret().clone(); - let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); + let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes)); let session = self.data.sessions.new_decryption_session(self.data.self_key_pair.public().clone(), session_id, access_key.clone(), None, cluster, Some(requestor_signature))?; session.initialize(is_shadow_decryption)?; Ok(DecryptionSessionWrapper::new(Arc::downgrade(&self.data), DecryptionSessionId::new(session_id, access_key), session)) @@ -1018,12 +1366,75 @@ impl ClusterClient for ClusterClientImpl { connected_nodes.insert(self.data.self_key_pair.public().clone()); let access_key = Random.generate()?.secret().clone(); - let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); + let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes)); let session = self.data.sessions.new_signing_session(self.data.self_key_pair.public().clone(), session_id, access_key.clone(), None, cluster, Some(requestor_signature))?; session.initialize(message_hash)?; Ok(SigningSessionWrapper::new(Arc::downgrade(&self.data), SigningSessionId::new(session_id, access_key), session)) } + fn new_share_add_session(&self, session_id: SessionId, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error> { + let mut connected_nodes = self.data.connections.connected_nodes(); + connected_nodes.insert(self.data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes)); + let session = self.data.sessions.new_share_add_session(self.data.self_key_pair.public().clone(), session_id, None, cluster)?; + session.as_share_add() + .expect("created 1 line above; qed") + .initialize(Some(new_nodes_set), Some(old_set_signature), Some(new_set_signature))?; + Ok(AdminSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)) + } + + fn new_share_move_session(&self, session_id: SessionId, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error> { + let key_share = self.data.config.key_storage.get(&session_id).map_err(|e| Error::KeyStorage(e.into()))?; + if new_nodes_set.len() != key_share.id_numbers.len() { + return Err(Error::InvalidNodesConfiguration); + } + + let old_nodes_set: BTreeSet<_> = key_share.id_numbers.keys().cloned().collect(); + let nodes_to_add: BTreeSet<_> = new_nodes_set.difference(&old_nodes_set).collect(); + let mut shares_to_move = BTreeMap::new(); + for (target_node, source_node) in nodes_to_add.into_iter().zip(key_share.id_numbers.keys()) { + 
shares_to_move.insert(target_node.clone(), source_node.clone()); + } + + let mut connected_nodes = self.data.connections.connected_nodes(); + connected_nodes.insert(self.data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes)); + let session = self.data.sessions.new_share_move_session(self.data.self_key_pair.public().clone(), session_id, None, cluster)?; + session.as_share_move() + .expect("created 1 line above; qed") + .initialize(Some(shares_to_move), Some(old_set_signature), Some(new_set_signature))?; + Ok(AdminSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)) + } + + fn new_share_remove_session(&self, session_id: SessionId, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error> { + let mut connected_nodes = self.data.connections.connected_nodes(); + connected_nodes.insert(self.data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes)); + let session = self.data.sessions.new_share_remove_session(self.data.self_key_pair.public().clone(), session_id, None, cluster)?; + session.as_share_remove() + .expect("created 1 line above; qed") + .initialize(Some(new_nodes_set), Some(old_set_signature), Some(new_set_signature))?; + Ok(AdminSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)) + } + + fn new_servers_set_change_session(&self, session_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error> { + let mut connected_nodes = self.data.connections.connected_nodes(); + connected_nodes.insert(self.data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); + let session = self.data.sessions.new_servers_set_change_session(self.data.self_key_pair.public().clone(), session_id, None, cluster, connected_nodes)?; + let session_id = { + let servers_set_change_session = session.as_servers_set_change().expect("created 1 line above; qed"); + servers_set_change_session.initialize(new_nodes_set, old_set_signature, new_set_signature)?; + servers_set_change_session.id().clone() + }; + Ok(AdminSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)) + + } + #[cfg(test)] fn connect(&self) { ClusterCore::connect_disconnected_nodes(self.data.clone()); @@ -1140,6 +1551,7 @@ pub mod tests { allow_connecting_to_higher_nodes: false, key_storage: Arc::new(DummyKeyStorage::default()), acl_storage: Arc::new(DummyAclStorage::default()), + admin_public: None, }).collect(); let clusters: Vec<_> = cluster_params.into_iter().enumerate() .map(|(_, params)| ClusterCore::new(core.handle(), params).unwrap()) @@ -1189,16 +1601,47 @@ pub mod tests { // start && wait for generation session to fail let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap(); - loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some()); + loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some() + && clusters[0].client().generation_session(&SessionId::default()).is_none()); assert!(session.joint_public_and_secret().unwrap().is_err()); // check that faulty session is either removed from all nodes, or nonexistent (already removed) - assert!(clusters[0].client().generation_session(&SessionId::default()).is_none()); for i in 1..3 { if let Some(session) = 
clusters[i].client().generation_session(&SessionId::default()) { - loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some()); + // wait for both session completion && session removal (session completion event is fired + // before session is removed from its own container by cluster) + loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some() + && clusters[i].client().generation_session(&SessionId::default()).is_none()); + assert!(session.joint_public_and_secret().unwrap().is_err()); + } + } + } + + #[test] + fn generation_session_completion_signalled_if_failed_on_master() { + //::logger::init_log(); + let mut core = Core::new().unwrap(); + let clusters = make_clusters(&core, 6023, 3); + run_clusters(&clusters); + loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established)); + + // ask one of nodes to produce faulty generation sessions + clusters[0].client().make_faulty_generation_sessions(); + + // start && wait for generation session to fail + let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap(); + loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some() + && clusters[0].client().generation_session(&SessionId::default()).is_none()); + assert!(session.joint_public_and_secret().unwrap().is_err()); + + // check that faulty session is either removed from all nodes, or nonexistent (already removed) + for i in 1..3 { + if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) { + // wait for both session completion && session removal (session completion event is fired + // before session is removed from its own container by cluster) + loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some() + && clusters[i].client().generation_session(&SessionId::default()).is_none()); assert!(session.joint_public_and_secret().unwrap().is_err()); - assert!(clusters[i].client().generation_session(&SessionId::default()).is_none()); } } } @@ -1213,18 +1656,18 @@ pub mod tests { // start && wait for generation session to complete let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap(); - loop_until(&mut core, time::Duration::from_millis(300), || session.state() == GenerationSessionState::Finished - || session.state() == GenerationSessionState::Failed); + loop_until(&mut core, time::Duration::from_millis(300), || (session.state() == GenerationSessionState::Finished + || session.state() == GenerationSessionState::Failed) + && clusters[0].client().generation_session(&SessionId::default()).is_none()); assert!(session.joint_public_and_secret().unwrap().is_ok()); // check that session is either removed from all nodes, or nonexistent (already removed) - assert!(clusters[0].client().generation_session(&SessionId::default()).is_none()); for i in 1..3 { if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) { - loop_until(&mut core, time::Duration::from_millis(300), || session.state() == GenerationSessionState::Finished - || session.state() == GenerationSessionState::Failed); + loop_until(&mut core, time::Duration::from_millis(300), || (session.state() == GenerationSessionState::Finished + || session.state() == GenerationSessionState::Failed) + && clusters[i].client().generation_session(&SessionId::default()).is_none()); 
assert!(session.joint_public_and_secret().unwrap().is_err()); - assert!(clusters[i].client().generation_session(&SessionId::default()).is_none()); } } } diff --git a/secret_store/src/key_server_cluster/cluster_sessions.rs b/secret_store/src/key_server_cluster/cluster_sessions.rs index 1168dedc1..cfc00241d 100644 --- a/secret_store/src/key_server_cluster/cluster_sessions.rs +++ b/secret_store/src/key_server_cluster/cluster_sessions.rs @@ -22,7 +22,8 @@ use parking_lot::RwLock; use ethkey::{Public, Secret, Signature}; use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, DocumentKeyShare, EncryptedDocumentKeyShadow, SessionMeta}; use key_server_cluster::cluster::{Cluster, ClusterData, ClusterView, ClusterConfiguration}; -use key_server_cluster::message::{self, Message, GenerationMessage, EncryptionMessage, DecryptionMessage, SigningMessage}; +use key_server_cluster::message::{self, Message, GenerationMessage, EncryptionMessage, DecryptionMessage, SigningMessage, + ShareAddMessage, ShareMoveMessage, ShareRemoveMessage, ServersSetChangeMessage}; use key_server_cluster::generation_session::{Session as GenerationSession, SessionImpl as GenerationSessionImpl, SessionParams as GenerationSessionParams, SessionState as GenerationSessionState}; use key_server_cluster::decryption_session::{Session as DecryptionSession, SessionImpl as DecryptionSessionImpl, @@ -31,6 +32,15 @@ use key_server_cluster::encryption_session::{Session as EncryptionSession, Sessi SessionParams as EncryptionSessionParams, SessionState as EncryptionSessionState}; use key_server_cluster::signing_session::{Session as SigningSession, SessionImpl as SigningSessionImpl, SigningSessionId, SessionParams as SigningSessionParams}; +use key_server_cluster::share_add_session::{Session as ShareAddSession, SessionImpl as ShareAddSessionImpl, + SessionParams as ShareAddSessionParams, IsolatedSessionTransport as ShareAddTransport}; +use key_server_cluster::share_move_session::{Session as ShareMoveSession, SessionImpl as ShareMoveSessionImpl, + SessionParams as ShareMoveSessionParams, IsolatedSessionTransport as ShareMoveTransport}; +use key_server_cluster::share_remove_session::{Session as ShareRemoveSession, SessionImpl as ShareRemoveSessionImpl, + SessionParams as ShareRemoveSessionParams, IsolatedSessionTransport as ShareRemoveTransport}; +use key_server_cluster::servers_set_change_session::{Session as ServersSetChangeSession, SessionImpl as ServersSetChangeSessionImpl, + SessionParams as ServersSetChangeSessionParams}; +use key_server_cluster::admin_sessions::ShareChangeSessionMeta; /// When there are no session-related messages for SESSION_TIMEOUT_INTERVAL seconds, /// we must treat this session as stalled && finish it with an error. @@ -38,6 +48,13 @@ use key_server_cluster::signing_session::{Session as SigningSession, SessionImpl /// session messages. const SESSION_TIMEOUT_INTERVAL: u64 = 60; +lazy_static! { + /// Servers set change session id (there could be at most 1 session => hardcoded id). + static ref SERVERS_SET_CHANGE_SESSION_ID: SessionId = "10b7af423bb551d5dc8645db754163a2145d37d78d468fa7330435ed77064c1c206f4b71d62491dfb9f7dbeccc42a6c112c8bb507de7b4fcad8d646272b2c363" + .parse() + .expect("hardcoded id should parse without errors; qed"); +} + /// Generic cluster session. pub trait ClusterSession { /// If session is finished (either with succcess or not). @@ -48,6 +65,18 @@ pub trait ClusterSession { fn on_node_timeout(&self, node_id: &NodeId); } +/// Administrative session. 
+pub enum AdminSession { + /// Share add session. + ShareAdd(ShareAddSessionImpl), + /// Share move session. + ShareMove(ShareMoveSessionImpl), + /// Share remove session. + ShareRemove(ShareRemoveSessionImpl), + /// Servers set change session. + ServersSetChange(ServersSetChangeSessionImpl), +} + /// Active sessions on this cluster. pub struct ClusterSessions { /// Key generation sessions. @@ -58,6 +87,8 @@ pub struct ClusterSessions { pub decryption_sessions: ClusterSessionsContainer, /// Signing sessions. pub signing_sessions: ClusterSessionsContainer, + /// Administrative sessions. + pub admin_sessions: ClusterSessionsContainer, /// Self node id. self_node_id: NodeId, /// All nodes ids. @@ -66,6 +97,8 @@ pub struct ClusterSessions { key_storage: Arc, /// Reference to ACL storage acl_storage: Arc, + /// Administrator public. + admin_public: Option, /// Make faulty generation sessions. make_faulty_generation_sessions: AtomicBool, /// Always-increasing sessions counter. Is used as session nonce to prevent replay attacks: @@ -142,6 +175,16 @@ pub struct SigningSessionWrapper { cluster: Weak, } +/// Admin session implementation, which removes session from cluster on drop. +pub struct AdminSessionWrapper { + /// Wrapped session. + session: Arc, + /// Session Id. + session_id: SessionId, + /// Cluster data reference. + cluster: Weak, +} + impl ClusterSessions { /// Create new cluster sessions container. pub fn new(config: &ClusterConfiguration) -> Self { @@ -150,10 +193,12 @@ impl ClusterSessions { nodes: config.key_server_set.get().keys().cloned().collect(), acl_storage: config.acl_storage.clone(), key_storage: config.key_storage.clone(), + admin_public: config.admin_public.clone(), generation_sessions: ClusterSessionsContainer::new(), encryption_sessions: ClusterSessionsContainer::new(), decryption_sessions: ClusterSessionsContainer::new(), signing_sessions: ClusterSessionsContainer::new(), + admin_sessions: ClusterSessionsContainer::new(), make_faulty_generation_sessions: AtomicBool::new(false), session_counter: AtomicUsize::new(0), max_nonce: RwLock::new(BTreeMap::new()), @@ -313,12 +358,146 @@ impl ClusterSessions { }); } + /// Create new share add session. + pub fn new_share_add_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { + let nonce = self.check_session_nonce(&master, nonce)?; + let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?; + + self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), move || ShareAddSessionImpl::new(ShareAddSessionParams { + meta: ShareChangeSessionMeta { + id: session_id, + self_node_id: self.self_node_id.clone(), + master_node_id: master, + }, + transport: ShareAddTransport::new(session_id.clone(), nonce, cluster), + key_storage: self.key_storage.clone(), + admin_public: Some(admin_public), + nonce: nonce, + }).map(AdminSession::ShareAdd)) + } + + /// Send share add session error. + pub fn respond_with_share_add_error(&self, session_id: &SessionId, error: message::ShareAddError) { + self.admin_sessions.sessions.read().get(&session_id) + .map(|s| { + // error in any share change session is considered fatal + // => broadcast error + + // do not bother processing send error, as we already processing error + let _ = s.cluster_view.broadcast(Message::ShareAdd(ShareAddMessage::ShareAddError(error))); + }); + } + + /// Create new share move session. 
+ pub fn new_share_move_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { + let nonce = self.check_session_nonce(&master, nonce)?; + let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?; + + self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), move || ShareMoveSessionImpl::new(ShareMoveSessionParams { + meta: ShareChangeSessionMeta { + id: session_id, + self_node_id: self.self_node_id.clone(), + master_node_id: master, + }, + transport: ShareMoveTransport::new(session_id.clone(), nonce, cluster), + key_storage: self.key_storage.clone(), + admin_public: Some(admin_public), + nonce: nonce, + }).map(AdminSession::ShareMove)) + } + + /// Send share move session error. + pub fn respond_with_share_move_error(&self, session_id: &SessionId, error: message::ShareMoveError) { + self.admin_sessions.sessions.read().get(&session_id) + .map(|s| { + // error in any share change session is considered fatal + // => broadcast error + + // do not bother processing send error, as we already processing error + let _ = s.cluster_view.broadcast(Message::ShareMove(ShareMoveMessage::ShareMoveError(error))); + }); + } + + /// Create new share remove session. + pub fn new_share_remove_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { + let nonce = self.check_session_nonce(&master, nonce)?; + let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?; + + self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), move || ShareRemoveSessionImpl::new(ShareRemoveSessionParams { + meta: ShareChangeSessionMeta { + id: session_id, + self_node_id: self.self_node_id.clone(), + master_node_id: master, + }, + transport: ShareRemoveTransport::new(session_id.clone(), nonce, cluster), + key_storage: self.key_storage.clone(), + admin_public: Some(admin_public), + nonce: nonce, + }).map(AdminSession::ShareRemove)) + } + + /// Send share remove session error. + pub fn respond_with_share_remove_error(&self, session_id: &SessionId, error: message::ShareRemoveError) { + self.admin_sessions.sessions.read().get(&session_id) + .map(|s| { + // error in any share change session is considered fatal + // => broadcast error + + // do not bother processing send error, as we already processing error + let _ = s.cluster_view.broadcast(Message::ShareRemove(ShareRemoveMessage::ShareRemoveError(error))); + }); + } + + /// Create new servers set change session. 
+ pub fn new_servers_set_change_session(&self, master: NodeId, session_id: Option, nonce: Option, cluster: Arc, all_nodes_set: BTreeSet) -> Result, Error> { + // TODO: check if there's no other active sessions + do not allow to start other sessions when this session is active + let session_id = match session_id { + Some(session_id) => if session_id == *SERVERS_SET_CHANGE_SESSION_ID { + session_id + } else { + return Err(Error::InvalidMessage) + }, + None => (*SERVERS_SET_CHANGE_SESSION_ID).clone(), + }; + let nonce = self.check_session_nonce(&master, nonce)?; + let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?; + + self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), move || ServersSetChangeSessionImpl::new(ServersSetChangeSessionParams { + meta: ShareChangeSessionMeta { + id: session_id, + self_node_id: self.self_node_id.clone(), + master_node_id: master, + }, + cluster: cluster, + key_storage: self.key_storage.clone(), + admin_public: admin_public, + nonce: nonce, + all_nodes_set: all_nodes_set, + }).map(AdminSession::ServersSetChange)) + } + + /// Send share remove session error. + pub fn respond_with_servers_set_change_error(&self, session_id: &SessionId, error: message::ServersSetChangeError) { + self.admin_sessions.sessions.read().get(&session_id) + .map(|s| { + // error in any share change session is considered fatal + // => broadcast error + + // do not bother processing send error, as we already processing error + let _ = s.cluster_view.broadcast(Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(error))); + }); + } + /// Stop sessions that are stalling. pub fn stop_stalled_sessions(&self) { self.generation_sessions.stop_stalled_sessions(); self.encryption_sessions.stop_stalled_sessions(); self.decryption_sessions.stop_stalled_sessions(); self.signing_sessions.stop_stalled_sessions(); + // TODO: servers set change session could take a lot of time + // && during that session some nodes could not receive messages + // => they could stop session as stalled. This must be handled + self.admin_sessions.stop_stalled_sessions(); } /// When connection to node is lost. 
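// The constructors above share two guards: every admin session requires a
// configured administrator public key (otherwise AccessDenied), and a servers
// set change session may only run under the single hardcoded session id. A
// self-contained sketch of that gating with placeholder types (u64 ids and a
// 32-byte key stand in for SessionId/Public; all names are illustrative only):

#[derive(Debug, PartialEq)]
enum GateError { AccessDenied, InvalidMessage }

fn check_admin_session(
	configured_admin: Option<[u8; 32]>,
	requested_id: Option<u64>,
	hardcoded_id: u64,
) -> Result<([u8; 32], u64), GateError> {
	// no administrator configured => this node refuses to start admin sessions
	let admin = configured_admin.ok_or(GateError::AccessDenied)?;
	// at most one servers set change session => only the hardcoded id is accepted
	let id = match requested_id {
		Some(id) if id == hardcoded_id => id,
		Some(_) => return Err(GateError::InvalidMessage),
		None => hardcoded_id,
	};
	Ok((admin, id))
}

fn main() {
	assert_eq!(check_admin_session(None, None, 1), Err(GateError::AccessDenied));
	assert_eq!(check_admin_session(Some([0; 32]), Some(2), 1), Err(GateError::InvalidMessage));
	assert!(check_admin_session(Some([0; 32]), None, 1).is_ok());
}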
@@ -327,6 +506,7 @@ impl ClusterSessions { self.encryption_sessions.on_connection_timeout(node_id); self.decryption_sessions.on_connection_timeout(node_id); self.signing_sessions.on_connection_timeout(node_id); + self.admin_sessions.on_connection_timeout(node_id); self.max_nonce.write().remove(node_id); } @@ -366,6 +546,10 @@ impl ClusterSessionsContainer where K: Clone + Ord, V: Cluster } } + pub fn is_empty(&self) -> bool { + self.sessions.read().is_empty() + } + pub fn get(&self, session_id: &K) -> Option> { self.sessions.read().get(session_id).map(|s| s.session.clone()) } @@ -437,6 +621,65 @@ impl ClusterSessionsContainer where K: Clone + Ord, V: Cluster } } +impl AdminSession { + pub fn as_share_add(&self) -> Option<&ShareAddSessionImpl> { + match *self { + AdminSession::ShareAdd(ref session) => Some(session), + _ => None + } + } + + pub fn as_share_move(&self) -> Option<&ShareMoveSessionImpl> { + match *self { + AdminSession::ShareMove(ref session) => Some(session), + _ => None + } + } + + pub fn as_share_remove(&self) -> Option<&ShareRemoveSessionImpl> { + match *self { + AdminSession::ShareRemove(ref session) => Some(session), + _ => None + } + } + + pub fn as_servers_set_change(&self) -> Option<&ServersSetChangeSessionImpl> { + match *self { + AdminSession::ServersSetChange(ref session) => Some(session), + _ => None + } + } +} + +impl ClusterSession for AdminSession { + fn is_finished(&self) -> bool { + match *self { + AdminSession::ShareAdd(ref session) => session.is_finished(), + AdminSession::ShareMove(ref session) => session.is_finished(), + AdminSession::ShareRemove(ref session) => session.is_finished(), + AdminSession::ServersSetChange(ref session) => session.is_finished(), + } + } + + fn on_session_timeout(&self) { + match *self { + AdminSession::ShareAdd(ref session) => session.on_session_timeout(), + AdminSession::ShareMove(ref session) => session.on_session_timeout(), + AdminSession::ShareRemove(ref session) => session.on_session_timeout(), + AdminSession::ServersSetChange(ref session) => session.on_session_timeout(), + } + } + + fn on_node_timeout(&self, node_id: &NodeId) { + match *self { + AdminSession::ShareAdd(ref session) => session.on_node_timeout(node_id), + AdminSession::ShareMove(ref session) => session.on_node_timeout(node_id), + AdminSession::ShareRemove(ref session) => session.on_node_timeout(node_id), + AdminSession::ServersSetChange(ref session) => session.on_node_timeout(node_id), + } + } +} + impl GenerationSessionWrapper { pub fn new(cluster: Weak, session_id: SessionId, session: Arc) -> Arc { Arc::new(GenerationSessionWrapper { @@ -544,3 +787,57 @@ impl Drop for SigningSessionWrapper { } } } + +impl AdminSessionWrapper { + pub fn new(cluster: Weak, session_id: SessionId, session: Arc) -> Arc { + Arc::new(AdminSessionWrapper { + session: session, + session_id: session_id, + cluster: cluster, + }) + } +} + +impl ShareAddSession for AdminSessionWrapper { + fn wait(&self) -> Result<(), Error> { + match *self.session { + AdminSession::ShareAdd(ref session) => session.wait(), + _ => Err(Error::InvalidMessage), + } + } +} + +impl ShareMoveSession for AdminSessionWrapper { + fn wait(&self) -> Result<(), Error> { + match *self.session { + AdminSession::ShareMove(ref session) => session.wait(), + _ => Err(Error::InvalidMessage), + } + } +} + +impl ShareRemoveSession for AdminSessionWrapper { + fn wait(&self) -> Result<(), Error> { + match *self.session { + AdminSession::ShareRemove(ref session) => session.wait(), + _ => Err(Error::InvalidMessage), + } + } +} + 
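// AdminSessionWrapper follows the same pattern as the other *SessionWrapper
// types: hold a Weak back-reference to the owner and deregister the session in
// Drop, so callers cannot leak entries in the sessions container. A stand-alone
// sketch of that "remove on drop" pattern with toy types (the real code works
// against ClusterData and ClusterSessionsContainer):

use std::collections::BTreeMap;
use std::sync::{Arc, Mutex, Weak};

struct Registry {
	sessions: Mutex<BTreeMap<u64, &'static str>>,
}

struct SessionGuard {
	id: u64,
	registry: Weak<Registry>,
}

impl Drop for SessionGuard {
	fn drop(&mut self) {
		// if the registry itself is already gone, there is nothing to clean up
		if let Some(registry) = self.registry.upgrade() {
			registry.sessions.lock().expect("not poisoned").remove(&self.id);
		}
	}
}

fn main() {
	let registry = Arc::new(Registry { sessions: Mutex::new(BTreeMap::new()) });
	registry.sessions.lock().unwrap().insert(1, "servers set change");
	{
		let _guard = SessionGuard { id: 1, registry: Arc::downgrade(&registry) };
		assert!(registry.sessions.lock().unwrap().contains_key(&1));
	} // guard dropped here => session is removed from the registry
	assert!(registry.sessions.lock().unwrap().is_empty());
}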
+impl ServersSetChangeSession for AdminSessionWrapper { + fn wait(&self) -> Result<(), Error> { + match *self.session { + AdminSession::ServersSetChange(ref session) => session.wait(), + _ => Err(Error::InvalidMessage), + } + } +} + +impl Drop for AdminSessionWrapper { + fn drop(&mut self) { + if let Some(cluster) = self.cluster.upgrade() { + cluster.sessions().admin_sessions.remove(&self.session_id); + } + } +} diff --git a/secret_store/src/key_server_cluster/io/deadline.rs b/secret_store/src/key_server_cluster/io/deadline.rs index a2f794e76..620003775 100644 --- a/secret_store/src/key_server_cluster/io/deadline.rs +++ b/secret_store/src/key_server_cluster/io/deadline.rs @@ -16,16 +16,16 @@ use std::io; use std::time::Duration; -use futures::{Future, Select, BoxFuture, Poll, Async}; +use futures::{Future, Select, Poll, Async}; use tokio_core::reactor::{Handle, Timeout}; -type DeadlineBox = BoxFuture::Item>, ::Error>; +type DeadlineBox = ::std::boxed::Box::Item>, Error = ::Error> + Send>; /// Complete a passed future or fail if it is not completed within timeout. pub fn deadline(duration: Duration, handle: &Handle, future: F) -> Result, io::Error> where F: Future + Send + 'static, T: 'static { - let timeout = Timeout::new(duration, handle)?.map(|_| DeadlineStatus::Timeout).boxed(); - let future = future.map(DeadlineStatus::Meet).boxed(); + let timeout: DeadlineBox = Box::new(Timeout::new(duration, handle)?.map(|_| DeadlineStatus::Timeout)); + let future: DeadlineBox = Box::new(future.map(DeadlineStatus::Meet)); let deadline = Deadline { future: timeout.select(future), }; diff --git a/secret_store/src/key_server_cluster/io/message.rs b/secret_store/src/key_server_cluster/io/message.rs index 2e549f1d9..d793c75ed 100644 --- a/secret_store/src/key_server_cluster/io/message.rs +++ b/secret_store/src/key_server_cluster/io/message.rs @@ -26,20 +26,21 @@ use bigint::prelude::U256; use bigint::hash::H256; use key_server_cluster::Error; use key_server_cluster::message::{Message, ClusterMessage, GenerationMessage, EncryptionMessage, - DecryptionMessage, SigningMessage}; + DecryptionMessage, SigningMessage, ServersSetChangeMessage, ShareAddMessage, ShareMoveMessage, + ShareRemoveMessage}; /// Size of serialized header. -pub const MESSAGE_HEADER_SIZE: usize = 4; +pub const MESSAGE_HEADER_SIZE: usize = 18; /// Current header version. -pub const CURRENT_HEADER_VERSION: u8 = 1; +pub const CURRENT_HEADER_VERSION: u64 = 1; /// Message header. #[derive(Debug, PartialEq)] pub struct MessageHeader { /// Message/Header version. - pub version: u8, + pub version: u64, /// Message kind. - pub kind: u8, + pub kind: u64, /// Message payload size (without header). 
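// The io/deadline.rs hunk above replaces the BoxFuture/.boxed() combination with
// explicitly boxed trait objects, still racing the wrapped future against a
// reactor Timeout via select(). A usage-style sketch of that pattern, assuming
// futures 0.1 and tokio-core 0.1 as used by this crate (DeadlineStatus and the
// 10ms/300ms durations are illustrative):

extern crate futures;
extern crate tokio_core;

use std::io;
use std::time::Duration;
use futures::Future;
use tokio_core::reactor::{Core, Timeout};

#[derive(Debug, PartialEq)]
enum DeadlineStatus<T> { Meet(T), Timeout }

type DeadlineBox<T> = Box<Future<Item = DeadlineStatus<T>, Error = io::Error> + Send>;

fn main() {
	let mut core = Core::new().unwrap();
	let handle = core.handle();

	// the "real" work: completes after 10ms with a value
	let work = Timeout::new(Duration::from_millis(10), &handle).unwrap().map(|_| 42u32);

	// race the work against a 300ms deadline, as io/deadline.rs does
	let timeout: DeadlineBox<u32> = Box::new(
		Timeout::new(Duration::from_millis(300), &handle).unwrap().map(|_| DeadlineStatus::Timeout));
	let work: DeadlineBox<u32> = Box::new(work.map(DeadlineStatus::Meet));
	let deadline = timeout.select(work)
		.map(|(status, _)| status)
		.map_err(|(error, _)| error);

	assert_eq!(core.run(deadline).unwrap(), DeadlineStatus::Meet(42));
}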
pub size: u16, } @@ -94,6 +95,45 @@ pub fn serialize_message(message: Message) -> Result { Message::Signing(SigningMessage::PartialSignature(payload)) => (203, serde_json::to_vec(&payload)), Message::Signing(SigningMessage::SigningSessionError(payload)) => (204, serde_json::to_vec(&payload)), Message::Signing(SigningMessage::SigningSessionCompleted(payload)) => (205, serde_json::to_vec(&payload)), + + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(payload)) + => (250, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest(payload)) => (251, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(payload)) => (252, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(payload)) + => (253, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(payload)) + => (254, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(payload)) + => (255, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(payload)) + => (256, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(payload)) + => (257, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareMoveMessage(payload)) + => (258, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(payload)) + => (259, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(payload)) => (260, serde_json::to_vec(&payload)), + Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(payload)) + => (261, serde_json::to_vec(&payload)), + + Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(payload)) => (300, serde_json::to_vec(&payload)), + Message::ShareAdd(ShareAddMessage::KeyShareCommon(payload)) => (301, serde_json::to_vec(&payload)), + Message::ShareAdd(ShareAddMessage::NewAbsoluteTermShare(payload)) => (302, serde_json::to_vec(&payload)), + Message::ShareAdd(ShareAddMessage::NewKeysDissemination(payload)) => (303, serde_json::to_vec(&payload)), + Message::ShareAdd(ShareAddMessage::ShareAddError(payload)) => (304, serde_json::to_vec(&payload)), + + Message::ShareMove(ShareMoveMessage::ShareMoveConsensusMessage(payload)) => (350, serde_json::to_vec(&payload)), + Message::ShareMove(ShareMoveMessage::ShareMoveRequest(payload)) => (351, serde_json::to_vec(&payload)), + Message::ShareMove(ShareMoveMessage::ShareMove(payload)) => (352, serde_json::to_vec(&payload)), + Message::ShareMove(ShareMoveMessage::ShareMoveConfirm(payload)) => (353, serde_json::to_vec(&payload)), + Message::ShareMove(ShareMoveMessage::ShareMoveError(payload)) => (354, serde_json::to_vec(&payload)), + + Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(payload)) => (400, serde_json::to_vec(&payload)), + Message::ShareRemove(ShareRemoveMessage::ShareRemoveRequest(payload)) => (401, serde_json::to_vec(&payload)), + Message::ShareRemove(ShareRemoveMessage::ShareRemoveConfirm(payload)) => (402, serde_json::to_vec(&payload)), + Message::ShareRemove(ShareRemoveMessage::ShareRemoveError(payload)) => (403, serde_json::to_vec(&payload)), }; let payload = 
payload.map_err(|err| Error::Serde(err.to_string()))?; @@ -137,6 +177,36 @@ pub fn deserialize_message(header: &MessageHeader, payload: Vec) -> Result Message::Signing(SigningMessage::SigningSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), 205 => Message::Signing(SigningMessage::SigningSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 250 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 251 => Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 252 => Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 253 => Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 254 => Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 255 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 256 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 257 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 258 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareMoveMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 259 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 260 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 261 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + + 300 => Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 301 => Message::ShareAdd(ShareAddMessage::KeyShareCommon(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 302 => Message::ShareAdd(ShareAddMessage::NewAbsoluteTermShare(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 303 => Message::ShareAdd(ShareAddMessage::NewKeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 304 => Message::ShareAdd(ShareAddMessage::ShareAddError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + + 350 => Message::ShareMove(ShareMoveMessage::ShareMoveConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 351 => Message::ShareMove(ShareMoveMessage::ShareMoveRequest(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 352 => 
Message::ShareMove(ShareMoveMessage::ShareMove(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 353 => Message::ShareMove(ShareMoveMessage::ShareMoveConfirm(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 354 => Message::ShareMove(ShareMoveMessage::ShareMoveError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + + 400 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 401 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveRequest(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 402 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveConfirm(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 403 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + _ => return Err(Error::Serde(format!("unknown message type {}", header.kind))), }) } @@ -170,8 +240,8 @@ pub fn fix_shared_key(shared_secret: &Secret) -> Result { /// Serialize message header. fn serialize_header(header: &MessageHeader) -> Result, Error> { let mut buffer = Vec::with_capacity(MESSAGE_HEADER_SIZE); - buffer.write_u8(header.version)?; - buffer.write_u8(header.kind)?; + buffer.write_u64::(header.version)?; + buffer.write_u64::(header.kind)?; buffer.write_u16::(header.size)?; Ok(buffer) } @@ -179,14 +249,14 @@ fn serialize_header(header: &MessageHeader) -> Result, Error> { /// Deserialize message header. pub fn deserialize_header(data: &[u8]) -> Result { let mut reader = Cursor::new(data); - let version = reader.read_u8()?; + let version = reader.read_u64::()?; if version != CURRENT_HEADER_VERSION { return Err(Error::InvalidMessageVersion); } Ok(MessageHeader { version: version, - kind: reader.read_u8()?, + kind: reader.read_u64::()?, size: reader.read_u16::()?, }) } diff --git a/secret_store/src/key_server_cluster/jobs/consensus_session.rs b/secret_store/src/key_server_cluster/jobs/consensus_session.rs index e789663d1..abc31b9f5 100644 --- a/secret_store/src/key_server_cluster/jobs/consensus_session.rs +++ b/secret_store/src/key_server_cluster/jobs/consensus_session.rs @@ -15,12 +15,10 @@ // along with Parity. If not, see . use std::collections::BTreeSet; -use std::sync::Arc; -use ethkey::{Public, Signature, recover}; -use key_server_cluster::{Error, NodeId, SessionMeta, AclStorage}; +use ethkey::Signature; +use key_server_cluster::{Error, NodeId, SessionMeta}; use key_server_cluster::message::ConsensusMessage; use key_server_cluster::jobs::job_session::{JobSession, JobSessionState, JobTransport, JobExecutor}; -use key_server_cluster::jobs::key_access_job::KeyAccessJob; /// Consensus session state. #[derive(Debug, Clone, Copy, PartialEq)] @@ -47,15 +45,17 @@ pub enum ConsensusSessionState { /// 2) master node sends partial job requests to every member of consensus group /// 3) slave nodes are computing partial responses /// 4) master node computes result from partial responses -pub struct ConsensusSession, ComputationExecutor: JobExecutor, ComputationTransport: JobTransport> { +pub struct ConsensusSession, + ConsensusTransport: JobTransport, + ComputationExecutor: JobExecutor, + ComputationTransport: JobTransport +> { /// Current session state. state: ConsensusSessionState, /// Session metadata. 
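// The message framing above widens the header from 4 to 18 bytes: a u64
// version, a u64 kind and a u16 payload size. A stand-alone round-trip sketch
// using the byteorder crate; little-endian is assumed here purely for
// illustration, the actual byte order is whatever io/message.rs uses:

extern crate byteorder;

use std::io::{self, Cursor};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};

#[derive(Debug, PartialEq)]
struct Header { version: u64, kind: u64, size: u16 }

fn encode(header: &Header) -> Vec<u8> {
	let mut buffer = Vec::with_capacity(18);
	buffer.write_u64::<LittleEndian>(header.version).expect("writing to a Vec cannot fail");
	buffer.write_u64::<LittleEndian>(header.kind).expect("writing to a Vec cannot fail");
	buffer.write_u16::<LittleEndian>(header.size).expect("writing to a Vec cannot fail");
	buffer
}

fn decode(data: &[u8]) -> io::Result<Header> {
	let mut reader = Cursor::new(data);
	Ok(Header {
		version: reader.read_u64::<LittleEndian>()?,
		kind: reader.read_u64::<LittleEndian>()?,
		size: reader.read_u16::<LittleEndian>()?,
	})
}

fn main() {
	let header = Header { version: 1, kind: 261, size: 42 };
	let bytes = encode(&header);
	assert_eq!(bytes.len(), 18);
	assert_eq!(decode(&bytes).unwrap(), header);
}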
meta: SessionMeta, - /// Requester, for which consensus group has allowed access. - requester: Option, /// Consensus establish job. - consensus_job: JobSession, + consensus_job: JobSession, /// Consensus group. consensus_group: BTreeSet, /// Computation job. @@ -63,38 +63,30 @@ pub struct ConsensusSession> { +pub struct ConsensusSessionParams, + ConsensusTransport: JobTransport +> { /// Session metadata. pub meta: SessionMeta, /// ACL storage for access check. - pub acl_storage: Arc, + pub consensus_executor: ConsensusExecutor, /// Transport for consensus establish job. pub consensus_transport: ConsensusTransport, } -impl ConsensusSession where ConsensusTransport: JobTransport, ComputationExecutor: JobExecutor, ComputationTransport: JobTransport { - /// Create new consensus session on slave node. - pub fn new_on_slave(params: ConsensusSessionParams) -> Result { - debug_assert!(params.meta.self_node_id != params.meta.master_node_id); - Self::new(None, KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()), params) - } - - /// Create new consensus session on master node. - pub fn new_on_master(params: ConsensusSessionParams, signature: Signature) -> Result { - debug_assert!(params.meta.self_node_id == params.meta.master_node_id); - Self::new(Some(recover(&signature, ¶ms.meta.id)?), - KeyAccessJob::new_on_master(params.meta.id.clone(), params.acl_storage.clone(), signature), params) - } - +impl ConsensusSession + where ConsensusExecutor: JobExecutor>, + ConsensusTransport: JobTransport, + ComputationExecutor: JobExecutor, + ComputationTransport: JobTransport { /// Create new consensus session. - fn new(requester: Option, consensus_job_executor: KeyAccessJob, params: ConsensusSessionParams) -> Result { - let consensus_job = JobSession::new(params.meta.clone(), consensus_job_executor, params.consensus_transport); + pub fn new(params: ConsensusSessionParams) -> Result { + let consensus_job = JobSession::new(params.meta.clone(), params.consensus_executor, params.consensus_transport); debug_assert!(consensus_job.state() == JobSessionState::Inactive); Ok(ConsensusSession { state: ConsensusSessionState::WaitingForInitialization, meta: params.meta, - requester: requester, consensus_job: consensus_job, consensus_group: BTreeSet::new(), computation_job: None, @@ -102,12 +94,11 @@ impl ConsensusSes } /// Get consensus job reference. - #[cfg(test)] - pub fn consensus_job(&self) -> &JobSession { + pub fn consensus_job(&self) -> &JobSession { &self.consensus_job } - /// Get all nodes, which chas not rejected consensus request. + /// Get all nodes, which has not rejected consensus request. pub fn consensus_non_rejected_nodes(&self) -> BTreeSet { self.consensus_job.responses().iter() .filter(|r| *r.1) @@ -130,11 +121,6 @@ impl ConsensusSes self.state } - /// Get requester, for which consensus has been reached. - pub fn requester(&self) -> Result<&Public, Error> { - self.requester.as_ref().ok_or(Error::InvalidStateForRequest) - } - /// Get computation result. pub fn result(&self) -> Result { debug_assert!(self.meta.self_node_id == self.meta.master_node_id); @@ -155,17 +141,15 @@ impl ConsensusSes self.process_result(initialization_result) } - /// Process consensus message. 
- pub fn on_consensus_message(&mut self, sender: &NodeId, message: &ConsensusMessage) -> Result<(), Error> { - let consensus_result = match message { - &ConsensusMessage::InitializeConsensusSession(ref message) => { - let signature = message.requestor_signature.clone().into(); - self.requester = Some(recover(&signature, &self.meta.id)?); - self.consensus_job.on_partial_request(sender, signature) - }, - &ConsensusMessage::ConfirmConsensusInitialization(ref message) => - self.consensus_job.on_partial_response(sender, message.is_confirmed), - }; + /// Process consensus request message. + pub fn on_consensus_partial_request(&mut self, sender: &NodeId, request: ConsensusExecutor::PartialJobRequest) -> Result<(), Error> { + let consensus_result = self.consensus_job.on_partial_request(sender, request); + self.process_result(consensus_result) + } + + /// Process consensus message response. + pub fn on_consensus_partial_response(&mut self, sender: &NodeId, response: bool) -> Result<(), Error> { + let consensus_result = self.consensus_job.on_partial_response(sender, response); self.process_result(consensus_result) } @@ -350,6 +334,24 @@ impl ConsensusSes } } +impl ConsensusSession + where ConsensusExecutor: JobExecutor>, + ConsensusTransport: JobTransport, + ComputationExecutor: JobExecutor, + ComputationTransport: JobTransport { + /// Process basic consensus message. + pub fn on_consensus_message(&mut self, sender: &NodeId, message: &ConsensusMessage) -> Result<(), Error> { + let consensus_result = match message { + + &ConsensusMessage::InitializeConsensusSession(ref message) => + self.consensus_job.on_partial_request(sender, message.requestor_signature.clone().into()), + &ConsensusMessage::ConfirmConsensusInitialization(ref message) => + self.consensus_job.on_partial_response(sender, message.is_confirmed), + }; + self.process_result(consensus_result) + } +} + #[cfg(test)] mod tests { use std::sync::Arc; @@ -357,23 +359,24 @@ mod tests { use key_server_cluster::{Error, NodeId, SessionId, DummyAclStorage}; use key_server_cluster::message::{ConsensusMessage, InitializeConsensusSession, ConfirmConsensusInitialization}; use key_server_cluster::jobs::job_session::tests::{make_master_session_meta, make_slave_session_meta, SquaredSumJobExecutor, DummyJobTransport}; + use key_server_cluster::jobs::key_access_job::KeyAccessJob; use super::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState}; - type SquaredSumConsensusSession = ConsensusSession, SquaredSumJobExecutor, DummyJobTransport>; + type SquaredSumConsensusSession = ConsensusSession, SquaredSumJobExecutor, DummyJobTransport>; fn make_master_consensus_session(threshold: usize, requester: Option, acl_storage: Option) -> SquaredSumConsensusSession { let secret = requester.map(|kp| kp.secret().clone()).unwrap_or(Random.generate().unwrap().secret().clone()); - SquaredSumConsensusSession::new_on_master(ConsensusSessionParams { + SquaredSumConsensusSession::new(ConsensusSessionParams { meta: make_master_session_meta(threshold), - acl_storage: Arc::new(acl_storage.unwrap_or(DummyAclStorage::default())), + consensus_executor: KeyAccessJob::new_on_master(SessionId::default(), Arc::new(acl_storage.unwrap_or(DummyAclStorage::default())), sign(&secret, &SessionId::default()).unwrap()), consensus_transport: DummyJobTransport::default(), - }, sign(&secret, &SessionId::default()).unwrap()).unwrap() + }).unwrap() } fn make_slave_consensus_session(threshold: usize, acl_storage: Option) -> SquaredSumConsensusSession { - 
SquaredSumConsensusSession::new_on_slave(ConsensusSessionParams { + SquaredSumConsensusSession::new(ConsensusSessionParams { meta: make_slave_session_meta(threshold), - acl_storage: Arc::new(acl_storage.unwrap_or(DummyAclStorage::default())), + consensus_executor: KeyAccessJob::new_on_slave(SessionId::default(), Arc::new(acl_storage.unwrap_or(DummyAclStorage::default()))), consensus_transport: DummyJobTransport::default(), }).unwrap() } diff --git a/secret_store/src/key_server_cluster/jobs/decryption_job.rs b/secret_store/src/key_server_cluster/jobs/decryption_job.rs index 54594c827..b810b462d 100644 --- a/secret_store/src/key_server_cluster/jobs/decryption_job.rs +++ b/secret_store/src/key_server_cluster/jobs/decryption_job.rs @@ -106,7 +106,7 @@ impl JobExecutor for DecryptionJob { }) } - fn process_partial_request(&self, partial_request: PartialDecryptionRequest) -> Result, Error> { + fn process_partial_request(&mut self, partial_request: PartialDecryptionRequest) -> Result, Error> { if partial_request.other_nodes_ids.len() != self.key_share.threshold || partial_request.other_nodes_ids.contains(&self.self_node_id) || partial_request.other_nodes_ids.iter().any(|n| !self.key_share.id_numbers.contains_key(n)) { diff --git a/secret_store/src/key_server_cluster/jobs/dummy_job.rs b/secret_store/src/key_server_cluster/jobs/dummy_job.rs new file mode 100644 index 000000000..7f89439d6 --- /dev/null +++ b/secret_store/src/key_server_cluster/jobs/dummy_job.rs @@ -0,0 +1,60 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
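// Above, ConsensusSession becomes generic over any ConsensusExecutor (a
// JobExecutor whose partial response is a bool), and JobExecutor's
// process_partial_request now takes &mut self, which is what allows an access
// check job to remember state from the incoming request. A toy, self-contained
// sketch of that contract (names and types here are illustrative, not the
// crate's):

enum PartialRequestAction<R> {
	Respond(R),
	Reject(R),
}

trait Executor {
	type Request;
	type Response;
	fn process_partial_request(&mut self, request: Self::Request) -> PartialRequestAction<Self::Response>;
}

// Toy access-check executor: confirms requests below a limit and, thanks to
// &mut self, records who asked last.
struct AccessJob {
	limit: u32,
	last_requester: Option<u32>,
}

impl Executor for AccessJob {
	type Request = u32; // stands in for a recovered requester public key
	type Response = bool;

	fn process_partial_request(&mut self, request: u32) -> PartialRequestAction<bool> {
		self.last_requester = Some(request);
		if request < self.limit {
			PartialRequestAction::Respond(true)
		} else {
			PartialRequestAction::Reject(false)
		}
	}
}

fn main() {
	let mut job = AccessJob { limit: 10, last_requester: None };
	match job.process_partial_request(3) {
		PartialRequestAction::Respond(true) => (),
		_ => panic!("requests below the limit must be confirmed"),
	}
	assert_eq!(job.last_requester, Some(3));
}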
+ +use std::collections::{BTreeMap, BTreeSet}; +use key_server_cluster::{Error, NodeId}; +use key_server_cluster::jobs::job_session::{JobExecutor, JobTransport, JobPartialRequestAction, JobPartialResponseAction}; + +/// No-work job to use in generics (TODO: create separate ShareChangeConsensusSession && remove this) +pub struct DummyJob; + +impl JobExecutor for DummyJob { + type PartialJobRequest = (); + type PartialJobResponse = (); + type JobResponse = (); + + fn prepare_partial_request(&self, _n: &NodeId, _nodes: &BTreeSet) -> Result<(), Error> { + unreachable!("dummy job methods are never called") + } + + fn process_partial_request(&mut self, _r: ()) -> Result, Error> { + unreachable!("dummy job methods are never called") + } + + fn check_partial_response(&self, _r: &()) -> Result { + unreachable!("dummy job methods are never called") + } + + fn compute_response(&self, _r: &BTreeMap) -> Result<(), Error> { + unreachable!("dummy job methods are never called") + } +} + +/// No-work job transport to use in generics (TODO: create separate ShareChangeConsensusSession && remove this) +pub struct DummyJobTransport; + +impl JobTransport for DummyJobTransport { + type PartialJobRequest = (); + type PartialJobResponse = (); + + fn send_partial_request(&self, _node: &NodeId, _request: ()) -> Result<(), Error> { + unreachable!("dummy transport methods are never called") + } + + fn send_partial_response(&self, _node: &NodeId, _response: ()) -> Result<(), Error> { + unreachable!("dummy transport methods are never called") + } +} diff --git a/secret_store/src/key_server_cluster/jobs/job_session.rs b/secret_store/src/key_server_cluster/jobs/job_session.rs index ef8ab66fb..7549a16c4 100644 --- a/secret_store/src/key_server_cluster/jobs/job_session.rs +++ b/secret_store/src/key_server_cluster/jobs/job_session.rs @@ -31,7 +31,7 @@ pub enum JobPartialResponseAction { /// Partial request action. #[derive(Debug, Clone, Copy, PartialEq)] pub enum JobPartialRequestAction { - /// Repond with reject. + /// Respond with reject. Reject(PartialJobResponse), /// Respond with this response. Respond(PartialJobResponse), @@ -46,7 +46,7 @@ pub trait JobExecutor { /// Prepare job request for given node. fn prepare_partial_request(&self, node: &NodeId, nodes: &BTreeSet) -> Result; /// Process partial request. - fn process_partial_request(&self, partial_request: Self::PartialJobRequest) -> Result, Error>; + fn process_partial_request(&mut self, partial_request: Self::PartialJobRequest) -> Result, Error>; /// Check partial response of given node. fn check_partial_response(&self, partial_response: &Self::PartialJobResponse) -> Result; /// Compute final job response. @@ -87,8 +87,6 @@ pub struct JobSession where Transport: JobTran transport: Transport, /// Session data. data: JobSessionData, - //// PartialJobRequest dummy. - // dummy: PhantomData, } /// Data of job session. @@ -129,6 +127,11 @@ impl JobSession where Executor: JobExe &self.transport } + /// Get executor reference. + pub fn executor(&self) -> &Executor { + &self.executor + } + /// Get job state. 
pub fn state(&self) -> JobSessionState { self.data.state @@ -347,7 +350,7 @@ pub mod tests { type JobResponse = u32; fn prepare_partial_request(&self, _n: &NodeId, _nodes: &BTreeSet) -> Result { Ok(2) } - fn process_partial_request(&self, r: u32) -> Result, Error> { if r <= 10 { Ok(JobPartialRequestAction::Respond(r * r)) } else { Err(Error::InvalidMessage) } } + fn process_partial_request(&mut self, r: u32) -> Result, Error> { if r <= 10 { Ok(JobPartialRequestAction::Respond(r * r)) } else { Err(Error::InvalidMessage) } } fn check_partial_response(&self, r: &u32) -> Result { if r % 2 == 0 { Ok(JobPartialResponseAction::Accept) } else { Ok(JobPartialResponseAction::Reject) } } fn compute_response(&self, r: &BTreeMap) -> Result { Ok(r.values().fold(0, |v1, v2| v1 + v2)) } } diff --git a/secret_store/src/key_server_cluster/jobs/key_access_job.rs b/secret_store/src/key_server_cluster/jobs/key_access_job.rs index 0bbb8bf04..bf40e57d7 100644 --- a/secret_store/src/key_server_cluster/jobs/key_access_job.rs +++ b/secret_store/src/key_server_cluster/jobs/key_access_job.rs @@ -16,7 +16,7 @@ use std::sync::Arc; use std::collections::{BTreeSet, BTreeMap}; -use ethkey::{Signature, recover}; +use ethkey::{Public, Signature, recover}; use key_server_cluster::{Error, NodeId, SessionId, AclStorage}; use key_server_cluster::jobs::job_session::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor}; @@ -46,6 +46,13 @@ impl KeyAccessJob { signature: Some(signature), } } + + pub fn requester(&self) -> Result, Error> { + match self.signature.as_ref() { + Some(signature) => Ok(Some(recover(signature, &self.id)?)), + None => Ok(None), + } + } } impl JobExecutor for KeyAccessJob { @@ -57,7 +64,8 @@ impl JobExecutor for KeyAccessJob { Ok(self.signature.as_ref().expect("prepare_partial_request is only called on master nodes; new_on_master fills the signature; qed").clone()) } - fn process_partial_request(&self, partial_request: Signature) -> Result, Error> { + fn process_partial_request(&mut self, partial_request: Signature) -> Result, Error> { + self.signature = Some(partial_request.clone()); self.acl_storage.check(&recover(&partial_request, &self.id)?, &self.id) .map_err(|_| Error::AccessDenied) .map(|is_confirmed| if is_confirmed { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) }) diff --git a/secret_store/src/key_server_cluster/jobs/mod.rs b/secret_store/src/key_server_cluster/jobs/mod.rs index d9a358aba..63eb57786 100644 --- a/secret_store/src/key_server_cluster/jobs/mod.rs +++ b/secret_store/src/key_server_cluster/jobs/mod.rs @@ -16,6 +16,9 @@ pub mod consensus_session; pub mod decryption_job; +pub mod dummy_job; pub mod job_session; pub mod key_access_job; +pub mod servers_set_change_access_job; pub mod signing_job; +pub mod unknown_sessions_job; diff --git a/secret_store/src/key_server_cluster/jobs/servers_set_change_access_job.rs b/secret_store/src/key_server_cluster/jobs/servers_set_change_access_job.rs new file mode 100644 index 000000000..1af73f16d --- /dev/null +++ b/secret_store/src/key_server_cluster/jobs/servers_set_change_access_job.rs @@ -0,0 +1,170 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::collections::{BTreeSet, BTreeMap}; +use ethkey::{Public, Signature, recover}; +use tiny_keccak::Keccak; +use key_server_cluster::{Error, NodeId, SessionId}; +use key_server_cluster::message::{InitializeConsensusSessionWithServersSet, InitializeConsensusSessionWithServersMap, + InitializeConsensusSessionWithServersSecretMap}; +use key_server_cluster::jobs::job_session::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor}; + +/// Purpose of this job is to check if requestor is administrator of SecretStore (i.e. it have access to change key servers set). +pub struct ServersSetChangeAccessJob { + /// Servers set administrator public key (this could be changed to ACL-based check later). + administrator: Public, + /// Current servers set (in session/cluster). + current_servers_set: BTreeSet, + /// Old servers set. + old_servers_set: Option>, + /// New servers set. + new_servers_set: Option>, + /// Old servers set, signed by requester. + old_set_signature: Option, + /// New servers set, signed by requester. + new_set_signature: Option, +} + +/// Servers set change job partial request. +pub struct ServersSetChangeAccessRequest { + /// Old servers set. + pub old_servers_set: BTreeSet, + /// New servers set. + pub new_servers_set: BTreeSet, + /// Hash(old_servers_set), signed by requester. + pub old_set_signature: Signature, + /// Hash(new_servers_set), signed by requester. + pub new_set_signature: Signature, +} + +impl<'a> From<&'a InitializeConsensusSessionWithServersSet> for ServersSetChangeAccessRequest { + fn from(message: &InitializeConsensusSessionWithServersSet) -> Self { + ServersSetChangeAccessRequest { + old_servers_set: message.old_nodes_set.iter().cloned().map(Into::into).collect(), + new_servers_set: message.new_nodes_set.iter().cloned().map(Into::into).collect(), + old_set_signature: message.old_set_signature.clone().into(), + new_set_signature: message.new_set_signature.clone().into(), + } + } +} + +impl<'a> From<&'a InitializeConsensusSessionWithServersMap> for ServersSetChangeAccessRequest { + fn from(message: &InitializeConsensusSessionWithServersMap) -> Self { + ServersSetChangeAccessRequest { + old_servers_set: message.old_nodes_set.iter().cloned().map(Into::into).collect(), + new_servers_set: message.new_nodes_set.keys().cloned().map(Into::into).collect(), + old_set_signature: message.old_set_signature.clone().into(), + new_set_signature: message.new_set_signature.clone().into(), + } + } +} + +impl<'a> From<&'a InitializeConsensusSessionWithServersSecretMap> for ServersSetChangeAccessRequest { + fn from(message: &InitializeConsensusSessionWithServersSecretMap) -> Self { + ServersSetChangeAccessRequest { + old_servers_set: message.old_nodes_set.iter().cloned().map(Into::into).collect(), + new_servers_set: message.new_nodes_set.keys().cloned().map(Into::into).collect(), + old_set_signature: message.old_set_signature.clone().into(), + new_set_signature: message.new_set_signature.clone().into(), + } + } +} + +impl ServersSetChangeAccessJob { + pub fn new_on_slave(administrator: Public, current_servers_set: BTreeSet) -> Self { + ServersSetChangeAccessJob { + administrator: administrator, + current_servers_set: 
current_servers_set, + old_servers_set: None, + new_servers_set: None, + old_set_signature: None, + new_set_signature: None, + } + } + + pub fn new_on_master(administrator: Public, current_servers_set: BTreeSet, old_servers_set: BTreeSet, new_servers_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Self { + ServersSetChangeAccessJob { + administrator: administrator, + current_servers_set: current_servers_set, + old_servers_set: Some(old_servers_set), + new_servers_set: Some(new_servers_set), + old_set_signature: Some(old_set_signature), + new_set_signature: Some(new_set_signature), + } + } + + pub fn new_servers_set(&self) -> Option<&BTreeSet> { + self.new_servers_set.as_ref() + } +} + +impl JobExecutor for ServersSetChangeAccessJob { + type PartialJobRequest = ServersSetChangeAccessRequest; + type PartialJobResponse = bool; + type JobResponse = BTreeSet; + + fn prepare_partial_request(&self, _node: &NodeId, _nodes: &BTreeSet) -> Result { + let explanation = "prepare_partial_request is only called on master nodes; this field is filled on master nodes in constructor; qed"; + Ok(ServersSetChangeAccessRequest { + old_servers_set: self.old_servers_set.clone().expect(explanation), + new_servers_set: self.new_servers_set.clone().expect(explanation), + old_set_signature: self.old_set_signature.clone().expect(explanation), + new_set_signature: self.new_set_signature.clone().expect(explanation), + }) + } + + fn process_partial_request(&mut self, partial_request: ServersSetChangeAccessRequest) -> Result, Error> { + let ServersSetChangeAccessRequest { + old_servers_set, + new_servers_set, + old_set_signature, + new_set_signature, + } = partial_request; + + // check that current set is exactly the same set as old set + if self.current_servers_set.symmetric_difference(&old_servers_set).next().is_some() { + return Ok(JobPartialRequestAction::Reject(false)); + } + + // check old servers set signature + let old_actual_public = recover(&old_set_signature, &ordered_nodes_hash(&old_servers_set).into())?; + let new_actual_public = recover(&new_set_signature, &ordered_nodes_hash(&new_servers_set).into())?; + let is_administrator = old_actual_public == self.administrator && new_actual_public == self.administrator; + self.new_servers_set = Some(new_servers_set); + + Ok(if is_administrator { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) }) + } + + fn check_partial_response(&self, partial_response: &bool) -> Result { + Ok(if *partial_response { JobPartialResponseAction::Accept } else { JobPartialResponseAction::Reject }) + } + + fn compute_response(&self, partial_responses: &BTreeMap) -> Result, Error> { + Ok(partial_responses.keys().cloned().collect()) + } +} + +pub fn ordered_nodes_hash(nodes: &BTreeSet) -> SessionId { + let mut nodes_keccak = Keccak::new_keccak256(); + for node in nodes { + nodes_keccak.update(&*node); + } + + let mut nodes_keccak_value = [0u8; 32]; + nodes_keccak.finalize(&mut nodes_keccak_value); + + nodes_keccak_value.into() +} diff --git a/secret_store/src/key_server_cluster/jobs/signing_job.rs b/secret_store/src/key_server_cluster/jobs/signing_job.rs index acf6047ce..7fe115eee 100644 --- a/secret_store/src/key_server_cluster/jobs/signing_job.rs +++ b/secret_store/src/key_server_cluster/jobs/signing_job.rs @@ -101,7 +101,7 @@ impl JobExecutor for SigningJob { }) } - fn process_partial_request(&self, partial_request: PartialSigningRequest) -> Result, Error> { + fn process_partial_request(&mut self, partial_request: 
PartialSigningRequest) -> Result, Error> { if partial_request.other_nodes_ids.len() != self.key_share.threshold || partial_request.other_nodes_ids.contains(&self.self_node_id) || partial_request.other_nodes_ids.iter().any(|n| !self.key_share.id_numbers.contains_key(n)) { diff --git a/secret_store/src/key_server_cluster/jobs/unknown_sessions_job.rs b/secret_store/src/key_server_cluster/jobs/unknown_sessions_job.rs new file mode 100644 index 000000000..fca931872 --- /dev/null +++ b/secret_store/src/key_server_cluster/jobs/unknown_sessions_job.rs @@ -0,0 +1,80 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use std::collections::{BTreeSet, BTreeMap}; +use key_server_cluster::{Error, NodeId, SessionId, KeyStorage}; +use key_server_cluster::jobs::job_session::{JobPartialRequestAction, JobPartialResponseAction, JobExecutor}; + +/// Unknown sessions report job. +pub struct UnknownSessionsJob { + /// Target node id. + target_node_id: Option, + /// Keys storage. + key_storage: Arc, +} + +impl UnknownSessionsJob { + pub fn new_on_slave(key_storage: Arc) -> Self { + UnknownSessionsJob { + target_node_id: None, + key_storage: key_storage, + } + } + + pub fn new_on_master(key_storage: Arc, self_node_id: NodeId) -> Self { + UnknownSessionsJob { + target_node_id: Some(self_node_id), + key_storage: key_storage, + } + } +} + +impl JobExecutor for UnknownSessionsJob { + type PartialJobRequest = NodeId; + type PartialJobResponse = BTreeSet; + type JobResponse = BTreeMap>; + + fn prepare_partial_request(&self, _node: &NodeId, _nodes: &BTreeSet) -> Result { + Ok(self.target_node_id.clone().expect("prepare_partial_request is only called on master nodes; this field is filled on master nodes in constructor; qed")) + } + + fn process_partial_request(&mut self, partial_request: NodeId) -> Result>, Error> { + Ok(JobPartialRequestAction::Respond(self.key_storage.iter() + .filter(|&(_, ref key_share)| !key_share.id_numbers.contains_key(&partial_request)) + .map(|(id, _)| id.clone()) + .collect())) + } + + fn check_partial_response(&self, _partial_response: &BTreeSet) -> Result { + Ok(JobPartialResponseAction::Accept) + } + + // TODO: optimizations: + // currently ALL unknown sessions are sent at once - it is better to limit messages by size/len => add partial-partial responses + fn compute_response(&self, partial_responses: &BTreeMap>) -> Result>, Error> { + let mut result: BTreeMap> = BTreeMap::new(); + for (node_id, node_sessions) in partial_responses { + for node_session in node_sessions { + result.entry(node_session.clone()) + .or_insert_with(Default::default) + .insert(node_id.clone()); + } + } + + Ok(result) + } +} diff --git a/secret_store/src/key_server_cluster/math.rs b/secret_store/src/key_server_cluster/math.rs index 52339014f..8600b0b31 100644 --- a/secret_store/src/key_server_cluster/math.rs +++ b/secret_store/src/key_server_cluster/math.rs @@ 
-94,7 +94,6 @@ pub fn generate_random_polynom(threshold: usize) -> Result, Error> { } /// Compute absolute term of additional polynom1 when new node is added to the existing generation node set -#[cfg(test)] pub fn compute_additional_polynom1_absolute_term<'a, I>(secret_values: I) -> Result where I: Iterator { let mut absolute_term = compute_secret_sum(secret_values)?; absolute_term.neg()?; @@ -102,7 +101,6 @@ pub fn compute_additional_polynom1_absolute_term<'a, I>(secret_values: I) -> Res } /// Add two polynoms together (coeff = coeff1 + coeff2). -#[cfg(test)] pub fn add_polynoms(polynom1: &[Secret], polynom2: &[Secret], is_absolute_term2_zero: bool) -> Result, Error> { polynom1.iter().zip(polynom2.iter()) .enumerate() @@ -162,6 +160,13 @@ pub fn public_values_generation(threshold: usize, derived_point: &Public, polyno Ok(publics) } +/// Generate refreshed public keys for other participants. +pub fn refreshed_public_values_generation(threshold: usize, refreshed_polynom1: &[Secret]) -> Result, Error> { + debug_assert_eq!(refreshed_polynom1.len(), threshold + 1); + + (0..threshold + 1).map(|i| compute_public_share(&refreshed_polynom1[i])).collect() +} + /// Check keys passed by other participants. pub fn keys_verification(threshold: usize, derived_point: &Public, number_id: &Secret, secret1: &Secret, secret2: &Secret, publics: &[Public]) -> Result { // calculate left part @@ -190,7 +195,6 @@ pub fn keys_verification(threshold: usize, derived_point: &Public, number_id: &S } /// Check refreshed keys passed by other participants. -#[cfg(test)] pub fn refreshed_keys_verification(threshold: usize, number_id: &Secret, secret1: &Secret, publics: &[Public]) -> Result { // calculate left part let mut left = math::generation_point(); @@ -545,7 +549,6 @@ pub mod tests { new_nodes_polynom1.push(new_polynom1); } - // new nodes sends its own information to all other nodes let n = n + new_nodes; id_numbers.extend((0..new_nodes).map(|_| Random.generate().unwrap().secret().clone())); @@ -597,10 +600,12 @@ pub mod tests { .filter(|&(j, _)| j != i) .take(t) .map(|(_, id_number)| id_number)).unwrap()).collect(); + let nodes_shadow_points: Vec<_> = nodes_shadows.iter() .map(|s| compute_node_shadow_point(&access_key, &encrypted_secret.common_point, s, None).unwrap()) .map(|sp| sp.0) .collect(); + assert_eq!(nodes_shadows.len(), t + 1); assert_eq!(nodes_shadow_points.len(), t + 1); @@ -752,14 +757,19 @@ pub mod tests { // generate key using 6-of-10 session let (t, n) = (5, 10); let artifacts1 = run_key_generation(t, n, None); + let joint_secret1 = compute_joint_secret(artifacts1.polynoms1.iter().map(|p1| &p1[0])).unwrap(); // let's say we want to include additional server to the set // so that scheme becames 6-of-11 let artifacts2 = run_key_share_refreshing_and_add_new_nodes(t, n, 1, &artifacts1); + let joint_secret2 = compute_joint_secret(artifacts2.polynoms1.iter().map(|p1| &p1[0])).unwrap(); assert_eq!(artifacts1.joint_public, artifacts2.joint_public); + assert_eq!(joint_secret1, joint_secret2); // include another couple of servers (6-of-13) let artifacts3 = run_key_share_refreshing_and_add_new_nodes(t, n + 1, 2, &artifacts2); + let joint_secret3 = compute_joint_secret(artifacts3.polynoms1.iter().map(|p1| &p1[0])).unwrap(); assert_eq!(artifacts1.joint_public, artifacts3.joint_public); + assert_eq!(joint_secret1, joint_secret3); } } diff --git a/secret_store/src/key_server_cluster/message.rs b/secret_store/src/key_server_cluster/message.rs index e5adc39d3..527cf3c2a 100644 --- 
a/secret_store/src/key_server_cluster/message.rs +++ b/secret_store/src/key_server_cluster/message.rs @@ -36,6 +36,14 @@ pub enum Message { Decryption(DecryptionMessage), /// Signing message. Signing(SigningMessage), + /// Share add message. + ShareAdd(ShareAddMessage), + /// Share move message. + ShareMove(ShareMoveMessage), + /// Share add message. + ShareRemove(ShareRemoveMessage), + /// Servers set change message. + ServersSetChange(ServersSetChangeMessage), } /// All possible cluster-level messages. @@ -90,6 +98,33 @@ pub enum ConsensusMessage { ConfirmConsensusInitialization(ConfirmConsensusInitialization), } +/// All possible messages that can be sent during servers-set consensus establishing. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum ConsensusMessageWithServersSet { + /// Initialize consensus session. + InitializeConsensusSession(InitializeConsensusSessionWithServersSet), + /// Confirm/reject consensus session initialization. + ConfirmConsensusInitialization(ConfirmConsensusInitialization), +} + +/// All possible messages that can be sent during share add consensus establishing. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum ConsensusMessageWithServersMap { + /// Initialize consensus session. + InitializeConsensusSession(InitializeConsensusSessionWithServersMap), + /// Confirm/reject consensus session initialization. + ConfirmConsensusInitialization(ConfirmConsensusInitialization), +} + +/// All possible messages that can be sent during share add consensus establishing. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum ConsensusMessageWithServersSecretMap { + /// Initialize consensus session. + InitializeConsensusSession(InitializeConsensusSessionWithServersSecretMap), + /// Confirm/reject consensus session initialization. + ConfirmConsensusInitialization(ConfirmConsensusInitialization), +} + /// All possible messages that can be sent during decryption session. #[derive(Clone, Debug)] pub enum DecryptionMessage { @@ -122,6 +157,78 @@ pub enum SigningMessage { SigningSessionCompleted(SigningSessionCompleted), } +/// All possible messages that can be sent during servers set change session. +#[derive(Clone, Debug)] +pub enum ServersSetChangeMessage { + /// Consensus establishing message. + ServersSetChangeConsensusMessage(ServersSetChangeConsensusMessage), + /// Unknown sessions ids request. + UnknownSessionsRequest(UnknownSessionsRequest), + /// Unknown sessions ids. + UnknownSessions(UnknownSessions), + /// Initialize share change session(s). + InitializeShareChangeSession(InitializeShareChangeSession), + /// Confirm share change session(s) initialization. + ConfirmShareChangeSessionInitialization(ConfirmShareChangeSessionInitialization), + /// Share change session delegation. + ServersSetChangeDelegate(ServersSetChangeDelegate), + /// Share change session delegation response. + ServersSetChangeDelegateResponse(ServersSetChangeDelegateResponse), + /// Share add message. + ServersSetChangeShareAddMessage(ServersSetChangeShareAddMessage), + /// Share move message. + ServersSetChangeShareMoveMessage(ServersSetChangeShareMoveMessage), + /// Share remove message. + ServersSetChangeShareRemoveMessage(ServersSetChangeShareRemoveMessage), + /// Servers set change session completed. + ServersSetChangeError(ServersSetChangeError), + /// Servers set change session completed. + ServersSetChangeCompleted(ServersSetChangeCompleted), +} + +/// All possible messages that can be sent during share add session. 
+#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum ShareAddMessage { + /// Consensus establishing message. + ShareAddConsensusMessage(ShareAddConsensusMessage), + /// Common key share data is sent to new node. + KeyShareCommon(KeyShareCommon), + /// Absolute term share of secret polynom is sent to new node. + NewAbsoluteTermShare(NewAbsoluteTermShare), + /// Generated keys are sent to every node. + NewKeysDissemination(NewKeysDissemination), + /// When session error has occurred. + ShareAddError(ShareAddError), +} + +/// All possible messages that can be sent during share move session. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum ShareMoveMessage { + /// Consensus establishing message. + ShareMoveConsensusMessage(ShareMoveConsensusMessage), + /// Share move request. + ShareMoveRequest(ShareMoveRequest), + /// Share move. + ShareMove(ShareMove), + /// Share move confirmation. + ShareMoveConfirm(ShareMoveConfirm), + /// When session error has occurred. + ShareMoveError(ShareMoveError), +} + +/// All possible messages that can be sent during share remove session. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum ShareRemoveMessage { + /// Consensus establishing message. + ShareRemoveConsensusMessage(ShareRemoveConsensusMessage), + /// Share remove request. + ShareRemoveRequest(ShareRemoveRequest), + /// Share remove confirmation. + ShareRemoveConfirm(ShareRemoveConfirm), + /// When session error has occurred. + ShareRemoveError(ShareRemoveError), +} + /// Introduce node public key. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct NodePublicKey { @@ -226,7 +333,7 @@ pub struct SessionError { pub session: MessageSessionId, /// Session-level nonce. pub session_nonce: u64, - /// Public key share. + /// Error message. pub error: String, } @@ -288,6 +395,45 @@ pub struct ConfirmConsensusInitialization { pub is_confirmed: bool, } +/// Node is asked to be part of servers-set consensus group. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct InitializeConsensusSessionWithServersSet { + /// Old nodes set. + pub old_nodes_set: BTreeSet, + /// New nodes set. + pub new_nodes_set: BTreeSet, + /// Old server set, signed by requester. + pub old_set_signature: SerializableSignature, + /// New server set, signed by requester. + pub new_set_signature: SerializableSignature, +} + +/// Node is asked to be part of servers-set consensus group. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct InitializeConsensusSessionWithServersSecretMap { + /// Old nodes set. + pub old_nodes_set: BTreeSet, + /// New nodes set. + pub new_nodes_set: BTreeMap, + /// Old server set, signed by requester. + pub old_set_signature: SerializableSignature, + /// New server set, signed by requester. + pub new_set_signature: SerializableSignature, +} + +/// Node is asked to be part of servers-set consensus group. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct InitializeConsensusSessionWithServersMap { + /// Old nodes set. + pub old_nodes_set: BTreeSet, + /// New nodes set (keys() = new_nodes_set, values = old nodes [differs from new if share is moved]). + pub new_nodes_set: BTreeMap, + /// Old server set, signed by requester. + pub old_set_signature: SerializableSignature, + /// New server set, signed by requester. + pub new_set_signature: SerializableSignature, +} + /// Consensus-related signing message.
#[derive(Clone, Debug, Serialize, Deserialize)] pub struct SigningConsensusMessage { @@ -355,7 +501,7 @@ pub struct SigningSessionError { pub sub_session: SerializableSecret, /// Session-level nonce. pub session_nonce: u64, - /// Error description. + /// Error message. pub error: String, } @@ -427,7 +573,7 @@ pub struct DecryptionSessionError { pub sub_session: SerializableSecret, /// Session-level nonce. pub session_nonce: u64, - /// Public key share. + /// Error message. pub error: String, } @@ -442,6 +588,312 @@ pub struct DecryptionSessionCompleted { pub session_nonce: u64, } +/// Consensus-related servers set change message. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ServersSetChangeConsensusMessage { + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Consensus message. + pub message: ConsensusMessageWithServersSet, +} + +/// Unknown session ids request. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct UnknownSessionsRequest { + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, +} + +/// Unknown session ids. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct UnknownSessions { + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Unknown session ids. + pub unknown_sessions: BTreeSet, +} + +/// Master node opens share initialize session on other nodes. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct InitializeShareChangeSession { + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Key id. + pub key_id: MessageSessionId, + /// Master node. + pub master_node_id: MessageNodeId, + /// Old nodes set. + pub old_shares_set: BTreeSet, + /// Shares to add. Values are filled for new nodes only. + pub shares_to_add: BTreeMap, + /// Shares to move. + pub shares_to_move: BTreeMap, + /// Shares to remove. + pub shares_to_remove: BTreeSet, +} + +/// Slave node confirms session initialization. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ConfirmShareChangeSessionInitialization { + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Key id of the confirmed share change session. + pub key_id: MessageSessionId, +} + +/// Share change is requested. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ServersSetChangeDelegate { + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Key id. + pub key_id: MessageSessionId, +} + +/// Share change is completed. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ServersSetChangeDelegateResponse { + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Key id. + pub key_id: MessageSessionId, +} + +/// Servers set change share add message. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ServersSetChangeShareAddMessage { + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Share add message. + pub message: ShareAddMessage, +} + +/// Servers set change share move message.
+#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ServersSetChangeShareMoveMessage { + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Share move message. + pub message: ShareMoveMessage, +} + +/// Servers set change share remove message. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ServersSetChangeShareRemoveMessage { + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Share remove message. + pub message: ShareRemoveMessage, +} + +/// When servers set change session error has occurred. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ServersSetChangeError { + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Error message. + pub error: String, +} + +/// When servers set change session is completed. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ServersSetChangeCompleted { + /// Servers set change session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, +} + +/// Consensus-related share add session message. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ShareAddConsensusMessage { + /// Share add session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Consensus message. + pub message: ConsensusMessageWithServersSecretMap, +} + +/// Key share common data is passed to new node. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct KeyShareCommon { + /// Generation session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Key threshold. + pub threshold: usize, + /// Author of key share entry. + pub author: SerializablePublic, + /// Common (shared) encryption point. + pub common_point: Option, + /// Encrypted point. + pub encrypted_point: Option, +} + +/// Absolute term share is passed to new node. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct NewAbsoluteTermShare { + /// Generation session Id. + pub session: MessageSessionId, + /// Sender id number. + pub sender_id: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, + /// Absolute term share. + pub absolute_term_share: SerializableSecret, +} + +/// Generated keys are sent to every node. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct NewKeysDissemination { + /// Generation session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Refreshed secret1 value. + pub refreshed_secret1: SerializableSecret, + /// Refreshed public values. + pub refreshed_publics: Vec, +} + +/// When share add session error has occurred. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ShareAddError { + /// Generation session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Error message. + pub error: String, +} + +/// Consensus-related share move session message. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ShareMoveConsensusMessage { + /// Share move session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Consensus message. + pub message: ConsensusMessageWithServersMap, +} + +/// Share move is requested. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ShareMoveRequest { + /// Generation session Id.
+ pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, +} + +/// Share is moved from source to destination. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ShareMove { + /// Generation session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Author of the entry. + pub author: SerializablePublic, + /// Decryption threshold. + pub threshold: usize, + /// Nodes ids numbers. + pub id_numbers: BTreeMap, + /// Polynom1. + pub polynom1: Vec, + /// Node secret share. + pub secret_share: SerializableSecret, + /// Common (shared) encryption point. + pub common_point: Option, + /// Encrypted point. + pub encrypted_point: Option, +} + +/// Share move is confirmed (destination node confirms to all other nodes). +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ShareMoveConfirm { + /// Generation session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, +} + +/// When share move session error has occurred. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ShareMoveError { + /// Generation session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Error message. + pub error: String, +} + +/// Consensus-related share remove session message. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ShareRemoveConsensusMessage { + /// Share remove session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Consensus message. + pub message: ConsensusMessageWithServersSet, +} + +/// Share remove is requested. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ShareRemoveRequest { + /// Generation session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, +} + +/// Share remove is confirmed (destination node confirms to all other nodes). +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ShareRemoveConfirm { + /// Generation session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, +} + +/// When share remove session error has occurred. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ShareRemoveError { + /// Generation session Id. + pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, + /// Error message.
+ pub error: String, +} + impl GenerationMessage { pub fn session_id(&self) -> &SessionId { match *self { @@ -553,6 +1005,106 @@ impl SigningMessage { } } +impl ServersSetChangeMessage { + pub fn session_id(&self) -> &SessionId { + match *self { + ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref msg) => &msg.session, + ServersSetChangeMessage::UnknownSessionsRequest(ref msg) => &msg.session, + ServersSetChangeMessage::UnknownSessions(ref msg) => &msg.session, + ServersSetChangeMessage::InitializeShareChangeSession(ref msg) => &msg.session, + ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref msg) => &msg.session, + ServersSetChangeMessage::ServersSetChangeDelegate(ref msg) => &msg.session, + ServersSetChangeMessage::ServersSetChangeDelegateResponse(ref msg) => &msg.session, + ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref msg) => &msg.session, + ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ref msg) => &msg.session, + ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ref msg) => &msg.session, + ServersSetChangeMessage::ServersSetChangeError(ref msg) => &msg.session, + ServersSetChangeMessage::ServersSetChangeCompleted(ref msg) => &msg.session, + } + } + + pub fn session_nonce(&self) -> u64 { + match *self { + ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref msg) => msg.session_nonce, + ServersSetChangeMessage::UnknownSessionsRequest(ref msg) => msg.session_nonce, + ServersSetChangeMessage::UnknownSessions(ref msg) => msg.session_nonce, + ServersSetChangeMessage::InitializeShareChangeSession(ref msg) => msg.session_nonce, + ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref msg) => msg.session_nonce, + ServersSetChangeMessage::ServersSetChangeDelegate(ref msg) => msg.session_nonce, + ServersSetChangeMessage::ServersSetChangeDelegateResponse(ref msg) => msg.session_nonce, + ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref msg) => msg.session_nonce, + ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ref msg) => msg.session_nonce, + ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ref msg) => msg.session_nonce, + ServersSetChangeMessage::ServersSetChangeError(ref msg) => msg.session_nonce, + ServersSetChangeMessage::ServersSetChangeCompleted(ref msg) => msg.session_nonce, + } + } +} + +impl ShareAddMessage { + pub fn session_id(&self) -> &SessionId { + match *self { + ShareAddMessage::ShareAddConsensusMessage(ref msg) => &msg.session, + ShareAddMessage::KeyShareCommon(ref msg) => &msg.session, + ShareAddMessage::NewAbsoluteTermShare(ref msg) => &msg.session, + ShareAddMessage::NewKeysDissemination(ref msg) => &msg.session, + ShareAddMessage::ShareAddError(ref msg) => &msg.session, + } + } + + pub fn session_nonce(&self) -> u64 { + match *self { + ShareAddMessage::ShareAddConsensusMessage(ref msg) => msg.session_nonce, + ShareAddMessage::KeyShareCommon(ref msg) => msg.session_nonce, + ShareAddMessage::NewAbsoluteTermShare(ref msg) => msg.session_nonce, + ShareAddMessage::NewKeysDissemination(ref msg) => msg.session_nonce, + ShareAddMessage::ShareAddError(ref msg) => msg.session_nonce, + } + } +} + +impl ShareMoveMessage { + pub fn session_id(&self) -> &SessionId { + match *self { + ShareMoveMessage::ShareMoveConsensusMessage(ref msg) => &msg.session, + ShareMoveMessage::ShareMoveRequest(ref msg) => &msg.session, + ShareMoveMessage::ShareMove(ref msg) => &msg.session, + ShareMoveMessage::ShareMoveConfirm(ref msg) => &msg.session, + ShareMoveMessage::ShareMoveError(ref 
msg) => &msg.session, + } + } + + pub fn session_nonce(&self) -> u64 { + match *self { + ShareMoveMessage::ShareMoveConsensusMessage(ref msg) => msg.session_nonce, + ShareMoveMessage::ShareMoveRequest(ref msg) => msg.session_nonce, + ShareMoveMessage::ShareMove(ref msg) => msg.session_nonce, + ShareMoveMessage::ShareMoveConfirm(ref msg) => msg.session_nonce, + ShareMoveMessage::ShareMoveError(ref msg) => msg.session_nonce, + } + } +} + +impl ShareRemoveMessage { + pub fn session_id(&self) -> &SessionId { + match *self { + ShareRemoveMessage::ShareRemoveConsensusMessage(ref msg) => &msg.session, + ShareRemoveMessage::ShareRemoveRequest(ref msg) => &msg.session, + ShareRemoveMessage::ShareRemoveConfirm(ref msg) => &msg.session, + ShareRemoveMessage::ShareRemoveError(ref msg) => &msg.session, + } + } + + pub fn session_nonce(&self) -> u64 { + match *self { + ShareRemoveMessage::ShareRemoveConsensusMessage(ref msg) => msg.session_nonce, + ShareRemoveMessage::ShareRemoveRequest(ref msg) => msg.session_nonce, + ShareRemoveMessage::ShareRemoveConfirm(ref msg) => msg.session_nonce, + ShareRemoveMessage::ShareRemoveError(ref msg) => msg.session_nonce, + } + } +} + impl fmt::Display for Message { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { @@ -561,6 +1113,10 @@ impl fmt::Display for Message { Message::Encryption(ref message) => write!(f, "Encryption.{}", message), Message::Decryption(ref message) => write!(f, "Decryption.{}", message), Message::Signing(ref message) => write!(f, "Signing.{}", message), + Message::ServersSetChange(ref message) => write!(f, "ServersSetChange.{}", message), + Message::ShareAdd(ref message) => write!(f, "ShareAdd.{}", message), + Message::ShareMove(ref message) => write!(f, "ShareMove.{}", message), + Message::ShareRemove(ref message) => write!(f, "ShareRemove.{}", message), } } } @@ -609,6 +1165,33 @@ impl fmt::Display for ConsensusMessage { } } +impl fmt::Display for ConsensusMessageWithServersSet { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ConsensusMessageWithServersSet::InitializeConsensusSession(_) => write!(f, "InitializeConsensusSession"), + ConsensusMessageWithServersSet::ConfirmConsensusInitialization(_) => write!(f, "ConfirmConsensusInitialization"), + } + } +} + +impl fmt::Display for ConsensusMessageWithServersMap { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ConsensusMessageWithServersMap::InitializeConsensusSession(_) => write!(f, "InitializeConsensusSession"), + ConsensusMessageWithServersMap::ConfirmConsensusInitialization(_) => write!(f, "ConfirmConsensusInitialization"), + } + } +} + +impl fmt::Display for ConsensusMessageWithServersSecretMap { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ConsensusMessageWithServersSecretMap::InitializeConsensusSession(_) => write!(f, "InitializeConsensusSession"), + ConsensusMessageWithServersSecretMap::ConfirmConsensusInitialization(_) => write!(f, "ConfirmConsensusInitialization"), + } + } +} + impl fmt::Display for DecryptionMessage { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { @@ -633,3 +1216,58 @@ impl fmt::Display for SigningMessage { } } } + +impl fmt::Display for ServersSetChangeMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref m) => write!(f, "ServersSetChangeConsensusMessage.{}", m.message), + ServersSetChangeMessage::UnknownSessionsRequest(_) => write!(f, 
"UnknownSessionsRequest"), + ServersSetChangeMessage::UnknownSessions(_) => write!(f, "UnknownSessions"), + ServersSetChangeMessage::InitializeShareChangeSession(_) => write!(f, "InitializeShareChangeSession"), + ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(_) => write!(f, "ConfirmShareChangeSessionInitialization"), + ServersSetChangeMessage::ServersSetChangeDelegate(_) => write!(f, "ServersSetChangeDelegate"), + ServersSetChangeMessage::ServersSetChangeDelegateResponse(_) => write!(f, "ServersSetChangeDelegateResponse"), + ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref m) => write!(f, "ServersSetChangeShareAddMessage.{}", m.message), + ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ref m) => write!(f, "ServersSetChangeShareMoveMessage.{}", m.message), + ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ref m) => write!(f, "ServersSetChangeShareRemoveMessage.{}", m.message), + ServersSetChangeMessage::ServersSetChangeError(_) => write!(f, "ServersSetChangeError"), + ServersSetChangeMessage::ServersSetChangeCompleted(_) => write!(f, "ServersSetChangeCompleted"), + } + } +} + +impl fmt::Display for ShareAddMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ShareAddMessage::ShareAddConsensusMessage(ref m) => write!(f, "ShareAddConsensusMessage.{}", m.message), + ShareAddMessage::KeyShareCommon(_) => write!(f, "KeyShareCommon"), + ShareAddMessage::NewAbsoluteTermShare(_) => write!(f, "NewAbsoluteTermShare"), + ShareAddMessage::NewKeysDissemination(_) => write!(f, "NewKeysDissemination"), + ShareAddMessage::ShareAddError(_) => write!(f, "ShareAddError"), + + } + } +} + +impl fmt::Display for ShareMoveMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ShareMoveMessage::ShareMoveConsensusMessage(ref m) => write!(f, "ShareMoveConsensusMessage.{}", m.message), + ShareMoveMessage::ShareMoveRequest(_) => write!(f, "ShareMoveRequest"), + ShareMoveMessage::ShareMove(_) => write!(f, "ShareMove"), + ShareMoveMessage::ShareMoveConfirm(_) => write!(f, "ShareMoveConfirm"), + ShareMoveMessage::ShareMoveError(_) => write!(f, "ShareMoveError"), + } + } +} + +impl fmt::Display for ShareRemoveMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ShareRemoveMessage::ShareRemoveConsensusMessage(ref m) => write!(f, "InitializeShareRemoveSession.{}", m.message), + ShareRemoveMessage::ShareRemoveRequest(_) => write!(f, "ShareRemoveRequest"), + ShareRemoveMessage::ShareRemoveConfirm(_) => write!(f, "ShareRemoveConfirm"), + ShareRemoveMessage::ShareRemoveError(_) => write!(f, "ShareRemoveError"), + } + } +} diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index a71c356ae..f83677830 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -35,7 +35,6 @@ pub use self::decryption_session::Session as DecryptionSession; pub use super::node_key_pair::PlainNodeKeyPair; #[cfg(test)] pub use super::key_storage::tests::DummyKeyStorage; -#[cfg(test)] pub use super::acl_storage::DummyAclStorage; #[cfg(test)] pub use super::key_server_set::tests::MapKeyServerSet; @@ -163,14 +162,24 @@ impl Into for Error { } } +mod admin_sessions; +mod client_sessions; + +pub use self::admin_sessions::servers_set_change_session; +pub use self::admin_sessions::share_add_session; +pub use self::admin_sessions::share_change_session; +pub use self::admin_sessions::share_move_session; +pub use 
self::admin_sessions::share_remove_session; + +pub use self::client_sessions::decryption_session; +pub use self::client_sessions::encryption_session; +pub use self::client_sessions::generation_session; +pub use self::client_sessions::signing_session; + mod cluster; mod cluster_sessions; -mod decryption_session; -mod encryption_session; -mod generation_session; mod io; mod jobs; pub mod math; mod message; -mod signing_session; mod net; diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index 28e3aa2c9..6143b4a31 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -18,12 +18,16 @@ use std::path::PathBuf; use std::collections::BTreeMap; use serde_json; use ethkey::{Secret, Public}; -use util::Database; +use util::{Database, DatabaseIterator}; use types::all::{Error, ServiceConfiguration, ServerKeyId, NodeId}; use serialization::{SerializablePublic, SerializableSecret}; /// Key of version value. const DB_META_KEY_VERSION: &'static [u8; 7] = b"version"; +/// Current db version. +const CURRENT_VERSION: u8 = 2; +/// Current type of serialized key shares. +type CurrentSerializableDocumentKeyShare = SerializableDocumentKeyShareV2; /// Encrypted key share, stored by key storage on the single key server. #[derive(Debug, Clone, PartialEq)] @@ -34,6 +38,8 @@ pub struct DocumentKeyShare { pub threshold: usize, /// Nodes ids numbers. pub id_numbers: BTreeMap, + /// Polynom1. + pub polynom1: Vec, /// Node secret share. pub secret_share: Secret, /// Common (shared) encryption point. @@ -50,8 +56,12 @@ pub trait KeyStorage: Send + Sync { fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error>; /// Get document encryption key fn get(&self, document: &ServerKeyId) -> Result; + /// Remove document encryption key + fn remove(&self, document: &ServerKeyId) -> Result<(), Error>; /// Check if storage contains document encryption key fn contains(&self, document: &ServerKeyId) -> bool; + /// Iterate through storage + fn iter<'a>(&'a self) -> Box + 'a>; } /// Persistent document encryption keys storage @@ -59,6 +69,11 @@ pub struct PersistentKeyStorage { db: Database, } +/// Persistent document encryption keys storage iterator +pub struct PersistentKeyStorageIterator<'a> { + iter: Option>, +} + /// V0 of encrypted key share, as it is stored by key storage on the single key server. #[derive(Serialize, Deserialize)] struct SerializableDocumentKeyShareV0 { @@ -91,6 +106,25 @@ struct SerializableDocumentKeyShareV1 { pub encrypted_point: Option, } +/// V2 of encrypted key share, as it is stored by key storage on the single key server. +#[derive(Serialize, Deserialize)] +struct SerializableDocumentKeyShareV2 { + /// Author of the entry. + pub author: SerializablePublic, + /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). + pub threshold: usize, + /// Nodes ids numbers. + pub id_numbers: BTreeMap, + /// Polynom1. + pub polynom1: Vec, + /// Node secret share. + pub secret_share: SerializableSecret, + /// Common (shared) encryption point. + pub common_point: Option, + /// Encrypted point.
+ pub encrypted_point: Option, +} + impl PersistentKeyStorage { /// Create new persistent document encryption keys storage pub fn new(config: &ServiceConfiguration) -> Result { @@ -113,33 +147,54 @@ fn upgrade_db(db: Database) -> Result { match version { 0 => { let mut batch = db.transaction(); - batch.put(None, DB_META_KEY_VERSION, &[1]); - for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner) { + batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]); + for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner).filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) { let v0_key = serde_json::from_slice::(&db_value).map_err(|e| Error::Database(e.to_string()))?; - let v1_key = SerializableDocumentKeyShareV1 { + let v2_key = CurrentSerializableDocumentKeyShare { // author is used in separate generation + encrypt sessions. // in v0 there have been only simultaneous GenEnc sessions. - author: Public::default().into(), + author: Public::default().into(), // added in v1 threshold: v0_key.threshold, id_numbers: v0_key.id_numbers, secret_share: v0_key.secret_share, + polynom1: Vec::new(), // added in v2 common_point: Some(v0_key.common_point), encrypted_point: Some(v0_key.encrypted_point), }; - let db_value = serde_json::to_vec(&v1_key).map_err(|e| Error::Database(e.to_string()))?; + let db_value = serde_json::to_vec(&v2_key).map_err(|e| Error::Database(e.to_string()))?; batch.put(None, &*db_key, &*db_value); } db.write(batch).map_err(Error::Database)?; Ok(db) }, - 1 => Ok(db), - _ => Err(Error::Database(format!("unsupported SecretStore database version:? {}", version))), + 1 => { + let mut batch = db.transaction(); + batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]); + for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner).filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) { + let v1_key = serde_json::from_slice::(&db_value).map_err(|e| Error::Database(e.to_string()))?; + let v2_key = CurrentSerializableDocumentKeyShare { + author: v1_key.author, // added in v1 + threshold: v1_key.threshold, + id_numbers: v1_key.id_numbers, + secret_share: v1_key.secret_share, + polynom1: Vec::new(), // added in v2 + common_point: v1_key.common_point, + encrypted_point: v1_key.encrypted_point, + }; + let db_value = serde_json::to_vec(&v2_key).map_err(|e| Error::Database(e.to_string()))?; + batch.put(None, &*db_key, &*db_value); + } + db.write(batch).map_err(Error::Database)?; + Ok(db) + } + 2 => Ok(db), + _ => Err(Error::Database(format!("unsupported SecretStore database version: {}", version))), } } impl KeyStorage for PersistentKeyStorage { fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> { - let key: SerializableDocumentKeyShareV1 = key.into(); + let key: CurrentSerializableDocumentKeyShare = key.into(); let key = serde_json::to_vec(&key).map_err(|e| Error::Database(e.to_string()))?; let mut batch = self.db.transaction(); batch.put(None, &document, &key); @@ -155,15 +210,53 @@ impl KeyStorage for PersistentKeyStorage { .map_err(Error::Database)? 
.ok_or(Error::DocumentNotFound) .map(|key| key.into_vec()) - .and_then(|key| serde_json::from_slice::(&key).map_err(|e| Error::Database(e.to_string()))) + .and_then(|key| serde_json::from_slice::(&key).map_err(|e| Error::Database(e.to_string()))) .map(Into::into) } + fn remove(&self, document: &ServerKeyId) -> Result<(), Error> { + let mut batch = self.db.transaction(); + batch.delete(None, &document); + self.db.write(batch).map_err(Error::Database) + } + fn contains(&self, document: &ServerKeyId) -> bool { self.db.get(None, document) .map(|k| k.is_some()) .unwrap_or(false) } + + fn iter<'a>(&'a self) -> Box + 'a> { + Box::new(PersistentKeyStorageIterator { + iter: self.db.iter(None), + }) + } +} + +impl<'a> Iterator for PersistentKeyStorageIterator<'a> { + type Item = (ServerKeyId, DocumentKeyShare); + + fn next(&mut self) -> Option<(ServerKeyId, DocumentKeyShare)> { + self.iter.as_mut() + .and_then(|iter| iter.next() + .and_then(|(db_key, db_val)| serde_json::from_slice::(&db_val) + .ok() + .map(|key| ((*db_key).into(), key.into())))) + } +} + +impl From for SerializableDocumentKeyShareV2 { + fn from(key: DocumentKeyShare) -> Self { + SerializableDocumentKeyShareV2 { + author: key.author.into(), + threshold: key.threshold, + id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), + secret_share: key.secret_share.into(), + polynom1: key.polynom1.into_iter().map(Into::into).collect(), + common_point: key.common_point.map(Into::into), + encrypted_point: key.encrypted_point.map(Into::into), + } + } } impl From for SerializableDocumentKeyShareV1 { @@ -179,13 +272,14 @@ impl From for SerializableDocumentKeyShareV1 { } } -impl From for DocumentKeyShare { - fn from(key: SerializableDocumentKeyShareV1) -> Self { +impl From for DocumentKeyShare { + fn from(key: SerializableDocumentKeyShareV2) -> Self { DocumentKeyShare { author: key.author.into(), threshold: key.threshold, id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), secret_share: key.secret_share.into(), + polynom1: key.polynom1.into_iter().map(Into::into).collect(), common_point: key.common_point.map(Into::into), encrypted_point: key.encrypted_point.map(Into::into), } @@ -201,8 +295,9 @@ pub mod tests { use ethkey::{Random, Generator, Public, Secret}; use util::Database; use types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, ServerKeyId}; - use super::{DB_META_KEY_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare, - SerializableDocumentKeyShareV0, SerializableDocumentKeyShareV1, upgrade_db}; + use super::{DB_META_KEY_VERSION, CURRENT_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare, + SerializableDocumentKeyShareV0, SerializableDocumentKeyShareV1, + CurrentSerializableDocumentKeyShare, upgrade_db}; /// In-memory document encryption keys storage #[derive(Default)] @@ -225,9 +320,18 @@ pub mod tests { self.keys.read().get(document).cloned().ok_or(Error::DocumentNotFound) } + fn remove(&self, document: &ServerKeyId) -> Result<(), Error> { + self.keys.write().remove(document); + Ok(()) + } + fn contains(&self, document: &ServerKeyId) -> bool { self.keys.read().contains_key(document) } + + fn iter<'a>(&'a self) -> Box + 'a> { + Box::new(self.keys.read().clone().into_iter()) + } } #[test] @@ -245,6 +349,7 @@ pub mod tests { }, nodes: BTreeMap::new(), allow_connecting_to_higher_nodes: false, + admin_public: None, }, }; @@ -256,6 +361,7 @@ pub mod tests { (Random.generate().unwrap().public().clone(), 
Random.generate().unwrap().secret().clone()) ].into_iter().collect(), secret_share: Random.generate().unwrap().secret().clone(), + polynom1: Vec::new(), common_point: Some(Random.generate().unwrap().public().clone()), encrypted_point: Some(Random.generate().unwrap().public().clone()), }; @@ -267,6 +373,7 @@ pub mod tests { (Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()) ].into_iter().collect(), secret_share: Random.generate().unwrap().secret().clone(), + polynom1: Vec::new(), common_point: Some(Random.generate().unwrap().public().clone()), encrypted_point: Some(Random.generate().unwrap().public().clone()), }; @@ -287,7 +394,7 @@ pub mod tests { } #[test] - fn upgrade_db_0_to_1() { + fn upgrade_db_from_0() { let db_path = RandomTempPath::create_dir(); let db = Database::open_default(db_path.as_str()).unwrap(); @@ -312,8 +419,8 @@ pub mod tests { let db = upgrade_db(db).unwrap(); // check upgrade - assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], 1); - let key = serde_json::from_slice::(&db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap()).unwrap(); + assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], CURRENT_VERSION); + let key = serde_json::from_slice::(&db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap()).unwrap(); assert_eq!(Public::default(), key.author.clone().into()); assert_eq!(777, key.threshold); assert_eq!(vec![( @@ -324,4 +431,46 @@ pub mod tests { assert_eq!(Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".parse::().unwrap()), key.common_point.clone().map(Into::into)); assert_eq!(Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".parse::().unwrap()), key.encrypted_point.clone().map(Into::into)); } + + #[test] + fn upgrade_db_from_1() { + let db_path = RandomTempPath::create_dir(); + let db = Database::open_default(db_path.as_str()).unwrap(); + + // prepare v1 database + { + let key = serde_json::to_vec(&SerializableDocumentKeyShareV1 { + author: "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(), + threshold: 777, + id_numbers: vec![( + "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(), + "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse::().unwrap().into(), + )].into_iter().collect(), + secret_share: "00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::().unwrap().into(), + common_point: Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".into()), + encrypted_point: Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".into()), + }).unwrap(); + let mut batch = db.transaction(); + batch.put(None, DB_META_KEY_VERSION, &[1]); + batch.put(None, &[7], &key); + db.write(batch).unwrap(); + } + + // upgrade database + let db = upgrade_db(db).unwrap(); + + // check upgrade + assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], CURRENT_VERSION); + let key = serde_json::from_slice::(&db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap()).unwrap(); + assert_eq!(777, key.threshold); + assert_eq!(vec![( + 
"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse::().unwrap(), + "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse::().unwrap(), + )], key.id_numbers.clone().into_iter().map(|(k, v)| (k.into(), v.into())).collect::>()); + assert_eq!("00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::().unwrap(), key.secret_share.into()); + assert_eq!(Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".parse::().unwrap()), key.common_point.clone().map(Into::into)); + assert_eq!(Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".parse::().unwrap()), key.encrypted_point.clone().map(Into::into)); + assert_eq!(key.author, "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into()); + assert_eq!(key.polynom1, vec![]); + } } diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 6c592ccc5..fb6cc0e68 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -29,6 +29,7 @@ extern crate serde; extern crate serde_json; #[macro_use] extern crate serde_derive; +extern crate tiny_keccak; extern crate tokio_io; extern crate tokio_core; extern crate tokio_service; diff --git a/secret_store/src/serialization.rs b/secret_store/src/serialization.rs index 369e647ee..d04a01f59 100644 --- a/secret_store/src/serialization.rs +++ b/secret_store/src/serialization.rs @@ -204,8 +204,29 @@ impl<'a> Deserialize<'a> for SerializableH256 { } } +impl PartialEq for SerializableH256 { + fn eq(&self, other: &Self) -> bool { + self.0.eq(&other.0) + } +} + +impl Eq for SerializableH256 { +} + +impl PartialOrd for SerializableH256 { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl Ord for SerializableH256 { + fn cmp(&self, other: &Self) -> Ordering { + self.0.cmp(&other.0) + } +} + /// Serializable EC scalar/secret key. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub struct SerializableSecret(pub Secret); impl From for SerializableSecret where Secret: From { diff --git a/secret_store/src/types/all.rs b/secret_store/src/types/all.rs index 1c4fb5946..ca1f9eb35 100644 --- a/secret_store/src/types/all.rs +++ b/secret_store/src/types/all.rs @@ -93,6 +93,8 @@ pub struct ClusterConfiguration { /// Allow outbound connections to 'higher' nodes. /// This is useful for tests, but slower a bit for production. pub allow_connecting_to_higher_nodes: bool, + /// Administrator public key. + pub admin_public: Option, } /// Shadow decryption result.