From e3fc3ccadaec4240753efe6a1c521122aca1410f Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 14 Sep 2017 20:29:01 +0300 Subject: [PATCH] SecretStore: use random key to encrypt channel + session-level nonce (#6470) * generate random channel encryption key on restart * session-level nonces * fixed warning after rebase * session_nonce -> nonce --- .../src/key_server_cluster/cluster.rs | 97 +++++++-- .../key_server_cluster/cluster_sessions.rs | 50 ++++- .../key_server_cluster/decryption_session.rs | 45 ++++- .../key_server_cluster/encryption_session.rs | 21 ++ .../key_server_cluster/generation_session.rs | 44 ++++ .../src/key_server_cluster/io/handshake.rs | 188 ++++++++++++------ .../src/key_server_cluster/io/message.rs | 63 ++++-- .../src/key_server_cluster/message.rs | 88 +++++++- secret_store/src/key_server_cluster/mod.rs | 6 + .../src/key_server_cluster/signing_session.rs | 57 +++++- 10 files changed, 556 insertions(+), 103 deletions(-) diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index c85ee9d93..4deeb1244 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -330,13 +330,13 @@ impl ClusterCore { finished(Ok(())).boxed() }, Ok((_, Err(err))) => { - warn!(target: "secretstore_net", "{}: protocol error {} when reading message from node {}", data.self_key_pair.public(), err, connection.node_id()); + warn!(target: "secretstore_net", "{}: protocol error '{}' when reading message from node {}", data.self_key_pair.public(), err, connection.node_id()); // continue serving connection data.spawn(ClusterCore::process_connection_messages(data.clone(), connection)); finished(Err(err)).boxed() }, Err(err) => { - warn!(target: "secretstore_net", "{}: network error {} when reading message from node {}", data.self_key_pair.public(), err, connection.node_id()); + warn!(target: "secretstore_net", "{}: network error '{}' when reading message 
from node {}", data.self_key_pair.public(), err, connection.node_id()); // close connection data.connections.remove(connection.node_id(), connection.is_inbound()); failed(err).boxed() @@ -381,7 +381,7 @@ impl ClusterCore { } }, Ok(DeadlineStatus::Meet(Err(err))) => { - warn!(target: "secretstore_net", "{}: protocol error {} when establishing {} connection{}", + warn!(target: "secretstore_net", "{}: protocol error '{}' when establishing {} connection{}", data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" }, outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); finished(Ok(())).boxed() @@ -393,7 +393,7 @@ impl ClusterCore { finished(Ok(())).boxed() }, Err(err) => { - warn!(target: "secretstore_net", "{}: network error {} when establishing {} connection{}", + warn!(target: "secretstore_net", "{}: network error '{}' when establishing {} connection{}", data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" }, outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); finished(Ok(())).boxed() @@ -417,6 +417,7 @@ impl ClusterCore { /// Process single generation message from the connection. 
fn process_generation_message(data: Arc, connection: Arc, mut message: GenerationMessage) { let session_id = message.session_id().clone(); + let session_nonce = message.session_nonce(); let mut sender = connection.node_id().clone(); let session = match message { GenerationMessage::InitializeSession(_) => { @@ -424,7 +425,19 @@ impl ClusterCore { connected_nodes.insert(data.self_key_pair.public().clone()); let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); - data.sessions.new_generation_session(sender.clone(), session_id.clone(), cluster) + match data.sessions.new_generation_session(sender.clone(), session_id.clone(), Some(session_nonce), cluster) { + Ok(session) => Ok(session), + Err(err) => { + // this is new session => it is not yet in container + warn!(target: "secretstore_net", "{}: generation session initialization error '{}' when requested for new session from node {}", data.self_key_pair.public(), err, sender); + data.spawn(connection.send_message(Message::Generation(GenerationMessage::SessionError(message::SessionError { + session: session_id.into(), + session_nonce: session_nonce, + error: format!("{:?}", err), + })))); + return; + }, + } }, _ => { data.sessions.generation_sessions.get(&session_id) @@ -462,9 +475,10 @@ impl ClusterCore { break; }, Err(err) => { - warn!(target: "secretstore_net", "{}: generation session error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); + warn!(target: "secretstore_net", "{}: generation session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); data.sessions.respond_with_generation_error(&session_id, message::SessionError { session: session_id.clone().into(), + session_nonce: session_nonce, error: format!("{:?}", err), }); if err != Error::InvalidSessionId { @@ -479,6 +493,7 @@ impl ClusterCore { /// Process single encryption message from the connection. 
fn process_encryption_message(data: Arc, connection: Arc, mut message: EncryptionMessage) { let session_id = message.session_id().clone(); + let session_nonce = message.session_nonce(); let mut sender = connection.node_id().clone(); let session = match message { EncryptionMessage::InitializeEncryptionSession(_) => { @@ -486,7 +501,19 @@ impl ClusterCore { connected_nodes.insert(data.self_key_pair.public().clone()); let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); - data.sessions.new_encryption_session(sender.clone(), session_id.clone(), cluster) + match data.sessions.new_encryption_session(sender.clone(), session_id.clone(), Some(session_nonce), cluster) { + Ok(session) => Ok(session), + Err(err) => { + // this is new session => it is not yet in container + warn!(target: "secretstore_net", "{}: encryption session initialization error '{}' when requested for new session from node {}", data.self_key_pair.public(), err, sender); + data.spawn(connection.send_message(Message::Encryption(EncryptionMessage::EncryptionSessionError(message::EncryptionSessionError { + session: session_id.into(), + session_nonce: session_nonce, + error: format!("{:?}", err), + })))); + return; + }, + } }, _ => { data.sessions.encryption_sessions.get(&session_id) @@ -531,9 +558,10 @@ impl ClusterCore { break; }, Err(err) => { - warn!(target: "secretstore_net", "{}: encryption session error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); + warn!(target: "secretstore_net", "{}: encryption session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); data.sessions.respond_with_encryption_error(&session_id, message::EncryptionSessionError { session: session_id.clone().into(), + session_nonce: session_nonce, error: format!("{:?}", err), }); if err != Error::InvalidSessionId { @@ -549,6 +577,7 @@ impl ClusterCore { fn process_decryption_message(data: Arc, connection: Arc, mut 
message: DecryptionMessage) { let session_id = message.session_id().clone(); let sub_session_id = message.sub_session_id().clone(); + let session_nonce = message.session_nonce(); let decryption_session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); let mut sender = connection.node_id().clone(); let session = match message { @@ -560,7 +589,20 @@ impl ClusterCore { connected_nodes.insert(data.self_key_pair.public().clone()); let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); - data.sessions.new_decryption_session(sender.clone(), session_id.clone(), sub_session_id.clone(), cluster, None) + match data.sessions.new_decryption_session(sender.clone(), session_id.clone(), sub_session_id.clone(), Some(session_nonce), cluster, None) { + Ok(session) => Ok(session), + Err(err) => { + // this is new session => it is not yet in container + warn!(target: "secretstore_net", "{}: decryption session initialization error '{}' when requested for new session from node {}", data.self_key_pair.public(), err, sender); + data.spawn(connection.send_message(Message::Decryption(DecryptionMessage::DecryptionSessionError(message::DecryptionSessionError { + session: session_id.into(), + sub_session: sub_session_id.clone().into(), + session_nonce: session_nonce, + error: format!("{:?}", err), + })))); + return; + }, + } }, _ => { data.sessions.decryption_sessions.get(&decryption_session_id) @@ -589,10 +631,11 @@ impl ClusterCore { } }, Err(err) => { - warn!(target: "secretstore_net", "{}: decryption session error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); + warn!(target: "secretstore_net", "{}: decryption session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); data.sessions.respond_with_decryption_error(&session_id, &sub_session_id, &sender, message::DecryptionSessionError { session: session_id.clone().into(), sub_session: 
sub_session_id.clone().into(), + session_nonce: session_nonce, error: format!("{:?}", err), }); if err != Error::InvalidSessionId { @@ -608,6 +651,7 @@ impl ClusterCore { fn process_signing_message(data: Arc, connection: Arc, mut message: SigningMessage) { let session_id = message.session_id().clone(); let sub_session_id = message.sub_session_id().clone(); + let session_nonce = message.session_nonce(); let signing_session_id = SigningSessionId::new(session_id.clone(), sub_session_id.clone()); let mut sender = connection.node_id().clone(); let session = match message { @@ -619,7 +663,20 @@ impl ClusterCore { connected_nodes.insert(data.self_key_pair.public().clone()); let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); - data.sessions.new_signing_session(sender.clone(), session_id.clone(), sub_session_id.clone(), cluster, None) + match data.sessions.new_signing_session(sender.clone(), session_id.clone(), sub_session_id.clone(), Some(session_nonce), cluster, None) { + Ok(session) => Ok(session), + Err(err) => { + // this is new session => it is not yet in container + warn!(target: "secretstore_net", "{}: signing session initialization error '{}' when requested for new session from node {}", data.self_key_pair.public(), err, sender); + data.spawn(connection.send_message(Message::Signing(SigningMessage::SigningSessionError(message::SigningSessionError { + session: session_id.into(), + sub_session: sub_session_id.clone().into(), + session_nonce: session_nonce, + error: format!("{:?}", err), + })))); + return; + }, + } }, _ => { data.sessions.signing_sessions.get(&signing_session_id) @@ -654,10 +711,11 @@ impl ClusterCore { break; }, Err(err) => { - warn!(target: "secretstore_net", "{}: signing session error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); + warn!(target: "secretstore_net", "{}: signing session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, 
message, sender); data.sessions.respond_with_signing_error(&session_id, &sub_session_id, &sender, message::SigningSessionError { session: session_id.clone().into(), sub_session: sub_session_id.clone().into(), + session_nonce: session_nonce, error: format!("{:?}", err), }); if err != Error::InvalidSessionId { @@ -929,7 +987,7 @@ impl ClusterClient for ClusterClientImpl { connected_nodes.insert(self.data.self_key_pair.public().clone()); let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); - let session = self.data.sessions.new_generation_session(self.data.self_key_pair.public().clone(), session_id, cluster)?; + let session = self.data.sessions.new_generation_session(self.data.self_key_pair.public().clone(), session_id, None, cluster)?; session.initialize(author, threshold, connected_nodes)?; Ok(GenerationSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)) } @@ -939,7 +997,7 @@ impl ClusterClient for ClusterClientImpl { connected_nodes.insert(self.data.self_key_pair.public().clone()); let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); - let session = self.data.sessions.new_encryption_session(self.data.self_key_pair.public().clone(), session_id, cluster)?; + let session = self.data.sessions.new_encryption_session(self.data.self_key_pair.public().clone(), session_id, None, cluster)?; session.initialize(requestor_signature, common_point, encrypted_point)?; Ok(EncryptionSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)) } @@ -950,7 +1008,7 @@ impl ClusterClient for ClusterClientImpl { let access_key = Random.generate()?.secret().clone(); let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); - let session = self.data.sessions.new_decryption_session(self.data.self_key_pair.public().clone(), session_id, access_key.clone(), cluster, Some(requestor_signature))?; + let session = 
self.data.sessions.new_decryption_session(self.data.self_key_pair.public().clone(), session_id, access_key.clone(), None, cluster, Some(requestor_signature))?; session.initialize(is_shadow_decryption)?; Ok(DecryptionSessionWrapper::new(Arc::downgrade(&self.data), DecryptionSessionId::new(session_id, access_key), session)) } @@ -961,7 +1019,7 @@ impl ClusterClient for ClusterClientImpl { let access_key = Random.generate()?.secret().clone(); let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); - let session = self.data.sessions.new_signing_session(self.data.self_key_pair.public().clone(), session_id, access_key.clone(), cluster, Some(requestor_signature))?; + let session = self.data.sessions.new_signing_session(self.data.self_key_pair.public().clone(), session_id, access_key.clone(), None, cluster, Some(requestor_signature))?; session.initialize(message_hash)?; Ok(SigningSessionWrapper::new(Arc::downgrade(&self.data), SigningSessionId::new(session_id, access_key), session)) } @@ -1147,6 +1205,7 @@ pub mod tests { #[test] fn generation_session_is_removed_when_succeeded() { + //::logger::init_log(); let mut core = Core::new().unwrap(); let clusters = make_clusters(&core, 6019, 3); run_clusters(&clusters); @@ -1154,14 +1213,16 @@ pub mod tests { // start && wait for generation session to complete let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap(); - loop_until(&mut core, time::Duration::from_millis(300), || session.state() == GenerationSessionState::Finished); + loop_until(&mut core, time::Duration::from_millis(300), || session.state() == GenerationSessionState::Finished + || session.state() == GenerationSessionState::Failed); assert!(session.joint_public_and_secret().unwrap().is_ok()); // check that session is either removed from all nodes, or nonexistent (already removed) assert!(clusters[0].client().generation_session(&SessionId::default()).is_none()); for i in 1..3 { if let 
Some(session) = clusters[i].client().generation_session(&SessionId::default()) { - loop_until(&mut core, time::Duration::from_millis(300), || session.state() == GenerationSessionState::Finished); + loop_until(&mut core, time::Duration::from_millis(300), || session.state() == GenerationSessionState::Finished + || session.state() == GenerationSessionState::Failed); assert!(session.joint_public_and_secret().unwrap().is_err()); assert!(clusters[i].client().generation_session(&SessionId::default()).is_none()); } diff --git a/secret_store/src/key_server_cluster/cluster_sessions.rs b/secret_store/src/key_server_cluster/cluster_sessions.rs index f8e4974b1..1168dedc1 100644 --- a/secret_store/src/key_server_cluster/cluster_sessions.rs +++ b/secret_store/src/key_server_cluster/cluster_sessions.rs @@ -16,7 +16,7 @@ use std::time; use std::sync::{Arc, Weak}; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::collections::{VecDeque, BTreeSet, BTreeMap}; use parking_lot::RwLock; use ethkey::{Public, Secret, Signature}; @@ -68,6 +68,18 @@ pub struct ClusterSessions { acl_storage: Arc, /// Make faulty generation sessions. make_faulty_generation_sessions: AtomicBool, + /// Always-increasing sessions counter. 
Is used as session nonce to prevent replay attacks: + /// 1) during handshake, KeyServers generate new random key to encrypt messages + /// => there's no way to use messages from previous connections for replay attacks + /// 2) when session (of any type) is started, master node increases its own session counter and broadcasts it + /// 3) when slave KeyServer receives session initialization message, it checks that new nonce is larger than previous (from the same master) + /// => there's no way to use messages from previous sessions for replay attacks + /// 4) KeyServer checks that each session message contains the same nonce that initialization message + /// Given that: (A) handshake is secure and (B) session itself is initially replay-protected + /// => this guarantees that sessions are replay-protected. + session_counter: AtomicUsize, + /// Maximal session nonce, received from given connection. + max_nonce: RwLock>, } /// Active sessions container. @@ -143,6 +155,8 @@ impl ClusterSessions { decryption_sessions: ClusterSessionsContainer::new(), signing_sessions: ClusterSessionsContainer::new(), make_faulty_generation_sessions: AtomicBool::new(false), + session_counter: AtomicUsize::new(0), + max_nonce: RwLock::new(BTreeMap::new()), } } @@ -152,11 +166,12 @@ impl ClusterSessions { } /// Create new generation session. 
- pub fn new_generation_session(&self, master: NodeId, session_id: SessionId, cluster: Arc) -> Result, Error> { + pub fn new_generation_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { // check that there's no finished encryption session with the same id if self.key_storage.contains(&session_id) { return Err(Error::DuplicateSessionId); } + // communicating to all other nodes is crucial for encryption session // => check that we have connections to all cluster nodes if self.nodes.iter().any(|n| !cluster.is_connected(n)) { @@ -164,12 +179,14 @@ impl ClusterSessions { } // check that there's no active encryption session with the same id + let nonce = self.check_session_nonce(&master, nonce)?; self.generation_sessions.insert(master, session_id, cluster.clone(), move || Ok(GenerationSessionImpl::new(GenerationSessionParams { id: session_id.clone(), self_node_id: self.self_node_id.clone(), key_storage: Some(self.key_storage.clone()), cluster: cluster, + nonce: Some(nonce), }))) .map(|session| { if self.make_faulty_generation_sessions.load(Ordering::Relaxed) { @@ -192,14 +209,17 @@ impl ClusterSessions { } /// Create new encryption session. - pub fn new_encryption_session(&self, master: NodeId, session_id: SessionId, cluster: Arc) -> Result, Error> { + pub fn new_encryption_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { let encrypted_data = self.read_key_share(&session_id, &cluster)?; + let nonce = self.check_session_nonce(&master, nonce)?; + self.encryption_sessions.insert(master, session_id, cluster.clone(), move || EncryptionSessionImpl::new(EncryptionSessionParams { id: session_id.clone(), self_node_id: self.self_node_id.clone(), encrypted_data: encrypted_data, key_storage: self.key_storage.clone(), cluster: cluster, + nonce: nonce, })) } @@ -216,9 +236,10 @@ impl ClusterSessions { } /// Create new decryption session. 
- pub fn new_decryption_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, cluster: Arc, requester_signature: Option) -> Result, Error> { + pub fn new_decryption_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, nonce: Option, cluster: Arc, requester_signature: Option) -> Result, Error> { let session_id = DecryptionSessionId::new(session_id, sub_session_id); let encrypted_data = self.read_key_share(&session_id.id, &cluster)?; + let nonce = self.check_session_nonce(&master, nonce)?; self.decryption_sessions.insert(master, session_id.clone(), cluster.clone(), move || DecryptionSessionImpl::new(DecryptionSessionParams { meta: SessionMeta { @@ -231,6 +252,7 @@ impl ClusterSessions { key_share: encrypted_data, acl_storage: self.acl_storage.clone(), cluster: cluster, + nonce: nonce, }, requester_signature)) } @@ -253,9 +275,10 @@ impl ClusterSessions { } /// Create new signing session. - pub fn new_signing_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, cluster: Arc, requester_signature: Option) -> Result, Error> { + pub fn new_signing_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, nonce: Option, cluster: Arc, requester_signature: Option) -> Result, Error> { let session_id = SigningSessionId::new(session_id, sub_session_id); let encrypted_data = self.read_key_share(&session_id.id, &cluster)?; + let nonce = self.check_session_nonce(&master, nonce)?; self.signing_sessions.insert(master, session_id.clone(), cluster.clone(), move || SigningSessionImpl::new(SigningSessionParams { meta: SessionMeta { @@ -268,6 +291,7 @@ impl ClusterSessions { key_share: encrypted_data, acl_storage: self.acl_storage.clone(), cluster: cluster, + nonce: nonce, }, requester_signature)) } @@ -303,6 +327,7 @@ impl ClusterSessions { self.encryption_sessions.on_connection_timeout(node_id); self.decryption_sessions.on_connection_timeout(node_id); 
self.signing_sessions.on_connection_timeout(node_id); + self.max_nonce.write().remove(node_id); } /// Read key share && remove disconnected nodes. @@ -317,6 +342,21 @@ impl ClusterSessions { } Ok(encrypted_data) } + + /// Check or generate new session nonce. + fn check_session_nonce(&self, master: &NodeId, nonce: Option) -> Result { + // if we're master node of the session, then nonce should be generated + // if we're slave node of the session, then nonce should be passed from outside + debug_assert!((master == &self.self_node_id) == nonce.is_none()); + + match nonce { + Some(nonce) => match nonce > *self.max_nonce.write().entry(master.clone()).or_insert(0) { + true => Ok(nonce), + false => Err(Error::ReplayProtection), + }, + None => Ok(self.session_counter.fetch_add(1, Ordering::Relaxed) as u64 + 1), + } + } } impl ClusterSessionsContainer where K: Clone + Ord, V: ClusterSession { diff --git a/secret_store/src/key_server_cluster/decryption_session.rs b/secret_store/src/key_server_cluster/decryption_session.rs index cc19fa0c5..3f7bdba04 100644 --- a/secret_store/src/key_server_cluster/decryption_session.rs +++ b/secret_store/src/key_server_cluster/decryption_session.rs @@ -59,6 +59,8 @@ struct SessionCore { pub key_share: DocumentKeyShare, /// Cluster which allows this node to send messages to other nodes in the cluster. pub cluster: Arc, + /// Session-level nonce. + pub nonce: u64, /// SessionImpl completion condvar. pub completed: Condvar, } @@ -95,8 +97,10 @@ pub struct SessionParams { pub key_share: DocumentKeyShare, /// ACL storage. pub acl_storage: Arc, - /// Cluster + /// Cluster. pub cluster: Arc, + /// Session nonce. + pub nonce: u64, } /// Decryption consensus transport. @@ -105,6 +109,8 @@ struct DecryptionConsensusTransport { id: SessionId, /// Session access key. access_key: Secret, + /// Session-level nonce. + nonce: u64, /// Cluster. cluster: Arc, } @@ -115,6 +121,8 @@ struct DecryptionJobTransport { id: SessionId, //// Session access key. 
access_key: Secret, + /// Session-level nonce. + nonce: u64, /// Cluster. cluster: Arc, } @@ -140,6 +148,7 @@ impl SessionImpl { let consensus_transport = DecryptionConsensusTransport { id: params.meta.id.clone(), access_key: params.access_key.clone(), + nonce: params.nonce, cluster: params.cluster.clone(), }; @@ -149,6 +158,7 @@ impl SessionImpl { access_key: params.access_key, key_share: params.key_share, cluster: params.cluster, + nonce: params.nonce, completed: Condvar::new(), }, data: Mutex::new(SessionData { @@ -213,6 +223,10 @@ impl SessionImpl { /// Process decryption message. pub fn process_message(&self, sender: &NodeId, message: &DecryptionMessage) -> Result<(), Error> { + if self.core.nonce != message.session_nonce() { + return Err(Error::ReplayProtection); + } + match message { &DecryptionMessage::DecryptionConsensusMessage(ref message) => self.on_consensus_message(sender, message), @@ -286,6 +300,7 @@ impl SessionImpl { self.core.cluster.send(&node, Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(DecryptionSessionCompleted { session: self.core.meta.id.clone().into(), sub_session: self.core.access_key.clone().into(), + session_nonce: self.core.nonce, })))?; } @@ -380,6 +395,7 @@ impl SessionCore { DecryptionJobTransport { id: self.meta.id.clone(), access_key: self.access_key.clone(), + nonce: self.nonce, cluster: self.cluster.clone() } } @@ -399,6 +415,7 @@ impl JobTransport for DecryptionConsensusTransport { self.cluster.send(node, Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(DecryptionConsensusMessage { session: self.id.clone().into(), sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, message: ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { requestor_signature: request.into(), }) @@ -409,6 +426,7 @@ impl JobTransport for DecryptionConsensusTransport { self.cluster.send(node, 
Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(DecryptionConsensusMessage { session: self.id.clone().into(), sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, message: ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { is_confirmed: response, }) @@ -424,6 +442,7 @@ impl JobTransport for DecryptionJobTransport { self.cluster.send(node, Message::Decryption(DecryptionMessage::RequestPartialDecryption(RequestPartialDecryption { session: self.id.clone().into(), sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, request_id: request.id.into(), is_shadow_decryption: request.is_shadow_decryption, nodes: request.other_nodes_ids.into_iter().map(Into::into).collect(), @@ -434,6 +453,7 @@ impl JobTransport for DecryptionJobTransport { self.cluster.send(node, Message::Decryption(DecryptionMessage::PartialDecryption(PartialDecryption { session: self.id.clone().into(), sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, request_id: response.request_id.into(), shadow_point: response.shadow_point.into(), decrypt_shadow: response.decrypt_shadow, @@ -535,7 +555,8 @@ mod tests { access_key: access_key.clone(), key_share: encrypted_datas[i].clone(), acl_storage: acl_storages[i].clone(), - cluster: clusters[i].clone() + cluster: clusters[i].clone(), + nonce: 0, }, if i == 0 { signature.clone() } else { None }).unwrap()).collect(); (requester, clusters, acl_storages, sessions) @@ -584,6 +605,7 @@ mod tests { }, acl_storage: Arc::new(DummyAclStorage::default()), cluster: Arc::new(DummyCluster::new(self_node_id.clone())), + nonce: 0, }, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) { Ok(_) => (), _ => panic!("unexpected"), @@ -614,6 +636,7 @@ mod tests { }, acl_storage: Arc::new(DummyAclStorage::default()), cluster: Arc::new(DummyCluster::new(self_node_id.clone())), + nonce: 0, }, 
Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) { Err(Error::InvalidNodesConfiguration) => (), _ => panic!("unexpected"), @@ -644,6 +667,7 @@ mod tests { }, acl_storage: Arc::new(DummyAclStorage::default()), cluster: Arc::new(DummyCluster::new(self_node_id.clone())), + nonce: 0, }, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) { Err(Error::InvalidThreshold) => (), _ => panic!("unexpected"), @@ -664,6 +688,7 @@ mod tests { assert_eq!(sessions[0].on_consensus_message(sessions[1].node(), &message::DecryptionConsensusMessage { session: SessionId::default().into(), sub_session: sessions[0].access_key().clone().into(), + session_nonce: 0, message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession { requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), }), @@ -676,6 +701,7 @@ mod tests { assert_eq!(sessions[1].on_consensus_message(sessions[0].node(), &message::DecryptionConsensusMessage { session: SessionId::default().into(), sub_session: sessions[0].access_key().clone().into(), + session_nonce: 0, message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession { requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), }), @@ -683,6 +709,7 @@ mod tests { assert_eq!(sessions[1].on_partial_decryption_requested(sessions[2].node(), &message::RequestPartialDecryption { session: SessionId::default().into(), sub_session: sessions[0].access_key().clone().into(), + session_nonce: 0, request_id: Random.generate().unwrap().secret().clone().into(), is_shadow_decryption: false, nodes: sessions.iter().map(|s| s.node().clone().into()).take(4).collect(), @@ -695,6 +722,7 @@ mod tests { assert_eq!(sessions[1].on_consensus_message(sessions[0].node(), &message::DecryptionConsensusMessage { session: 
SessionId::default().into(), sub_session: sessions[0].access_key().clone().into(), + session_nonce: 0, message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession { requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), }), @@ -702,6 +730,7 @@ mod tests { assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node(), &message::RequestPartialDecryption { session: SessionId::default().into(), sub_session: sessions[0].access_key().clone().into(), + session_nonce: 0, request_id: Random.generate().unwrap().secret().clone().into(), is_shadow_decryption: false, nodes: sessions.iter().map(|s| s.node().clone().into()).take(2).collect(), @@ -714,6 +743,7 @@ mod tests { assert_eq!(sessions[0].on_partial_decryption(sessions[1].node(), &message::PartialDecryption { session: SessionId::default().into(), sub_session: sessions[0].access_key().clone().into(), + session_nonce: 0, request_id: Random.generate().unwrap().secret().clone().into(), shadow_point: Random.generate().unwrap().public().clone().into(), decrypt_shadow: None, @@ -937,7 +967,14 @@ mod tests { } #[test] - fn decryption_session_works_over_network() { - // TODO + fn decryption_message_fails_when_nonce_is_wrong() { + let (_, _, _, sessions) = prepare_decryption_sessions(); + assert_eq!(sessions[1].process_message(sessions[0].node(), &message::DecryptionMessage::DecryptionSessionCompleted( + message::DecryptionSessionCompleted { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + session_nonce: 10, + } + )), Err(Error::ReplayProtection)); } } diff --git a/secret_store/src/key_server_cluster/encryption_session.rs b/secret_store/src/key_server_cluster/encryption_session.rs index 9a1b9e330..1a1304a53 100644 --- a/secret_store/src/key_server_cluster/encryption_session.rs +++ b/secret_store/src/key_server_cluster/encryption_session.rs @@ -53,6 +53,8 @@ pub struct 
SessionImpl { key_storage: Arc, /// Cluster which allows this node to send messages to other nodes in the cluster. cluster: Arc, + /// Session nonce. + nonce: u64, /// SessionImpl completion condvar. completed: Condvar, /// Mutable session data. @@ -71,6 +73,8 @@ pub struct SessionParams { pub key_storage: Arc, /// Cluster pub cluster: Arc, + /// Session nonce. + pub nonce: u64, } /// Mutable data of encryption (distributed key generation) session. @@ -119,6 +123,7 @@ impl SessionImpl { encrypted_data: params.encrypted_data, key_storage: params.key_storage, cluster: params.cluster, + nonce: params.nonce, completed: Condvar::new(), data: Mutex::new(SessionData { state: SessionState::WaitingForInitialization, @@ -169,6 +174,7 @@ impl SessionImpl { if self.encrypted_data.id_numbers.len() > 1 { self.cluster.broadcast(Message::Encryption(EncryptionMessage::InitializeEncryptionSession(InitializeEncryptionSession { session: self.id.clone().into(), + session_nonce: self.nonce, requestor_signature: requestor_signature.into(), common_point: common_point.into(), encrypted_point: encrypted_point.into(), @@ -187,6 +193,8 @@ impl SessionImpl { debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); + self.check_nonce(message.session_nonce)?; + let mut data = self.data.lock(); // check state @@ -213,6 +221,7 @@ impl SessionImpl { // send confirmation back to master node self.cluster.send(&sender, Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization(ConfirmEncryptionInitialization { session: self.id.clone().into(), + session_nonce: self.nonce, }))) } @@ -221,6 +230,8 @@ impl SessionImpl { debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); + self.check_nonce(message.session_nonce)?; + let mut data = self.data.lock(); debug_assert!(data.nodes.contains_key(&sender)); @@ -242,6 +253,8 @@ impl SessionImpl { /// When error has occured on another node. 
pub fn on_session_error(&self, sender: NodeId, message: &EncryptionSessionError) -> Result<(), Error> { + self.check_nonce(message.session_nonce)?; + let mut data = self.data.lock(); warn!("{}: encryption session failed with error: {} from {}", self.node(), message.error, sender); @@ -252,6 +265,14 @@ impl SessionImpl { Ok(()) } + + /// Check session nonce. + fn check_nonce(&self, message_session_nonce: u64) -> Result<(), Error> { + match self.nonce == message_session_nonce { + true => Ok(()), + false => Err(Error::ReplayProtection), + } + } } impl ClusterSession for SessionImpl { diff --git a/secret_store/src/key_server_cluster/generation_session.rs b/secret_store/src/key_server_cluster/generation_session.rs index dfc441bdb..68ecb3519 100644 --- a/secret_store/src/key_server_cluster/generation_session.rs +++ b/secret_store/src/key_server_cluster/generation_session.rs @@ -54,6 +54,8 @@ pub struct SessionImpl { key_storage: Option>, /// Cluster which allows this node to send messages to other nodes in the cluster. cluster: Arc, + /// Session-level nonce. + nonce: u64, /// SessionImpl completion condvar. completed: Condvar, /// Mutable session data. @@ -70,6 +72,8 @@ pub struct SessionParams { pub key_storage: Option>, /// Cluster pub cluster: Arc, + /// Session nonce. + pub nonce: Option, } /// Mutable data of distributed key generation session. 
@@ -187,6 +191,9 @@ impl SessionImpl { self_node_id: params.self_node_id, key_storage: params.key_storage, cluster: params.cluster, + // when nonce.is_none(), generation session is wrapped + // => nonce is checked somewhere else && we can pass any value + nonce: params.nonce.unwrap_or_default(), completed: Condvar::new(), data: Mutex::new(SessionData { state: SessionState::WaitingForInitialization, @@ -251,6 +258,7 @@ impl SessionImpl { // start initialization self.cluster.send(&next_node, Message::Generation(GenerationMessage::InitializeSession(InitializeSession { session: self.id.clone().into(), + session_nonce: self.nonce, author: author.into(), nodes: data.nodes.iter().map(|(k, v)| (k.clone().into(), v.id_number.clone().into())).collect(), threshold: data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"), @@ -269,6 +277,10 @@ impl SessionImpl { /// Process single message. pub fn process_message(&self, sender: &NodeId, message: &GenerationMessage) -> Result<(), Error> { + if self.nonce != message.session_nonce() { + return Err(Error::ReplayProtection); + } + match message { &GenerationMessage::InitializeSession(ref message) => self.on_initialize_session(sender.clone(), message), @@ -311,6 +323,7 @@ impl SessionImpl { // send confirmation back to master node self.cluster.send(&sender, Message::Generation(GenerationMessage::ConfirmInitialization(ConfirmInitialization { session: self.id.clone().into(), + session_nonce: self.nonce, derived_point: derived_point.into(), })))?; @@ -348,6 +361,7 @@ impl SessionImpl { if let Some(next_receiver) = next_receiver { return self.cluster.send(&next_receiver, Message::Generation(GenerationMessage::InitializeSession(InitializeSession { session: self.id.clone().into(), + session_nonce: self.nonce, author: data.author.as_ref().expect("author is filled on initialization step; confrm initialization follows initialization; qed").clone().into(), nodes: 
data.nodes.iter().map(|(k, v)| (k.clone().into(), v.id_number.clone().into())).collect(), threshold: data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"), @@ -506,6 +520,7 @@ impl SessionImpl { data.state = SessionState::Finished; return self.cluster.send(&sender, Message::Generation(GenerationMessage::SessionCompleted(SessionCompleted { session: self.id.clone().into(), + session_nonce: self.nonce, }))); } @@ -557,6 +572,7 @@ impl SessionImpl { // broadcast derived point && other session paraeters to every other node self.cluster.broadcast(Message::Generation(GenerationMessage::CompleteInitialization(CompleteInitialization { session: self.id.clone().into(), + session_nonce: self.nonce, derived_point: derived_point.into(), }))) } @@ -589,6 +605,7 @@ impl SessionImpl { self.cluster.send(&node, Message::Generation(GenerationMessage::KeysDissemination(KeysDissemination { session: self.id.clone().into(), + session_nonce: self.nonce, secret1: secret1.into(), secret2: secret2.into(), publics: publics.iter().cloned().map(Into::into).collect(), @@ -649,6 +666,7 @@ impl SessionImpl { // broadcast self public key share self.cluster.broadcast(Message::Generation(GenerationMessage::PublicKeyShare(PublicKeyShare { session: self.id.clone().into(), + session_nonce: self.nonce, public_share: self_public_share.into(), }))) } @@ -691,6 +709,7 @@ impl SessionImpl { // then distribute encrypted data to every other node self.cluster.broadcast(Message::Generation(GenerationMessage::SessionCompleted(SessionCompleted { session: self.id.clone().into(), + session_nonce: self.nonce, })))?; // then wait for confirmation from all other nodes @@ -871,6 +890,7 @@ pub mod tests { self_node_id: node_id.clone(), key_storage: Some(key_storage.clone()), cluster: cluster.clone(), + nonce: Some(0), }); nodes.insert(node_id, Node { cluster: cluster, key_storage: key_storage, session: session }); } @@ -960,6 +980,7 @@ pub mod tests { 
self_node_id: node_id.clone(), key_storage: Some(Arc::new(DummyKeyStorage::default())), cluster: cluster, + nonce: Some(0), }); let cluster_nodes: BTreeSet<_> = (0..2).map(|_| math::generate_random_point().unwrap()).collect(); assert_eq!(session.initialize(Public::default(), 0, cluster_nodes).unwrap_err(), Error::InvalidNodesConfiguration); @@ -1013,6 +1034,7 @@ pub mod tests { l.take_and_process_message().unwrap(); assert_eq!(l.master().on_confirm_initialization(s, &message::ConfirmInitialization { session: sid.into(), + session_nonce: 0, derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -1024,6 +1046,7 @@ pub mod tests { l.take_and_process_message().unwrap(); assert_eq!(l.master().on_confirm_initialization(s, &message::ConfirmInitialization { session: sid.into(), + session_nonce: 0, derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -1052,6 +1075,7 @@ pub mod tests { nodes.insert(math::generate_random_point().unwrap(), math::generate_random_scalar().unwrap()); assert_eq!(l.first_slave().on_initialize_session(m, &message::InitializeSession { session: sid.into(), + session_nonce: 0, author: Public::default().into(), nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 0, @@ -1067,6 +1091,7 @@ pub mod tests { nodes.insert(s, math::generate_random_scalar().unwrap()); assert_eq!(l.first_slave().on_initialize_session(m, &message::InitializeSession { session: sid.into(), + session_nonce: 0, author: Public::default().into(), nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 2, @@ -1079,6 +1104,7 @@ pub mod tests { let (sid, m, _, l) = make_simple_cluster(0, 2).unwrap(); assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { session: sid.into(), + session_nonce: 0, derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), 
Error::InvalidStateForRequest); } @@ -1092,6 +1118,7 @@ pub mod tests { l.take_and_process_message().unwrap(); assert_eq!(l.first_slave().on_complete_initialization(l.second_slave().node().clone(), &message::CompleteInitialization { session: sid.into(), + session_nonce: 0, derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidMessage); } @@ -1101,6 +1128,7 @@ pub mod tests { let (sid, _, s, l) = make_simple_cluster(0, 2).unwrap(); assert_eq!(l.master().on_keys_dissemination(s, &message::KeysDissemination { session: sid.into(), + session_nonce: 0, secret1: math::generate_random_scalar().unwrap().into(), secret2: math::generate_random_scalar().unwrap().into(), publics: vec![math::generate_random_point().unwrap().into()], @@ -1119,6 +1147,7 @@ pub mod tests { l.take_and_process_message().unwrap(); // m -> s1: KeysDissemination assert_eq!(l.first_slave().on_keys_dissemination(m, &message::KeysDissemination { session: sid.into(), + session_nonce: 0, secret1: math::generate_random_scalar().unwrap().into(), secret2: math::generate_random_scalar().unwrap().into(), publics: vec![math::generate_random_point().unwrap().into(), math::generate_random_point().unwrap().into()], @@ -1137,6 +1166,7 @@ pub mod tests { l.take_and_process_message().unwrap(); // m -> s1: KeysDissemination assert_eq!(l.first_slave().on_keys_dissemination(m, &message::KeysDissemination { session: sid.into(), + session_nonce: 0, secret1: math::generate_random_scalar().unwrap().into(), secret2: math::generate_random_scalar().unwrap().into(), publics: vec![math::generate_random_point().unwrap().into()], @@ -1148,6 +1178,7 @@ pub mod tests { let (sid, _, s, l) = make_simple_cluster(1, 3).unwrap(); assert_eq!(l.master().on_public_key_share(s, &message::PublicKeyShare { session: sid.into(), + session_nonce: 0, public_share: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -1176,6 +1207,7 @@ pub mod tests { 
l.process_message((f, t, Message::Generation(GenerationMessage::PublicKeyShare(msg.clone())))).unwrap(); assert_eq!(l.second_slave().on_public_key_share(m, &message::PublicKeyShare { session: sid.into(), + session_nonce: 0, public_share: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidMessage); } @@ -1253,4 +1285,16 @@ pub mod tests { loop_until(&mut core, time::Duration::from_millis(1000), || session.joint_public_and_secret().is_some()); } } + + #[test] + fn generation_message_fails_when_nonce_is_wrong() { + let (sid, m, _, l) = make_simple_cluster(0, 2).unwrap(); + assert_eq!(l.first_slave().process_message(&m, &message::GenerationMessage::KeysDissemination(message::KeysDissemination { + session: sid.into(), + session_nonce: 10, + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into()], + })).unwrap_err(), Error::ReplayProtection); + } } diff --git a/secret_store/src/key_server_cluster/io/handshake.rs b/secret_store/src/key_server_cluster/io/handshake.rs index 0aedcc624..22f841a15 100644 --- a/secret_store/src/key_server_cluster/io/handshake.rs +++ b/secret_store/src/key_server_cluster/io/handshake.rs @@ -14,12 +14,31 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! Given: two nodes each holding its own `self_key_pair`. +//! +//! Handshake process: +//! 1) both nodes are generating random `KeyPair` (`session_key_pair`), which will be used for channel encryption +//! 2) both nodes are generating random H256 (`confirmation_plain`) +//! 3) both nodes are signing `confirmation_plain` using `session_key_pair` to receive `confirmation_signed_session` +//! 4) nodes exchange with `NodePublicKey` messages, containing: `self_key_pair.public`, `confirmation_plain`, `confirmation_signed_session` +//! 
5) both nodes are checking that they're configured to communicate to server with received `message.self_key_pair.public`. Connection is closed otherwise +//! 6) both nodes are recovering peer' `session_key_pair.public` from `message.confirmation_plain` and `message.confirmation_signed_session` +//! 7) both nodes are computing shared session key pair using self' `session_key_pair.secret` && peer' `session_key_pair.public`. All following messages are encrypted using this key_pair. +//! 8) both nodes are signing `message.confirmation_plain` with their own `self_key_pair.private` to receive `confirmation_signed` +//! 9) nodes exchange with `NodePrivateKeySignature` messages, containing `confirmation_signed` +//! 10) both nodes are checking that `confirmation_signed` is actually signed with the owner of peer' `self_key_pair.secret` +//! +//! Result of handshake is: +//! 1) belief, that we are connected to the KS from our KS-set +//! 2) session key pair, which is used to encrypt all connection messages + use std::io; use std::sync::Arc; use std::collections::BTreeSet; use futures::{Future, Poll, Async}; use tokio_io::{AsyncRead, AsyncWrite}; -use ethkey::{Random, Generator, KeyPair, verify_public}; +use ethcrypto::ecdh::agree; +use ethkey::{Random, Generator, KeyPair, Public, Signature, verify_public, sign, recover}; use bigint::hash::H256; use key_server_cluster::{NodeId, Error, NodeKeyPair}; use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature}; @@ -28,16 +47,20 @@ use key_server_cluster::io::{write_message, write_encrypted_message, WriteMessag /// Start handshake procedure with another node from the cluster. 
pub fn handshake(a: A, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { - let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); - handshake_with_plain_confirmation(a, self_confirmation_plain, self_key_pair, trusted_nodes) + let init_data = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into) + .and_then(|cp| Random.generate().map(|kp| (cp, kp)).map_err(Into::into)); + handshake_with_init_data(a, init_data, self_key_pair, trusted_nodes) } -/// Start handshake procedure with another node from the cluster and given plain confirmation. -pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Result, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { - let (error, state) = match self_confirmation_plain.clone() - .and_then(|c| Handshake::::make_public_key_message(self_key_pair.public().clone(), c)) { - Ok(message) => (None, HandshakeState::SendPublicKey(write_message(a, message))), - Err(err) => (Some((a, Err(err))), HandshakeState::Finished), +/// Start handshake procedure with another node from the cluster and given plain confirmation + session key pair. 
+pub fn handshake_with_init_data(a: A, init_data: Result<(H256, KeyPair), Error>, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { + let handshake_input_data = init_data + .and_then(|(cp, kp)| sign(kp.secret(), &cp).map(|sp| (cp, kp, sp)).map_err(Into::into)) + .and_then(|(cp, kp, sp)| Handshake::::make_public_key_message(self_key_pair.public().clone(), cp.clone(), sp).map(|msg| (cp, kp, msg))); + + let (error, cp, kp, state) = match handshake_input_data { + Ok((cp, kp, msg)) => (None, cp, Some(kp), HandshakeState::SendPublicKey(write_message(a, msg))), + Err(err) => (Some((a, Err(err))), Default::default(), None, HandshakeState::Finished), }; Handshake { @@ -45,10 +68,12 @@ pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Resul error: error, state: state, self_key_pair: self_key_pair, - self_confirmation_plain: self_confirmation_plain.unwrap_or(Default::default()), + self_session_key_pair: kp, + self_confirmation_plain: cp, trusted_nodes: Some(trusted_nodes), - other_node_id: None, - other_confirmation_plain: None, + peer_node_id: None, + peer_session_public: None, + peer_confirmation_plain: None, shared_key: None, } } @@ -56,9 +81,12 @@ pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Resul /// Wait for handshake procedure to be started by another node from the cluster. 
pub fn accept_handshake(a: A, self_key_pair: Arc) -> Handshake where A: AsyncWrite + AsyncRead { let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); - let (error, state) = match self_confirmation_plain.clone() { - Ok(_) => (None, HandshakeState::ReceivePublicKey(read_message(a))), - Err(err) => (Some((a, Err(err))), HandshakeState::Finished), + let handshake_input_data = self_confirmation_plain + .and_then(|cp| Random.generate().map(|kp| (cp, kp)).map_err(Into::into)); + + let (error, cp, kp, state) = match handshake_input_data { + Ok((cp, kp)) => (None, cp, Some(kp), HandshakeState::ReceivePublicKey(read_message(a))), + Err(err) => (Some((a, Err(err))), Default::default(), None, HandshakeState::Finished), }; Handshake { @@ -66,10 +94,12 @@ pub fn accept_handshake(a: A, self_key_pair: Arc) -> Handshake { error: Option<(A, Result)>, state: HandshakeState, self_key_pair: Arc, + self_session_key_pair: Option, self_confirmation_plain: H256, trusted_nodes: Option>, - other_node_id: Option, - other_confirmation_plain: Option, + peer_node_id: Option, + peer_session_public: Option, + peer_confirmation_plain: Option, shared_key: Option, } @@ -111,10 +143,16 @@ impl Handshake where A: AsyncRead + AsyncWrite { self.self_confirmation_plain = self_confirmation_plain; } - pub fn make_public_key_message(self_node_id: NodeId, confirmation_plain: H256) -> Result { + #[cfg(test)] + pub fn set_self_session_key_pair(&mut self, self_session_key_pair: KeyPair) { + self.self_session_key_pair = Some(self_session_key_pair); + } + + pub fn make_public_key_message(self_node_id: NodeId, confirmation_plain: H256, confirmation_signed_session: Signature) -> Result { Ok(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey { node_id: self_node_id.into(), confirmation_plain: confirmation_plain.into(), + confirmation_signed_session: confirmation_signed_session.into(), }))) } @@ -123,6 +161,12 @@ impl Handshake where A: AsyncRead + AsyncWrite { 
confirmation_signed: self_key_pair.sign(confirmation_plain)?.into(), }))) } + + fn compute_shared_key(self_session_key_pair: &KeyPair, peer_session_public: &Public) -> Result { + agree(self_session_key_pair.secret(), peer_session_public) + .map_err(Into::into) + .and_then(|s| fix_shared_key(&s)) + } } impl Future for Handshake where A: AsyncRead + AsyncWrite { @@ -143,20 +187,25 @@ impl Future for Handshake where A: AsyncRead + AsyncWrite { read_message(stream) ), Async::NotReady) } else { - self.shared_key = match self.self_key_pair.compute_shared_key( - self.other_node_id.as_ref().expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; other_node_id is filled in ReceivePublicKey; qed") - ).map_err(Into::into).and_then(|sk| fix_shared_key(sk.secret())) { + let shared_key = Self::compute_shared_key( + self.self_session_key_pair.as_ref().expect( + "self_session_key_pair is not filled only when initialization has failed; if initialization has failed, self.error.is_some(); qed"), + self.peer_session_public.as_ref().expect( + "we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; peer_session_public is filled in ReceivePublicKey; qed"), + ); + + self.shared_key = match shared_key { Ok(shared_key) => Some(shared_key), - Err(err) => return Ok((stream, Err(err.into())).into()), + Err(err) => return Ok((stream, Err(err)).into()), }; - let message = match Handshake::::make_private_key_signature_message( - &*self.self_key_pair, - self.other_confirmation_plain.as_ref().expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; other_confirmation_plain is filled in ReceivePublicKey; qed") - ) { + let peer_confirmation_plain = self.peer_confirmation_plain.as_ref() + .expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; peer_confirmation_plain is filled in ReceivePublicKey; qed"); + let message = match 
Handshake::::make_private_key_signature_message(&*self.self_key_pair, peer_confirmation_plain) { Ok(message) => message, Err(err) => return Ok((stream, Err(err)).into()), }; + (HandshakeState::SendPrivateKeySignature(write_encrypted_message(stream, self.shared_key.as_ref().expect("filled couple of lines above; qed"), message)), Async::NotReady) @@ -177,28 +226,44 @@ impl Future for Handshake where A: AsyncRead + AsyncWrite { return Ok((stream, Err(Error::InvalidNodeId)).into()); } - self.other_node_id = Some(message.node_id.into()); - self.other_confirmation_plain = Some(message.confirmation_plain.into()); + self.peer_node_id = Some(message.node_id.into()); + self.peer_session_public = Some(match recover(&message.confirmation_signed_session, &message.confirmation_plain) { + Ok(peer_session_public) => peer_session_public, + Err(err) => return Ok((stream, Err(err.into())).into()), + }); + self.peer_confirmation_plain = Some(message.confirmation_plain.into()); if self.is_active { - self.shared_key = match self.self_key_pair.compute_shared_key( - self.other_node_id.as_ref().expect("filled couple of lines above; qed") - ).map_err(Into::into).and_then(|sk| fix_shared_key(sk.secret())) { + let shared_key = Self::compute_shared_key( + self.self_session_key_pair.as_ref().expect( + "self_session_key_pair is not filled only when initialization has failed; if initialization has failed, self.error.is_some(); qed"), + self.peer_session_public.as_ref().expect( + "we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; peer_session_public is filled in ReceivePublicKey; qed"), + ); + + self.shared_key = match shared_key { Ok(shared_key) => Some(shared_key), - Err(err) => return Ok((stream, Err(err.into())).into()), + Err(err) => return Ok((stream, Err(err)).into()), }; - let message = match Handshake::::make_private_key_signature_message( - &*self.self_key_pair, - self.other_confirmation_plain.as_ref().expect("filled couple of lines above; qed") - ) { + let 
peer_confirmation_plain = self.peer_confirmation_plain.as_ref() + .expect("filled couple of lines above; qed"); + let message = match Handshake::::make_private_key_signature_message(&*self.self_key_pair, peer_confirmation_plain) { Ok(message) => message, Err(err) => return Ok((stream, Err(err)).into()), }; + (HandshakeState::SendPrivateKeySignature(write_encrypted_message(stream, self.shared_key.as_ref().expect("filled couple of lines above; qed"), message)), Async::NotReady) } else { - let message = match Handshake::::make_public_key_message(self.self_key_pair.public().clone(), self.self_confirmation_plain.clone()) { + let self_session_key_pair = self.self_session_key_pair.as_ref() + .expect("self_session_key_pair is not filled only when initialization has failed; if initialization has failed, self.error.is_some(); qed"); + let confirmation_signed_session = match sign(self_session_key_pair.secret(), &self.self_confirmation_plain).map_err(Into::into) { + Ok(confirmation_signed_session) => confirmation_signed_session, + Err(err) => return Ok((stream, Err(err)).into()), + }; + + let message = match Handshake::::make_public_key_message(self.self_key_pair.public().clone(), self.self_confirmation_plain.clone(), confirmation_signed_session) { Ok(message) => message, Err(err) => return Ok((stream, Err(err)).into()), }; @@ -225,13 +290,13 @@ impl Future for Handshake where A: AsyncRead + AsyncWrite { Err(err) => return Ok((stream, Err(err.into())).into()), }; - let other_node_public = self.other_node_id.as_ref().expect("other_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed"); - if !verify_public(other_node_public, &*message.confirmation_signed, &self.self_confirmation_plain).unwrap_or(false) { + let peer_public = self.peer_node_id.as_ref().expect("peer_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed"); + if !verify_public(peer_public, &*message.confirmation_signed, 
&self.self_confirmation_plain).unwrap_or(false) { return Ok((stream, Err(Error::InvalidMessage)).into()); } (HandshakeState::Finished, Async::Ready((stream, Ok(HandshakeResult { - node_id: self.other_node_id.expect("other_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed"), + node_id: self.peer_node_id.expect("peer_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed"), shared_key: self.shared_key.clone().expect("shared_key is filled in Send/ReceivePublicKey; ReceivePrivateKeySignature follows Send/ReceivePublicKey; qed"), })))) }, @@ -253,27 +318,26 @@ mod tests { use std::collections::BTreeSet; use futures::Future; use ethkey::{Random, Generator, sign}; - use ethcrypto::ecdh::agree; use bigint::hash::H256; use key_server_cluster::PlainNodeKeyPair; - use key_server_cluster::io::message::fix_shared_key; use key_server_cluster::io::message::tests::TestIo; use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature}; - use super::{handshake_with_plain_confirmation, accept_handshake, HandshakeResult}; + use super::{handshake_with_init_data, accept_handshake, HandshakeResult}; fn prepare_test_io() -> (H256, TestIo) { - let self_key_pair = Random.generate().unwrap(); - let peer_key_pair = Random.generate().unwrap(); - let mut io = TestIo::new(self_key_pair.clone(), peer_key_pair.public().clone()); + let mut io = TestIo::new(); let self_confirmation_plain = *Random.generate().unwrap().secret().clone(); let peer_confirmation_plain = *Random.generate().unwrap().secret().clone(); - let self_confirmation_signed = sign(peer_key_pair.secret(), &self_confirmation_plain).unwrap(); + let self_confirmation_signed = sign(io.peer_key_pair().secret(), &self_confirmation_plain).unwrap(); + let peer_confirmation_signed = sign(io.peer_session_key_pair().secret(), &peer_confirmation_plain).unwrap(); + let peer_public = io.peer_key_pair().public().clone(); 
io.add_input_message(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey { - node_id: peer_key_pair.public().clone().into(), + node_id: peer_public.into(), confirmation_plain: peer_confirmation_plain.into(), + confirmation_signed_session: peer_confirmation_signed.into(), }))); io.add_encrypted_input_message(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature { confirmation_signed: self_confirmation_signed.into(), @@ -285,14 +349,15 @@ mod tests { #[test] fn active_handshake_works() { let (self_confirmation_plain, io) = prepare_test_io(); - let self_key_pair = io.self_key_pair().clone(); - let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect(); - let shared_key = fix_shared_key(&agree(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap()).unwrap(); + let trusted_nodes: BTreeSet<_> = vec![io.peer_key_pair().public().clone()].into_iter().collect(); + let self_session_key_pair = io.self_session_key_pair().clone(); + let self_key_pair = Arc::new(PlainNodeKeyPair::new(io.self_key_pair().clone())); + let shared_key = io.shared_key_pair().clone(); - let handshake = handshake_with_plain_confirmation(io, Ok(self_confirmation_plain), Arc::new(PlainNodeKeyPair::new(self_key_pair)), trusted_nodes); + let handshake = handshake_with_init_data(io, Ok((self_confirmation_plain, self_session_key_pair)), self_key_pair, trusted_nodes); let handshake_result = handshake.wait().unwrap(); assert_eq!(handshake_result.1, Ok(HandshakeResult { - node_id: handshake_result.0.peer_public().clone(), + node_id: handshake_result.0.peer_key_pair().public().clone(), shared_key: shared_key, })); } @@ -300,16 +365,17 @@ mod tests { #[test] fn passive_handshake_works() { let (self_confirmation_plain, io) = prepare_test_io(); - let self_key_pair = io.self_key_pair().clone(); - let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect(); - let shared_key = 
fix_shared_key(&agree(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap()).unwrap(); + let self_key_pair = Arc::new(PlainNodeKeyPair::new(io.self_key_pair().clone())); + let self_session_key_pair = io.self_session_key_pair().clone(); + let shared_key = io.shared_key_pair().clone(); - let mut handshake = accept_handshake(io, Arc::new(PlainNodeKeyPair::new(self_key_pair))); + let mut handshake = accept_handshake(io, self_key_pair); handshake.set_self_confirmation_plain(self_confirmation_plain); + handshake.set_self_session_key_pair(self_session_key_pair); let handshake_result = handshake.wait().unwrap(); assert_eq!(handshake_result.1, Ok(HandshakeResult { - node_id: handshake_result.0.peer_public().clone(), + node_id: handshake_result.0.peer_key_pair().public().clone(), shared_key: shared_key, })); } diff --git a/secret_store/src/key_server_cluster/io/message.rs b/secret_store/src/key_server_cluster/io/message.rs index e25e96836..2e549f1d9 100644 --- a/secret_store/src/key_server_cluster/io/message.rs +++ b/secret_store/src/key_server_cluster/io/message.rs @@ -30,6 +30,8 @@ use key_server_cluster::message::{Message, ClusterMessage, GenerationMessage, En /// Size of serialized header. pub const MESSAGE_HEADER_SIZE: usize = 4; +/// Current header version. +pub const CURRENT_HEADER_VERSION: u8 = 1; /// Message header. #[derive(Debug, PartialEq)] @@ -97,7 +99,7 @@ pub fn serialize_message(message: Message) -> Result { let payload = payload.map_err(|err| Error::Serde(err.to_string()))?; build_serialized_message(MessageHeader { kind: message_kind, - version: 1, + version: CURRENT_HEADER_VERSION, size: 0, }, payload) } @@ -177,8 +179,13 @@ fn serialize_header(header: &MessageHeader) -> Result, Error> { /// Deserialize message header. 
pub fn deserialize_header(data: &[u8]) -> Result { let mut reader = Cursor::new(data); + let version = reader.read_u8()?; + if version != CURRENT_HEADER_VERSION { + return Err(Error::InvalidMessageVersion); + } + Ok(MessageHeader { - version: reader.read_u8()?, + version: version, kind: reader.read_u8()?, size: reader.read_u16::()?, }) @@ -202,25 +209,34 @@ pub mod tests { use std::io; use futures::Poll; use tokio_io::{AsyncRead, AsyncWrite}; - use ethkey::{KeyPair, Public}; + use ethkey::{Random, Generator, KeyPair}; use ethcrypto::ecdh::agree; + use key_server_cluster::Error; use key_server_cluster::message::Message; - use super::{MESSAGE_HEADER_SIZE, MessageHeader, fix_shared_key, encrypt_message, serialize_message, - serialize_header, deserialize_header}; + use super::{MESSAGE_HEADER_SIZE, CURRENT_HEADER_VERSION, MessageHeader, fix_shared_key, encrypt_message, + serialize_message, serialize_header, deserialize_header}; pub struct TestIo { self_key_pair: KeyPair, - peer_public: Public, + self_session_key_pair: KeyPair, + peer_key_pair: KeyPair, + peer_session_key_pair: KeyPair, shared_key_pair: KeyPair, input_buffer: io::Cursor>, } impl TestIo { - pub fn new(self_key_pair: KeyPair, peer_public: Public) -> Self { - let shared_key_pair = fix_shared_key(&agree(self_key_pair.secret(), &peer_public).unwrap()).unwrap(); + pub fn new() -> Self { + let self_session_key_pair = Random.generate().unwrap(); + let peer_session_key_pair = Random.generate().unwrap(); + let self_key_pair = Random.generate().unwrap(); + let peer_key_pair = Random.generate().unwrap(); + let shared_key_pair = fix_shared_key(&agree(self_session_key_pair.secret(), peer_session_key_pair.public()).unwrap()).unwrap(); TestIo { self_key_pair: self_key_pair, - peer_public: peer_public, + self_session_key_pair: self_session_key_pair, + peer_key_pair: peer_key_pair, + peer_session_key_pair: peer_session_key_pair, shared_key_pair: shared_key_pair, input_buffer: io::Cursor::new(Vec::new()), } @@ -230,8 
+246,20 @@ pub mod tests { &self.self_key_pair } - pub fn peer_public(&self) -> &Public { - &self.peer_public + pub fn self_session_key_pair(&self) -> &KeyPair { + &self.self_session_key_pair + } + + pub fn peer_key_pair(&self) -> &KeyPair { + &self.peer_key_pair + } + + pub fn peer_session_key_pair(&self) -> &KeyPair { + &self.peer_session_key_pair + } + + pub fn shared_key_pair(&self) -> &KeyPair { + &self.shared_key_pair } pub fn add_input_message(&mut self, message: Message) { @@ -281,7 +309,7 @@ pub mod tests { fn header_serialization_works() { let header = MessageHeader { kind: 1, - version: 2, + version: CURRENT_HEADER_VERSION, size: 3, }; @@ -291,4 +319,15 @@ pub mod tests { let deserialized_header = deserialize_header(&serialized_header).unwrap(); assert_eq!(deserialized_header, header); } + + #[test] + fn deserializing_header_of_wrong_version_fails() { + let header = MessageHeader { + kind: 1, + version: CURRENT_HEADER_VERSION + 1, + size: 3, + }; + + assert_eq!(deserialize_header(&serialize_header(&header).unwrap()).unwrap_err(), Error::InvalidMessageVersion); + } } diff --git a/secret_store/src/key_server_cluster/message.rs b/secret_store/src/key_server_cluster/message.rs index c0f5729c0..e5adc39d3 100644 --- a/secret_store/src/key_server_cluster/message.rs +++ b/secret_store/src/key_server_cluster/message.rs @@ -127,8 +127,10 @@ pub enum SigningMessage { pub struct NodePublicKey { /// Node identifier (aka node public key). pub node_id: MessageNodeId, - /// Data, which must be signed by peer to prove that he owns the corresponding private key. + /// Random data, which must be signed by peer to prove that he owns the corresponding private key. pub confirmation_plain: SerializableH256, + /// The same random `confirmation_plain`, signed with one-time session key. + pub confirmation_signed_session: SerializableSignature, } /// Confirm that node owns the private key of previously passed public key (aka node id). 
@@ -138,7 +140,6 @@ pub struct NodePrivateKeySignature { pub confirmation_signed: SerializableSignature, } - /// Ask if the node is still alive. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct KeepAlive { @@ -154,6 +155,8 @@ pub struct KeepAliveResponse { pub struct InitializeSession { /// Session Id. pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, /// Session author. pub author: SerializablePublic, /// All session participants along with their identification numbers. @@ -173,6 +176,8 @@ pub struct InitializeSession { pub struct ConfirmInitialization { /// Session Id. pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, /// Derived generation point. pub derived_point: SerializablePublic, } @@ -182,6 +187,8 @@ pub struct ConfirmInitialization { pub struct CompleteInitialization { /// Session Id. pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, /// Derived generation point. pub derived_point: SerializablePublic, } @@ -191,6 +198,8 @@ pub struct CompleteInitialization { pub struct KeysDissemination { /// Session Id. pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, /// Secret 1. pub secret1: SerializableSecret, /// Secret 2. @@ -204,6 +213,8 @@ pub struct KeysDissemination { pub struct PublicKeyShare { /// Session Id. pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, /// Public key share. pub public_share: SerializablePublic, } @@ -213,6 +224,8 @@ pub struct PublicKeyShare { pub struct SessionError { /// Session Id. pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, /// Public key share. pub error: String, } @@ -222,6 +235,8 @@ pub struct SessionError { pub struct SessionCompleted { /// Session Id. pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, } /// Node is requested to prepare for saving encrypted data. 
@@ -229,6 +244,8 @@ pub struct SessionCompleted { pub struct InitializeEncryptionSession { /// Encryption session Id. pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, /// Requestor signature. pub requestor_signature: SerializableSignature, /// Common point. @@ -242,6 +259,8 @@ pub struct InitializeEncryptionSession { pub struct ConfirmEncryptionInitialization { /// Encryption session Id. pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, } /// When encryption session error has occured. @@ -249,6 +268,8 @@ pub struct ConfirmEncryptionInitialization { pub struct EncryptionSessionError { /// Encryption session Id. pub session: MessageSessionId, + /// Session-level nonce. + pub session_nonce: u64, /// Error message. pub error: String, } @@ -274,6 +295,8 @@ pub struct SigningConsensusMessage { pub session: MessageSessionId, /// Signing session Id. pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, /// Consensus message. pub message: ConsensusMessage, } @@ -285,6 +308,8 @@ pub struct SigningGenerationMessage { pub session: MessageSessionId, /// Signing session Id. pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, /// Generation message. pub message: GenerationMessage, } @@ -296,6 +321,8 @@ pub struct RequestPartialSignature { pub session: MessageSessionId, /// Signing session Id. pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, /// Request id. pub request_id: SerializableSecret, /// Message hash. @@ -311,6 +338,8 @@ pub struct PartialSignature { pub session: MessageSessionId, /// Signing session Id. pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, /// Request id. pub request_id: SerializableSecret, /// S part of signature. @@ -324,6 +353,8 @@ pub struct SigningSessionError { pub session: MessageSessionId, /// Signing session Id. 
pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, /// Error description. pub error: String, } @@ -335,6 +366,8 @@ pub struct SigningSessionCompleted { pub session: MessageSessionId, /// Signing session Id. pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, } /// Consensus-related decryption message. @@ -344,6 +377,8 @@ pub struct DecryptionConsensusMessage { pub session: MessageSessionId, /// Signing session Id. pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, /// Consensus message. pub message: ConsensusMessage, } @@ -355,6 +390,8 @@ pub struct RequestPartialDecryption { pub session: MessageSessionId, /// Decryption session Id. pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, /// Request id. pub request_id: SerializableSecret, /// Is shadow decryption requested? When true, decryption result @@ -371,6 +408,8 @@ pub struct PartialDecryption { pub session: MessageSessionId, /// Decryption session Id. pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, /// Request id. pub request_id: SerializableSecret, /// Partially decrypted secret. @@ -386,6 +425,8 @@ pub struct DecryptionSessionError { pub session: MessageSessionId, /// Decryption session Id. pub sub_session: SerializableSecret, + /// Session-level nonce. + pub session_nonce: u64, /// Public key share. pub error: String, } @@ -397,6 +438,8 @@ pub struct DecryptionSessionCompleted { pub session: MessageSessionId, /// Decryption session Id. pub sub_session: SerializableSecret, + /// Session-level nonce. 
+ pub session_nonce: u64, } impl GenerationMessage { @@ -411,6 +454,18 @@ impl GenerationMessage { GenerationMessage::SessionCompleted(ref msg) => &msg.session, } } + + pub fn session_nonce(&self) -> u64 { + match *self { + GenerationMessage::InitializeSession(ref msg) => msg.session_nonce, + GenerationMessage::ConfirmInitialization(ref msg) => msg.session_nonce, + GenerationMessage::CompleteInitialization(ref msg) => msg.session_nonce, + GenerationMessage::KeysDissemination(ref msg) => msg.session_nonce, + GenerationMessage::PublicKeyShare(ref msg) => msg.session_nonce, + GenerationMessage::SessionError(ref msg) => msg.session_nonce, + GenerationMessage::SessionCompleted(ref msg) => msg.session_nonce, + } + } } impl EncryptionMessage { @@ -421,6 +476,14 @@ impl EncryptionMessage { EncryptionMessage::EncryptionSessionError(ref msg) => &msg.session, } } + + pub fn session_nonce(&self) -> u64 { + match *self { + EncryptionMessage::InitializeEncryptionSession(ref msg) => msg.session_nonce, + EncryptionMessage::ConfirmEncryptionInitialization(ref msg) => msg.session_nonce, + EncryptionMessage::EncryptionSessionError(ref msg) => msg.session_nonce, + } + } } impl DecryptionMessage { @@ -443,6 +506,16 @@ impl DecryptionMessage { DecryptionMessage::DecryptionSessionCompleted(ref msg) => &msg.sub_session, } } + + pub fn session_nonce(&self) -> u64 { + match *self { + DecryptionMessage::DecryptionConsensusMessage(ref msg) => msg.session_nonce, + DecryptionMessage::RequestPartialDecryption(ref msg) => msg.session_nonce, + DecryptionMessage::PartialDecryption(ref msg) => msg.session_nonce, + DecryptionMessage::DecryptionSessionError(ref msg) => msg.session_nonce, + DecryptionMessage::DecryptionSessionCompleted(ref msg) => msg.session_nonce, + } + } } impl SigningMessage { @@ -467,6 +540,17 @@ impl SigningMessage { SigningMessage::SigningSessionCompleted(ref msg) => &msg.sub_session, } } + + pub fn session_nonce(&self) -> u64 { + match *self { + 
SigningMessage::SigningConsensusMessage(ref msg) => msg.session_nonce, + SigningMessage::SigningGenerationMessage(ref msg) => msg.session_nonce, + SigningMessage::RequestPartialSignature(ref msg) => msg.session_nonce, + SigningMessage::PartialSignature(ref msg) => msg.session_nonce, + SigningMessage::SigningSessionError(ref msg) => msg.session_nonce, + SigningMessage::SigningSessionCompleted(ref msg) => msg.session_nonce, + } + } } impl fmt::Display for Message { diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index 1ec04e2e0..a71c356ae 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -90,6 +90,10 @@ pub enum Error { /// Message or some data in the message was recognized as invalid. /// This means that node is misbehaving/cheating. InvalidMessage, + /// Message version is not supported. + InvalidMessageVersion, + /// Message is invalid because of replay-attack protection. + ReplayProtection, /// Connection to node, required for this session is not established. NodeDisconnected, /// Cryptographic error. 
@@ -140,6 +144,8 @@ impl fmt::Display for Error { Error::InvalidStateForRequest => write!(f, "session is in invalid state for processing this request"), Error::InvalidNodeForRequest => write!(f, "invalid node for this request"), Error::InvalidMessage => write!(f, "invalid message is received"), + Error::InvalidMessageVersion => write!(f, "unsupported message is received"), + Error::ReplayProtection => write!(f, "replay message is received"), Error::NodeDisconnected => write!(f, "node required for this operation is currently disconnected"), Error::EthKey(ref e) => write!(f, "cryptographic error {}", e), Error::Io(ref e) => write!(f, "i/o error {}", e), diff --git a/secret_store/src/key_server_cluster/signing_session.rs b/secret_store/src/key_server_cluster/signing_session.rs index 73d65f749..822d8228b 100644 --- a/secret_store/src/key_server_cluster/signing_session.rs +++ b/secret_store/src/key_server_cluster/signing_session.rs @@ -63,6 +63,8 @@ struct SessionCore { pub key_share: DocumentKeyShare, /// Cluster which allows this node to send messages to other nodes in the cluster. pub cluster: Arc, + /// Session-level nonce. + pub nonce: u64, /// SessionImpl completion condvar. pub completed: Condvar, } @@ -108,6 +110,8 @@ pub struct SessionParams { pub acl_storage: Arc, /// Cluster pub cluster: Arc, + /// Session nonce. + pub nonce: u64, } /// Signing consensus transport. @@ -116,6 +120,8 @@ struct SigningConsensusTransport { id: SessionId, /// Session access key. access_key: Secret, + /// Session-level nonce. + nonce: u64, /// Cluster. cluster: Arc, } @@ -126,6 +132,8 @@ struct SessionKeyGenerationTransport { access_key: Secret, /// Cluster. cluster: Arc, + /// Session-level nonce. + nonce: u64, /// Other nodes ids. other_nodes_ids: BTreeSet, } @@ -134,8 +142,10 @@ struct SessionKeyGenerationTransport { struct SigningJobTransport { /// Session id. id: SessionId, - //// Session access key. + /// Session access key. access_key: Secret, + /// Session-level nonce. 
+ nonce: u64, /// Cluster. cluster: Arc, } @@ -156,6 +166,7 @@ impl SessionImpl { let consensus_transport = SigningConsensusTransport { id: params.meta.id.clone(), access_key: params.access_key.clone(), + nonce: params.nonce, cluster: params.cluster.clone(), }; @@ -165,6 +176,7 @@ impl SessionImpl { access_key: params.access_key, key_share: params.key_share, cluster: params.cluster, + nonce: params.nonce, completed: Condvar::new(), }, data: Mutex::new(SessionData { @@ -208,8 +220,10 @@ impl SessionImpl { cluster: Arc::new(SessionKeyGenerationTransport { access_key: self.core.access_key.clone(), cluster: self.core.cluster.clone(), + nonce: self.core.nonce, other_nodes_ids: BTreeSet::new() }), + nonce: None, }); generation_session.initialize(Public::default(), 0, vec![self.core.meta.self_node_id.clone()].into_iter().collect())?; @@ -232,6 +246,10 @@ impl SessionImpl { /// Process signing message. pub fn process_message(&self, sender: &NodeId, message: &SigningMessage) -> Result<(), Error> { + if self.core.nonce != message.session_nonce() { + return Err(Error::ReplayProtection); + } + match message { &SigningMessage::SigningConsensusMessage(ref message) => self.on_consensus_message(sender, message), @@ -274,8 +292,10 @@ impl SessionImpl { cluster: Arc::new(SessionKeyGenerationTransport { access_key: self.core.access_key.clone(), cluster: self.core.cluster.clone(), + nonce: self.core.nonce, other_nodes_ids: other_consensus_group_nodes, }), + nonce: None, }); generation_session.initialize(Public::default(), self.core.key_share.threshold, consensus_group)?; data.generation_session = Some(generation_session); @@ -308,8 +328,10 @@ impl SessionImpl { cluster: Arc::new(SessionKeyGenerationTransport { access_key: self.core.access_key.clone(), cluster: self.core.cluster.clone(), + nonce: self.core.nonce, other_nodes_ids: other_consensus_group_nodes }), + nonce: None, }); data.generation_session = Some(generation_session); data.state = SessionState::SessionKeyGeneration; @@ 
-390,6 +412,7 @@ impl SessionImpl { self.core.cluster.send(&node, Message::Signing(SigningMessage::SigningSessionCompleted(SigningSessionCompleted { session: self.core.meta.id.clone().into(), sub_session: self.core.access_key.clone().into(), + session_nonce: self.core.nonce, })))?; } @@ -490,6 +513,7 @@ impl SessionKeyGenerationTransport { Message::Generation(message) => Ok(Message::Signing(SigningMessage::SigningGenerationMessage(SigningGenerationMessage { session: message.session_id().clone().into(), sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, message: message, }))), _ => Err(Error::InvalidMessage), @@ -517,6 +541,7 @@ impl SessionCore { SigningJobTransport { id: self.meta.id.clone(), access_key: self.access_key.clone(), + nonce: self.nonce, cluster: self.cluster.clone() } } @@ -535,6 +560,7 @@ impl JobTransport for SigningConsensusTransport { self.cluster.send(node, Message::Signing(SigningMessage::SigningConsensusMessage(SigningConsensusMessage { session: self.id.clone().into(), sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, message: ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { requestor_signature: request.into(), }) @@ -545,6 +571,7 @@ impl JobTransport for SigningConsensusTransport { self.cluster.send(node, Message::Signing(SigningMessage::SigningConsensusMessage(SigningConsensusMessage { session: self.id.clone().into(), sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, message: ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { is_confirmed: response, }) @@ -560,6 +587,7 @@ impl JobTransport for SigningJobTransport { self.cluster.send(node, Message::Signing(SigningMessage::RequestPartialSignature(RequestPartialSignature { session: self.id.clone().into(), sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, request_id: request.id.into(), message_hash: request.message_hash.into(), nodes: 
request.other_nodes_ids.into_iter().map(Into::into).collect(), @@ -570,6 +598,7 @@ impl JobTransport for SigningJobTransport { self.cluster.send(node, Message::Signing(SigningMessage::PartialSignature(PartialSignature { session: self.id.clone().into(), sub_session: self.access_key.clone().into(), + session_nonce: self.nonce, request_id: response.request_id.into(), partial_signature: response.partial_signature.into(), }))) @@ -630,6 +659,7 @@ mod tests { key_share: gl_node.key_storage.get(&session_id).unwrap(), acl_storage: acl_storage, cluster: cluster.clone(), + nonce: 0, }, if i == 0 { signature.clone() } else { None }).unwrap(); nodes.insert(gl_node_id.clone(), Node { node_id: gl_node_id.clone(), cluster: cluster, session: session }); } @@ -764,6 +794,7 @@ mod tests { }, acl_storage: Arc::new(DummyAclStorage::default()), cluster: Arc::new(DummyCluster::new(self_node_id.clone())), + nonce: 0, }, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) { Ok(_) => (), _ => panic!("unexpected"), @@ -794,6 +825,7 @@ mod tests { }, acl_storage: Arc::new(DummyAclStorage::default()), cluster: Arc::new(DummyCluster::new(self_node_id.clone())), + nonce: 0, }, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) { Err(Error::InvalidNodesConfiguration) => (), _ => panic!("unexpected"), @@ -824,6 +856,7 @@ mod tests { }, acl_storage: Arc::new(DummyAclStorage::default()), cluster: Arc::new(DummyCluster::new(self_node_id.clone())), + nonce: 0, }, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) { Err(Error::InvalidThreshold) => (), _ => panic!("unexpected"), @@ -862,6 +895,7 @@ mod tests { assert_eq!(sl.master().on_consensus_message(sl.nodes.keys().nth(1).unwrap(), &SigningConsensusMessage { session: SessionId::default().into(), sub_session: sl.master().core.access_key.clone().into(), + session_nonce: 0, message: 
ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { is_confirmed: true, }), @@ -874,8 +908,10 @@ mod tests { assert_eq!(sl.master().on_generation_message(sl.nodes.keys().nth(1).unwrap(), &SigningGenerationMessage { session: SessionId::default().into(), sub_session: sl.master().core.access_key.clone().into(), + session_nonce: 0, message: GenerationMessage::ConfirmInitialization(ConfirmInitialization { session: SessionId::default().into(), + session_nonce: 0, derived_point: Public::default().into(), }), }), Err(Error::InvalidStateForRequest)); @@ -893,8 +929,10 @@ mod tests { assert_eq!(slave1.on_generation_message(&slave2_id, &SigningGenerationMessage { session: SessionId::default().into(), sub_session: sl.master().core.access_key.clone().into(), + session_nonce: 0, message: GenerationMessage::InitializeSession(InitializeSession { session: SessionId::default().into(), + session_nonce: 0, author: Public::default().into(), nodes: BTreeMap::new(), threshold: 1, @@ -910,6 +948,7 @@ mod tests { assert_eq!(slave1.on_partial_signature_requested(sl.nodes.keys().nth(0).unwrap(), &RequestPartialSignature { session: SessionId::default().into(), sub_session: sl.master().core.access_key.clone().into(), + session_nonce: 0, request_id: Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap().into(), message_hash: H256::default().into(), nodes: Default::default(), @@ -922,6 +961,7 @@ mod tests { assert_eq!(sl.master().on_partial_signature_requested(sl.nodes.keys().nth(1).unwrap(), &RequestPartialSignature { session: SessionId::default().into(), sub_session: sl.master().core.access_key.clone().into(), + session_nonce: 0, request_id: Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap().into(), message_hash: H256::default().into(), nodes: Default::default(), @@ -983,4 +1023,19 @@ mod tests { _ => unreachable!(), } } + + #[test] + fn signing_message_fails_when_nonce_is_wrong() { 
+ let (_, sl) = prepare_signing_sessions(1, 3); + assert_eq!(sl.master().process_message(sl.nodes.keys().nth(1).unwrap(), &SigningMessage::SigningGenerationMessage(SigningGenerationMessage { + session: SessionId::default().into(), + sub_session: sl.master().core.access_key.clone().into(), + session_nonce: 10, + message: GenerationMessage::ConfirmInitialization(ConfirmInitialization { + session: SessionId::default().into(), + session_nonce: 0, + derived_point: Public::default().into(), + }), + })), Err(Error::ReplayProtection)); + } }