From 6334893561ca2a4a1a84a88ed573cef1d3ea6dd8 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 6 Jul 2017 15:02:10 +0300 Subject: [PATCH] SecretStore: generating signatures (#5764) * refactoring traits * separate generation session * generalized ClusterSessions * signing session prototype * full_signature_math_session * consensus session prototype * continue signing session * continue signing session * continue signing session * continue signing session * isolated consensus logic * started work on signing test * complete_gen_sign_session works * consensus tests * get rid of duplicated data in SigningSession * TODOs in signing session * fixing tests * fixed last test * signing session in http listener * new key server tests * fix after merge * enabled warnings * fixed possible race * ignore previous jobs responses * include sef node in consensus when confirmed * fixed warning * removed extra clones * consensus_restarts_after_node_timeout * encrypt signature before return * return error text along with HTTP status * fix for odd-of-N (share check fails + not equal to local sign) * fixed t-of-N for odd t * fixed test cases in complete_gen_sign_session * fixed mistimed response reaction * jobs draft * DecryptionJob * consensus session tets * fixed decryption tests * signing job implementation * siginng_session using new consensus_session * added license preambles * same_consensus_group_returned_after_second_selection * database upgrade v0 -> v1 * typo * fixed grumbles --- ethkey/src/secret.rs | 9 + secret_store/src/acl_storage.rs | 14 +- secret_store/src/http_listener.rs | 237 +++- secret_store/src/key_server.rs | 177 ++- .../src/key_server_cluster/cluster.rs | 707 ++++------ .../key_server_cluster/cluster_sessions.rs | 506 +++++++ .../key_server_cluster/decryption_session.rs | 1103 ++++++--------- .../key_server_cluster/encryption_session.rs | 1172 ++------------- .../key_server_cluster/generation_session.rs | 1256 +++++++++++++++++ 
.../src/key_server_cluster/io/message.rs | 75 +- .../jobs/consensus_session.rs | 756 ++++++++++ .../key_server_cluster/jobs/decryption_job.rs | 162 +++ .../key_server_cluster/jobs/job_session.rs | 536 +++++++ .../key_server_cluster/jobs/key_access_job.rs | 73 + .../src/key_server_cluster/jobs/mod.rs | 21 + .../key_server_cluster/jobs/signing_job.rs | 145 ++ secret_store/src/key_server_cluster/math.rs | 359 +++-- .../src/key_server_cluster/message.rs | 308 +++- secret_store/src/key_server_cluster/mod.rs | 40 +- .../src/key_server_cluster/signing_session.rs | 706 +++++++++ secret_store/src/key_storage.rs | 184 ++- secret_store/src/lib.rs | 2 +- secret_store/src/serialization.rs | 5 +- secret_store/src/traits.rs | 66 +- secret_store/src/types/all.rs | 16 +- 25 files changed, 6164 insertions(+), 2471 deletions(-) create mode 100644 secret_store/src/key_server_cluster/cluster_sessions.rs create mode 100644 secret_store/src/key_server_cluster/generation_session.rs create mode 100644 secret_store/src/key_server_cluster/jobs/consensus_session.rs create mode 100644 secret_store/src/key_server_cluster/jobs/decryption_job.rs create mode 100644 secret_store/src/key_server_cluster/jobs/job_session.rs create mode 100644 secret_store/src/key_server_cluster/jobs/key_access_job.rs create mode 100644 secret_store/src/key_server_cluster/jobs/mod.rs create mode 100644 secret_store/src/key_server_cluster/jobs/signing_job.rs create mode 100644 secret_store/src/key_server_cluster/signing_session.rs diff --git a/ethkey/src/secret.rs b/ethkey/src/secret.rs index de35d6b04..982962684 100644 --- a/ethkey/src/secret.rs +++ b/ethkey/src/secret.rs @@ -92,6 +92,15 @@ impl Secret { Ok(()) } + /// Inplace negate secret key (-scalar) + pub fn neg(&mut self) -> Result<(), Error> { + let mut key_secret = self.to_secp256k1_secret()?; + key_secret.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; + + *self = key_secret.into(); + Ok(()) + } + /// Inplace inverse secret key (1 / scalar) pub fn inv(&mut self) 
-> Result<(), Error> { let mut key_secret = self.to_secp256k1_secret()?; diff --git a/secret_store/src/acl_storage.rs b/secret_store/src/acl_storage.rs index fea45c920..816d100dc 100644 --- a/secret_store/src/acl_storage.rs +++ b/secret_store/src/acl_storage.rs @@ -20,14 +20,14 @@ use parking_lot::Mutex; use ethkey::public_to_address; use ethcore::client::{Client, BlockChainClient, BlockId}; use native_contracts::SecretStoreAclStorage; -use types::all::{Error, DocumentAddress, Public}; +use types::all::{Error, ServerKeyId, Public}; const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker"; /// ACL storage of Secret Store pub trait AclStorage: Send + Sync { /// Check if requestor with `public` key can access document with hash `document` - fn check(&self, public: &Public, document: &DocumentAddress) -> Result; + fn check(&self, public: &Public, document: &ServerKeyId) -> Result; } /// On-chain ACL storage implementation. @@ -48,7 +48,7 @@ impl OnChainAclStorage { } impl AclStorage for OnChainAclStorage { - fn check(&self, public: &Public, document: &DocumentAddress) -> Result { + fn check(&self, public: &Public, document: &ServerKeyId) -> Result { let mut contract = self.contract.lock(); if !contract.is_some() { *contract = self.client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned()) @@ -74,19 +74,19 @@ impl AclStorage for OnChainAclStorage { pub mod tests { use std::collections::{HashMap, HashSet}; use parking_lot::RwLock; - use types::all::{Error, DocumentAddress, Public}; + use types::all::{Error, ServerKeyId, Public}; use super::AclStorage; #[derive(Default, Debug)] /// Dummy ACL storage implementation pub struct DummyAclStorage { - prohibited: RwLock>>, + prohibited: RwLock>>, } impl DummyAclStorage { #[cfg(test)] /// Prohibit given requestor access to given document - pub fn prohibit(&self, public: Public, document: DocumentAddress) { + pub fn prohibit(&self, public: Public, document: ServerKeyId) { 
self.prohibited.write() .entry(public) .or_insert_with(Default::default) @@ -95,7 +95,7 @@ pub mod tests { } impl AclStorage for DummyAclStorage { - fn check(&self, public: &Public, document: &DocumentAddress) -> Result { + fn check(&self, public: &Public, document: &ServerKeyId) -> Result { Ok(self.prohibited.read() .get(public) .map(|docs| !docs.contains(document)) diff --git a/secret_store/src/http_listener.rs b/secret_store/src/http_listener.rs index bc51811de..1f7f14ede 100644 --- a/secret_store/src/http_listener.rs +++ b/secret_store/src/http_listener.rs @@ -21,14 +21,23 @@ use hyper::method::Method as HttpMethod; use hyper::status::StatusCode as HttpStatusCode; use hyper::server::{Server as HttpServer, Request as HttpRequest, Response as HttpResponse, Handler as HttpHandler, Listening as HttpListening}; +use serde::Serialize; use serde_json; use url::percent_encoding::percent_decode; -use traits::KeyServer; -use serialization::{SerializableDocumentEncryptedKeyShadow, SerializableBytes}; -use types::all::{Error, NodeAddress, RequestSignature, DocumentAddress, DocumentEncryptedKey, DocumentEncryptedKeyShadow}; +use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer}; +use serialization::{SerializableEncryptedDocumentKeyShadow, SerializableBytes, SerializablePublic}; +use types::all::{Error, Public, MessageHash, EncryptedMessageSignature, NodeAddress, RequestSignature, ServerKeyId, + EncryptedDocumentKey, EncryptedDocumentKeyShadow}; + +/// Key server http-requests listener. 
Available requests: +/// To generate server key: POST /shadow/{server_key_id}/{signature}/{threshold} +/// To store pregenerated encrypted document key: POST /shadow/{server_key_id}/{signature}/{common_point}/{encrypted_key} +/// To generate server && document key: POST /{server_key_id}/{signature}/{threshold} +/// To get document key: GET /{server_key_id}/{signature} +/// To get document key shadow: GET /shadow/{server_key_id}/{signature} +/// To sign message with server key: GET /{server_key_id}/{signature}/{message_hash} -/// Key server http-requests listener pub struct KeyServerHttpListener { _http_server: HttpListening, handler: Arc>, @@ -39,12 +48,18 @@ pub struct KeyServerHttpListener { enum Request { /// Invalid request Invalid, + /// Generate server key. + GenerateServerKey(ServerKeyId, RequestSignature, usize), + /// Store document key. + StoreDocumentKey(ServerKeyId, RequestSignature, Public, Public), /// Generate encryption key. - GenerateDocumentKey(DocumentAddress, RequestSignature, usize), + GenerateDocumentKey(ServerKeyId, RequestSignature, usize), /// Request encryption key of given document for given requestor. - GetDocumentKey(DocumentAddress, RequestSignature), + GetDocumentKey(ServerKeyId, RequestSignature), /// Request shadow of encryption key of given document for given requestor. - GetDocumentKeyShadow(DocumentAddress, RequestSignature), + GetDocumentKeyShadow(ServerKeyId, RequestSignature), + /// Sign message. 
+ SignMessage(ServerKeyId, RequestSignature, MessageHash), } /// Cloneable http handler @@ -78,17 +93,35 @@ impl KeyServerHttpListener where T: KeyServer + 'static { } } -impl KeyServer for KeyServerHttpListener where T: KeyServer + 'static { - fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result { - self.handler.key_server.generate_document_key(signature, document, threshold) +impl KeyServer for KeyServerHttpListener where T: KeyServer + 'static {} + +impl ServerKeyGenerator for KeyServerHttpListener where T: KeyServer + 'static { + fn generate_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result { + self.handler.key_server.generate_key(key_id, signature, threshold) + } +} + +impl DocumentKeyServer for KeyServerHttpListener where T: KeyServer + 'static { + fn store_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> { + self.handler.key_server.store_document_key(key_id, signature, common_point, encrypted_document_key) } - fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result { - self.handler.key_server.document_key(signature, document) + fn generate_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result { + self.handler.key_server.generate_document_key(key_id, signature, threshold) } - fn document_key_shadow(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result { - self.handler.key_server.document_key_shadow(signature, document) + fn restore_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result { + self.handler.key_server.restore_document_key(key_id, signature) + } + + fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result { + self.handler.key_server.restore_document_key_shadow(key_id, signature) 
+ } +} + +impl MessageSigner for KeyServerHttpListener where T: KeyServer + 'static { + fn sign_message(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result { + self.handler.key_server.sign_message(key_id, signature, message) } } @@ -111,47 +144,47 @@ impl HttpHandler for KeyServerHttpHandler where T: KeyServer + 'static { let req_uri = req.uri.clone(); match &req_uri { &RequestUri::AbsolutePath(ref path) => match parse_request(&req_method, &path) { + Request::GenerateServerKey(document, signature, threshold) => { + return_server_public_key(req, res, self.handler.key_server.generate_key(&document, &signature, threshold) + .map_err(|err| { + warn!(target: "secretstore", "GenerateServerKey request {} has failed with: {}", req_uri, err); + err + })); + }, + Request::StoreDocumentKey(document, signature, common_point, encrypted_document_key) => { + return_empty(req, res, self.handler.key_server.store_document_key(&document, &signature, common_point, encrypted_document_key) + .map_err(|err| { + warn!(target: "secretstore", "StoreDocumentKey request {} has failed with: {}", req_uri, err); + err + })); + }, Request::GenerateDocumentKey(document, signature, threshold) => { - return_document_key(req, res, self.handler.key_server.generate_document_key(&signature, &document, threshold) + return_document_key(req, res, self.handler.key_server.generate_document_key(&document, &signature, threshold) .map_err(|err| { warn!(target: "secretstore", "GenerateDocumentKey request {} has failed with: {}", req_uri, err); err })); }, Request::GetDocumentKey(document, signature) => { - return_document_key(req, res, self.handler.key_server.document_key(&signature, &document) + return_document_key(req, res, self.handler.key_server.restore_document_key(&document, &signature) .map_err(|err| { warn!(target: "secretstore", "GetDocumentKey request {} has failed with: {}", req_uri, err); err })); }, Request::GetDocumentKeyShadow(document, signature) => { - 
match self.handler.key_server.document_key_shadow(&signature, &document) + return_document_key_shadow(req, res, self.handler.key_server.restore_document_key_shadow(&document, &signature) .map_err(|err| { warn!(target: "secretstore", "GetDocumentKeyShadow request {} has failed with: {}", req_uri, err); err - }) { - Ok(document_key_shadow) => { - let document_key_shadow = SerializableDocumentEncryptedKeyShadow { - decrypted_secret: document_key_shadow.decrypted_secret.into(), - common_point: document_key_shadow.common_point.expect("always filled when requesting document_key_shadow; qed").into(), - decrypt_shadows: document_key_shadow.decrypt_shadows.expect("always filled when requesting document_key_shadow; qed").into_iter().map(Into::into).collect(), - }; - match serde_json::to_vec(&document_key_shadow) { - Ok(document_key) => { - res.headers_mut().set(header::ContentType::json()); - if let Err(err) = res.send(&document_key) { - // nothing to do, but to log an error - warn!(target: "secretstore", "response to request {} has failed with: {}", req.uri, err); - } - }, - Err(err) => { - warn!(target: "secretstore", "response to request {} has failed with: {}", req.uri, err); - } - } - }, - Err(err) => return_error(res, err), - } + })); + }, + Request::SignMessage(document, signature, message_hash) => { + return_message_signature(req, res, self.handler.key_server.sign_message(&document, &signature, message_hash) + .map_err(|err| { + warn!(target: "secretstore", "SignMessage request {} has failed with: {}", req_uri, err); + err + })); }, Request::Invalid => { warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri); @@ -166,17 +199,45 @@ impl HttpHandler for KeyServerHttpHandler where T: KeyServer + 'static { } } -fn return_document_key(req: HttpRequest, mut res: HttpResponse, document_key: Result) { - let document_key = document_key. 
- and_then(|k| serde_json::to_vec(&SerializableBytes(k)).map_err(|e| Error::Serde(e.to_string()))); - match document_key { - Ok(document_key) => { - res.headers_mut().set(header::ContentType::plaintext()); - if let Err(err) = res.send(&document_key) { - // nothing to do, but to log an error +fn return_empty(req: HttpRequest, res: HttpResponse, empty: Result<(), Error>) { + return_bytes::(req, res, empty.map(|_| None)) +} + +fn return_server_public_key(req: HttpRequest, res: HttpResponse, server_public: Result) { + return_bytes(req, res, server_public.map(|k| Some(SerializablePublic(k)))) +} + +fn return_message_signature(req: HttpRequest, res: HttpResponse, signature: Result) { + return_bytes(req, res, signature.map(|s| Some(SerializableBytes(s)))) +} + +fn return_document_key(req: HttpRequest, res: HttpResponse, document_key: Result) { + return_bytes(req, res, document_key.map(|k| Some(SerializableBytes(k)))) +} + +fn return_document_key_shadow(req: HttpRequest, res: HttpResponse, document_key_shadow: Result) { + return_bytes(req, res, document_key_shadow.map(|k| Some(SerializableEncryptedDocumentKeyShadow { + decrypted_secret: k.decrypted_secret.into(), + common_point: k.common_point.expect("always filled when requesting document_key_shadow; qed").into(), + decrypt_shadows: k.decrypt_shadows.expect("always filled when requesting document_key_shadow; qed").into_iter().map(Into::into).collect(), + }))) +} + +fn return_bytes(req: HttpRequest, mut res: HttpResponse, result: Result, Error>) { + match result { + Ok(Some(result)) => match serde_json::to_vec(&result) { + Ok(result) => { + res.headers_mut().set(header::ContentType::json()); + if let Err(err) = res.send(&result) { + // nothing to do, but to log an error + warn!(target: "secretstore", "response to request {} has failed with: {}", req.uri, err); + } + }, + Err(err) => { warn!(target: "secretstore", "response to request {} has failed with: {}", req.uri, err); } }, + Ok(None) => *res.status_mut() = 
HttpStatusCode::Ok, Err(err) => return_error(res, err), } } @@ -190,6 +251,13 @@ fn return_error(mut res: HttpResponse, err: Error) { Error::Database(_) => *res.status_mut() = HttpStatusCode::InternalServerError, Error::Internal(_) => *res.status_mut() = HttpStatusCode::InternalServerError, } + + // return error text. ignore errors when returning error + let error_text = format!("\"{}\"", err); + if let Ok(error_text) = serde_json::to_vec(&error_text) { + res.headers_mut().set(header::ContentType::json()); + let _ = res.send(&error_text); + } } fn parse_request(method: &HttpMethod, uri_path: &str) -> Request { @@ -202,24 +270,39 @@ fn parse_request(method: &HttpMethod, uri_path: &str) -> Request { if path.len() == 0 { return Request::Invalid; } - let (args_prefix, args_offset) = if &path[0] == "shadow" { - ("shadow", 1) - } else { - ("", 0) - }; - if path.len() < 2 + args_offset || path[args_offset].is_empty() || path[args_offset + 1].is_empty() { + let (is_shadow_request, args_offset) = if &path[0] == "shadow" { (true, 1) } else { (false, 0) }; + let args_count = path.len() - args_offset; + if args_count < 2 || path[args_offset].is_empty() || path[args_offset + 1].is_empty() { return Request::Invalid; } - let args_len = path.len(); - let document = path[args_offset].parse(); - let signature = path[args_offset + 1].parse(); - let threshold = (if args_len > args_offset + 2 { &path[args_offset + 2] } else { "" }).parse(); - match (args_prefix, args_len, method, document, signature, threshold) { - ("", 3, &HttpMethod::Post, Ok(document), Ok(signature), Ok(threshold)) => Request::GenerateDocumentKey(document, signature, threshold), - ("", 2, &HttpMethod::Get, Ok(document), Ok(signature), _) => Request::GetDocumentKey(document, signature), - ("shadow", 3, &HttpMethod::Get, Ok(document), Ok(signature), _) => Request::GetDocumentKeyShadow(document, signature), + let document = match path[args_offset].parse() { + Ok(document) => document, + _ => return Request::Invalid, + 
}; + let signature = match path[args_offset + 1].parse() { + Ok(signature) => signature, + _ => return Request::Invalid, + }; + + let threshold = path.get(args_offset + 2).map(|v| v.parse()); + let message_hash = path.get(args_offset + 2).map(|v| v.parse()); + let common_point = path.get(args_offset + 2).map(|v| v.parse()); + let encrypted_key = path.get(args_offset + 3).map(|v| v.parse()); + match (is_shadow_request, args_count, method, threshold, message_hash, common_point, encrypted_key) { + (true, 3, &HttpMethod::Post, Some(Ok(threshold)), _, _, _) => + Request::GenerateServerKey(document, signature, threshold), + (true, 4, &HttpMethod::Post, _, _, Some(Ok(common_point)), Some(Ok(encrypted_key))) => + Request::StoreDocumentKey(document, signature, common_point, encrypted_key), + (false, 3, &HttpMethod::Post, Some(Ok(threshold)), _, _, _) => + Request::GenerateDocumentKey(document, signature, threshold), + (false, 2, &HttpMethod::Get, _, _, _, _) => + Request::GetDocumentKey(document, signature), + (true, 2, &HttpMethod::Get, _, _, _, _) => + Request::GetDocumentKeyShadow(document, signature), + (false, 3, &HttpMethod::Get, _, Some(Ok(message_hash)), _, _) => + Request::SignMessage(document, signature, message_hash), _ => Request::Invalid, } } @@ -241,19 +324,49 @@ mod tests { #[test] fn parse_request_successful() { + // POST /shadow/{server_key_id}/{signature}/{threshold} => generate server key + assert_eq!(parse_request(&HttpMethod::Post, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2"), + Request::GenerateServerKey("0000000000000000000000000000000000000000000000000000000000000001".into(), + "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(), + 2)); + // POST 
/shadow/{server_key_id}/{signature}/{common_point}/{encrypted_key} => store encrypted document key + assert_eq!(parse_request(&HttpMethod::Post, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8/1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb"), + Request::StoreDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(), + "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(), + "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse().unwrap(), + "1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb".parse().unwrap())); + // POST /{server_key_id}/{signature}/{threshold} => generate server && document key + assert_eq!(parse_request(&HttpMethod::Post, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2"), + Request::GenerateDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(), + "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(), + 2)); + // GET /{server_key_id}/{signature} => get document key assert_eq!(parse_request(&HttpMethod::Get, 
"/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"), Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(), "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap())); assert_eq!(parse_request(&HttpMethod::Get, "/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"), Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(), "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap())); + // GET /shadow/{server_key_id}/{signature} => get document key shadow + assert_eq!(parse_request(&HttpMethod::Get, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"), + Request::GetDocumentKeyShadow("0000000000000000000000000000000000000000000000000000000000000001".into(), + "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap())); + // GET /{server_key_id}/{signature}/{message_hash} => sign message with server key + assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c"), + 
Request::SignMessage("0000000000000000000000000000000000000000000000000000000000000001".into(), + "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(), + "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap())); } #[test] fn parse_request_failed() { + assert_eq!(parse_request(&HttpMethod::Get, ""), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::Get, "/shadow"), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::Get, "///2"), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::Get, "/shadow///2"), Request::Invalid); assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001"), Request::Invalid); assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/"), Request::Invalid); assert_eq!(parse_request(&HttpMethod::Get, "/a/b"), Request::Invalid); - assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002"), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002"), Request::Invalid); } } diff --git a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index f960c5cc5..fd4e154fa 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -24,9 +24,10 @@ use ethcrypto; use ethkey; use 
super::acl_storage::AclStorage; use super::key_storage::KeyStorage; -use key_server_cluster::ClusterCore; -use traits::KeyServer; -use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey, DocumentEncryptedKeyShadow, ClusterConfiguration}; +use key_server_cluster::{math, ClusterCore}; +use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer}; +use types::all::{Error, Public, RequestSignature, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow, + ClusterConfiguration, MessageHash, EncryptedMessageSignature}; use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration}; /// Secret store key server implementation @@ -56,15 +57,41 @@ impl KeyServerImpl { } } -impl KeyServer for KeyServerImpl { - fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result { +impl KeyServer for KeyServerImpl {} + +impl ServerKeyGenerator for KeyServerImpl { + fn generate_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result { // recover requestor' public key from signature - let public = ethkey::recover(signature, document) + let public = ethkey::recover(signature, key_id) .map_err(|_| Error::BadSignature)?; - // generate document key - let encryption_session = self.data.lock().cluster.new_encryption_session(document.clone(), threshold)?; - let document_key = encryption_session.wait(None)?; + // generate server key + let generation_session = self.data.lock().cluster.new_generation_session(key_id.clone(), public, threshold)?; + generation_session.wait(None).map_err(Into::into) + } +} + +impl DocumentKeyServer for KeyServerImpl { + fn store_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> { + // store encrypted key + let encryption_session = self.data.lock().cluster.new_encryption_session(key_id.clone(), 
signature.clone(), common_point, encrypted_document_key)?; + encryption_session.wait(None).map_err(Into::into) + } + + fn generate_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result { + // recover requestor' public key from signature + let public = ethkey::recover(signature, key_id) + .map_err(|_| Error::BadSignature)?; + + // generate server key + let server_key = self.generate_key(key_id, signature, threshold)?; + + // generate random document key + let document_key = math::generate_random_point()?; + let encrypted_document_key = math::encrypt_secret(&document_key, &server_key)?; + + // store document key in the storage + self.store_document_key(key_id, signature, encrypted_document_key.common_point, encrypted_document_key.encrypted_point)?; // encrypt document key with requestor public key let document_key = ethcrypto::ecies::encrypt(&public, ðcrypto::DEFAULT_MAC, &document_key) @@ -72,14 +99,13 @@ impl KeyServer for KeyServerImpl { Ok(document_key) } - fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result { + fn restore_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result { // recover requestor' public key from signature - let public = ethkey::recover(signature, document) + let public = ethkey::recover(signature, key_id) .map_err(|_| Error::BadSignature)?; - // decrypt document key - let decryption_session = self.data.lock().cluster.new_decryption_session(document.clone(), signature.clone(), false)?; + let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), signature.clone(), false)?; let document_key = decryption_session.wait()?.decrypted_secret; // encrypt document key with requestor public key @@ -88,12 +114,34 @@ impl KeyServer for KeyServerImpl { Ok(document_key) } - fn document_key_shadow(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result { - let decryption_session = 
self.data.lock().cluster.new_decryption_session(document.clone(), signature.clone(), true)?; + fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result { + let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), signature.clone(), true)?; decryption_session.wait().map_err(Into::into) } } +impl MessageSigner for KeyServerImpl { + fn sign_message(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result { + // recover requestor' public key from signature + let public = ethkey::recover(signature, key_id) + .map_err(|_| Error::BadSignature)?; + + // sign message + let signing_session = self.data.lock().cluster.new_signing_session(key_id.clone(), signature.clone(), message)?; + let message_signature = signing_session.wait()?; + + // compose two message signature components into single one + let mut combined_signature = [0; 64]; + combined_signature[..32].clone_from_slice(&**message_signature.0); + combined_signature[32..].clone_from_slice(&**message_signature.1); + + // encrypt combined signature with requestor public key + let message_signature = ethcrypto::ecies::encrypt(&public, ðcrypto::DEFAULT_MAC, &combined_signature) + .map_err(|err| Error::Internal(format!("Error encrypting message signature: {}", err)))?; + Ok(message_signature) + } +} + impl KeyServerCore { pub fn new(config: &ClusterConfiguration, acl_storage: Arc, key_storage: Arc) -> Result { let config = NetClusterConfiguration { @@ -146,24 +194,46 @@ pub mod tests { use std::time; use std::sync::Arc; use ethcrypto; - use ethkey::{self, Random, Generator}; + use ethkey::{self, Secret, Random, Generator}; use acl_storage::tests::DummyAclStorage; use key_storage::tests::DummyKeyStorage; - use types::all::{Error, ClusterConfiguration, NodeAddress, RequestSignature, DocumentAddress, DocumentEncryptedKey, DocumentEncryptedKeyShadow}; - use super::{KeyServer, KeyServerImpl}; + use 
key_server_cluster::math; + use util::H256; + use types::all::{Error, Public, ClusterConfiguration, NodeAddress, RequestSignature, ServerKeyId, + EncryptedDocumentKey, EncryptedDocumentKeyShadow, MessageHash, EncryptedMessageSignature}; + use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer}; + use super::KeyServerImpl; pub struct DummyKeyServer; - impl KeyServer for DummyKeyServer { - fn generate_document_key(&self, _signature: &RequestSignature, _document: &DocumentAddress, _threshold: usize) -> Result { + impl KeyServer for DummyKeyServer {} + + impl ServerKeyGenerator for DummyKeyServer { + fn generate_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _threshold: usize) -> Result { + unimplemented!() + } + } + + impl DocumentKeyServer for DummyKeyServer { + fn store_document_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _common_point: Public, _encrypted_document_key: Public) -> Result<(), Error> { unimplemented!() } - fn document_key(&self, _signature: &RequestSignature, _document: &DocumentAddress) -> Result { + fn generate_document_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _threshold: usize) -> Result { unimplemented!() } - fn document_key_shadow(&self, _signature: &RequestSignature, _document: &DocumentAddress) -> Result { + fn restore_document_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature) -> Result { + unimplemented!() + } + + fn restore_document_key_shadow(&self, _key_id: &ServerKeyId, _signature: &RequestSignature) -> Result { + unimplemented!() + } + } + + impl MessageSigner for DummyKeyServer { + fn sign_message(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _message: MessageHash) -> Result { unimplemented!() } } @@ -228,12 +298,12 @@ pub mod tests { let document = Random.generate().unwrap().secret().clone(); let secret = Random.generate().unwrap().secret().clone(); let signature = ethkey::sign(&secret, &document).unwrap(); - let 
generated_key = key_servers[0].generate_document_key(&signature, &document, threshold).unwrap(); + let generated_key = key_servers[0].generate_document_key(&document, &signature, threshold).unwrap(); let generated_key = ethcrypto::ecies::decrypt(&secret, ðcrypto::DEFAULT_MAC, &generated_key).unwrap(); // now let's try to retrieve key back for key_server in key_servers.iter() { - let retrieved_key = key_server.document_key(&signature, &document).unwrap(); + let retrieved_key = key_server.restore_document_key(&document, &signature).unwrap(); let retrieved_key = ethcrypto::ecies::decrypt(&secret, ðcrypto::DEFAULT_MAC, &retrieved_key).unwrap(); assert_eq!(retrieved_key, generated_key); } @@ -250,15 +320,70 @@ pub mod tests { let document = Random.generate().unwrap().secret().clone(); let secret = Random.generate().unwrap().secret().clone(); let signature = ethkey::sign(&secret, &document).unwrap(); - let generated_key = key_servers[0].generate_document_key(&signature, &document, *threshold).unwrap(); + let generated_key = key_servers[0].generate_document_key(&document, &signature, *threshold).unwrap(); let generated_key = ethcrypto::ecies::decrypt(&secret, ðcrypto::DEFAULT_MAC, &generated_key).unwrap(); // now let's try to retrieve key back for key_server in key_servers.iter() { - let retrieved_key = key_server.document_key(&signature, &document).unwrap(); + let retrieved_key = key_server.restore_document_key(&document, &signature).unwrap(); let retrieved_key = ethcrypto::ecies::decrypt(&secret, ðcrypto::DEFAULT_MAC, &retrieved_key).unwrap(); assert_eq!(retrieved_key, generated_key); } } } + + #[test] + fn server_key_generation_and_storing_document_key_works_over_network_with_3_nodes() { + //::logger::init_log(); + let key_servers = make_key_servers(6090, 3); + + let test_cases = [0, 1, 2]; + for threshold in &test_cases { + // generate server key + let server_key_id = Random.generate().unwrap().secret().clone(); + let requestor_secret = 
Random.generate().unwrap().secret().clone(); + let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); + let server_public = key_servers[0].generate_key(&server_key_id, &signature, *threshold).unwrap(); + + // generate document key (this is done by KS client so that document key is unknown to any KS) + let generated_key = Random.generate().unwrap().public().clone(); + let encrypted_document_key = math::encrypt_secret(&generated_key, &server_public).unwrap(); + + // store document key + key_servers[0].store_document_key(&server_key_id, &signature, encrypted_document_key.common_point, encrypted_document_key.encrypted_point).unwrap(); + + // now let's try to retrieve key back + for key_server in key_servers.iter() { + let retrieved_key = key_server.restore_document_key(&server_key_id, &signature).unwrap(); + let retrieved_key = ethcrypto::ecies::decrypt(&requestor_secret, ðcrypto::DEFAULT_MAC, &retrieved_key).unwrap(); + let retrieved_key = Public::from_slice(&retrieved_key); + assert_eq!(retrieved_key, generated_key); + } + } + } + + #[test] + fn server_key_generation_and_message_signing_works_over_network_with_3_nodes() { + //::logger::init_log(); + let key_servers = make_key_servers(6100, 3); + + let test_cases = [0, 1, 2]; + for threshold in &test_cases { + // generate server key + let server_key_id = Random.generate().unwrap().secret().clone(); + let requestor_secret = Random.generate().unwrap().secret().clone(); + let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); + let server_public = key_servers[0].generate_key(&server_key_id, &signature, *threshold).unwrap(); + + // sign message + let message_hash = H256::from(42); + let combined_signature = key_servers[0].sign_message(&server_key_id, &signature, message_hash.clone()).unwrap(); + let combined_signature = ethcrypto::ecies::decrypt(&requestor_secret, ðcrypto::DEFAULT_MAC, &combined_signature).unwrap(); + let signature_c = Secret::from_slice(&combined_signature[..32]); + 
let signature_s = Secret::from_slice(&combined_signature[32..]); + + // check signature + assert_eq!(math::verify_signature(&server_public, &(signature_c, signature_s), &message_hash), Ok(true)); + } + } } diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 209126605..c86f30267 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -16,9 +16,8 @@ use std::io; use std::time; -use std::sync::{Arc, Weak}; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::collections::{BTreeMap, BTreeSet, VecDeque}; +use std::sync::Arc; +use std::collections::{BTreeMap, BTreeSet}; use std::collections::btree_map::Entry; use std::net::{SocketAddr, IpAddr}; use futures::{finished, failed, Future, Stream, BoxFuture}; @@ -27,13 +26,19 @@ use parking_lot::{RwLock, Mutex}; use tokio_io::IoFuture; use tokio_core::reactor::{Handle, Remote, Interval}; use tokio_core::net::{TcpListener, TcpStream}; -use ethkey::{Public, Secret, KeyPair, Signature, Random, Generator}; -use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, DocumentEncryptedKeyShadow}; -use key_server_cluster::message::{self, Message, ClusterMessage, EncryptionMessage, DecryptionMessage}; -use key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl, SessionState as DecryptionSessionState, - SessionParams as DecryptionSessionParams, Session as DecryptionSession, DecryptionSessionId}; -use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl, SessionState as EncryptionSessionState, - SessionParams as EncryptionSessionParams, Session as EncryptionSession}; +use ethkey::{Public, KeyPair, Signature, Random, Generator}; +use util::H256; +use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage}; +use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, GenerationSessionWrapper, EncryptionSessionWrapper, + 
DecryptionSessionWrapper, SigningSessionWrapper}; +use key_server_cluster::message::{self, Message, ClusterMessage, GenerationMessage, EncryptionMessage, DecryptionMessage, + SigningMessage, ConsensusMessage}; +use key_server_cluster::generation_session::{Session as GenerationSession, SessionState as GenerationSessionState}; +#[cfg(test)] +use key_server_cluster::generation_session::SessionImpl as GenerationSessionImpl; +use key_server_cluster::decryption_session::{Session as DecryptionSession, DecryptionSessionId}; +use key_server_cluster::encryption_session::{Session as EncryptionSession, SessionState as EncryptionSessionState}; +use key_server_cluster::signing_session::{Session as SigningSession, SigningSessionId}; use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream, read_encrypted_message, WriteMessage, write_encrypted_message}; use key_server_cluster::net::{accept_connection as net_accept_connection, connect as net_connect, Connection as NetConnection}; @@ -50,18 +55,6 @@ const KEEP_ALIVE_SEND_INTERVAL: u64 = 30; /// we must treat this node as non-responding && disconnect from it. const KEEP_ALIVE_DISCONNECT_INTERVAL: u64 = 60; -/// When there are no encryption session-related messages for ENCRYPTION_SESSION_TIMEOUT_INTERVAL seconds, -/// we must treat this session as stalled && finish it with an error. -/// This timeout is for cases when node is responding to KeepAlive messages, but intentionally ignores -/// session messages. -const ENCRYPTION_SESSION_TIMEOUT_INTERVAL: u64 = 60; - -/// When there are no decryption session-related messages for DECRYPTION_SESSION_TIMEOUT_INTERVAL seconds, -/// we must treat this session as stalled && finish it with an error. -/// This timeout is for cases when node is responding to KeepAlive messages, but intentionally ignores -/// session messages. -const DECRYPTION_SESSION_TIMEOUT_INTERVAL: u64 = 60; - /// Encryption sesion timeout interval. It works /// Empty future. 
type BoxedEmptyFuture = BoxFuture<(), ()>; @@ -70,23 +63,27 @@ type BoxedEmptyFuture = BoxFuture<(), ()>; pub trait ClusterClient: Send + Sync { /// Get cluster state. fn cluster_state(&self) -> ClusterState; + /// Start new generation session. + fn new_generation_session(&self, session_id: SessionId, author: Public, threshold: usize) -> Result, Error>; /// Start new encryption session. - fn new_encryption_session(&self, session_id: SessionId, threshold: usize) -> Result, Error>; + fn new_encryption_session(&self, session_id: SessionId, requestor_signature: Signature, common_point: Public, encrypted_point: Public) -> Result, Error>; /// Start new decryption session. fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature, is_shadow_decryption: bool) -> Result, Error>; + /// Start new signing session. + fn new_signing_session(&self, session_id: SessionId, requestor_signature: Signature, message_hash: H256) -> Result, Error>; #[cfg(test)] - /// Ask node to make 'faulty' encryption sessions. - fn make_faulty_encryption_sessions(&self); + /// Ask node to make 'faulty' generation sessions. + fn make_faulty_generation_sessions(&self); #[cfg(test)] - /// Get active encryption session with given id. - fn encryption_session(&self, session_id: &SessionId) -> Option>; + /// Get active generation session with given id. + fn generation_session(&self, session_id: &SessionId) -> Option>; #[cfg(test)] /// Try connect to disconnected nodes. fn connect(&self); } -/// Cluster access for single encryption/decryption participant. +/// Cluster access for single encryption/decryption/signing participant. pub trait Cluster: Send + Sync { /// Broadcast message to all other nodes. fn broadcast(&self, message: Message) -> Result<(), Error>; @@ -166,52 +163,6 @@ pub struct ClusterConnections { pub connections: RwLock>>, } -/// Active sessions on this cluster. -pub struct ClusterSessions { - /// Self node id. - pub self_node_id: NodeId, - /// All nodes ids. 
- pub nodes: BTreeSet, - /// Reference to key storage - pub key_storage: Arc, - /// Reference to ACL storage - pub acl_storage: Arc, - /// Active encryption sessions. - pub encryption_sessions: RwLock>, - /// Active decryption sessions. - pub decryption_sessions: RwLock>, - /// Make faulty encryption sessions. - pub make_faulty_encryption_sessions: AtomicBool, -} - -/// Encryption session and its message queue. -pub struct QueuedEncryptionSession { - /// Session master. - pub master: NodeId, - /// Cluster view. - pub cluster_view: Arc, - /// Last received message time. - pub last_message_time: time::Instant, - /// Encryption session. - pub session: Arc, - /// Messages queue. - pub queue: VecDeque<(NodeId, EncryptionMessage)>, -} - -/// Decryption session and its message queue. -pub struct QueuedDecryptionSession { - /// Session master. - pub master: NodeId, - /// Cluster view. - pub cluster_view: Arc, - /// Last received message time. - pub last_message_time: time::Instant, - /// Decryption session. - pub session: Arc, - /// Messages queue. - pub queue: VecDeque<(NodeId, DecryptionMessage)>, -} - /// Cluster view core. struct ClusterViewCore { /// Cluster reference. @@ -236,28 +187,6 @@ pub struct Connection { last_message_time: Mutex, } -/// Encryption session implementation, which removes session from cluster on drop. -struct EncryptionSessionWrapper { - /// Wrapped session. - session: Arc, - /// Session Id. - session_id: SessionId, - /// Cluster data reference. - cluster: Weak, -} - -/// Decryption session implementation, which removes session from cluster on drop. -struct DecryptionSessionWrapper { - /// Wrapped session. - session: Arc, - /// Session Id. - session_id: SessionId, - /// Session sub id. - access_key: Secret, - /// Cluster data reference. 
- cluster: Weak, -} - impl ClusterCore { pub fn new(handle: Handle, config: ClusterConfiguration) -> Result, Error> { let listen_address = make_socket_address(&config.listen_address.0, config.listen_address.1)?; @@ -461,62 +390,49 @@ impl ClusterCore { connection.set_last_message_time(time::Instant::now()); trace!(target: "secretstore_net", "{}: received message {} from {}", data.self_key_pair.public(), message, connection.node_id()); match message { + Message::Generation(message) => ClusterCore::process_generation_message(data, connection, message), Message::Encryption(message) => ClusterCore::process_encryption_message(data, connection, message), Message::Decryption(message) => ClusterCore::process_decryption_message(data, connection, message), + Message::Signing(message) => ClusterCore::process_signing_message(data, connection, message), Message::Cluster(message) => ClusterCore::process_cluster_message(data, connection, message), } } - /// Process single encryption message from the connection. - fn process_encryption_message(data: Arc, connection: Arc, mut message: EncryptionMessage) { + /// Process single generation message from the connection. 
+ fn process_generation_message(data: Arc, connection: Arc, mut message: GenerationMessage) { let session_id = message.session_id().clone(); let mut sender = connection.node_id().clone(); let session = match message { - EncryptionMessage::InitializeSession(_) => { + GenerationMessage::InitializeSession(_) => { let mut connected_nodes = data.connections.connected_nodes(); connected_nodes.insert(data.self_key_pair.public().clone()); let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); - data.sessions.new_encryption_session(sender.clone(), session_id.clone(), cluster) + data.sessions.new_generation_session(sender.clone(), session_id.clone(), cluster) }, _ => { - data.sessions.encryption_session(&session_id) + data.sessions.generation_sessions.get(&session_id) .ok_or(Error::InvalidSessionId) }, }; let mut is_queued_message = false; loop { - match session.clone().and_then(|session| match message { - EncryptionMessage::InitializeSession(ref message) => - session.on_initialize_session(sender.clone(), message), - EncryptionMessage::ConfirmInitialization(ref message) => - session.on_confirm_initialization(sender.clone(), message), - EncryptionMessage::CompleteInitialization(ref message) => - session.on_complete_initialization(sender.clone(), message), - EncryptionMessage::KeysDissemination(ref message) => - session.on_keys_dissemination(sender.clone(), message), - EncryptionMessage::PublicKeyShare(ref message) => - session.on_public_key_share(sender.clone(), message), - EncryptionMessage::SessionError(ref message) => - session.on_session_error(sender.clone(), message), - EncryptionMessage::SessionCompleted(ref message) => - session.on_session_completed(sender.clone(), message), - }) { + match session.clone().and_then(|session| session.process_message(&sender, &message)) { Ok(_) => { // if session is completed => stop let session = session.clone().expect("session.method() call finished with success; session exists; qed"); let session_state = 
session.state(); - if session_state == EncryptionSessionState::Finished { - info!(target: "secretstore_net", "{}: encryption session completed", data.self_key_pair.public()); + if session_state == GenerationSessionState::Finished { + info!(target: "secretstore_net", "{}: generation session completed", data.self_key_pair.public()); } - if session_state == EncryptionSessionState::Finished || session_state == EncryptionSessionState::Failed { - data.sessions.remove_encryption_session(&session_id); + if session_state == GenerationSessionState::Finished || session_state == GenerationSessionState::Failed { + data.sessions.generation_sessions.remove(&session_id); break; } // try to dequeue message - match data.sessions.dequeue_encryption_message(&session_id) { + match data.sessions.generation_sessions.dequeue_message(&session_id) { Some((msg_sender, msg)) => { is_queued_message = true; sender = msg_sender; @@ -526,17 +442,86 @@ impl ClusterCore { } }, Err(Error::TooEarlyForRequest) => { - data.sessions.enqueue_encryption_message(&session_id, sender, message, is_queued_message); + data.sessions.generation_sessions.enqueue_message(&session_id, sender, message, is_queued_message); break; }, Err(err) => { - warn!(target: "secretstore_net", "{}: encryption session error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); - data.sessions.respond_with_encryption_error(&session_id, message::SessionError { + warn!(target: "secretstore_net", "{}: generation session error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); + data.sessions.respond_with_generation_error(&session_id, message::SessionError { session: session_id.clone().into(), error: format!("{:?}", err), }); if err != Error::InvalidSessionId { - data.sessions.remove_encryption_session(&session_id); + data.sessions.generation_sessions.remove(&session_id); + } + break; + }, + } + } + } + + /// Process single encryption message from 
the connection. + fn process_encryption_message(data: Arc, connection: Arc, mut message: EncryptionMessage) { + let session_id = message.session_id().clone(); + let mut sender = connection.node_id().clone(); + let session = match message { + EncryptionMessage::InitializeEncryptionSession(_) => { + let mut connected_nodes = data.connections.connected_nodes(); + connected_nodes.insert(data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); + data.sessions.new_encryption_session(sender.clone(), session_id.clone(), cluster) + }, + _ => { + data.sessions.encryption_sessions.get(&session_id) + .ok_or(Error::InvalidSessionId) + }, + }; + + let mut is_queued_message = false; + loop { + match session.clone().and_then(|session| match message { + EncryptionMessage::InitializeEncryptionSession(ref message) => + session.on_initialize_session(sender.clone(), message), + EncryptionMessage::ConfirmEncryptionInitialization(ref message) => + session.on_confirm_initialization(sender.clone(), message), + EncryptionMessage::EncryptionSessionError(ref message) => + session.on_session_error(sender.clone(), message), + }) { + Ok(_) => { + // if session is completed => stop + let session = session.clone().expect("session.method() call finished with success; session exists; qed"); + let session_state = session.state(); + if session_state == EncryptionSessionState::Finished { + info!(target: "secretstore_net", "{}: encryption session completed", data.self_key_pair.public()); + } + if session_state == EncryptionSessionState::Finished || session_state == EncryptionSessionState::Failed { + data.sessions.encryption_sessions.remove(&session_id); + break; + } + + // try to dequeue message + match data.sessions.encryption_sessions.dequeue_message(&session_id) { + Some((msg_sender, msg)) => { + is_queued_message = true; + sender = msg_sender; + message = msg; + }, + None => break, + } + }, + Err(Error::TooEarlyForRequest) => { + 
data.sessions.encryption_sessions.enqueue_message(&session_id, sender, message, is_queued_message); + break; + }, + Err(err) => { + warn!(target: "secretstore_net", "{}: encryption session error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); + data.sessions.respond_with_encryption_error(&session_id, message::EncryptionSessionError { + session: session_id.clone().into(), + error: format!("{:?}", err), + }); + if err != Error::InvalidSessionId { + data.sessions.encryption_sessions.remove(&session_id); } break; }, @@ -548,63 +533,45 @@ impl ClusterCore { fn process_decryption_message(data: Arc, connection: Arc, mut message: DecryptionMessage) { let session_id = message.session_id().clone(); let sub_session_id = message.sub_session_id().clone(); + let decryption_session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); let mut sender = connection.node_id().clone(); let session = match message { - DecryptionMessage::InitializeDecryptionSession(_) => { + DecryptionMessage::DecryptionConsensusMessage(ref message) if match message.message { + ConsensusMessage::InitializeConsensusSession(_) => true, + _ => false, + } => { let mut connected_nodes = data.connections.connected_nodes(); connected_nodes.insert(data.self_key_pair.public().clone()); let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); - data.sessions.new_decryption_session(sender.clone(), session_id.clone(), sub_session_id.clone(), cluster) + data.sessions.new_decryption_session(sender.clone(), session_id.clone(), sub_session_id.clone(), cluster, None) }, _ => { - data.sessions.decryption_session(&session_id, &sub_session_id) + data.sessions.decryption_sessions.get(&decryption_session_id) .ok_or(Error::InvalidSessionId) }, }; - let mut is_queued_message = false; loop { - match session.clone().and_then(|session| match message { - DecryptionMessage::InitializeDecryptionSession(ref message) => - 
session.on_initialize_session(sender.clone(), message), - DecryptionMessage::ConfirmDecryptionInitialization(ref message) => - session.on_confirm_initialization(sender.clone(), message), - DecryptionMessage::RequestPartialDecryption(ref message) => - session.on_partial_decryption_requested(sender.clone(), message), - DecryptionMessage::PartialDecryption(ref message) => - session.on_partial_decryption(sender.clone(), message), - DecryptionMessage::DecryptionSessionError(ref message) => - session.on_session_error(sender.clone(), message), - DecryptionMessage::DecryptionSessionCompleted(ref message) => - session.on_session_completed(sender.clone(), message), - }) { + match session.clone().and_then(|session| session.process_message(&sender, &message)) { Ok(_) => { // if session is completed => stop let session = session.clone().expect("session.method() call finished with success; session exists; qed"); - let session_state = session.state(); - if session_state == DecryptionSessionState::Finished { + if session.is_finished() { info!(target: "secretstore_net", "{}: decryption session completed", data.self_key_pair.public()); - } - if session_state == DecryptionSessionState::Finished || session_state == DecryptionSessionState::Failed { - data.sessions.remove_decryption_session(&session_id, &sub_session_id); + data.sessions.decryption_sessions.remove(&decryption_session_id); break; } // try to dequeue message - match data.sessions.dequeue_decryption_message(&session_id, &sub_session_id) { + match data.sessions.decryption_sessions.dequeue_message(&decryption_session_id) { Some((msg_sender, msg)) => { - is_queued_message = true; sender = msg_sender; message = msg; }, None => break, } }, - Err(Error::TooEarlyForRequest) => { - data.sessions.enqueue_decryption_message(&session_id, &sub_session_id, sender, message, is_queued_message); - break; - }, Err(err) => { warn!(target: "secretstore_net", "{}: decryption session error {} when processing message {} from node {}", 
data.self_key_pair.public(), err, message, sender); data.sessions.respond_with_decryption_error(&session_id, &sub_session_id, &sender, message::DecryptionSessionError { @@ -613,7 +580,72 @@ impl ClusterCore { error: format!("{:?}", err), }); if err != Error::InvalidSessionId { - data.sessions.remove_decryption_session(&session_id, &sub_session_id); + data.sessions.decryption_sessions.remove(&decryption_session_id); + } + break; + }, + } + } + } + + /// Process singlesigning message from the connection. + fn process_signing_message(data: Arc, connection: Arc, mut message: SigningMessage) { + let session_id = message.session_id().clone(); + let sub_session_id = message.sub_session_id().clone(); + let signing_session_id = SigningSessionId::new(session_id.clone(), sub_session_id.clone()); + let mut sender = connection.node_id().clone(); + let session = match message { + SigningMessage::SigningConsensusMessage(ref message) if match message.message { + ConsensusMessage::InitializeConsensusSession(_) => true, + _ => false, + } => { + let mut connected_nodes = data.connections.connected_nodes(); + connected_nodes.insert(data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); + data.sessions.new_signing_session(sender.clone(), session_id.clone(), sub_session_id.clone(), cluster, None) + }, + _ => { + data.sessions.signing_sessions.get(&signing_session_id) + .ok_or(Error::InvalidSessionId) + }, + }; + + let mut is_queued_message = false; + loop { + match session.clone().and_then(|session| session.process_message(&sender, &message)) { + Ok(_) => { + // if session is completed => stop + let session = session.clone().expect("session.method() call finished with success; session exists; qed"); + if session.is_finished() { + info!(target: "secretstore_net", "{}: signing session completed", data.self_key_pair.public()); + data.sessions.signing_sessions.remove(&signing_session_id); + break; + } + + // try to dequeue message 
+ match data.sessions.signing_sessions.dequeue_message(&signing_session_id) { + Some((msg_sender, msg)) => { + is_queued_message = true; + sender = msg_sender; + message = msg; + }, + None => break, + } + }, + Err(Error::TooEarlyForRequest) => { + data.sessions.signing_sessions.enqueue_message(&signing_session_id, sender, message, is_queued_message); + break; + }, + Err(err) => { + warn!(target: "secretstore_net", "{}: signing session error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); + data.sessions.respond_with_signing_error(&session_id, &sub_session_id, &sender, message::SigningSessionError { + session: session_id.clone().into(), + sub_session: sub_session_id.clone().into(), + error: format!("{:?}", err), + }); + if err != Error::InvalidSessionId { + data.sessions.signing_sessions.remove(&signing_session_id); } break; }, @@ -702,213 +734,6 @@ impl ClusterConnections { } } -impl ClusterSessions { - pub fn new(config: &ClusterConfiguration) -> Self { - ClusterSessions { - self_node_id: config.self_key_pair.public().clone(), - nodes: config.nodes.keys().cloned().collect(), - acl_storage: config.acl_storage.clone(), - key_storage: config.key_storage.clone(), - encryption_sessions: RwLock::new(BTreeMap::new()), - decryption_sessions: RwLock::new(BTreeMap::new()), - make_faulty_encryption_sessions: AtomicBool::new(false), - } - } - - pub fn new_encryption_session(&self, master: NodeId, session_id: SessionId, cluster: Arc) -> Result, Error> { - let mut encryption_sessions = self.encryption_sessions.write(); - // check that there's no active encryption session with the same id - if encryption_sessions.contains_key(&session_id) { - return Err(Error::DuplicateSessionId); - } - // check that there's no finished encryption session with the same id - if self.key_storage.contains(&session_id) { - return Err(Error::DuplicateSessionId); - } - - // communicating to all other nodes is crucial for encryption session - // => check 
that we have connections to all cluster nodes - if self.nodes.iter().any(|n| !cluster.is_connected(n)) { - return Err(Error::NodeDisconnected); - } - - let session = Arc::new(EncryptionSessionImpl::new(EncryptionSessionParams { - id: session_id.clone(), - self_node_id: self.self_node_id.clone(), - key_storage: self.key_storage.clone(), - cluster: cluster.clone(), - })); - let encryption_session = QueuedEncryptionSession { - master: master, - cluster_view: cluster, - last_message_time: time::Instant::now(), - session: session.clone(), - queue: VecDeque::new() - }; - if self.make_faulty_encryption_sessions.load(Ordering::Relaxed) { - encryption_session.session.simulate_faulty_behaviour(); - } - encryption_sessions.insert(session_id, encryption_session); - Ok(session) - } - - pub fn remove_encryption_session(&self, session_id: &SessionId) { - self.encryption_sessions.write().remove(session_id); - } - - pub fn encryption_session(&self, session_id: &SessionId) -> Option> { - self.encryption_sessions.read().get(session_id).map(|s| s.session.clone()) - } - - pub fn enqueue_encryption_message(&self, session_id: &SessionId, sender: NodeId, message: EncryptionMessage, is_queued_message: bool) { - self.encryption_sessions.write().get_mut(session_id) - .map(|session| if is_queued_message { session.queue.push_front((sender, message)) } - else { session.queue.push_back((sender, message)) }); - } - - pub fn dequeue_encryption_message(&self, session_id: &SessionId) -> Option<(NodeId, EncryptionMessage)> { - self.encryption_sessions.write().get_mut(session_id) - .and_then(|session| session.queue.pop_front()) - } - - pub fn respond_with_encryption_error(&self, session_id: &SessionId, error: message::SessionError) { - self.encryption_sessions.read().get(session_id) - .map(|s| { - // error in encryption session is considered fatal - // => broadcast error - - // do not bother processing send error, as we already processing error - let _ = 
s.cluster_view.broadcast(Message::Encryption(EncryptionMessage::SessionError(error))); - }); - } - - #[cfg(test)] - pub fn make_faulty_encryption_sessions(&self) { - self.make_faulty_encryption_sessions.store(true, Ordering::Relaxed); - } - - pub fn new_decryption_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, cluster: Arc) -> Result, Error> { - let mut decryption_sessions = self.decryption_sessions.write(); - let session_id = DecryptionSessionId::new(session_id, sub_session_id); - if decryption_sessions.contains_key(&session_id) { - return Err(Error::DuplicateSessionId); - } - - // some of nodes, which were encrypting secret may be down - // => do not use these in decryption session - let mut encrypted_data = self.key_storage.get(&session_id.id).map_err(|e| Error::KeyStorage(e.into()))?; - let disconnected_nodes: BTreeSet<_> = encrypted_data.id_numbers.keys().cloned().collect(); - let disconnected_nodes: BTreeSet<_> = disconnected_nodes.difference(&cluster.nodes()).cloned().collect(); - for disconnected_node in disconnected_nodes { - encrypted_data.id_numbers.remove(&disconnected_node); - } - - let session = Arc::new(DecryptionSessionImpl::new(DecryptionSessionParams { - id: session_id.id.clone(), - access_key: session_id.access_key.clone(), - self_node_id: self.self_node_id.clone(), - encrypted_data: encrypted_data, - acl_storage: self.acl_storage.clone(), - cluster: cluster.clone(), - })?); - let decryption_session = QueuedDecryptionSession { - master: master, - cluster_view: cluster, - last_message_time: time::Instant::now(), - session: session.clone(), - queue: VecDeque::new() - }; - decryption_sessions.insert(session_id, decryption_session); - Ok(session) - } - - pub fn remove_decryption_session(&self, session_id: &SessionId, sub_session_id: &Secret) { - let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); - self.decryption_sessions.write().remove(&session_id); - } - - pub fn 
decryption_session(&self, session_id: &SessionId, sub_session_id: &Secret) -> Option> { - let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); - self.decryption_sessions.read().get(&session_id).map(|s| s.session.clone()) - } - - pub fn enqueue_decryption_message(&self, session_id: &SessionId, sub_session_id: &Secret, sender: NodeId, message: DecryptionMessage, is_queued_message: bool) { - let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); - self.decryption_sessions.write().get_mut(&session_id) - .map(|session| if is_queued_message { session.queue.push_front((sender, message)) } - else { session.queue.push_back((sender, message)) }); - } - - pub fn dequeue_decryption_message(&self, session_id: &SessionId, sub_session_id: &Secret) -> Option<(NodeId, DecryptionMessage)> { - let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); - self.decryption_sessions.write().get_mut(&session_id) - .and_then(|session| session.queue.pop_front()) - } - - pub fn respond_with_decryption_error(&self, session_id: &SessionId, sub_session_id: &Secret, to: &NodeId, error: message::DecryptionSessionError) { - let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); - self.decryption_sessions.read().get(&session_id) - .map(|s| { - // error in decryption session is non-fatal, if occurs on slave node - // => either respond with error - // => or broadcast error - - // do not bother processing send error, as we already processing error - if &s.master == s.session.node() { - let _ = s.cluster_view.broadcast(Message::Decryption(DecryptionMessage::DecryptionSessionError(error))); - } else { - let _ = s.cluster_view.send(to, Message::Decryption(DecryptionMessage::DecryptionSessionError(error))); - } - }); - } - - fn stop_stalled_sessions(&self) { - { - let sessions = self.encryption_sessions.write(); - for sid in sessions.keys().collect::>() { - let session = 
sessions.get(&sid).expect("enumerating only existing sessions; qed"); - if time::Instant::now() - session.last_message_time > time::Duration::from_secs(ENCRYPTION_SESSION_TIMEOUT_INTERVAL) { - session.session.on_session_timeout(); - if session.session.state() == EncryptionSessionState::Finished - || session.session.state() == EncryptionSessionState::Failed { - self.remove_encryption_session(&sid); - } - } - } - } - { - let sessions = self.decryption_sessions.write(); - for sid in sessions.keys().collect::>() { - let session = sessions.get(&sid).expect("enumerating only existing sessions; qed"); - if time::Instant::now() - session.last_message_time > time::Duration::from_secs(DECRYPTION_SESSION_TIMEOUT_INTERVAL) { - session.session.on_session_timeout(); - if session.session.state() == DecryptionSessionState::Finished - || session.session.state() == DecryptionSessionState::Failed { - self.remove_decryption_session(&sid.id, &sid.access_key); - } - } - } - } - } - - pub fn on_connection_timeout(&self, node_id: &NodeId) { - for (sid, session) in self.encryption_sessions.read().iter() { - session.session.on_node_timeout(node_id); - if session.session.state() == EncryptionSessionState::Finished - || session.session.state() == EncryptionSessionState::Failed { - self.remove_encryption_session(sid); - } - } - for (sid, session) in self.decryption_sessions.read().iter() { - session.session.on_node_timeout(node_id); - if session.session.state() == DecryptionSessionState::Finished - || session.session.state() == DecryptionSessionState::Failed { - self.remove_decryption_session(&sid.id, &sid.access_key); - } - } - } -} - impl ClusterData { pub fn new(handle: &Handle, config: ClusterConfiguration, connections: ClusterConnections, sessions: ClusterSessions) -> Arc { Arc::new(ClusterData { @@ -926,6 +751,11 @@ impl ClusterData { self.connections.get(node) } + /// Get sessions reference. 
+ pub fn sessions(&self) -> &ClusterSessions { + &self.sessions + } + /// Spawns a future using thread pool and schedules execution of it with event loop handle. pub fn spawn(&self, f: F) where F: Future + Send + 'static, F::Item: Send + 'static, F::Error: Send + 'static { let pool_work = self.pool.spawn(f); @@ -1028,13 +858,23 @@ impl ClusterClient for ClusterClientImpl { self.data.connections.cluster_state() } - fn new_encryption_session(&self, session_id: SessionId, threshold: usize) -> Result, Error> { + fn new_generation_session(&self, session_id: SessionId, author: Public, threshold: usize) -> Result, Error> { let mut connected_nodes = self.data.connections.connected_nodes(); connected_nodes.insert(self.data.self_key_pair.public().clone()); let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); - let session = self.data.sessions.new_encryption_session(self.data.self_key_pair.public().clone(), session_id.clone(), cluster)?; - session.initialize(threshold, connected_nodes)?; + let session = self.data.sessions.new_generation_session(self.data.self_key_pair.public().clone(), session_id, cluster)?; + session.initialize(author, threshold, connected_nodes)?; + Ok(GenerationSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)) + } + + fn new_encryption_session(&self, session_id: SessionId, requestor_signature: Signature, common_point: Public, encrypted_point: Public) -> Result, Error> { + let mut connected_nodes = self.data.connections.connected_nodes(); + connected_nodes.insert(self.data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); + let session = self.data.sessions.new_encryption_session(self.data.self_key_pair.public().clone(), session_id, cluster)?; + session.initialize(requestor_signature, common_point, encrypted_point)?; Ok(EncryptionSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)) } @@ -1044,9 +884,20 @@ impl 
ClusterClient for ClusterClientImpl { let access_key = Random.generate()?.secret().clone(); let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); - let session = self.data.sessions.new_decryption_session(self.data.self_key_pair.public().clone(), session_id, access_key.clone(), cluster)?; - session.initialize(requestor_signature, is_shadow_decryption)?; - Ok(DecryptionSessionWrapper::new(Arc::downgrade(&self.data), session_id, access_key, session)) + let session = self.data.sessions.new_decryption_session(self.data.self_key_pair.public().clone(), session_id, access_key.clone(), cluster, Some(requestor_signature))?; + session.initialize(is_shadow_decryption)?; + Ok(DecryptionSessionWrapper::new(Arc::downgrade(&self.data), DecryptionSessionId::new(session_id, access_key), session)) + } + + fn new_signing_session(&self, session_id: SessionId, requestor_signature: Signature, message_hash: H256) -> Result, Error> { + let mut connected_nodes = self.data.connections.connected_nodes(); + connected_nodes.insert(self.data.self_key_pair.public().clone()); + + let access_key = Random.generate()?.secret().clone(); + let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); + let session = self.data.sessions.new_signing_session(self.data.self_key_pair.public().clone(), session_id, access_key.clone(), cluster, Some(requestor_signature))?; + session.initialize(message_hash)?; + Ok(SigningSessionWrapper::new(Arc::downgrade(&self.data), SigningSessionId::new(session_id, access_key), session)) } #[cfg(test)] @@ -1055,71 +906,13 @@ impl ClusterClient for ClusterClientImpl { } #[cfg(test)] - fn make_faulty_encryption_sessions(&self) { - self.data.sessions.make_faulty_encryption_sessions(); + fn make_faulty_generation_sessions(&self) { + self.data.sessions.make_faulty_generation_sessions(); } #[cfg(test)] - fn encryption_session(&self, session_id: &SessionId) -> Option> { - self.data.sessions.encryption_session(session_id) - } 
-} - -impl EncryptionSessionWrapper { - pub fn new(cluster: Weak, session_id: SessionId, session: Arc) -> Arc { - Arc::new(EncryptionSessionWrapper { - session: session, - session_id: session_id, - cluster: cluster, - }) - } -} - -impl EncryptionSession for EncryptionSessionWrapper { - fn state(&self) -> EncryptionSessionState { - self.session.state() - } - - fn wait(&self, timeout: Option) -> Result { - self.session.wait(timeout) - } - - #[cfg(test)] - fn joint_public_key(&self) -> Option> { - self.session.joint_public_key() - } -} - -impl Drop for EncryptionSessionWrapper { - fn drop(&mut self) { - if let Some(cluster) = self.cluster.upgrade() { - cluster.sessions.remove_encryption_session(&self.session_id); - } - } -} - -impl DecryptionSessionWrapper { - pub fn new(cluster: Weak, session_id: SessionId, access_key: Secret, session: Arc) -> Arc { - Arc::new(DecryptionSessionWrapper { - session: session, - session_id: session_id, - access_key: access_key, - cluster: cluster, - }) - } -} - -impl DecryptionSession for DecryptionSessionWrapper { - fn wait(&self) -> Result { - self.session.wait() - } -} - -impl Drop for DecryptionSessionWrapper { - fn drop(&mut self) { - if let Some(cluster) = self.cluster.upgrade() { - cluster.sessions.remove_decryption_session(&self.session_id, &self.access_key); - } + fn generation_session(&self, session_id: &SessionId) -> Option> { + self.data.sessions.generation_sessions.get(session_id) } } @@ -1135,11 +928,11 @@ pub mod tests { use std::collections::VecDeque; use parking_lot::Mutex; use tokio_core::reactor::Core; - use ethkey::{Random, Generator}; + use ethkey::{Random, Generator, Public}; use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage}; use key_server_cluster::message::Message; use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration}; - use key_server_cluster::encryption_session::{Session as EncryptionSession, SessionState as EncryptionSessionState}; + use 
key_server_cluster::generation_session::{Session as GenerationSession, SessionState as GenerationSessionState}; #[derive(Debug)] pub struct DummyCluster { @@ -1249,11 +1042,11 @@ pub mod tests { } #[test] - fn cluster_wont_start_encryption_session_if_not_fully_connected() { + fn cluster_wont_start_generation_session_if_not_fully_connected() { let core = Core::new().unwrap(); let clusters = make_clusters(&core, 6013, 3); clusters[0].run().unwrap(); - match clusters[0].client().new_encryption_session(SessionId::default(), 1) { + match clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1) { Err(Error::NodeDisconnected) => (), Err(e) => panic!("unexpected error {:?}", e), _ => panic!("unexpected success"), @@ -1261,50 +1054,50 @@ pub mod tests { } #[test] - fn error_in_encryption_session_broadcasted_to_all_other_nodes() { + fn error_in_generation_session_broadcasted_to_all_other_nodes() { let mut core = Core::new().unwrap(); let clusters = make_clusters(&core, 6016, 3); run_clusters(&clusters); loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established)); - // ask one of nodes to produce faulty encryption sessions - clusters[1].client().make_faulty_encryption_sessions(); + // ask one of nodes to produce faulty generation sessions + clusters[1].client().make_faulty_generation_sessions(); - // start && wait for encryption session to fail - let session = clusters[0].client().new_encryption_session(SessionId::default(), 1).unwrap(); - loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_key().is_some()); - assert!(session.joint_public_key().unwrap().is_err()); + // start && wait for generation session to fail + let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap(); + loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some()); + 
assert!(session.joint_public_and_secret().unwrap().is_err()); // check that faulty session is either removed from all nodes, or nonexistent (already removed) - assert!(clusters[0].client().encryption_session(&SessionId::default()).is_none()); + assert!(clusters[0].client().generation_session(&SessionId::default()).is_none()); for i in 1..3 { - if let Some(session) = clusters[i].client().encryption_session(&SessionId::default()) { - loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_key().is_some()); - assert!(session.joint_public_key().unwrap().is_err()); - assert!(clusters[i].client().encryption_session(&SessionId::default()).is_none()); + if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) { + loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some()); + assert!(session.joint_public_and_secret().unwrap().is_err()); + assert!(clusters[i].client().generation_session(&SessionId::default()).is_none()); } } } #[test] - fn encryption_session_is_removed_when_succeeded() { + fn generation_session_is_removed_when_succeeded() { let mut core = Core::new().unwrap(); let clusters = make_clusters(&core, 6019, 3); run_clusters(&clusters); loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established)); - // start && wait for encryption session to complete - let session = clusters[0].client().new_encryption_session(SessionId::default(), 1).unwrap(); - loop_until(&mut core, time::Duration::from_millis(300), || session.state() == EncryptionSessionState::Finished); - assert!(session.joint_public_key().unwrap().is_ok()); + // start && wait for generation session to complete + let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap(); + loop_until(&mut core, time::Duration::from_millis(300), || session.state() == GenerationSessionState::Finished); + 
assert!(session.joint_public_and_secret().unwrap().is_ok()); // check that session is either removed from all nodes, or nonexistent (already removed) - assert!(clusters[0].client().encryption_session(&SessionId::default()).is_none()); + assert!(clusters[0].client().generation_session(&SessionId::default()).is_none()); for i in 1..3 { - if let Some(session) = clusters[i].client().encryption_session(&SessionId::default()) { - loop_until(&mut core, time::Duration::from_millis(300), || session.state() == EncryptionSessionState::Finished); - assert!(session.joint_public_key().unwrap().is_err()); - assert!(clusters[i].client().encryption_session(&SessionId::default()).is_none()); + if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) { + loop_until(&mut core, time::Duration::from_millis(300), || session.state() == GenerationSessionState::Finished); + assert!(session.joint_public_and_secret().unwrap().is_err()); + assert!(clusters[i].client().generation_session(&SessionId::default()).is_none()); } } } diff --git a/secret_store/src/key_server_cluster/cluster_sessions.rs b/secret_store/src/key_server_cluster/cluster_sessions.rs new file mode 100644 index 000000000..f66ad972f --- /dev/null +++ b/secret_store/src/key_server_cluster/cluster_sessions.rs @@ -0,0 +1,506 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. 
If not, see . + +use std::time; +use std::sync::{Arc, Weak}; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::collections::{VecDeque, BTreeSet, BTreeMap}; +use parking_lot::RwLock; +use ethkey::{Public, Secret, Signature}; +use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, DocumentKeyShare, EncryptedDocumentKeyShadow, SessionMeta}; +use key_server_cluster::cluster::{Cluster, ClusterData, ClusterView, ClusterConfiguration}; +use key_server_cluster::message::{self, Message, GenerationMessage, EncryptionMessage, DecryptionMessage, SigningMessage}; +use key_server_cluster::generation_session::{Session as GenerationSession, SessionImpl as GenerationSessionImpl, + SessionParams as GenerationSessionParams, SessionState as GenerationSessionState}; +use key_server_cluster::decryption_session::{Session as DecryptionSession, SessionImpl as DecryptionSessionImpl, + DecryptionSessionId, SessionParams as DecryptionSessionParams}; +use key_server_cluster::encryption_session::{Session as EncryptionSession, SessionImpl as EncryptionSessionImpl, + SessionParams as EncryptionSessionParams, SessionState as EncryptionSessionState}; +use key_server_cluster::signing_session::{Session as SigningSession, SessionImpl as SigningSessionImpl, + SigningSessionId, SessionParams as SigningSessionParams}; + +/// When there are no session-related messages for SESSION_TIMEOUT_INTERVAL seconds, +/// we must treat this session as stalled && finish it with an error. +/// This timeout is for cases when node is responding to KeepAlive messages, but intentionally ignores +/// session messages. +const SESSION_TIMEOUT_INTERVAL: u64 = 60; + +/// Generic cluster session. +pub trait ClusterSession { + /// If session is finished (either with success or not). + fn is_finished(&self) -> bool; + /// When it takes too much time to complete session. + /// When it takes too much time to receive response from the node.
+ fn on_node_timeout(&self, node_id: &NodeId); +} + +/// Active sessions on this cluster. +pub struct ClusterSessions { + /// Key generation sessions. + pub generation_sessions: ClusterSessionsContainer, + /// Encryption sessions. + pub encryption_sessions: ClusterSessionsContainer, + /// Decryption sessions. + pub decryption_sessions: ClusterSessionsContainer, + /// Signing sessions. + pub signing_sessions: ClusterSessionsContainer, + /// Self node id. + self_node_id: NodeId, + /// All nodes ids. + nodes: BTreeSet, + /// Reference to key storage + key_storage: Arc, + /// Reference to ACL storage + acl_storage: Arc, + /// Make faulty generation sessions. + make_faulty_generation_sessions: AtomicBool, +} + +/// Active sessions container. +pub struct ClusterSessionsContainer { + /// Active sessions. + pub sessions: RwLock>>, +} + +/// Session and its message queue. +pub struct QueuedSession { + /// Session master. + pub master: NodeId, + /// Cluster view. + pub cluster_view: Arc, + /// Last received message time. + pub last_message_time: time::Instant, + /// Generation session. + pub session: Arc, + /// Messages queue. + pub queue: VecDeque<(NodeId, M)>, +} + +/// Generation session implementation, which removes session from cluster on drop. +pub struct GenerationSessionWrapper { + /// Wrapped session. + session: Arc, + /// Session Id. + session_id: SessionId, + /// Cluster data reference. + cluster: Weak, +} + +/// Encryption session implementation, which removes session from cluster on drop. +pub struct EncryptionSessionWrapper { + /// Wrapped session. + session: Arc, + /// Session Id. + session_id: SessionId, + /// Cluster data reference. + cluster: Weak, +} + +/// Decryption session implementation, which removes session from cluster on drop. +pub struct DecryptionSessionWrapper { + /// Wrapped session. + session: Arc, + /// Session Id. + session_id: DecryptionSessionId, + /// Cluster data reference. 
+ cluster: Weak, +} + +/// Signing session implementation, which removes session from cluster on drop. +pub struct SigningSessionWrapper { + /// Wrapped session. + session: Arc, + /// Session Id. + session_id: SigningSessionId, + /// Cluster data reference. + cluster: Weak, +} + +impl ClusterSessions { + /// Create new cluster sessions container. + pub fn new(config: &ClusterConfiguration) -> Self { + ClusterSessions { + self_node_id: config.self_key_pair.public().clone(), + nodes: config.nodes.keys().cloned().collect(), + acl_storage: config.acl_storage.clone(), + key_storage: config.key_storage.clone(), + generation_sessions: ClusterSessionsContainer::new(), + encryption_sessions: ClusterSessionsContainer::new(), + decryption_sessions: ClusterSessionsContainer::new(), + signing_sessions: ClusterSessionsContainer::new(), + make_faulty_generation_sessions: AtomicBool::new(false), + } + } + + #[cfg(test)] + pub fn make_faulty_generation_sessions(&self) { + self.make_faulty_generation_sessions.store(true, Ordering::Relaxed); + } + + /// Create new generation session. 
+ pub fn new_generation_session(&self, master: NodeId, session_id: SessionId, cluster: Arc) -> Result, Error> { + // check that there's no finished encryption session with the same id + if self.key_storage.contains(&session_id) { + return Err(Error::DuplicateSessionId); + } + // communicating to all other nodes is crucial for encryption session + // => check that we have connections to all cluster nodes + if self.nodes.iter().any(|n| !cluster.is_connected(n)) { + return Err(Error::NodeDisconnected); + } + + // check that there's no active encryption session with the same id + self.generation_sessions.insert(master, session_id, cluster.clone(), move || + Ok(GenerationSessionImpl::new(GenerationSessionParams { + id: session_id.clone(), + self_node_id: self.self_node_id.clone(), + key_storage: Some(self.key_storage.clone()), + cluster: cluster, + }))) + .map(|session| { + if self.make_faulty_generation_sessions.load(Ordering::Relaxed) { + session.simulate_faulty_behaviour(); + } + session + }) + } + + /// Send generation session error. + pub fn respond_with_generation_error(&self, session_id: &SessionId, error: message::SessionError) { + self.generation_sessions.sessions.read().get(session_id) + .map(|s| { + // error in generation session is considered fatal + // => broadcast error + + // do not bother processing send error, as we already processing error + let _ = s.cluster_view.broadcast(Message::Generation(GenerationMessage::SessionError(error))); + }); + } + + /// Create new encryption session. 
+ pub fn new_encryption_session(&self, master: NodeId, session_id: SessionId, cluster: Arc) -> Result, Error> { + let encrypted_data = self.read_key_share(&session_id, &cluster)?; + self.encryption_sessions.insert(master, session_id, cluster.clone(), move || EncryptionSessionImpl::new(EncryptionSessionParams { + id: session_id.clone(), + self_node_id: self.self_node_id.clone(), + encrypted_data: encrypted_data, + key_storage: self.key_storage.clone(), + cluster: cluster, + })) + } + + /// Send encryption session error. + pub fn respond_with_encryption_error(&self, session_id: &SessionId, error: message::EncryptionSessionError) { + self.encryption_sessions.sessions.read().get(session_id) + .map(|s| { + // error in encryption session is considered fatal + // => broadcast error + + // do not bother processing send error, as we already processing error + let _ = s.cluster_view.broadcast(Message::Encryption(EncryptionMessage::EncryptionSessionError(error))); + }); + } + + /// Create new decryption session. + pub fn new_decryption_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, cluster: Arc, requester_signature: Option) -> Result, Error> { + let session_id = DecryptionSessionId::new(session_id, sub_session_id); + let encrypted_data = self.read_key_share(&session_id.id, &cluster)?; + + self.decryption_sessions.insert(master, session_id.clone(), cluster.clone(), move || DecryptionSessionImpl::new(DecryptionSessionParams { + meta: SessionMeta { + id: session_id.id, + self_node_id: self.self_node_id.clone(), + master_node_id: master, + threshold: encrypted_data.threshold, + }, + access_key: session_id.access_key, + key_share: encrypted_data, + acl_storage: self.acl_storage.clone(), + cluster: cluster, + }, requester_signature)) + } + + /// Send decryption session error. 
+ pub fn respond_with_decryption_error(&self, session_id: &SessionId, sub_session_id: &Secret, to: &NodeId, error: message::DecryptionSessionError) { + let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); + self.decryption_sessions.sessions.read().get(&session_id) + .map(|s| { + // error in decryption session is non-fatal, if occurs on slave node + // => either respond with error + // => or broadcast error + + // do not bother processing send error, as we already processing error + if s.master == self.self_node_id { + let _ = s.cluster_view.broadcast(Message::Decryption(DecryptionMessage::DecryptionSessionError(error))); + } else { + let _ = s.cluster_view.send(to, Message::Decryption(DecryptionMessage::DecryptionSessionError(error))); + } + }); + } + + /// Create new signing session. + pub fn new_signing_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, cluster: Arc, requester_signature: Option) -> Result, Error> { + let session_id = SigningSessionId::new(session_id, sub_session_id); + let encrypted_data = self.read_key_share(&session_id.id, &cluster)?; + + self.signing_sessions.insert(master, session_id.clone(), cluster.clone(), move || SigningSessionImpl::new(SigningSessionParams { + meta: SessionMeta { + id: session_id.id, + self_node_id: self.self_node_id.clone(), + master_node_id: master, + threshold: encrypted_data.threshold, + }, + access_key: session_id.access_key, + key_share: encrypted_data, + acl_storage: self.acl_storage.clone(), + cluster: cluster, + }, requester_signature)) + } + + /// Send signing session error. 
+ pub fn respond_with_signing_error(&self, session_id: &SessionId, sub_session_id: &Secret, to: &NodeId, error: message::SigningSessionError) { + let session_id = SigningSessionId::new(session_id.clone(), sub_session_id.clone()); + self.signing_sessions.sessions.read().get(&session_id) + .map(|s| { + // error in signing session is non-fatal, if occurs on slave node + // => either respond with error + // => or broadcast error + + // do not bother processing send error, as we already processing error + if s.master == self.self_node_id { + let _ = s.cluster_view.broadcast(Message::Signing(SigningMessage::SigningSessionError(error))); + } else { + let _ = s.cluster_view.send(to, Message::Signing(SigningMessage::SigningSessionError(error))); + } + }); + } + + /// Stop sessions that are stalling. + pub fn stop_stalled_sessions(&self) { + self.generation_sessions.stop_stalled_sessions(); + self.encryption_sessions.stop_stalled_sessions(); + self.decryption_sessions.stop_stalled_sessions(); + self.signing_sessions.stop_stalled_sessions(); + } + + /// When connection to node is lost. + pub fn on_connection_timeout(&self, node_id: &NodeId) { + self.generation_sessions.on_connection_timeout(node_id); + self.encryption_sessions.on_connection_timeout(node_id); + self.decryption_sessions.on_connection_timeout(node_id); + self.signing_sessions.on_connection_timeout(node_id); + } + + /// Read key share && remove disconnected nodes. 
+ fn read_key_share(&self, key_id: &SessionId, cluster: &Arc) -> Result { + let mut encrypted_data = self.key_storage.get(key_id).map_err(|e| Error::KeyStorage(e.into()))?; + + // some of nodes, which were encrypting secret may be down + // => do not use these in session + let disconnected_nodes: BTreeSet<_> = encrypted_data.id_numbers.keys().cloned().collect(); + for disconnected_node in disconnected_nodes.difference(&cluster.nodes()) { + encrypted_data.id_numbers.remove(&disconnected_node); + } + Ok(encrypted_data) + } +} + +impl ClusterSessionsContainer where K: Clone + Ord, V: ClusterSession { + pub fn new() -> Self { + ClusterSessionsContainer { + sessions: RwLock::new(BTreeMap::new()), + } + } + + pub fn get(&self, session_id: &K) -> Option> { + self.sessions.read().get(session_id).map(|s| s.session.clone()) + } + + pub fn insert Result>(&self, master: NodeId, session_id: K, cluster: Arc, session: F) -> Result, Error> { + let mut sessions = self.sessions.write(); + if sessions.contains_key(&session_id) { + return Err(Error::DuplicateSessionId); + } + + let session = Arc::new(session()?); + let queued_session = QueuedSession { + master: master, + cluster_view: cluster, + last_message_time: time::Instant::now(), + session: session.clone(), + queue: VecDeque::new(), + }; + sessions.insert(session_id, queued_session); + Ok(session) + } + + pub fn remove(&self, session_id: &K) { + self.sessions.write().remove(session_id); + } + + pub fn enqueue_message(&self, session_id: &K, sender: NodeId, message: M, is_queued_message: bool) { + self.sessions.write().get_mut(session_id) + .map(|session| if is_queued_message { session.queue.push_front((sender, message)) } + else { session.queue.push_back((sender, message)) }); + } + + pub fn dequeue_message(&self, session_id: &K) -> Option<(NodeId, M)> { + self.sessions.write().get_mut(session_id) + .and_then(|session| session.queue.pop_front()) + } + + pub fn stop_stalled_sessions(&self) { + let mut sessions = 
self.sessions.write(); + for sid in sessions.keys().cloned().collect::>() { + let remove_session = { + let session = sessions.get(&sid).expect("enumerating only existing sessions; qed"); + if time::Instant::now() - session.last_message_time > time::Duration::from_secs(SESSION_TIMEOUT_INTERVAL) { + session.session.on_session_timeout(); + session.session.is_finished() + } else { + false + } + }; + + if remove_session { + sessions.remove(&sid); + } + } + } + + pub fn on_connection_timeout(&self, node_id: &NodeId) { + let mut sessions = self.sessions.write(); + for sid in sessions.keys().cloned().collect::>() { + let remove_session = { + let session = sessions.get(&sid).expect("enumerating only existing sessions; qed"); + session.session.on_node_timeout(node_id); + session.session.is_finished() + }; + if remove_session { + sessions.remove(&sid); + } + } + } +} + +impl GenerationSessionWrapper { + pub fn new(cluster: Weak, session_id: SessionId, session: Arc) -> Arc { + Arc::new(GenerationSessionWrapper { + session: session, + session_id: session_id, + cluster: cluster, + }) + } +} + +impl GenerationSession for GenerationSessionWrapper { + fn state(&self) -> GenerationSessionState { + self.session.state() + } + + fn wait(&self, timeout: Option) -> Result { + self.session.wait(timeout) + } + + fn joint_public_and_secret(&self) -> Option> { + self.session.joint_public_and_secret() + } +} + +impl Drop for GenerationSessionWrapper { + fn drop(&mut self) { + if let Some(cluster) = self.cluster.upgrade() { + cluster.sessions().generation_sessions.remove(&self.session_id); + } + } +} + +impl EncryptionSessionWrapper { + pub fn new(cluster: Weak, session_id: SessionId, session: Arc) -> Arc { + Arc::new(EncryptionSessionWrapper { + session: session, + session_id: session_id, + cluster: cluster, + }) + } +} + +impl EncryptionSession for EncryptionSessionWrapper { + fn state(&self) -> EncryptionSessionState { + self.session.state() + } + + fn wait(&self, timeout: Option) -> 
Result<(), Error> { + self.session.wait(timeout) + } +} + +impl Drop for EncryptionSessionWrapper { + fn drop(&mut self) { + if let Some(cluster) = self.cluster.upgrade() { + cluster.sessions().encryption_sessions.remove(&self.session_id); + } + } +} + +impl DecryptionSessionWrapper { + pub fn new(cluster: Weak, session_id: DecryptionSessionId, session: Arc) -> Arc { + Arc::new(DecryptionSessionWrapper { + session: session, + session_id: session_id, + cluster: cluster, + }) + } +} + +impl DecryptionSession for DecryptionSessionWrapper { + fn wait(&self) -> Result { + self.session.wait() + } +} + +impl Drop for DecryptionSessionWrapper { + fn drop(&mut self) { + if let Some(cluster) = self.cluster.upgrade() { + cluster.sessions().decryption_sessions.remove(&self.session_id); + } + } +} + +impl SigningSessionWrapper { + pub fn new(cluster: Weak, session_id: SigningSessionId, session: Arc) -> Arc { + Arc::new(SigningSessionWrapper { + session: session, + session_id: session_id, + cluster: cluster, + }) + } +} + +impl SigningSession for SigningSessionWrapper { + fn wait(&self) -> Result<(Secret, Secret), Error> { + self.session.wait() + } +} + +impl Drop for SigningSessionWrapper { + fn drop(&mut self) { + if let Some(cluster) = self.cluster.upgrade() { + cluster.sessions().signing_sessions.remove(&self.session_id); + } + } +} diff --git a/secret_store/src/key_server_cluster/decryption_session.rs b/secret_store/src/key_server_cluster/decryption_session.rs index 96361ef46..6a806bb92 100644 --- a/secret_store/src/key_server_cluster/decryption_session.rs +++ b/secret_store/src/key_server_cluster/decryption_session.rs @@ -15,22 +15,23 @@ // along with Parity. If not, see . 
use std::cmp::{Ord, PartialOrd, Ordering}; -use std::collections::{BTreeSet, BTreeMap}; use std::sync::Arc; use parking_lot::{Mutex, Condvar}; -use ethcrypto::ecies::encrypt; -use ethcrypto::DEFAULT_MAC; -use ethkey::{self, Secret, Public, Signature}; -use key_server_cluster::{Error, AclStorage, DocumentKeyShare, NodeId, SessionId, DocumentEncryptedKeyShadow}; +use ethkey::{Secret, Signature}; +use key_server_cluster::{Error, AclStorage, DocumentKeyShare, NodeId, SessionId, EncryptedDocumentKeyShadow, SessionMeta}; use key_server_cluster::cluster::Cluster; -use key_server_cluster::math; -use key_server_cluster::message::{Message, DecryptionMessage, InitializeDecryptionSession, ConfirmDecryptionInitialization, - RequestPartialDecryption, PartialDecryption, DecryptionSessionError, DecryptionSessionCompleted}; +use key_server_cluster::cluster_sessions::ClusterSession; +use key_server_cluster::message::{Message, DecryptionMessage, DecryptionConsensusMessage, RequestPartialDecryption, + PartialDecryption, DecryptionSessionError, DecryptionSessionCompleted, ConsensusMessage, InitializeConsensusSession, + ConfirmConsensusInitialization}; +use key_server_cluster::jobs::job_session::JobTransport; +use key_server_cluster::jobs::decryption_job::{PartialDecryptionRequest, PartialDecryptionResponse, DecryptionJob}; +use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession}; /// Decryption session API. pub trait Session: Send + Sync + 'static { /// Wait until session is completed. Returns distributely restored secret key. - fn wait(&self) -> Result; + fn wait(&self) -> Result; } /// Distributed decryption session. 
@@ -42,24 +43,39 @@ pub trait Session: Send + Sync + 'static { /// 3) partial decryption: every node which has succussfully checked access for the requestor do a partial decryption /// 4) decryption: master node receives all partial decryptions of the secret and restores the secret pub struct SessionImpl { - /// Encryption session id. - id: SessionId, - /// Decryption session access key. - access_key: Secret, - /// Public identifier of this node. - self_node_id: NodeId, - /// Encrypted data. - encrypted_data: DocumentKeyShare, - /// ACL storate to check access to the resource. - acl_storage: Arc, - /// Cluster which allows this node to send messages to other nodes in the cluster. - cluster: Arc, - /// SessionImpl completion condvar. - completed: Condvar, - /// Mutable session data. + /// Session core. + core: SessionCore, + /// Session data. data: Mutex, } +/// Immutable session data. +struct SessionCore { + /// Session metadata. + pub meta: SessionMeta, + /// Decryption session access key. + pub access_key: Secret, + /// Key share. + pub key_share: DocumentKeyShare, + /// Cluster which allows this node to send messages to other nodes in the cluster. + pub cluster: Arc, + /// SessionImpl completion condvar. + pub completed: Condvar, +} + +/// Decryption consensus session type. +type DecryptionConsensusSession = ConsensusSession; + +/// Mutable session data. +struct SessionData { + /// Consensus-based decryption session. + pub consensus_session: DecryptionConsensusSession, + /// Is shadow decryption requested? + pub is_shadow_decryption: Option, + /// Decryption result. + pub result: Option>, +} + /// Decryption session Id. #[derive(Debug, Clone, PartialEq, Eq)] pub struct DecryptionSessionId { @@ -71,546 +87,357 @@ pub struct DecryptionSessionId { /// SessionImpl creation parameters pub struct SessionParams { - /// SessionImpl identifier. - pub id: SessionId, - /// SessionImpl access key. + /// Session metadata. + pub meta: SessionMeta, + /// Session access key. 
pub access_key: Secret, - /// Id of node, on which this session is running. - pub self_node_id: Public, - /// Encrypted data (result of running encryption_session::SessionImpl). - pub encrypted_data: DocumentKeyShare, + /// Key share. + pub key_share: DocumentKeyShare, /// ACL storage. pub acl_storage: Arc, /// Cluster pub cluster: Arc, } -#[derive(Debug)] -/// Partial decryption result. -struct PartialDecryptionResult { - /// Shadow point. - pub shadow_point: Public, - /// Decryption shadow coefficient, if requested. - pub decrypt_shadow: Option>, +/// Decryption consensus transport. +struct DecryptionConsensusTransport { + /// Session id. + id: SessionId, + /// Session access key. + access_key: Secret, + /// Cluster. + cluster: Arc, } -#[derive(Debug)] -/// Mutable data of encryption (distributed key generation) session. -struct SessionData { - /// Current state of the session. - state: SessionState, - - // === Values, filled when session initialization just starts === - /// Reference to the node, which has started this session. - master: Option, - /// Public key of requestor. - requestor: Option, - /// Is shadow decryption requested? - is_shadow_decryption: Option, - - // === Values, filled during session initialization === - /// Nodes, which have been requested for decryption initialization. - requested_nodes: BTreeSet, - /// Nodes, which have responded with reject to initialization request. - rejected_nodes: BTreeSet, - /// Nodes, which have responded with confirm to initialization request. - confirmed_nodes: BTreeSet, - - // === Values, filled during partial decryption === - /// Nodes, which have been asked for partial decryption. - shadow_requests: BTreeSet, - /// Shadow points, received from nodes as a response to partial decryption request. - shadow_points: BTreeMap, - - /// === Values, filled during final decryption === - /// Decrypted secret - decrypted_secret: Option>, -} - -#[derive(Debug, Clone, PartialEq)] -/// Decryption session data. 
-pub enum SessionState { - /// Every node starts in this state. - WaitingForInitialization, - /// Master node waits for other nodes to confirm decryption. - WaitingForInitializationConfirm, - /// Waiting for partial decrypion request. - WaitingForPartialDecryptionRequest, - /// Waiting for partial decryption responses. - WaitingForPartialDecryption, - /// Decryption session is finished for this node. - Finished, - /// Decryption session is failed for this node. - Failed, +/// Decryption job transport +struct DecryptionJobTransport { + /// Session id. + id: SessionId, + //// Session access key. + access_key: Secret, + /// Cluster. + cluster: Arc, } impl SessionImpl { /// Create new decryption session. - pub fn new(params: SessionParams) -> Result { - check_encrypted_data(¶ms.self_node_id, ¶ms.encrypted_data)?; + pub fn new(params: SessionParams, requester_signature: Option) -> Result { + debug_assert_eq!(params.meta.threshold, params.key_share.threshold); + debug_assert_eq!(params.meta.self_node_id == params.meta.master_node_id, requester_signature.is_some()); + + use key_server_cluster::generation_session::{check_cluster_nodes, check_threshold}; + + // check that common_point and encrypted_point are already set + if params.key_share.common_point.is_none() || params.key_share.encrypted_point.is_none() { + return Err(Error::NotStartedSessionId); + } + + // check nodes and threshold + let nodes = params.key_share.id_numbers.keys().cloned().collect(); + check_cluster_nodes(¶ms.meta.self_node_id, &nodes)?; + check_threshold(params.key_share.threshold, &nodes)?; + + let consensus_transport = DecryptionConsensusTransport { + id: params.meta.id.clone(), + access_key: params.access_key.clone(), + cluster: params.cluster.clone(), + }; Ok(SessionImpl { - id: params.id, - access_key: params.access_key, - self_node_id: params.self_node_id, - encrypted_data: params.encrypted_data, - acl_storage: params.acl_storage, - cluster: params.cluster, - completed: Condvar::new(), + core: 
SessionCore { + meta: params.meta.clone(), + access_key: params.access_key, + key_share: params.key_share, + cluster: params.cluster, + completed: Condvar::new(), + }, data: Mutex::new(SessionData { - state: SessionState::WaitingForInitialization, - master: None, - requestor: None, + consensus_session: match requester_signature { + Some(requester_signature) => ConsensusSession::new_on_master(ConsensusSessionParams { + meta: params.meta, + acl_storage: params.acl_storage.clone(), + consensus_transport: consensus_transport, + }, requester_signature)?, + None => ConsensusSession::new_on_slave(ConsensusSessionParams { + meta: params.meta, + acl_storage: params.acl_storage.clone(), + consensus_transport: consensus_transport, + })?, + }, is_shadow_decryption: None, - requested_nodes: BTreeSet::new(), - rejected_nodes: BTreeSet::new(), - confirmed_nodes: BTreeSet::new(), - shadow_requests: BTreeSet::new(), - shadow_points: BTreeMap::new(), - decrypted_secret: None, - }) + result: None, + }), }) } - /// Get this node Id. + #[cfg(test)] + /// Get this node id. pub fn node(&self) -> &NodeId { - &self.self_node_id - } - - /// Get current session state. - pub fn state(&self) -> SessionState { - self.data.lock().state.clone() + &self.core.meta.self_node_id } #[cfg(test)] /// Get this session access key. pub fn access_key(&self) -> &Secret { - &self.access_key + &self.core.access_key + } + + #[cfg(test)] + /// Get session state. + pub fn state(&self) -> ConsensusSessionState { + self.data.lock().consensus_session.state() } #[cfg(test)] /// Get decrypted secret - pub fn decrypted_secret(&self) -> Option> { - self.data.lock().decrypted_secret.clone() + pub fn decrypted_secret(&self) -> Option> { + self.data.lock().result.clone() } - /// Initialize decryption session. - pub fn initialize(&self, requestor_signature: Signature, is_shadow_decryption: bool) -> Result<(), Error> { + /// Initialize decryption session on master node. 
+ pub fn initialize(&self, is_shadow_decryption: bool) -> Result<(), Error> { let mut data = self.data.lock(); - - // check state - if data.state != SessionState::WaitingForInitialization { - return Err(Error::InvalidStateForRequest); - } - - // recover requestor signature - let requestor_public = ethkey::recover(&requestor_signature, &self.id)?; - - // update state - data.master = Some(self.node().clone()); - data.state = SessionState::WaitingForInitializationConfirm; - data.requestor = Some(requestor_public.clone()); data.is_shadow_decryption = Some(is_shadow_decryption); - data.requested_nodes.extend(self.encrypted_data.id_numbers.keys().cloned()); + data.consensus_session.initialize(self.core.key_share.id_numbers.keys().cloned().collect())?; - // ..and finally check access on our's own - let is_requestor_allowed_to_read = self.acl_storage.check(&requestor_public, &self.id).unwrap_or(false); - process_initialization_response(&self.encrypted_data, &mut *data, self.node(), is_requestor_allowed_to_read)?; + if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished { + self.core.disseminate_jobs(&mut data.consensus_session, is_shadow_decryption)?; - // check if we have enough nodes to decrypt data - match data.state { - // not enough nodes => pass initialization message to all other nodes - SessionState::WaitingForInitializationConfirm => { - for node in self.encrypted_data.id_numbers.keys().filter(|n| *n != self.node()) { - self.cluster.send(node, Message::Decryption(DecryptionMessage::InitializeDecryptionSession(InitializeDecryptionSession { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - requestor_signature: requestor_signature.clone().into(), - is_shadow_decryption: is_shadow_decryption, - })))?; - } - }, - // we can decrypt data on our own - SessionState::WaitingForPartialDecryption => { - data.confirmed_nodes.insert(self.node().clone()); - 
SessionImpl::start_waiting_for_partial_decryption(self.node().clone(), self.id.clone(), self.access_key.clone(), &self.cluster, &self.encrypted_data, &mut *data)?; - SessionImpl::do_decryption(self.access_key.clone(), &self.encrypted_data, &mut *data)?; - self.completed.notify_all(); - }, - // we can not decrypt data - SessionState::Failed => self.completed.notify_all(), - // cannot reach other states - _ => unreachable!("process_initialization_response can change state to WaitingForPartialDecryption or Failed; checked that we are in WaitingForInitializationConfirm state above; qed"), + debug_assert!(data.consensus_session.state() == ConsensusSessionState::Finished); + data.result = Some(Ok(data.consensus_session.result()?)); + self.core.completed.notify_all(); } Ok(()) } - /// When session initialization message is received. - pub fn on_initialize_session(&self, sender: NodeId, message: &InitializeDecryptionSession) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(self.access_key == *message.sub_session); - debug_assert!(&sender != self.node()); - - let mut data = self.data.lock(); - - // check state - if data.state != SessionState::WaitingForInitialization { - return Err(Error::InvalidStateForRequest); + /// Process decryption message. 
+ pub fn process_message(&self, sender: &NodeId, message: &DecryptionMessage) -> Result<(), Error> { + match message { + &DecryptionMessage::DecryptionConsensusMessage(ref message) => + self.on_consensus_message(sender, message), + &DecryptionMessage::RequestPartialDecryption(ref message) => + self.on_partial_decryption_requested(sender, message), + &DecryptionMessage::PartialDecryption(ref message) => + self.on_partial_decryption(sender, message), + &DecryptionMessage::DecryptionSessionError(ref message) => + self.on_session_error(sender, message), + &DecryptionMessage::DecryptionSessionCompleted(ref message) => + self.on_session_completed(sender, message), } - - // recover requestor signature - let requestor_public = ethkey::recover(&message.requestor_signature, &self.id)?; - - // check access - let is_requestor_allowed_to_read = self.acl_storage.check(&requestor_public, &self.id).unwrap_or(false); - data.state = if is_requestor_allowed_to_read { SessionState::WaitingForPartialDecryptionRequest } - else { SessionState::Failed }; - data.requestor = Some(requestor_public); - data.is_shadow_decryption = Some(message.is_shadow_decryption); - - // respond to master node - data.master = Some(sender.clone()); - self.cluster.send(&sender, Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(ConfirmDecryptionInitialization { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - is_confirmed: is_requestor_allowed_to_read, - }))) } - /// When session initialization confirmation message is reeived. - pub fn on_confirm_initialization(&self, sender: NodeId, message: &ConfirmDecryptionInitialization) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(self.access_key == *message.sub_session); - debug_assert!(&sender != self.node()); + /// When consensus-related message is received. 
+ pub fn on_consensus_message(&self, sender: &NodeId, message: &DecryptionConsensusMessage) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); let mut data = self.data.lock(); + let is_establishing_consensus = data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus; + data.consensus_session.on_consensus_message(&sender, &message.message)?; - // check state - if data.state == SessionState::WaitingForPartialDecryption { - // if there were enough confirmations/rejections before this message - // we have already moved to the next state - if !data.requested_nodes.remove(&sender) { - return Err(Error::InvalidMessage); - } - - data.confirmed_nodes.insert(sender); - return Ok(()); - } - if data.state != SessionState::WaitingForInitializationConfirm { + let is_consensus_established = data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished; + if self.core.meta.self_node_id != self.core.meta.master_node_id || !is_establishing_consensus || !is_consensus_established { return Ok(()); } - // update state - process_initialization_response(&self.encrypted_data, &mut *data, &sender, message.is_confirmed)?; - - // check if we have enough nodes to decrypt data - match data.state { - // we do not yet have enough nodes for decryption - SessionState::WaitingForInitializationConfirm => Ok(()), - // we have enough nodes for decryption - SessionState::WaitingForPartialDecryption => - SessionImpl::start_waiting_for_partial_decryption(self.node().clone(), self.id.clone(), self.access_key.clone(), &self.cluster, &self.encrypted_data, &mut *data), - // we can not have enough nodes for decryption - SessionState::Failed => { - self.completed.notify_all(); - Ok(()) - }, - // cannot reach other states - _ => unreachable!("process_initialization_response can change state to WaitingForPartialDecryption or Failed; checked that we are in WaitingForInitializationConfirm 
state above; qed"), - } + let is_shadow_decryption = data.is_shadow_decryption + .expect("we are on master node; on master node is_shadow_decryption is filled in initialize(); on_consensus_message follows initialize (state check in consensus_session); qed"); + self.core.disseminate_jobs(&mut data.consensus_session, is_shadow_decryption) } /// When partial decryption is requested. - pub fn on_partial_decryption_requested(&self, sender: NodeId, message: &RequestPartialDecryption) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(self.access_key == *message.sub_session); - debug_assert!(&sender != self.node()); + pub fn on_partial_decryption_requested(&self, sender: &NodeId, message: &RequestPartialDecryption) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); - // check message - if message.nodes.len() != self.encrypted_data.threshold + 1 { - return Err(Error::InvalidMessage); - } + let mut data = self.data.lock(); + let requester = data.consensus_session.requester()?.clone(); + let decryption_job = DecryptionJob::new_on_slave(self.core.meta.self_node_id.clone(), self.core.access_key.clone(), requester, self.core.key_share.clone())?; + let decryption_transport = self.core.decryption_transport(); - let data = self.data.lock(); - - // check state - if data.master != Some(sender) { - return Err(Error::InvalidMessage); - } - if data.state != SessionState::WaitingForPartialDecryptionRequest { - return Err(Error::InvalidStateForRequest); - } - - // calculate shadow point - let decryption_result = { - let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryptionRequest follows initialization; qed"); - let is_shadow_decryption = data.is_shadow_decryption.expect("is_shadow_decryption is filled during initialization; 
WaitingForPartialDecryptionRequest follows initialization; qed"); - let nodes = message.nodes.iter().cloned().map(Into::into).collect(); - do_partial_decryption(self.node(), &requestor, is_shadow_decryption, &nodes, &self.access_key, &self.encrypted_data)? - }; - self.cluster.send(&sender, Message::Decryption(DecryptionMessage::PartialDecryption(PartialDecryption { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - shadow_point: decryption_result.shadow_point.into(), - decrypt_shadow: decryption_result.decrypt_shadow, - })))?; - - // master could ask us for another partial decryption in case of restart - // => no state change is required - - Ok(()) + data.consensus_session.on_job_request(&sender, PartialDecryptionRequest { + id: message.request_id.clone().into(), + is_shadow_decryption: message.is_shadow_decryption, + other_nodes_ids: message.nodes.iter().cloned().map(Into::into).collect(), + }, decryption_job, decryption_transport) } /// When partial decryption is received. 
- pub fn on_partial_decryption(&self, sender: NodeId, message: &PartialDecryption) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(self.access_key == *message.sub_session); - debug_assert!(&sender != self.node()); + pub fn on_partial_decryption(&self, sender: &NodeId, message: &PartialDecryption) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); let mut data = self.data.lock(); - - // check state - if data.state != SessionState::WaitingForPartialDecryption { - return Err(Error::InvalidStateForRequest); - } - - if !data.shadow_requests.remove(&sender) { - return Err(Error::InvalidStateForRequest); - } - data.shadow_points.insert(sender, PartialDecryptionResult { + data.consensus_session.on_job_response(sender, PartialDecryptionResponse { + request_id: message.request_id.clone().into(), shadow_point: message.shadow_point.clone().into(), decrypt_shadow: message.decrypt_shadow.clone(), - }); + })?; - // check if we have enough shadow points to decrypt the secret - if data.shadow_points.len() != self.encrypted_data.threshold + 1 { + if data.consensus_session.state() != ConsensusSessionState::Finished { return Ok(()); } - // notify all other nodes about session completion - self.cluster.broadcast(Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(DecryptionSessionCompleted { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), + self.core.cluster.broadcast(Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(DecryptionSessionCompleted { + session: self.core.meta.id.clone().into(), + sub_session: self.core.access_key.clone().into(), })))?; - // do decryption - SessionImpl::do_decryption(self.access_key.clone(), &self.encrypted_data, &mut *data)?; - self.completed.notify_all(); + data.result = 
Some(Ok(data.consensus_session.result()?)); + self.core.completed.notify_all(); Ok(()) } /// When session is completed. - pub fn on_session_completed(&self, sender: NodeId, message: &DecryptionSessionCompleted) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(self.access_key == *message.sub_session); - debug_assert!(&sender != self.node()); + pub fn on_session_completed(&self, sender: &NodeId, message: &DecryptionSessionCompleted) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); - let mut data = self.data.lock(); - - // check state - if data.state != SessionState::WaitingForPartialDecryptionRequest { - return Err(Error::InvalidStateForRequest); - } - if data.master != Some(sender) { - return Err(Error::InvalidMessage); - } - - // update state - data.state = SessionState::Finished; - - Ok(()) + self.data.lock().consensus_session.on_session_completed(sender) } /// When error has occured on another node. - pub fn on_session_error(&self, sender: NodeId, message: &DecryptionSessionError) -> Result<(), Error> { - let mut data = self.data.lock(); - - warn!("{}: decryption session failed with error: {:?} from {}", self.node(), message.error, sender); - - data.state = SessionState::Failed; - data.decrypted_secret = Some(Err(Error::Io(message.error.clone()))); - self.completed.notify_all(); - - Ok(()) + pub fn on_session_error(&self, sender: &NodeId, message: &DecryptionSessionError) -> Result<(), Error> { + self.process_node_error(Some(&sender), &message.error) } - /// When connection to one of cluster nodes has timeouted. - pub fn on_node_timeout(&self, node: &NodeId) { + /// Process error from the other node. 
+ fn process_node_error(&self, node: Option<&NodeId>, error: &String) -> Result<(), Error> { let mut data = self.data.lock(); + match { + match node { + Some(node) => data.consensus_session.on_node_error(node), + None => data.consensus_session.on_session_timeout(), + } + } { + Ok(false) => Ok(()), + Ok(true) => { + let is_shadow_decryption = data.is_shadow_decryption.expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when is_shadow_decryption.is_some(); qed"); + let disseminate_result = self.core.disseminate_jobs(&mut data.consensus_session, is_shadow_decryption); + match disseminate_result { + Ok(()) => Ok(()), + Err(err) => { + warn!("{}: decryption session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node); - let is_self_master = data.master.as_ref() == Some(self.node()); - let is_other_master = data.master.as_ref() == Some(node); - // if this is master node, we might have to restart - if is_self_master { - match data.state { - SessionState::WaitingForInitializationConfirm => { - // we will never receive confirmation from this node => treat as reject - if data.requested_nodes.remove(node) || data.confirmed_nodes.remove(node) { - data.rejected_nodes.insert(node.clone()); - } - // check if we still have enough nodes for decryption - if self.encrypted_data.id_numbers.len() - data.rejected_nodes.len() >= self.encrypted_data.threshold + 1 { - return; + data.result = Some(Err(err.clone())); + self.core.completed.notify_all(); + Err(err) } } - SessionState::WaitingForPartialDecryption => { - if data.rejected_nodes.contains(node) { - // already rejected => does not affect session - return; - } - if data.requested_nodes.remove(node) { - // we have tried to initialize this node, but it has failed - // => no restart required, just mark as rejected - data.rejected_nodes.insert(node.clone()); - return; - } - if data.confirmed_nodes.contains(node) { - if 
data.shadow_points.contains_key(node) { - // we have already received partial decryption from this node - // => just ignore this connection drop - return; - } + }, + Err(err) => { + warn!("{}: decryption session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node); - // the worst case: we have sent partial decryption request to other nodes - // => we have to restart the session - data.confirmed_nodes.remove(node); - data.rejected_nodes.insert(node.clone()); - // check if we still have enough nodes for decryption - if self.encrypted_data.id_numbers.len() - data.rejected_nodes.len() >= self.encrypted_data.threshold + 1 { - // we are going to stop session anyway => ignore error - let _ = SessionImpl::start_waiting_for_partial_decryption(self.node().clone(), self.id.clone(), self.access_key.clone(), &self.cluster, &self.encrypted_data, &mut *data); - return; - } - // not enough nodes - } - } - _ => (), // all other states lead to failures - } - } else if !is_other_master { - // disconnected from non-master node on non-master node - // => this does not affect this session - return; + data.result = Some(Err(err.clone())); + self.core.completed.notify_all(); + Err(err) + }, } - // else: disconnecting from master node means failure + } +} - // no more nodes left for decryption => fail - warn!("{}: decryption session failed because {} connection has timeouted", self.node(), node); - - data.state = SessionState::Failed; - data.decrypted_secret = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); +impl ClusterSession for SessionImpl { + fn is_finished(&self) -> bool { + let data = self.data.lock(); + data.consensus_session.state() == ConsensusSessionState::Failed + || data.consensus_session.state() == ConsensusSessionState::Finished } - /// When session timeout has occured. 
- pub fn on_session_timeout(&self) { - let mut data = self.data.lock(); - - let is_self_master = data.master.as_ref() == Some(self.node()); - // if this is master node, we might have to restart - if is_self_master { - match data.state { - SessionState::WaitingForInitializationConfirm => - // we have sent initialization requests to all nodes, but haven't received confirmation - // => nodes will never respond => fail - (), - SessionState::WaitingForPartialDecryption => { - // we have requested partial decryption, but some nodes have failed to respond - // => mark these nodes as rejected && restart - for timeouted_node in data.shadow_requests.iter().cloned().collect::>() { - data.confirmed_nodes.remove(&timeouted_node); - data.rejected_nodes.insert(timeouted_node); - } - - // check if we still have enough nodes for decryption - if self.encrypted_data.id_numbers.len() - data.rejected_nodes.len() >= self.encrypted_data.threshold + 1 { - // we are going to stop session anyway => ignore error - let _ = SessionImpl::start_waiting_for_partial_decryption(self.node().clone(), self.id.clone(), self.access_key.clone(), &self.cluster, &self.encrypted_data, &mut *data); - return; - } - }, - // no nodes has responded to our requests => session is failed - _ => return, - } - } - - // no more nodes left for decryption => fail - warn!("{}: decryption session failed with timeout", self.node()); - - data.state = SessionState::Failed; - data.decrypted_secret = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); + fn on_node_timeout(&self, node: &NodeId) { + // ignore error, only state matters + let _ = self.process_node_error(Some(node), &Error::NodeDisconnected.into()); } - fn start_waiting_for_partial_decryption(self_node_id: NodeId, session_id: SessionId, access_key: Secret, cluster: &Arc, encrypted_data: &DocumentKeyShare, data: &mut SessionData) -> Result<(), Error> { - let confirmed_nodes: BTreeSet<_> = data.confirmed_nodes.clone(); - let confirmed_nodes: 
BTreeSet<_> = confirmed_nodes.difference(&data.rejected_nodes).cloned().collect(); - - data.shadow_requests.clear(); - data.shadow_points.clear(); - for node in confirmed_nodes.iter().filter(|n| n != &&self_node_id) { - data.shadow_requests.insert(node.clone()); - cluster.send(node, Message::Decryption(DecryptionMessage::RequestPartialDecryption(RequestPartialDecryption { - session: session_id.clone().into(), - sub_session: access_key.clone().into(), - nodes: confirmed_nodes.iter().cloned().map(Into::into).collect(), - })))?; - } - - if data.confirmed_nodes.remove(&self_node_id) { - let decryption_result = { - let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryption follows initialization; qed"); - let is_shadow_decryption = data.is_shadow_decryption.expect("is_shadow_decryption is filled during initialization; WaitingForPartialDecryption follows initialization; qed"); - do_partial_decryption(&self_node_id, &requestor, is_shadow_decryption, &data.confirmed_nodes, &access_key, &encrypted_data)? 
- }; - data.shadow_points.insert(self_node_id.clone(), decryption_result); - } - - Ok(()) - } - - fn do_decryption(access_key: Secret, encrypted_data: &DocumentKeyShare, data: &mut SessionData) -> Result<(), Error> { - // decrypt the secret using shadow points - let joint_shadow_point = math::compute_joint_shadow_point(data.shadow_points.values().map(|s| &s.shadow_point))?; - let decrypted_secret = math::decrypt_with_joint_shadow(encrypted_data.threshold, &access_key, &encrypted_data.encrypted_point, &joint_shadow_point)?; - let is_shadow_decryption = data.is_shadow_decryption.expect("is_shadow_decryption is filled during initialization; decryption follows initialization; qed"); - let (common_point, decrypt_shadows) = if is_shadow_decryption { - ( - Some(math::make_common_shadow_point(encrypted_data.threshold, encrypted_data.common_point.clone())?), - Some(data.shadow_points.values() - .map(|s| s.decrypt_shadow.as_ref().expect("decrypt_shadow is filled during partial decryption; decryption follows partial decryption; qed").clone()) - .collect()) - ) - } else { - (None, None) - }; - data.decrypted_secret = Some(Ok(DocumentEncryptedKeyShadow { - decrypted_secret: decrypted_secret, - common_point: common_point, - decrypt_shadows: decrypt_shadows, - })); - - // switch to completed state - data.state = SessionState::Finished; - - Ok(()) + fn on_session_timeout(&self) { + // ignore error, only state matters + let _ = self.process_node_error(None, &Error::NodeDisconnected.into()); } } impl Session for SessionImpl { - fn wait(&self) -> Result { + fn wait(&self) -> Result { let mut data = self.data.lock(); - if !data.decrypted_secret.is_some() { - self.completed.wait(&mut data); + if !data.result.is_some() { + self.core.completed.wait(&mut data); } - data.decrypted_secret.as_ref() - .expect("checked above or waited for completed; completed is only signaled when decrypted_secret.is_some(); qed") + data.result.as_ref() + .expect("checked above or waited for completed; 
completed is only signaled when result.is_some(); qed") .clone() } } +impl SessionCore { + pub fn decryption_transport(&self) -> DecryptionJobTransport { + DecryptionJobTransport { + id: self.meta.id.clone(), + access_key: self.access_key.clone(), + cluster: self.cluster.clone() + } + } + + pub fn disseminate_jobs(&self, consensus_session: &mut DecryptionConsensusSession, is_shadow_decryption: bool) -> Result<(), Error> { + let requester = consensus_session.requester()?.clone(); + let decryption_job = DecryptionJob::new_on_master(self.meta.self_node_id.clone(), self.access_key.clone(), requester, self.key_share.clone(), is_shadow_decryption)?; + consensus_session.disseminate_jobs(decryption_job, self.decryption_transport()) + } +} + +impl JobTransport for DecryptionConsensusTransport { + type PartialJobRequest=Signature; + type PartialJobResponse=bool; + + fn send_partial_request(&self, node: &NodeId, request: Signature) -> Result<(), Error> { + self.cluster.send(node, Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(DecryptionConsensusMessage { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + message: ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { + requestor_signature: request.into(), + }) + }))) + } + + fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { + self.cluster.send(node, Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(DecryptionConsensusMessage { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + message: ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: response, + }) + }))) + } +} + +impl JobTransport for DecryptionJobTransport { + type PartialJobRequest=PartialDecryptionRequest; + type PartialJobResponse=PartialDecryptionResponse; + + fn send_partial_request(&self, node: &NodeId, request: PartialDecryptionRequest) -> Result<(), Error> { + 
self.cluster.send(node, Message::Decryption(DecryptionMessage::RequestPartialDecryption(RequestPartialDecryption { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + request_id: request.id.into(), + is_shadow_decryption: request.is_shadow_decryption, + nodes: request.other_nodes_ids.into_iter().map(Into::into).collect(), + }))) + } + + fn send_partial_response(&self, node: &NodeId, response: PartialDecryptionResponse) -> Result<(), Error> { + self.cluster.send(node, Message::Decryption(DecryptionMessage::PartialDecryption(PartialDecryption { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + request_id: response.request_id.into(), + shadow_point: response.shadow_point.into(), + decrypt_shadow: response.decrypt_shadow, + }))) + } +} + impl DecryptionSessionId { /// Create new decryption session Id. pub fn new(session_id: SessionId, sub_session_id: Secret) -> Self { @@ -627,7 +454,6 @@ impl PartialOrd for DecryptionSessionId { } } - impl Ord for DecryptionSessionId { fn cmp(&self, other: &Self) -> Ordering { match self.id.cmp(&other.id) { @@ -637,78 +463,23 @@ impl Ord for DecryptionSessionId { } } - -fn check_encrypted_data(self_node_id: &Public, encrypted_data: &DocumentKeyShare) -> Result<(), Error> { - use key_server_cluster::encryption_session::{check_cluster_nodes, check_threshold}; - - let nodes = encrypted_data.id_numbers.keys().cloned().collect(); - check_cluster_nodes(self_node_id, &nodes)?; - check_threshold(encrypted_data.threshold, &nodes)?; - - Ok(()) -} - -fn process_initialization_response(encrypted_data: &DocumentKeyShare, data: &mut SessionData, node: &NodeId, check_result: bool) -> Result<(), Error> { - if !data.requested_nodes.remove(node) { - return Err(Error::InvalidMessage); - } - - match check_result { - true => { - data.confirmed_nodes.insert(node.clone()); - - // check if we have enough nodes to do a decryption? 
- if data.confirmed_nodes.len() == encrypted_data.threshold + 1 { - data.state = SessionState::WaitingForPartialDecryption; - } - }, - false => { - data.rejected_nodes.insert(node.clone()); - - // check if we still can receive enough confirmations to do a decryption? - if encrypted_data.id_numbers.len() - data.rejected_nodes.len() < encrypted_data.threshold + 1 { - data.decrypted_secret = Some(Err(Error::AccessDenied)); - data.state = SessionState::Failed; - } - }, - } - - Ok(()) -} - -fn do_partial_decryption(node: &NodeId, requestor_public: &Public, is_shadow_decryption: bool, participants: &BTreeSet, access_key: &Secret, encrypted_data: &DocumentKeyShare) -> Result { - let node_id_number = &encrypted_data.id_numbers[node]; - let node_secret_share = &encrypted_data.secret_share; - let other_id_numbers = participants.iter() - .filter(|id| *id != node) - .map(|id| &encrypted_data.id_numbers[id]); - let node_shadow = math::compute_node_shadow(node_id_number, node_secret_share, other_id_numbers)?; - let decrypt_shadow = if is_shadow_decryption { Some(math::generate_random_scalar()?) 
} else { None }; - let (shadow_point, decrypt_shadow) = math::compute_node_shadow_point(access_key, &encrypted_data.common_point, &node_shadow, decrypt_shadow)?; - Ok(PartialDecryptionResult { - shadow_point: shadow_point, - decrypt_shadow: match decrypt_shadow { - None => None, - Some(decrypt_shadow) => Some(encrypt(requestor_public, &DEFAULT_MAC, &**decrypt_shadow)?), - }, - }) -} - #[cfg(test)] mod tests { use std::sync::Arc; use std::collections::BTreeMap; use super::super::super::acl_storage::tests::DummyAclStorage; - use ethkey::{self, Random, Generator, Public, Secret}; - use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, Error, DocumentEncryptedKeyShadow}; + use ethkey::{self, KeyPair, Random, Generator, Public, Secret}; + use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, Error, EncryptedDocumentKeyShadow, SessionMeta}; use key_server_cluster::cluster::tests::DummyCluster; - use key_server_cluster::decryption_session::{SessionImpl, SessionParams, SessionState}; + use key_server_cluster::cluster_sessions::ClusterSession; + use key_server_cluster::decryption_session::{SessionImpl, SessionParams}; use key_server_cluster::message::{self, Message, DecryptionMessage}; use key_server_cluster::math; + use key_server_cluster::jobs::consensus_session::ConsensusSessionState; const SECRET_PLAIN: &'static str = "d2b57ae7619e070af0af6bc8c703c0cd27814c54d5d6a999cacac0da34ede279ca0d9216e85991029e54e2f0c92ee0bd30237725fa765cbdbfc4529489864c5f"; - fn prepare_decryption_sessions() -> (Vec>, Vec>, Vec) { + fn prepare_decryption_sessions() -> (KeyPair, Vec>, Vec>, Vec) { // prepare encrypted data + cluster configuration for scheme 4-of-5 let session_id = SessionId::default(); let access_key = Random.generate().unwrap().secret().clone(); @@ -734,11 +505,12 @@ mod tests { let common_point: Public = "6962be696e1bcbba8e64cc7fddf140f854835354b5804f3bb95ae5a2799130371b589a131bd39699ac7174ccb35fc4342dab05331202209582fc8f3a40916ab0".into(); let encrypted_point: 
Public = "b07031982bde9890e12eff154765f03c56c3ab646ad47431db5dd2d742a9297679c4c65b998557f8008469afd0c43d40b6c5f6c6a1c7354875da4115237ed87a".into(); let encrypted_datas: Vec<_> = (0..5).map(|i| DocumentKeyShare { + author: Public::default(), threshold: 3, id_numbers: id_numbers.clone().into_iter().collect(), secret_share: secret_shares[i].clone(), - common_point: common_point.clone(), - encrypted_point: encrypted_point.clone(), + common_point: Some(common_point.clone()), + encrypted_point: Some(encrypted_point.clone()), }).collect(); let acl_storages: Vec<_> = (0..5).map(|_| Arc::new(DummyAclStorage::default())).collect(); let clusters: Vec<_> = (0..5).map(|i| { @@ -748,23 +520,29 @@ mod tests { } cluster }).collect(); + let requester = Random.generate().unwrap(); + let signature = Some(ethkey::sign(requester.secret(), &SessionId::default()).unwrap()); let sessions: Vec<_> = (0..5).map(|i| SessionImpl::new(SessionParams { - id: session_id.clone(), + meta: SessionMeta { + id: session_id.clone(), + self_node_id: id_numbers.iter().nth(i).clone().unwrap().0, + master_node_id: id_numbers.iter().nth(0).clone().unwrap().0, + threshold: encrypted_datas[i].threshold, + }, access_key: access_key.clone(), - self_node_id: id_numbers.iter().nth(i).clone().unwrap().0, - encrypted_data: encrypted_datas[i].clone(), + key_share: encrypted_datas[i].clone(), acl_storage: acl_storages[i].clone(), cluster: clusters[i].clone() - }).unwrap()).collect(); + }, if i == 0 { signature.clone() } else { None }).unwrap()).collect(); - (clusters, acl_storages, sessions) + (requester, clusters, acl_storages, sessions) } - fn do_messages_exchange(clusters: &[Arc], sessions: &[SessionImpl]) { - do_messages_exchange_until(clusters, sessions, |_, _, _| false); + fn do_messages_exchange(clusters: &[Arc], sessions: &[SessionImpl]) -> Result<(), Error> { + do_messages_exchange_until(clusters, sessions, |_, _, _| false) } - fn do_messages_exchange_until(clusters: &[Arc], sessions: &[SessionImpl], mut cond: 
F) where F: FnMut(&NodeId, &NodeId, &Message) -> bool { + fn do_messages_exchange_until(clusters: &[Arc], sessions: &[SessionImpl], mut cond: F) -> Result<(), Error> where F: FnMut(&NodeId, &NodeId, &Message) -> bool { while let Some((from, to, message)) = clusters.iter().filter_map(|c| c.take_message().map(|(to, msg)| (c.node(), to, msg))).next() { let session = &sessions[sessions.iter().position(|s| s.node() == &to).unwrap()]; if cond(&from, &to, &message) { @@ -772,14 +550,12 @@ mod tests { } match message { - Message::Decryption(DecryptionMessage::InitializeDecryptionSession(message)) => session.on_initialize_session(from, &message).unwrap(), - Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(message)) => session.on_confirm_initialization(from, &message).unwrap(), - Message::Decryption(DecryptionMessage::RequestPartialDecryption(message)) => session.on_partial_decryption_requested(from, &message).unwrap(), - Message::Decryption(DecryptionMessage::PartialDecryption(message)) => session.on_partial_decryption(from, &message).unwrap(), - Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(message)) => session.on_session_completed(from, &message).unwrap(), - _ => panic!("unexpected"), + Message::Decryption(message) => session.process_message(&from, &message)?, + _ => unreachable!(), } } + + Ok(()) } #[test] @@ -788,19 +564,24 @@ mod tests { let self_node_id = Random.generate().unwrap().public().clone(); nodes.insert(self_node_id, Random.generate().unwrap().secret().clone()); match SessionImpl::new(SessionParams { - id: SessionId::default(), + meta: SessionMeta { + id: SessionId::default(), + self_node_id: self_node_id.clone(), + master_node_id: self_node_id.clone(), + threshold: 0, + }, access_key: Random.generate().unwrap().secret().clone(), - self_node_id: self_node_id.clone(), - encrypted_data: DocumentKeyShare { + key_share: DocumentKeyShare { + author: Public::default(), threshold: 0, id_numbers: nodes, secret_share: 
Random.generate().unwrap().secret().clone(), - common_point: Random.generate().unwrap().public().clone(), - encrypted_point: Random.generate().unwrap().public().clone(), + common_point: Some(Random.generate().unwrap().public().clone()), + encrypted_point: Some(Random.generate().unwrap().public().clone()), }, acl_storage: Arc::new(DummyAclStorage::default()), cluster: Arc::new(DummyCluster::new(self_node_id.clone())), - }) { + }, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) { Ok(_) => (), _ => panic!("unexpected"), } @@ -813,19 +594,24 @@ mod tests { nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); match SessionImpl::new(SessionParams { - id: SessionId::default(), + meta: SessionMeta { + id: SessionId::default(), + self_node_id: self_node_id.clone(), + master_node_id: self_node_id.clone(), + threshold: 0, + }, access_key: Random.generate().unwrap().secret().clone(), - self_node_id: self_node_id.clone(), - encrypted_data: DocumentKeyShare { + key_share: DocumentKeyShare { + author: Public::default(), threshold: 0, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), - common_point: Random.generate().unwrap().public().clone(), - encrypted_point: Random.generate().unwrap().public().clone(), + common_point: Some(Random.generate().unwrap().public().clone()), + encrypted_point: Some(Random.generate().unwrap().public().clone()), }, acl_storage: Arc::new(DummyAclStorage::default()), cluster: Arc::new(DummyCluster::new(self_node_id.clone())), - }) { + }, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) { Err(Error::InvalidNodesConfiguration) => (), _ => panic!("unexpected"), } @@ -838,19 +624,24 @@ mod tests { nodes.insert(self_node_id.clone(), Random.generate().unwrap().secret().clone()); 
nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); match SessionImpl::new(SessionParams { - id: SessionId::default(), + meta: SessionMeta { + id: SessionId::default(), + self_node_id: self_node_id.clone(), + master_node_id: self_node_id.clone(), + threshold: 2, + }, access_key: Random.generate().unwrap().secret().clone(), - self_node_id: self_node_id.clone(), - encrypted_data: DocumentKeyShare { + key_share: DocumentKeyShare { + author: Public::default(), threshold: 2, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), - common_point: Random.generate().unwrap().public().clone(), - encrypted_point: Random.generate().unwrap().public().clone(), + common_point: Some(Random.generate().unwrap().public().clone()), + encrypted_point: Some(Random.generate().unwrap().public().clone()), }, acl_storage: Arc::new(DummyAclStorage::default()), cluster: Arc::new(DummyCluster::new(self_node_id.clone())), - }) { + }, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) { Err(Error::InvalidThreshold) => (), _ => panic!("unexpected"), } @@ -858,61 +649,69 @@ mod tests { #[test] fn fails_to_initialize_when_already_initialized() { - let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), false).unwrap(), ()); - assert_eq!(sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), false).unwrap_err(), Error::InvalidStateForRequest); + let (_, _, _, sessions) = prepare_decryption_sessions(); + assert_eq!(sessions[0].initialize(false).unwrap(), ()); + assert_eq!(sessions[0].initialize(false).unwrap_err(), Error::InvalidStateForRequest); } #[test] fn fails_to_accept_initialization_when_already_initialized() { - let (_, _, sessions) = prepare_decryption_sessions(); - 
assert_eq!(sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), false).unwrap(), ()); - assert_eq!(sessions[0].on_initialize_session(sessions[1].node().clone(), &message::InitializeDecryptionSession { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), - is_shadow_decryption: false, - }).unwrap_err(), Error::InvalidStateForRequest); + let (_, _, _, sessions) = prepare_decryption_sessions(); + assert_eq!(sessions[0].initialize(false).unwrap(), ()); + assert_eq!(sessions[0].on_consensus_message(sessions[1].node(), &message::DecryptionConsensusMessage { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession { + requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), + }), + }).unwrap_err(), Error::InvalidMessage); } #[test] fn fails_to_partial_decrypt_if_requested_by_slave() { - let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), &message::InitializeDecryptionSession { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), - is_shadow_decryption: false, + let (_, _, _, sessions) = prepare_decryption_sessions(); + assert_eq!(sessions[1].on_consensus_message(sessions[0].node(), &message::DecryptionConsensusMessage { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession { + requestor_signature: 
ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), + }), }).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[2].node().clone(), &message::RequestPartialDecryption { + assert_eq!(sessions[1].on_partial_decryption_requested(sessions[2].node(), &message::RequestPartialDecryption { session: SessionId::default().into(), sub_session: sessions[0].access_key().clone().into(), + request_id: Random.generate().unwrap().secret().clone().into(), + is_shadow_decryption: false, nodes: sessions.iter().map(|s| s.node().clone().into()).take(4).collect(), }).unwrap_err(), Error::InvalidMessage); } #[test] fn fails_to_partial_decrypt_if_wrong_number_of_nodes_participating() { - let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), &message::InitializeDecryptionSession { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), - is_shadow_decryption: false, + let (_, _, _, sessions) = prepare_decryption_sessions(); + assert_eq!(sessions[1].on_consensus_message(sessions[0].node(), &message::DecryptionConsensusMessage { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession { + requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), + }), }).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), &message::RequestPartialDecryption { + assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node(), &message::RequestPartialDecryption { session: SessionId::default().into(), sub_session: sessions[0].access_key().clone().into(), + request_id: 
Random.generate().unwrap().secret().clone().into(), + is_shadow_decryption: false, nodes: sessions.iter().map(|s| s.node().clone().into()).take(2).collect(), }).unwrap_err(), Error::InvalidMessage); } #[test] fn fails_to_accept_partial_decrypt_if_not_waiting() { - let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[0].on_partial_decryption(sessions[1].node().clone(), &message::PartialDecryption { + let (_, _, _, sessions) = prepare_decryption_sessions(); + assert_eq!(sessions[0].on_partial_decryption(sessions[1].node(), &message::PartialDecryption { session: SessionId::default().into(), sub_session: sessions[0].access_key().clone().into(), + request_id: Random.generate().unwrap().secret().clone().into(), shadow_point: Random.generate().unwrap().public().clone().into(), decrypt_shadow: None, }).unwrap_err(), Error::InvalidStateForRequest); @@ -920,8 +719,8 @@ mod tests { #[test] fn fails_to_accept_partial_decrypt_twice() { - let (clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), false).unwrap(); + let (_, clusters, _, sessions) = prepare_decryption_sessions(); + sessions[0].initialize(false).unwrap(); let mut pd_from = None; let mut pd_msg = None; @@ -932,130 +731,127 @@ mod tests { true }, _ => false, - }); + }).unwrap(); - assert_eq!(sessions[0].on_partial_decryption(pd_from.clone().unwrap(), &pd_msg.clone().unwrap()).unwrap(), ()); - assert_eq!(sessions[0].on_partial_decryption(pd_from.unwrap(), &pd_msg.unwrap()).unwrap_err(), Error::InvalidStateForRequest); + assert_eq!(sessions[0].on_partial_decryption(pd_from.as_ref().unwrap(), &pd_msg.clone().unwrap()).unwrap(), ()); + assert_eq!(sessions[0].on_partial_decryption(pd_from.as_ref().unwrap(), &pd_msg.unwrap()).unwrap_err(), Error::InvalidNodeForRequest); } #[test] fn decryption_fails_on_session_timeout() { - let (_, _, sessions) = prepare_decryption_sessions(); + let (_, _, _, 
sessions) = prepare_decryption_sessions(); assert!(sessions[0].decrypted_secret().is_none()); sessions[0].on_session_timeout(); - assert!(sessions[0].decrypted_secret().unwrap().unwrap_err() == Error::NodeDisconnected); + assert_eq!(sessions[0].decrypted_secret().unwrap().unwrap_err(), Error::ConsensusUnreachable); } #[test] fn node_is_marked_rejected_when_timed_out_during_initialization_confirmation() { - let (_, _, sessions) = prepare_decryption_sessions(); - sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), false).unwrap(); + let (_, _, _, sessions) = prepare_decryption_sessions(); + sessions[0].initialize(false).unwrap(); // 1 node disconnects => we still can recover secret sessions[0].on_node_timeout(sessions[1].node()); - assert!(sessions[0].data.lock().rejected_nodes.contains(sessions[1].node())); - assert!(sessions[0].data.lock().state == SessionState::WaitingForInitializationConfirm); + assert!(sessions[0].data.lock().consensus_session.consensus_job().rejects().contains(sessions[1].node())); + assert!(sessions[0].state() == ConsensusSessionState::EstablishingConsensus); // 2 node are disconnected => we can not recover secret sessions[0].on_node_timeout(sessions[2].node()); - assert!(sessions[0].data.lock().rejected_nodes.contains(sessions[2].node())); - assert!(sessions[0].data.lock().state == SessionState::Failed); + assert!(sessions[0].state() == ConsensusSessionState::Failed); } #[test] fn session_does_not_fail_if_rejected_node_disconnects() { - let (clusters, acl_storages, sessions) = prepare_decryption_sessions(); + let (_, clusters, acl_storages, sessions) = prepare_decryption_sessions(); let key_pair = Random.generate().unwrap(); acl_storages[1].prohibit(key_pair.public().clone(), SessionId::default()); - sessions[0].initialize(ethkey::sign(key_pair.secret(), &SessionId::default()).unwrap(), false).unwrap(); + sessions[0].initialize(false).unwrap(); - do_messages_exchange_until(&clusters, 
&sessions, |_, _, _| sessions[0].state() == SessionState::WaitingForPartialDecryption); + do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap(); // 1st node disconnects => ignore this sessions[0].on_node_timeout(sessions[1].node()); - assert!(sessions[0].data.lock().state == SessionState::WaitingForPartialDecryption); + assert_eq!(sessions[0].state(), ConsensusSessionState::EstablishingConsensus); } #[test] fn session_does_not_fail_if_requested_node_disconnects() { - let (clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), false).unwrap(); + let (_, clusters, _, sessions) = prepare_decryption_sessions(); + sessions[0].initialize(false).unwrap(); - do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == SessionState::WaitingForPartialDecryption); + do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap(); // 1 node disconnects => we still can recover secret sessions[0].on_node_timeout(sessions[1].node()); - assert!(sessions[0].data.lock().state == SessionState::WaitingForPartialDecryption); + assert!(sessions[0].state() == ConsensusSessionState::EstablishingConsensus); // 2 node are disconnected => we can not recover secret sessions[0].on_node_timeout(sessions[2].node()); - assert!(sessions[0].data.lock().state == SessionState::Failed); + assert!(sessions[0].state() == ConsensusSessionState::Failed); } #[test] fn session_does_not_fail_if_node_with_shadow_point_disconnects() { - let (clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), false).unwrap(); + let (_, clusters, _, sessions) = prepare_decryption_sessions(); + 
sessions[0].initialize(false).unwrap(); - do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == SessionState::WaitingForPartialDecryption - && sessions[0].data.lock().shadow_points.len() == 2); + do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults + && sessions[0].data.lock().consensus_session.computation_job().responses().len() == 2).unwrap(); // disconnects from the node which has already sent us its own shadow point let disconnected = sessions[0].data.lock(). - shadow_points.keys() + consensus_session.computation_job().responses().keys() .filter(|n| *n != sessions[0].node()) .cloned().nth(0).unwrap(); sessions[0].on_node_timeout(&disconnected); - assert!(sessions[0].data.lock().state == SessionState::WaitingForPartialDecryption); + assert_eq!(sessions[0].state(), ConsensusSessionState::EstablishingConsensus); } #[test] fn session_restarts_if_confirmed_node_disconnects() { - let (clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), false).unwrap(); + let (_, clusters, _, sessions) = prepare_decryption_sessions(); + sessions[0].initialize(false).unwrap(); - do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == SessionState::WaitingForPartialDecryption); + do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap(); // disconnects from the node which has already confirmed its participation - let disconnected = sessions[0].data.lock().shadow_requests.iter().cloned().nth(0).unwrap(); + let disconnected = sessions[0].data.lock().consensus_session.computation_job().requests().iter().cloned().nth(0).unwrap(); sessions[0].on_node_timeout(&disconnected); - assert!(sessions[0].data.lock().state == SessionState::WaitingForPartialDecryption); - 
assert!(sessions[0].data.lock().rejected_nodes.contains(&disconnected)); - assert!(!sessions[0].data.lock().shadow_requests.contains(&disconnected)); + assert_eq!(sessions[0].state(), ConsensusSessionState::EstablishingConsensus); + assert!(sessions[0].data.lock().consensus_session.computation_job().rejects().contains(&disconnected)); + assert!(!sessions[0].data.lock().consensus_session.computation_job().requests().contains(&disconnected)); } #[test] fn session_does_not_fail_if_non_master_node_disconnects_from_non_master_node() { - let (clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), false).unwrap(); + let (_, clusters, _, sessions) = prepare_decryption_sessions(); + sessions[0].initialize(false).unwrap(); - do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == SessionState::WaitingForPartialDecryption); + do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap(); // disconnects from the node which has already confirmed its participation sessions[1].on_node_timeout(sessions[2].node()); - assert!(sessions[0].data.lock().state == SessionState::WaitingForPartialDecryption); - assert!(sessions[1].data.lock().state == SessionState::WaitingForPartialDecryptionRequest); + assert!(sessions[0].state() == ConsensusSessionState::WaitingForPartialResults); + assert!(sessions[1].state() == ConsensusSessionState::ConsensusEstablished); } #[test] fn complete_dec_session() { - let (clusters, _, sessions) = prepare_decryption_sessions(); + let (_, clusters, _, sessions) = prepare_decryption_sessions(); // now let's try to do a decryption - let key_pair = Random.generate().unwrap(); - let signature = ethkey::sign(key_pair.secret(), &SessionId::default()).unwrap(); - sessions[0].initialize(signature, false).unwrap(); + sessions[0].initialize(false).unwrap(); - 
do_messages_exchange(&clusters, &sessions); + do_messages_exchange(&clusters, &sessions).unwrap(); // now check that: // 1) 5 of 5 sessions are in Finished state - assert_eq!(sessions.iter().filter(|s| s.state() == SessionState::Finished).count(), 5); + assert_eq!(sessions.iter().filter(|s| s.state() == ConsensusSessionState::Finished).count(), 5); // 2) 1 session has decrypted key value assert!(sessions.iter().skip(1).all(|s| s.decrypted_secret().is_none())); - assert_eq!(sessions[0].decrypted_secret().unwrap().unwrap(), DocumentEncryptedKeyShadow { + assert_eq!(sessions[0].decrypted_secret().unwrap().unwrap(), EncryptedDocumentKeyShadow { decrypted_secret: SECRET_PLAIN.into(), common_point: None, decrypt_shadows: None, @@ -1064,18 +860,16 @@ mod tests { #[test] fn complete_shadow_dec_session() { - let (clusters, _, sessions) = prepare_decryption_sessions(); + let (key_pair, clusters, _, sessions) = prepare_decryption_sessions(); // now let's try to do a decryption - let key_pair = Random.generate().unwrap(); - let signature = ethkey::sign(key_pair.secret(), &SessionId::default()).unwrap(); - sessions[0].initialize(signature, true).unwrap(); + sessions[0].initialize(true).unwrap(); - do_messages_exchange(&clusters, &sessions); + do_messages_exchange(&clusters, &sessions).unwrap(); // now check that: // 1) 5 of 5 sessions are in Finished state - assert_eq!(sessions.iter().filter(|s| s.state() == SessionState::Finished).count(), 5); + assert_eq!(sessions.iter().filter(|s| s.state() == ConsensusSessionState::Finished).count(), 5); // 2) 1 session has decrypted key value assert!(sessions.iter().skip(1).all(|s| s.decrypted_secret().is_none())); @@ -1097,51 +891,42 @@ mod tests { #[test] fn failed_dec_session() { - let (clusters, acl_storages, sessions) = prepare_decryption_sessions(); + let (key_pair, clusters, acl_storages, sessions) = prepare_decryption_sessions(); // now let's try to do a decryption - let key_pair = Random.generate().unwrap(); - let signature = 
ethkey::sign(key_pair.secret(), &SessionId::default()).unwrap(); - sessions[0].initialize(signature, false).unwrap(); + sessions[0].initialize(false).unwrap(); // we need 4 out of 5 nodes to agree to do a decryption // let's say that 2 of these nodes are disagree acl_storages[1].prohibit(key_pair.public().clone(), SessionId::default()); acl_storages[2].prohibit(key_pair.public().clone(), SessionId::default()); - let node3 = sessions[3].node().clone(); - do_messages_exchange_until(&clusters, &sessions, |from, _, _msg| from == &node3); + assert_eq!(do_messages_exchange(&clusters, &sessions).unwrap_err(), Error::ConsensusUnreachable); - // now check that: - // 1) 3 of 5 sessions are in Failed state - assert_eq!(sessions.iter().filter(|s| s.state() == SessionState::Failed).count(), 3); - // 2) 2 of 5 sessions are in WaitingForPartialDecryptionRequest state - assert_eq!(sessions.iter().filter(|s| s.state() == SessionState::WaitingForPartialDecryptionRequest).count(), 2); - // 3) 0 sessions have decrypted key value - assert!(sessions.iter().all(|s| s.decrypted_secret().is_none() || s.decrypted_secret().unwrap().is_err())); + // check that 3 nodes have failed state + assert_eq!(sessions[0].state(), ConsensusSessionState::Failed); + assert_eq!(sessions.iter().filter(|s| s.state() == ConsensusSessionState::Failed).count(), 3); } #[test] fn complete_dec_session_with_acl_check_failed_on_master() { - let (clusters, acl_storages, sessions) = prepare_decryption_sessions(); + let (key_pair, clusters, acl_storages, sessions) = prepare_decryption_sessions(); // we need 4 out of 5 nodes to agree to do a decryption // let's say that 1 of these nodes (master) is disagree - let key_pair = Random.generate().unwrap(); acl_storages[0].prohibit(key_pair.public().clone(), SessionId::default()); // now let's try to do a decryption - let signature = ethkey::sign(key_pair.secret(), &SessionId::default()).unwrap(); - sessions[0].initialize(signature, false).unwrap(); + 
sessions[0].initialize(false).unwrap(); - do_messages_exchange(&clusters, &sessions); + do_messages_exchange(&clusters, &sessions).unwrap(); // now check that: // 1) 4 of 5 sessions are in Finished state - assert_eq!(sessions.iter().filter(|s| s.state() == SessionState::Finished).count(), 5); + assert_eq!(sessions.iter().filter(|s| s.state() == ConsensusSessionState::Finished).count(), 5); // 2) 1 session has decrypted key value assert!(sessions.iter().skip(1).all(|s| s.decrypted_secret().is_none())); - assert_eq!(sessions[0].decrypted_secret().unwrap().unwrap(), DocumentEncryptedKeyShadow { + assert_eq!(sessions[0].decrypted_secret().unwrap().unwrap(), EncryptedDocumentKeyShadow { decrypted_secret: SECRET_PLAIN.into(), common_point: None, decrypt_shadows: None, diff --git a/secret_store/src/key_server_cluster/encryption_session.rs b/secret_store/src/key_server_cluster/encryption_session.rs index 0268bf596..b61594925 100644 --- a/secret_store/src/key_server_cluster/encryption_session.rs +++ b/secret_store/src/key_server_cluster/encryption_session.rs @@ -14,44 +14,41 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::collections::{BTreeSet, BTreeMap, VecDeque}; +use std::collections::BTreeMap; use std::fmt::{Debug, Formatter, Error as FmtError}; use std::time; use std::sync::Arc; use parking_lot::{Condvar, Mutex}; -use ethkey::{Public, Secret}; +use ethkey::{self, Public, Signature}; use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare}; -use key_server_cluster::math; use key_server_cluster::cluster::Cluster; -use key_server_cluster::message::{Message, EncryptionMessage, InitializeSession, ConfirmInitialization, CompleteInitialization, - KeysDissemination, PublicKeyShare, SessionError, SessionCompleted}; +use key_server_cluster::cluster_sessions::ClusterSession; +use key_server_cluster::message::{Message, EncryptionMessage, InitializeEncryptionSession, + ConfirmEncryptionInitialization, EncryptionSessionError}; /// Encryption session API. pub trait Session: Send + Sync + 'static { /// Get encryption session state. fn state(&self) -> SessionState; /// Wait until session is completed. Returns distributely generated secret key. - fn wait(&self, timeout: Option) -> Result; - - #[cfg(test)] - /// Get joint public key (if it is known). - fn joint_public_key(&self) -> Option>; + fn wait(&self, timeout: Option) -> Result<(), Error>; } /// Encryption (distributed key generation) session. 
/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: /// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf /// Brief overview: -/// 1) initialization: master node (which has received request for generating joint public + secret) initializes the session on all other nodes -/// 2) key dissemination (KD): all nodes are generating secret + public values and send these to appropriate nodes -/// 3) key verification (KV): all nodes are checking values, received for other nodes -/// 4) key generation phase (KG): nodes are exchanging with information, enough to generate joint public key -/// 5) encryption phase: master node generates secret key, encrypts it using joint public && broadcasts encryption result +/// 1) initialization: master node (which has received request for storing the secret) initializes the session on all other nodes +/// 2) master node sends common_point + encrypted_point to all other nodes +/// 3) common_point + encrypted_point are saved on all nodes +/// 4) in case of error, previous values are restored pub struct SessionImpl { /// Unique session id. id: SessionId, /// Public identifier of this node. self_node_id: NodeId, + /// Encrypted data. + encrypted_data: DocumentKeyShare, /// Key storage. key_storage: Arc, /// Cluster which allows this node to send messages to other nodes in the cluster. @@ -68,6 +65,8 @@ pub struct SessionParams { pub id: SessionId, /// Id of node, on which this session is running. pub self_node_id: Public, + /// Encrypted data (result of running generation_session::SessionImpl). + pub encrypted_data: DocumentKeyShare, /// Key storage. pub key_storage: Arc, /// Cluster @@ -79,73 +78,18 @@ pub struct SessionParams { struct SessionData { /// Current state of the session. state: SessionState, - /// Simulate faulty behaviour? 
- simulate_faulty_behaviour: bool, - - // === Values, filled when session initialization just starts === - /// Reference to the node, which has started this session. - master: Option, - - // === Values, filled when session initialization is completed === - /// Threshold value for this DKG. Only `threshold + 1` will be able to collectively recreate joint secret, - /// and thus - decrypt message, encrypted with joint public. - threshold: Option, - /// Random point, jointly generated by every node in the cluster. - derived_point: Option, /// Nodes-specific data. nodes: BTreeMap, - - // === Values, filled during KD phase === - /// Value of polynom1[0], generated by this node. - secret_coeff: Option, - - // === Values, filled during KG phase === - /// Secret share, which this node holds. Persistent + private. - secret_share: Option, - - /// === Values, filled when DKG session is completed successfully === - /// Jointly generated public key, which can be used to encrypt secret. Public. - joint_public: Option>, - /// Secret point. - secret_point: Option>, + /// Encryption session result. + result: Option>, } #[derive(Debug, Clone)] /// Mutable node-specific data. struct NodeData { - /// Random unique scalar. Persistent. - pub id_number: Secret, - - // === Values, filled during KD phase === - /// Secret value1, which has been sent to this node. - pub secret1_sent: Option, - /// Secret value2, which has been sent to this node. - pub secret2_sent: Option, - /// Secret value1, which has been received from this node. - pub secret1: Option, - /// Secret value2, which has been received from this node. - pub secret2: Option, - /// Public values, which have been received from this node. - pub publics: Option>, - - // === Values, filled during KG phase === - /// Public share, which has been received from this node. 
- pub public_share: Option, - - // === Values, filled during encryption phase === - /// Flags marking that node has confirmed session completion (encryption data is stored). - pub completion_confirmed: bool, -} - -#[derive(Debug, Clone, PartialEq)] -/// Schedule for visiting other nodes of cluster. -pub struct EveryOtherNodeVisitor { - /// Already visited nodes. - visited: BTreeSet, - /// Not yet visited nodes. - unvisited: VecDeque, - /// Nodes, which are currently visited. - in_progress: BTreeSet, + // === Values, filled during initialization phase === + /// Flags marking that node has confirmed session initialization. + pub initialization_confirmed: bool, } #[derive(Debug, Clone, PartialEq)] @@ -154,53 +98,34 @@ pub enum SessionState { // === Initialization states === /// Every node starts in this state. WaitingForInitialization, - /// Master node asks every other node to confirm initialization. - /// Derived point is generated by all nodes in the cluster. - WaitingForInitializationConfirm(EveryOtherNodeVisitor), - /// Slave nodes are in this state until initialization completion is reported by master node. - WaitingForInitializationComplete, - - // === KD phase states === - /// Node is waiting for generated keys from every other node. - WaitingForKeysDissemination, - - // === KG phase states === - /// Node is waiting for joint public key share to be received from every other node. - WaitingForPublicKeyShare, - - // === Encryption phase states === - /// Node is waiting for session completion/session completion confirmation. - WaitingForEncryptionConfirmation, + /// Master node waits for every other node to confirm initialization. + WaitingForInitializationConfirm, // === Final states of the session === - /// Joint public key generation is completed. + /// Encryption data is saved. Finished, - /// Joint public key generation is failed. + /// Failed to save encryption data. Failed, } impl SessionImpl { /// Create new encryption session. 
- pub fn new(params: SessionParams) -> Self { - SessionImpl { + pub fn new(params: SessionParams) -> Result { + check_encrypted_data(¶ms.self_node_id, ¶ms.encrypted_data)?; + + Ok(SessionImpl { id: params.id, self_node_id: params.self_node_id, + encrypted_data: params.encrypted_data, key_storage: params.key_storage, cluster: params.cluster, completed: Condvar::new(), data: Mutex::new(SessionData { state: SessionState::WaitingForInitialization, - simulate_faulty_behaviour: false, - master: None, - threshold: None, - derived_point: None, nodes: BTreeMap::new(), - secret_coeff: None, - secret_share: None, - joint_public: None, - secret_point: None, + result: None, }), - } + }) } /// Get this node Id. @@ -208,22 +133,8 @@ impl SessionImpl { &self.self_node_id } - #[cfg(test)] - /// Get derived point. - pub fn derived_point(&self) -> Option { - self.data.lock().derived_point.clone() - } - - /// Simulate faulty encryption session behaviour. - pub fn simulate_faulty_behaviour(&self) { - self.data.lock().simulate_faulty_behaviour = true; - } - /// Start new session initialization. This must be called on master node. 
- pub fn initialize(&self, threshold: usize, nodes: BTreeSet) -> Result<(), Error> { - check_cluster_nodes(self.node(), &nodes)?; - check_threshold(threshold, &nodes)?; - + pub fn initialize(&self, requestor_signature: Signature, common_point: Public, encrypted_point: Public) -> Result<(), Error> { let mut data = self.data.lock(); // check state @@ -231,39 +142,48 @@ impl SessionImpl { return Err(Error::InvalidStateForRequest); } - // update state - data.master = Some(self.node().clone()); - data.threshold = Some(threshold); - for node_id in &nodes { - // generate node identification parameter - let node_id_number = math::generate_random_scalar()?; - data.nodes.insert(node_id.clone(), NodeData::with_id_number(node_id_number)); + // check that the requester is the author of the encrypted data + let requestor_public = ethkey::recover(&requestor_signature, &self.id)?; + if self.encrypted_data.author != requestor_public { + return Err(Error::AccessDenied); } - let mut visit_policy = EveryOtherNodeVisitor::new(self.node(), data.nodes.keys().cloned()); - let derived_point = math::generate_random_point()?; - match visit_policy.next_node() { - Some(next_node) => { - data.state = SessionState::WaitingForInitializationConfirm(visit_policy); + // update state + data.state = SessionState::WaitingForInitializationConfirm; + for node_id in self.encrypted_data.id_numbers.keys() { + data.nodes.insert(node_id.clone(), NodeData { + initialization_confirmed: node_id == self.node(), + }); + } - // start initialization - self.cluster.send(&next_node, Message::Encryption(EncryptionMessage::InitializeSession(InitializeSession { - session: self.id.clone().into(), - derived_point: derived_point.into(), - }))) - }, - None => { - drop(data); - self.complete_initialization(derived_point)?; - self.disseminate_keys()?; - self.verify_keys()?; - self.complete_encryption() - } + // TODO: there could be a situation when some nodes have failed to store encrypted data + // => potential problems during
restore. some confirmation step is needed? + // save encryption data + let mut encrypted_data = self.encrypted_data.clone(); + encrypted_data.common_point = Some(common_point.clone()); + encrypted_data.encrypted_point = Some(encrypted_point.clone()); + self.key_storage.update(self.id.clone(), encrypted_data) + .map_err(|e| Error::KeyStorage(e.into()))?; + + // start initialization + if self.encrypted_data.id_numbers.len() > 1 { + self.cluster.broadcast(Message::Encryption(EncryptionMessage::InitializeEncryptionSession(InitializeEncryptionSession { + session: self.id.clone().into(), + requestor_signature: requestor_signature.into(), + common_point: common_point.into(), + encrypted_point: encrypted_point.into(), + }))) + } else { + data.state = SessionState::Finished; + data.result = Some(Ok(())); + self.completed.notify_all(); + + Ok(()) } } /// When session initialization message is received. - pub fn on_initialize_session(&self, sender: NodeId, message: &InitializeSession) -> Result<(), Error> { + pub fn on_initialize_session(&self, sender: NodeId, message: &InitializeEncryptionSession) -> Result<(), Error> { debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); @@ -274,957 +194,129 @@ impl SessionImpl { return Err(Error::InvalidStateForRequest); } - // update derived point with random scalar - let mut derived_point = message.derived_point.clone().into(); - math::update_random_point(&mut derived_point)?; + // check that the requester is the author of the encrypted data + let requestor_public = ethkey::recover(&message.requestor_signature.clone().into(), &self.id)?; + if self.encrypted_data.author != requestor_public { + return Err(Error::AccessDenied); + } - // send confirmation back to master node - self.cluster.send(&sender, Message::Encryption(EncryptionMessage::ConfirmInitialization(ConfirmInitialization { - session: self.id.clone().into(), - derived_point: derived_point.into(), - })))?; + // save encryption data + let mut 
encrypted_data = self.encrypted_data.clone(); + encrypted_data.common_point = Some(message.common_point.clone().into()); + encrypted_data.encrypted_point = Some(message.encrypted_point.clone().into()); + self.key_storage.update(self.id.clone(), encrypted_data) + .map_err(|e| Error::KeyStorage(e.into()))?; // update state - data.master = Some(sender); - data.state = SessionState::WaitingForInitializationComplete; + data.state = SessionState::Finished; - Ok(()) + // send confirmation back to master node + self.cluster.send(&sender, Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization(ConfirmEncryptionInitialization { + session: self.id.clone().into(), + }))) } /// When session initialization confirmation message is reeived. - pub fn on_confirm_initialization(&self, sender: NodeId, message: &ConfirmInitialization) -> Result<(), Error> { + pub fn on_confirm_initialization(&self, sender: NodeId, message: &ConfirmEncryptionInitialization) -> Result<(), Error> { debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); debug_assert!(data.nodes.contains_key(&sender)); - // check state && select new node to be initialized - let next_receiver = match data.state { - SessionState::WaitingForInitializationConfirm(ref mut visit_policy) => { - if !visit_policy.mark_visited(&sender) { - return Err(Error::InvalidStateForRequest); - } - - visit_policy.next_node() - }, - _ => return Err(Error::InvalidStateForRequest), - }; - - // proceed message - if let Some(next_receiver) = next_receiver { - return self.cluster.send(&next_receiver, Message::Encryption(EncryptionMessage::InitializeSession(InitializeSession { - session: self.id.clone().into(), - derived_point: message.derived_point.clone().into(), - }))); - } - - // now it is time for keys dissemination (KD) phase - drop(data); - self.complete_initialization(message.derived_point.clone().into())?; - self.disseminate_keys() - } - - /// When session 
initialization completion message is received. - pub fn on_complete_initialization(&self, sender: NodeId, message: &CompleteInitialization) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); - - // check message - let nodes_ids = message.nodes.keys().cloned().map(Into::into).collect(); - check_cluster_nodes(self.node(), &nodes_ids)?; - check_threshold(message.threshold, &nodes_ids)?; - - let mut data = self.data.lock(); - - // check state - if data.state != SessionState::WaitingForInitializationComplete { - return Err(Error::InvalidStateForRequest); - } - if data.master != Some(sender) { - return Err(Error::InvalidMessage); - } - - // remember passed data - data.threshold = Some(message.threshold); - data.derived_point = Some(message.derived_point.clone().into()); - data.nodes = message.nodes.iter().map(|(id, number)| (id.clone().into(), NodeData::with_id_number(number.clone().into()))).collect(); - - // now it is time for keys dissemination (KD) phase - drop(data); - self.disseminate_keys() - } - - /// When keys dissemination message is received. 
- pub fn on_keys_dissemination(&self, sender: NodeId, message: &KeysDissemination) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); - - let mut data = self.data.lock(); - - // simulate failure, if required - if data.simulate_faulty_behaviour { - return Err(Error::Io("simulated error".into())); - } - - // check state - if data.state != SessionState::WaitingForKeysDissemination { - match data.state { - SessionState::WaitingForInitializationComplete => return Err(Error::TooEarlyForRequest), - _ => return Err(Error::InvalidStateForRequest), - } - } - debug_assert!(data.nodes.contains_key(&sender)); - - // check message - let threshold = data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"); - if message.publics.len() != threshold + 1 { - return Err(Error::InvalidMessage); - } - - // update node data - { - let node_data = data.nodes.get_mut(&sender).ok_or(Error::InvalidMessage)?; - if node_data.secret1.is_some() || node_data.secret2.is_some() || node_data.publics.is_some() { - return Err(Error::InvalidStateForRequest); - } - - node_data.secret1 = Some(message.secret1.clone().into()); - node_data.secret2 = Some(message.secret2.clone().into()); - node_data.publics = Some(message.publics.iter().cloned().map(Into::into).collect()); - } - - // check if we have received keys from every other node - if data.nodes.iter().any(|(node_id, node_data)| node_id != self.node() && (node_data.publics.is_none() || node_data.secret1.is_none() || node_data.secret2.is_none())) { - return Ok(()) - } - - drop(data); - self.verify_keys() - } - - /// When public key share is received. 
- pub fn on_public_key_share(&self, sender: NodeId, message: &PublicKeyShare) -> Result<(), Error> { - let mut data = self.data.lock(); - - // check state - if data.state != SessionState::WaitingForPublicKeyShare { - match data.state { - SessionState::WaitingForInitializationComplete | - SessionState::WaitingForKeysDissemination => return Err(Error::TooEarlyForRequest), - _ => return Err(Error::InvalidStateForRequest), - } - } - - // update node data with received public share - { - let node_data = &mut data.nodes.get_mut(&sender).ok_or(Error::InvalidMessage)?; - if node_data.public_share.is_some() { - return Err(Error::InvalidMessage); - } - - node_data.public_share = Some(message.public_share.clone().into()); - } - - // if there's also nodes, which has not sent us their public shares - do nothing - if data.nodes.iter().any(|(node_id, node_data)| node_id != self.node() && node_data.public_share.is_none()) { + // check if all nodes have confirmed initialization + data.nodes.get_mut(&sender) + .expect("message is received from cluster; nodes contains all cluster nodes; qed") + .initialization_confirmed = true; + if !data.nodes.values().all(|n| n.initialization_confirmed) { return Ok(()); } - drop(data); - self.complete_encryption() - } - - /// When session completion message is received. 
- pub fn on_session_completed(&self, sender: NodeId, message: &SessionCompleted) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); - - let mut data = self.data.lock(); - debug_assert!(data.nodes.contains_key(&sender)); - - // check state - if data.state != SessionState::WaitingForEncryptionConfirmation { - match data.state { - SessionState::WaitingForPublicKeyShare => return Err(Error::TooEarlyForRequest), - _ => return Err(Error::InvalidStateForRequest), - } - } - - // if we are not masters, save result and respond with confirmation - if data.master.as_ref() != Some(self.node()) { - // check that we have received message from master - if data.master.as_ref() != Some(&sender) { - return Err(Error::InvalidMessage); - } - - // save encrypted data to key storage - let encrypted_data = DocumentKeyShare { - threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), - id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(), - secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(), - common_point: message.common_point.clone().into(), - encrypted_point: message.encrypted_point.clone().into(), - }; - self.key_storage.insert(self.id.clone(), encrypted_data.clone()) - .map_err(|e| Error::KeyStorage(e.into()))?; - - // then respond with confirmation - data.state = SessionState::Finished; - return self.cluster.send(&sender, Message::Encryption(EncryptionMessage::SessionCompleted(SessionCompleted { - session: self.id.clone().into(), - common_point: encrypted_data.common_point.clone().into(), - encrypted_point: encrypted_data.encrypted_point.clone().into(), - }))); - } - - // remember that we have received confirmation from sender node - { - let sender_node = data.nodes.get_mut(&sender).expect("node is always qualified by 
himself; qed"); - if sender_node.completion_confirmed { - return Err(Error::InvalidMessage); - } - - sender_node.completion_confirmed = true; - } - - // check if we have received confirmations from all cluster nodes - if data.nodes.iter().any(|(_, node_data)| !node_data.completion_confirmed) { - return Ok(()) - } - - // we have received enough confirmations => complete session + // update state data.state = SessionState::Finished; + data.result = Some(Ok(())); self.completed.notify_all(); Ok(()) } /// When error has occured on another node. - pub fn on_session_error(&self, sender: NodeId, message: &SessionError) -> Result<(), Error> { + pub fn on_session_error(&self, sender: NodeId, message: &EncryptionSessionError) -> Result<(), Error> { let mut data = self.data.lock(); warn!("{}: encryption session failed with error: {} from {}", self.node(), message.error, sender); data.state = SessionState::Failed; - data.joint_public = Some(Err(Error::Io(message.error.clone()))); - data.secret_point = Some(Err(Error::Io(message.error.clone()))); + data.result = Some(Err(Error::Io(message.error.clone()))); self.completed.notify_all(); Ok(()) } - - /// When connection to one of cluster nodes has timeouted. - pub fn on_node_timeout(&self, node: &NodeId) { - let mut data = self.data.lock(); - - // all nodes are required for encryption session - // => fail without check - warn!("{}: encryption session failed because {} connection has timeouted", self.node(), node); - - data.state = SessionState::Failed; - data.joint_public = Some(Err(Error::NodeDisconnected)); - data.secret_point = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); - } - - /// When session timeout has occured. 
- pub fn on_session_timeout(&self) { - let mut data = self.data.lock(); - - warn!("{}: encryption session failed with timeout", self.node()); - - data.state = SessionState::Failed; - data.joint_public = Some(Err(Error::NodeDisconnected)); - data.secret_point = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); - } - - /// Complete initialization (when all other nodex has responded with confirmation) - fn complete_initialization(&self, mut derived_point: Public) -> Result<(), Error> { - // update point once again to make sure that derived point is not generated by last node - math::update_random_point(&mut derived_point)?; - - // remember derived point - let mut data = self.data.lock(); - data.derived_point = Some(derived_point.clone().into()); - - // broadcast derived point && other session paraeters to every other node - self.cluster.broadcast(Message::Encryption(EncryptionMessage::CompleteInitialization(CompleteInitialization { - session: self.id.clone().into(), - nodes: data.nodes.iter().map(|(id, data)| (id.clone().into(), data.id_number.clone().into())).collect(), - threshold: data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"), - derived_point: derived_point.into(), - }))) - } - - /// Keys dissemination (KD) phase - fn disseminate_keys(&self) -> Result<(), Error> { - let mut data = self.data.lock(); - - // pick 2t + 2 random numbers as polynomial coefficients for 2 polynoms - let threshold = data.threshold.expect("threshold is filled on initialization phase; KD phase follows initialization phase; qed"); - let polynom1 = math::generate_random_polynom(threshold)?; - let polynom2 = math::generate_random_polynom(threshold)?; - data.secret_coeff = Some(polynom1[0].clone()); - - // compute t+1 public values - let publics = math::public_values_generation(threshold, - data.derived_point.as_ref().expect("keys dissemination occurs after derived point is agreed; qed"), - &polynom1, - 
&polynom2)?; - - // compute secret values for every other node - for (node, node_data) in data.nodes.iter_mut() { - let secret1 = math::compute_polynom(&polynom1, &node_data.id_number)?; - let secret2 = math::compute_polynom(&polynom2, &node_data.id_number)?; - - // send a message containing secret1 && secret2 to other node - if node != self.node() { - node_data.secret1_sent = Some(secret1.clone()); - node_data.secret2_sent = Some(secret2.clone()); - - self.cluster.send(&node, Message::Encryption(EncryptionMessage::KeysDissemination(KeysDissemination { - session: self.id.clone().into(), - secret1: secret1.into(), - secret2: secret2.into(), - publics: publics.iter().cloned().map(Into::into).collect(), - })))?; - } else { - node_data.secret1 = Some(secret1); - node_data.secret2 = Some(secret2); - node_data.publics = Some(publics.clone()); - } - } - - // update state - data.state = SessionState::WaitingForKeysDissemination; - - Ok(()) - } - - /// Keys verification (KV) phase - fn verify_keys(&self) -> Result<(), Error> { - let mut data = self.data.lock(); - - // key verification (KV) phase: check that other nodes have passed correct secrets - let threshold = data.threshold.expect("threshold is filled in initialization phase; KV phase follows initialization phase; qed"); - let derived_point = data.derived_point.clone().expect("derived point generated on initialization phase; KV phase follows initialization phase; qed"); - let number_id = data.nodes[self.node()].id_number.clone(); - for (_ , node_data) in data.nodes.iter_mut().filter(|&(node_id, _)| node_id != self.node()) { - let secret1 = node_data.secret1.as_ref().expect("keys received on KD phase; KV phase follows KD phase; qed"); - let secret2 = node_data.secret2.as_ref().expect("keys received on KD phase; KV phase follows KD phase; qed"); - let publics = node_data.publics.as_ref().expect("keys received on KD phase; KV phase follows KD phase; qed"); - let is_key_verification_ok = math::keys_verification(threshold, 
&derived_point, &number_id, - secret1, secret2, publics)?; - - if !is_key_verification_ok { - // node has sent us incorrect values. In original ECDKG protocol we should have sent complaint here. - return Err(Error::InvalidMessage); - } - } - - // calculate public share - let self_public_share = { - let self_secret_coeff = data.secret_coeff.as_ref().expect("secret_coeff is generated on KD phase; KG phase follows KD phase; qed"); - math::compute_public_share(self_secret_coeff)? - }; - - // calculate self secret + public shares - let self_secret_share = { - let secret_values_iter = data.nodes.values() - .map(|n| n.secret1.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed")); - math::compute_secret_share(secret_values_iter)? - }; - - // update state - data.state = SessionState::WaitingForPublicKeyShare; - data.secret_share = Some(self_secret_share); - let self_node = data.nodes.get_mut(self.node()).expect("node is always qualified by himself; qed"); - self_node.public_share = Some(self_public_share.clone()); - - // broadcast self public key share - self.cluster.broadcast(Message::Encryption(EncryptionMessage::PublicKeyShare(PublicKeyShare { - session: self.id.clone().into(), - public_share: self_public_share.into(), - }))) - } - - /// Complete encryption - fn complete_encryption(&self) -> Result<(), Error> { - let mut data = self.data.lock(); - - // else - calculate joint public key - let joint_public = { - let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed")); - math::compute_joint_public(public_shares)? 
- }; - - // if we are at the slave node - wait for session completion - if data.master.as_ref() != Some(self.node()) { - data.joint_public = Some(Ok(joint_public)); - data.state = SessionState::WaitingForEncryptionConfirmation; - return Ok(()); - } - - // then generate secret point - // then encrypt secret point with joint public key - // TODO: secret is revealed to KeyServer here - let secret_point = math::generate_random_point()?; - let encrypted_secret_point = math::encrypt_secret(&secret_point, &joint_public)?; - - // then save encrypted data to the key storage - let encrypted_data = DocumentKeyShare { - threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), - id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(), - secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(), - common_point: encrypted_secret_point.common_point, - encrypted_point: encrypted_secret_point.encrypted_point, - }; - self.key_storage.insert(self.id.clone(), encrypted_data.clone()) - .map_err(|e| Error::KeyStorage(e.into()))?; - - // then distribute encrypted data to every other node - self.cluster.broadcast(Message::Encryption(EncryptionMessage::SessionCompleted(SessionCompleted { - session: self.id.clone().into(), - common_point: encrypted_data.common_point.clone().into(), - encrypted_point: encrypted_data.encrypted_point.clone().into(), - })))?; - - // then wait for confirmation from all other nodes - { - let self_node = data.nodes.get_mut(self.node()).expect("node is always qualified by himself; qed"); - self_node.completion_confirmed = true; - } - data.joint_public = Some(Ok(joint_public)); - data.secret_point = Some(Ok(secret_point)); - data.state = SessionState::WaitingForEncryptionConfirmation; - - Ok(()) - } } -impl Session for SessionImpl { - #[cfg(test)] - fn 
joint_public_key(&self) -> Option> { - self.data.lock().joint_public.clone() +impl ClusterSession for SessionImpl { + fn is_finished(&self) -> bool { + let data = self.data.lock(); + data.state == SessionState::Failed + || data.state == SessionState::Finished } + fn on_node_timeout(&self, node: &NodeId) { + let mut data = self.data.lock(); + + warn!("{}: encryption session failed because {} connection has timeouted", self.node(), node); + + data.state = SessionState::Failed; + data.result = Some(Err(Error::NodeDisconnected)); + self.completed.notify_all(); + } + + fn on_session_timeout(&self) { + let mut data = self.data.lock(); + + warn!("{}: encryption session failed with timeout", self.node()); + + data.state = SessionState::Failed; + data.result = Some(Err(Error::NodeDisconnected)); + self.completed.notify_all(); + } +} + +impl Session for SessionImpl { fn state(&self) -> SessionState { self.data.lock().state.clone() } - fn wait(&self, timeout: Option) -> Result { + fn wait(&self, timeout: Option) -> Result<(), Error> { let mut data = self.data.lock(); - if !data.secret_point.is_some() { + if !data.result.is_some() { match timeout { None => self.completed.wait(&mut data), Some(timeout) => { self.completed.wait_for(&mut data, timeout); }, } } - data.secret_point.as_ref() - .expect("checked above or waited for completed; completed is only signaled when secret_point.is_some(); qed") + data.result.as_ref() + .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") .clone() } } -impl EveryOtherNodeVisitor { - pub fn new(self_id: &NodeId, nodes: I) -> Self where I: Iterator { - EveryOtherNodeVisitor { - visited: BTreeSet::new(), - unvisited: nodes.filter(|n| n != self_id).collect(), - in_progress: BTreeSet::new(), - } - } - - pub fn next_node(&mut self) -> Option { - let next_node = self.unvisited.pop_front(); - if let Some(ref next_node) = next_node { - self.in_progress.insert(next_node.clone()); - } - next_node - } - - 
pub fn mark_visited(&mut self, node: &NodeId) -> bool { - if !self.in_progress.remove(node) { - return false; - } - self.visited.insert(node.clone()) - } -} - -impl NodeData { - fn with_id_number(node_id_number: Secret) -> Self { - NodeData { - id_number: node_id_number, - secret1_sent: None, - secret2_sent: None, - secret1: None, - secret2: None, - publics: None, - public_share: None, - completion_confirmed: false, - } - } -} - impl Debug for SessionImpl { fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { write!(f, "Encryption session {} on {}", self.id, self.self_node_id) } } -pub fn check_cluster_nodes(self_node_id: &NodeId, nodes: &BTreeSet) -> Result<(), Error> { - // at least two nodes must be in cluster - if nodes.len() < 1 { - return Err(Error::InvalidNodesCount); - } - // this node must be a part of cluster - if !nodes.contains(self_node_id) { - return Err(Error::InvalidNodesConfiguration); +fn check_encrypted_data(self_node_id: &Public, encrypted_data: &DocumentKeyShare) -> Result<(), Error> { + use key_server_cluster::generation_session::{check_cluster_nodes, check_threshold}; + + // check that common_point and encrypted_point are still not set yet + if encrypted_data.common_point.is_some() || encrypted_data.encrypted_point.is_some() { + return Err(Error::CompletedSessionId); } - Ok(()) -} - -pub fn check_threshold(threshold: usize, nodes: &BTreeSet) -> Result<(), Error> { - // at least threshold + 1 nodes are required to collectively decrypt message - if threshold >= nodes.len() { - return Err(Error::InvalidThreshold); - } - - Ok(()) -} - -#[cfg(test)] -mod tests { - use std::time; - use std::sync::Arc; - use std::collections::{BTreeSet, BTreeMap, VecDeque}; - use tokio_core::reactor::Core; - use ethkey::{Random, Generator}; - use key_server_cluster::{NodeId, SessionId, Error, DummyKeyStorage}; - use key_server_cluster::message::{self, Message, EncryptionMessage}; - use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, 
run_clusters, loop_until, all_connections_established}; - use key_server_cluster::encryption_session::{Session, SessionImpl, SessionState, SessionParams}; - use key_server_cluster::math; - use key_server_cluster::math::tests::do_encryption_and_decryption; - - #[derive(Debug)] - struct Node { - pub cluster: Arc, - pub session: SessionImpl, - } - - #[derive(Debug)] - struct MessageLoop { - pub session_id: SessionId, - pub nodes: BTreeMap, - pub queue: VecDeque<(NodeId, NodeId, Message)>, - } - - impl MessageLoop { - pub fn new(nodes_num: usize) -> Self { - let mut nodes = BTreeMap::new(); - let session_id = SessionId::default(); - for _ in 0..nodes_num { - let key_pair = Random.generate().unwrap(); - let node_id = key_pair.public().clone(); - let cluster = Arc::new(DummyCluster::new(node_id.clone())); - let session = SessionImpl::new(SessionParams { - id: session_id.clone(), - self_node_id: node_id.clone(), - key_storage: Arc::new(DummyKeyStorage::default()), - cluster: cluster.clone(), - }); - nodes.insert(node_id, Node { cluster: cluster, session: session }); - } - - let nodes_ids: Vec<_> = nodes.keys().cloned().collect(); - for node in nodes.values() { - for node_id in &nodes_ids { - node.cluster.add_node(node_id.clone()); - } - } - - MessageLoop { - session_id: session_id, - nodes: nodes, - queue: VecDeque::new(), - } - } - - pub fn master(&self) -> &SessionImpl { - &self.nodes.values().nth(0).unwrap().session - } - - pub fn first_slave(&self) -> &SessionImpl { - &self.nodes.values().nth(1).unwrap().session - } - - pub fn second_slave(&self) -> &SessionImpl { - &self.nodes.values().nth(2).unwrap().session - } - - pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { - self.nodes.values() - .filter_map(|n| n.cluster.take_message().map(|m| (n.session.node().clone(), m.0, m.1))) - .nth(0) - .or_else(|| self.queue.pop_front()) - } - - pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { - match { - match msg.2 { - 
Message::Encryption(EncryptionMessage::InitializeSession(ref message)) => self.nodes[&msg.1].session.on_initialize_session(msg.0.clone(), &message), - Message::Encryption(EncryptionMessage::ConfirmInitialization(ref message)) => self.nodes[&msg.1].session.on_confirm_initialization(msg.0.clone(), &message), - Message::Encryption(EncryptionMessage::CompleteInitialization(ref message)) => self.nodes[&msg.1].session.on_complete_initialization(msg.0.clone(), &message), - Message::Encryption(EncryptionMessage::KeysDissemination(ref message)) => self.nodes[&msg.1].session.on_keys_dissemination(msg.0.clone(), &message), - Message::Encryption(EncryptionMessage::PublicKeyShare(ref message)) => self.nodes[&msg.1].session.on_public_key_share(msg.0.clone(), &message), - Message::Encryption(EncryptionMessage::SessionCompleted(ref message)) => self.nodes[&msg.1].session.on_session_completed(msg.0.clone(), &message), - _ => panic!("unexpected"), - } - } { - Ok(_) => Ok(()), - Err(Error::TooEarlyForRequest) => { - self.queue.push_back(msg); - Ok(()) - }, - Err(err) => Err(err), - } - } - - pub fn take_and_process_message(&mut self) -> Result<(), Error> { - let msg = self.take_message().unwrap(); - self.process_message(msg) - } - } - - fn make_simple_cluster(threshold: usize, num_nodes: usize) -> Result<(SessionId, NodeId, NodeId, MessageLoop), Error> { - let l = MessageLoop::new(num_nodes); - l.master().initialize(threshold, l.nodes.keys().cloned().collect())?; - - let session_id = l.session_id.clone(); - let master_id = l.master().node().clone(); - let slave_id = l.first_slave().node().clone(); - Ok((session_id, master_id, slave_id, l)) - } - - #[test] - fn initializes_in_cluster_of_single_node() { - let l = MessageLoop::new(1); - assert!(l.master().initialize(0, l.nodes.keys().cloned().collect()).is_ok()); - } - - #[test] - fn fails_to_initialize_if_not_a_part_of_cluster() { - let node_id = math::generate_random_point().unwrap(); - let cluster = 
Arc::new(DummyCluster::new(node_id.clone())); - let session = SessionImpl::new(SessionParams { - id: SessionId::default(), - self_node_id: node_id.clone(), - key_storage: Arc::new(DummyKeyStorage::default()), - cluster: cluster, - }); - let cluster_nodes: BTreeSet<_> = (0..2).map(|_| math::generate_random_point().unwrap()).collect(); - assert_eq!(session.initialize(0, cluster_nodes).unwrap_err(), Error::InvalidNodesConfiguration); - } - - #[test] - fn fails_to_initialize_if_threshold_is_wrong() { - assert_eq!(make_simple_cluster(2, 2).unwrap_err(), Error::InvalidThreshold); - } - - #[test] - fn fails_to_initialize_when_already_initialized() { - let (_, _, _, l) = make_simple_cluster(0, 2).unwrap(); - assert_eq!(l.master().initialize(0, l.nodes.keys().cloned().collect()).unwrap_err(), Error::InvalidStateForRequest); - } - - #[test] - fn fails_to_accept_initialization_when_already_initialized() { - let (sid, m, _, mut l) = make_simple_cluster(0, 2).unwrap(); - l.take_and_process_message().unwrap(); - assert_eq!(l.first_slave().on_initialize_session(m, &message::InitializeSession { - session: sid.into(), - derived_point: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::InvalidStateForRequest); - } - - #[test] - fn slave_updates_derived_point_on_initialization() { - let (_, _, _, mut l) = make_simple_cluster(0, 2).unwrap(); - let passed_point = match l.take_message().unwrap() { - (f, t, Message::Encryption(EncryptionMessage::InitializeSession(message))) => { - let point = message.derived_point.clone(); - l.process_message((f, t, Message::Encryption(EncryptionMessage::InitializeSession(message)))).unwrap(); - point - }, - _ => panic!("unexpected"), - }; - - match l.take_message().unwrap() { - (_, _, Message::Encryption(EncryptionMessage::ConfirmInitialization(message))) => assert!(passed_point != message.derived_point), - _ => panic!("unexpected"), - } - } - - #[test] - fn 
fails_to_accept_initialization_confirmation_if_already_accepted_from_the_same_node() { - let (sid, _, s, mut l) = make_simple_cluster(0, 3).unwrap(); - l.take_and_process_message().unwrap(); - l.take_and_process_message().unwrap(); - l.take_and_process_message().unwrap(); - assert_eq!(l.master().on_confirm_initialization(s, &message::ConfirmInitialization { - session: sid.into(), - derived_point: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::InvalidStateForRequest); - } - - #[test] - fn fails_to_accept_initialization_confirmation_if_initialization_already_completed() { - let (sid, _, s, mut l) = make_simple_cluster(0, 2).unwrap(); - l.take_and_process_message().unwrap(); - l.take_and_process_message().unwrap(); - assert_eq!(l.master().on_confirm_initialization(s, &message::ConfirmInitialization { - session: sid.into(), - derived_point: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::InvalidStateForRequest); - } - - #[test] - fn master_updates_derived_point_on_initialization_completion() { - let (_, _, _, mut l) = make_simple_cluster(0, 2).unwrap(); - l.take_and_process_message().unwrap(); - let passed_point = match l.take_message().unwrap() { - (f, t, Message::Encryption(EncryptionMessage::ConfirmInitialization(message))) => { - let point = message.derived_point.clone(); - l.process_message((f, t, Message::Encryption(EncryptionMessage::ConfirmInitialization(message)))).unwrap(); - point - }, - _ => panic!("unexpected"), - }; - - assert!(l.master().derived_point().unwrap() != passed_point.into()); - } - - #[test] - fn fails_to_complete_initialization_if_not_a_part_of_cluster() { - let (sid, m, _, l) = make_simple_cluster(0, 2).unwrap(); - let mut nodes = BTreeMap::new(); - nodes.insert(m, math::generate_random_scalar().unwrap()); - nodes.insert(math::generate_random_point().unwrap(), math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization 
{ - session: sid.into(), - nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), - threshold: 0, - derived_point: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::InvalidNodesConfiguration); - } - - #[test] - fn fails_to_complete_initialization_if_threshold_is_wrong() { - let (sid, m, s, l) = make_simple_cluster(0, 2).unwrap(); - let mut nodes = BTreeMap::new(); - nodes.insert(m, math::generate_random_scalar().unwrap()); - nodes.insert(s, math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { - session: sid.into(), - nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), - threshold: 2, - derived_point: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::InvalidThreshold); - } - - #[test] - fn fails_to_complete_initialization_if_not_waiting_for_it() { - let (sid, m, s, l) = make_simple_cluster(0, 2).unwrap(); - let mut nodes = BTreeMap::new(); - nodes.insert(m, math::generate_random_scalar().unwrap()); - nodes.insert(s, math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { - session: sid.into(), - nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), - threshold: 0, - derived_point: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::InvalidStateForRequest); - } - - #[test] - fn fails_to_complete_initialization_from_non_master_node() { - let (sid, m, s, mut l) = make_simple_cluster(0, 3).unwrap(); - l.take_and_process_message().unwrap(); - l.take_and_process_message().unwrap(); - l.take_and_process_message().unwrap(); - l.take_and_process_message().unwrap(); - let mut nodes = BTreeMap::new(); - nodes.insert(m, math::generate_random_scalar().unwrap()); - nodes.insert(s, math::generate_random_scalar().unwrap()); - nodes.insert(l.second_slave().node().clone(), 
math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(l.second_slave().node().clone(), &message::CompleteInitialization { - session: sid.into(), - nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), - threshold: 0, - derived_point: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::InvalidMessage); - } - - #[test] - fn fails_to_accept_keys_dissemination_if_not_waiting_for_it() { - let (sid, _, s, l) = make_simple_cluster(0, 2).unwrap(); - assert_eq!(l.master().on_keys_dissemination(s, &message::KeysDissemination { - session: sid.into(), - secret1: math::generate_random_scalar().unwrap().into(), - secret2: math::generate_random_scalar().unwrap().into(), - publics: vec![math::generate_random_point().unwrap().into()], - }).unwrap_err(), Error::InvalidStateForRequest); - } - - #[test] - fn fails_to_accept_keys_dissemination_if_wrong_number_of_publics_passed() { - let (sid, m, _, mut l) = make_simple_cluster(0, 3).unwrap(); - l.take_and_process_message().unwrap(); // m -> s1: InitializeSession - l.take_and_process_message().unwrap(); // m -> s2: InitializeSession - l.take_and_process_message().unwrap(); // s1 -> m: ConfirmInitialization - l.take_and_process_message().unwrap(); // s2 -> m: ConfirmInitialization - l.take_and_process_message().unwrap(); // m -> s1: CompleteInitialization - l.take_and_process_message().unwrap(); // m -> s2: CompleteInitialization - l.take_and_process_message().unwrap(); // m -> s1: KeysDissemination - assert_eq!(l.first_slave().on_keys_dissemination(m, &message::KeysDissemination { - session: sid.into(), - secret1: math::generate_random_scalar().unwrap().into(), - secret2: math::generate_random_scalar().unwrap().into(), - publics: vec![math::generate_random_point().unwrap().into(), math::generate_random_point().unwrap().into()], - }).unwrap_err(), Error::InvalidMessage); - } - - #[test] - fn 
fails_to_accept_keys_dissemination_second_time_from_the_same_node() { - let (sid, m, _, mut l) = make_simple_cluster(0, 3).unwrap(); - l.take_and_process_message().unwrap(); // m -> s1: InitializeSession - l.take_and_process_message().unwrap(); // m -> s2: InitializeSession - l.take_and_process_message().unwrap(); // s1 -> m: ConfirmInitialization - l.take_and_process_message().unwrap(); // s2 -> m: ConfirmInitialization - l.take_and_process_message().unwrap(); // m -> s1: CompleteInitialization - l.take_and_process_message().unwrap(); // m -> s2: CompleteInitialization - l.take_and_process_message().unwrap(); // m -> s1: KeysDissemination - assert_eq!(l.first_slave().on_keys_dissemination(m, &message::KeysDissemination { - session: sid.into(), - secret1: math::generate_random_scalar().unwrap().into(), - secret2: math::generate_random_scalar().unwrap().into(), - publics: vec![math::generate_random_point().unwrap().into()], - }).unwrap_err(), Error::InvalidStateForRequest); - } - - #[test] - fn should_not_accept_public_key_share_when_is_not_waiting_for_it() { - let (sid, _, s, l) = make_simple_cluster(1, 3).unwrap(); - assert_eq!(l.master().on_public_key_share(s, &message::PublicKeyShare { - session: sid.into(), - public_share: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::InvalidStateForRequest); - } - - #[test] - fn should_not_accept_public_key_share_when_receiving_twice() { - let (sid, m, _, mut l) = make_simple_cluster(0, 3).unwrap(); - l.take_and_process_message().unwrap(); // m -> s1: InitializeSession - l.take_and_process_message().unwrap(); // m -> s2: InitializeSession - l.take_and_process_message().unwrap(); // s1 -> m: ConfirmInitialization - l.take_and_process_message().unwrap(); // s2 -> m: ConfirmInitialization - l.take_and_process_message().unwrap(); // m -> s1: CompleteInitialization - l.take_and_process_message().unwrap(); // m -> s2: CompleteInitialization - l.take_and_process_message().unwrap(); // m -> s1: 
KeysDissemination - l.take_and_process_message().unwrap(); // m -> s2: KeysDissemination - l.take_and_process_message().unwrap(); // s1 -> m: KeysDissemination - l.take_and_process_message().unwrap(); // s1 -> s2: KeysDissemination - l.take_and_process_message().unwrap(); // s2 -> m: KeysDissemination - l.take_and_process_message().unwrap(); // s2 -> s1: KeysDissemination - let (f, t, msg) = match l.take_message() { - Some((f, t, Message::Encryption(EncryptionMessage::PublicKeyShare(msg)))) => (f, t, msg), - _ => panic!("unexpected"), - }; - assert_eq!(&f, l.master().node()); - assert_eq!(&t, l.second_slave().node()); - l.process_message((f, t, Message::Encryption(EncryptionMessage::PublicKeyShare(msg.clone())))).unwrap(); - assert_eq!(l.second_slave().on_public_key_share(m, &message::PublicKeyShare { - session: sid.into(), - public_share: math::generate_random_point().unwrap().into(), - }).unwrap_err(), Error::InvalidMessage); - } - - - #[test] - fn encryption_fails_on_session_timeout() { - let (_, _, _, l) = make_simple_cluster(0, 2).unwrap(); - assert!(l.master().joint_public_key().is_none()); - l.master().on_session_timeout(); - assert!(l.master().joint_public_key().unwrap().unwrap_err() == Error::NodeDisconnected); - } - - #[test] - fn encryption_fails_on_node_timeout() { - let (_, _, _, l) = make_simple_cluster(0, 2).unwrap(); - assert!(l.master().joint_public_key().is_none()); - l.master().on_node_timeout(l.first_slave().node()); - assert!(l.master().joint_public_key().unwrap().unwrap_err() == Error::NodeDisconnected); - } - - #[test] - fn complete_enc_dec_session() { - let test_cases = [(0, 5), (2, 5), (3, 5)]; - for &(threshold, num_nodes) in &test_cases { - let mut l = MessageLoop::new(num_nodes); - l.master().initialize(threshold, l.nodes.keys().cloned().collect()).unwrap(); - assert_eq!(l.nodes.len(), num_nodes); - - // let nodes do initialization + keys dissemination - while let Some((from, to, message)) = l.take_message() { - l.process_message((from, 
to, message)).unwrap(); - } - - // check that all nodes has finished joint public generation - let joint_public_key = l.master().joint_public_key().unwrap().unwrap(); - for node in l.nodes.values() { - let state = node.session.state(); - assert_eq!(state, SessionState::Finished); - assert_eq!(node.session.joint_public_key().as_ref(), Some(&Ok(joint_public_key))); - } - - // now let's encrypt some secret (which is a point on EC) - let document_secret_plain = Random.generate().unwrap().public().clone(); - let all_nodes_id_numbers: Vec<_> = l.master().data.lock().nodes.values().map(|n| n.id_number.clone()).collect(); - let all_nodes_secret_shares: Vec<_> = l.nodes.values().map(|n| n.session.data.lock().secret_share.as_ref().unwrap().clone()).collect(); - let document_secret_decrypted = do_encryption_and_decryption(threshold, &joint_public_key, - &all_nodes_id_numbers, - &all_nodes_secret_shares, - None, - document_secret_plain.clone() - ).0; - assert_eq!(document_secret_plain, document_secret_decrypted); - } - } - - #[test] - fn encryption_session_works_over_network() { - //::util::log::init_log(); - - let test_cases = [(1, 3)]; - for &(threshold, num_nodes) in &test_cases { - let mut core = Core::new().unwrap(); - - // prepare cluster objects for each node - let clusters = make_clusters(&core, 6022, num_nodes); - run_clusters(&clusters); - - // establish connections - loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established)); - - // run session to completion - let session_id = SessionId::default(); - let session = clusters[0].client().new_encryption_session(session_id, threshold).unwrap(); - loop_until(&mut core, time::Duration::from_millis(1000), || session.joint_public_key().is_some()); - } - } + let nodes = encrypted_data.id_numbers.keys().cloned().collect(); + check_cluster_nodes(self_node_id, &nodes)?; + check_threshold(encrypted_data.threshold, &nodes) } diff --git 
a/secret_store/src/key_server_cluster/generation_session.rs b/secret_store/src/key_server_cluster/generation_session.rs new file mode 100644 index 000000000..e94d5bd35 --- /dev/null +++ b/secret_store/src/key_server_cluster/generation_session.rs @@ -0,0 +1,1256 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::collections::{BTreeSet, BTreeMap, VecDeque}; +use std::fmt::{Debug, Formatter, Error as FmtError}; +use std::time; +use std::sync::Arc; +use parking_lot::{Condvar, Mutex}; +use ethkey::{Public, Secret}; +use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare}; +use key_server_cluster::math; +use key_server_cluster::cluster::Cluster; +use key_server_cluster::cluster_sessions::ClusterSession; +use key_server_cluster::message::{Message, GenerationMessage, InitializeSession, ConfirmInitialization, CompleteInitialization, + KeysDissemination, PublicKeyShare, SessionError, SessionCompleted}; + +/// Key generation session API. +pub trait Session: Send + Sync + 'static { + /// Get generation session state. + fn state(&self) -> SessionState; + /// Wait until session is completed. Returns public portion of generated server key. + fn wait(&self, timeout: Option) -> Result; + /// Get joint public key (if it is known). 
+ fn joint_public_and_secret(&self) -> Option>; +} + +/// Distributed key generation session. +/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: +/// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf +/// Brief overview: +/// 1) initialization: master node (which has received request for generating joint public + secret) initializes the session on all other nodes +/// 2) key dissemination (KD): all nodes are generating secret + public values and send these to appropriate nodes +/// 3) key verification (KV): all nodes are checking values, received for other nodes +/// 4) key generation phase (KG): nodes are exchanging with information, enough to generate joint public key +pub struct SessionImpl { + /// Unique session id. + id: SessionId, + /// Public identifier of this node. + self_node_id: NodeId, + /// Key storage. + key_storage: Option>, + /// Cluster which allows this node to send messages to other nodes in the cluster. + cluster: Arc, + /// SessionImpl completion condvar. + completed: Condvar, + /// Mutable session data. + data: Mutex, +} + +/// SessionImpl creation parameters +pub struct SessionParams { + /// SessionImpl identifier. + pub id: SessionId, + /// Id of node, on which this session is running. + pub self_node_id: Public, + /// Key storage. + pub key_storage: Option>, + /// Cluster + pub cluster: Arc, +} + +#[derive(Debug)] +/// Mutable data of distributed key generation session. +struct SessionData { + /// Current state of the session. + state: SessionState, + /// Simulate faulty behaviour? + simulate_faulty_behaviour: bool, + + // === Values, filled when session initialization just starts === + /// Reference to the node, which has started this session. + master: Option, + /// Public key of the creator of the session. + author: Option, + + // === Values, filled when session initialization is completed === + /// Threshold value for this DKG. 
Only `threshold + 1` will be able to collectively recreate joint secret, + /// and thus - decrypt message, encrypted with joint public. + threshold: Option, + /// Random point, jointly generated by every node in the cluster. + derived_point: Option, + /// Nodes-specific data. + nodes: BTreeMap, + + // === Values, filled during KD phase === + /// Value of polynom1[0], generated by this node. + secret_coeff: Option, + + // === Values, filled during KG phase === + /// Secret share, which this node holds. Persistent + private. + secret_share: Option, + + /// === Values, filled when DKG session is completed successfully === + /// Key share. + key_share: Option>, + /// Jointly generated public key, which can be used to encrypt secret. Public. + joint_public_and_secret: Option>, +} + +#[derive(Debug, Clone)] +/// Mutable node-specific data. +struct NodeData { + /// Random unique scalar. Persistent. + pub id_number: Secret, + + // === Values, filled during KD phase === + /// Secret value1, which has been sent to this node. + pub secret1_sent: Option, + /// Secret value2, which has been sent to this node. + pub secret2_sent: Option, + /// Secret value1, which has been received from this node. + pub secret1: Option, + /// Secret value2, which has been received from this node. + pub secret2: Option, + /// Public values, which have been received from this node. + pub publics: Option>, + + // === Values, filled during KG phase === + /// Public share, which has been received from this node. + pub public_share: Option, + + // === Values, filled during completion phase === + /// Flags marking that node has confirmed session completion (generated key is stored). + pub completion_confirmed: bool, +} + +#[derive(Debug, Clone, PartialEq)] +/// Schedule for visiting other nodes of cluster. +pub struct EveryOtherNodeVisitor { + /// Already visited nodes. + visited: BTreeSet, + /// Not yet visited nodes. + unvisited: VecDeque, + /// Nodes, which are currently visited. 
+ in_progress: BTreeSet, +} + +#[derive(Debug, Clone, PartialEq)] +/// Distributed key generation session state. +pub enum SessionState { + // === Initialization states === + /// Every node starts in this state. + WaitingForInitialization, + /// Master node asks every other node to confirm initialization. + /// Derived point is generated by all nodes in the cluster. + WaitingForInitializationConfirm(EveryOtherNodeVisitor), + /// Slave nodes are in this state until initialization completion is reported by master node. + WaitingForInitializationComplete, + + // === KD phase states === + /// Node is waiting for generated keys from every other node. + WaitingForKeysDissemination, + + // === KG phase states === + /// Node is waiting for joint public key share to be received from every other node. + WaitingForPublicKeyShare, + + // === Generation phase states === + /// Node is waiting for session completion/session completion confirmation. + WaitingForGenerationConfirmation, + + // === Final states of the session === + /// Joint public key generation is completed. + Finished, + /// Joint public key generation is failed. + Failed, +} + +impl SessionImpl { + /// Create new generation session. + pub fn new(params: SessionParams) -> Self { + SessionImpl { + id: params.id, + self_node_id: params.self_node_id, + key_storage: params.key_storage, + cluster: params.cluster, + completed: Condvar::new(), + data: Mutex::new(SessionData { + state: SessionState::WaitingForInitialization, + simulate_faulty_behaviour: false, + master: None, + author: None, + threshold: None, + derived_point: None, + nodes: BTreeMap::new(), + secret_coeff: None, + secret_share: None, + key_share: None, + joint_public_and_secret: None, + }), + } + } + + /// Get this node Id. + pub fn node(&self) -> &NodeId { + &self.self_node_id + } + + #[cfg(test)] + /// Get derived point. 
+ pub fn derived_point(&self) -> Option { + self.data.lock().derived_point.clone() + } + + /// Simulate faulty generation session behaviour. + pub fn simulate_faulty_behaviour(&self) { + self.data.lock().simulate_faulty_behaviour = true; + } + + /// Start new session initialization. This must be called on master node. + pub fn initialize(&self, author: Public, threshold: usize, nodes: BTreeSet) -> Result<(), Error> { + check_cluster_nodes(self.node(), &nodes)?; + check_threshold(threshold, &nodes)?; + + let mut data = self.data.lock(); + + // check state + if data.state != SessionState::WaitingForInitialization { + return Err(Error::InvalidStateForRequest); + } + + // update state + data.master = Some(self.node().clone()); + data.author = Some(author.clone()); + data.threshold = Some(threshold); + for node_id in &nodes { + // generate node identification parameter + let node_id_number = math::generate_random_scalar()?; + data.nodes.insert(node_id.clone(), NodeData::with_id_number(node_id_number)); + } + + let mut visit_policy = EveryOtherNodeVisitor::new(self.node(), data.nodes.keys().cloned()); + let derived_point = math::generate_random_point()?; + match visit_policy.next_node() { + Some(next_node) => { + data.state = SessionState::WaitingForInitializationConfirm(visit_policy); + + // start initialization + self.cluster.send(&next_node, Message::Generation(GenerationMessage::InitializeSession(InitializeSession { + session: self.id.clone().into(), + author: author.into(), + nodes: data.nodes.iter().map(|(k, v)| (k.clone().into(), v.id_number.clone().into())).collect(), + threshold: data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"), + derived_point: derived_point.into(), + }))) + }, + None => { + drop(data); + self.complete_initialization(derived_point)?; + self.disseminate_keys()?; + self.verify_keys()?; + self.complete_generation() + } + } + } + + /// Process single message. 
+ pub fn process_message(&self, sender: &NodeId, message: &GenerationMessage) -> Result<(), Error> { + match message { + &GenerationMessage::InitializeSession(ref message) => + self.on_initialize_session(sender.clone(), message), + &GenerationMessage::ConfirmInitialization(ref message) => + self.on_confirm_initialization(sender.clone(), message), + &GenerationMessage::CompleteInitialization(ref message) => + self.on_complete_initialization(sender.clone(), message), + &GenerationMessage::KeysDissemination(ref message) => + self.on_keys_dissemination(sender.clone(), message), + &GenerationMessage::PublicKeyShare(ref message) => + self.on_public_key_share(sender.clone(), message), + &GenerationMessage::SessionError(ref message) => + self.on_session_error(sender.clone(), message), + &GenerationMessage::SessionCompleted(ref message) => + self.on_session_completed(sender.clone(), message), + } + } + + /// When session initialization message is received. + pub fn on_initialize_session(&self, sender: NodeId, message: &InitializeSession) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(&sender != self.node()); + + // check message + let nodes_ids = message.nodes.keys().cloned().map(Into::into).collect(); + check_threshold(message.threshold, &nodes_ids)?; + check_cluster_nodes(self.node(), &nodes_ids)?; + + let mut data = self.data.lock(); + + // check state + if data.state != SessionState::WaitingForInitialization { + return Err(Error::InvalidStateForRequest); + } + + // update derived point with random scalar + let mut derived_point = message.derived_point.clone().into(); + math::update_random_point(&mut derived_point)?; + + // send confirmation back to master node + self.cluster.send(&sender, Message::Generation(GenerationMessage::ConfirmInitialization(ConfirmInitialization { + session: self.id.clone().into(), + derived_point: derived_point.into(), + })))?; + + // update state + data.master = Some(sender); + data.author = 
Some(message.author.clone().into()); + data.state = SessionState::WaitingForInitializationComplete; + data.nodes = message.nodes.iter().map(|(id, number)| (id.clone().into(), NodeData::with_id_number(number.clone().into()))).collect(); + data.threshold = Some(message.threshold); + + Ok(()) + } + + /// When session initialization confirmation message is reeived. + pub fn on_confirm_initialization(&self, sender: NodeId, message: &ConfirmInitialization) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(&sender != self.node()); + + let mut data = self.data.lock(); + debug_assert!(data.nodes.contains_key(&sender)); + + // check state && select new node to be initialized + let next_receiver = match data.state { + SessionState::WaitingForInitializationConfirm(ref mut visit_policy) => { + if !visit_policy.mark_visited(&sender) { + return Err(Error::InvalidStateForRequest); + } + + visit_policy.next_node() + }, + _ => return Err(Error::InvalidStateForRequest), + }; + + // proceed message + if let Some(next_receiver) = next_receiver { + return self.cluster.send(&next_receiver, Message::Generation(GenerationMessage::InitializeSession(InitializeSession { + session: self.id.clone().into(), + author: data.author.as_ref().expect("author is filled on initialization step; confrm initialization follows initialization; qed").clone().into(), + nodes: data.nodes.iter().map(|(k, v)| (k.clone().into(), v.id_number.clone().into())).collect(), + threshold: data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"), + derived_point: message.derived_point.clone().into(), + }))); + } + + // now it is time for keys dissemination (KD) phase + drop(data); + self.complete_initialization(message.derived_point.clone().into())?; + self.disseminate_keys() + } + + /// When session initialization completion message is received. 
+ pub fn on_complete_initialization(&self, sender: NodeId, message: &CompleteInitialization) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(&sender != self.node()); + + let mut data = self.data.lock(); + + // check state + if data.state != SessionState::WaitingForInitializationComplete { + return Err(Error::InvalidStateForRequest); + } + if data.master != Some(sender) { + return Err(Error::InvalidMessage); + } + + // remember passed data + data.derived_point = Some(message.derived_point.clone().into()); + + // now it is time for keys dissemination (KD) phase + drop(data); + self.disseminate_keys() + } + + /// When keys dissemination message is received. + pub fn on_keys_dissemination(&self, sender: NodeId, message: &KeysDissemination) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(&sender != self.node()); + + let mut data = self.data.lock(); + + // simulate failure, if required + if data.simulate_faulty_behaviour { + return Err(Error::Io("simulated error".into())); + } + + // check state + if data.state != SessionState::WaitingForKeysDissemination { + match data.state { + SessionState::WaitingForInitializationComplete => return Err(Error::TooEarlyForRequest), + _ => return Err(Error::InvalidStateForRequest), + } + } + debug_assert!(data.nodes.contains_key(&sender)); + + // check message + let threshold = data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"); + if message.publics.len() != threshold + 1 { + return Err(Error::InvalidMessage); + } + + // update node data + { + let node_data = data.nodes.get_mut(&sender).ok_or(Error::InvalidMessage)?; + if node_data.secret1.is_some() || node_data.secret2.is_some() || node_data.publics.is_some() { + return Err(Error::InvalidStateForRequest); + } + + node_data.secret1 = Some(message.secret1.clone().into()); + node_data.secret2 = Some(message.secret2.clone().into()); + node_data.publics = 
Some(message.publics.iter().cloned().map(Into::into).collect()); + } + + // check if we have received keys from every other node + if data.nodes.iter().any(|(node_id, node_data)| node_id != self.node() && (node_data.publics.is_none() || node_data.secret1.is_none() || node_data.secret2.is_none())) { + return Ok(()) + } + + drop(data); + self.verify_keys() + } + + /// When public key share is received. + pub fn on_public_key_share(&self, sender: NodeId, message: &PublicKeyShare) -> Result<(), Error> { + let mut data = self.data.lock(); + + // check state + if data.state != SessionState::WaitingForPublicKeyShare { + match data.state { + SessionState::WaitingForInitializationComplete | + SessionState::WaitingForKeysDissemination => return Err(Error::TooEarlyForRequest), + _ => return Err(Error::InvalidStateForRequest), + } + } + + // update node data with received public share + { + let node_data = &mut data.nodes.get_mut(&sender).ok_or(Error::InvalidMessage)?; + if node_data.public_share.is_some() { + return Err(Error::InvalidMessage); + } + + node_data.public_share = Some(message.public_share.clone().into()); + } + + // if there are still nodes which have not sent us their public shares - do nothing + if data.nodes.iter().any(|(node_id, node_data)| node_id != self.node() && node_data.public_share.is_none()) { + return Ok(()); + } + + drop(data); + self.complete_generation() + } + + /// When session completion message is received. 
+ pub fn on_session_completed(&self, sender: NodeId, message: &SessionCompleted) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(&sender != self.node()); + + let mut data = self.data.lock(); + debug_assert!(data.nodes.contains_key(&sender)); + + // check state + if data.state != SessionState::WaitingForGenerationConfirmation { + match data.state { + SessionState::WaitingForPublicKeyShare => return Err(Error::TooEarlyForRequest), + _ => return Err(Error::InvalidStateForRequest), + } + } + + // if we are not masters, save result and respond with confirmation + if data.master.as_ref() != Some(self.node()) { + // check that we have received message from master + if data.master.as_ref() != Some(&sender) { + return Err(Error::InvalidMessage); + } + + // save encrypted data to key storage + let encrypted_data = DocumentKeyShare { + author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(), + threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), + id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(), + secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(), + common_point: None, + encrypted_point: None, + }; + + if let Some(ref key_storage) = self.key_storage { + key_storage.insert(self.id.clone(), encrypted_data.clone()) + .map_err(|e| Error::KeyStorage(e.into()))?; + } + + // then respond with confirmation + data.state = SessionState::Finished; + return self.cluster.send(&sender, Message::Generation(GenerationMessage::SessionCompleted(SessionCompleted { + session: self.id.clone().into(), + }))); + } + + // remember that we have received confirmation from sender node + { + let sender_node = data.nodes.get_mut(&sender).expect("node is always qualified by 
himself; qed"); + if sender_node.completion_confirmed { + return Err(Error::InvalidMessage); + } + + sender_node.completion_confirmed = true; + } + + // check if we have received confirmations from all cluster nodes + if data.nodes.iter().any(|(_, node_data)| !node_data.completion_confirmed) { + return Ok(()) + } + + // we have received enough confirmations => complete session + data.state = SessionState::Finished; + self.completed.notify_all(); + + Ok(()) + } + + /// When error has occurred on another node. + pub fn on_session_error(&self, sender: NodeId, message: &SessionError) -> Result<(), Error> { + let mut data = self.data.lock(); + + warn!("{}: generation session failed with error: {} from {}", self.node(), message.error, sender); + + data.state = SessionState::Failed; + data.key_share = Some(Err(Error::Io(message.error.clone()))); + data.joint_public_and_secret = Some(Err(Error::Io(message.error.clone()))); + self.completed.notify_all(); + + Ok(()) + } + + /// Complete initialization (when all other nodes have responded with confirmation) + fn complete_initialization(&self, mut derived_point: Public) -> Result<(), Error> { + // update point once again to make sure that derived point is not generated by last node + math::update_random_point(&mut derived_point)?; + + // remember derived point + let mut data = self.data.lock(); + data.derived_point = Some(derived_point.clone().into()); + + // broadcast derived point && other session parameters to every other node + self.cluster.broadcast(Message::Generation(GenerationMessage::CompleteInitialization(CompleteInitialization { + session: self.id.clone().into(), + derived_point: derived_point.into(), + }))) + } + + /// Keys dissemination (KD) phase + fn disseminate_keys(&self) -> Result<(), Error> { + let mut data = self.data.lock(); + + // pick 2t + 2 random numbers as polynomial coefficients for 2 polynoms + let threshold = data.threshold.expect("threshold is filled on initialization phase; KD phase follows 
initialization phase; qed"); + let polynom1 = math::generate_random_polynom(threshold)?; + let polynom2 = math::generate_random_polynom(threshold)?; + data.secret_coeff = Some(polynom1[0].clone()); + + // compute t+1 public values + let publics = math::public_values_generation(threshold, + data.derived_point.as_ref().expect("keys dissemination occurs after derived point is agreed; qed"), + &polynom1, + &polynom2)?; + + // compute secret values for every other node + for (node, node_data) in data.nodes.iter_mut() { + let secret1 = math::compute_polynom(&polynom1, &node_data.id_number)?; + let secret2 = math::compute_polynom(&polynom2, &node_data.id_number)?; + + // send a message containing secret1 && secret2 to other node + if node != self.node() { + node_data.secret1_sent = Some(secret1.clone()); + node_data.secret2_sent = Some(secret2.clone()); + + self.cluster.send(&node, Message::Generation(GenerationMessage::KeysDissemination(KeysDissemination { + session: self.id.clone().into(), + secret1: secret1.into(), + secret2: secret2.into(), + publics: publics.iter().cloned().map(Into::into).collect(), + })))?; + } else { + node_data.secret1 = Some(secret1); + node_data.secret2 = Some(secret2); + node_data.publics = Some(publics.clone()); + } + } + + // update state + data.state = SessionState::WaitingForKeysDissemination; + + Ok(()) + } + + /// Keys verification (KV) phase + fn verify_keys(&self) -> Result<(), Error> { + let mut data = self.data.lock(); + + // key verification (KV) phase: check that other nodes have passed correct secrets + let threshold = data.threshold.expect("threshold is filled in initialization phase; KV phase follows initialization phase; qed"); + let derived_point = data.derived_point.clone().expect("derived point generated on initialization phase; KV phase follows initialization phase; qed"); + let number_id = data.nodes[self.node()].id_number.clone(); + for (_ , node_data) in data.nodes.iter_mut().filter(|&(node_id, _)| node_id != 
self.node()) { + let secret1 = node_data.secret1.as_ref().expect("keys received on KD phase; KV phase follows KD phase; qed"); + let secret2 = node_data.secret2.as_ref().expect("keys received on KD phase; KV phase follows KD phase; qed"); + let publics = node_data.publics.as_ref().expect("keys received on KD phase; KV phase follows KD phase; qed"); + let is_key_verification_ok = math::keys_verification(threshold, &derived_point, &number_id, + secret1, secret2, publics)?; + + if !is_key_verification_ok { + // node has sent us incorrect values. In original ECDKG protocol we should have sent complaint here. + return Err(Error::InvalidMessage); + } + } + + // calculate public share + let self_public_share = { + let self_secret_coeff = data.secret_coeff.as_ref().expect("secret_coeff is generated on KD phase; KG phase follows KD phase; qed"); + math::compute_public_share(self_secret_coeff)? + }; + + // calculate self secret + public shares + let self_secret_share = { + let secret_values_iter = data.nodes.values() + .map(|n| n.secret1.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed")); + math::compute_secret_share(secret_values_iter)? 
+ }; + + // update state + data.state = SessionState::WaitingForPublicKeyShare; + data.secret_share = Some(self_secret_share); + let self_node = data.nodes.get_mut(self.node()).expect("node is always qualified by himself; qed"); + self_node.public_share = Some(self_public_share.clone()); + + // broadcast self public key share + self.cluster.broadcast(Message::Generation(GenerationMessage::PublicKeyShare(PublicKeyShare { + session: self.id.clone().into(), + public_share: self_public_share.into(), + }))) + } + + /// Complete generation + fn complete_generation(&self) -> Result<(), Error> { + let mut data = self.data.lock(); + + // else - calculate joint public key + let joint_public = { + let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed")); + math::compute_joint_public(public_shares)? + }; + + // prepare key data + let encrypted_data = DocumentKeyShare { + author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(), + threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), + id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(), + secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(), + common_point: None, + encrypted_point: None, + }; + + // if we are at the slave node - wait for session completion + let secret_coeff = data.secret_coeff.as_ref().expect("secret coeff is selected on initialization phase; current phase follows initialization; qed").clone(); + if data.master.as_ref() != Some(self.node()) { + data.key_share = Some(Ok(encrypted_data)); + data.joint_public_and_secret = Some(Ok((joint_public, secret_coeff))); + data.state = SessionState::WaitingForGenerationConfirmation; + return Ok(()); + } + + 
// then save encrypted data to the key storage + if let Some(ref key_storage) = self.key_storage { + key_storage.insert(self.id.clone(), encrypted_data.clone()) + .map_err(|e| Error::KeyStorage(e.into()))?; + } + + // then distribute encrypted data to every other node + self.cluster.broadcast(Message::Generation(GenerationMessage::SessionCompleted(SessionCompleted { + session: self.id.clone().into(), + })))?; + + // then wait for confirmation from all other nodes + { + let self_node = data.nodes.get_mut(self.node()).expect("node is always qualified by himself; qed"); + self_node.completion_confirmed = true; + } + data.key_share = Some(Ok(encrypted_data)); + data.joint_public_and_secret = Some(Ok((joint_public, secret_coeff))); + data.state = SessionState::WaitingForGenerationConfirmation; + + Ok(()) + } +} + +impl ClusterSession for SessionImpl { + fn is_finished(&self) -> bool { + let data = self.data.lock(); + data.state == SessionState::Failed + || data.state == SessionState::Finished + } + + fn on_node_timeout(&self, node: &NodeId) { + let mut data = self.data.lock(); + + // all nodes are required for generation session + // => fail without check + warn!("{}: generation session failed because {} connection has timeouted", self.node(), node); + + data.state = SessionState::Failed; + data.key_share = Some(Err(Error::NodeDisconnected)); + data.joint_public_and_secret = Some(Err(Error::NodeDisconnected)); + self.completed.notify_all(); + } + + fn on_session_timeout(&self) { + let mut data = self.data.lock(); + + warn!("{}: generation session failed with timeout", self.node()); + + data.state = SessionState::Failed; + data.key_share = Some(Err(Error::NodeDisconnected)); + data.joint_public_and_secret = Some(Err(Error::NodeDisconnected)); + self.completed.notify_all(); + } +} + +impl Session for SessionImpl { + fn state(&self) -> SessionState { + self.data.lock().state.clone() + } + + fn wait(&self, timeout: Option) -> Result { + let mut data = self.data.lock(); + if 
!data.joint_public_and_secret.is_some() { + match timeout { + None => self.completed.wait(&mut data), + Some(timeout) => { self.completed.wait_for(&mut data, timeout); }, + } + } + + data.joint_public_and_secret.clone() + .expect("checked above or waited for completed; completed is only signaled when joint_public.is_some(); qed") + .map(|p| p.0) + } + + fn joint_public_and_secret(&self) -> Option> { + self.data.lock().joint_public_and_secret.clone() + } +} + +impl EveryOtherNodeVisitor { + pub fn new(self_id: &NodeId, nodes: I) -> Self where I: Iterator { + EveryOtherNodeVisitor { + visited: BTreeSet::new(), + unvisited: nodes.filter(|n| n != self_id).collect(), + in_progress: BTreeSet::new(), + } + } + + pub fn next_node(&mut self) -> Option { + let next_node = self.unvisited.pop_front(); + if let Some(ref next_node) = next_node { + self.in_progress.insert(next_node.clone()); + } + next_node + } + + pub fn mark_visited(&mut self, node: &NodeId) -> bool { + if !self.in_progress.remove(node) { + return false; + } + self.visited.insert(node.clone()) + } +} + +impl NodeData { + fn with_id_number(node_id_number: Secret) -> Self { + NodeData { + id_number: node_id_number, + secret1_sent: None, + secret2_sent: None, + secret1: None, + secret2: None, + publics: None, + public_share: None, + completion_confirmed: false, + } + } +} + +impl Debug for SessionImpl { + fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { + write!(f, "Generation session {} on {}", self.id, self.self_node_id) + } +} + +pub fn check_cluster_nodes(self_node_id: &NodeId, nodes: &BTreeSet) -> Result<(), Error> { + // at least two nodes must be in cluster + if nodes.len() < 1 { + return Err(Error::InvalidNodesCount); + } + // this node must be a part of cluster + if !nodes.contains(self_node_id) { + return Err(Error::InvalidNodesConfiguration); + } + + Ok(()) +} + +pub fn check_threshold(threshold: usize, nodes: &BTreeSet) -> Result<(), Error> { + // at least threshold + 1 nodes are required to 
collectively decrypt message + if threshold >= nodes.len() { + return Err(Error::InvalidThreshold); + } + + Ok(()) +} + +#[cfg(test)] +pub mod tests { + use std::time; + use std::sync::Arc; + use std::collections::{BTreeSet, BTreeMap, VecDeque}; + use tokio_core::reactor::Core; + use ethkey::{Random, Generator, Public}; + use key_server_cluster::{NodeId, SessionId, Error, DummyKeyStorage}; + use key_server_cluster::message::{self, Message, GenerationMessage}; + use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until, all_connections_established}; + use key_server_cluster::cluster_sessions::ClusterSession; + use key_server_cluster::generation_session::{Session, SessionImpl, SessionState, SessionParams}; + use key_server_cluster::math; + use key_server_cluster::math::tests::do_encryption_and_decryption; + + pub struct Node { + pub cluster: Arc, + pub key_storage: Arc, + pub session: SessionImpl, + } + + pub struct MessageLoop { + pub session_id: SessionId, + pub nodes: BTreeMap, + pub queue: VecDeque<(NodeId, NodeId, Message)>, + } + + impl MessageLoop { + pub fn new(nodes_num: usize) -> Self { + let mut nodes = BTreeMap::new(); + let session_id = SessionId::default(); + for _ in 0..nodes_num { + let key_pair = Random.generate().unwrap(); + let node_id = key_pair.public().clone(); + let cluster = Arc::new(DummyCluster::new(node_id.clone())); + let key_storage = Arc::new(DummyKeyStorage::default()); + let session = SessionImpl::new(SessionParams { + id: session_id.clone(), + self_node_id: node_id.clone(), + key_storage: Some(key_storage.clone()), + cluster: cluster.clone(), + }); + nodes.insert(node_id, Node { cluster: cluster, key_storage: key_storage, session: session }); + } + + let nodes_ids: Vec<_> = nodes.keys().cloned().collect(); + for node in nodes.values() { + for node_id in &nodes_ids { + node.cluster.add_node(node_id.clone()); + } + } + + MessageLoop { + session_id: session_id, + nodes: nodes, + queue: 
VecDeque::new(), + } + } + + pub fn master(&self) -> &SessionImpl { + &self.nodes.values().nth(0).unwrap().session + } + + pub fn first_slave(&self) -> &SessionImpl { + &self.nodes.values().nth(1).unwrap().session + } + + pub fn second_slave(&self) -> &SessionImpl { + &self.nodes.values().nth(2).unwrap().session + } + + pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { + self.nodes.values() + .filter_map(|n| n.cluster.take_message().map(|m| (n.session.node().clone(), m.0, m.1))) + .nth(0) + .or_else(|| self.queue.pop_front()) + } + + pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { + match { + match msg.2 { + Message::Generation(GenerationMessage::InitializeSession(ref message)) => self.nodes[&msg.1].session.on_initialize_session(msg.0.clone(), &message), + Message::Generation(GenerationMessage::ConfirmInitialization(ref message)) => self.nodes[&msg.1].session.on_confirm_initialization(msg.0.clone(), &message), + Message::Generation(GenerationMessage::CompleteInitialization(ref message)) => self.nodes[&msg.1].session.on_complete_initialization(msg.0.clone(), &message), + Message::Generation(GenerationMessage::KeysDissemination(ref message)) => self.nodes[&msg.1].session.on_keys_dissemination(msg.0.clone(), &message), + Message::Generation(GenerationMessage::PublicKeyShare(ref message)) => self.nodes[&msg.1].session.on_public_key_share(msg.0.clone(), &message), + Message::Generation(GenerationMessage::SessionCompleted(ref message)) => self.nodes[&msg.1].session.on_session_completed(msg.0.clone(), &message), + _ => panic!("unexpected"), + } + } { + Ok(_) => Ok(()), + Err(Error::TooEarlyForRequest) => { + self.queue.push_back(msg); + Ok(()) + }, + Err(err) => Err(err), + } + } + + pub fn take_and_process_message(&mut self) -> Result<(), Error> { + let msg = self.take_message().unwrap(); + self.process_message(msg) + } + } + + fn make_simple_cluster(threshold: usize, num_nodes: usize) -> Result<(SessionId, 
NodeId, NodeId, MessageLoop), Error> { + let l = MessageLoop::new(num_nodes); + l.master().initialize(Public::default(), threshold, l.nodes.keys().cloned().collect())?; + + let session_id = l.session_id.clone(); + let master_id = l.master().node().clone(); + let slave_id = l.first_slave().node().clone(); + Ok((session_id, master_id, slave_id, l)) + } + + #[test] + fn initializes_in_cluster_of_single_node() { + let l = MessageLoop::new(1); + assert!(l.master().initialize(Public::default(), 0, l.nodes.keys().cloned().collect()).is_ok()); + } + + #[test] + fn fails_to_initialize_if_not_a_part_of_cluster() { + let node_id = math::generate_random_point().unwrap(); + let cluster = Arc::new(DummyCluster::new(node_id.clone())); + let session = SessionImpl::new(SessionParams { + id: SessionId::default(), + self_node_id: node_id.clone(), + key_storage: Some(Arc::new(DummyKeyStorage::default())), + cluster: cluster, + }); + let cluster_nodes: BTreeSet<_> = (0..2).map(|_| math::generate_random_point().unwrap()).collect(); + assert_eq!(session.initialize(Public::default(), 0, cluster_nodes).unwrap_err(), Error::InvalidNodesConfiguration); + } + + #[test] + fn fails_to_initialize_if_threshold_is_wrong() { + match make_simple_cluster(2, 2) { + Err(Error::InvalidThreshold) => (), + _ => panic!("unexpected"), + } + } + + #[test] + fn fails_to_initialize_when_already_initialized() { + let (_, _, _, l) = make_simple_cluster(0, 2).unwrap(); + assert_eq!(l.master().initialize(Public::default(), 0, l.nodes.keys().cloned().collect()).unwrap_err(), Error::InvalidStateForRequest); + } + + #[test] + fn fails_to_accept_initialization_when_already_initialized() { + let (_, _, _, mut l) = make_simple_cluster(0, 2).unwrap(); + let message = l.take_message().unwrap(); + l.process_message(message.clone()).unwrap(); + assert_eq!(l.process_message(message.clone()).unwrap_err(), Error::InvalidStateForRequest); + } + + #[test] + fn slave_updates_derived_point_on_initialization() { + let (_, _, _, mut 
l) = make_simple_cluster(0, 2).unwrap(); + let passed_point = match l.take_message().unwrap() { + (f, t, Message::Generation(GenerationMessage::InitializeSession(message))) => { + let point = message.derived_point.clone(); + l.process_message((f, t, Message::Generation(GenerationMessage::InitializeSession(message)))).unwrap(); + point + }, + _ => panic!("unexpected"), + }; + + match l.take_message().unwrap() { + (_, _, Message::Generation(GenerationMessage::ConfirmInitialization(message))) => assert!(passed_point != message.derived_point), + _ => panic!("unexpected"), + } + } + + #[test] + fn fails_to_accept_initialization_confirmation_if_already_accepted_from_the_same_node() { + let (sid, _, s, mut l) = make_simple_cluster(0, 3).unwrap(); + l.take_and_process_message().unwrap(); + l.take_and_process_message().unwrap(); + l.take_and_process_message().unwrap(); + assert_eq!(l.master().on_confirm_initialization(s, &message::ConfirmInitialization { + session: sid.into(), + derived_point: math::generate_random_point().unwrap().into(), + }).unwrap_err(), Error::InvalidStateForRequest); + } + + #[test] + fn fails_to_accept_initialization_confirmation_if_initialization_already_completed() { + let (sid, _, s, mut l) = make_simple_cluster(0, 2).unwrap(); + l.take_and_process_message().unwrap(); + l.take_and_process_message().unwrap(); + assert_eq!(l.master().on_confirm_initialization(s, &message::ConfirmInitialization { + session: sid.into(), + derived_point: math::generate_random_point().unwrap().into(), + }).unwrap_err(), Error::InvalidStateForRequest); + } + + #[test] + fn master_updates_derived_point_on_initialization_completion() { + let (_, _, _, mut l) = make_simple_cluster(0, 2).unwrap(); + l.take_and_process_message().unwrap(); + let passed_point = match l.take_message().unwrap() { + (f, t, Message::Generation(GenerationMessage::ConfirmInitialization(message))) => { + let point = message.derived_point.clone(); + l.process_message((f, t, 
Message::Generation(GenerationMessage::ConfirmInitialization(message)))).unwrap(); + point + }, + _ => panic!("unexpected"), + }; + + assert!(l.master().derived_point().unwrap() != passed_point.into()); + } + + #[test] + fn fails_to_complete_initialization_if_not_a_part_of_cluster() { + let (sid, m, _, l) = make_simple_cluster(0, 2).unwrap(); + let mut nodes = BTreeMap::new(); + nodes.insert(m, math::generate_random_scalar().unwrap()); + nodes.insert(math::generate_random_point().unwrap(), math::generate_random_scalar().unwrap()); + assert_eq!(l.first_slave().on_initialize_session(m, &message::InitializeSession { + session: sid.into(), + author: Public::default().into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), + threshold: 0, + derived_point: math::generate_random_point().unwrap().into(), + }).unwrap_err(), Error::InvalidNodesConfiguration); + } + + #[test] + fn fails_to_complete_initialization_if_threshold_is_wrong() { + let (sid, m, s, l) = make_simple_cluster(0, 2).unwrap(); + let mut nodes = BTreeMap::new(); + nodes.insert(m, math::generate_random_scalar().unwrap()); + nodes.insert(s, math::generate_random_scalar().unwrap()); + assert_eq!(l.first_slave().on_initialize_session(m, &message::InitializeSession { + session: sid.into(), + author: Public::default().into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), + threshold: 2, + derived_point: math::generate_random_point().unwrap().into(), + }).unwrap_err(), Error::InvalidThreshold); + } + + #[test] + fn fails_to_complete_initialization_if_not_waiting_for_it() { + let (sid, m, _, l) = make_simple_cluster(0, 2).unwrap(); + assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { + session: sid.into(), + derived_point: math::generate_random_point().unwrap().into(), + }).unwrap_err(), Error::InvalidStateForRequest); + } + + #[test] + fn fails_to_complete_initialization_from_non_master_node() { + let (sid, _, _, mut l) = 
make_simple_cluster(0, 3).unwrap(); + l.take_and_process_message().unwrap(); + l.take_and_process_message().unwrap(); + l.take_and_process_message().unwrap(); + l.take_and_process_message().unwrap(); + assert_eq!(l.first_slave().on_complete_initialization(l.second_slave().node().clone(), &message::CompleteInitialization { + session: sid.into(), + derived_point: math::generate_random_point().unwrap().into(), + }).unwrap_err(), Error::InvalidMessage); + } + + #[test] + fn fails_to_accept_keys_dissemination_if_not_waiting_for_it() { + let (sid, _, s, l) = make_simple_cluster(0, 2).unwrap(); + assert_eq!(l.master().on_keys_dissemination(s, &message::KeysDissemination { + session: sid.into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into()], + }).unwrap_err(), Error::InvalidStateForRequest); + } + + #[test] + fn fails_to_accept_keys_dissemination_if_wrong_number_of_publics_passed() { + let (sid, m, _, mut l) = make_simple_cluster(0, 3).unwrap(); + l.take_and_process_message().unwrap(); // m -> s1: InitializeSession + l.take_and_process_message().unwrap(); // m -> s2: InitializeSession + l.take_and_process_message().unwrap(); // s1 -> m: ConfirmInitialization + l.take_and_process_message().unwrap(); // s2 -> m: ConfirmInitialization + l.take_and_process_message().unwrap(); // m -> s1: CompleteInitialization + l.take_and_process_message().unwrap(); // m -> s2: CompleteInitialization + l.take_and_process_message().unwrap(); // m -> s1: KeysDissemination + assert_eq!(l.first_slave().on_keys_dissemination(m, &message::KeysDissemination { + session: sid.into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into(), math::generate_random_point().unwrap().into()], + }).unwrap_err(), Error::InvalidMessage); + } + + #[test] + 
fn fails_to_accept_keys_dissemination_second_time_from_the_same_node() { + let (sid, m, _, mut l) = make_simple_cluster(0, 3).unwrap(); + l.take_and_process_message().unwrap(); // m -> s1: InitializeSession + l.take_and_process_message().unwrap(); // m -> s2: InitializeSession + l.take_and_process_message().unwrap(); // s1 -> m: ConfirmInitialization + l.take_and_process_message().unwrap(); // s2 -> m: ConfirmInitialization + l.take_and_process_message().unwrap(); // m -> s1: CompleteInitialization + l.take_and_process_message().unwrap(); // m -> s2: CompleteInitialization + l.take_and_process_message().unwrap(); // m -> s1: KeysDissemination + assert_eq!(l.first_slave().on_keys_dissemination(m, &message::KeysDissemination { + session: sid.into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into()], + }).unwrap_err(), Error::InvalidStateForRequest); + } + + #[test] + fn should_not_accept_public_key_share_when_is_not_waiting_for_it() { + let (sid, _, s, l) = make_simple_cluster(1, 3).unwrap(); + assert_eq!(l.master().on_public_key_share(s, &message::PublicKeyShare { + session: sid.into(), + public_share: math::generate_random_point().unwrap().into(), + }).unwrap_err(), Error::InvalidStateForRequest); + } + + #[test] + fn should_not_accept_public_key_share_when_receiving_twice() { + let (sid, m, _, mut l) = make_simple_cluster(0, 3).unwrap(); + l.take_and_process_message().unwrap(); // m -> s1: InitializeSession + l.take_and_process_message().unwrap(); // m -> s2: InitializeSession + l.take_and_process_message().unwrap(); // s1 -> m: ConfirmInitialization + l.take_and_process_message().unwrap(); // s2 -> m: ConfirmInitialization + l.take_and_process_message().unwrap(); // m -> s1: CompleteInitialization + l.take_and_process_message().unwrap(); // m -> s2: CompleteInitialization + l.take_and_process_message().unwrap(); // m -> s1: 
KeysDissemination + l.take_and_process_message().unwrap(); // m -> s2: KeysDissemination + l.take_and_process_message().unwrap(); // s1 -> m: KeysDissemination + l.take_and_process_message().unwrap(); // s1 -> s2: KeysDissemination + l.take_and_process_message().unwrap(); // s2 -> m: KeysDissemination + l.take_and_process_message().unwrap(); // s2 -> s1: KeysDissemination + let (f, t, msg) = match l.take_message() { + Some((f, t, Message::Generation(GenerationMessage::PublicKeyShare(msg)))) => (f, t, msg), + _ => panic!("unexpected"), + }; + assert_eq!(&f, l.master().node()); + assert_eq!(&t, l.second_slave().node()); + l.process_message((f, t, Message::Generation(GenerationMessage::PublicKeyShare(msg.clone())))).unwrap(); + assert_eq!(l.second_slave().on_public_key_share(m, &message::PublicKeyShare { + session: sid.into(), + public_share: math::generate_random_point().unwrap().into(), + }).unwrap_err(), Error::InvalidMessage); + } + + + #[test] + fn encryption_fails_on_session_timeout() { + let (_, _, _, l) = make_simple_cluster(0, 2).unwrap(); + assert!(l.master().joint_public_and_secret().is_none()); + l.master().on_session_timeout(); + assert!(l.master().joint_public_and_secret().unwrap().unwrap_err() == Error::NodeDisconnected); + } + + #[test] + fn encryption_fails_on_node_timeout() { + let (_, _, _, l) = make_simple_cluster(0, 2).unwrap(); + assert!(l.master().joint_public_and_secret().is_none()); + l.master().on_node_timeout(l.first_slave().node()); + assert!(l.master().joint_public_and_secret().unwrap().unwrap_err() == Error::NodeDisconnected); + } + + #[test] + fn complete_enc_dec_session() { + let test_cases = [(0, 5), (2, 5), (3, 5)]; + for &(threshold, num_nodes) in &test_cases { + let mut l = MessageLoop::new(num_nodes); + l.master().initialize(Public::default(), threshold, l.nodes.keys().cloned().collect()).unwrap(); + assert_eq!(l.nodes.len(), num_nodes); + + // let nodes do initialization + keys dissemination + while let Some((from, to, message)) = 
l.take_message() { + l.process_message((from, to, message)).unwrap(); + } + + // check that all nodes has finished joint public generation + let joint_public_key = l.master().joint_public_and_secret().unwrap().unwrap().0; + for node in l.nodes.values() { + let state = node.session.state(); + assert_eq!(state, SessionState::Finished); + assert_eq!(node.session.joint_public_and_secret().map(|p| p.map(|p| p.0)), Some(Ok(joint_public_key))); + } + + // now let's encrypt some secret (which is a point on EC) + let document_secret_plain = Random.generate().unwrap().public().clone(); + let all_nodes_id_numbers: Vec<_> = l.master().data.lock().nodes.values().map(|n| n.id_number.clone()).collect(); + let all_nodes_secret_shares: Vec<_> = l.nodes.values().map(|n| n.session.data.lock().secret_share.as_ref().unwrap().clone()).collect(); + let document_secret_decrypted = do_encryption_and_decryption(threshold, &joint_public_key, + &all_nodes_id_numbers, + &all_nodes_secret_shares, + None, + document_secret_plain.clone() + ).0; + assert_eq!(document_secret_plain, document_secret_decrypted); + } + } + + #[test] + fn encryption_session_works_over_network() { + //::util::log::init_log(); + + let test_cases = [(1, 3)]; + for &(threshold, num_nodes) in &test_cases { + let mut core = Core::new().unwrap(); + + // prepare cluster objects for each node + let clusters = make_clusters(&core, 6022, num_nodes); + run_clusters(&clusters); + + // establish connections + loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established)); + + // run session to completion + let session_id = SessionId::default(); + let session = clusters[0].client().new_generation_session(session_id, Public::default(), threshold).unwrap(); + loop_until(&mut core, time::Duration::from_millis(1000), || session.joint_public_and_secret().is_some()); + } + } +} diff --git a/secret_store/src/key_server_cluster/io/message.rs b/secret_store/src/key_server_cluster/io/message.rs 
index 95d3a54cb..49b71e39d 100644 --- a/secret_store/src/key_server_cluster/io/message.rs +++ b/secret_store/src/key_server_cluster/io/message.rs @@ -25,7 +25,8 @@ use ethkey::{Public, Secret, KeyPair}; use ethkey::math::curve_order; use util::{H256, U256}; use key_server_cluster::Error; -use key_server_cluster::message::{Message, ClusterMessage, EncryptionMessage, DecryptionMessage}; +use key_server_cluster::message::{Message, ClusterMessage, GenerationMessage, EncryptionMessage, + DecryptionMessage, SigningMessage}; /// Size of serialized header. pub const MESSAGE_HEADER_SIZE: usize = 4; @@ -67,20 +68,30 @@ pub fn serialize_message(message: Message) -> Result { Message::Cluster(ClusterMessage::KeepAlive(payload)) => (3, serde_json::to_vec(&payload)), Message::Cluster(ClusterMessage::KeepAliveResponse(payload)) => (4, serde_json::to_vec(&payload)), - Message::Encryption(EncryptionMessage::InitializeSession(payload)) => (50, serde_json::to_vec(&payload)), - Message::Encryption(EncryptionMessage::ConfirmInitialization(payload)) => (51, serde_json::to_vec(&payload)), - Message::Encryption(EncryptionMessage::CompleteInitialization(payload)) => (52, serde_json::to_vec(&payload)), - Message::Encryption(EncryptionMessage::KeysDissemination(payload)) => (53, serde_json::to_vec(&payload)), - Message::Encryption(EncryptionMessage::PublicKeyShare(payload)) => (54, serde_json::to_vec(&payload)), - Message::Encryption(EncryptionMessage::SessionError(payload)) => (55, serde_json::to_vec(&payload)), - Message::Encryption(EncryptionMessage::SessionCompleted(payload)) => (56, serde_json::to_vec(&payload)), + Message::Generation(GenerationMessage::InitializeSession(payload)) => (50, serde_json::to_vec(&payload)), + Message::Generation(GenerationMessage::ConfirmInitialization(payload)) => (51, serde_json::to_vec(&payload)), + Message::Generation(GenerationMessage::CompleteInitialization(payload)) => (52, serde_json::to_vec(&payload)), + 
Message::Generation(GenerationMessage::KeysDissemination(payload)) => (53, serde_json::to_vec(&payload)), + Message::Generation(GenerationMessage::PublicKeyShare(payload)) => (54, serde_json::to_vec(&payload)), + Message::Generation(GenerationMessage::SessionError(payload)) => (55, serde_json::to_vec(&payload)), + Message::Generation(GenerationMessage::SessionCompleted(payload)) => (56, serde_json::to_vec(&payload)), - Message::Decryption(DecryptionMessage::InitializeDecryptionSession(payload)) => (100, serde_json::to_vec(&payload)), - Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(payload)) => (101, serde_json::to_vec(&payload)), - Message::Decryption(DecryptionMessage::RequestPartialDecryption(payload)) => (102, serde_json::to_vec(&payload)), - Message::Decryption(DecryptionMessage::PartialDecryption(payload)) => (103, serde_json::to_vec(&payload)), - Message::Decryption(DecryptionMessage::DecryptionSessionError(payload)) => (104, serde_json::to_vec(&payload)), - Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(payload)) => (105, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::InitializeEncryptionSession(payload)) => (100, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization(payload)) => (101, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::EncryptionSessionError(payload)) => (102, serde_json::to_vec(&payload)), + + Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(payload)) => (150, serde_json::to_vec(&payload)), + Message::Decryption(DecryptionMessage::RequestPartialDecryption(payload)) => (151, serde_json::to_vec(&payload)), + Message::Decryption(DecryptionMessage::PartialDecryption(payload)) => (152, serde_json::to_vec(&payload)), + Message::Decryption(DecryptionMessage::DecryptionSessionError(payload)) => (153, serde_json::to_vec(&payload)), + 
Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(payload)) => (154, serde_json::to_vec(&payload)), + + Message::Signing(SigningMessage::SigningConsensusMessage(payload)) => (200, serde_json::to_vec(&payload)), + Message::Signing(SigningMessage::SigningGenerationMessage(payload)) => (201, serde_json::to_vec(&payload)), + Message::Signing(SigningMessage::RequestPartialSignature(payload)) => (202, serde_json::to_vec(&payload)), + Message::Signing(SigningMessage::PartialSignature(payload)) => (203, serde_json::to_vec(&payload)), + Message::Signing(SigningMessage::SigningSessionError(payload)) => (204, serde_json::to_vec(&payload)), + Message::Signing(SigningMessage::SigningSessionCompleted(payload)) => (205, serde_json::to_vec(&payload)), }; let payload = payload.map_err(|err| Error::Serde(err.to_string()))?; @@ -99,20 +110,30 @@ pub fn deserialize_message(header: &MessageHeader, payload: Vec) -> Result Message::Cluster(ClusterMessage::KeepAlive(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), 4 => Message::Cluster(ClusterMessage::KeepAliveResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 50 => Message::Encryption(EncryptionMessage::InitializeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 51 => Message::Encryption(EncryptionMessage::ConfirmInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 52 => Message::Encryption(EncryptionMessage::CompleteInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 53 => Message::Encryption(EncryptionMessage::KeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 54 => Message::Encryption(EncryptionMessage::PublicKeyShare(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 55 => 
Message::Encryption(EncryptionMessage::SessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 56 => Message::Encryption(EncryptionMessage::SessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 50 => Message::Generation(GenerationMessage::InitializeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 51 => Message::Generation(GenerationMessage::ConfirmInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 52 => Message::Generation(GenerationMessage::CompleteInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 53 => Message::Generation(GenerationMessage::KeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 54 => Message::Generation(GenerationMessage::PublicKeyShare(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 55 => Message::Generation(GenerationMessage::SessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 56 => Message::Generation(GenerationMessage::SessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 100 => Message::Decryption(DecryptionMessage::InitializeDecryptionSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 101 => Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 102 => Message::Decryption(DecryptionMessage::RequestPartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 103 => Message::Decryption(DecryptionMessage::PartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 104 => 
Message::Decryption(DecryptionMessage::DecryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), - 105 => Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 100 => Message::Encryption(EncryptionMessage::InitializeEncryptionSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 101 => Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 102 => Message::Encryption(EncryptionMessage::EncryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + + 150 => Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 151 => Message::Decryption(DecryptionMessage::RequestPartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 152 => Message::Decryption(DecryptionMessage::PartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 153 => Message::Decryption(DecryptionMessage::DecryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 154 => Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + + 200 => Message::Signing(SigningMessage::SigningConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 201 => Message::Signing(SigningMessage::SigningGenerationMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 202 => Message::Signing(SigningMessage::RequestPartialSignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 203 
=> Message::Signing(SigningMessage::PartialSignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 204 => Message::Signing(SigningMessage::SigningSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 205 => Message::Signing(SigningMessage::SigningSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), _ => return Err(Error::Serde(format!("unknown message type {}", header.kind))), }) diff --git a/secret_store/src/key_server_cluster/jobs/consensus_session.rs b/secret_store/src/key_server_cluster/jobs/consensus_session.rs new file mode 100644 index 000000000..27542bc44 --- /dev/null +++ b/secret_store/src/key_server_cluster/jobs/consensus_session.rs @@ -0,0 +1,756 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::collections::BTreeSet; +use std::sync::Arc; +use ethkey::{Public, Signature, recover}; +use key_server_cluster::{Error, NodeId, SessionMeta, AclStorage}; +use key_server_cluster::message::ConsensusMessage; +use key_server_cluster::jobs::job_session::{JobSession, JobSessionState, JobTransport, JobExecutor}; +use key_server_cluster::jobs::key_access_job::KeyAccessJob; + +/// Consensus session state. 
+#[derive(Debug, Clone, Copy, PartialEq)] +pub enum ConsensusSessionState { + /// Every node starts in this state. + WaitingForInitialization, + /// Consensus group is establishing. + EstablishingConsensus, + /// Consensus group is established. + /// Master node can start jobs dissemination. + /// Slave node waits for partial job requests. + ConsensusEstablished, + /// Master node waits for partial jobs responses. + WaitingForPartialResults, + /// Consensus session is completed successfully. + /// Master node can call result() to get computation result. + Finished, + /// Consensus session has failed with error. + Failed, +} + +/// Consensus session consists of following states: +/// 1) consensus group is established +/// 2) master node sends partial job requests to every member of consensus group +/// 3) slave nodes are computing partial responses +/// 4) master node computes result from partial responses +pub struct ConsensusSession, ComputationExecutor: JobExecutor, ComputationTransport: JobTransport> { + /// Current session state. + state: ConsensusSessionState, + /// Session metadata. + meta: SessionMeta, + /// Requester, for which consensus group has allowed access. + requester: Option, + /// Consensus establish job. + consensus_job: JobSession, + /// Consensus group. + consensus_group: BTreeSet, + /// Computation job. + computation_job: Option>, +} + +/// Consensus session creation parameters. +pub struct ConsensusSessionParams> { + /// Session metadata. + pub meta: SessionMeta, + /// ACL storage for access check. + pub acl_storage: Arc, + /// Transport for consensus establish job. + pub consensus_transport: ConsensusTransport, +} + +impl ConsensusSession where ConsensusTransport: JobTransport, ComputationExecutor: JobExecutor, ComputationTransport: JobTransport { + /// Create new consensus session on slave node. 
+ pub fn new_on_slave(params: ConsensusSessionParams) -> Result { + debug_assert!(params.meta.self_node_id != params.meta.master_node_id); + Self::new(None, KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()), params) + } + + /// Create new consensus session on master node. + pub fn new_on_master(params: ConsensusSessionParams, signature: Signature) -> Result { + debug_assert!(params.meta.self_node_id == params.meta.master_node_id); + Self::new(Some(recover(&signature, ¶ms.meta.id)?), + KeyAccessJob::new_on_master(params.meta.id.clone(), params.acl_storage.clone(), signature), params) + } + + /// Create new consensus session. + fn new(requester: Option, consensus_job_executor: KeyAccessJob, params: ConsensusSessionParams) -> Result { + let consensus_job = JobSession::new(params.meta.clone(), consensus_job_executor, params.consensus_transport); + debug_assert!(consensus_job.state() == JobSessionState::Inactive); + + Ok(ConsensusSession { + state: ConsensusSessionState::WaitingForInitialization, + meta: params.meta, + requester: requester, + consensus_job: consensus_job, + consensus_group: BTreeSet::new(), + computation_job: None, + }) + } + + /// Get consensus job reference. + #[cfg(test)] + pub fn consensus_job(&self) -> &JobSession { + &self.consensus_job + } + + /// Get computation job reference. + #[cfg(test)] + pub fn computation_job(&self) -> &JobSession { + self.computation_job.as_ref() + .expect("computation_job must only be called on master nodes") + } + + /// Get consensus session state. + pub fn state(&self) -> ConsensusSessionState { + self.state + } + + /// Get requester, for which consensus has been reached. + pub fn requester(&self) -> Result<&Public, Error> { + self.requester.as_ref().ok_or(Error::InvalidStateForRequest) + } + + /// Get computation result. 
+ pub fn result(&self) -> Result { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + if self.state != ConsensusSessionState::Finished { + return Err(Error::InvalidStateForRequest); + } + + self.computation_job.as_ref() + .expect("we are on master node in finished state; computation_job is set on master node during initialization; qed") + .result() + } + + /// Initialize session on master node. + pub fn initialize(&mut self, nodes: BTreeSet) -> Result<(), Error> { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + let initialization_result = self.consensus_job.initialize(nodes); + self.state = ConsensusSessionState::EstablishingConsensus; + self.process_result(initialization_result) + } + + /// Process consensus message. + pub fn on_consensus_message(&mut self, sender: &NodeId, message: &ConsensusMessage) -> Result<(), Error> { + let consensus_result = match message { + &ConsensusMessage::InitializeConsensusSession(ref message) => { + let signature = message.requestor_signature.clone().into(); + self.requester = Some(recover(&signature, &self.meta.id)?); + self.consensus_job.on_partial_request(sender, signature) + }, + &ConsensusMessage::ConfirmConsensusInitialization(ref message) => + self.consensus_job.on_partial_response(sender, message.is_confirmed), + }; + self.process_result(consensus_result) + } + + /// Select nodes for processing partial requests. 
+ pub fn select_consensus_group(&mut self) -> Result<&BTreeSet, Error> { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + if self.state != ConsensusSessionState::ConsensusEstablished { + return Err(Error::InvalidStateForRequest); + } + + if self.consensus_group.is_empty() { + let consensus_group = self.consensus_job.result()?; + let is_self_in_consensus = consensus_group.contains(&self.meta.self_node_id); + self.consensus_group = consensus_group.into_iter().take(self.meta.threshold + 1).collect(); + + if is_self_in_consensus { + self.consensus_group.remove(&self.meta.master_node_id); + self.consensus_group.insert(self.meta.master_node_id.clone()); + } + } + + Ok(&self.consensus_group) + } + + /// Disseminate jobs from master node. + pub fn disseminate_jobs(&mut self, executor: ComputationExecutor, transport: ComputationTransport) -> Result<(), Error> { + let consensus_group = self.select_consensus_group()?.clone(); + self.consensus_group.clear(); + + let mut computation_job = JobSession::new(self.meta.clone(), executor, transport); + let computation_result = computation_job.initialize(consensus_group); + self.computation_job = Some(computation_job); + self.state = ConsensusSessionState::WaitingForPartialResults; + self.process_result(computation_result) + } + + /// Process job request on slave node. + pub fn on_job_request(&mut self, node: &NodeId, request: ComputationExecutor::PartialJobRequest, executor: ComputationExecutor, transport: ComputationTransport) -> Result<(), Error> { + if &self.meta.master_node_id != node { + return Err(Error::InvalidMessage); + } + if self.state != ConsensusSessionState::ConsensusEstablished { + return Err(Error::InvalidStateForRequest); + } + + JobSession::new(self.meta.clone(), executor, transport).on_partial_request(node, request) + } + + /// Process job response on slave node. 
+ pub fn on_job_response(&mut self, node: &NodeId, response: ComputationExecutor::PartialJobResponse) -> Result<(), Error> { + if self.state != ConsensusSessionState::WaitingForPartialResults { + return Err(Error::InvalidStateForRequest); + } + + let computation_result = self.computation_job.as_mut() + .expect("WaitingForPartialResults is only set when computation_job is created; qed") + .on_partial_response(node, response); + self.process_result(computation_result) + } + + /// When session is completed on slave node. + pub fn on_session_completed(&mut self, node: &NodeId) -> Result<(), Error> { + if node != &self.meta.master_node_id { + return Err(Error::InvalidMessage); + } + if self.state != ConsensusSessionState::ConsensusEstablished { + return Err(Error::InvalidStateForRequest); + } + + self.state = ConsensusSessionState::Finished; + + Ok(()) + } + + /// When error is received from node. + pub fn on_node_error(&mut self, node: &NodeId) -> Result { + let is_self_master = self.meta.master_node_id == self.meta.self_node_id; + let is_node_master = self.meta.master_node_id == *node; + let (is_restart_needed, timeout_result) = match self.state { + ConsensusSessionState::WaitingForInitialization if is_self_master => { + // it is strange to receive error before session is initialized && slave doesn't know access_key + // => ignore this error for now + (false, Ok(())) + } + ConsensusSessionState::WaitingForInitialization if is_node_master => { + // can not establish consensus + // => fatal error + self.state = ConsensusSessionState::Failed; + (false, Err(Error::ConsensusUnreachable)) + }, + ConsensusSessionState::EstablishingConsensus => { + debug_assert!(is_self_master); + + // consensus still can be established + // => try to live without this node + (false, self.consensus_job.on_node_error(node)) + }, + ConsensusSessionState::ConsensusEstablished => { + // we could try to continue without this node, if enough nodes left + (false, 
self.consensus_job.on_node_error(node)) + }, + ConsensusSessionState::WaitingForPartialResults => { + // check if *current* computation job can continue without this node + let is_computation_node = self.computation_job.as_mut() + .expect("WaitingForPartialResults state is only set when computation_job is created; qed") + .on_node_error(node) + .is_err(); + if !is_computation_node { + // it is not used by current computation job + // => no restart required + (false, Ok(())) + } else { + // it is used by current computation job + // => restart is required if there are still enough nodes + self.consensus_group.clear(); + self.state = ConsensusSessionState::EstablishingConsensus; + + let consensus_result = self.consensus_job.on_node_error(node); + let is_consensus_established = self.consensus_job.state() == JobSessionState::Finished; + (is_consensus_established, consensus_result) + } + }, + // in all other cases - just ignore error + ConsensusSessionState::WaitingForInitialization | ConsensusSessionState::Failed | ConsensusSessionState::Finished => (false, Ok(())), + }; + self.process_result(timeout_result)?; + Ok(is_restart_needed) + } + + /// When session is timeouted. 
+ pub fn on_session_timeout(&mut self) -> Result { + match self.state { + // if we are waiting for results from slaves, there is a chance to send request to other nodes subset => fall through + ConsensusSessionState::WaitingForPartialResults => (), + // in some states this error is fatal + ConsensusSessionState::WaitingForInitialization | ConsensusSessionState::EstablishingConsensus | ConsensusSessionState::ConsensusEstablished => { + let _ = self.consensus_job.on_session_timeout(); + + self.consensus_group.clear(); + self.state = ConsensusSessionState::EstablishingConsensus; + return self.process_result(Err(Error::ConsensusUnreachable)).map(|_| unreachable!()); + }, + // in all other cases - just ignore error + ConsensusSessionState::Finished | ConsensusSessionState::Failed => return Ok(false), + }; + + let timeouted_nodes = self.computation_job.as_ref() + .expect("WaitingForPartialResults state is only set when computation_job is created; qed") + .requests() + .clone(); + assert!(!timeouted_nodes.is_empty()); // timeout should not ever happen if no requests are active && we are waiting for responses + + self.consensus_group.clear(); + for timeouted_node in timeouted_nodes { + let timeout_result = self.consensus_job.on_node_error(&timeouted_node); + self.state = ConsensusSessionState::EstablishingConsensus; + self.process_result(timeout_result)?; + } + + Ok(self.state == ConsensusSessionState::ConsensusEstablished) + } + + /// Process result of job. 
+ fn process_result(&mut self, result: Result<(), Error>) -> Result<(), Error> { + match self.state { + ConsensusSessionState::WaitingForInitialization | ConsensusSessionState::EstablishingConsensus | ConsensusSessionState::ConsensusEstablished => match self.consensus_job.state() { + JobSessionState::Finished => self.state = ConsensusSessionState::ConsensusEstablished, + JobSessionState::Failed => self.state = ConsensusSessionState::Failed, + _ => (), + }, + ConsensusSessionState::WaitingForPartialResults => match self.computation_job.as_ref() + .expect("WaitingForPartialResults state is only set when computation_job is created; qed") + .state() { + JobSessionState::Finished => self.state = ConsensusSessionState::Finished, + JobSessionState::Failed => self.state = ConsensusSessionState::Failed, + _ => (), + }, + _ => (), + } + + result + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + use ethkey::{Signature, KeyPair, Random, Generator, sign}; + use key_server_cluster::{Error, NodeId, SessionId, DummyAclStorage}; + use key_server_cluster::message::{ConsensusMessage, InitializeConsensusSession, ConfirmConsensusInitialization}; + use key_server_cluster::jobs::job_session::tests::{make_master_session_meta, make_slave_session_meta, SquaredSumJobExecutor, DummyJobTransport}; + use super::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState}; + + type SquaredSumConsensusSession = ConsensusSession, SquaredSumJobExecutor, DummyJobTransport>; + + fn make_master_consensus_session(threshold: usize, requester: Option, acl_storage: Option) -> SquaredSumConsensusSession { + let secret = requester.map(|kp| kp.secret().clone()).unwrap_or(Random.generate().unwrap().secret().clone()); + SquaredSumConsensusSession::new_on_master(ConsensusSessionParams { + meta: make_master_session_meta(threshold), + acl_storage: Arc::new(acl_storage.unwrap_or(DummyAclStorage::default())), + consensus_transport: DummyJobTransport::default(), + }, sign(&secret, 
&SessionId::default()).unwrap()).unwrap() + } + + fn make_slave_consensus_session(threshold: usize, acl_storage: Option) -> SquaredSumConsensusSession { + SquaredSumConsensusSession::new_on_slave(ConsensusSessionParams { + meta: make_slave_session_meta(threshold), + acl_storage: Arc::new(acl_storage.unwrap_or(DummyAclStorage::default())), + consensus_transport: DummyJobTransport::default(), + }).unwrap() + } + + #[test] + fn consensus_session_consensus_is_not_reached_when_initializes_with_non_zero_threshold() { + let mut session = make_master_consensus_session(1, None, None); + session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + } + + #[test] + fn consensus_session_consensus_is_reached_when_initializes_with_zero_threshold() { + let mut session = make_master_consensus_session(0, None, None); + session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + } + + #[test] + fn consensus_session_consensus_is_not_reached_when_initializes_with_zero_threshold_and_master_rejects() { + let requester = Random.generate().unwrap(); + let acl_storage = DummyAclStorage::default(); + acl_storage.prohibit(requester.public().clone(), SessionId::default()); + + let mut session = make_master_consensus_session(0, Some(requester), Some(acl_storage)); + 
session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + } + + #[test] + fn consensus_session_consensus_is_failed_by_master_node() { + let requester = Random.generate().unwrap(); + let acl_storage = DummyAclStorage::default(); + acl_storage.prohibit(requester.public().clone(), SessionId::default()); + + let mut session = make_master_consensus_session(1, Some(requester), Some(acl_storage)); + assert_eq!(session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap_err(), Error::ConsensusUnreachable); + assert_eq!(session.state(), ConsensusSessionState::Failed); + } + + #[test] + fn consensus_session_consensus_is_failed_by_slave_node() { + let mut session = make_master_consensus_session(1, None, None); + session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); + assert_eq!(session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: false, + })).unwrap_err(), Error::ConsensusUnreachable); + assert_eq!(session.state(), ConsensusSessionState::Failed); + } + + #[test] + fn consensus_session_job_dissemination_fails_if_consensus_is_not_reached() { + let mut session = make_master_consensus_session(1, None, None); + assert_eq!(session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidStateForRequest); + } + + #[test] + fn consensus_session_job_dissemination_selects_master_node_if_agreed() { + let mut session = make_master_consensus_session(0, None, None); + 
session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::Finished); + assert!(session.computation_job().responses().contains_key(&NodeId::from(1))); + } + + #[test] + fn consensus_session_job_dissemination_does_not_select_master_node_if_rejected() { + let requester = Random.generate().unwrap(); + let acl_storage = DummyAclStorage::default(); + acl_storage.prohibit(requester.public().clone(), SessionId::default()); + + let mut session = make_master_consensus_session(0, Some(requester), Some(acl_storage)); + session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + assert!(!session.computation_job().responses().contains_key(&NodeId::from(1))); + } + + #[test] + fn consensus_session_computation_request_is_rejected_when_received_by_master_node() { + let mut session = make_master_consensus_session(0, None, None); + assert_eq!(session.on_job_request(&NodeId::from(2), 2, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidMessage); + } 
+ + #[test] + fn consensus_session_computation_request_is_rejected_when_received_before_consensus_is_established() { + let mut session = make_slave_consensus_session(0, None); + assert_eq!(session.on_job_request(&NodeId::from(1), 2, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidStateForRequest); + } + + #[test] + fn consensus_session_computation_request_is_ignored_when_wrong() { + let mut session = make_slave_consensus_session(0, None); + assert_eq!(session.state(), ConsensusSessionState::WaitingForInitialization); + session.on_consensus_message(&NodeId::from(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { + requestor_signature: sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), + })).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + assert_eq!(session.on_job_request(&NodeId::from(1), 20, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidMessage); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + } + + #[test] + fn consensus_session_computation_request_is_processed_when_correct() { + let mut session = make_slave_consensus_session(0, None); + assert_eq!(session.state(), ConsensusSessionState::WaitingForInitialization); + session.on_consensus_message(&NodeId::from(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { + requestor_signature: sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), + })).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session.on_job_request(&NodeId::from(1), 2, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + } + + #[test] + fn consensus_session_computation_response_is_ignored_when_consensus_is_not_reached() { + let mut session = make_master_consensus_session(1, None, 
None); + assert_eq!(session.on_job_response(&NodeId::from(2), 4).unwrap_err(), Error::InvalidStateForRequest); + } + + #[test] + fn consessus_session_completion_is_ignored_when_received_from_non_master_node() { + let mut session = make_slave_consensus_session(0, None); + assert_eq!(session.on_session_completed(&NodeId::from(3)).unwrap_err(), Error::InvalidMessage); + } + + #[test] + fn consessus_session_completion_is_ignored_when_consensus_is_not_established() { + let mut session = make_slave_consensus_session(0, None); + assert_eq!(session.on_session_completed(&NodeId::from(1)).unwrap_err(), Error::InvalidStateForRequest); + } + + #[test] + fn consessus_session_completion_is_accepted() { + let mut session = make_slave_consensus_session(0, None); + session.on_consensus_message(&NodeId::from(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { + requestor_signature: sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), + })).unwrap(); + session.on_session_completed(&NodeId::from(1)).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::Finished); + } + + #[test] + fn consensus_session_continues_if_node_error_received_by_uninitialized_master() { + let mut session = make_master_consensus_session(0, None, None); + assert_eq!(session.on_node_error(&NodeId::from(2)), Ok(false)); + } + + #[test] + fn consensus_session_fails_if_node_error_received_by_uninitialized_slave_from_master() { + let mut session = make_slave_consensus_session(0, None); + assert_eq!(session.on_node_error(&NodeId::from(1)), Err(Error::ConsensusUnreachable)); + assert_eq!(session.state(), ConsensusSessionState::Failed); + } + + #[test] + fn consensus_session_continues_if_node_error_received_by_master_during_establish_and_enough_nodes_left() { + let mut session = make_master_consensus_session(1, None, None); + session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)].into_iter().collect()).unwrap(); + 
assert_eq!(session.on_node_error(&NodeId::from(2)), Ok(false)); + } + + #[test] + fn consensus_session_fails_if_node_error_received_by_master_during_establish_and_not_enough_nodes_left() { + let mut session = make_master_consensus_session(1, None, None); + session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); + assert_eq!(session.on_node_error(&NodeId::from(2)), Err(Error::ConsensusUnreachable)); + assert_eq!(session.state(), ConsensusSessionState::Failed); + } + + #[test] + fn consensus_session_continues_if_node2_error_received_by_master_after_consensus_established_and_enough_nodes_left() { + let mut session = make_master_consensus_session(1, None, None); + session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)].into_iter().collect()).unwrap(); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + assert_eq!(session.on_node_error(&NodeId::from(2)), Ok(false)); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + } + + #[test] + fn consensus_session_continues_if_node3_error_received_by_master_after_consensus_established_and_enough_nodes_left() { + let mut session = make_master_consensus_session(1, None, None); + session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)].into_iter().collect()).unwrap(); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + assert_eq!(session.on_node_error(&NodeId::from(3)), Ok(false)); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + } + + #[test] + fn consensus_session_fails_if_node_error_received_by_master_after_consensus_established_and_not_enough_nodes_left() { + let mut session = make_master_consensus_session(1, None, None); + session.initialize(vec![NodeId::from(1), 
NodeId::from(2)].into_iter().collect()).unwrap(); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + assert_eq!(session.on_node_error(&NodeId::from(2)), Err(Error::ConsensusUnreachable)); + assert_eq!(session.state(), ConsensusSessionState::Failed); + } + + #[test] + fn consensus_session_continues_if_node_error_received_from_slave_not_participating_in_computation() { + let mut session = make_master_consensus_session(1, None, None); + session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3), NodeId::from(4)].into_iter().collect()).unwrap(); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + session.on_consensus_message(&NodeId::from(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap(); + assert_eq!(session.on_node_error(&NodeId::from(3)), Ok(false)); + assert_eq!(session.on_node_error(&NodeId::from(4)), Ok(false)); + assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + } + + #[test] + fn consensus_session_restarts_if_node_error_received_from_slave_participating_in_computation_and_enough_nodes_left() { + let mut session = make_master_consensus_session(1, None, None); + session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3), NodeId::from(4)].into_iter().collect()).unwrap(); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + + 
session.on_consensus_message(&NodeId::from(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + assert_eq!(session.on_node_error(&NodeId::from(2)), Ok(true)); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + + assert_eq!(session.on_node_error(&NodeId::from(3)), Ok(false)); + assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); + } + + #[test] + fn consensus_session_fails_if_node_error_received_from_slave_participating_in_computation_and_not_enough_nodes_left() { + let mut session = make_master_consensus_session(1, None, None); + session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap(); + assert_eq!(session.on_node_error(&NodeId::from(2)), Err(Error::ConsensusUnreachable)); + assert_eq!(session.state(), ConsensusSessionState::Failed); + } + + #[test] + fn consensus_session_fails_if_uninitialized_session_timeouts() { + let mut session = make_master_consensus_session(1, None, None); + assert_eq!(session.on_session_timeout(), Err(Error::ConsensusUnreachable)); + } + + #[test] + fn consensus_session_continues_if_session_timeouts_and_enough_nodes_left_for_computation() { + let mut session = make_master_consensus_session(1, None, None); + session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3), NodeId::from(4)].into_iter().collect()).unwrap(); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + 
is_confirmed: true, + })).unwrap(); + + session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + + session.on_consensus_message(&NodeId::from(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + assert_eq!(session.on_session_timeout(), Ok(true)); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + + session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + + assert_eq!(session.on_session_timeout(), Ok(false)); + assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); + } + + #[test] + fn consensus_session_continues_if_session_timeouts_and_not_enough_nodes_left_for_computation() { + let mut session = make_master_consensus_session(1, None, None); + session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap(); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + + assert_eq!(session.on_session_timeout(), Err(Error::ConsensusUnreachable)); + assert_eq!(session.state(), ConsensusSessionState::Failed); + } + + #[test] + fn same_consensus_group_returned_after_second_selection() { + let mut session = make_master_consensus_session(1, None, None); + session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)].into_iter().collect()).unwrap(); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + 
session.on_consensus_message(&NodeId::from(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + + let consensus_group1 = session.select_consensus_group().unwrap().clone(); + let consensus_group2 = session.select_consensus_group().unwrap().clone(); + assert_eq!(consensus_group1, consensus_group2); + } + + #[test] + fn consensus_session_complete_2_of_4() { + let mut session = make_master_consensus_session(1, None, None); + session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3), NodeId::from(3)].into_iter().collect()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + session.on_job_response(&NodeId::from(2), 16).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::Finished); + assert_eq!(session.result(), Ok(20)); + } + + #[test] + fn consensus_session_complete_2_of_4_after_restart() { + let mut session = make_master_consensus_session(1, None, None); + session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3), NodeId::from(4)].into_iter().collect()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); + session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + + session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap(); + assert_eq!(session.state(), 
ConsensusSessionState::WaitingForPartialResults); + + session.on_consensus_message(&NodeId::from(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + + assert_eq!(session.on_node_error(&NodeId::from(2)).unwrap(), true); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + + session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + + assert_eq!(session.on_node_error(&NodeId::from(3)).unwrap(), false); + assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus); + + session.on_consensus_message(&NodeId::from(4), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + })).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished); + + session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults); + + session.on_job_response(&NodeId::from(4), 16).unwrap(); + assert_eq!(session.state(), ConsensusSessionState::Finished); + assert_eq!(session.result(), Ok(20)); + } +} diff --git a/secret_store/src/key_server_cluster/jobs/decryption_job.rs b/secret_store/src/key_server_cluster/jobs/decryption_job.rs new file mode 100644 index 000000000..54594c827 --- /dev/null +++ b/secret_store/src/key_server_cluster/jobs/decryption_job.rs @@ -0,0 +1,162 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::collections::{BTreeSet, BTreeMap}; +use ethkey::{Public, Secret}; +use ethcrypto::ecies::encrypt; +use ethcrypto::DEFAULT_MAC; +use key_server_cluster::{Error, NodeId, DocumentKeyShare, EncryptedDocumentKeyShadow}; +use key_server_cluster::math; +use key_server_cluster::jobs::job_session::{JobPartialRequestAction, JobPartialResponseAction, JobExecutor}; + +/// Decryption job. +pub struct DecryptionJob { + /// This node id. + self_node_id: NodeId, + /// Access key. + access_key: Secret, + /// Requester public key. + requester: Public, + /// Key share. + key_share: DocumentKeyShare, + /// Request id. + request_id: Option, + /// Is shadow decryption requested. + is_shadow_decryption: Option, +} + +/// Decryption job partial request. +pub struct PartialDecryptionRequest { + /// Request id. + pub id: Secret, + /// Is shadow decryption requested. + pub is_shadow_decryption: bool, + /// Id of other nodes, participating in decryption. + pub other_nodes_ids: BTreeSet, +} + +/// Decryption job partial response. +pub struct PartialDecryptionResponse { + /// Request id. + pub request_id: Secret, + /// Shadow point. + pub shadow_point: Public, + /// Decryption shadow coefficient, if requested. 
+ pub decrypt_shadow: Option>, +} + +impl DecryptionJob { + pub fn new_on_slave(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare) -> Result { + debug_assert!(key_share.common_point.is_some() && key_share.encrypted_point.is_some()); + Ok(DecryptionJob { + self_node_id: self_node_id, + access_key: access_key, + requester: requester, + key_share: key_share, + request_id: None, + is_shadow_decryption: None, + }) + } + + pub fn new_on_master(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare, is_shadow_decryption: bool) -> Result { + debug_assert!(key_share.common_point.is_some() && key_share.encrypted_point.is_some()); + Ok(DecryptionJob { + self_node_id: self_node_id, + access_key: access_key, + requester: requester, + key_share: key_share, + request_id: Some(math::generate_random_scalar()?), + is_shadow_decryption: Some(is_shadow_decryption), + }) + } +} + +impl JobExecutor for DecryptionJob { + type PartialJobRequest = PartialDecryptionRequest; + type PartialJobResponse = PartialDecryptionResponse; + type JobResponse = EncryptedDocumentKeyShadow; + + fn prepare_partial_request(&self, node: &NodeId, nodes: &BTreeSet) -> Result { + debug_assert!(nodes.len() == self.key_share.threshold + 1); + + let request_id = self.request_id.as_ref() + .expect("prepare_partial_request is only called on master nodes; request_id is filed in constructor on master nodes; qed"); + let is_shadow_decryption = self.is_shadow_decryption + .expect("prepare_partial_request is only called on master nodes; is_shadow_decryption is filed in constructor on master nodes; qed"); + let mut other_nodes_ids = nodes.clone(); + other_nodes_ids.remove(node); + + Ok(PartialDecryptionRequest { + id: request_id.clone(), + is_shadow_decryption: is_shadow_decryption, + other_nodes_ids: other_nodes_ids, + }) + } + + fn process_partial_request(&self, partial_request: PartialDecryptionRequest) -> Result, Error> { + if 
partial_request.other_nodes_ids.len() != self.key_share.threshold + || partial_request.other_nodes_ids.contains(&self.self_node_id) + || partial_request.other_nodes_ids.iter().any(|n| !self.key_share.id_numbers.contains_key(n)) { + return Err(Error::InvalidMessage); + } + + let self_id_number = &self.key_share.id_numbers[&self.self_node_id]; + let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &self.key_share.id_numbers[n]); + let node_shadow = math::compute_node_shadow(&self.key_share.secret_share, &self_id_number, other_id_numbers)?; + let decrypt_shadow = if partial_request.is_shadow_decryption { Some(math::generate_random_scalar()?) } else { None }; + let common_point = self.key_share.common_point.as_ref().expect("DecryptionJob is only created when common_point is known; qed"); + let (shadow_point, decrypt_shadow) = math::compute_node_shadow_point(&self.access_key, &common_point, &node_shadow, decrypt_shadow)?; + Ok(JobPartialRequestAction::Respond(PartialDecryptionResponse { + request_id: partial_request.id, + shadow_point: shadow_point, + decrypt_shadow: match decrypt_shadow { + None => None, + Some(decrypt_shadow) => Some(encrypt(&self.requester, &DEFAULT_MAC, &**decrypt_shadow)?), + }, + })) + } + + fn check_partial_response(&self, partial_response: &PartialDecryptionResponse) -> Result { + if Some(&partial_response.request_id) != self.request_id.as_ref() { + return Ok(JobPartialResponseAction::Ignore); + } + if self.is_shadow_decryption != Some(partial_response.decrypt_shadow.is_some()) { + return Ok(JobPartialResponseAction::Reject); + } + Ok(JobPartialResponseAction::Accept) + } + + fn compute_response(&self, partial_responses: &BTreeMap) -> Result { + let is_shadow_decryption = self.is_shadow_decryption + .expect("compute_response is only called on master nodes; is_shadow_decryption is filed in constructor on master nodes; qed"); + let common_point = self.key_share.common_point.as_ref().expect("DecryptionJob is only created when 
common_point is known; qed"); + let encrypted_point = self.key_share.encrypted_point.as_ref().expect("DecryptionJob is only created when encrypted_point is known; qed"); + let joint_shadow_point = math::compute_joint_shadow_point(partial_responses.values().map(|s| &s.shadow_point))?; + let decrypted_secret = math::decrypt_with_joint_shadow(self.key_share.threshold, &self.access_key, encrypted_point, &joint_shadow_point)?; + Ok(EncryptedDocumentKeyShadow { + decrypted_secret: decrypted_secret, + common_point: if is_shadow_decryption { + Some(math::make_common_shadow_point(self.key_share.threshold, common_point.clone())?) + } else { None }, + decrypt_shadows: if is_shadow_decryption { + Some(partial_responses.values().map(|r| r.decrypt_shadow.as_ref() + .expect("is_shadow_decryption == true; decrypt_shadow.is_some() is checked in check_partial_response; qed") + .clone()) + .collect()) + } else { None }, + }) + } +} diff --git a/secret_store/src/key_server_cluster/jobs/job_session.rs b/secret_store/src/key_server_cluster/jobs/job_session.rs new file mode 100644 index 000000000..7ae1da42a --- /dev/null +++ b/secret_store/src/key_server_cluster/jobs/job_session.rs @@ -0,0 +1,536 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::collections::{BTreeSet, BTreeMap}; +use key_server_cluster::{Error, NodeId, SessionMeta}; + +#[derive(Debug, Clone, Copy, PartialEq)] +/// Partial response action. +pub enum JobPartialResponseAction { + /// Ignore this response. + Ignore, + /// Mark this response as reject. + Reject, + /// Accept this response. + Accept, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +/// Partial request action. +pub enum JobPartialRequestAction { + /// Respond with reject. + Reject(PartialJobResponse), + /// Respond with this response. + Respond(PartialJobResponse), +} + +/// Job executor. +pub trait JobExecutor { + type PartialJobRequest; + type PartialJobResponse; + type JobResponse; + + /// Prepare job request for given node. + fn prepare_partial_request(&self, node: &NodeId, nodes: &BTreeSet) -> Result; + /// Process partial request. + fn process_partial_request(&self, partial_request: Self::PartialJobRequest) -> Result, Error>; + /// Check partial response of given node. + fn check_partial_response(&self, partial_response: &Self::PartialJobResponse) -> Result; + /// Compute final job response. + fn compute_response(&self, partial_responses: &BTreeMap) -> Result; +} + +/// Jobs transport. +pub trait JobTransport { + type PartialJobRequest; + type PartialJobResponse; + + /// Send partial request to given node. + fn send_partial_request(&self, node: &NodeId, request: Self::PartialJobRequest) -> Result<(), Error>; + /// Send partial response to given node. + fn send_partial_response(&self, node: &NodeId, response: Self::PartialJobResponse) -> Result<(), Error>; +} + +#[derive(Debug, Clone, Copy, PartialEq)] +/// Current state of job session. +pub enum JobSessionState { + /// Session is inactive. + Inactive, + /// Session is active. + Active, + /// Session is finished. + Finished, + /// Session has failed. + Failed, +} + +/// Basic request-response session on a set of nodes. +pub struct JobSession where Transport: JobTransport { + /// Session meta. 
+ meta: SessionMeta, + /// Job executor. + executor: Executor, + /// Jobs transport. + transport: Transport, + /// Session data. + data: JobSessionData, + //// PartialJobRequest dummy. + // dummy: PhantomData, +} + +/// Data of job session. +struct JobSessionData { + /// Session state. + state: JobSessionState, + /// Mutable session data. + active_data: Option>, +} + +/// Active job session data. +struct ActiveJobSessionData { + /// Active partial requests. + requests: BTreeSet, + /// Rejects to partial requests. + rejects: BTreeSet, + /// Received partial responses. + responses: BTreeMap, +} + +impl JobSession where Executor: JobExecutor, Transport: JobTransport { + /// Create new session. + pub fn new(meta: SessionMeta, executor: Executor, transport: Transport) -> Self { + JobSession { + meta: meta, + executor: executor, + transport: transport, + data: JobSessionData { + state: JobSessionState::Inactive, + active_data: None, + }, + } + } + + #[cfg(test)] + /// Get transport reference. + pub fn transport(&self) -> &Transport { + &self.transport + } + + /// Get job state. + pub fn state(&self) -> JobSessionState { + self.data.state + } + + #[cfg(test)] + /// Get rejects. + pub fn rejects(&self) -> &BTreeSet { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + + &self.data.active_data.as_ref() + .expect("rejects is only called on master nodes after initialization; on master nodes active_data is filled during initialization; qed") + .rejects + } + + /// Get active requests. + pub fn requests(&self) -> &BTreeSet { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + + &self.data.active_data.as_ref() + .expect("requests is only called on master nodes after initialization; on master nodes active_data is filled during initialization; qed") + .requests + } + + #[cfg(test)] + /// Get responses. 
+ pub fn responses(&self) -> &BTreeMap { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + + &self.data.active_data.as_ref() + .expect("responses is only called on master nodes after initialization; on master nodes active_data is filled during initialization; qed") + .responses + } + + /// Get job result. + pub fn result(&self) -> Result { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + + if self.data.state != JobSessionState::Finished { + return Err(Error::InvalidStateForRequest); + } + + self.executor.compute_response(&self.data.active_data.as_ref() + .expect("requests is only called on master nodes; on master nodes active_data is filled during initialization; qed") + .responses) + } + + /// Initialize. + pub fn initialize(&mut self, nodes: BTreeSet) -> Result<(), Error> { + debug_assert!(self.meta.self_node_id == self.meta.master_node_id); + debug_assert!(nodes.len() >= self.meta.threshold + 1); + + if self.data.state != JobSessionState::Inactive { + return Err(Error::InvalidStateForRequest); + } + + // send requests to slave nodes + let mut waits_for_self = false; + let active_data = ActiveJobSessionData { + requests: nodes, + rejects: BTreeSet::new(), + responses: BTreeMap::new(), + }; + for node in &active_data.requests { + if node != &self.meta.self_node_id { + self.transport.send_partial_request(&node, self.executor.prepare_partial_request(node, &active_data.requests)?)?; + } else { + waits_for_self = true; + } + } + + // result from self + let self_response = if waits_for_self { + let partial_request = self.executor.prepare_partial_request(&self.meta.self_node_id, &active_data.requests)?; + Some(self.executor.process_partial_request(partial_request)?) 
+ } else { + None + }; + + // update state + self.data.active_data = Some(active_data); + self.data.state = JobSessionState::Active; + + // if we are waiting for response from self => do it + if let Some(self_response) = self_response { + let self_node_id = self.meta.self_node_id.clone(); + match self_response { + JobPartialRequestAction::Respond(self_response) => self.on_partial_response(&self_node_id, self_response)?, + JobPartialRequestAction::Reject(self_response) => self.on_partial_response(&self_node_id, self_response)?, + } + } + + Ok(()) + } + + /// When partial request is received by slave node. + pub fn on_partial_request(&mut self, node: &NodeId, request: Executor::PartialJobRequest) -> Result<(), Error> { + if node != &self.meta.master_node_id { + return Err(Error::InvalidMessage); + } + if self.meta.self_node_id == self.meta.master_node_id { + return Err(Error::InvalidMessage); + } + if self.data.state != JobSessionState::Inactive && self.data.state != JobSessionState::Finished { + return Err(Error::InvalidStateForRequest); + } + + let partial_response = match self.executor.process_partial_request(request)? { + JobPartialRequestAction::Respond(partial_response) => { + self.data.state = JobSessionState::Finished; + partial_response + }, + JobPartialRequestAction::Reject(partial_response) => { + self.data.state = JobSessionState::Failed; + partial_response + }, + }; + self.transport.send_partial_response(node, partial_response) + } + + /// When partial request is received by master node. 
+ pub fn on_partial_response(&mut self, node: &NodeId, response: Executor::PartialJobResponse) -> Result<(), Error> { + if self.meta.self_node_id != self.meta.master_node_id { + return Err(Error::InvalidMessage); + } + if self.data.state != JobSessionState::Active && self.data.state != JobSessionState::Finished { + return Err(Error::InvalidStateForRequest); + } + + let active_data = self.data.active_data.as_mut() + .expect("on_partial_response is only called on master nodes; on master nodes active_data is filled during initialization; qed"); + if !active_data.requests.remove(node) { + return Err(Error::InvalidNodeForRequest); + } + + match self.executor.check_partial_response(&response)? { + JobPartialResponseAction::Ignore => Ok(()), + JobPartialResponseAction::Reject => { + active_data.rejects.insert(node.clone()); + if active_data.requests.len() + active_data.responses.len() >= self.meta.threshold + 1 { + return Ok(()); + } + + self.data.state = JobSessionState::Failed; + Err(Error::ConsensusUnreachable) + }, + JobPartialResponseAction::Accept => { + active_data.responses.insert(node.clone(), response); + + if active_data.responses.len() < self.meta.threshold + 1 { + return Ok(()); + } + + self.data.state = JobSessionState::Finished; + Ok(()) + }, + } + } + + /// When error from node is received. 
+ pub fn on_node_error(&mut self, node: &NodeId) -> Result<(), Error> { + if self.meta.self_node_id != self.meta.master_node_id { + if node != &self.meta.master_node_id { + return Ok(()); + } + + self.data.state = JobSessionState::Failed; + return Err(Error::ConsensusUnreachable); + } + + let active_data = self.data.active_data.as_mut() + .expect("we have checked that we are on master node; on master nodes active_data is filled during initialization; qed"); + if active_data.rejects.contains(node) { + return Ok(()); + } + if active_data.requests.remove(node) || active_data.responses.remove(node).is_some() { + active_data.rejects.insert(node.clone()); + if self.data.state == JobSessionState::Finished && active_data.responses.len() < self.meta.threshold + 1 { + self.data.state = JobSessionState::Active; + } + if active_data.requests.len() + active_data.responses.len() >= self.meta.threshold + 1 { + return Ok(()); + } + + self.data.state = JobSessionState::Failed; + return Err(Error::ConsensusUnreachable); + } + + Ok(()) + } + + /// When session timeouted. 
+ pub fn on_session_timeout(&mut self) -> Result<(), Error> { + if self.data.state == JobSessionState::Finished || self.data.state == JobSessionState::Failed { + return Ok(()); + } + + self.data.state = JobSessionState::Failed; + Err(Error::ConsensusUnreachable) + } +} + + +#[cfg(test)] +pub mod tests { + use std::collections::{VecDeque, BTreeMap, BTreeSet}; + use parking_lot::Mutex; + use ethkey::Public; + use key_server_cluster::{Error, NodeId, SessionId, SessionMeta}; + use super::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor, JobTransport, JobSession, JobSessionState}; + + pub struct SquaredSumJobExecutor; + + impl JobExecutor for SquaredSumJobExecutor { + type PartialJobRequest = u32; + type PartialJobResponse = u32; + type JobResponse = u32; + + fn prepare_partial_request(&self, _n: &NodeId, _nodes: &BTreeSet) -> Result { Ok(2) } + fn process_partial_request(&self, r: u32) -> Result, Error> { if r <= 10 { Ok(JobPartialRequestAction::Respond(r * r)) } else { Err(Error::InvalidMessage) } } + fn check_partial_response(&self, r: &u32) -> Result { if r % 2 == 0 { Ok(JobPartialResponseAction::Accept) } else { Ok(JobPartialResponseAction::Reject) } } + fn compute_response(&self, r: &BTreeMap) -> Result { Ok(r.values().fold(0, |v1, v2| v1 + v2)) } + } + + #[derive(Default)] + pub struct DummyJobTransport { + pub requests: Mutex>, + pub responses: Mutex>, + } + + impl DummyJobTransport { + pub fn response(&self) -> (NodeId, U) { + self.responses.lock().pop_front().unwrap() + } + } + + impl JobTransport for DummyJobTransport { + type PartialJobRequest = T; + type PartialJobResponse = U; + + fn send_partial_request(&self, node: &NodeId, request: T) -> Result<(), Error> { self.requests.lock().push_back((node.clone(), request)); Ok(()) } + fn send_partial_response(&self, node: &NodeId, response: U) -> Result<(), Error> { self.responses.lock().push_back((node.clone(), response)); Ok(()) } + } + + pub fn make_master_session_meta(threshold: usize) -> 
SessionMeta { + SessionMeta { id: SessionId::default(), master_node_id: NodeId::from(1), self_node_id: NodeId::from(1), threshold: threshold } + } + + pub fn make_slave_session_meta(threshold: usize) -> SessionMeta { + SessionMeta { id: SessionId::default(), master_node_id: NodeId::from(1), self_node_id: NodeId::from(2), threshold: threshold } + } + + #[test] + fn job_initialize_fails_if_not_inactive() { + let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); + job.initialize(vec![Public::from(1)].into_iter().collect()).unwrap(); + assert_eq!(job.initialize(vec![Public::from(1)].into_iter().collect()).unwrap_err(), Error::InvalidStateForRequest); + } + + #[test] + fn job_initialization_leads_to_finish_if_single_node_is_required() { + let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); + job.initialize(vec![Public::from(1)].into_iter().collect()).unwrap(); + assert_eq!(job.state(), JobSessionState::Finished); + assert_eq!(job.result(), Ok(4)); + } + + #[test] + fn job_initialization_does_not_leads_to_finish_if_single_other_node_is_required() { + let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); + job.initialize(vec![Public::from(2)].into_iter().collect()).unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + } + + #[test] + fn job_request_fails_if_comes_from_non_master_node() { + let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); + assert_eq!(job.on_partial_request(&NodeId::from(3), 2).unwrap_err(), Error::InvalidMessage); + } + + #[test] + fn job_request_fails_if_comes_to_master_node() { + let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); + assert_eq!(job.on_partial_request(&NodeId::from(1), 2).unwrap_err(), Error::InvalidMessage); + } + + #[test] + fn 
job_request_fails_if_comes_to_failed_state() { + let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); + job.on_session_timeout().unwrap_err(); + assert_eq!(job.on_partial_request(&NodeId::from(1), 2).unwrap_err(), Error::InvalidStateForRequest); + } + + #[test] + fn job_request_succeeds_if_comes_to_finished_state() { + let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); + job.on_partial_request(&NodeId::from(1), 2).unwrap(); + assert_eq!(job.transport().response(), (NodeId::from(1), 4)); + assert_eq!(job.state(), JobSessionState::Finished); + job.on_partial_request(&NodeId::from(1), 3).unwrap(); + assert_eq!(job.transport().response(), (NodeId::from(1), 9)); + assert_eq!(job.state(), JobSessionState::Finished); + } + + #[test] + fn job_response_fails_if_comes_to_slave_node() { + let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); + assert_eq!(job.on_partial_response(&NodeId::from(1), 2).unwrap_err(), Error::InvalidMessage); + } + + #[test] + fn job_response_fails_if_comes_to_failed_state() { + let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); + job.initialize(vec![Public::from(2)].into_iter().collect()).unwrap(); + job.on_session_timeout().unwrap_err(); + assert_eq!(job.on_partial_response(&NodeId::from(2), 2).unwrap_err(), Error::InvalidStateForRequest); + } + + #[test] + fn job_response_fails_if_comes_from_unknown_node() { + let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default()); + job.initialize(vec![Public::from(2)].into_iter().collect()).unwrap(); + assert_eq!(job.on_partial_response(&NodeId::from(3), 2).unwrap_err(), Error::InvalidNodeForRequest); + } + + #[test] + fn job_response_leads_to_failure_if_too_few_nodes_left() { + let mut job = 
JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); + job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect()).unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + assert_eq!(job.on_partial_response(&NodeId::from(2), 3).unwrap_err(), Error::ConsensusUnreachable); + assert_eq!(job.state(), JobSessionState::Failed); + } + + #[test] + fn job_response_succeeds() { + let mut job = JobSession::new(make_master_session_meta(2), SquaredSumJobExecutor, DummyJobTransport::default()); + job.initialize(vec![Public::from(1), Public::from(2), Public::from(3)].into_iter().collect()).unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + job.on_partial_response(&NodeId::from(2), 2).unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + } + + #[test] + fn job_response_leads_to_finish() { + let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); + job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect()).unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + job.on_partial_response(&NodeId::from(2), 2).unwrap(); + assert_eq!(job.state(), JobSessionState::Finished); + } + + #[test] + fn job_node_error_ignored_when_slave_disconnects_from_slave() { + let mut job = JobSession::new(make_slave_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); + assert_eq!(job.state(), JobSessionState::Inactive); + job.on_node_error(&NodeId::from(3)).unwrap(); + assert_eq!(job.state(), JobSessionState::Inactive); + } + + #[test] + fn job_node_error_leads_to_fail_when_slave_disconnects_from_master() { + let mut job = JobSession::new(make_slave_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); + assert_eq!(job.state(), JobSessionState::Inactive); + assert_eq!(job.on_node_error(&NodeId::from(1)).unwrap_err(), Error::ConsensusUnreachable); + assert_eq!(job.state(), JobSessionState::Failed); + 
} + + #[test] + fn job_node_error_ignored_when_disconnects_from_rejected() { + let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); + job.initialize(vec![Public::from(1), Public::from(2), Public::from(3)].into_iter().collect()).unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + job.on_partial_response(&NodeId::from(2), 3).unwrap(); + job.on_node_error(&NodeId::from(2)).unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + } + + #[test] + fn job_node_error_ignored_when_disconnects_from_unknown() { + let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); + job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect()).unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + job.on_node_error(&NodeId::from(3)).unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + } + + #[test] + fn job_node_error_ignored_when_disconnects_from_requested_and_enough_nodes_left() { + let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); + job.initialize(vec![Public::from(1), Public::from(2), Public::from(3)].into_iter().collect()).unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + job.on_node_error(&NodeId::from(3)).unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + } + + #[test] + fn job_node_error_leads_to_fail_when_disconnects_from_requested_and_not_enough_nodes_left() { + let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default()); + job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect()).unwrap(); + assert_eq!(job.state(), JobSessionState::Active); + assert_eq!(job.on_node_error(&NodeId::from(2)).unwrap_err(), Error::ConsensusUnreachable); + assert_eq!(job.state(), JobSessionState::Failed); + } +} diff --git 
a/secret_store/src/key_server_cluster/jobs/key_access_job.rs b/secret_store/src/key_server_cluster/jobs/key_access_job.rs new file mode 100644 index 000000000..0bbb8bf04 --- /dev/null +++ b/secret_store/src/key_server_cluster/jobs/key_access_job.rs @@ -0,0 +1,73 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use std::collections::{BTreeSet, BTreeMap}; +use ethkey::{Signature, recover}; +use key_server_cluster::{Error, NodeId, SessionId, AclStorage}; +use key_server_cluster::jobs::job_session::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor}; + +/// Purpose of this job is to construct set of nodes, which have agreed to provide access to the given key for the given requestor. +pub struct KeyAccessJob { + /// Key id. + id: SessionId, + /// ACL storage. + acl_storage: Arc, + /// Requester signature. 
+ signature: Option, +} + +impl KeyAccessJob { + pub fn new_on_slave(id: SessionId, acl_storage: Arc) -> Self { + KeyAccessJob { + id: id, + acl_storage: acl_storage, + signature: None, + } + } + + pub fn new_on_master(id: SessionId, acl_storage: Arc, signature: Signature) -> Self { + KeyAccessJob { + id: id, + acl_storage: acl_storage, + signature: Some(signature), + } + } +} + +impl JobExecutor for KeyAccessJob { + type PartialJobRequest = Signature; + type PartialJobResponse = bool; + type JobResponse = BTreeSet; + + fn prepare_partial_request(&self, _node: &NodeId, _nodes: &BTreeSet) -> Result { + Ok(self.signature.as_ref().expect("prepare_partial_request is only called on master nodes; new_on_master fills the signature; qed").clone()) + } + + fn process_partial_request(&self, partial_request: Signature) -> Result, Error> { + self.acl_storage.check(&recover(&partial_request, &self.id)?, &self.id) + .map_err(|_| Error::AccessDenied) + .map(|is_confirmed| if is_confirmed { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) }) + } + + fn check_partial_response(&self, partial_response: &bool) -> Result { + Ok(if *partial_response { JobPartialResponseAction::Accept } else { JobPartialResponseAction::Reject }) + } + + fn compute_response(&self, partial_responses: &BTreeMap) -> Result, Error> { + Ok(partial_responses.keys().cloned().collect()) + } +} diff --git a/secret_store/src/key_server_cluster/jobs/mod.rs b/secret_store/src/key_server_cluster/jobs/mod.rs new file mode 100644 index 000000000..d9a358aba --- /dev/null +++ b/secret_store/src/key_server_cluster/jobs/mod.rs @@ -0,0 +1,21 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +pub mod consensus_session; +pub mod decryption_job; +pub mod job_session; +pub mod key_access_job; +pub mod signing_job; diff --git a/secret_store/src/key_server_cluster/jobs/signing_job.rs b/secret_store/src/key_server_cluster/jobs/signing_job.rs new file mode 100644 index 000000000..28ac31a1e --- /dev/null +++ b/secret_store/src/key_server_cluster/jobs/signing_job.rs @@ -0,0 +1,145 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::collections::{BTreeSet, BTreeMap}; +use ethkey::{Public, Secret}; +use util::H256; +use key_server_cluster::{Error, NodeId, DocumentKeyShare}; +use key_server_cluster::math; +use key_server_cluster::jobs::job_session::{JobPartialRequestAction, JobPartialResponseAction, JobExecutor}; + +/// Signing job. +pub struct SigningJob { + /// This node id. + self_node_id: NodeId, + /// Key share. + key_share: DocumentKeyShare, + /// Session public key. + session_public: Public, + /// Session secret coefficient. 
+ session_secret_coeff: Secret, + /// Request id. + request_id: Option, + /// Message hash. + message_hash: Option, +} + +/// Signing job partial request. +pub struct PartialSigningRequest { + /// Request id. + pub id: Secret, + /// Message hash. + pub message_hash: H256, + /// Id of other nodes, participating in signing. + pub other_nodes_ids: BTreeSet, +} + +/// Signing job partial response. +pub struct PartialSigningResponse { + /// Request id. + pub request_id: Secret, + /// Partial signature. + pub partial_signature: Secret, +} + +impl SigningJob { + pub fn new_on_slave(self_node_id: NodeId, key_share: DocumentKeyShare, session_public: Public, session_secret_coeff: Secret) -> Result { + Ok(SigningJob { + self_node_id: self_node_id, + key_share: key_share, + session_public: session_public, + session_secret_coeff: session_secret_coeff, + request_id: None, + message_hash: None, + }) + } + + pub fn new_on_master(self_node_id: NodeId, key_share: DocumentKeyShare, session_public: Public, session_secret_coeff: Secret, message_hash: H256) -> Result { + Ok(SigningJob { + self_node_id: self_node_id, + key_share: key_share, + session_public: session_public, + session_secret_coeff: session_secret_coeff, + request_id: Some(math::generate_random_scalar()?), + message_hash: Some(message_hash), + }) + } +} + +impl JobExecutor for SigningJob { + type PartialJobRequest = PartialSigningRequest; + type PartialJobResponse = PartialSigningResponse; + type JobResponse = (Secret, Secret); + + fn prepare_partial_request(&self, node: &NodeId, nodes: &BTreeSet) -> Result { + debug_assert!(nodes.len() == self.key_share.threshold + 1); + + let request_id = self.request_id.as_ref() + .expect("prepare_partial_request is only called on master nodes; request_id is filed in constructor on master nodes; qed"); + let message_hash = self.message_hash.as_ref() + .expect("compute_response is only called on master nodes; message_hash is filed in constructor on master nodes; qed"); + let mut 
other_nodes_ids = nodes.clone(); + other_nodes_ids.remove(node); + + Ok(PartialSigningRequest { + id: request_id.clone(), + message_hash: message_hash.clone(), + other_nodes_ids: other_nodes_ids, + }) + } + + fn process_partial_request(&self, partial_request: PartialSigningRequest) -> Result, Error> { + if partial_request.other_nodes_ids.len() != self.key_share.threshold + || partial_request.other_nodes_ids.contains(&self.self_node_id) + || partial_request.other_nodes_ids.iter().any(|n| !self.key_share.id_numbers.contains_key(n)) { + return Err(Error::InvalidMessage); + } + + let self_id_number = &self.key_share.id_numbers[&self.self_node_id]; + let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &self.key_share.id_numbers[n]); + let combined_hash = math::combine_message_hash_with_public(&partial_request.message_hash, &self.session_public)?; + Ok(JobPartialRequestAction::Respond(PartialSigningResponse { + request_id: partial_request.id, + partial_signature: math::compute_signature_share( + self.key_share.threshold, + &combined_hash, + &self.session_secret_coeff, + &self.key_share.secret_share, + self_id_number, + other_id_numbers + )?, + })) + } + + fn check_partial_response(&self, partial_response: &PartialSigningResponse) -> Result { + if Some(&partial_response.request_id) != self.request_id.as_ref() { + return Ok(JobPartialResponseAction::Ignore); + } + // TODO: check_signature_share() + + Ok(JobPartialResponseAction::Accept) + } + + fn compute_response(&self, partial_responses: &BTreeMap) -> Result<(Secret, Secret), Error> { + let message_hash = self.message_hash.as_ref() + .expect("compute_response is only called on master nodes; message_hash is filed in constructor on master nodes; qed"); + + let signature_c = math::combine_message_hash_with_public(message_hash, &self.session_public)?; + let signature_s = math::compute_signature(partial_responses.values().map(|r| &r.partial_signature))?; + + Ok((signature_c, signature_s)) + } +} diff --git 
a/secret_store/src/key_server_cluster/math.rs b/secret_store/src/key_server_cluster/math.rs index c3bef274f..047a4556c 100644 --- a/secret_store/src/key_server_cluster/math.rs +++ b/secret_store/src/key_server_cluster/math.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . use ethkey::{Public, Secret, Random, Generator, math}; +use util::{U256, H256, Hashable}; use key_server_cluster::Error; #[derive(Debug)] @@ -36,6 +37,48 @@ pub fn generate_random_point() -> Result { Ok(Random.generate()?.public().clone()) } +/// Compute publics sum. +pub fn compute_public_sum<'a, I>(mut publics: I) -> Result where I: Iterator { + let mut sum = publics.next().expect("compute_public_sum is called when there's at least one public; qed").clone(); + while let Some(public) = publics.next() { + math::public_add(&mut sum, &public)?; + } + Ok(sum) +} + +/// Compute secrets sum. +pub fn compute_secret_sum<'a, I>(mut secrets: I) -> Result where I: Iterator { + let mut sum = secrets.next().expect("compute_secret_sum is called when there's at least one secret; qed").clone(); + while let Some(secret) = secrets.next() { + sum.add(secret)?; + } + Ok(sum) +} + +/// Compute secrets 'shadow' multiplication: coeff * multiplication(s[j] / (s[i] - s[j])) for every i != j +pub fn compute_shadow_mul<'a, I>(coeff: &Secret, self_secret: &Secret, mut other_secrets: I) -> Result where I: Iterator { + // when there are no other secrets, only coeff is left + let other_secret = match other_secrets.next() { + Some(other_secret) => other_secret, + None => return Ok(coeff.clone()), + }; + + let mut shadow_mul = self_secret.clone(); + shadow_mul.sub(other_secret)?; + shadow_mul.inv()?; + shadow_mul.mul(other_secret)?; + while let Some(other_secret) = other_secrets.next() { + let mut shadow_mul_element = self_secret.clone(); + shadow_mul_element.sub(other_secret)?; + shadow_mul_element.inv()?; + shadow_mul_element.mul(other_secret)?; + shadow_mul.mul(&shadow_mul_element)?; + } + + shadow_mul.mul(coeff)?; + 
Ok(shadow_mul) +} + /// Update point by multiplying to random scalar pub fn update_random_point(point: &mut Public) -> Result<(), Error> { Ok(math::public_mul_secret(point, &generate_random_scalar()?)?) @@ -43,12 +86,9 @@ pub fn update_random_point(point: &mut Public) -> Result<(), Error> { /// Generate random polynom of threshold degree pub fn generate_random_polynom(threshold: usize) -> Result, Error> { - let mut polynom: Vec<_> = Vec::with_capacity(threshold + 1); - for _ in 0..threshold + 1 { - polynom.push(generate_random_scalar()?); - } - debug_assert_eq!(polynom.len(), threshold + 1); - Ok(polynom) + (0..threshold + 1) + .map(|_| generate_random_scalar()) + .collect() } /// Compute value of polynom, using `node_number` as argument @@ -125,12 +165,8 @@ pub fn keys_verification(threshold: usize, derived_point: &Public, number_id: &S } /// Compute secret share. -pub fn compute_secret_share<'a, I>(mut secret_values: I) -> Result where I: Iterator { - let mut secret_share = secret_values.next().expect("compute_secret_share is called when cluster has at least one node; qed").clone(); - while let Some(secret_value) = secret_values.next() { - secret_share.add(secret_value)?; - } - Ok(secret_share) +pub fn compute_secret_share<'a, I>(secret_values: I) -> Result where I: Iterator { + compute_secret_sum(secret_values) } /// Compute public key share. @@ -141,22 +177,14 @@ pub fn compute_public_share(self_secret_value: &Secret) -> Result } /// Compute joint public key. 
-pub fn compute_joint_public<'a, I>(mut public_shares: I) -> Result where I: Iterator { - let mut joint_public = public_shares.next().expect("compute_joint_public is called when cluster has at least one node; qed").clone(); - while let Some(public_share) = public_shares.next() { - math::public_add(&mut joint_public, &public_share)?; - } - Ok(joint_public) +pub fn compute_joint_public<'a, I>(public_shares: I) -> Result where I: Iterator { + compute_public_sum(public_shares) } #[cfg(test)] /// Compute joint secret key. -pub fn compute_joint_secret<'a, I>(mut secret_coeffs: I) -> Result where I: Iterator { - let mut joint_secret = secret_coeffs.next().expect("compute_joint_private is called when cluster has at least one node; qed").clone(); - while let Some(secret_coeff) = secret_coeffs.next() { - joint_secret.add(secret_coeff)?; - } - Ok(joint_secret) +pub fn compute_joint_secret<'a, I>(secret_coeffs: I) -> Result where I: Iterator { + compute_secret_sum(secret_coeffs) } /// Encrypt secret with joint public key. 
@@ -180,26 +208,8 @@ pub fn encrypt_secret(secret: &Public, joint_public: &Public) -> Result(node_number: &Secret, node_secret_share: &Secret, mut other_nodes_numbers: I) -> Result where I: Iterator { - let other_node_number = match other_nodes_numbers.next() { - Some(other_node_number) => other_node_number, - None => return Ok(node_secret_share.clone()), - }; - - let mut shadow = node_number.clone(); - shadow.sub(other_node_number)?; - shadow.inv()?; - shadow.mul(other_node_number)?; - while let Some(other_node_number) = other_nodes_numbers.next() { - let mut shadow_element = node_number.clone(); - shadow_element.sub(other_node_number)?; - shadow_element.inv()?; - shadow_element.mul(other_node_number)?; - shadow.mul(&shadow_element)?; - } - - shadow.mul(&node_secret_share)?; - Ok(shadow) +pub fn compute_node_shadow<'a, I>(node_secret_share: &Secret, node_number: &Secret, other_nodes_numbers: I) -> Result where I: Iterator { + compute_shadow_mul(node_secret_share, node_number, other_nodes_numbers) } /// Compute shadow point for the node. @@ -224,21 +234,14 @@ pub fn compute_node_shadow_point(access_key: &Secret, common_point: &Public, nod } /// Compute joint shadow point. -pub fn compute_joint_shadow_point<'a, I>(mut nodes_shadow_points: I) -> Result where I: Iterator { - let mut joint_shadow_point = nodes_shadow_points.next().expect("compute_joint_shadow_point is called when at least two nodes are required to decrypt secret; qed").clone(); - while let Some(node_shadow_point) = nodes_shadow_points.next() { - math::public_add(&mut joint_shadow_point, &node_shadow_point)?; - } - Ok(joint_shadow_point) +pub fn compute_joint_shadow_point<'a, I>(nodes_shadow_points: I) -> Result where I: Iterator { + compute_public_sum(nodes_shadow_points) } #[cfg(test)] /// Compute joint shadow point (version for tests). 
-pub fn compute_joint_shadow_point_test<'a, I>(access_key: &Secret, common_point: &Public, mut nodes_shadows: I) -> Result where I: Iterator { - let mut joint_shadow = nodes_shadows.next().expect("compute_joint_shadow_point_test is called when at least two nodes are required to decrypt secret; qed").clone(); - while let Some(node_shadow) = nodes_shadows.next() { - joint_shadow.add(node_shadow)?; - } +pub fn compute_joint_shadow_point_test<'a, I>(access_key: &Secret, common_point: &Public, nodes_shadows: I) -> Result where I: Iterator { + let mut joint_shadow = compute_secret_sum(nodes_shadows)?; joint_shadow.mul(access_key)?; let mut joint_shadow_point = common_point.clone(); @@ -277,10 +280,7 @@ pub fn make_common_shadow_point(threshold: usize, mut common_point: Public) -> R #[cfg(test)] /// Decrypt shadow-encrypted secret. pub fn decrypt_with_shadow_coefficients(mut decrypted_shadow: Public, mut common_shadow_point: Public, shadow_coefficients: Vec) -> Result { - let mut shadow_coefficients_sum = shadow_coefficients[0].clone(); - for shadow_coefficient in shadow_coefficients.iter().skip(1) { - shadow_coefficients_sum.add(shadow_coefficient)?; - } + let shadow_coefficients_sum = compute_secret_sum(shadow_coefficients.iter())?; math::public_mul_secret(&mut common_shadow_point, &shadow_coefficients_sum)?; math::public_add(&mut decrypted_shadow, &common_shadow_point)?; Ok(decrypted_shadow) @@ -298,11 +298,139 @@ pub fn decrypt_with_joint_secret(encrypted_point: &Public, common_point: &Public Ok(decrypted_point) } +/// Combine message hash with public key X coordinate. 
+pub fn combine_message_hash_with_public(message_hash: &H256, public: &Public) -> Result { + // buffer is just [message_hash | public.x] + let mut buffer = [0; 64]; + buffer[0..32].copy_from_slice(&message_hash[0..32]); + buffer[32..64].copy_from_slice(&public[0..32]); + + // calculate hash of buffer + let hash = (&buffer[..]).sha3(); + + // map hash to EC finite field value + let hash: U256 = hash.into(); + let hash: H256 = (hash % math::curve_order()).into(); + let hash = Secret::from_slice(&*hash); + hash.check_validity()?; + + Ok(hash) +} + +/// Compute signature share. +pub fn compute_signature_share<'a, I>(threshold: usize, combined_hash: &Secret, one_time_secret_coeff: &Secret, node_secret_share: &Secret, node_number: &Secret, other_nodes_numbers: I) + -> Result where I: Iterator { + let mut sum = one_time_secret_coeff.clone(); + let mut subtrahend = compute_shadow_mul(combined_hash, node_number, other_nodes_numbers)?; + subtrahend.mul(node_secret_share)?; + if threshold % 2 == 0 { + sum.sub(&subtrahend)?; + } else { + sum.add(&subtrahend)?; + } + Ok(sum) +} + +/// Check signature share. 
+pub fn _check_signature_share<'a, I>(_combined_hash: &Secret, _signature_share: &Secret, _public_share: &Public, _one_time_public_share: &Public, _node_numbers: I) + -> Result where I: Iterator { + // TODO: in paper partial signature is checked using comparison: + // sig[i] * T = r[i] - c * lagrange_coeff(i) * y[i] + // => (k[i] - c * lagrange_coeff(i) * s[i]) * T = r[i] - c * lagrange_coeff(i) * y[i] + // => k[i] * T - c * lagrange_coeff(i) * s[i] * T = k[i] * T - c * lagrange_coeff(i) * y[i] + // => this means that y[i] = s[i] * T + // but when verifying signature (for t = 1), nonce public (r) is restored using following expression: + // r = (sig[0] + sig[1]) * T - c * y + // r = (k[0] - c * lagrange_coeff(0) * s[0] + k[1] - c * lagrange_coeff(1) * s[1]) * T - c * y + // r = (k[0] + k[1]) * T - c * (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T - c * y + // r = r - c * (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T - c * y + // => -c * y = c * (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T + // => -y = (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T + // => y[i] != s[i] * T + // => some other way is required + Ok(true) +} + +/// Compute signature. +pub fn compute_signature<'a, I>(signature_shares: I) -> Result where I: Iterator { + compute_secret_sum(signature_shares) +} + +#[cfg(test)] +/// Locally compute Schnorr signature as described in https://en.wikipedia.org/wiki/Schnorr_signature#Signing. 
+pub fn local_compute_signature(nonce: &Secret, secret: &Secret, message_hash: &Secret) -> Result<(Secret, Secret), Error> { + let mut nonce_public = math::generation_point(); + math::public_mul_secret(&mut nonce_public, &nonce).unwrap(); + + let combined_hash = combine_message_hash_with_public(message_hash, &nonce_public)?; + + let mut sig_subtrahend = combined_hash.clone(); + sig_subtrahend.mul(secret)?; + let mut sig = nonce.clone(); + sig.sub(&sig_subtrahend)?; + + Ok((combined_hash, sig)) +} + +#[cfg(test)] +/// Verify signature as described in https://en.wikipedia.org/wiki/Schnorr_signature#Verifying. +pub fn verify_signature(public: &Public, signature: &(Secret, Secret), message_hash: &H256) -> Result { + let mut addendum = math::generation_point(); + math::public_mul_secret(&mut addendum, &signature.1)?; + let mut nonce_public = public.clone(); + math::public_mul_secret(&mut nonce_public, &signature.0)?; + math::public_add(&mut nonce_public, &addendum)?; + + let combined_hash = combine_message_hash_with_public(message_hash, &nonce_public)?; + Ok(combined_hash == signature.0) +} + #[cfg(test)] pub mod tests { + use std::iter::once; use ethkey::KeyPair; use super::*; + #[derive(Clone)] + struct KeyGenerationArtifacts { + id_numbers: Vec, + polynoms1: Vec>, + secrets1: Vec>, + public_shares: Vec, + secret_shares: Vec, + joint_public: Public, + } + + fn run_key_generation(t: usize, n: usize, id_numbers: Option>) -> KeyGenerationArtifacts { + // === PART1: DKG === + + // data, gathered during initialization + let id_numbers: Vec<_> = match id_numbers { + Some(id_numbers) => id_numbers, + None => (0..n).map(|_| generate_random_scalar().unwrap()).collect(), + }; + + // data, generated during keys dissemination + let polynoms1: Vec<_> = (0..n).map(|_| generate_random_polynom(t).unwrap()).collect(); + let secrets1: Vec<_> = (0..n).map(|i| (0..n).map(|j| compute_polynom(&polynoms1[i], &id_numbers[j]).unwrap()).collect::>()).collect(); + + // data, generated during 
keys generation + let public_shares: Vec<_> = (0..n).map(|i| compute_public_share(&polynoms1[i][0]).unwrap()).collect(); + let secret_shares: Vec<_> = (0..n).map(|i| compute_secret_share(secrets1.iter().map(|s| &s[i])).unwrap()).collect(); + + // joint public key, as a result of DKG + let joint_public = compute_joint_public(public_shares.iter()).unwrap(); + + KeyGenerationArtifacts { + id_numbers: id_numbers, + polynoms1: polynoms1, + secrets1: secrets1, + public_shares: public_shares, + secret_shares: secret_shares, + joint_public: joint_public, + } + } + pub fn do_encryption_and_decryption(t: usize, joint_public: &Public, id_numbers: &[Secret], secret_shares: &[Secret], joint_secret: Option<&Secret>, document_secret_plain: Public) -> (Public, Public) { // === PART2: encryption using joint public key === @@ -316,7 +444,7 @@ pub mod tests { // use t + 1 nodes to compute joint shadow point let nodes_shadows: Vec<_> = (0..t + 1).map(|i| - compute_node_shadow(&id_numbers[i], &secret_shares[i], id_numbers.iter() + compute_node_shadow(&secret_shares[i], &id_numbers[i], id_numbers.iter() .enumerate() .filter(|&(j, _)| j != i) .take(t) @@ -349,39 +477,108 @@ pub mod tests { let test_cases = [(0, 2), (1, 2), (1, 3), (2, 3), (1, 4), (2, 4), (3, 4), (1, 5), (2, 5), (3, 5), (4, 5), (1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10)]; for &(t, n) in &test_cases { - // === PART1: DKG === - - // data, gathered during initialization - let id_numbers: Vec<_> = (0..n).map(|_| generate_random_scalar().unwrap()).collect(); - - // data, generated during keys dissemination - let polynoms1: Vec<_> = (0..n).map(|_| generate_random_polynom(t).unwrap()).collect(); - let secrets1: Vec<_> = (0..n).map(|i| (0..n).map(|j| compute_polynom(&polynoms1[i], &id_numbers[j]).unwrap()).collect::>()).collect(); - - // data, generated during keys generation - let public_shares: Vec<_> = (0..n).map(|i| compute_public_share(&polynoms1[i][0]).unwrap()).collect(); - let 
secret_shares: Vec<_> = (0..n).map(|i| compute_secret_share(secrets1.iter().map(|s| &s[i])).unwrap()).collect(); - - // joint public key, as a result of DKG - let joint_public = compute_joint_public(public_shares.iter()).unwrap(); + let artifacts = run_key_generation(t, n, None); // compute joint private key [just for test] - let joint_secret = compute_joint_secret(polynoms1.iter().map(|p| &p[0])).unwrap(); + let joint_secret = compute_joint_secret(artifacts.polynoms1.iter().map(|p| &p[0])).unwrap(); let joint_key_pair = KeyPair::from_secret(joint_secret.clone()).unwrap(); - assert_eq!(&joint_public, joint_key_pair.public()); + assert_eq!(&artifacts.joint_public, joint_key_pair.public()); // check secret shares computation [just for test] - let secret_shares_polynom: Vec<_> = (0..t + 1).map(|k| compute_secret_share(polynoms1.iter().map(|p| &p[k])).unwrap()).collect(); - let secret_shares_calculated_from_polynom: Vec<_> = id_numbers.iter().map(|id_number| compute_polynom(&*secret_shares_polynom, id_number).unwrap()).collect(); - assert_eq!(secret_shares, secret_shares_calculated_from_polynom); + let secret_shares_polynom: Vec<_> = (0..t + 1).map(|k| compute_secret_share(artifacts.polynoms1.iter().map(|p| &p[k])).unwrap()).collect(); + let secret_shares_calculated_from_polynom: Vec<_> = artifacts.id_numbers.iter().map(|id_number| compute_polynom(&*secret_shares_polynom, id_number).unwrap()).collect(); + assert_eq!(artifacts.secret_shares, secret_shares_calculated_from_polynom); // now encrypt and decrypt data let document_secret_plain = generate_random_point().unwrap(); let (document_secret_decrypted, document_secret_decrypted_test) = - do_encryption_and_decryption(t, &joint_public, &id_numbers, &secret_shares, Some(&joint_secret), document_secret_plain.clone()); + do_encryption_and_decryption(t, &artifacts.joint_public, &artifacts.id_numbers, &artifacts.secret_shares, Some(&joint_secret), document_secret_plain.clone()); assert_eq!(document_secret_plain, 
document_secret_decrypted_test); assert_eq!(document_secret_plain, document_secret_decrypted); } } + + #[test] + fn local_signature_works() { + let key_pair = Random.generate().unwrap(); + let message_hash = "0000000000000000000000000000000000000000000000000000000000000042".parse().unwrap(); + let nonce = generate_random_scalar().unwrap(); + let signature = local_compute_signature(&nonce, key_pair.secret(), &message_hash).unwrap(); + assert_eq!(verify_signature(key_pair.public(), &signature, &message_hash), Ok(true)); + } + + #[test] + fn full_signature_math_session() { + let test_cases = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (1, 4), (2, 4), (3, 4), (1, 5), (2, 5), (3, 5), (4, 5), + (1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10)]; + for &(t, n) in &test_cases { + // hash of the message to be signed + let message_hash: Secret = "0000000000000000000000000000000000000000000000000000000000000042".parse().unwrap(); + + // === MiDS-S algorithm === + // setup: all nodes share master secret key && every node knows master public key + let artifacts = run_key_generation(t, n, None); + + // in this gap (not related to math): + // master node should ask every other node if it is able to do a signing + // if there are < than t+1 nodes, able to sign => error + // select t+1 nodes for signing session + // all steps below are for this subset of nodes + let n = t + 1; + + // step 1: run DKG to generate one-time secret key (nonce) + let id_numbers = artifacts.id_numbers.iter().cloned().take(n).collect(); + let one_time_artifacts = run_key_generation(t, n, Some(id_numbers)); + + // step 2: message hash && x coordinate of one-time public value are combined + let combined_hash = combine_message_hash_with_public(&message_hash, &one_time_artifacts.joint_public).unwrap(); + + // step 3: compute signature shares + let partial_signatures: Vec<_> = (0..n) + .map(|i| compute_signature_share( + t, + &combined_hash, + &one_time_artifacts.polynoms1[i][0], + 
&artifacts.secret_shares[i], + &artifacts.id_numbers[i], + artifacts.id_numbers.iter() + .enumerate() + .filter(|&(j, _)| i != j) + .map(|(_, n)| n) + .take(t) + ).unwrap()) + .collect(); + + // step 4: receive and verify signatures shares from other nodes + let received_signatures: Vec> = (0..n) + .map(|i| (0..n) + .filter(|j| i != *j) + .map(|j| { + let signature_share = partial_signatures[j].clone(); + assert!(_check_signature_share(&combined_hash, + &signature_share, + &artifacts.public_shares[j], + &one_time_artifacts.public_shares[j], + artifacts.id_numbers.iter().take(t)).unwrap_or(false)); + signature_share + }) + .collect()) + .collect(); + + // step 5: compute signature + let signatures: Vec<_> = (0..n) + .map(|i| (combined_hash.clone(), compute_signature(received_signatures[i].iter().chain(once(&partial_signatures[i]))).unwrap())) + .collect(); + + // === verify signature === + let master_secret = compute_joint_secret(artifacts.polynoms1.iter().map(|p| &p[0])).unwrap(); + let nonce = compute_joint_secret(one_time_artifacts.polynoms1.iter().map(|p| &p[0])).unwrap(); + let local_signature = local_compute_signature(&nonce, &master_secret, &message_hash).unwrap(); + for signature in &signatures { + assert_eq!(signature, &local_signature); + assert_eq!(verify_signature(&artifacts.joint_public, signature, &message_hash), Ok(true)); + } + } + } } diff --git a/secret_store/src/key_server_cluster/message.rs b/secret_store/src/key_server_cluster/message.rs index af3a113fe..b18cf512f 100644 --- a/secret_store/src/key_server_cluster/message.rs +++ b/secret_store/src/key_server_cluster/message.rs @@ -18,7 +18,7 @@ use std::fmt; use std::collections::{BTreeSet, BTreeMap}; use ethkey::Secret; use key_server_cluster::SessionId; -use super::{SerializableH256, SerializablePublic, SerializableSecret, SerializableSignature}; +use super::{SerializableH256, SerializablePublic, SerializableSecret, SerializableSignature, SerializableMessageHash}; pub type MessageSessionId = 
SerializableH256; pub type MessageNodeId = SerializablePublic; @@ -28,10 +28,14 @@ pub type MessageNodeId = SerializablePublic; pub enum Message { /// Cluster message. Cluster(ClusterMessage), + /// Key generation message. + Generation(GenerationMessage), /// Encryption message. Encryption(EncryptionMessage), /// Decryption message. Decryption(DecryptionMessage), + /// Signing message. + Signing(SigningMessage), } #[derive(Clone, Debug)] @@ -47,9 +51,9 @@ pub enum ClusterMessage { KeepAliveResponse(KeepAliveResponse), } -#[derive(Clone, Debug)] -/// All possible messages that can be sent during encryption session. -pub enum EncryptionMessage { +#[derive(Clone, Debug, Serialize, Deserialize)] +/// All possible messages that can be sent during key generation session. +pub enum GenerationMessage { /// Initialize new DKG session. InitializeSession(InitializeSession), /// Confirm DKG session initialization. @@ -66,16 +70,34 @@ pub enum EncryptionMessage { SessionCompleted(SessionCompleted), } +#[derive(Clone, Debug)] +/// All possible messages that can be sent during encryption session. +pub enum EncryptionMessage { + /// Initialize encryption session. + InitializeEncryptionSession(InitializeEncryptionSession), + /// Confirm/reject encryption session initialization. + ConfirmEncryptionInitialization(ConfirmEncryptionInitialization), + /// When encryption session error has occured. + EncryptionSessionError(EncryptionSessionError), +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// All possible messages that can be sent during consensus establishing. +pub enum ConsensusMessage { + /// Initialize consensus session. + InitializeConsensusSession(InitializeConsensusSession), + /// Confirm/reject consensus session initialization. + ConfirmConsensusInitialization(ConfirmConsensusInitialization), +} + #[derive(Clone, Debug)] /// All possible messages that can be sent during decryption session. pub enum DecryptionMessage { - /// Initialize decryption session. 
- InitializeDecryptionSession(InitializeDecryptionSession), - /// Confirm/reject decryption session initialization. - ConfirmDecryptionInitialization(ConfirmDecryptionInitialization), + /// Consensus establishing message. + DecryptionConsensusMessage(DecryptionConsensusMessage), /// Request partial decryption from node. RequestPartialDecryption(RequestPartialDecryption), - /// Partial decryption is completed + /// Partial decryption is completed. PartialDecryption(PartialDecryption), /// When decryption session error has occured. DecryptionSessionError(DecryptionSessionError), @@ -83,6 +105,23 @@ pub enum DecryptionMessage { DecryptionSessionCompleted(DecryptionSessionCompleted), } +#[derive(Clone, Debug)] +/// All possible messages that can be sent during signing session. +pub enum SigningMessage { + /// Consensus establishing message. + SigningConsensusMessage(SigningConsensusMessage), + /// Session key generation message. + SigningGenerationMessage(SigningGenerationMessage), + /// Request partial signature from node. + RequestPartialSignature(RequestPartialSignature), + /// Partial signature is generated. + PartialSignature(PartialSignature), + /// Signing error occured. + SigningSessionError(SigningSessionError), + /// Signing session completed. + SigningSessionCompleted(SigningSessionCompleted), +} + #[derive(Clone, Debug, Serialize, Deserialize)] /// Introduce node public key. pub struct NodePublicKey { @@ -115,6 +154,13 @@ pub struct KeepAliveResponse { pub struct InitializeSession { /// Session Id. pub session: MessageSessionId, + /// Session author. + pub author: SerializablePublic, + /// All session participants along with their identification numbers. + pub nodes: BTreeMap, + /// Decryption threshold. During decryption threshold-of-route.len() nodes must came to + /// consensus to successfully decrypt message. + pub threshold: usize, /// Derived generation point. 
Starting from originator, every node must multiply this /// point by random scalar (unknown by other nodes). At the end of initialization /// `point` will be some (k1 * k2 * ... * kn) * G = `point` where `(k1 * k2 * ... * kn)` @@ -136,11 +182,6 @@ pub struct ConfirmInitialization { pub struct CompleteInitialization { /// Session Id. pub session: MessageSessionId, - /// All session participants along with their identification numbers. - pub nodes: BTreeMap, - /// Decryption threshold. During decryption threshold-of-route.len() nodes must came to - /// consensus to successfully decrypt message. - pub threshold: usize, /// Derived generation point. pub derived_point: SerializablePublic, } @@ -181,37 +222,132 @@ pub struct SessionError { pub struct SessionCompleted { /// Session Id. pub session: MessageSessionId, - /// Common (shared) encryption point. +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Node is requested to prepare for saving encrypted data. +pub struct InitializeEncryptionSession { + /// Encryption session Id. + pub session: MessageSessionId, + /// Requestor signature. + pub requestor_signature: SerializableSignature, + /// Common point. pub common_point: SerializablePublic, - /// Encrypted point. + /// Encrypted data. pub encrypted_point: SerializablePublic, } #[derive(Clone, Debug, Serialize, Deserialize)] -/// Node is requested to decrypt data, encrypted in given session. -pub struct InitializeDecryptionSession { +/// Node is responding to encryption initialization request. +pub struct ConfirmEncryptionInitialization { /// Encryption session Id. pub session: MessageSessionId, - /// Decryption session Id. - pub sub_session: SerializableSecret, - /// Requestor signature. - pub requestor_signature: SerializableSignature, - /// Is shadow decryption requested? When true, decryption result - /// will be visible to the owner of requestor public key only. 
- pub is_shadow_decryption: bool, } #[derive(Clone, Debug, Serialize, Deserialize)] -/// Node is responding to decryption request. -pub struct ConfirmDecryptionInitialization { +/// When encryption session error has occured. +pub struct EncryptionSessionError { /// Encryption session Id. pub session: MessageSessionId, - /// Decryption session Id. - pub sub_session: SerializableSecret, - /// Is node confirmed to make a decryption?. + /// Error message. + pub error: String, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Node is asked to be part of consensus group. +pub struct InitializeConsensusSession { + /// Requestor signature. + pub requestor_signature: SerializableSignature, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Node is responding to consensus initialization request. +pub struct ConfirmConsensusInitialization { + /// Is node confirmed consensus participation. pub is_confirmed: bool, } +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Consensus-related signing message. +pub struct SigningConsensusMessage { + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Consensus message. + pub message: ConsensusMessage, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Session key generation message. +pub struct SigningGenerationMessage { + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Generation message. + pub message: GenerationMessage, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Request partial signature. +pub struct RequestPartialSignature { + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Request id. + pub request_id: SerializableSecret, + /// Message hash. + pub message_hash: SerializableMessageHash, + /// Selected nodes. 
+ pub nodes: BTreeSet, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Partial signature. +pub struct PartialSignature { + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Request id. + pub request_id: SerializableSecret, + /// S part of signature. + pub partial_signature: SerializableSecret, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// When signing session error has occured. +pub struct SigningSessionError { + /// Encryption session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Error description. + pub error: String, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Signing session completed. +pub struct SigningSessionCompleted { + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Consensus-related decryption message. +pub struct DecryptionConsensusMessage { + /// Generation session Id. + pub session: MessageSessionId, + /// Signing session Id. + pub sub_session: SerializableSecret, + /// Consensus message. + pub message: ConsensusMessage, +} + #[derive(Clone, Debug, Serialize, Deserialize)] /// Node is requested to do a partial decryption. pub struct RequestPartialDecryption { @@ -219,6 +355,11 @@ pub struct RequestPartialDecryption { pub session: MessageSessionId, /// Decryption session Id. pub sub_session: SerializableSecret, + /// Request id. + pub request_id: SerializableSecret, + /// Is shadow decryption requested? When true, decryption result + /// will be visible to the owner of requestor public key only. + pub is_shadow_decryption: bool, /// Nodes that are agreed to do a decryption. pub nodes: BTreeSet, } @@ -230,6 +371,8 @@ pub struct PartialDecryption { pub session: MessageSessionId, /// Decryption session Id. 
pub sub_session: SerializableSecret, + /// Request id. + pub request_id: SerializableSecret, /// Partially decrypted secret. pub shadow_point: SerializablePublic, /// Decrypt shadow coefficient (if requested), encrypted with requestor public. @@ -256,16 +399,26 @@ pub struct DecryptionSessionCompleted { pub sub_session: SerializableSecret, } +impl GenerationMessage { + pub fn session_id(&self) -> &SessionId { + match *self { + GenerationMessage::InitializeSession(ref msg) => &msg.session, + GenerationMessage::ConfirmInitialization(ref msg) => &msg.session, + GenerationMessage::CompleteInitialization(ref msg) => &msg.session, + GenerationMessage::KeysDissemination(ref msg) => &msg.session, + GenerationMessage::PublicKeyShare(ref msg) => &msg.session, + GenerationMessage::SessionError(ref msg) => &msg.session, + GenerationMessage::SessionCompleted(ref msg) => &msg.session, + } + } +} + impl EncryptionMessage { pub fn session_id(&self) -> &SessionId { match *self { - EncryptionMessage::InitializeSession(ref msg) => &msg.session, - EncryptionMessage::ConfirmInitialization(ref msg) => &msg.session, - EncryptionMessage::CompleteInitialization(ref msg) => &msg.session, - EncryptionMessage::KeysDissemination(ref msg) => &msg.session, - EncryptionMessage::PublicKeyShare(ref msg) => &msg.session, - EncryptionMessage::SessionError(ref msg) => &msg.session, - EncryptionMessage::SessionCompleted(ref msg) => &msg.session, + EncryptionMessage::InitializeEncryptionSession(ref msg) => &msg.session, + EncryptionMessage::ConfirmEncryptionInitialization(ref msg) => &msg.session, + EncryptionMessage::EncryptionSessionError(ref msg) => &msg.session, } } } @@ -273,8 +426,7 @@ impl EncryptionMessage { impl DecryptionMessage { pub fn session_id(&self) -> &SessionId { match *self { - DecryptionMessage::InitializeDecryptionSession(ref msg) => &msg.session, - DecryptionMessage::ConfirmDecryptionInitialization(ref msg) => &msg.session, + DecryptionMessage::DecryptionConsensusMessage(ref msg) 
=> &msg.session, DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.session, DecryptionMessage::PartialDecryption(ref msg) => &msg.session, DecryptionMessage::DecryptionSessionError(ref msg) => &msg.session, @@ -284,8 +436,7 @@ impl DecryptionMessage { pub fn sub_session_id(&self) -> &Secret { match *self { - DecryptionMessage::InitializeDecryptionSession(ref msg) => &msg.sub_session, - DecryptionMessage::ConfirmDecryptionInitialization(ref msg) => &msg.sub_session, + DecryptionMessage::DecryptionConsensusMessage(ref msg) => &msg.sub_session, DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.sub_session, DecryptionMessage::PartialDecryption(ref msg) => &msg.sub_session, DecryptionMessage::DecryptionSessionError(ref msg) => &msg.sub_session, @@ -294,12 +445,38 @@ impl DecryptionMessage { } } +impl SigningMessage { + pub fn session_id(&self) -> &SessionId { + match *self { + SigningMessage::SigningConsensusMessage(ref msg) => &msg.session, + SigningMessage::SigningGenerationMessage(ref msg) => &msg.session, + SigningMessage::RequestPartialSignature(ref msg) => &msg.session, + SigningMessage::PartialSignature(ref msg) => &msg.session, + SigningMessage::SigningSessionError(ref msg) => &msg.session, + SigningMessage::SigningSessionCompleted(ref msg) => &msg.session, + } + } + + pub fn sub_session_id(&self) -> &Secret { + match *self { + SigningMessage::SigningConsensusMessage(ref msg) => &msg.sub_session, + SigningMessage::SigningGenerationMessage(ref msg) => &msg.sub_session, + SigningMessage::RequestPartialSignature(ref msg) => &msg.sub_session, + SigningMessage::PartialSignature(ref msg) => &msg.sub_session, + SigningMessage::SigningSessionError(ref msg) => &msg.sub_session, + SigningMessage::SigningSessionCompleted(ref msg) => &msg.sub_session, + } + } +} + impl fmt::Display for Message { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Message::Cluster(ref message) => write!(f, "Cluster.{}", message), + 
Message::Generation(ref message) => write!(f, "Generation.{}", message), Message::Encryption(ref message) => write!(f, "Encryption.{}", message), Message::Decryption(ref message) => write!(f, "Decryption.{}", message), + Message::Signing(ref message) => write!(f, "Signing.{}", message), } } } @@ -315,16 +492,35 @@ impl fmt::Display for ClusterMessage { } } +impl fmt::Display for GenerationMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + GenerationMessage::InitializeSession(_) => write!(f, "InitializeSession"), + GenerationMessage::ConfirmInitialization(_) => write!(f, "ConfirmInitialization"), + GenerationMessage::CompleteInitialization(_) => write!(f, "CompleteInitialization"), + GenerationMessage::KeysDissemination(_) => write!(f, "KeysDissemination"), + GenerationMessage::PublicKeyShare(_) => write!(f, "PublicKeyShare"), + GenerationMessage::SessionError(ref msg) => write!(f, "SessionError({})", msg.error), + GenerationMessage::SessionCompleted(_) => write!(f, "SessionCompleted"), + } + } +} + impl fmt::Display for EncryptionMessage { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - EncryptionMessage::InitializeSession(_) => write!(f, "InitializeSession"), - EncryptionMessage::ConfirmInitialization(_) => write!(f, "ConfirmInitialization"), - EncryptionMessage::CompleteInitialization(_) => write!(f, "CompleteInitialization"), - EncryptionMessage::KeysDissemination(_) => write!(f, "KeysDissemination"), - EncryptionMessage::PublicKeyShare(_) => write!(f, "PublicKeyShare"), - EncryptionMessage::SessionError(ref msg) => write!(f, "SessionError({})", msg.error), - EncryptionMessage::SessionCompleted(_) => write!(f, "SessionCompleted"), + EncryptionMessage::InitializeEncryptionSession(_) => write!(f, "InitializeEncryptionSession"), + EncryptionMessage::ConfirmEncryptionInitialization(_) => write!(f, "ConfirmEncryptionInitialization"), + EncryptionMessage::EncryptionSessionError(ref msg) => write!(f, 
"EncryptionSessionError({})", msg.error), + } + } +} + +impl fmt::Display for ConsensusMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ConsensusMessage::InitializeConsensusSession(_) => write!(f, "InitializeConsensusSession"), + ConsensusMessage::ConfirmConsensusInitialization(_) => write!(f, "ConfirmConsensusInitialization"), } } } @@ -332,8 +528,7 @@ impl fmt::Display for EncryptionMessage { impl fmt::Display for DecryptionMessage { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - DecryptionMessage::InitializeDecryptionSession(_) => write!(f, "InitializeDecryptionSession"), - DecryptionMessage::ConfirmDecryptionInitialization(_) => write!(f, "ConfirmDecryptionInitialization"), + DecryptionMessage::DecryptionConsensusMessage(ref m) => write!(f, "DecryptionConsensusMessage.{}", m.message), DecryptionMessage::RequestPartialDecryption(_) => write!(f, "RequestPartialDecryption"), DecryptionMessage::PartialDecryption(_) => write!(f, "PartialDecryption"), DecryptionMessage::DecryptionSessionError(_) => write!(f, "DecryptionSessionError"), @@ -341,3 +536,16 @@ impl fmt::Display for DecryptionMessage { } } } + +impl fmt::Display for SigningMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + SigningMessage::SigningConsensusMessage(ref m) => write!(f, "SigningConsensusMessage.{}", m.message), + SigningMessage::SigningGenerationMessage(ref m) => write!(f, "SigningGenerationMessage.{}", m.message), + SigningMessage::RequestPartialSignature(_) => write!(f, "RequestPartialSignature"), + SigningMessage::PartialSignature(_) => write!(f, "PartialSignature"), + SigningMessage::SigningSessionError(_) => write!(f, "SigningSessionError"), + SigningMessage::SigningSessionCompleted(_) => write!(f, "SigningSessionCompleted"), + } + } +} diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index bdaa868ee..71c505f95 100644 --- 
a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -18,13 +18,14 @@ use std::fmt; use std::io::Error as IoError; use ethkey; use ethcrypto; -use super::types::all::DocumentAddress; +use super::types::all::ServerKeyId; -pub use super::types::all::{NodeId, DocumentEncryptedKeyShadow}; +pub use super::types::all::{NodeId, EncryptedDocumentKeyShadow}; pub use super::acl_storage::AclStorage; pub use super::key_storage::{KeyStorage, DocumentKeyShare}; -pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic}; +pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic, SerializableMessageHash}; pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient}; +pub use self::generation_session::Session as GenerationSession; pub use self::encryption_session::Session as EncryptionSession; pub use self::decryption_session::Session as DecryptionSession; @@ -33,7 +34,20 @@ pub use super::key_storage::tests::DummyKeyStorage; #[cfg(test)] pub use super::acl_storage::tests::DummyAclStorage; -pub type SessionId = DocumentAddress; +pub type SessionId = ServerKeyId; + +#[derive(Debug, Clone)] +/// Session metadata. +pub struct SessionMeta { + /// Key id. + pub id: SessionId, + /// Id of node, which has started this session. + pub master_node_id: NodeId, + /// Id of node, on which this session is running. + pub self_node_id: NodeId, + /// Session threshold. + pub threshold: usize, +} #[derive(Clone, Debug, PartialEq)] /// Errors which can occur during encryption/decryption session @@ -44,6 +58,10 @@ pub enum Error { InvalidNodeId, /// Session with the given id already exists. DuplicateSessionId, + /// Session with the same id already completed. + CompletedSessionId, + /// Session is not ready to start yet (required data is not ready). + NotStartedSessionId, /// Session with the given id is unknown. 
InvalidSessionId, /// Invalid number of nodes. @@ -61,6 +79,8 @@ pub enum Error { /// Current state of encryption/decryption session does not allow to proceed request. /// This means that either there is some comm-failure or node is misbehaving/cheating. InvalidStateForRequest, + /// Request cannot be sent/received from this node. + InvalidNodeForRequest, /// Message or some data in the message was recognized as invalid. /// This means that node is misbehaving/cheating. InvalidMessage, @@ -74,6 +94,8 @@ pub enum Error { Serde(String), /// Key storage error. KeyStorage(String), + /// Consensus is unreachable. + ConsensusUnreachable, /// Acl storage error. AccessDenied, } @@ -102,18 +124,22 @@ impl fmt::Display for Error { Error::InvalidNodeAddress => write!(f, "invalid node address has been passed"), Error::InvalidNodeId => write!(f, "invalid node id has been passed"), Error::DuplicateSessionId => write!(f, "session with the same id is already registered"), + Error::CompletedSessionId => write!(f, "session with the same id is already completed"), + Error::NotStartedSessionId => write!(f, "not enough data to start session with the given id"), Error::InvalidSessionId => write!(f, "invalid session id has been passed"), Error::InvalidNodesCount => write!(f, "invalid nodes count"), Error::InvalidNodesConfiguration => write!(f, "invalid nodes configuration"), Error::InvalidThreshold => write!(f, "invalid threshold value has been passed"), Error::TooEarlyForRequest => write!(f, "session is not yet ready to process this request"), Error::InvalidStateForRequest => write!(f, "session is in invalid state for processing this request"), + Error::InvalidNodeForRequest => write!(f, "invalid node for this request"), Error::InvalidMessage => write!(f, "invalid message is received"), Error::NodeDisconnected => write!(f, "node required for this operation is currently disconnected"), Error::EthKey(ref e) => write!(f, "cryptographic error {}", e), Error::Io(ref e) => write!(f, "i/o 
error {}", e), Error::Serde(ref e) => write!(f, "serde error {}", e), Error::KeyStorage(ref e) => write!(f, "key storage error {}", e), + Error::ConsensusUnreachable => write!(f, "Consensus unreachable"), Error::AccessDenied => write!(f, "Access denied"), } } @@ -126,9 +152,13 @@ impl Into for Error { } mod cluster; +mod cluster_sessions; mod decryption_session; mod encryption_session; +mod generation_session; mod io; -mod math; +mod jobs; +pub mod math; mod message; +mod signing_session; mod net; diff --git a/secret_store/src/key_server_cluster/signing_session.rs b/secret_store/src/key_server_cluster/signing_session.rs new file mode 100644 index 000000000..00246ae64 --- /dev/null +++ b/secret_store/src/key_server_cluster/signing_session.rs @@ -0,0 +1,706 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::collections::BTreeSet; +use std::sync::Arc; +use parking_lot::{Mutex, Condvar}; +use ethkey::{Public, Secret, Signature}; +use util::H256; +use key_server_cluster::{Error, NodeId, SessionId, SessionMeta, AclStorage, DocumentKeyShare}; +use key_server_cluster::cluster::{Cluster}; +use key_server_cluster::cluster_sessions::ClusterSession; +use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionParams as GenerationSessionParams, + Session as GenerationSessionApi, SessionState as GenerationSessionState}; +use key_server_cluster::message::{Message, SigningMessage, SigningConsensusMessage, SigningGenerationMessage, + RequestPartialSignature, PartialSignature, SigningSessionCompleted, GenerationMessage, ConsensusMessage, SigningSessionError, + InitializeConsensusSession, ConfirmConsensusInitialization}; +use key_server_cluster::jobs::job_session::JobTransport; +use key_server_cluster::jobs::signing_job::{PartialSigningRequest, PartialSigningResponse, SigningJob}; +use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession}; + +pub use key_server_cluster::decryption_session::DecryptionSessionId as SigningSessionId; + +/// Signing session API. +pub trait Session: Send + Sync + 'static { + /// Wait until session is completed. Returns signed message. + fn wait(&self) -> Result<(Secret, Secret), Error>; +} + +/// Distributed signing session. +/// Based on "Efficient Multi-Party Digital Signature using Adaptive Secret Sharing for Low-Power Devices in Wireless Network" paper. 
+/// Brief overview: +/// 1) initialization: master node (which has received request for signing the message) requests all other nodes to sign the message +/// 2) ACL check: all nodes which have received the request are querying ACL-contract to check if requestor has access to the private key +/// 3) partial signing: every node which has successfully checked access for the requestor does a partial signing +/// 4) signing: master node receives all partial signatures of the secret and computes the signature +pub struct SessionImpl { + /// Session core. + core: SessionCore, + /// Session data. + data: Mutex, +} + +/// Immutable session data. +struct SessionCore { + /// Session metadata. + pub meta: SessionMeta, + /// Signing session access key. + pub access_key: Secret, + /// Key share. + pub key_share: DocumentKeyShare, + /// Cluster which allows this node to send messages to other nodes in the cluster. + pub cluster: Arc, + /// SessionImpl completion condvar. + pub completed: Condvar, +} + +/// Signing consensus session type. +type SigningConsensusSession = ConsensusSession; + +/// Mutable session data. +struct SessionData { + /// Session state. + pub state: SessionState, + /// Message hash. + pub message_hash: Option, + /// Consensus-based signing session. + pub consensus_session: SigningConsensusSession, + /// Session key generation session. + pub generation_session: Option, + /// Signing result. + pub result: Option>, +} + +/// Signing session state. +#[derive(Debug, PartialEq)] +pub enum SessionState { + /// State when consensus is establishing. + ConsensusEstablishing, + /// State when session key is generating. + SessionKeyGeneration, + /// State when signature is computing. + SignatureComputing, +} + +/// Session creation parameters +pub struct SessionParams { + /// Session metadata. + pub meta: SessionMeta, + /// Session access key. + pub access_key: Secret, + /// Key share. + pub key_share: DocumentKeyShare, + /// ACL storage.
+ pub acl_storage: Arc, + /// Cluster + pub cluster: Arc, +} + +/// Signing consensus transport. +struct SigningConsensusTransport { + /// Session id. + id: SessionId, + /// Session access key. + access_key: Secret, + /// Cluster. + cluster: Arc, +} + +/// Signing key generation transport. +struct SessionKeyGenerationTransport { + /// Session access key. + access_key: Secret, + /// Cluster. + cluster: Arc, + /// Other nodes ids. + other_nodes_ids: BTreeSet, +} + +/// Signing job transport. +struct SigningJobTransport { + /// Session id. + id: SessionId, + /// Session access key. + access_key: Secret, + /// Cluster. + cluster: Arc, +} + +impl SessionImpl { + /// Create new signing session. + pub fn new(params: SessionParams, requester_signature: Option) -> Result { + debug_assert_eq!(params.meta.threshold, params.key_share.threshold); + debug_assert_eq!(params.meta.self_node_id == params.meta.master_node_id, requester_signature.is_some()); + + use key_server_cluster::generation_session::{check_cluster_nodes, check_threshold}; + + // check nodes and threshold + let nodes = params.key_share.id_numbers.keys().cloned().collect(); + check_cluster_nodes(&params.meta.self_node_id, &nodes)?; + check_threshold(params.key_share.threshold, &nodes)?; + + let consensus_transport = SigningConsensusTransport { + id: params.meta.id.clone(), + access_key: params.access_key.clone(), + cluster: params.cluster.clone(), + }; + + Ok(SessionImpl { + core: SessionCore { + meta: params.meta.clone(), + access_key: params.access_key, + key_share: params.key_share, + cluster: params.cluster, + completed: Condvar::new(), + }, + data: Mutex::new(SessionData { + state: SessionState::ConsensusEstablishing, + message_hash: None, + consensus_session: match requester_signature { + Some(requester_signature) => ConsensusSession::new_on_master(ConsensusSessionParams { + meta: params.meta, + acl_storage: params.acl_storage.clone(), + consensus_transport: consensus_transport, + }, requester_signature)?, + 
None => ConsensusSession::new_on_slave(ConsensusSessionParams { + meta: params.meta, + acl_storage: params.acl_storage.clone(), + consensus_transport: consensus_transport, + })?, + }, + generation_session: None, + result: None, + }), + }) + } + + /// Initialize signing session on master node. + pub fn initialize(&self, message_hash: H256) -> Result<(), Error> { + let mut data = self.data.lock(); + data.message_hash = Some(message_hash); + data.consensus_session.initialize(self.core.key_share.id_numbers.keys().cloned().collect())?; + + if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished { + let generation_session = GenerationSession::new(GenerationSessionParams { + id: self.core.meta.id.clone(), + self_node_id: self.core.meta.self_node_id.clone(), + key_storage: None, + cluster: Arc::new(SessionKeyGenerationTransport { + access_key: self.core.access_key.clone(), + cluster: self.core.cluster.clone(), + other_nodes_ids: BTreeSet::new() + }), + }); + generation_session.initialize(Public::default(), 0, vec![self.core.meta.self_node_id.clone()].into_iter().collect())?; + + debug_assert_eq!(generation_session.state(), GenerationSessionState::WaitingForGenerationConfirmation); + let joint_public_and_secret = generation_session + .joint_public_and_secret() + .expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?; + data.generation_session = Some(generation_session); + data.state = SessionState::SignatureComputing; + + self.core.disseminate_jobs(&mut data.consensus_session, joint_public_and_secret.0, joint_public_and_secret.1, message_hash)?; + + debug_assert!(data.consensus_session.state() == ConsensusSessionState::Finished); + data.result = Some(Ok(data.consensus_session.result()?)); + self.core.completed.notify_all(); + } + + Ok(()) + } + + /// Process signing message. 
+ pub fn process_message(&self, sender: &NodeId, message: &SigningMessage) -> Result<(), Error> { + match message { + &SigningMessage::SigningConsensusMessage(ref message) => + self.on_consensus_message(sender, message), + &SigningMessage::SigningGenerationMessage(ref message) => + self.on_generation_message(sender, message), + &SigningMessage::RequestPartialSignature(ref message) => + self.on_partial_signature_requested(sender, message), + &SigningMessage::PartialSignature(ref message) => + self.on_partial_signature(sender, message), + &SigningMessage::SigningSessionError(ref message) => + self.on_session_error(sender, message), + &SigningMessage::SigningSessionCompleted(ref message) => + self.on_session_completed(sender, message), + } + } + + /// When consensus-related message is received. + pub fn on_consensus_message(&self, sender: &NodeId, message: &SigningConsensusMessage) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); + + let mut data = self.data.lock(); + let is_establishing_consensus = data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus; + data.consensus_session.on_consensus_message(&sender, &message.message)?; + + let is_consensus_established = data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished; + if self.core.meta.self_node_id != self.core.meta.master_node_id || !is_establishing_consensus || !is_consensus_established { + return Ok(()); + } + + let consensus_group = data.consensus_session.select_consensus_group()?.clone(); + let mut other_consensus_group_nodes = consensus_group.clone(); + other_consensus_group_nodes.remove(&self.core.meta.self_node_id); + + let generation_session = GenerationSession::new(GenerationSessionParams { + id: self.core.meta.id.clone(), + self_node_id: self.core.meta.self_node_id.clone(), + key_storage: None, + cluster: 
Arc::new(SessionKeyGenerationTransport { + access_key: self.core.access_key.clone(), + cluster: self.core.cluster.clone(), + other_nodes_ids: other_consensus_group_nodes, + }), + }); + generation_session.initialize(Public::default(), self.core.key_share.threshold, consensus_group)?; + data.generation_session = Some(generation_session); + data.state = SessionState::SessionKeyGeneration; + + Ok(()) + } + + /// When session key related message is received. + pub fn on_generation_message(&self, sender: &NodeId, message: &SigningGenerationMessage) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); + + let mut data = self.data.lock(); + + if let &GenerationMessage::InitializeSession(ref message) = &message.message { + if &self.core.meta.master_node_id != sender { + return Err(Error::InvalidMessage); + } + + let consensus_group: BTreeSet = message.nodes.keys().cloned().map(Into::into).collect(); + let mut other_consensus_group_nodes = consensus_group.clone(); + other_consensus_group_nodes.remove(&self.core.meta.self_node_id); + + let generation_session = GenerationSession::new(GenerationSessionParams { + id: self.core.meta.id.clone(), + self_node_id: self.core.meta.self_node_id.clone(), + key_storage: None, + cluster: Arc::new(SessionKeyGenerationTransport { + access_key: self.core.access_key.clone(), + cluster: self.core.cluster.clone(), + other_nodes_ids: other_consensus_group_nodes + }), + }); + data.generation_session = Some(generation_session); + data.state = SessionState::SessionKeyGeneration; + } + + { + let generation_session = data.generation_session.as_ref().ok_or(Error::InvalidStateForRequest)?; + let is_key_generating = generation_session.state() != GenerationSessionState::Finished; + generation_session.process_message(sender, &message.message)?; + + let is_key_generated = generation_session.state() == 
GenerationSessionState::Finished; + if !is_key_generating || !is_key_generated { + return Ok(()); + } + } + + data.state = SessionState::SignatureComputing; + if self.core.meta.master_node_id != self.core.meta.self_node_id { + return Ok(()); + } + + let message_hash = data.message_hash + .expect("we are on master node; on master node message_hash is filled in initialize(); on_generation_message follows initialize; qed"); + let joint_public_and_secret = data.generation_session.as_ref() + .expect("session key is generated before signature is computed; we are in SignatureComputing state; qed") + .joint_public_and_secret() + .expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?; + self.core.disseminate_jobs(&mut data.consensus_session, joint_public_and_secret.0, joint_public_and_secret.1, message_hash) + } + + /// When partial signature is requested. + pub fn on_partial_signature_requested(&self, sender: &NodeId, message: &RequestPartialSignature) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); + + let mut data = self.data.lock(); + + if sender != &self.core.meta.master_node_id { + return Err(Error::InvalidMessage); + } + if data.state != SessionState::SignatureComputing { + return Err(Error::InvalidStateForRequest); + } + + let joint_public_and_secret = data.generation_session.as_ref() + .expect("session key is generated before signature is computed; we are in SignatureComputing state; qed") + .joint_public_and_secret() + .expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?; + let signing_job = SigningJob::new_on_slave(self.core.meta.self_node_id.clone(), self.core.key_share.clone(), joint_public_and_secret.0, joint_public_and_secret.1)?; + let signing_transport = self.core.signing_transport(); + + 
data.consensus_session.on_job_request(sender, PartialSigningRequest { + id: message.request_id.clone().into(), + message_hash: message.message_hash.clone().into(), + other_nodes_ids: message.nodes.iter().cloned().map(Into::into).collect(), + }, signing_job, signing_transport) + } + + /// When partial signature is received. + pub fn on_partial_signature(&self, sender: &NodeId, message: &PartialSignature) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); + + let mut data = self.data.lock(); + data.consensus_session.on_job_response(sender, PartialSigningResponse { + request_id: message.request_id.clone().into(), + partial_signature: message.partial_signature.clone().into(), + })?; + + if data.consensus_session.state() != ConsensusSessionState::Finished { + return Ok(()); + } + + self.core.cluster.broadcast(Message::Signing(SigningMessage::SigningSessionCompleted(SigningSessionCompleted { + session: self.core.meta.id.clone().into(), + sub_session: self.core.access_key.clone().into(), + })))?; + + data.result = Some(Ok(data.consensus_session.result()?)); + self.core.completed.notify_all(); + + Ok(()) + } + + /// When session is completed. + pub fn on_session_completed(&self, sender: &NodeId, message: &SigningSessionCompleted) -> Result<(), Error> { + debug_assert!(self.core.meta.id == *message.session); + debug_assert!(self.core.access_key == *message.sub_session); + debug_assert!(sender != &self.core.meta.self_node_id); + + self.data.lock().consensus_session.on_session_completed(sender) + } + + /// When error has occurred on another node. + pub fn on_session_error(&self, sender: &NodeId, message: &SigningSessionError) -> Result<(), Error> { + self.process_node_error(Some(&sender), &message.error) + } + + /// Process error from the other node. 
+ fn process_node_error(&self, node: Option<&NodeId>, error: &String) -> Result<(), Error> { + let mut data = self.data.lock(); + match { + match node { + Some(node) => data.consensus_session.on_node_error(node), + None => data.consensus_session.on_session_timeout(), + } + } { + Ok(false) => Ok(()), + Ok(true) => { + let message_hash = data.message_hash.as_ref().cloned() + .expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed"); + let joint_public_and_secret = data.generation_session.as_ref() + .expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed") + .joint_public_and_secret() + .expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed")?; + let disseminate_result = self.core.disseminate_jobs(&mut data.consensus_session, joint_public_and_secret.0, joint_public_and_secret.1, message_hash); + match disseminate_result { + Ok(()) => Ok(()), + Err(err) => { + warn!("{}: signing session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node); + + data.result = Some(Err(err.clone())); + self.core.completed.notify_all(); + Err(err) + } + } + }, + Err(err) => { + warn!("{}: signing session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node); + + data.result = Some(Err(err.clone())); + self.core.completed.notify_all(); + Err(err) + }, + } + } +} + +impl ClusterSession for SessionImpl { + fn is_finished(&self) -> bool { + let data = self.data.lock(); + data.consensus_session.state() == ConsensusSessionState::Failed + || data.consensus_session.state() == ConsensusSessionState::Finished + } + + fn on_node_timeout(&self, node: &NodeId) { + // ignore error, only state matters + let _ = 
self.process_node_error(Some(node), &Error::NodeDisconnected.into()); + } + + fn on_session_timeout(&self) { + // ignore error, only state matters + let _ = self.process_node_error(None, &Error::NodeDisconnected.into()); + } +} + +impl Session for SessionImpl { + fn wait(&self) -> Result<(Secret, Secret), Error> { + let mut data = self.data.lock(); + if !data.result.is_some() { + self.core.completed.wait(&mut data); + } + + data.result.as_ref() + .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") + .clone() + } +} + +impl SessionKeyGenerationTransport { + fn map_message(&self, message: Message) -> Result { + match message { + Message::Generation(message) => Ok(Message::Signing(SigningMessage::SigningGenerationMessage(SigningGenerationMessage { + session: message.session_id().clone().into(), + sub_session: self.access_key.clone().into(), + message: message, + }))), + _ => Err(Error::InvalidMessage), + } + } +} + +impl Cluster for SessionKeyGenerationTransport { + fn broadcast(&self, message: Message) -> Result<(), Error> { + let message = self.map_message(message)?; + for to in &self.other_nodes_ids { + self.cluster.send(to, message.clone())?; + } + Ok(()) + } + + fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { + debug_assert!(self.other_nodes_ids.contains(to)); + self.cluster.send(to, self.map_message(message)?) 
+ } +} + +impl SessionCore { + pub fn signing_transport(&self) -> SigningJobTransport { + SigningJobTransport { + id: self.meta.id.clone(), + access_key: self.access_key.clone(), + cluster: self.cluster.clone() + } + } + + pub fn disseminate_jobs(&self, consensus_session: &mut SigningConsensusSession, session_public: Public, session_secret_share: Secret, message_hash: H256) -> Result<(), Error> { + let signing_job = SigningJob::new_on_master(self.meta.self_node_id.clone(), self.key_share.clone(), session_public, session_secret_share, message_hash)?; + consensus_session.disseminate_jobs(signing_job, self.signing_transport()) + } +} + +impl JobTransport for SigningConsensusTransport { + type PartialJobRequest=Signature; + type PartialJobResponse=bool; + + fn send_partial_request(&self, node: &NodeId, request: Signature) -> Result<(), Error> { + self.cluster.send(node, Message::Signing(SigningMessage::SigningConsensusMessage(SigningConsensusMessage { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + message: ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession { + requestor_signature: request.into(), + }) + }))) + } + + fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { + self.cluster.send(node, Message::Signing(SigningMessage::SigningConsensusMessage(SigningConsensusMessage { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + message: ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: response, + }) + }))) + } +} + +impl JobTransport for SigningJobTransport { + type PartialJobRequest=PartialSigningRequest; + type PartialJobResponse=PartialSigningResponse; + + fn send_partial_request(&self, node: &NodeId, request: PartialSigningRequest) -> Result<(), Error> { + self.cluster.send(node, Message::Signing(SigningMessage::RequestPartialSignature(RequestPartialSignature { + session: self.id.clone().into(), + 
sub_session: self.access_key.clone().into(), + request_id: request.id.into(), + message_hash: request.message_hash.into(), + nodes: request.other_nodes_ids.into_iter().map(Into::into).collect(), + }))) + } + + fn send_partial_response(&self, node: &NodeId, response: PartialSigningResponse) -> Result<(), Error> { + self.cluster.send(node, Message::Signing(SigningMessage::PartialSignature(PartialSignature { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + request_id: response.request_id.into(), + partial_signature: response.partial_signature.into(), + }))) + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + use std::collections::{BTreeMap, VecDeque}; + use ethkey::{self, Random, Generator, Public}; + use util::H256; + use super::super::super::acl_storage::tests::DummyAclStorage; + use key_server_cluster::{NodeId, SessionId, SessionMeta, Error, KeyStorage}; + use key_server_cluster::cluster::tests::DummyCluster; + use key_server_cluster::generation_session::{Session as GenerationSession}; + use key_server_cluster::generation_session::tests::MessageLoop as KeyGenerationMessageLoop; + use key_server_cluster::math; + use key_server_cluster::message::{Message, SigningMessage}; + use key_server_cluster::signing_session::{Session, SessionImpl, SessionParams}; + + struct Node { + pub node_id: NodeId, + pub cluster: Arc, + pub session: SessionImpl, + } + + struct MessageLoop { + pub session_id: SessionId, + pub nodes: BTreeMap, + pub queue: VecDeque<(NodeId, NodeId, Message)>, + } + + impl MessageLoop { + pub fn new(gl: &KeyGenerationMessageLoop) -> Self { + let mut nodes = BTreeMap::new(); + let session_id = gl.session_id.clone(); + let requester = Random.generate().unwrap(); + let signature = Some(ethkey::sign(requester.secret(), &SessionId::default()).unwrap()); + let master_node_id = gl.nodes.keys().nth(0).unwrap().clone(); + for (i, (gl_node_id, gl_node)) in gl.nodes.iter().enumerate() { + let acl_storage = 
Arc::new(DummyAclStorage::default()); + let cluster = Arc::new(DummyCluster::new(gl_node_id.clone())); + let session = SessionImpl::new(SessionParams { + meta: SessionMeta { + id: session_id.clone(), + self_node_id: gl_node_id.clone(), + master_node_id: master_node_id.clone(), + threshold: gl_node.key_storage.get(&session_id).unwrap().threshold, + }, + access_key: "834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec".parse().unwrap(), + key_share: gl_node.key_storage.get(&session_id).unwrap(), + acl_storage: acl_storage, + cluster: cluster.clone(), + }, if i == 0 { signature.clone() } else { None }).unwrap(); + nodes.insert(gl_node_id.clone(), Node { node_id: gl_node_id.clone(), cluster: cluster, session: session }); + } + + let nodes_ids: Vec<_> = nodes.keys().cloned().collect(); + for node in nodes.values() { + for node_id in &nodes_ids { + node.cluster.add_node(node_id.clone()); + } + } + + MessageLoop { + session_id: session_id, + nodes: nodes, + queue: VecDeque::new(), + } + } + + pub fn master(&self) -> &SessionImpl { + &self.nodes.values().nth(0).unwrap().session + } + + pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { + self.nodes.values() + .filter_map(|n| n.cluster.take_message().map(|m| (n.node_id.clone(), m.0, m.1))) + .nth(0) + .or_else(|| self.queue.pop_front()) + } + + pub fn process_message(&mut self, mut msg: (NodeId, NodeId, Message)) -> Result<(), Error> { + let mut is_queued_message = false; + loop { + match { + match msg.2 { + Message::Signing(SigningMessage::SigningConsensusMessage(ref message)) => self.nodes[&msg.1].session.on_consensus_message(&msg.0, &message), + Message::Signing(SigningMessage::SigningGenerationMessage(ref message)) => self.nodes[&msg.1].session.on_generation_message(&msg.0, &message), + Message::Signing(SigningMessage::RequestPartialSignature(ref message)) => self.nodes[&msg.1].session.on_partial_signature_requested(&msg.0, &message), + 
Message::Signing(SigningMessage::PartialSignature(ref message)) => self.nodes[&msg.1].session.on_partial_signature(&msg.0, &message), + Message::Signing(SigningMessage::SigningSessionCompleted(ref message)) => self.nodes[&msg.1].session.on_session_completed(&msg.0, &message), + _ => panic!("unexpected"), + } + } { + Ok(_) => { + if let Some(message) = self.queue.pop_front() { + msg = message; + is_queued_message = true; + continue; + } + return Ok(()); + }, + Err(Error::TooEarlyForRequest) => { + if is_queued_message { + self.queue.push_front(msg); + } else { + self.queue.push_back(msg); + } + return Ok(()); + }, + Err(err) => return Err(err), + } + } + } + } + + #[test] + fn complete_gen_sign_session() { + let test_cases = [(0, 1), (0, 5), (2, 5), (3, 5)]; + for &(threshold, num_nodes) in &test_cases { + // run key generation sessions + let mut gl = KeyGenerationMessageLoop::new(num_nodes); + gl.master().initialize(Public::default(), threshold, gl.nodes.keys().cloned().collect()).unwrap(); + while let Some((from, to, message)) = gl.take_message() { + gl.process_message((from, to, message)).unwrap(); + } + + // run signing session + let message_hash = H256::from(777); + let mut sl = MessageLoop::new(&gl); + sl.master().initialize(message_hash).unwrap(); + while let Some((from, to, message)) = sl.take_message() { + sl.process_message((from, to, message)).unwrap(); + } + + // verify signature + let public = gl.master().joint_public_and_secret().unwrap().unwrap().0; + let signature = sl.master().wait().unwrap(); + assert!(math::verify_signature(&public, &signature, &message_hash).unwrap()); + } + } +} diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index 391725998..d5af7a5fa 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -19,12 +19,17 @@ use std::collections::BTreeMap; use serde_json; use ethkey::{Secret, Public}; use util::Database; -use types::all::{Error, ServiceConfiguration, DocumentAddress, 
NodeId}; +use types::all::{Error, ServiceConfiguration, ServerKeyId, NodeId}; use serialization::{SerializablePublic, SerializableSecret}; +/// Key of version value. +const DB_META_KEY_VERSION: &'static [u8; 7] = b"version"; + #[derive(Debug, Clone, PartialEq)] /// Encrypted key share, stored by key storage on the single key server. pub struct DocumentKeyShare { + /// Author of the entry. + pub author: Public, /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). pub threshold: usize, /// Nodes ids numbers. @@ -32,19 +37,21 @@ pub struct DocumentKeyShare { /// Node secret share. pub secret_share: Secret, /// Common (shared) encryption point. - pub common_point: Public, + pub common_point: Option, /// Encrypted point. - pub encrypted_point: Public, + pub encrypted_point: Option, } /// Document encryption keys storage pub trait KeyStorage: Send + Sync { /// Insert document encryption key - fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error>; + fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error>; + /// Update document encryption key + fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error>; /// Get document encryption key - fn get(&self, document: &DocumentAddress) -> Result; + fn get(&self, document: &ServerKeyId) -> Result; /// Check if storage contains document encryption key - fn contains(&self, document: &DocumentAddress) -> bool; + fn contains(&self, document: &ServerKeyId) -> bool; } /// Persistent document encryption keys storage @@ -53,8 +60,8 @@ pub struct PersistentKeyStorage { } #[derive(Serialize, Deserialize)] -/// Encrypted key share, as it is stored by key storage on the single key server. -struct SerializableDocumentKeyShare { +/// V0 of encrypted key share, as it is stored by key storage on the single key server. 
+struct SerializableDocumentKeyShareV0 { /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). pub threshold: usize, /// Nodes ids numbers. @@ -67,6 +74,23 @@ struct SerializableDocumentKeyShare { pub encrypted_point: SerializablePublic, } +#[derive(Serialize, Deserialize)] +/// V1 of encrypted key share, as it is stored by key storage on the single key server. +struct SerializableDocumentKeyShareV1 { + /// Author of the entry. + pub author: SerializablePublic, + /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). + pub threshold: usize, + /// Nodes ids numbers. + pub id_numbers: BTreeMap, + /// Node secret share. + pub secret_share: SerializableSecret, + /// Common (shared) encryption point. + pub common_point: Option, + /// Encrypted point. + pub encrypted_point: Option, +} + impl PersistentKeyStorage { /// Create new persistent document encryption keys storage pub fn new(config: &ServiceConfiguration) -> Result { @@ -74,57 +98,96 @@ impl PersistentKeyStorage { db_path.push("db"); let db_path = db_path.to_str().ok_or(Error::Database("Invalid secretstore path".to_owned()))?; + let db = Database::open_default(&db_path).map_err(Error::Database)?; + let db = upgrade_db(db)?; + Ok(PersistentKeyStorage { - db: Database::open_default(&db_path).map_err(Error::Database)?, + db: db, }) } } +fn upgrade_db(db: Database) -> Result { + let version = db.get(None, DB_META_KEY_VERSION).map_err(Error::Database)?; + let version = version.and_then(|v| v.get(0).cloned()).unwrap_or(0); + match version { + 0 => { + let mut batch = db.transaction(); + batch.put(None, DB_META_KEY_VERSION, &[1]); + for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner) { + let v0_key = serde_json::from_slice::(&db_value).map_err(|e| Error::Database(e.to_string()))?; + let v1_key = SerializableDocumentKeyShareV1 { + // author is used in separate generation + encrypt sessions.
+ // in v0 there have been only simultaneous GenEnc sessions. + author: Public::default().into(), + threshold: v0_key.threshold, + id_numbers: v0_key.id_numbers, + secret_share: v0_key.secret_share, + common_point: Some(v0_key.common_point), + encrypted_point: Some(v0_key.encrypted_point), + }; + let db_value = serde_json::to_vec(&v1_key).map_err(|e| Error::Database(e.to_string()))?; + batch.put(None, &*db_key, &*db_value); + } + db.write(batch).map_err(Error::Database)?; + Ok(db) + }, + 1 => Ok(db), + _ => Err(Error::Database(format!("unsupported SecretStore database version: {}", version))), + } +} + impl KeyStorage for PersistentKeyStorage { - fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error> { - let key: SerializableDocumentKeyShare = key.into(); + fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> { + let key: SerializableDocumentKeyShareV1 = key.into(); let key = serde_json::to_vec(&key).map_err(|e| Error::Database(e.to_string()))?; let mut batch = self.db.transaction(); batch.put(None, &document, &key); self.db.write(batch).map_err(Error::Database) } - fn get(&self, document: &DocumentAddress) -> Result { + fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> { + self.insert(document, key) + } + + fn get(&self, document: &ServerKeyId) -> Result { self.db.get(None, document) .map_err(Error::Database)?
.ok_or(Error::DocumentNotFound) .map(|key| key.into_vec()) - .and_then(|key| serde_json::from_slice::(&key).map_err(|e| Error::Database(e.to_string()))) + .and_then(|key| serde_json::from_slice::(&key).map_err(|e| Error::Database(e.to_string()))) .map(Into::into) } - fn contains(&self, document: &DocumentAddress) -> bool { + fn contains(&self, document: &ServerKeyId) -> bool { self.db.get(None, document) .map(|k| k.is_some()) .unwrap_or(false) } } -impl From for SerializableDocumentKeyShare { +impl From for SerializableDocumentKeyShareV1 { fn from(key: DocumentKeyShare) -> Self { - SerializableDocumentKeyShare { + SerializableDocumentKeyShareV1 { + author: key.author.into(), threshold: key.threshold, id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), secret_share: key.secret_share.into(), - common_point: key.common_point.into(), - encrypted_point: key.encrypted_point.into(), + common_point: key.common_point.map(Into::into), + encrypted_point: key.encrypted_point.map(Into::into), } } } -impl From for DocumentKeyShare { - fn from(key: SerializableDocumentKeyShare) -> Self { +impl From for DocumentKeyShare { + fn from(key: SerializableDocumentKeyShareV1) -> Self { DocumentKeyShare { + author: key.author.into(), threshold: key.threshold, id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), secret_share: key.secret_share.into(), - common_point: key.common_point.into(), - encrypted_point: key.encrypted_point.into(), + common_point: key.common_point.map(Into::into), + encrypted_point: key.encrypted_point.map(Into::into), } } } @@ -133,28 +196,36 @@ impl From for DocumentKeyShare { pub mod tests { use std::collections::{BTreeMap, HashMap}; use parking_lot::RwLock; + use serde_json; use devtools::RandomTempPath; - use ethkey::{Random, Generator}; - use super::super::types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, DocumentAddress}; - use super::{KeyStorage, PersistentKeyStorage, 
DocumentKeyShare}; + use ethkey::{Random, Generator, Public, Secret}; + use util::Database; + use super::super::types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, ServerKeyId}; + use super::{DB_META_KEY_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare, + SerializableDocumentKeyShareV0, SerializableDocumentKeyShareV1, upgrade_db}; #[derive(Default)] /// In-memory document encryption keys storage pub struct DummyKeyStorage { - keys: RwLock>, + keys: RwLock>, } impl KeyStorage for DummyKeyStorage { - fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error> { + fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> { self.keys.write().insert(document, key); Ok(()) } - fn get(&self, document: &DocumentAddress) -> Result { + fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> { + self.keys.write().insert(document, key); + Ok(()) + } + + fn get(&self, document: &ServerKeyId) -> Result { self.keys.read().get(document).cloned().ok_or(Error::DocumentNotFound) } - fn contains(&self, document: &DocumentAddress) -> bool { + fn contains(&self, document: &ServerKeyId) -> bool { self.keys.read().contains_key(document) } } @@ -180,27 +251,29 @@ pub mod tests { }, }; - let key1 = DocumentAddress::from(1); + let key1 = ServerKeyId::from(1); let value1 = DocumentKeyShare { + author: Public::default(), threshold: 100, id_numbers: vec![ (Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()) ].into_iter().collect(), secret_share: Random.generate().unwrap().secret().clone(), - common_point: Random.generate().unwrap().public().clone(), - encrypted_point: Random.generate().unwrap().public().clone(), + common_point: Some(Random.generate().unwrap().public().clone()), + encrypted_point: Some(Random.generate().unwrap().public().clone()), }; - let key2 = DocumentAddress::from(2); + let key2 = ServerKeyId::from(2); let value2 = 
DocumentKeyShare { + author: Public::default(), threshold: 200, id_numbers: vec![ (Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()) ].into_iter().collect(), secret_share: Random.generate().unwrap().secret().clone(), - common_point: Random.generate().unwrap().public().clone(), - encrypted_point: Random.generate().unwrap().public().clone(), + common_point: Some(Random.generate().unwrap().public().clone()), + encrypted_point: Some(Random.generate().unwrap().public().clone()), }; - let key3 = DocumentAddress::from(3); + let key3 = ServerKeyId::from(3); let key_storage = PersistentKeyStorage::new(&config).unwrap(); key_storage.insert(key1.clone(), value1.clone()).unwrap(); @@ -215,4 +288,43 @@ pub mod tests { assert_eq!(key_storage.get(&key2), Ok(value2)); assert_eq!(key_storage.get(&key3), Err(Error::DocumentNotFound)); } + + #[test] + fn upgrade_db_0_to_1() { + let db_path = RandomTempPath::create_dir(); + let db = Database::open_default(db_path.as_str()).unwrap(); + + // prepare v0 database + { + let key = serde_json::to_vec(&SerializableDocumentKeyShareV0 { + threshold: 777, + id_numbers: vec![( + "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(), + "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse::().unwrap().into(), + )].into_iter().collect(), + secret_share: "00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::().unwrap().into(), + common_point: "99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".into(), + encrypted_point: "7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".into(), + }).unwrap(); + let mut batch = db.transaction(); + batch.put(None, &[7], &key); + db.write(batch).unwrap(); + } + + // upgrade database + let db = 
upgrade_db(db).unwrap(); + + // check upgrade + assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], 1); + let key = serde_json::from_slice::(&db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap()).unwrap(); + assert_eq!(Public::default(), key.author.clone().into()); + assert_eq!(777, key.threshold); + assert_eq!(vec![( + "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse::().unwrap(), + "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse::().unwrap(), + )], key.id_numbers.clone().into_iter().map(|(k, v)| (k.into(), v.into())).collect::>()); + assert_eq!("00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::().unwrap(), key.secret_share.into()); + assert_eq!(Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".parse::().unwrap()), key.common_point.clone().map(Into::into)); + assert_eq!(Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".parse::().unwrap()), key.encrypted_point.clone().map(Into::into)); + } } diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 4176bd322..f8a74dd1a 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -60,7 +60,7 @@ mod serialization; use std::sync::Arc; use ethcore::client::Client; -pub use types::all::{DocumentAddress, DocumentKey, DocumentEncryptedKey, RequestSignature, Public, +pub use types::all::{ServerKeyId, EncryptedDocumentKey, RequestSignature, Public, Error, NodeAddress, ServiceConfiguration, ClusterConfiguration}; pub use traits::{KeyServer}; diff --git a/secret_store/src/serialization.rs b/secret_store/src/serialization.rs index 4b42311cf..55d4ac387 100644 --- a/secret_store/src/serialization.rs +++ b/secret_store/src/serialization.rs @@ -23,9 +23,12 @@ use serde::de::{Visitor, 
Error as SerdeError}; use ethkey::{Public, Secret, Signature}; use util::{H256, Bytes}; +/// Serializable message hash. +pub type SerializableMessageHash = SerializableH256; + #[derive(Clone, Debug, Serialize, Deserialize)] /// Serializable shadow decryption result. -pub struct SerializableDocumentEncryptedKeyShadow { +pub struct SerializableEncryptedDocumentKeyShadow { /// Decrypted secret point. It is partially decrypted if shadow decrpytion was requested. pub decrypted_secret: SerializablePublic, /// Shared common point. diff --git a/secret_store/src/traits.rs b/secret_store/src/traits.rs index 86d41be87..33a4eff3c 100644 --- a/secret_store/src/traits.rs +++ b/secret_store/src/traits.rs @@ -14,21 +14,63 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey, DocumentEncryptedKeyShadow}; +use types::all::{Error, Public, ServerKeyId, MessageHash, EncryptedMessageSignature, RequestSignature, EncryptedDocumentKey, + EncryptedDocumentKeyShadow}; -#[ipc(client_ident="RemoteKeyServer")] -/// Secret store key server -pub trait KeyServer: Send + Sync { - /// Generate encryption key for given document. - fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result; - /// Request encryption key of given document for given requestor - fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result; - /// Request encryption key of given document for given requestor. - /// This method does not reveal document_key to any KeyServer, but it requires additional actions on client. - /// To calculate decrypted key on client: +/// Server key (SK) generator. +pub trait ServerKeyGenerator { + /// Generate new SK. + /// `key_id` is the caller-provided identifier of generated SK. + /// `signature` is `key_id`, signed with caller public key. 
+ /// `threshold + 1` is the minimal number of nodes, required to restore private key. + /// Result is a public portion of SK. + fn generate_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result; +} + +/// Document key (DK) server. +pub trait DocumentKeyServer: ServerKeyGenerator { + /// Store externally generated DK. + /// `key_id` is identifier of previously generated SK. + /// `signature` is key_id, signed with caller public key. Caller must be the same as in the `generate_key` call. + /// `common_point` is a result of `k * T` expression, where `T` is generation point and `k` is random scalar in EC field. + /// `encrypted_document_key` is a result of `M + k * y` expression, where `M` is unencrypted document key (point on EC), + /// `k` is the same scalar used in `common_point` calculation and `y` is previously generated public part of SK. + fn store_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, common_point: Public, encrypted_document_key: Public) -> Result<(), Error>; + /// Generate and store both SK and DK. This is a shortcut for consequent calls of `generate_key` and `store_document_key`. + /// The only difference is that DK is generated by DocumentKeyServer (which might be considered unsafe). + /// `key_id` is the caller-provided identifier of generated SK. + /// `signature` is `key_id`, signed with caller public key. + /// `threshold + 1` is the minimal number of nodes, required to restore private key. + /// Result is a DK, encrypted with caller public key. + fn generate_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result; + /// Restore previously stored DK. + /// DK is decrypted on the key server (which might be considered unsafe), and then encrypted with caller public key. + /// `key_id` is identifier of previously generated SK. + /// `signature` is key_id, signed with caller public key. Caller must be on ACL for this function to succeed. 
+ /// Result is a DK, encrypted with caller public key. + fn restore_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result; + /// Restore previously stored DK. + /// To decrypt DK on client: /// 1) use requestor secret key to decrypt secret coefficients from result.decrypt_shadows /// 2) calculate decrypt_shadows_sum = sum of all secrets from (1) /// 3) calculate decrypt_shadow_point: decrypt_shadows_sum * result.common_point /// 4) calculate decrypted_secret: result.decrypted_secret + decrypt_shadow_point - fn document_key_shadow(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result; + /// Result is a DK shadow. + fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result; +} + +/// Message signer. +pub trait MessageSigner: ServerKeyGenerator { + /// Sign message with previously generated SK. + /// `key_id` is the caller-provided identifier of generated SK. + /// `signature` is `key_id`, signed with caller public key. + /// `message` is the message to be signed. + /// Result is a signed message, encrypted with caller public key. + fn sign_message(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result; +} + + +#[ipc(client_ident="RemoteKeyServer")] +/// Key server. +pub trait KeyServer: DocumentKeyServer + MessageSigner + Send + Sync { } diff --git a/secret_store/src/types/all.rs b/secret_store/src/types/all.rs index 905841ea7..54fc8acae 100644 --- a/secret_store/src/types/all.rs +++ b/secret_store/src/types/all.rs @@ -24,12 +24,14 @@ use key_server_cluster; /// Node id. pub type NodeId = ethkey::Public; -/// Document address type. -pub type DocumentAddress = util::H256; -/// Document key type. -pub type DocumentKey = util::Bytes; -/// Encrypted key type. -pub type DocumentEncryptedKey = util::Bytes; +/// Server key id. When key is used to encrypt document, it could be document contents hash. 
+pub type ServerKeyId = util::H256; +/// Encrypted document key type. +pub type EncryptedDocumentKey = util::Bytes; +/// Message hash. +pub type MessageHash = util::H256; +/// Message signature. +pub type EncryptedMessageSignature = util::Bytes; /// Request signature type. pub type RequestSignature = ethkey::Signature; /// Public key type. @@ -95,7 +97,7 @@ pub struct ClusterConfiguration { #[derive(Clone, Debug, PartialEq)] #[binary] /// Shadow decryption result. -pub struct DocumentEncryptedKeyShadow { +pub struct EncryptedDocumentKeyShadow { /// Decrypted secret point. It is partially decrypted if shadow decrpytion was requested. pub decrypted_secret: ethkey::Public, /// Shared common point.