SecretStore: generating signatures (#5764)
* refactoring traits
* separate generation session
* generalized ClusterSessions
* signing session prototype
* full_signature_math_session
* consensus session prototype
* continue signing session
* continue signing session
* continue signing session
* continue signing session
* isolated consensus logic
* started work on signing test
* complete_gen_sign_session works
* consensus tests
* get rid of duplicated data in SigningSession
* TODOs in signing session
* fixing tests
* fixed last test
* signing session in http listener
* new key server tests
* fix after merge
* enabled warnings
* fixed possible race
* ignore previous jobs responses
* include self node in consensus when confirmed
* fixed warning
* removed extra clones
* consensus_restarts_after_node_timeout
* encrypt signature before return
* return error text along with HTTP status
* fix for odd-of-N (share check fails + not equal to local sign)
* fixed t-of-N for odd t
* fixed test cases in complete_gen_sign_session
* fixed mistimed response reaction
* jobs draft
* DecryptionJob
* consensus session tests
* fixed decryption tests
* signing job implementation
* signing_session using new consensus_session
* added license preambles
* same_consensus_group_returned_after_second_selection
* database upgrade v0 -> v1
* typo
* fixed grumbles
parent 46183b1cdd
commit 6334893561
@@ -92,6 +92,15 @@ impl Secret {
 		Ok(())
 	}

+	/// Inplace negate secret key (-scalar)
+	pub fn neg(&mut self) -> Result<(), Error> {
+		let mut key_secret = self.to_secp256k1_secret()?;
+		key_secret.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?;
+
+		*self = key_secret.into();
+		Ok(())
+	}
+
 	/// Inplace inverse secret key (1 / scalar)
 	pub fn inv(&mut self) -> Result<(), Error> {
 		let mut key_secret = self.to_secp256k1_secret()?;
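A minimal sketch (not part of the commit) of what the two in-place helpers guarantee, assuming `Secret` is `Clone`; both operate modulo the secp256k1 group order n.

	// Sketch: algebraic contract of the new helpers.
	let mut a = secret.clone();
	a.neg()?; // a == -secret (mod n), so secret + a == 0 (mod n)

	let mut b = secret.clone();
	b.inv()?; // b == secret^-1 (mod n), so secret * b == 1 (mod n)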
@@ -20,14 +20,14 @@ use parking_lot::Mutex;
 use ethkey::public_to_address;
 use ethcore::client::{Client, BlockChainClient, BlockId};
 use native_contracts::SecretStoreAclStorage;
-use types::all::{Error, DocumentAddress, Public};
+use types::all::{Error, ServerKeyId, Public};

 const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker";

 /// ACL storage of Secret Store
 pub trait AclStorage: Send + Sync {
 	/// Check if requestor with `public` key can access document with hash `document`
-	fn check(&self, public: &Public, document: &DocumentAddress) -> Result<bool, Error>;
+	fn check(&self, public: &Public, document: &ServerKeyId) -> Result<bool, Error>;
 }

 /// On-chain ACL storage implementation.
@@ -48,7 +48,7 @@ impl OnChainAclStorage {
 }

 impl AclStorage for OnChainAclStorage {
-	fn check(&self, public: &Public, document: &DocumentAddress) -> Result<bool, Error> {
+	fn check(&self, public: &Public, document: &ServerKeyId) -> Result<bool, Error> {
 		let mut contract = self.contract.lock();
 		if !contract.is_some() {
 			*contract = self.client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned())
@@ -74,19 +74,19 @@ impl AclStorage for OnChainAclStorage {
 pub mod tests {
 	use std::collections::{HashMap, HashSet};
 	use parking_lot::RwLock;
-	use types::all::{Error, DocumentAddress, Public};
+	use types::all::{Error, ServerKeyId, Public};
 	use super::AclStorage;

 	#[derive(Default, Debug)]
 	/// Dummy ACL storage implementation
 	pub struct DummyAclStorage {
-		prohibited: RwLock<HashMap<Public, HashSet<DocumentAddress>>>,
+		prohibited: RwLock<HashMap<Public, HashSet<ServerKeyId>>>,
 	}

 	impl DummyAclStorage {
 		#[cfg(test)]
 		/// Prohibit given requestor access to given document
-		pub fn prohibit(&self, public: Public, document: DocumentAddress) {
+		pub fn prohibit(&self, public: Public, document: ServerKeyId) {
 			self.prohibited.write()
 				.entry(public)
 				.or_insert_with(Default::default)
@@ -95,7 +95,7 @@ pub mod tests {
 	}

 	impl AclStorage for DummyAclStorage {
-		fn check(&self, public: &Public, document: &DocumentAddress) -> Result<bool, Error> {
+		fn check(&self, public: &Public, document: &ServerKeyId) -> Result<bool, Error> {
 			Ok(self.prohibited.read()
 				.get(public)
 				.map(|docs| !docs.contains(document))
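A short usage sketch (not part of the commit) for the dummy storage above; `requestor` and `key_id` are illustrative placeholders of types `Public` and `ServerKeyId`.

	// Sketch: everything is permitted until explicitly prohibited.
	let acl = DummyAclStorage::default();
	assert_eq!(acl.check(&requestor, &key_id), Ok(true));
	acl.prohibit(requestor.clone(), key_id.clone());
	assert_eq!(acl.check(&requestor, &key_id), Ok(false));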
@@ -21,14 +21,23 @@ use hyper::method::Method as HttpMethod;
 use hyper::status::StatusCode as HttpStatusCode;
 use hyper::server::{Server as HttpServer, Request as HttpRequest, Response as HttpResponse, Handler as HttpHandler,
 	Listening as HttpListening};
 use serde::Serialize;
 use serde_json;
 use url::percent_encoding::percent_decode;

-use traits::KeyServer;
-use serialization::{SerializableDocumentEncryptedKeyShadow, SerializableBytes};
-use types::all::{Error, NodeAddress, RequestSignature, DocumentAddress, DocumentEncryptedKey, DocumentEncryptedKeyShadow};
+use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer};
+use serialization::{SerializableEncryptedDocumentKeyShadow, SerializableBytes, SerializablePublic};
+use types::all::{Error, Public, MessageHash, EncryptedMessageSignature, NodeAddress, RequestSignature, ServerKeyId,
+	EncryptedDocumentKey, EncryptedDocumentKeyShadow};

-/// Key server http-requests listener
+/// Key server http-requests listener. Available requests:
+/// To generate server key:							POST	/shadow/{server_key_id}/{signature}/{threshold}
+/// To store pregenerated encrypted document key:	POST	/shadow/{server_key_id}/{signature}/{common_point}/{encrypted_key}
+/// To generate server && document key:				POST	/{server_key_id}/{signature}/{threshold}
+/// To get document key:							GET		/{server_key_id}/{signature}
+/// To get document key shadow:						GET		/shadow/{server_key_id}/{signature}
+/// To sign message with server key:				GET		/{server_key_id}/{signature}/{message_hash}
 pub struct KeyServerHttpListener<T: KeyServer + 'static> {
 	_http_server: HttpListening,
 	handler: Arc<KeyServerSharedHttpHandler<T>>,
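For orientation, a sketch (not part of the commit) of how a client composes these paths; all values are hex-encoded strings and the variable names are illustrative.

	// Sketch: building request paths for the listener.
	let generate_server_key = format!("/shadow/{}/{}/{}", server_key_id, signature, threshold); // POST
	let store_document_key = format!("/shadow/{}/{}/{}/{}", server_key_id, signature, common_point, encrypted_key); // POST
	let sign_message = format!("/{}/{}/{}", server_key_id, signature, message_hash); // GET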
@@ -39,12 +48,18 @@ pub struct KeyServerHttpListener<T: KeyServer + 'static> {
 enum Request {
 	/// Invalid request
 	Invalid,
+	/// Generate server key.
+	GenerateServerKey(ServerKeyId, RequestSignature, usize),
+	/// Store document key.
+	StoreDocumentKey(ServerKeyId, RequestSignature, Public, Public),
 	/// Generate encryption key.
-	GenerateDocumentKey(DocumentAddress, RequestSignature, usize),
+	GenerateDocumentKey(ServerKeyId, RequestSignature, usize),
 	/// Request encryption key of given document for given requestor.
-	GetDocumentKey(DocumentAddress, RequestSignature),
+	GetDocumentKey(ServerKeyId, RequestSignature),
 	/// Request shadow of encryption key of given document for given requestor.
-	GetDocumentKeyShadow(DocumentAddress, RequestSignature),
+	GetDocumentKeyShadow(ServerKeyId, RequestSignature),
+	/// Sign message.
+	SignMessage(ServerKeyId, RequestSignature, MessageHash),
 }

 /// Cloneable http handler
@@ -78,17 +93,35 @@ impl<T> KeyServerHttpListener<T> where T: KeyServer + 'static {
 	}
 }

-impl<T> KeyServer for KeyServerHttpListener<T> where T: KeyServer + 'static {
-	fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result<DocumentEncryptedKey, Error> {
-		self.handler.key_server.generate_document_key(signature, document, threshold)
+impl<T> KeyServer for KeyServerHttpListener<T> where T: KeyServer + 'static {}
+
+impl<T> ServerKeyGenerator for KeyServerHttpListener<T> where T: KeyServer + 'static {
+	fn generate_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<Public, Error> {
+		self.handler.key_server.generate_key(key_id, signature, threshold)
+	}
+}
+
+impl<T> DocumentKeyServer for KeyServerHttpListener<T> where T: KeyServer + 'static {
+	fn store_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> {
+		self.handler.key_server.store_document_key(key_id, signature, common_point, encrypted_document_key)
 	}

-	fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result<DocumentEncryptedKey, Error> {
-		self.handler.key_server.document_key(signature, document)
+	fn generate_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<EncryptedDocumentKey, Error> {
+		self.handler.key_server.generate_document_key(key_id, signature, threshold)
 	}

-	fn document_key_shadow(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result<DocumentEncryptedKeyShadow, Error> {
-		self.handler.key_server.document_key_shadow(signature, document)
+	fn restore_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKey, Error> {
+		self.handler.key_server.restore_document_key(key_id, signature)
+	}
+
+	fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKeyShadow, Error> {
+		self.handler.key_server.restore_document_key_shadow(key_id, signature)
+	}
+}
+
+impl<T> MessageSigner for KeyServerHttpListener<T> where T: KeyServer + 'static {
+	fn sign_message(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
+		self.handler.key_server.sign_message(key_id, signature, message)
 	}
 }

@@ -111,47 +144,47 @@ impl<T> HttpHandler for KeyServerHttpHandler<T> where T: KeyServer + 'static {
 		let req_uri = req.uri.clone();
 		match &req_uri {
 			&RequestUri::AbsolutePath(ref path) => match parse_request(&req_method, &path) {
+				Request::GenerateServerKey(document, signature, threshold) => {
+					return_server_public_key(req, res, self.handler.key_server.generate_key(&document, &signature, threshold)
+						.map_err(|err| {
+							warn!(target: "secretstore", "GenerateServerKey request {} has failed with: {}", req_uri, err);
+							err
+						}));
+				},
+				Request::StoreDocumentKey(document, signature, common_point, encrypted_document_key) => {
+					return_empty(req, res, self.handler.key_server.store_document_key(&document, &signature, common_point, encrypted_document_key)
+						.map_err(|err| {
+							warn!(target: "secretstore", "StoreDocumentKey request {} has failed with: {}", req_uri, err);
+							err
+						}));
+				},
 				Request::GenerateDocumentKey(document, signature, threshold) => {
-					return_document_key(req, res, self.handler.key_server.generate_document_key(&signature, &document, threshold)
+					return_document_key(req, res, self.handler.key_server.generate_document_key(&document, &signature, threshold)
 						.map_err(|err| {
 							warn!(target: "secretstore", "GenerateDocumentKey request {} has failed with: {}", req_uri, err);
 							err
 						}));
 				},
 				Request::GetDocumentKey(document, signature) => {
-					return_document_key(req, res, self.handler.key_server.document_key(&signature, &document)
+					return_document_key(req, res, self.handler.key_server.restore_document_key(&document, &signature)
 						.map_err(|err| {
 							warn!(target: "secretstore", "GetDocumentKey request {} has failed with: {}", req_uri, err);
 							err
 						}));
 				},
 				Request::GetDocumentKeyShadow(document, signature) => {
-					match self.handler.key_server.document_key_shadow(&signature, &document)
+					return_document_key_shadow(req, res, self.handler.key_server.restore_document_key_shadow(&document, &signature)
 						.map_err(|err| {
 							warn!(target: "secretstore", "GetDocumentKeyShadow request {} has failed with: {}", req_uri, err);
 							err
-						}) {
-						Ok(document_key_shadow) => {
-							let document_key_shadow = SerializableDocumentEncryptedKeyShadow {
-								decrypted_secret: document_key_shadow.decrypted_secret.into(),
-								common_point: document_key_shadow.common_point.expect("always filled when requesting document_key_shadow; qed").into(),
-								decrypt_shadows: document_key_shadow.decrypt_shadows.expect("always filled when requesting document_key_shadow; qed").into_iter().map(Into::into).collect(),
-							};
-							match serde_json::to_vec(&document_key_shadow) {
-								Ok(document_key) => {
-									res.headers_mut().set(header::ContentType::json());
-									if let Err(err) = res.send(&document_key) {
-										// nothing to do, but to log an error
-										warn!(target: "secretstore", "response to request {} has failed with: {}", req.uri, err);
-									}
-								},
-								Err(err) => {
-									warn!(target: "secretstore", "response to request {} has failed with: {}", req.uri, err);
-								}
-							}
-						},
-						Err(err) => return_error(res, err),
-					}
+						}));
+				},
+				Request::SignMessage(document, signature, message_hash) => {
+					return_message_signature(req, res, self.handler.key_server.sign_message(&document, &signature, message_hash)
+						.map_err(|err| {
+							warn!(target: "secretstore", "SignMessage request {} has failed with: {}", req_uri, err);
+							err
+						}));
+				},
 				Request::Invalid => {
 					warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri);
@@ -166,17 +199,45 @@ impl<T> HttpHandler for KeyServerHttpHandler<T> where T: KeyServer + 'static {
 	}
 }

-fn return_document_key(req: HttpRequest, mut res: HttpResponse, document_key: Result<DocumentEncryptedKey, Error>) {
-	let document_key = document_key.
-		and_then(|k| serde_json::to_vec(&SerializableBytes(k)).map_err(|e| Error::Serde(e.to_string())));
-	match document_key {
-		Ok(document_key) => {
-			res.headers_mut().set(header::ContentType::plaintext());
-			if let Err(err) = res.send(&document_key) {
+fn return_empty(req: HttpRequest, res: HttpResponse, empty: Result<(), Error>) {
+	return_bytes::<i32>(req, res, empty.map(|_| None))
+}
+
+fn return_server_public_key(req: HttpRequest, res: HttpResponse, server_public: Result<Public, Error>) {
+	return_bytes(req, res, server_public.map(|k| Some(SerializablePublic(k))))
+}
+
+fn return_message_signature(req: HttpRequest, res: HttpResponse, signature: Result<EncryptedDocumentKey, Error>) {
+	return_bytes(req, res, signature.map(|s| Some(SerializableBytes(s))))
+}
+
+fn return_document_key(req: HttpRequest, res: HttpResponse, document_key: Result<EncryptedDocumentKey, Error>) {
+	return_bytes(req, res, document_key.map(|k| Some(SerializableBytes(k))))
+}
+
+fn return_document_key_shadow(req: HttpRequest, res: HttpResponse, document_key_shadow: Result<EncryptedDocumentKeyShadow, Error>) {
+	return_bytes(req, res, document_key_shadow.map(|k| Some(SerializableEncryptedDocumentKeyShadow {
+		decrypted_secret: k.decrypted_secret.into(),
+		common_point: k.common_point.expect("always filled when requesting document_key_shadow; qed").into(),
+		decrypt_shadows: k.decrypt_shadows.expect("always filled when requesting document_key_shadow; qed").into_iter().map(Into::into).collect(),
+	})))
+}
+
+fn return_bytes<T: Serialize>(req: HttpRequest, mut res: HttpResponse, result: Result<Option<T>, Error>) {
+	match result {
+		Ok(Some(result)) => match serde_json::to_vec(&result) {
+			Ok(result) => {
+				res.headers_mut().set(header::ContentType::json());
+				if let Err(err) = res.send(&result) {
 					// nothing to do, but to log an error
 					warn!(target: "secretstore", "response to request {} has failed with: {}", req.uri, err);
 				}
 			},
 			Err(err) => {
 				warn!(target: "secretstore", "response to request {} has failed with: {}", req.uri, err);
 			}
 		},
+		Ok(None) => *res.status_mut() = HttpStatusCode::Ok,
 		Err(err) => return_error(res, err),
 	}
 }
@@ -190,6 +251,13 @@ fn return_error(mut res: HttpResponse, err: Error) {
 		Error::Database(_) => *res.status_mut() = HttpStatusCode::InternalServerError,
 		Error::Internal(_) => *res.status_mut() = HttpStatusCode::InternalServerError,
 	}
+
+	// return error text. ignore errors when returning error
+	let error_text = format!("\"{}\"", err);
+	if let Ok(error_text) = serde_json::to_vec(&error_text) {
+		res.headers_mut().set(header::ContentType::json());
+		let _ = res.send(&error_text);
+	}
 }

 fn parse_request(method: &HttpMethod, uri_path: &str) -> Request {
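A sketch (not part of the commit) of the resulting error responses: the status code is set as before, and the body now carries the error text as a JSON string; the error text below is illustrative only.

	// Illustrative exchange:
	//   GET /{server_key_id}/{bad_signature}
	//   -> HTTP/1.1 400 Bad Request
	//      Content-Type: application/json
	//      "\"BadSignature\""  // serde_json output of the pre-quoted error text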
@@ -202,24 +270,39 @@ fn parse_request(method: &HttpMethod, uri_path: &str) -> Request {
 	if path.len() == 0 {
 		return Request::Invalid;
 	}
-	let (args_prefix, args_offset) = if &path[0] == "shadow" {
-		("shadow", 1)
-	} else {
-		("", 0)
-	};
-
-	if path.len() < 2 + args_offset || path[args_offset].is_empty() || path[args_offset + 1].is_empty() {
+	let (is_shadow_request, args_offset) = if &path[0] == "shadow" { (true, 1) } else { (false, 0) };
+	let args_count = path.len() - args_offset;
+	if args_count < 2 || path[args_offset].is_empty() || path[args_offset + 1].is_empty() {
 		return Request::Invalid;
 	}

-	let args_len = path.len();
-	let document = path[args_offset].parse();
-	let signature = path[args_offset + 1].parse();
-	let threshold = (if args_len > args_offset + 2 { &path[args_offset + 2] } else { "" }).parse();
-	match (args_prefix, args_len, method, document, signature, threshold) {
-		("", 3, &HttpMethod::Post, Ok(document), Ok(signature), Ok(threshold)) => Request::GenerateDocumentKey(document, signature, threshold),
-		("", 2, &HttpMethod::Get, Ok(document), Ok(signature), _) => Request::GetDocumentKey(document, signature),
-		("shadow", 3, &HttpMethod::Get, Ok(document), Ok(signature), _) => Request::GetDocumentKeyShadow(document, signature),
+	let document = match path[args_offset].parse() {
+		Ok(document) => document,
+		_ => return Request::Invalid,
+	};
+	let signature = match path[args_offset + 1].parse() {
+		Ok(signature) => signature,
+		_ => return Request::Invalid,
+	};
+
+	let threshold = path.get(args_offset + 2).map(|v| v.parse());
+	let message_hash = path.get(args_offset + 2).map(|v| v.parse());
+	let common_point = path.get(args_offset + 2).map(|v| v.parse());
+	let encrypted_key = path.get(args_offset + 3).map(|v| v.parse());
+	match (is_shadow_request, args_count, method, threshold, message_hash, common_point, encrypted_key) {
+		(true, 3, &HttpMethod::Post, Some(Ok(threshold)), _, _, _) =>
+			Request::GenerateServerKey(document, signature, threshold),
+		(true, 4, &HttpMethod::Post, _, _, Some(Ok(common_point)), Some(Ok(encrypted_key))) =>
+			Request::StoreDocumentKey(document, signature, common_point, encrypted_key),
+		(false, 3, &HttpMethod::Post, Some(Ok(threshold)), _, _, _) =>
+			Request::GenerateDocumentKey(document, signature, threshold),
+		(false, 2, &HttpMethod::Get, _, _, _, _) =>
+			Request::GetDocumentKey(document, signature),
+		(true, 2, &HttpMethod::Get, _, _, _, _) =>
+			Request::GetDocumentKeyShadow(document, signature),
+		(false, 3, &HttpMethod::Get, _, Some(Ok(message_hash)), _, _) =>
+			Request::SignMessage(document, signature, message_hash),
 		_ => Request::Invalid,
 	}
 }
@@ -241,19 +324,49 @@ mod tests {

 	#[test]
 	fn parse_request_successful() {
+		// POST /shadow/{server_key_id}/{signature}/{threshold} => generate server key
+		assert_eq!(parse_request(&HttpMethod::Post, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2"),
+			Request::GenerateServerKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
+				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
+				2));
+		// POST /shadow/{server_key_id}/{signature}/{common_point}/{encrypted_key} => store encrypted document key
+		assert_eq!(parse_request(&HttpMethod::Post, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8/1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb"),
+			Request::StoreDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
+				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
+				"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse().unwrap(),
+				"1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb".parse().unwrap()));
+		// POST /{server_key_id}/{signature}/{threshold} => generate server && document key
 		assert_eq!(parse_request(&HttpMethod::Post, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2"),
 			Request::GenerateDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
 				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
 				2));
+		// GET /{server_key_id}/{signature} => get document key
 		assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"),
 			Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
 				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap()));
 		assert_eq!(parse_request(&HttpMethod::Get, "/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"),
 			Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
 				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap()));
+		// GET /shadow/{server_key_id}/{signature} => get document key shadow
 		assert_eq!(parse_request(&HttpMethod::Get, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"),
 			Request::GetDocumentKeyShadow("0000000000000000000000000000000000000000000000000000000000000001".into(),
 				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap()));
+		// GET /{server_key_id}/{signature}/{message_hash} => sign message with server key
+		assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c"),
+			Request::SignMessage("0000000000000000000000000000000000000000000000000000000000000001".into(),
+				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
+				"281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap()));
 	}

 	#[test]
 	fn parse_request_failed() {
 		assert_eq!(parse_request(&HttpMethod::Get, ""), Request::Invalid);
 		assert_eq!(parse_request(&HttpMethod::Get, "/shadow"), Request::Invalid);
 		assert_eq!(parse_request(&HttpMethod::Get, "///2"), Request::Invalid);
 		assert_eq!(parse_request(&HttpMethod::Get, "/shadow///2"), Request::Invalid);
 		assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001"), Request::Invalid);
 		assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/"), Request::Invalid);
 		assert_eq!(parse_request(&HttpMethod::Get, "/a/b"), Request::Invalid);
-		assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002"), Request::Invalid);
+		assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002"), Request::Invalid);
 	}
 }
@@ -24,9 +24,10 @@ use ethcrypto;
 use ethkey;
 use super::acl_storage::AclStorage;
 use super::key_storage::KeyStorage;
-use key_server_cluster::ClusterCore;
-use traits::KeyServer;
-use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey, DocumentEncryptedKeyShadow, ClusterConfiguration};
+use key_server_cluster::{math, ClusterCore};
+use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer};
+use types::all::{Error, Public, RequestSignature, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow,
+	ClusterConfiguration, MessageHash, EncryptedMessageSignature};
 use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration};

 /// Secret store key server implementation
@@ -56,15 +57,41 @@ impl KeyServerImpl {
 	}
 }

-impl KeyServer for KeyServerImpl {
-	fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result<DocumentEncryptedKey, Error> {
+impl KeyServer for KeyServerImpl {}
+
+impl ServerKeyGenerator for KeyServerImpl {
+	fn generate_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<Public, Error> {
 		// recover requestor' public key from signature
-		let public = ethkey::recover(signature, document)
+		let public = ethkey::recover(signature, key_id)
 			.map_err(|_| Error::BadSignature)?;

-		// generate document key
-		let encryption_session = self.data.lock().cluster.new_encryption_session(document.clone(), threshold)?;
-		let document_key = encryption_session.wait(None)?;
+		// generate server key
+		let generation_session = self.data.lock().cluster.new_generation_session(key_id.clone(), public, threshold)?;
+		generation_session.wait(None).map_err(Into::into)
 	}
 }

+impl DocumentKeyServer for KeyServerImpl {
+	fn store_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> {
+		// store encrypted key
+		let encryption_session = self.data.lock().cluster.new_encryption_session(key_id.clone(), signature.clone(), common_point, encrypted_document_key)?;
+		encryption_session.wait(None).map_err(Into::into)
+	}
+
+	fn generate_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<EncryptedDocumentKey, Error> {
+		// recover requestor' public key from signature
+		let public = ethkey::recover(signature, key_id)
+			.map_err(|_| Error::BadSignature)?;
+
+		// generate server key
+		let server_key = self.generate_key(key_id, signature, threshold)?;
+
+		// generate random document key
+		let document_key = math::generate_random_point()?;
+		let encrypted_document_key = math::encrypt_secret(&document_key, &server_key)?;
+
+		// store document key in the storage
+		self.store_document_key(key_id, signature, encrypted_document_key.common_point, encrypted_document_key.encrypted_point)?;
+
+		// encrypt document key with requestor public key
+		let document_key = ethcrypto::ecies::encrypt(&public, &ethcrypto::DEFAULT_MAC, &document_key)
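The flow above layers two encryptions; a sketch (not part of the commit) of the presumed EC-ElGamal shape of `math::encrypt_secret`, with K the random document-key point, S the joint server public key, and x the distributed server secret (S = x*G):

	// Sketch, assumption: encrypt_secret is EC-ElGamal style.
	//   pick random scalar r
	//   common_point    = r * G
	//   encrypted_point = K + r * S
	// The cluster later recovers K = encrypted_point - x * common_point,
	// which is what the decryption session computes share-by-share.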
@@ -72,14 +99,13 @@ impl KeyServer for KeyServerImpl {
 		Ok(document_key)
 	}

-	fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result<DocumentEncryptedKey, Error> {
+	fn restore_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKey, Error> {
 		// recover requestor' public key from signature
-		let public = ethkey::recover(signature, document)
+		let public = ethkey::recover(signature, key_id)
 			.map_err(|_| Error::BadSignature)?;

 		// decrypt document key
-		let decryption_session = self.data.lock().cluster.new_decryption_session(document.clone(), signature.clone(), false)?;
+		let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), signature.clone(), false)?;
 		let document_key = decryption_session.wait()?.decrypted_secret;

 		// encrypt document key with requestor public key
@@ -88,12 +114,34 @@ impl KeyServer for KeyServerImpl {
 		Ok(document_key)
 	}

-	fn document_key_shadow(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result<DocumentEncryptedKeyShadow, Error> {
-		let decryption_session = self.data.lock().cluster.new_decryption_session(document.clone(), signature.clone(), true)?;
+	fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKeyShadow, Error> {
+		let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), signature.clone(), true)?;
 		decryption_session.wait().map_err(Into::into)
 	}
 }

+impl MessageSigner for KeyServerImpl {
+	fn sign_message(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
+		// recover requestor' public key from signature
+		let public = ethkey::recover(signature, key_id)
+			.map_err(|_| Error::BadSignature)?;
+
+		// sign message
+		let signing_session = self.data.lock().cluster.new_signing_session(key_id.clone(), signature.clone(), message)?;
+		let message_signature = signing_session.wait()?;
+
+		// compose two message signature components into single one
+		let mut combined_signature = [0; 64];
+		combined_signature[..32].clone_from_slice(&**message_signature.0);
+		combined_signature[32..].clone_from_slice(&**message_signature.1);
+
+		// encrypt combined signature with requestor public key
+		let message_signature = ethcrypto::ecies::encrypt(&public, &ethcrypto::DEFAULT_MAC, &combined_signature)
+			.map_err(|err| Error::Internal(format!("Error encrypting message signature: {}", err)))?;
+		Ok(message_signature)
+	}
+}
+
 impl KeyServerCore {
 	pub fn new(config: &ClusterConfiguration, acl_storage: Arc<AclStorage>, key_storage: Arc<KeyStorage>) -> Result<Self, Error> {
 		let config = NetClusterConfiguration {
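On the requestor side the result is unpacked by reversing the composition above; a sketch (not part of the commit), mirroring what the tests below do:

	// Sketch: requestor-side decode of an EncryptedMessageSignature.
	let plain = ethcrypto::ecies::decrypt(&requestor_secret, &ethcrypto::DEFAULT_MAC, &encrypted_signature)?;
	let signature_c = Secret::from_slice(&plain[..32]); // first component
	let signature_s = Secret::from_slice(&plain[32..]); // second component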
@@ -146,24 +194,46 @@ pub mod tests {
 	use std::time;
 	use std::sync::Arc;
 	use ethcrypto;
-	use ethkey::{self, Random, Generator};
+	use ethkey::{self, Secret, Random, Generator};
 	use acl_storage::tests::DummyAclStorage;
 	use key_storage::tests::DummyKeyStorage;
-	use types::all::{Error, ClusterConfiguration, NodeAddress, RequestSignature, DocumentAddress, DocumentEncryptedKey, DocumentEncryptedKeyShadow};
-	use super::{KeyServer, KeyServerImpl};
+	use key_server_cluster::math;
+	use util::H256;
+	use types::all::{Error, Public, ClusterConfiguration, NodeAddress, RequestSignature, ServerKeyId,
+		EncryptedDocumentKey, EncryptedDocumentKeyShadow, MessageHash, EncryptedMessageSignature};
+	use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer};
+	use super::KeyServerImpl;

 	pub struct DummyKeyServer;

-	impl KeyServer for DummyKeyServer {
-		fn generate_document_key(&self, _signature: &RequestSignature, _document: &DocumentAddress, _threshold: usize) -> Result<DocumentEncryptedKey, Error> {
+	impl KeyServer for DummyKeyServer {}
+
+	impl ServerKeyGenerator for DummyKeyServer {
+		fn generate_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _threshold: usize) -> Result<Public, Error> {
 			unimplemented!()
 		}
 	}

+	impl DocumentKeyServer for DummyKeyServer {
+		fn store_document_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _common_point: Public, _encrypted_document_key: Public) -> Result<(), Error> {
+			unimplemented!()
+		}
+
-		fn document_key(&self, _signature: &RequestSignature, _document: &DocumentAddress) -> Result<DocumentEncryptedKey, Error> {
+		fn generate_document_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _threshold: usize) -> Result<EncryptedDocumentKey, Error> {
 			unimplemented!()
 		}

-		fn document_key_shadow(&self, _signature: &RequestSignature, _document: &DocumentAddress) -> Result<DocumentEncryptedKeyShadow, Error> {
+		fn restore_document_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature) -> Result<EncryptedDocumentKey, Error> {
 			unimplemented!()
 		}
+
+		fn restore_document_key_shadow(&self, _key_id: &ServerKeyId, _signature: &RequestSignature) -> Result<EncryptedDocumentKeyShadow, Error> {
+			unimplemented!()
+		}
+	}
+
+	impl MessageSigner for DummyKeyServer {
+		fn sign_message(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
+			unimplemented!()
+		}
+	}
@@ -228,12 +298,12 @@ pub mod tests {
 		let document = Random.generate().unwrap().secret().clone();
 		let secret = Random.generate().unwrap().secret().clone();
 		let signature = ethkey::sign(&secret, &document).unwrap();
-		let generated_key = key_servers[0].generate_document_key(&signature, &document, threshold).unwrap();
+		let generated_key = key_servers[0].generate_document_key(&document, &signature, threshold).unwrap();
 		let generated_key = ethcrypto::ecies::decrypt(&secret, &ethcrypto::DEFAULT_MAC, &generated_key).unwrap();

 		// now let's try to retrieve key back
 		for key_server in key_servers.iter() {
-			let retrieved_key = key_server.document_key(&signature, &document).unwrap();
+			let retrieved_key = key_server.restore_document_key(&document, &signature).unwrap();
 			let retrieved_key = ethcrypto::ecies::decrypt(&secret, &ethcrypto::DEFAULT_MAC, &retrieved_key).unwrap();
 			assert_eq!(retrieved_key, generated_key);
 		}
@@ -250,15 +320,70 @@ pub mod tests {
 			let document = Random.generate().unwrap().secret().clone();
 			let secret = Random.generate().unwrap().secret().clone();
 			let signature = ethkey::sign(&secret, &document).unwrap();
-			let generated_key = key_servers[0].generate_document_key(&signature, &document, *threshold).unwrap();
+			let generated_key = key_servers[0].generate_document_key(&document, &signature, *threshold).unwrap();
 			let generated_key = ethcrypto::ecies::decrypt(&secret, &ethcrypto::DEFAULT_MAC, &generated_key).unwrap();

 			// now let's try to retrieve key back
 			for key_server in key_servers.iter() {
-				let retrieved_key = key_server.document_key(&signature, &document).unwrap();
+				let retrieved_key = key_server.restore_document_key(&document, &signature).unwrap();
 				let retrieved_key = ethcrypto::ecies::decrypt(&secret, &ethcrypto::DEFAULT_MAC, &retrieved_key).unwrap();
 				assert_eq!(retrieved_key, generated_key);
 			}
 		}
 	}

+	#[test]
+	fn server_key_generation_and_storing_document_key_works_over_network_with_3_nodes() {
+		//::logger::init_log();
+		let key_servers = make_key_servers(6090, 3);
+
+		let test_cases = [0, 1, 2];
+		for threshold in &test_cases {
+			// generate server key
+			let server_key_id = Random.generate().unwrap().secret().clone();
+			let requestor_secret = Random.generate().unwrap().secret().clone();
+			let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap();
+			let server_public = key_servers[0].generate_key(&server_key_id, &signature, *threshold).unwrap();
+
+			// generate document key (this is done by KS client so that document key is unknown to any KS)
+			let generated_key = Random.generate().unwrap().public().clone();
+			let encrypted_document_key = math::encrypt_secret(&generated_key, &server_public).unwrap();
+
+			// store document key
+			key_servers[0].store_document_key(&server_key_id, &signature, encrypted_document_key.common_point, encrypted_document_key.encrypted_point).unwrap();
+
+			// now let's try to retrieve key back
+			for key_server in key_servers.iter() {
+				let retrieved_key = key_server.restore_document_key(&server_key_id, &signature).unwrap();
+				let retrieved_key = ethcrypto::ecies::decrypt(&requestor_secret, &ethcrypto::DEFAULT_MAC, &retrieved_key).unwrap();
+				let retrieved_key = Public::from_slice(&retrieved_key);
+				assert_eq!(retrieved_key, generated_key);
+			}
+		}
+	}
+
+	#[test]
+	fn server_key_generation_and_message_signing_works_over_network_with_3_nodes() {
+		//::logger::init_log();
+		let key_servers = make_key_servers(6100, 3);
+
+		let test_cases = [0, 1, 2];
+		for threshold in &test_cases {
+			// generate server key
+			let server_key_id = Random.generate().unwrap().secret().clone();
+			let requestor_secret = Random.generate().unwrap().secret().clone();
+			let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap();
+			let server_public = key_servers[0].generate_key(&server_key_id, &signature, *threshold).unwrap();
+
+			// sign message
+			let message_hash = H256::from(42);
+			let combined_signature = key_servers[0].sign_message(&server_key_id, &signature, message_hash.clone()).unwrap();
+			let combined_signature = ethcrypto::ecies::decrypt(&requestor_secret, &ethcrypto::DEFAULT_MAC, &combined_signature).unwrap();
+			let signature_c = Secret::from_slice(&combined_signature[..32]);
+			let signature_s = Secret::from_slice(&combined_signature[32..]);
+
+			// check signature
+			assert_eq!(math::verify_signature(&server_public, &(signature_c, signature_s), &message_hash), Ok(true));
+		}
+	}
 }
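The check above takes a two-component (c, s) signature; a sketch (an assumption, not spelled out in this diff) of a Schnorr-style verification consistent with that shape:

	// Sketch, assumption: math::verify_signature may differ in hashing and
	// encoding details from this textbook Schnorr formulation.
	//   R' = s * G + c * P            (P = server_public)
	//   accept iff c == H(R' || message_hash) mod n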
@@ -16,9 +16,8 @@

 use std::io;
 use std::time;
-use std::sync::{Arc, Weak};
-use std::sync::atomic::{AtomicBool, Ordering};
-use std::collections::{BTreeMap, BTreeSet, VecDeque};
+use std::sync::Arc;
+use std::collections::{BTreeMap, BTreeSet};
 use std::collections::btree_map::Entry;
 use std::net::{SocketAddr, IpAddr};
 use futures::{finished, failed, Future, Stream, BoxFuture};
@@ -27,13 +26,19 @@ use parking_lot::{RwLock, Mutex};
 use tokio_io::IoFuture;
 use tokio_core::reactor::{Handle, Remote, Interval};
 use tokio_core::net::{TcpListener, TcpStream};
-use ethkey::{Public, Secret, KeyPair, Signature, Random, Generator};
-use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, DocumentEncryptedKeyShadow};
-use key_server_cluster::message::{self, Message, ClusterMessage, EncryptionMessage, DecryptionMessage};
-use key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl, SessionState as DecryptionSessionState,
-	SessionParams as DecryptionSessionParams, Session as DecryptionSession, DecryptionSessionId};
-use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl, SessionState as EncryptionSessionState,
-	SessionParams as EncryptionSessionParams, Session as EncryptionSession};
+use ethkey::{Public, KeyPair, Signature, Random, Generator};
+use util::H256;
+use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage};
+use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, GenerationSessionWrapper, EncryptionSessionWrapper,
+	DecryptionSessionWrapper, SigningSessionWrapper};
+use key_server_cluster::message::{self, Message, ClusterMessage, GenerationMessage, EncryptionMessage, DecryptionMessage,
+	SigningMessage, ConsensusMessage};
+use key_server_cluster::generation_session::{Session as GenerationSession, SessionState as GenerationSessionState};
+#[cfg(test)]
+use key_server_cluster::generation_session::SessionImpl as GenerationSessionImpl;
+use key_server_cluster::decryption_session::{Session as DecryptionSession, DecryptionSessionId};
+use key_server_cluster::encryption_session::{Session as EncryptionSession, SessionState as EncryptionSessionState};
+use key_server_cluster::signing_session::{Session as SigningSession, SigningSessionId};
 use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream, read_encrypted_message, WriteMessage, write_encrypted_message};
 use key_server_cluster::net::{accept_connection as net_accept_connection, connect as net_connect, Connection as NetConnection};

@@ -50,18 +55,6 @@ const KEEP_ALIVE_SEND_INTERVAL: u64 = 30;
 /// we must treat this node as non-responding && disconnect from it.
 const KEEP_ALIVE_DISCONNECT_INTERVAL: u64 = 60;

-/// When there are no encryption session-related messages for ENCRYPTION_SESSION_TIMEOUT_INTERVAL seconds,
-/// we must treat this session as stalled && finish it with an error.
-/// This timeout is for cases when node is responding to KeepAlive messages, but intentionally ignores
-/// session messages.
-const ENCRYPTION_SESSION_TIMEOUT_INTERVAL: u64 = 60;
-
-/// When there are no decryption session-related messages for DECRYPTION_SESSION_TIMEOUT_INTERVAL seconds,
-/// we must treat this session as stalled && finish it with an error.
-/// This timeout is for cases when node is responding to KeepAlive messages, but intentionally ignores
-/// session messages.
-const DECRYPTION_SESSION_TIMEOUT_INTERVAL: u64 = 60;
-
 /// Empty future.
 type BoxedEmptyFuture = BoxFuture<(), ()>;
@@ -70,23 +63,27 @@ type BoxedEmptyFuture = BoxFuture<(), ()>;
 pub trait ClusterClient: Send + Sync {
 	/// Get cluster state.
 	fn cluster_state(&self) -> ClusterState;
+	/// Start new generation session.
+	fn new_generation_session(&self, session_id: SessionId, author: Public, threshold: usize) -> Result<Arc<GenerationSession>, Error>;
 	/// Start new encryption session.
-	fn new_encryption_session(&self, session_id: SessionId, threshold: usize) -> Result<Arc<EncryptionSession>, Error>;
+	fn new_encryption_session(&self, session_id: SessionId, requestor_signature: Signature, common_point: Public, encrypted_point: Public) -> Result<Arc<EncryptionSession>, Error>;
 	/// Start new decryption session.
 	fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature, is_shadow_decryption: bool) -> Result<Arc<DecryptionSession>, Error>;
+	/// Start new signing session.
+	fn new_signing_session(&self, session_id: SessionId, requestor_signature: Signature, message_hash: H256) -> Result<Arc<SigningSession>, Error>;

 	#[cfg(test)]
-	/// Ask node to make 'faulty' encryption sessions.
-	fn make_faulty_encryption_sessions(&self);
+	/// Ask node to make 'faulty' generation sessions.
+	fn make_faulty_generation_sessions(&self);
 	#[cfg(test)]
-	/// Get active encryption session with given id.
-	fn encryption_session(&self, session_id: &SessionId) -> Option<Arc<EncryptionSessionImpl>>;
+	/// Get active generation session with given id.
+	fn generation_session(&self, session_id: &SessionId) -> Option<Arc<GenerationSessionImpl>>;
 	#[cfg(test)]
 	/// Try connect to disconnected nodes.
 	fn connect(&self);
 }

-/// Cluster access for single encryption/decryption participant.
+/// Cluster access for single encryption/decryption/signing participant.
 pub trait Cluster: Send + Sync {
 	/// Broadcast message to all other nodes.
 	fn broadcast(&self, message: Message) -> Result<(), Error>;
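A sketch (not part of the commit) of driving the new entry point; `cluster` implements `ClusterClient`, and `wait` mirrors how `KeyServerImpl::sign_message` consumes the session elsewhere in this commit.

	// Sketch: starting and awaiting a signing session.
	let session = cluster.new_signing_session(session_id, requestor_signature, message_hash)?;
	let (signature_c, signature_s) = session.wait()?; // two Secret components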
@@ -166,52 +163,6 @@ pub struct ClusterConnections {
 	pub connections: RwLock<BTreeMap<NodeId, Arc<Connection>>>,
 }

-/// Active sessions on this cluster.
-pub struct ClusterSessions {
-	/// Self node id.
-	pub self_node_id: NodeId,
-	/// All nodes ids.
-	pub nodes: BTreeSet<NodeId>,
-	/// Reference to key storage
-	pub key_storage: Arc<KeyStorage>,
-	/// Reference to ACL storage
-	pub acl_storage: Arc<AclStorage>,
-	/// Active encryption sessions.
-	pub encryption_sessions: RwLock<BTreeMap<SessionId, QueuedEncryptionSession>>,
-	/// Active decryption sessions.
-	pub decryption_sessions: RwLock<BTreeMap<DecryptionSessionId, QueuedDecryptionSession>>,
-	/// Make faulty encryption sessions.
-	pub make_faulty_encryption_sessions: AtomicBool,
-}
-
-/// Encryption session and its message queue.
-pub struct QueuedEncryptionSession {
-	/// Session master.
-	pub master: NodeId,
-	/// Cluster view.
-	pub cluster_view: Arc<ClusterView>,
-	/// Last received message time.
-	pub last_message_time: time::Instant,
-	/// Encryption session.
-	pub session: Arc<EncryptionSessionImpl>,
-	/// Messages queue.
-	pub queue: VecDeque<(NodeId, EncryptionMessage)>,
-}
-
-/// Decryption session and its message queue.
-pub struct QueuedDecryptionSession {
-	/// Session master.
-	pub master: NodeId,
-	/// Cluster view.
-	pub cluster_view: Arc<ClusterView>,
-	/// Last received message time.
-	pub last_message_time: time::Instant,
-	/// Decryption session.
-	pub session: Arc<DecryptionSessionImpl>,
-	/// Messages queue.
-	pub queue: VecDeque<(NodeId, DecryptionMessage)>,
-}
-
 /// Cluster view core.
 struct ClusterViewCore {
 	/// Cluster reference.
@@ -236,28 +187,6 @@ pub struct Connection {
 	last_message_time: Mutex<time::Instant>,
 }

-/// Encryption session implementation, which removes session from cluster on drop.
-struct EncryptionSessionWrapper {
-	/// Wrapped session.
-	session: Arc<EncryptionSession>,
-	/// Session Id.
-	session_id: SessionId,
-	/// Cluster data reference.
-	cluster: Weak<ClusterData>,
-}
-
-/// Decryption session implementation, which removes session from cluster on drop.
-struct DecryptionSessionWrapper {
-	/// Wrapped session.
-	session: Arc<DecryptionSession>,
-	/// Session Id.
-	session_id: SessionId,
-	/// Session sub id.
-	access_key: Secret,
-	/// Cluster data reference.
-	cluster: Weak<ClusterData>,
-}
-
 impl ClusterCore {
 	pub fn new(handle: Handle, config: ClusterConfiguration) -> Result<Arc<Self>, Error> {
 		let listen_address = make_socket_address(&config.listen_address.0, config.listen_address.1)?;
@@ -461,62 +390,49 @@ impl ClusterCore {
 		connection.set_last_message_time(time::Instant::now());
 		trace!(target: "secretstore_net", "{}: received message {} from {}", data.self_key_pair.public(), message, connection.node_id());
 		match message {
+			Message::Generation(message) => ClusterCore::process_generation_message(data, connection, message),
 			Message::Encryption(message) => ClusterCore::process_encryption_message(data, connection, message),
 			Message::Decryption(message) => ClusterCore::process_decryption_message(data, connection, message),
+			Message::Signing(message) => ClusterCore::process_signing_message(data, connection, message),
 			Message::Cluster(message) => ClusterCore::process_cluster_message(data, connection, message),
 		}
 	}

-	/// Process single encryption message from the connection.
-	fn process_encryption_message(data: Arc<ClusterData>, connection: Arc<Connection>, mut message: EncryptionMessage) {
+	/// Process single generation message from the connection.
+	fn process_generation_message(data: Arc<ClusterData>, connection: Arc<Connection>, mut message: GenerationMessage) {
 		let session_id = message.session_id().clone();
 		let mut sender = connection.node_id().clone();
 		let session = match message {
-			EncryptionMessage::InitializeSession(_) => {
+			GenerationMessage::InitializeSession(_) => {
 				let mut connected_nodes = data.connections.connected_nodes();
 				connected_nodes.insert(data.self_key_pair.public().clone());

 				let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes));
-				data.sessions.new_encryption_session(sender.clone(), session_id.clone(), cluster)
+				data.sessions.new_generation_session(sender.clone(), session_id.clone(), cluster)
 			},
 			_ => {
-				data.sessions.encryption_session(&session_id)
+				data.sessions.generation_sessions.get(&session_id)
 					.ok_or(Error::InvalidSessionId)
 			},
 		};

 		let mut is_queued_message = false;
 		loop {
-			match session.clone().and_then(|session| match message {
-				EncryptionMessage::InitializeSession(ref message) =>
-					session.on_initialize_session(sender.clone(), message),
-				EncryptionMessage::ConfirmInitialization(ref message) =>
-					session.on_confirm_initialization(sender.clone(), message),
-				EncryptionMessage::CompleteInitialization(ref message) =>
-					session.on_complete_initialization(sender.clone(), message),
-				EncryptionMessage::KeysDissemination(ref message) =>
-					session.on_keys_dissemination(sender.clone(), message),
-				EncryptionMessage::PublicKeyShare(ref message) =>
-					session.on_public_key_share(sender.clone(), message),
-				EncryptionMessage::SessionError(ref message) =>
-					session.on_session_error(sender.clone(), message),
-				EncryptionMessage::SessionCompleted(ref message) =>
-					session.on_session_completed(sender.clone(), message),
-			}) {
+			match session.clone().and_then(|session| session.process_message(&sender, &message)) {
 				Ok(_) => {
 					// if session is completed => stop
 					let session = session.clone().expect("session.method() call finished with success; session exists; qed");
 					let session_state = session.state();
-					if session_state == EncryptionSessionState::Finished {
-						info!(target: "secretstore_net", "{}: encryption session completed", data.self_key_pair.public());
+					if session_state == GenerationSessionState::Finished {
+						info!(target: "secretstore_net", "{}: generation session completed", data.self_key_pair.public());
 					}
-					if session_state == EncryptionSessionState::Finished || session_state == EncryptionSessionState::Failed {
-						data.sessions.remove_encryption_session(&session_id);
+					if session_state == GenerationSessionState::Finished || session_state == GenerationSessionState::Failed {
+						data.sessions.generation_sessions.remove(&session_id);
 						break;
 					}

 					// try to dequeue message
-					match data.sessions.dequeue_encryption_message(&session_id) {
+					match data.sessions.generation_sessions.dequeue_message(&session_id) {
 						Some((msg_sender, msg)) => {
 							is_queued_message = true;
 							sender = msg_sender;
|
||||
}
|
||||
},
|
||||
Err(Error::TooEarlyForRequest) => {
|
||||
data.sessions.enqueue_encryption_message(&session_id, sender, message, is_queued_message);
|
||||
data.sessions.generation_sessions.enqueue_message(&session_id, sender, message, is_queued_message);
|
||||
break;
|
||||
},
|
||||
Err(err) => {
|
||||
warn!(target: "secretstore_net", "{}: encryption session error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender);
|
||||
data.sessions.respond_with_encryption_error(&session_id, message::SessionError {
|
||||
warn!(target: "secretstore_net", "{}: generation session error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender);
|
||||
data.sessions.respond_with_generation_error(&session_id, message::SessionError {
|
||||
session: session_id.clone().into(),
|
||||
error: format!("{:?}", err),
|
||||
});
|
||||
if err != Error::InvalidSessionId {
|
||||
data.sessions.remove_encryption_session(&session_id);
|
||||
data.sessions.generation_sessions.remove(&session_id);
|
||||
}
|
||||
break;
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Process single encryption message from the connection.
|
||||
fn process_encryption_message(data: Arc<ClusterData>, connection: Arc<Connection>, mut message: EncryptionMessage) {
|
||||
let session_id = message.session_id().clone();
|
||||
let mut sender = connection.node_id().clone();
|
||||
let session = match message {
|
||||
EncryptionMessage::InitializeEncryptionSession(_) => {
|
||||
let mut connected_nodes = data.connections.connected_nodes();
|
||||
connected_nodes.insert(data.self_key_pair.public().clone());
|
||||
|
||||
let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes));
|
||||
data.sessions.new_encryption_session(sender.clone(), session_id.clone(), cluster)
|
||||
},
|
||||
_ => {
|
||||
data.sessions.encryption_sessions.get(&session_id)
|
||||
.ok_or(Error::InvalidSessionId)
|
||||
},
|
||||
};
|
||||
|
||||
let mut is_queued_message = false;
|
||||
loop {
|
||||
match session.clone().and_then(|session| match message {
|
||||
EncryptionMessage::InitializeEncryptionSession(ref message) =>
|
||||
session.on_initialize_session(sender.clone(), message),
|
||||
EncryptionMessage::ConfirmEncryptionInitialization(ref message) =>
|
||||
session.on_confirm_initialization(sender.clone(), message),
|
||||
EncryptionMessage::EncryptionSessionError(ref message) =>
|
||||
session.on_session_error(sender.clone(), message),
|
||||
}) {
|
||||
Ok(_) => {
|
||||
// if session is completed => stop
|
||||
let session = session.clone().expect("session.method() call finished with success; session exists; qed");
|
||||
let session_state = session.state();
|
||||
if session_state == EncryptionSessionState::Finished {
|
||||
info!(target: "secretstore_net", "{}: encryption session completed", data.self_key_pair.public());
|
||||
}
|
||||
if session_state == EncryptionSessionState::Finished || session_state == EncryptionSessionState::Failed {
|
||||
data.sessions.encryption_sessions.remove(&session_id);
|
||||
break;
|
||||
}
|
||||
|
||||
// try to dequeue message
|
||||
match data.sessions.encryption_sessions.dequeue_message(&session_id) {
|
||||
Some((msg_sender, msg)) => {
|
||||
is_queued_message = true;
|
||||
sender = msg_sender;
|
||||
message = msg;
|
||||
},
|
||||
None => break,
|
||||
}
|
||||
},
|
||||
Err(Error::TooEarlyForRequest) => {
|
||||
data.sessions.encryption_sessions.enqueue_message(&session_id, sender, message, is_queued_message);
|
||||
break;
|
||||
},
|
||||
Err(err) => {
|
||||
warn!(target: "secretstore_net", "{}: encryption session error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender);
|
||||
data.sessions.respond_with_encryption_error(&session_id, message::EncryptionSessionError {
|
||||
session: session_id.clone().into(),
|
||||
error: format!("{:?}", err),
|
||||
});
|
||||
if err != Error::InvalidSessionId {
|
||||
data.sessions.encryption_sessions.remove(&session_id);
|
||||
}
|
||||
break;
|
||||
},
|
||||
@ -548,63 +533,45 @@ impl ClusterCore {
|
||||
	fn process_decryption_message(data: Arc<ClusterData>, connection: Arc<Connection>, mut message: DecryptionMessage) {
		let session_id = message.session_id().clone();
		let sub_session_id = message.sub_session_id().clone();
		let decryption_session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone());
		let mut sender = connection.node_id().clone();
		let session = match message {
			DecryptionMessage::InitializeDecryptionSession(_) => {
			DecryptionMessage::DecryptionConsensusMessage(ref message) if match message.message {
				ConsensusMessage::InitializeConsensusSession(_) => true,
				_ => false,
			} => {
				let mut connected_nodes = data.connections.connected_nodes();
				connected_nodes.insert(data.self_key_pair.public().clone());

				let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes));
				data.sessions.new_decryption_session(sender.clone(), session_id.clone(), sub_session_id.clone(), cluster)
				data.sessions.new_decryption_session(sender.clone(), session_id.clone(), sub_session_id.clone(), cluster, None)
			},
			_ => {
				data.sessions.decryption_session(&session_id, &sub_session_id)
				data.sessions.decryption_sessions.get(&decryption_session_id)
					.ok_or(Error::InvalidSessionId)
			},
		};

		let mut is_queued_message = false;
		loop {
			match session.clone().and_then(|session| match message {
				DecryptionMessage::InitializeDecryptionSession(ref message) =>
					session.on_initialize_session(sender.clone(), message),
				DecryptionMessage::ConfirmDecryptionInitialization(ref message) =>
					session.on_confirm_initialization(sender.clone(), message),
				DecryptionMessage::RequestPartialDecryption(ref message) =>
					session.on_partial_decryption_requested(sender.clone(), message),
				DecryptionMessage::PartialDecryption(ref message) =>
					session.on_partial_decryption(sender.clone(), message),
				DecryptionMessage::DecryptionSessionError(ref message) =>
					session.on_session_error(sender.clone(), message),
				DecryptionMessage::DecryptionSessionCompleted(ref message) =>
					session.on_session_completed(sender.clone(), message),
			}) {
			match session.clone().and_then(|session| session.process_message(&sender, &message)) {
				Ok(_) => {
					// if session is completed => stop
					let session = session.clone().expect("session.method() call finished with success; session exists; qed");
					let session_state = session.state();
					if session_state == DecryptionSessionState::Finished {
					if session.is_finished() {
						info!(target: "secretstore_net", "{}: decryption session completed", data.self_key_pair.public());
					}
					if session_state == DecryptionSessionState::Finished || session_state == DecryptionSessionState::Failed {
						data.sessions.remove_decryption_session(&session_id, &sub_session_id);
						data.sessions.decryption_sessions.remove(&decryption_session_id);
						break;
					}

					// try to dequeue message
					match data.sessions.dequeue_decryption_message(&session_id, &sub_session_id) {
					match data.sessions.decryption_sessions.dequeue_message(&decryption_session_id) {
						Some((msg_sender, msg)) => {
							is_queued_message = true;
							sender = msg_sender;
							message = msg;
						},
						None => break,
					}
				},
				Err(Error::TooEarlyForRequest) => {
					data.sessions.enqueue_decryption_message(&session_id, &sub_session_id, sender, message, is_queued_message);
					break;
				},
				Err(err) => {
					warn!(target: "secretstore_net", "{}: decryption session error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender);
					data.sessions.respond_with_decryption_error(&session_id, &sub_session_id, &sender, message::DecryptionSessionError {
@ -613,7 +580,72 @@ impl ClusterCore {
						error: format!("{:?}", err),
					});
					if err != Error::InvalidSessionId {
						data.sessions.remove_decryption_session(&session_id, &sub_session_id);
						data.sessions.decryption_sessions.remove(&decryption_session_id);
					}
					break;
				},
			}
		}
	}

	/// Process single signing message from the connection.
	fn process_signing_message(data: Arc<ClusterData>, connection: Arc<Connection>, mut message: SigningMessage) {
		let session_id = message.session_id().clone();
		let sub_session_id = message.sub_session_id().clone();
		let signing_session_id = SigningSessionId::new(session_id.clone(), sub_session_id.clone());
		let mut sender = connection.node_id().clone();
		let session = match message {
			SigningMessage::SigningConsensusMessage(ref message) if match message.message {
				ConsensusMessage::InitializeConsensusSession(_) => true,
				_ => false,
			} => {
				let mut connected_nodes = data.connections.connected_nodes();
				connected_nodes.insert(data.self_key_pair.public().clone());

				let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes));
				data.sessions.new_signing_session(sender.clone(), session_id.clone(), sub_session_id.clone(), cluster, None)
			},
			_ => {
				data.sessions.signing_sessions.get(&signing_session_id)
					.ok_or(Error::InvalidSessionId)
			},
		};

		let mut is_queued_message = false;
		loop {
			match session.clone().and_then(|session| session.process_message(&sender, &message)) {
				Ok(_) => {
					// if session is completed => stop
					let session = session.clone().expect("session.method() call finished with success; session exists; qed");
					if session.is_finished() {
						info!(target: "secretstore_net", "{}: signing session completed", data.self_key_pair.public());
						data.sessions.signing_sessions.remove(&signing_session_id);
						break;
					}

					// try to dequeue message
					match data.sessions.signing_sessions.dequeue_message(&signing_session_id) {
						Some((msg_sender, msg)) => {
							is_queued_message = true;
							sender = msg_sender;
							message = msg;
						},
						None => break,
					}
				},
				Err(Error::TooEarlyForRequest) => {
					data.sessions.signing_sessions.enqueue_message(&signing_session_id, sender, message, is_queued_message);
					break;
				},
				Err(err) => {
					warn!(target: "secretstore_net", "{}: signing session error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender);
					data.sessions.respond_with_signing_error(&session_id, &sub_session_id, &sender, message::SigningSessionError {
						session: session_id.clone().into(),
						sub_session: sub_session_id.clone().into(),
						error: format!("{:?}", err),
					});
					if err != Error::InvalidSessionId {
						data.sessions.signing_sessions.remove(&signing_session_id);
					}
					break;
				},
@ -702,213 +734,6 @@ impl ClusterConnections {
	}
}

impl ClusterSessions {
	pub fn new(config: &ClusterConfiguration) -> Self {
		ClusterSessions {
			self_node_id: config.self_key_pair.public().clone(),
			nodes: config.nodes.keys().cloned().collect(),
			acl_storage: config.acl_storage.clone(),
			key_storage: config.key_storage.clone(),
			encryption_sessions: RwLock::new(BTreeMap::new()),
			decryption_sessions: RwLock::new(BTreeMap::new()),
			make_faulty_encryption_sessions: AtomicBool::new(false),
		}
	}

	pub fn new_encryption_session(&self, master: NodeId, session_id: SessionId, cluster: Arc<ClusterView>) -> Result<Arc<EncryptionSessionImpl>, Error> {
		let mut encryption_sessions = self.encryption_sessions.write();
		// check that there's no active encryption session with the same id
		if encryption_sessions.contains_key(&session_id) {
			return Err(Error::DuplicateSessionId);
		}
		// check that there's no finished encryption session with the same id
		if self.key_storage.contains(&session_id) {
			return Err(Error::DuplicateSessionId);
		}

		// communicating to all other nodes is crucial for encryption session
		// => check that we have connections to all cluster nodes
		if self.nodes.iter().any(|n| !cluster.is_connected(n)) {
			return Err(Error::NodeDisconnected);
		}

		let session = Arc::new(EncryptionSessionImpl::new(EncryptionSessionParams {
			id: session_id.clone(),
			self_node_id: self.self_node_id.clone(),
			key_storage: self.key_storage.clone(),
			cluster: cluster.clone(),
		}));
		let encryption_session = QueuedEncryptionSession {
			master: master,
			cluster_view: cluster,
			last_message_time: time::Instant::now(),
			session: session.clone(),
			queue: VecDeque::new()
		};
		if self.make_faulty_encryption_sessions.load(Ordering::Relaxed) {
			encryption_session.session.simulate_faulty_behaviour();
		}
		encryption_sessions.insert(session_id, encryption_session);
		Ok(session)
	}

	pub fn remove_encryption_session(&self, session_id: &SessionId) {
		self.encryption_sessions.write().remove(session_id);
	}

	pub fn encryption_session(&self, session_id: &SessionId) -> Option<Arc<EncryptionSessionImpl>> {
		self.encryption_sessions.read().get(session_id).map(|s| s.session.clone())
	}

	pub fn enqueue_encryption_message(&self, session_id: &SessionId, sender: NodeId, message: EncryptionMessage, is_queued_message: bool) {
		self.encryption_sessions.write().get_mut(session_id)
			.map(|session| if is_queued_message { session.queue.push_front((sender, message)) }
				else { session.queue.push_back((sender, message)) });
	}

	pub fn dequeue_encryption_message(&self, session_id: &SessionId) -> Option<(NodeId, EncryptionMessage)> {
		self.encryption_sessions.write().get_mut(session_id)
			.and_then(|session| session.queue.pop_front())
	}

	pub fn respond_with_encryption_error(&self, session_id: &SessionId, error: message::SessionError) {
		self.encryption_sessions.read().get(session_id)
			.map(|s| {
				// error in encryption session is considered fatal
				// => broadcast error

				// do not bother processing send error, as we are already processing an error
				let _ = s.cluster_view.broadcast(Message::Encryption(EncryptionMessage::SessionError(error)));
			});
	}

	#[cfg(test)]
	pub fn make_faulty_encryption_sessions(&self) {
		self.make_faulty_encryption_sessions.store(true, Ordering::Relaxed);
	}

	pub fn new_decryption_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, cluster: Arc<ClusterView>) -> Result<Arc<DecryptionSessionImpl>, Error> {
		let mut decryption_sessions = self.decryption_sessions.write();
		let session_id = DecryptionSessionId::new(session_id, sub_session_id);
		if decryption_sessions.contains_key(&session_id) {
			return Err(Error::DuplicateSessionId);
		}

		// some of nodes, which were encrypting secret may be down
		// => do not use these in decryption session
		let mut encrypted_data = self.key_storage.get(&session_id.id).map_err(|e| Error::KeyStorage(e.into()))?;
		let disconnected_nodes: BTreeSet<_> = encrypted_data.id_numbers.keys().cloned().collect();
		let disconnected_nodes: BTreeSet<_> = disconnected_nodes.difference(&cluster.nodes()).cloned().collect();
		for disconnected_node in disconnected_nodes {
			encrypted_data.id_numbers.remove(&disconnected_node);
		}

		let session = Arc::new(DecryptionSessionImpl::new(DecryptionSessionParams {
			id: session_id.id.clone(),
			access_key: session_id.access_key.clone(),
			self_node_id: self.self_node_id.clone(),
			encrypted_data: encrypted_data,
			acl_storage: self.acl_storage.clone(),
			cluster: cluster.clone(),
		})?);
		let decryption_session = QueuedDecryptionSession {
			master: master,
			cluster_view: cluster,
			last_message_time: time::Instant::now(),
			session: session.clone(),
			queue: VecDeque::new()
		};
		decryption_sessions.insert(session_id, decryption_session);
		Ok(session)
	}

	pub fn remove_decryption_session(&self, session_id: &SessionId, sub_session_id: &Secret) {
		let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone());
		self.decryption_sessions.write().remove(&session_id);
	}

	pub fn decryption_session(&self, session_id: &SessionId, sub_session_id: &Secret) -> Option<Arc<DecryptionSessionImpl>> {
		let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone());
		self.decryption_sessions.read().get(&session_id).map(|s| s.session.clone())
	}

	pub fn enqueue_decryption_message(&self, session_id: &SessionId, sub_session_id: &Secret, sender: NodeId, message: DecryptionMessage, is_queued_message: bool) {
		let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone());
		self.decryption_sessions.write().get_mut(&session_id)
			.map(|session| if is_queued_message { session.queue.push_front((sender, message)) }
				else { session.queue.push_back((sender, message)) });
	}

	pub fn dequeue_decryption_message(&self, session_id: &SessionId, sub_session_id: &Secret) -> Option<(NodeId, DecryptionMessage)> {
		let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone());
		self.decryption_sessions.write().get_mut(&session_id)
			.and_then(|session| session.queue.pop_front())
	}

	pub fn respond_with_decryption_error(&self, session_id: &SessionId, sub_session_id: &Secret, to: &NodeId, error: message::DecryptionSessionError) {
		let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone());
		self.decryption_sessions.read().get(&session_id)
			.map(|s| {
				// error in decryption session is non-fatal, if occurs on slave node
				// => either respond with error
				// => or broadcast error

				// do not bother processing send error, as we are already processing an error
				if &s.master == s.session.node() {
					let _ = s.cluster_view.broadcast(Message::Decryption(DecryptionMessage::DecryptionSessionError(error)));
				} else {
					let _ = s.cluster_view.send(to, Message::Decryption(DecryptionMessage::DecryptionSessionError(error)));
				}
			});
	}

	fn stop_stalled_sessions(&self) {
		{
			let sessions = self.encryption_sessions.write();
			for sid in sessions.keys().collect::<Vec<_>>() {
				let session = sessions.get(&sid).expect("enumerating only existing sessions; qed");
				if time::Instant::now() - session.last_message_time > time::Duration::from_secs(ENCRYPTION_SESSION_TIMEOUT_INTERVAL) {
					session.session.on_session_timeout();
					if session.session.state() == EncryptionSessionState::Finished
						|| session.session.state() == EncryptionSessionState::Failed {
						self.remove_encryption_session(&sid);
					}
				}
			}
		}
		{
			let sessions = self.decryption_sessions.write();
			for sid in sessions.keys().collect::<Vec<_>>() {
				let session = sessions.get(&sid).expect("enumerating only existing sessions; qed");
				if time::Instant::now() - session.last_message_time > time::Duration::from_secs(DECRYPTION_SESSION_TIMEOUT_INTERVAL) {
					session.session.on_session_timeout();
					if session.session.state() == DecryptionSessionState::Finished
						|| session.session.state() == DecryptionSessionState::Failed {
						self.remove_decryption_session(&sid.id, &sid.access_key);
					}
				}
			}
		}
	}

	pub fn on_connection_timeout(&self, node_id: &NodeId) {
		for (sid, session) in self.encryption_sessions.read().iter() {
			session.session.on_node_timeout(node_id);
			if session.session.state() == EncryptionSessionState::Finished
				|| session.session.state() == EncryptionSessionState::Failed {
				self.remove_encryption_session(sid);
			}
		}
		for (sid, session) in self.decryption_sessions.read().iter() {
			session.session.on_node_timeout(node_id);
			if session.session.state() == DecryptionSessionState::Finished
				|| session.session.state() == DecryptionSessionState::Failed {
				self.remove_decryption_session(&sid.id, &sid.access_key);
			}
		}
	}
}

impl ClusterData {
	pub fn new(handle: &Handle, config: ClusterConfiguration, connections: ClusterConnections, sessions: ClusterSessions) -> Arc<Self> {
		Arc::new(ClusterData {
@ -926,6 +751,11 @@ impl ClusterData {
		self.connections.get(node)
	}

	/// Get sessions reference.
	pub fn sessions(&self) -> &ClusterSessions {
		&self.sessions
	}

	/// Spawns a future using thread pool and schedules execution of it with event loop handle.
	pub fn spawn<F>(&self, f: F) where F: Future + Send + 'static, F::Item: Send + 'static, F::Error: Send + 'static {
		let pool_work = self.pool.spawn(f);
@ -1028,13 +858,23 @@ impl ClusterClient for ClusterClientImpl {
		self.data.connections.cluster_state()
	}

	fn new_encryption_session(&self, session_id: SessionId, threshold: usize) -> Result<Arc<EncryptionSession>, Error> {
	fn new_generation_session(&self, session_id: SessionId, author: Public, threshold: usize) -> Result<Arc<GenerationSession>, Error> {
		let mut connected_nodes = self.data.connections.connected_nodes();
		connected_nodes.insert(self.data.self_key_pair.public().clone());

		let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone()));
		let session = self.data.sessions.new_encryption_session(self.data.self_key_pair.public().clone(), session_id.clone(), cluster)?;
		session.initialize(threshold, connected_nodes)?;
		let session = self.data.sessions.new_generation_session(self.data.self_key_pair.public().clone(), session_id, cluster)?;
		session.initialize(author, threshold, connected_nodes)?;
		Ok(GenerationSessionWrapper::new(Arc::downgrade(&self.data), session_id, session))
	}

	fn new_encryption_session(&self, session_id: SessionId, requestor_signature: Signature, common_point: Public, encrypted_point: Public) -> Result<Arc<EncryptionSession>, Error> {
		let mut connected_nodes = self.data.connections.connected_nodes();
		connected_nodes.insert(self.data.self_key_pair.public().clone());

		let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone()));
		let session = self.data.sessions.new_encryption_session(self.data.self_key_pair.public().clone(), session_id, cluster)?;
		session.initialize(requestor_signature, common_point, encrypted_point)?;
		Ok(EncryptionSessionWrapper::new(Arc::downgrade(&self.data), session_id, session))
	}

@ -1044,9 +884,20 @@ impl ClusterClient for ClusterClientImpl {

		let access_key = Random.generate()?.secret().clone();
		let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone()));
		let session = self.data.sessions.new_decryption_session(self.data.self_key_pair.public().clone(), session_id, access_key.clone(), cluster)?;
		session.initialize(requestor_signature, is_shadow_decryption)?;
		Ok(DecryptionSessionWrapper::new(Arc::downgrade(&self.data), session_id, access_key, session))
		let session = self.data.sessions.new_decryption_session(self.data.self_key_pair.public().clone(), session_id, access_key.clone(), cluster, Some(requestor_signature))?;
		session.initialize(is_shadow_decryption)?;
		Ok(DecryptionSessionWrapper::new(Arc::downgrade(&self.data), DecryptionSessionId::new(session_id, access_key), session))
	}

	fn new_signing_session(&self, session_id: SessionId, requestor_signature: Signature, message_hash: H256) -> Result<Arc<SigningSession>, Error> {
		let mut connected_nodes = self.data.connections.connected_nodes();
		connected_nodes.insert(self.data.self_key_pair.public().clone());

		let access_key = Random.generate()?.secret().clone();
		let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone()));
		let session = self.data.sessions.new_signing_session(self.data.self_key_pair.public().clone(), session_id, access_key.clone(), cluster, Some(requestor_signature))?;
		session.initialize(message_hash)?;
		Ok(SigningSessionWrapper::new(Arc::downgrade(&self.data), SigningSessionId::new(session_id, access_key), session))
	}

	#[cfg(test)]
@ -1055,71 +906,13 @@ impl ClusterClient for ClusterClientImpl {
	}

	#[cfg(test)]
	fn make_faulty_encryption_sessions(&self) {
		self.data.sessions.make_faulty_encryption_sessions();
	fn make_faulty_generation_sessions(&self) {
		self.data.sessions.make_faulty_generation_sessions();
	}

	#[cfg(test)]
	fn encryption_session(&self, session_id: &SessionId) -> Option<Arc<EncryptionSessionImpl>> {
		self.data.sessions.encryption_session(session_id)
	}
}

impl EncryptionSessionWrapper {
	pub fn new(cluster: Weak<ClusterData>, session_id: SessionId, session: Arc<EncryptionSession>) -> Arc<Self> {
		Arc::new(EncryptionSessionWrapper {
			session: session,
			session_id: session_id,
			cluster: cluster,
		})
	}
}

impl EncryptionSession for EncryptionSessionWrapper {
	fn state(&self) -> EncryptionSessionState {
		self.session.state()
	}

	fn wait(&self, timeout: Option<time::Duration>) -> Result<Public, Error> {
		self.session.wait(timeout)
	}

	#[cfg(test)]
	fn joint_public_key(&self) -> Option<Result<Public, Error>> {
		self.session.joint_public_key()
	}
}

impl Drop for EncryptionSessionWrapper {
	fn drop(&mut self) {
		if let Some(cluster) = self.cluster.upgrade() {
			cluster.sessions.remove_encryption_session(&self.session_id);
		}
	}
}

impl DecryptionSessionWrapper {
	pub fn new(cluster: Weak<ClusterData>, session_id: SessionId, access_key: Secret, session: Arc<DecryptionSession>) -> Arc<Self> {
		Arc::new(DecryptionSessionWrapper {
			session: session,
			session_id: session_id,
			access_key: access_key,
			cluster: cluster,
		})
	}
}

impl DecryptionSession for DecryptionSessionWrapper {
	fn wait(&self) -> Result<DocumentEncryptedKeyShadow, Error> {
		self.session.wait()
	}
}

impl Drop for DecryptionSessionWrapper {
	fn drop(&mut self) {
		if let Some(cluster) = self.cluster.upgrade() {
			cluster.sessions.remove_decryption_session(&self.session_id, &self.access_key);
		}
	fn generation_session(&self, session_id: &SessionId) -> Option<Arc<GenerationSessionImpl>> {
		self.data.sessions.generation_sessions.get(session_id)
	}
}

@ -1135,11 +928,11 @@ pub mod tests {
	use std::collections::VecDeque;
	use parking_lot::Mutex;
	use tokio_core::reactor::Core;
	use ethkey::{Random, Generator};
	use ethkey::{Random, Generator, Public};
	use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage};
	use key_server_cluster::message::Message;
	use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration};
	use key_server_cluster::encryption_session::{Session as EncryptionSession, SessionState as EncryptionSessionState};
	use key_server_cluster::generation_session::{Session as GenerationSession, SessionState as GenerationSessionState};

	#[derive(Debug)]
	pub struct DummyCluster {
@ -1249,11 +1042,11 @@ pub mod tests {
	}

	#[test]
	fn cluster_wont_start_encryption_session_if_not_fully_connected() {
	fn cluster_wont_start_generation_session_if_not_fully_connected() {
		let core = Core::new().unwrap();
		let clusters = make_clusters(&core, 6013, 3);
		clusters[0].run().unwrap();
		match clusters[0].client().new_encryption_session(SessionId::default(), 1) {
		match clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1) {
			Err(Error::NodeDisconnected) => (),
			Err(e) => panic!("unexpected error {:?}", e),
			_ => panic!("unexpected success"),
@ -1261,50 +1054,50 @@
	}

	#[test]
	fn error_in_encryption_session_broadcasted_to_all_other_nodes() {
	fn error_in_generation_session_broadcasted_to_all_other_nodes() {
		let mut core = Core::new().unwrap();
		let clusters = make_clusters(&core, 6016, 3);
		run_clusters(&clusters);
		loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established));

		// ask one of nodes to produce faulty encryption sessions
		clusters[1].client().make_faulty_encryption_sessions();
		// ask one of nodes to produce faulty generation sessions
		clusters[1].client().make_faulty_generation_sessions();

		// start && wait for encryption session to fail
		let session = clusters[0].client().new_encryption_session(SessionId::default(), 1).unwrap();
		loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_key().is_some());
		assert!(session.joint_public_key().unwrap().is_err());
		// start && wait for generation session to fail
		let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap();
		loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some());
		assert!(session.joint_public_and_secret().unwrap().is_err());

		// check that faulty session is either removed from all nodes, or nonexistent (already removed)
		assert!(clusters[0].client().encryption_session(&SessionId::default()).is_none());
		assert!(clusters[0].client().generation_session(&SessionId::default()).is_none());
		for i in 1..3 {
			if let Some(session) = clusters[i].client().encryption_session(&SessionId::default()) {
				loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_key().is_some());
				assert!(session.joint_public_key().unwrap().is_err());
				assert!(clusters[i].client().encryption_session(&SessionId::default()).is_none());
			if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) {
				loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some());
				assert!(session.joint_public_and_secret().unwrap().is_err());
				assert!(clusters[i].client().generation_session(&SessionId::default()).is_none());
			}
		}
	}

	#[test]
	fn encryption_session_is_removed_when_succeeded() {
	fn generation_session_is_removed_when_succeeded() {
		let mut core = Core::new().unwrap();
		let clusters = make_clusters(&core, 6019, 3);
		run_clusters(&clusters);
		loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established));

		// start && wait for encryption session to complete
		let session = clusters[0].client().new_encryption_session(SessionId::default(), 1).unwrap();
		loop_until(&mut core, time::Duration::from_millis(300), || session.state() == EncryptionSessionState::Finished);
		assert!(session.joint_public_key().unwrap().is_ok());
		// start && wait for generation session to complete
		let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap();
		loop_until(&mut core, time::Duration::from_millis(300), || session.state() == GenerationSessionState::Finished);
		assert!(session.joint_public_and_secret().unwrap().is_ok());

		// check that session is either removed from all nodes, or nonexistent (already removed)
		assert!(clusters[0].client().encryption_session(&SessionId::default()).is_none());
		assert!(clusters[0].client().generation_session(&SessionId::default()).is_none());
		for i in 1..3 {
			if let Some(session) = clusters[i].client().encryption_session(&SessionId::default()) {
				loop_until(&mut core, time::Duration::from_millis(300), || session.state() == EncryptionSessionState::Finished);
				assert!(session.joint_public_key().unwrap().is_err());
				assert!(clusters[i].client().encryption_session(&SessionId::default()).is_none());
			if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) {
				loop_until(&mut core, time::Duration::from_millis(300), || session.state() == GenerationSessionState::Finished);
				assert!(session.joint_public_and_secret().unwrap().is_err());
				assert!(clusters[i].client().generation_session(&SessionId::default()).is_none());
			}
		}
	}

secret_store/src/key_server_cluster/cluster_sessions.rs (new file, 506 lines)
@ -0,0 +1,506 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::time;
use std::sync::{Arc, Weak};
use std::sync::atomic::{AtomicBool, Ordering};
use std::collections::{VecDeque, BTreeSet, BTreeMap};
use parking_lot::RwLock;
use ethkey::{Public, Secret, Signature};
use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, DocumentKeyShare, EncryptedDocumentKeyShadow, SessionMeta};
use key_server_cluster::cluster::{Cluster, ClusterData, ClusterView, ClusterConfiguration};
use key_server_cluster::message::{self, Message, GenerationMessage, EncryptionMessage, DecryptionMessage, SigningMessage};
use key_server_cluster::generation_session::{Session as GenerationSession, SessionImpl as GenerationSessionImpl,
	SessionParams as GenerationSessionParams, SessionState as GenerationSessionState};
use key_server_cluster::decryption_session::{Session as DecryptionSession, SessionImpl as DecryptionSessionImpl,
	DecryptionSessionId, SessionParams as DecryptionSessionParams};
use key_server_cluster::encryption_session::{Session as EncryptionSession, SessionImpl as EncryptionSessionImpl,
	SessionParams as EncryptionSessionParams, SessionState as EncryptionSessionState};
use key_server_cluster::signing_session::{Session as SigningSession, SessionImpl as SigningSessionImpl,
	SigningSessionId, SessionParams as SigningSessionParams};

/// When there are no session-related messages for SESSION_TIMEOUT_INTERVAL seconds,
/// we must treat this session as stalled && finish it with an error.
/// This timeout is for cases when node is responding to KeepAlive messages, but intentionally ignores
/// session messages.
const SESSION_TIMEOUT_INTERVAL: u64 = 60;

/// Generic cluster session.
pub trait ClusterSession {
	/// If session is finished (either with success or not).
	fn is_finished(&self) -> bool;
	/// When it takes too much time to complete session.
	fn on_session_timeout(&self);
	/// When it takes too much time to receive response from the node.
	fn on_node_timeout(&self, node_id: &NodeId);
}
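
Any session type can live in the generic containers below as long as it implements this trait. A minimal conforming implementation might look as follows; the toy `NoopSession` and the local `NodeId` alias are hypothetical, for illustration only (the real `NodeId` is a public key):

use std::sync::atomic::{AtomicBool, Ordering};

type NodeId = u64; // hypothetical alias standing in for the real NodeId

trait ClusterSession {
	fn is_finished(&self) -> bool;
	fn on_session_timeout(&self);
	fn on_node_timeout(&self, node_id: &NodeId);
}

/// Toy session that simply fails itself on any timeout.
struct NoopSession {
	finished: AtomicBool,
}

impl ClusterSession for NoopSession {
	fn is_finished(&self) -> bool {
		self.finished.load(Ordering::Relaxed)
	}

	fn on_session_timeout(&self) {
		// a stalled session is finished with an error
		self.finished.store(true, Ordering::Relaxed);
	}

	fn on_node_timeout(&self, _node_id: &NodeId) {
		// losing any node is fatal for this toy session
		self.finished.store(true, Ordering::Relaxed);
	}
}

fn main() {
	let session = NoopSession { finished: AtomicBool::new(false) };
	session.on_session_timeout();
	assert!(session.is_finished());
}

The trait deliberately exposes only liveness hooks; everything message-specific stays behind the concrete session types.
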
/// Active sessions on this cluster.
pub struct ClusterSessions {
	/// Key generation sessions.
	pub generation_sessions: ClusterSessionsContainer<SessionId, GenerationSessionImpl, GenerationMessage>,
	/// Encryption sessions.
	pub encryption_sessions: ClusterSessionsContainer<SessionId, EncryptionSessionImpl, EncryptionMessage>,
	/// Decryption sessions.
	pub decryption_sessions: ClusterSessionsContainer<DecryptionSessionId, DecryptionSessionImpl, DecryptionMessage>,
	/// Signing sessions.
	pub signing_sessions: ClusterSessionsContainer<SigningSessionId, SigningSessionImpl, SigningMessage>,
	/// Self node id.
	self_node_id: NodeId,
	/// All nodes ids.
	nodes: BTreeSet<NodeId>,
	/// Reference to key storage
	key_storage: Arc<KeyStorage>,
	/// Reference to ACL storage
	acl_storage: Arc<AclStorage>,
	/// Make faulty generation sessions.
	make_faulty_generation_sessions: AtomicBool,
}

/// Active sessions container.
pub struct ClusterSessionsContainer<K, V, M> {
	/// Active sessions.
	pub sessions: RwLock<BTreeMap<K, QueuedSession<V, M>>>,
}

/// Session and its message queue.
pub struct QueuedSession<V, M> {
	/// Session master.
	pub master: NodeId,
	/// Cluster view.
	pub cluster_view: Arc<ClusterView>,
	/// Last received message time.
	pub last_message_time: time::Instant,
	/// Session.
	pub session: Arc<V>,
	/// Messages queue.
	pub queue: VecDeque<(NodeId, M)>,
}

/// Generation session implementation, which removes session from cluster on drop.
pub struct GenerationSessionWrapper {
	/// Wrapped session.
	session: Arc<GenerationSession>,
	/// Session Id.
	session_id: SessionId,
	/// Cluster data reference.
	cluster: Weak<ClusterData>,
}

/// Encryption session implementation, which removes session from cluster on drop.
pub struct EncryptionSessionWrapper {
	/// Wrapped session.
	session: Arc<EncryptionSession>,
	/// Session Id.
	session_id: SessionId,
	/// Cluster data reference.
	cluster: Weak<ClusterData>,
}

/// Decryption session implementation, which removes session from cluster on drop.
pub struct DecryptionSessionWrapper {
	/// Wrapped session.
	session: Arc<DecryptionSession>,
	/// Session Id.
	session_id: DecryptionSessionId,
	/// Cluster data reference.
	cluster: Weak<ClusterData>,
}

/// Signing session implementation, which removes session from cluster on drop.
pub struct SigningSessionWrapper {
	/// Wrapped session.
	session: Arc<SigningSession>,
	/// Session Id.
	session_id: SigningSessionId,
	/// Cluster data reference.
	cluster: Weak<ClusterData>,
}

impl ClusterSessions {
	/// Create new cluster sessions container.
	pub fn new(config: &ClusterConfiguration) -> Self {
		ClusterSessions {
			self_node_id: config.self_key_pair.public().clone(),
			nodes: config.nodes.keys().cloned().collect(),
			acl_storage: config.acl_storage.clone(),
			key_storage: config.key_storage.clone(),
			generation_sessions: ClusterSessionsContainer::new(),
			encryption_sessions: ClusterSessionsContainer::new(),
			decryption_sessions: ClusterSessionsContainer::new(),
			signing_sessions: ClusterSessionsContainer::new(),
			make_faulty_generation_sessions: AtomicBool::new(false),
		}
	}

	#[cfg(test)]
	pub fn make_faulty_generation_sessions(&self) {
		self.make_faulty_generation_sessions.store(true, Ordering::Relaxed);
	}

	/// Create new generation session.
	pub fn new_generation_session(&self, master: NodeId, session_id: SessionId, cluster: Arc<ClusterView>) -> Result<Arc<GenerationSessionImpl>, Error> {
		// check that there's no finished generation session with the same id
		if self.key_storage.contains(&session_id) {
			return Err(Error::DuplicateSessionId);
		}
		// communicating to all other nodes is crucial for generation session
		// => check that we have connections to all cluster nodes
		if self.nodes.iter().any(|n| !cluster.is_connected(n)) {
			return Err(Error::NodeDisconnected);
		}

		// check that there's no active generation session with the same id
		self.generation_sessions.insert(master, session_id, cluster.clone(), move ||
			Ok(GenerationSessionImpl::new(GenerationSessionParams {
				id: session_id.clone(),
				self_node_id: self.self_node_id.clone(),
				key_storage: Some(self.key_storage.clone()),
				cluster: cluster,
			})))
			.map(|session| {
				if self.make_faulty_generation_sessions.load(Ordering::Relaxed) {
					session.simulate_faulty_behaviour();
				}
				session
			})
	}

	/// Send generation session error.
	pub fn respond_with_generation_error(&self, session_id: &SessionId, error: message::SessionError) {
		self.generation_sessions.sessions.read().get(session_id)
			.map(|s| {
				// error in generation session is considered fatal
				// => broadcast error

				// do not bother processing send error, as we are already processing an error
				let _ = s.cluster_view.broadcast(Message::Generation(GenerationMessage::SessionError(error)));
			});
	}

	/// Create new encryption session.
	pub fn new_encryption_session(&self, master: NodeId, session_id: SessionId, cluster: Arc<ClusterView>) -> Result<Arc<EncryptionSessionImpl>, Error> {
		let encrypted_data = self.read_key_share(&session_id, &cluster)?;
		self.encryption_sessions.insert(master, session_id, cluster.clone(), move || EncryptionSessionImpl::new(EncryptionSessionParams {
			id: session_id.clone(),
			self_node_id: self.self_node_id.clone(),
			encrypted_data: encrypted_data,
			key_storage: self.key_storage.clone(),
			cluster: cluster,
		}))
	}

	/// Send encryption session error.
	pub fn respond_with_encryption_error(&self, session_id: &SessionId, error: message::EncryptionSessionError) {
		self.encryption_sessions.sessions.read().get(session_id)
			.map(|s| {
				// error in encryption session is considered fatal
				// => broadcast error

				// do not bother processing send error, as we are already processing an error
				let _ = s.cluster_view.broadcast(Message::Encryption(EncryptionMessage::EncryptionSessionError(error)));
			});
	}

	/// Create new decryption session.
	pub fn new_decryption_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, cluster: Arc<ClusterView>, requester_signature: Option<Signature>) -> Result<Arc<DecryptionSessionImpl>, Error> {
		let session_id = DecryptionSessionId::new(session_id, sub_session_id);
		let encrypted_data = self.read_key_share(&session_id.id, &cluster)?;

		self.decryption_sessions.insert(master, session_id.clone(), cluster.clone(), move || DecryptionSessionImpl::new(DecryptionSessionParams {
			meta: SessionMeta {
				id: session_id.id,
				self_node_id: self.self_node_id.clone(),
				master_node_id: master,
				threshold: encrypted_data.threshold,
			},
			access_key: session_id.access_key,
			key_share: encrypted_data,
			acl_storage: self.acl_storage.clone(),
			cluster: cluster,
		}, requester_signature))
	}

	/// Send decryption session error.
	pub fn respond_with_decryption_error(&self, session_id: &SessionId, sub_session_id: &Secret, to: &NodeId, error: message::DecryptionSessionError) {
		let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone());
		self.decryption_sessions.sessions.read().get(&session_id)
			.map(|s| {
				// error in decryption session is non-fatal, if occurs on slave node
				// => either respond with error
				// => or broadcast error

				// do not bother processing send error, as we are already processing an error
				if s.master == self.self_node_id {
					let _ = s.cluster_view.broadcast(Message::Decryption(DecryptionMessage::DecryptionSessionError(error)));
				} else {
					let _ = s.cluster_view.send(to, Message::Decryption(DecryptionMessage::DecryptionSessionError(error)));
				}
			});
	}

	/// Create new signing session.
	pub fn new_signing_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, cluster: Arc<ClusterView>, requester_signature: Option<Signature>) -> Result<Arc<SigningSessionImpl>, Error> {
		let session_id = SigningSessionId::new(session_id, sub_session_id);
		let encrypted_data = self.read_key_share(&session_id.id, &cluster)?;

		self.signing_sessions.insert(master, session_id.clone(), cluster.clone(), move || SigningSessionImpl::new(SigningSessionParams {
			meta: SessionMeta {
				id: session_id.id,
				self_node_id: self.self_node_id.clone(),
				master_node_id: master,
				threshold: encrypted_data.threshold,
			},
			access_key: session_id.access_key,
			key_share: encrypted_data,
			acl_storage: self.acl_storage.clone(),
			cluster: cluster,
		}, requester_signature))
	}

	/// Send signing session error.
	pub fn respond_with_signing_error(&self, session_id: &SessionId, sub_session_id: &Secret, to: &NodeId, error: message::SigningSessionError) {
		let session_id = SigningSessionId::new(session_id.clone(), sub_session_id.clone());
		self.signing_sessions.sessions.read().get(&session_id)
			.map(|s| {
				// error in signing session is non-fatal, if occurs on slave node
				// => either respond with error
				// => or broadcast error

				// do not bother processing send error, as we are already processing an error
				if s.master == self.self_node_id {
					let _ = s.cluster_view.broadcast(Message::Signing(SigningMessage::SigningSessionError(error)));
				} else {
					let _ = s.cluster_view.send(to, Message::Signing(SigningMessage::SigningSessionError(error)));
				}
			});
	}
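
The `respond_with_*_error` helpers for the consensus-based sessions share one routing rule: an error on the master node is fatal for everyone and is broadcast, while a slave only reports back to the node it was talking to. A distilled sketch of that decision, with hypothetical `NodeId` and `Delivery` types:

type NodeId = u64;

#[derive(Debug)]
enum Delivery {
	/// Master: every node must learn the session failed.
	Broadcast(String),
	/// Slave: only the peer that triggered the error is told.
	SendTo(NodeId, String),
}

fn route_error(self_node: NodeId, master: NodeId, reply_to: NodeId, error: String) -> Delivery {
	if master == self_node {
		Delivery::Broadcast(error)
	} else {
		Delivery::SendTo(reply_to, error)
	}
}

fn main() {
	// on the master node the error is broadcast...
	println!("{:?}", route_error(1, 1, 2, "access denied".into()));
	// ...on a slave it is returned to the sender only
	println!("{:?}", route_error(2, 1, 3, "access denied".into()));
}
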
	/// Stop sessions that are stalling.
	pub fn stop_stalled_sessions(&self) {
		self.generation_sessions.stop_stalled_sessions();
		self.encryption_sessions.stop_stalled_sessions();
		self.decryption_sessions.stop_stalled_sessions();
		self.signing_sessions.stop_stalled_sessions();
	}

	/// When connection to node is lost.
	pub fn on_connection_timeout(&self, node_id: &NodeId) {
		self.generation_sessions.on_connection_timeout(node_id);
		self.encryption_sessions.on_connection_timeout(node_id);
		self.decryption_sessions.on_connection_timeout(node_id);
		self.signing_sessions.on_connection_timeout(node_id);
	}

	/// Read key share && remove disconnected nodes.
	fn read_key_share(&self, key_id: &SessionId, cluster: &Arc<ClusterView>) -> Result<DocumentKeyShare, Error> {
		let mut encrypted_data = self.key_storage.get(key_id).map_err(|e| Error::KeyStorage(e.into()))?;

		// some of nodes, which were encrypting secret may be down
		// => do not use these in session
		let disconnected_nodes: BTreeSet<_> = encrypted_data.id_numbers.keys().cloned().collect();
		for disconnected_node in disconnected_nodes.difference(&cluster.nodes()) {
			encrypted_data.id_numbers.remove(&disconnected_node);
		}
		Ok(encrypted_data)
	}
}
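
`read_key_share` prunes share holders that are currently unreachable before a decryption or signing session starts, so consensus is only attempted among connected nodes. The set arithmetic reduces to the following sketch (hypothetical `NodeId` alias; the real code prunes the `id_numbers` map of a `DocumentKeyShare` the same way):

use std::collections::{BTreeMap, BTreeSet};

type NodeId = u64;

/// Remove key-share entries of nodes that are not currently connected.
fn prune_disconnected(id_numbers: &mut BTreeMap<NodeId, u64>, connected: &BTreeSet<NodeId>) {
	// snapshot the holders first, so the map is not borrowed while we mutate it
	let holders: BTreeSet<NodeId> = id_numbers.keys().cloned().collect();
	for disconnected in holders.difference(connected) {
		id_numbers.remove(disconnected);
	}
}

fn main() {
	let mut id_numbers: BTreeMap<NodeId, u64> = vec![(1, 10), (2, 20), (3, 30)].into_iter().collect();
	let connected: BTreeSet<NodeId> = vec![1, 3].into_iter().collect();
	prune_disconnected(&mut id_numbers, &connected);
	assert_eq!(id_numbers.keys().cloned().collect::<Vec<_>>(), vec![1, 3]);
}

If pruning leaves fewer than `threshold + 1` holders, the session presumably fails later, during initialization or consensus establishment, rather than here.
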
impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: ClusterSession {
	pub fn new() -> Self {
		ClusterSessionsContainer {
			sessions: RwLock::new(BTreeMap::new()),
		}
	}

	pub fn get(&self, session_id: &K) -> Option<Arc<V>> {
		self.sessions.read().get(session_id).map(|s| s.session.clone())
	}

	pub fn insert<F: FnOnce() -> Result<V, Error>>(&self, master: NodeId, session_id: K, cluster: Arc<ClusterView>, session: F) -> Result<Arc<V>, Error> {
		let mut sessions = self.sessions.write();
		if sessions.contains_key(&session_id) {
			return Err(Error::DuplicateSessionId);
		}

		let session = Arc::new(session()?);
		let queued_session = QueuedSession {
			master: master,
			cluster_view: cluster,
			last_message_time: time::Instant::now(),
			session: session.clone(),
			queue: VecDeque::new(),
		};
		sessions.insert(session_id, queued_session);
		Ok(session)
	}

	pub fn remove(&self, session_id: &K) {
		self.sessions.write().remove(session_id);
	}

	pub fn enqueue_message(&self, session_id: &K, sender: NodeId, message: M, is_queued_message: bool) {
		self.sessions.write().get_mut(session_id)
			.map(|session| if is_queued_message { session.queue.push_front((sender, message)) }
				else { session.queue.push_back((sender, message)) });
	}

	pub fn dequeue_message(&self, session_id: &K) -> Option<(NodeId, M)> {
		self.sessions.write().get_mut(session_id)
			.and_then(|session| session.queue.pop_front())
	}

	pub fn stop_stalled_sessions(&self) {
		let mut sessions = self.sessions.write();
		for sid in sessions.keys().cloned().collect::<Vec<_>>() {
			let remove_session = {
				let session = sessions.get(&sid).expect("enumerating only existing sessions; qed");
				if time::Instant::now() - session.last_message_time > time::Duration::from_secs(SESSION_TIMEOUT_INTERVAL) {
					session.session.on_session_timeout();
					session.session.is_finished()
				} else {
					false
				}
			};

			if remove_session {
				sessions.remove(&sid);
			}
		}
	}

	pub fn on_connection_timeout(&self, node_id: &NodeId) {
		let mut sessions = self.sessions.write();
		for sid in sessions.keys().cloned().collect::<Vec<_>>() {
			let remove_session = {
				let session = sessions.get(&sid).expect("enumerating only existing sessions; qed");
				session.session.on_node_timeout(node_id);
				session.session.is_finished()
			};
			if remove_session {
				sessions.remove(&sid);
			}
		}
	}
}
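
Both sweeps above follow the same two-phase shape: snapshot the session ids, let each session react while the write lock is held, and only then remove the finished ones, since removing from the `BTreeMap` while iterating over it is not possible under Rust's borrow rules. Distilled to a minimal sketch:

use std::collections::BTreeMap;

fn sweep_finished(sessions: &mut BTreeMap<u64, bool /* is_finished */>) {
	// phase 1: snapshot the keys so the map itself is not borrowed by an iterator
	let ids: Vec<u64> = sessions.keys().cloned().collect();
	for id in ids {
		// phase 2: inspect, then remove outside of any iteration over the map
		let finished = sessions.get(&id).copied().unwrap_or(false);
		if finished {
			sessions.remove(&id);
		}
	}
}

fn main() {
	let mut sessions: BTreeMap<u64, bool> = vec![(1, true), (2, false), (3, true)].into_iter().collect();
	sweep_finished(&mut sessions);
	assert_eq!(sessions.keys().cloned().collect::<Vec<_>>(), vec![2]);
}
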
impl GenerationSessionWrapper {
	pub fn new(cluster: Weak<ClusterData>, session_id: SessionId, session: Arc<GenerationSession>) -> Arc<Self> {
		Arc::new(GenerationSessionWrapper {
			session: session,
			session_id: session_id,
			cluster: cluster,
		})
	}
}

impl GenerationSession for GenerationSessionWrapper {
	fn state(&self) -> GenerationSessionState {
		self.session.state()
	}

	fn wait(&self, timeout: Option<time::Duration>) -> Result<Public, Error> {
		self.session.wait(timeout)
	}

	fn joint_public_and_secret(&self) -> Option<Result<(Public, Secret), Error>> {
		self.session.joint_public_and_secret()
	}
}

impl Drop for GenerationSessionWrapper {
	fn drop(&mut self) {
		if let Some(cluster) = self.cluster.upgrade() {
			cluster.sessions().generation_sessions.remove(&self.session_id);
		}
	}
}
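
This `Drop` impl, repeated for each wrapper type below, gives the client-facing handle RAII semantics: when the last external reference goes away, the session unregisters itself from its container, and the `Weak` back-reference keeps a lingering handle from prolonging the cluster's lifetime. A stripped-down sketch of the same idiom, with a hypothetical `Registry` standing in for `ClusterSessions`:

use std::collections::BTreeSet;
use std::sync::{Arc, Mutex, Weak};

/// Hypothetical session registry keyed by session id.
struct Registry {
	active: Mutex<BTreeSet<u64>>,
}

/// Handle that deregisters its session when dropped.
struct SessionHandle {
	session_id: u64,
	registry: Weak<Registry>,
}

impl Drop for SessionHandle {
	fn drop(&mut self) {
		// if the registry is already gone, there is nothing to clean up
		if let Some(registry) = self.registry.upgrade() {
			registry.active.lock().unwrap().remove(&self.session_id);
		}
	}
}

fn main() {
	let registry = Arc::new(Registry { active: Mutex::new(BTreeSet::new()) });
	registry.active.lock().unwrap().insert(7);
	{
		let _handle = SessionHandle { session_id: 7, registry: Arc::downgrade(&registry) };
	} // handle dropped here => session 7 deregistered
	assert!(registry.active.lock().unwrap().is_empty());
}
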
impl EncryptionSessionWrapper {
	pub fn new(cluster: Weak<ClusterData>, session_id: SessionId, session: Arc<EncryptionSession>) -> Arc<Self> {
		Arc::new(EncryptionSessionWrapper {
			session: session,
			session_id: session_id,
			cluster: cluster,
		})
	}
}

impl EncryptionSession for EncryptionSessionWrapper {
	fn state(&self) -> EncryptionSessionState {
		self.session.state()
	}

	fn wait(&self, timeout: Option<time::Duration>) -> Result<(), Error> {
		self.session.wait(timeout)
	}
}

impl Drop for EncryptionSessionWrapper {
	fn drop(&mut self) {
		if let Some(cluster) = self.cluster.upgrade() {
			cluster.sessions().encryption_sessions.remove(&self.session_id);
		}
	}
}

impl DecryptionSessionWrapper {
	pub fn new(cluster: Weak<ClusterData>, session_id: DecryptionSessionId, session: Arc<DecryptionSession>) -> Arc<Self> {
		Arc::new(DecryptionSessionWrapper {
			session: session,
			session_id: session_id,
			cluster: cluster,
		})
	}
}

impl DecryptionSession for DecryptionSessionWrapper {
	fn wait(&self) -> Result<EncryptedDocumentKeyShadow, Error> {
		self.session.wait()
	}
}

impl Drop for DecryptionSessionWrapper {
	fn drop(&mut self) {
		if let Some(cluster) = self.cluster.upgrade() {
			cluster.sessions().decryption_sessions.remove(&self.session_id);
		}
	}
}

impl SigningSessionWrapper {
	pub fn new(cluster: Weak<ClusterData>, session_id: SigningSessionId, session: Arc<SigningSession>) -> Arc<Self> {
		Arc::new(SigningSessionWrapper {
			session: session,
			session_id: session_id,
			cluster: cluster,
		})
	}
}

impl SigningSession for SigningSessionWrapper {
	fn wait(&self) -> Result<(Secret, Secret), Error> {
		self.session.wait()
	}
}

impl Drop for SigningSessionWrapper {
	fn drop(&mut self) {
		if let Some(cluster) = self.cluster.upgrade() {
			cluster.sessions().signing_sessions.remove(&self.session_id);
		}
	}
}

(two file diffs suppressed because they are too large)

secret_store/src/key_server_cluster/generation_session.rs (new file, 1256 lines; diff suppressed because it is too large)

@ -25,7 +25,8 @@ use ethkey::{Public, Secret, KeyPair};
use ethkey::math::curve_order;
use util::{H256, U256};
use key_server_cluster::Error;
use key_server_cluster::message::{Message, ClusterMessage, EncryptionMessage, DecryptionMessage};
use key_server_cluster::message::{Message, ClusterMessage, GenerationMessage, EncryptionMessage,
	DecryptionMessage, SigningMessage};

/// Size of serialized header.
pub const MESSAGE_HEADER_SIZE: usize = 4;
@ -67,20 +68,30 @@ pub fn serialize_message(message: Message) -> Result<SerializedMessage, Error> {
		Message::Cluster(ClusterMessage::KeepAlive(payload)) => (3, serde_json::to_vec(&payload)),
		Message::Cluster(ClusterMessage::KeepAliveResponse(payload)) => (4, serde_json::to_vec(&payload)),

		Message::Encryption(EncryptionMessage::InitializeSession(payload)) => (50, serde_json::to_vec(&payload)),
		Message::Encryption(EncryptionMessage::ConfirmInitialization(payload)) => (51, serde_json::to_vec(&payload)),
		Message::Encryption(EncryptionMessage::CompleteInitialization(payload)) => (52, serde_json::to_vec(&payload)),
		Message::Encryption(EncryptionMessage::KeysDissemination(payload)) => (53, serde_json::to_vec(&payload)),
		Message::Encryption(EncryptionMessage::PublicKeyShare(payload)) => (54, serde_json::to_vec(&payload)),
		Message::Encryption(EncryptionMessage::SessionError(payload)) => (55, serde_json::to_vec(&payload)),
		Message::Encryption(EncryptionMessage::SessionCompleted(payload)) => (56, serde_json::to_vec(&payload)),
		Message::Generation(GenerationMessage::InitializeSession(payload)) => (50, serde_json::to_vec(&payload)),
		Message::Generation(GenerationMessage::ConfirmInitialization(payload)) => (51, serde_json::to_vec(&payload)),
		Message::Generation(GenerationMessage::CompleteInitialization(payload)) => (52, serde_json::to_vec(&payload)),
		Message::Generation(GenerationMessage::KeysDissemination(payload)) => (53, serde_json::to_vec(&payload)),
		Message::Generation(GenerationMessage::PublicKeyShare(payload)) => (54, serde_json::to_vec(&payload)),
		Message::Generation(GenerationMessage::SessionError(payload)) => (55, serde_json::to_vec(&payload)),
		Message::Generation(GenerationMessage::SessionCompleted(payload)) => (56, serde_json::to_vec(&payload)),

		Message::Decryption(DecryptionMessage::InitializeDecryptionSession(payload)) => (100, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(payload)) => (101, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::RequestPartialDecryption(payload)) => (102, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::PartialDecryption(payload)) => (103, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::DecryptionSessionError(payload)) => (104, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(payload)) => (105, serde_json::to_vec(&payload)),
		Message::Encryption(EncryptionMessage::InitializeEncryptionSession(payload)) => (100, serde_json::to_vec(&payload)),
		Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization(payload)) => (101, serde_json::to_vec(&payload)),
		Message::Encryption(EncryptionMessage::EncryptionSessionError(payload)) => (102, serde_json::to_vec(&payload)),

		Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(payload)) => (150, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::RequestPartialDecryption(payload)) => (151, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::PartialDecryption(payload)) => (152, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::DecryptionSessionError(payload)) => (153, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(payload)) => (154, serde_json::to_vec(&payload)),

		Message::Signing(SigningMessage::SigningConsensusMessage(payload)) => (200, serde_json::to_vec(&payload)),
		Message::Signing(SigningMessage::SigningGenerationMessage(payload)) => (201, serde_json::to_vec(&payload)),
		Message::Signing(SigningMessage::RequestPartialSignature(payload)) => (202, serde_json::to_vec(&payload)),
		Message::Signing(SigningMessage::PartialSignature(payload)) => (203, serde_json::to_vec(&payload)),
		Message::Signing(SigningMessage::SigningSessionError(payload)) => (204, serde_json::to_vec(&payload)),
		Message::Signing(SigningMessage::SigningSessionCompleted(payload)) => (205, serde_json::to_vec(&payload)),
	};

	let payload = payload.map_err(|err| Error::Serde(err.to_string()))?;
@ -99,20 +110,30 @@ pub fn deserialize_message(header: &MessageHeader, payload: Vec<u8>) -> Result<M
		3 => Message::Cluster(ClusterMessage::KeepAlive(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		4 => Message::Cluster(ClusterMessage::KeepAliveResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		50 => Message::Encryption(EncryptionMessage::InitializeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		51 => Message::Encryption(EncryptionMessage::ConfirmInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		52 => Message::Encryption(EncryptionMessage::CompleteInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		53 => Message::Encryption(EncryptionMessage::KeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		54 => Message::Encryption(EncryptionMessage::PublicKeyShare(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		55 => Message::Encryption(EncryptionMessage::SessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		56 => Message::Encryption(EncryptionMessage::SessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		50 => Message::Generation(GenerationMessage::InitializeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		51 => Message::Generation(GenerationMessage::ConfirmInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		52 => Message::Generation(GenerationMessage::CompleteInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		53 => Message::Generation(GenerationMessage::KeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		54 => Message::Generation(GenerationMessage::PublicKeyShare(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		55 => Message::Generation(GenerationMessage::SessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		56 => Message::Generation(GenerationMessage::SessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		100 => Message::Decryption(DecryptionMessage::InitializeDecryptionSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		101 => Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
102 => Message::Decryption(DecryptionMessage::RequestPartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
103 => Message::Decryption(DecryptionMessage::PartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
104 => Message::Decryption(DecryptionMessage::DecryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
105 => Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
100 => Message::Encryption(EncryptionMessage::InitializeEncryptionSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
101 => Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
102 => Message::Encryption(EncryptionMessage::EncryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
|
||||
150 => Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
151 => Message::Decryption(DecryptionMessage::RequestPartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
152 => Message::Decryption(DecryptionMessage::PartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
153 => Message::Decryption(DecryptionMessage::DecryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
154 => Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
|
||||
200 => Message::Signing(SigningMessage::SigningConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
201 => Message::Signing(SigningMessage::SigningGenerationMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
202 => Message::Signing(SigningMessage::RequestPartialSignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
203 => Message::Signing(SigningMessage::PartialSignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
204 => Message::Signing(SigningMessage::SigningSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
205 => Message::Signing(SigningMessage::SigningSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||
|
||||
_ => return Err(Error::Serde(format!("unknown message type {}", header.kind))),
|
||||
})
|
||||
|
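
After this change the numeric message kinds are partitioned by session type: cluster messages keep the low range, generation takes 50–56, encryption moves to 100–102, decryption to 150–154, and signing to 200–205; an unknown kind is rejected with Error::Serde. A stand-alone sketch of that layout (the function name is illustrative and the lower bound of the cluster range is an assumption; only kinds 3 and 4 appear in the hunks above):

// Illustrative classifier for the kind ranges used by serialize_message and
// deserialize_message above; not part of this commit.
fn session_family(kind: u64) -> Result<&'static str, String> {
	Ok(match kind {
		1..=4 => "cluster", // assumed lower bound; kinds 3 and 4 are shown above
		50..=56 => "generation",
		100..=102 => "encryption",
		150..=154 => "decryption",
		200..=205 => "signing",
		_ => return Err(format!("unknown message type {}", kind)),
	})
}
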
756
secret_store/src/key_server_cluster/jobs/consensus_session.rs
Normal file
@ -0,0 +1,756 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::collections::BTreeSet;
use std::sync::Arc;
use ethkey::{Public, Signature, recover};
use key_server_cluster::{Error, NodeId, SessionMeta, AclStorage};
use key_server_cluster::message::ConsensusMessage;
use key_server_cluster::jobs::job_session::{JobSession, JobSessionState, JobTransport, JobExecutor};
use key_server_cluster::jobs::key_access_job::KeyAccessJob;

/// Consensus session state.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ConsensusSessionState {
	/// Every node starts in this state.
	WaitingForInitialization,
	/// Consensus group is being established.
	EstablishingConsensus,
	/// Consensus group is established.
	/// Master node can start jobs dissemination.
	/// Slave node waits for partial job requests.
	ConsensusEstablished,
	/// Master node waits for partial jobs responses.
	WaitingForPartialResults,
	/// Consensus session is completed successfully.
	/// Master node can call result() to get computation result.
	Finished,
	/// Consensus session has failed with error.
	Failed,
}

/// Consensus session proceeds through the following steps:
/// 1) consensus group is established;
/// 2) master node sends partial job requests to every member of the consensus group;
/// 3) slave nodes compute partial responses;
/// 4) master node computes the final result from the partial responses
/// (an illustrative model of the resulting state transitions follows the struct definition below).
pub struct ConsensusSession<ConsensusTransport: JobTransport<PartialJobRequest=Signature, PartialJobResponse=bool>, ComputationExecutor: JobExecutor, ComputationTransport: JobTransport<PartialJobRequest=ComputationExecutor::PartialJobRequest, PartialJobResponse=ComputationExecutor::PartialJobResponse>> {
	/// Current session state.
	state: ConsensusSessionState,
	/// Session metadata.
	meta: SessionMeta,
	/// Requester for whom the consensus group has allowed access.
	requester: Option<Public>,
	/// Consensus establish job.
	consensus_job: JobSession<KeyAccessJob, ConsensusTransport>,
	/// Consensus group.
	consensus_group: BTreeSet<NodeId>,
	/// Computation job.
	computation_job: Option<JobSession<ComputationExecutor, ComputationTransport>>,
}
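
// Illustrative aside (not part of this commit): ConsensusSession is a small
// state machine. The model below condenses the transitions driven by
// initialize(), on_consensus_message(), disseminate_jobs(), on_job_response()
// and on_node_error(); `ModelEvent` and `model_step` are hypothetical names
// and the events are summaries of the real triggers.
#[derive(Debug, Clone, Copy, PartialEq)]
enum ModelEvent { Initialize, ConsensusReached, JobsDisseminated, AllResponsesReceived, NodeErrorRequiringRestart, FatalError }

fn model_step(state: ConsensusSessionState, event: ModelEvent) -> ConsensusSessionState {
	use self::ConsensusSessionState::*;
	use self::ModelEvent::*;
	match (state, event) {
		(WaitingForInitialization, Initialize) => EstablishingConsensus,
		(EstablishingConsensus, ConsensusReached) => ConsensusEstablished,
		(ConsensusEstablished, JobsDisseminated) => WaitingForPartialResults,
		(WaitingForPartialResults, AllResponsesReceived) => Finished,
		(WaitingForPartialResults, NodeErrorRequiringRestart) => EstablishingConsensus,
		(_, FatalError) => Failed,
		(state, _) => state,
	}
}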

/// Consensus session creation parameters.
pub struct ConsensusSessionParams<ConsensusTransport: JobTransport<PartialJobRequest=Signature, PartialJobResponse=bool>> {
	/// Session metadata.
	pub meta: SessionMeta,
	/// ACL storage for access check.
	pub acl_storage: Arc<AclStorage>,
	/// Transport for consensus establish job.
	pub consensus_transport: ConsensusTransport,
}

impl<ConsensusTransport, ComputationExecutor, ComputationTransport> ConsensusSession<ConsensusTransport, ComputationExecutor, ComputationTransport> where ConsensusTransport: JobTransport<PartialJobRequest=Signature, PartialJobResponse=bool>, ComputationExecutor: JobExecutor, ComputationTransport: JobTransport<PartialJobRequest=ComputationExecutor::PartialJobRequest, PartialJobResponse=ComputationExecutor::PartialJobResponse> {
	/// Create new consensus session on slave node.
	pub fn new_on_slave(params: ConsensusSessionParams<ConsensusTransport>) -> Result<Self, Error> {
		debug_assert!(params.meta.self_node_id != params.meta.master_node_id);
		Self::new(None, KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()), params)
	}

	/// Create new consensus session on master node.
	pub fn new_on_master(params: ConsensusSessionParams<ConsensusTransport>, signature: Signature) -> Result<Self, Error> {
		debug_assert!(params.meta.self_node_id == params.meta.master_node_id);
		Self::new(Some(recover(&signature, &params.meta.id)?),
			KeyAccessJob::new_on_master(params.meta.id.clone(), params.acl_storage.clone(), signature), params)
	}

	/// Create new consensus session.
	fn new(requester: Option<Public>, consensus_job_executor: KeyAccessJob, params: ConsensusSessionParams<ConsensusTransport>) -> Result<Self, Error> {
		let consensus_job = JobSession::new(params.meta.clone(), consensus_job_executor, params.consensus_transport);
		debug_assert!(consensus_job.state() == JobSessionState::Inactive);

		Ok(ConsensusSession {
			state: ConsensusSessionState::WaitingForInitialization,
			meta: params.meta,
			requester: requester,
			consensus_job: consensus_job,
			consensus_group: BTreeSet::new(),
			computation_job: None,
		})
	}

	/// Get consensus job reference.
	#[cfg(test)]
	pub fn consensus_job(&self) -> &JobSession<KeyAccessJob, ConsensusTransport> {
		&self.consensus_job
	}

	/// Get computation job reference.
	#[cfg(test)]
	pub fn computation_job(&self) -> &JobSession<ComputationExecutor, ComputationTransport> {
		self.computation_job.as_ref()
			.expect("computation_job must only be called on master nodes")
	}

	/// Get consensus session state.
	pub fn state(&self) -> ConsensusSessionState {
		self.state
	}

	/// Get the requester for whom consensus has been reached.
	pub fn requester(&self) -> Result<&Public, Error> {
		self.requester.as_ref().ok_or(Error::InvalidStateForRequest)
	}

	/// Get computation result.
	pub fn result(&self) -> Result<ComputationExecutor::JobResponse, Error> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);
		if self.state != ConsensusSessionState::Finished {
			return Err(Error::InvalidStateForRequest);
		}

		self.computation_job.as_ref()
			.expect("we are on master node in finished state; computation_job is set on master node during initialization; qed")
			.result()
	}

	/// Initialize session on master node.
	pub fn initialize(&mut self, nodes: BTreeSet<NodeId>) -> Result<(), Error> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);
		let initialization_result = self.consensus_job.initialize(nodes);
		self.state = ConsensusSessionState::EstablishingConsensus;
		self.process_result(initialization_result)
	}

	/// Process consensus message.
	pub fn on_consensus_message(&mut self, sender: &NodeId, message: &ConsensusMessage) -> Result<(), Error> {
		let consensus_result = match message {
			&ConsensusMessage::InitializeConsensusSession(ref message) => {
				let signature = message.requestor_signature.clone().into();
				self.requester = Some(recover(&signature, &self.meta.id)?);
				self.consensus_job.on_partial_request(sender, signature)
			},
			&ConsensusMessage::ConfirmConsensusInitialization(ref message) =>
				self.consensus_job.on_partial_response(sender, message.is_confirmed),
		};
		self.process_result(consensus_result)
	}

	/// Select nodes for processing partial requests.
	pub fn select_consensus_group(&mut self) -> Result<&BTreeSet<NodeId>, Error> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);
		if self.state != ConsensusSessionState::ConsensusEstablished {
			return Err(Error::InvalidStateForRequest);
		}

		if self.consensus_group.is_empty() {
			let consensus_group = self.consensus_job.result()?;
			let is_self_in_consensus = consensus_group.contains(&self.meta.self_node_id);
			self.consensus_group = consensus_group.into_iter().take(self.meta.threshold + 1).collect();

			if is_self_in_consensus {
				self.consensus_group.remove(&self.meta.master_node_id);
				self.consensus_group.insert(self.meta.master_node_id.clone());
			}
		}

		Ok(&self.consensus_group)
	}
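
	// Illustrative aside (not part of this commit): the selection rule above,
	// reproduced stand-alone with hypothetical u64 node ids. The first
	// threshold + 1 confirmed nodes are taken in id order, and the master is
	// forced into the group whenever it confirmed access itself, so that it
	// can compute its own partial response locally.
	#[allow(dead_code)]
	fn select_group_model(confirmed: BTreeSet<u64>, master: u64, threshold: usize) -> BTreeSet<u64> {
		let is_master_confirmed = confirmed.contains(&master);
		let mut group: BTreeSet<u64> = confirmed.into_iter().take(threshold + 1).collect();
		if is_master_confirmed {
			group.remove(&master);
			group.insert(master);
		}
		group
	}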

	/// Disseminate jobs from master node.
	pub fn disseminate_jobs(&mut self, executor: ComputationExecutor, transport: ComputationTransport) -> Result<(), Error> {
		let consensus_group = self.select_consensus_group()?.clone();
		self.consensus_group.clear();

		let mut computation_job = JobSession::new(self.meta.clone(), executor, transport);
		let computation_result = computation_job.initialize(consensus_group);
		self.computation_job = Some(computation_job);
		self.state = ConsensusSessionState::WaitingForPartialResults;
		self.process_result(computation_result)
	}

	/// Process job request on slave node.
	pub fn on_job_request(&mut self, node: &NodeId, request: ComputationExecutor::PartialJobRequest, executor: ComputationExecutor, transport: ComputationTransport) -> Result<(), Error> {
		if &self.meta.master_node_id != node {
			return Err(Error::InvalidMessage);
		}
		if self.state != ConsensusSessionState::ConsensusEstablished {
			return Err(Error::InvalidStateForRequest);
		}

		JobSession::new(self.meta.clone(), executor, transport).on_partial_request(node, request)
	}

	/// Process job response on master node.
	pub fn on_job_response(&mut self, node: &NodeId, response: ComputationExecutor::PartialJobResponse) -> Result<(), Error> {
		if self.state != ConsensusSessionState::WaitingForPartialResults {
			return Err(Error::InvalidStateForRequest);
		}

		let computation_result = self.computation_job.as_mut()
			.expect("WaitingForPartialResults is only set when computation_job is created; qed")
			.on_partial_response(node, response);
		self.process_result(computation_result)
	}

	/// When session is completed on slave node.
	pub fn on_session_completed(&mut self, node: &NodeId) -> Result<(), Error> {
		if node != &self.meta.master_node_id {
			return Err(Error::InvalidMessage);
		}
		if self.state != ConsensusSessionState::ConsensusEstablished {
			return Err(Error::InvalidStateForRequest);
		}

		self.state = ConsensusSessionState::Finished;

		Ok(())
	}

	/// When error is received from node.
	pub fn on_node_error(&mut self, node: &NodeId) -> Result<bool, Error> {
		let is_self_master = self.meta.master_node_id == self.meta.self_node_id;
		let is_node_master = self.meta.master_node_id == *node;
		let (is_restart_needed, timeout_result) = match self.state {
			ConsensusSessionState::WaitingForInitialization if is_self_master => {
				// it is strange to receive an error before the session is initialized
				// and the slave does not know the access_key
				// => ignore this error for now
				(false, Ok(()))
			}
			ConsensusSessionState::WaitingForInitialization if is_node_master => {
				// cannot establish consensus
				// => fatal error
				self.state = ConsensusSessionState::Failed;
				(false, Err(Error::ConsensusUnreachable))
			},
			ConsensusSessionState::EstablishingConsensus => {
				debug_assert!(is_self_master);

				// consensus still can be established
				// => try to live without this node
				(false, self.consensus_job.on_node_error(node))
			},
			ConsensusSessionState::ConsensusEstablished => {
				// we could try to continue without this node, if enough nodes left
				(false, self.consensus_job.on_node_error(node))
			},
			ConsensusSessionState::WaitingForPartialResults => {
				// check if *current* computation job can continue without this node
				let is_computation_node = self.computation_job.as_mut()
					.expect("WaitingForPartialResults state is only set when computation_job is created; qed")
					.on_node_error(node)
					.is_err();
				if !is_computation_node {
					// it is not used by current computation job
					// => no restart required
					(false, Ok(()))
				} else {
					// it is used by current computation job
					// => restart is required if there are still enough nodes
					self.consensus_group.clear();
					self.state = ConsensusSessionState::EstablishingConsensus;

					let consensus_result = self.consensus_job.on_node_error(node);
					let is_consensus_established = self.consensus_job.state() == JobSessionState::Finished;
					(is_consensus_established, consensus_result)
				}
			},
			// in all other cases - just ignore error
			ConsensusSessionState::WaitingForInitialization | ConsensusSessionState::Failed | ConsensusSessionState::Finished => (false, Ok(())),
		};
		self.process_result(timeout_result)?;
		Ok(is_restart_needed)
	}
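
	// Illustrative aside (not part of this commit): a condensed decision table
	// for on_node_error above; `error_outcome_model` and its string labels are
	// hypothetical documentation aids.
	#[allow(dead_code)]
	fn error_outcome_model(state: ConsensusSessionState, node_is_master: bool, node_used_by_computation: bool) -> &'static str {
		use self::ConsensusSessionState::*;
		match (state, node_is_master, node_used_by_computation) {
			(WaitingForInitialization, true, _) => "fatal: consensus unreachable",
			(WaitingForInitialization, false, _) => "ignored",
			(EstablishingConsensus, _, _) | (ConsensusEstablished, _, _) => "drop node from consensus job; fatal only if consensus becomes unreachable",
			(WaitingForPartialResults, _, false) => "ignored",
			(WaitingForPartialResults, _, true) => "restart: back to EstablishingConsensus if enough nodes are left",
			(Finished, _, _) | (Failed, _, _) => "ignored",
		}
	}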

	/// When the session times out.
	pub fn on_session_timeout(&mut self) -> Result<bool, Error> {
		match self.state {
			// if we are waiting for results from slaves, there is a chance to send requests to another subset of nodes => fall through
			ConsensusSessionState::WaitingForPartialResults => (),
			// in some states this error is fatal
			ConsensusSessionState::WaitingForInitialization | ConsensusSessionState::EstablishingConsensus | ConsensusSessionState::ConsensusEstablished => {
				let _ = self.consensus_job.on_session_timeout();

				self.consensus_group.clear();
				self.state = ConsensusSessionState::EstablishingConsensus;
				return self.process_result(Err(Error::ConsensusUnreachable)).map(|_| unreachable!());
			},
			// in all other cases - just ignore error
			ConsensusSessionState::Finished | ConsensusSessionState::Failed => return Ok(false),
		};

		let timed_out_nodes = self.computation_job.as_ref()
			.expect("WaitingForPartialResults state is only set when computation_job is created; qed")
			.requests()
			.clone();
		assert!(!timed_out_nodes.is_empty()); // timeout should never happen when no requests are active && we are waiting for responses

		self.consensus_group.clear();
		for timed_out_node in timed_out_nodes {
			let timeout_result = self.consensus_job.on_node_error(&timed_out_node);
			self.state = ConsensusSessionState::EstablishingConsensus;
			self.process_result(timeout_result)?;
		}

		Ok(self.state == ConsensusSessionState::ConsensusEstablished)
	}

	/// Process result of job.
	fn process_result(&mut self, result: Result<(), Error>) -> Result<(), Error> {
		match self.state {
			ConsensusSessionState::WaitingForInitialization | ConsensusSessionState::EstablishingConsensus | ConsensusSessionState::ConsensusEstablished => match self.consensus_job.state() {
				JobSessionState::Finished => self.state = ConsensusSessionState::ConsensusEstablished,
				JobSessionState::Failed => self.state = ConsensusSessionState::Failed,
				_ => (),
			},
			ConsensusSessionState::WaitingForPartialResults => match self.computation_job.as_ref()
				.expect("WaitingForPartialResults state is only set when computation_job is created; qed")
				.state() {
				JobSessionState::Finished => self.state = ConsensusSessionState::Finished,
				JobSessionState::Failed => self.state = ConsensusSessionState::Failed,
				_ => (),
			},
			_ => (),
		}

		result
	}
}

#[cfg(test)]
mod tests {
	use std::sync::Arc;
	use ethkey::{Signature, KeyPair, Random, Generator, sign};
	use key_server_cluster::{Error, NodeId, SessionId, DummyAclStorage};
	use key_server_cluster::message::{ConsensusMessage, InitializeConsensusSession, ConfirmConsensusInitialization};
	use key_server_cluster::jobs::job_session::tests::{make_master_session_meta, make_slave_session_meta, SquaredSumJobExecutor, DummyJobTransport};
	use super::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState};

	type SquaredSumConsensusSession = ConsensusSession<DummyJobTransport<Signature, bool>, SquaredSumJobExecutor, DummyJobTransport<u32, u32>>;

	fn make_master_consensus_session(threshold: usize, requester: Option<KeyPair>, acl_storage: Option<DummyAclStorage>) -> SquaredSumConsensusSession {
		let secret = requester.map(|kp| kp.secret().clone()).unwrap_or(Random.generate().unwrap().secret().clone());
		SquaredSumConsensusSession::new_on_master(ConsensusSessionParams {
			meta: make_master_session_meta(threshold),
			acl_storage: Arc::new(acl_storage.unwrap_or(DummyAclStorage::default())),
			consensus_transport: DummyJobTransport::default(),
		}, sign(&secret, &SessionId::default()).unwrap()).unwrap()
	}

	fn make_slave_consensus_session(threshold: usize, acl_storage: Option<DummyAclStorage>) -> SquaredSumConsensusSession {
		SquaredSumConsensusSession::new_on_slave(ConsensusSessionParams {
			meta: make_slave_session_meta(threshold),
			acl_storage: Arc::new(acl_storage.unwrap_or(DummyAclStorage::default())),
			consensus_transport: DummyJobTransport::default(),
		}).unwrap()
	}

	#[test]
	fn consensus_session_consensus_is_not_reached_when_initializes_with_non_zero_threshold() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
		session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
	}

	#[test]
	fn consensus_session_consensus_is_reached_when_initializes_with_zero_threshold() {
		let mut session = make_master_consensus_session(0, None, None);
		session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
	}

	#[test]
	fn consensus_session_consensus_is_not_reached_when_initializes_with_zero_threshold_and_master_rejects() {
		let requester = Random.generate().unwrap();
		let acl_storage = DummyAclStorage::default();
		acl_storage.prohibit(requester.public().clone(), SessionId::default());

		let mut session = make_master_consensus_session(0, Some(requester), Some(acl_storage));
		session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
		session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
	}

	#[test]
	fn consensus_session_consensus_is_failed_by_master_node() {
		let requester = Random.generate().unwrap();
		let acl_storage = DummyAclStorage::default();
		acl_storage.prohibit(requester.public().clone(), SessionId::default());

		let mut session = make_master_consensus_session(1, Some(requester), Some(acl_storage));
		assert_eq!(session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap_err(), Error::ConsensusUnreachable);
		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
	fn consensus_session_consensus_is_failed_by_slave_node() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
		assert_eq!(session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: false,
		})).unwrap_err(), Error::ConsensusUnreachable);
		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
	fn consensus_session_job_dissemination_fails_if_consensus_is_not_reached() {
		let mut session = make_master_consensus_session(1, None, None);
		assert_eq!(session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidStateForRequest);
	}

	#[test]
	fn consensus_session_job_dissemination_selects_master_node_if_agreed() {
		let mut session = make_master_consensus_session(0, None, None);
		session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::Finished);
		assert!(session.computation_job().responses().contains_key(&NodeId::from(1)));
	}

	#[test]
	fn consensus_session_job_dissemination_does_not_select_master_node_if_rejected() {
		let requester = Random.generate().unwrap();
		let acl_storage = DummyAclStorage::default();
		acl_storage.prohibit(requester.public().clone(), SessionId::default());

		let mut session = make_master_consensus_session(0, Some(requester), Some(acl_storage));
		session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
		session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);
		assert!(!session.computation_job().responses().contains_key(&NodeId::from(1)));
	}

	#[test]
	fn consensus_session_computation_request_is_rejected_when_received_by_master_node() {
		let mut session = make_master_consensus_session(0, None, None);
		assert_eq!(session.on_job_request(&NodeId::from(2), 2, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidMessage);
	}

	#[test]
	fn consensus_session_computation_request_is_rejected_when_received_before_consensus_is_established() {
		let mut session = make_slave_consensus_session(0, None);
		assert_eq!(session.on_job_request(&NodeId::from(1), 2, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidStateForRequest);
	}

	#[test]
	fn consensus_session_computation_request_is_ignored_when_wrong() {
		let mut session = make_slave_consensus_session(0, None);
		assert_eq!(session.state(), ConsensusSessionState::WaitingForInitialization);
		session.on_consensus_message(&NodeId::from(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
			requestor_signature: sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		assert_eq!(session.on_job_request(&NodeId::from(1), 20, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidMessage);
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
	}

	#[test]
	fn consensus_session_computation_request_is_processed_when_correct() {
		let mut session = make_slave_consensus_session(0, None);
		assert_eq!(session.state(), ConsensusSessionState::WaitingForInitialization);
		session.on_consensus_message(&NodeId::from(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
			requestor_signature: sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.on_job_request(&NodeId::from(1), 2, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
	}

	#[test]
	fn consensus_session_computation_response_is_ignored_when_consensus_is_not_reached() {
		let mut session = make_master_consensus_session(1, None, None);
		assert_eq!(session.on_job_response(&NodeId::from(2), 4).unwrap_err(), Error::InvalidStateForRequest);
	}

	#[test]
	fn consensus_session_completion_is_ignored_when_received_from_non_master_node() {
		let mut session = make_slave_consensus_session(0, None);
		assert_eq!(session.on_session_completed(&NodeId::from(3)).unwrap_err(), Error::InvalidMessage);
	}

	#[test]
	fn consensus_session_completion_is_ignored_when_consensus_is_not_established() {
		let mut session = make_slave_consensus_session(0, None);
		assert_eq!(session.on_session_completed(&NodeId::from(1)).unwrap_err(), Error::InvalidStateForRequest);
	}

	#[test]
	fn consensus_session_completion_is_accepted() {
		let mut session = make_slave_consensus_session(0, None);
		session.on_consensus_message(&NodeId::from(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
			requestor_signature: sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
		})).unwrap();
		session.on_session_completed(&NodeId::from(1)).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::Finished);
	}

	#[test]
	fn consensus_session_continues_if_node_error_received_by_uninitialized_master() {
		let mut session = make_master_consensus_session(0, None, None);
		assert_eq!(session.on_node_error(&NodeId::from(2)), Ok(false));
	}

	#[test]
	fn consensus_session_fails_if_node_error_received_by_uninitialized_slave_from_master() {
		let mut session = make_slave_consensus_session(0, None);
		assert_eq!(session.on_node_error(&NodeId::from(1)), Err(Error::ConsensusUnreachable));
		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
	fn consensus_session_continues_if_node_error_received_by_master_during_establish_and_enough_nodes_left() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)].into_iter().collect()).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from(2)), Ok(false));
	}

	#[test]
	fn consensus_session_fails_if_node_error_received_by_master_during_establish_and_not_enough_nodes_left() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from(2)), Err(Error::ConsensusUnreachable));
		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
	fn consensus_session_continues_if_node2_error_received_by_master_after_consensus_established_and_enough_nodes_left() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from(2)), Ok(false));
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
	}

	#[test]
	fn consensus_session_continues_if_node3_error_received_by_master_after_consensus_established_and_enough_nodes_left() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from(3)), Ok(false));
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
	}

	#[test]
	fn consensus_session_fails_if_node_error_received_by_master_after_consensus_established_and_not_enough_nodes_left() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from(2)), Err(Error::ConsensusUnreachable));
		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
	fn consensus_session_continues_if_node_error_received_from_slave_not_participating_in_computation() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3), NodeId::from(4)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		session.on_consensus_message(&NodeId::from(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from(3)), Ok(false));
		assert_eq!(session.on_node_error(&NodeId::from(4)), Ok(false));
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);
	}

	#[test]
	fn consensus_session_restarts_if_node_error_received_from_slave_participating_in_computation_and_enough_nodes_left() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3), NodeId::from(4)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);

		session.on_consensus_message(&NodeId::from(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from(2)), Ok(true));
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);

		assert_eq!(session.on_node_error(&NodeId::from(3)), Ok(false));
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
	}

	#[test]
	fn consensus_session_fails_if_node_error_received_from_slave_participating_in_computation_and_not_enough_nodes_left() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from(2)), Err(Error::ConsensusUnreachable));
		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
	fn consensus_session_fails_if_uninitialized_session_timeouts() {
		let mut session = make_master_consensus_session(1, None, None);
		assert_eq!(session.on_session_timeout(), Err(Error::ConsensusUnreachable));
	}

	#[test]
	fn consensus_session_continues_if_session_timeouts_and_enough_nodes_left_for_computation() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3), NodeId::from(4)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();

		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);

		session.on_consensus_message(&NodeId::from(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.on_session_timeout(), Ok(true));
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);

		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);

		assert_eq!(session.on_session_timeout(), Ok(false));
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
	}

	#[test]
	fn consensus_session_fails_if_session_timeouts_and_not_enough_nodes_left_for_computation() {
let mut session = make_master_consensus_session(1, None, None);
|
||||
session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap();
|
||||
session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
|
||||
is_confirmed: true,
|
||||
})).unwrap();
|
||||
session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
|
||||
assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);
|
||||
|
||||
assert_eq!(session.on_session_timeout(), Err(Error::ConsensusUnreachable));
|
||||
assert_eq!(session.state(), ConsensusSessionState::Failed);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn same_consensus_group_returned_after_second_selection() {
|
||||
let mut session = make_master_consensus_session(1, None, None);
|
||||
session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3)].into_iter().collect()).unwrap();
|
||||
session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
|
||||
is_confirmed: true,
|
||||
})).unwrap();
|
||||
session.on_consensus_message(&NodeId::from(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
|
||||
is_confirmed: true,
|
||||
})).unwrap();
|
||||
|
||||
let consensus_group1 = session.select_consensus_group().unwrap().clone();
|
||||
let consensus_group2 = session.select_consensus_group().unwrap().clone();
|
||||
assert_eq!(consensus_group1, consensus_group2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn consensus_session_complete_2_of_4() {
|
||||
let mut session = make_master_consensus_session(1, None, None);
|
||||
		session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3), NodeId::from(4)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
		session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);
		session.on_job_response(&NodeId::from(2), 16).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::Finished);
		assert_eq!(session.result(), Ok(20));
	}

	#[test]
	fn consensus_session_complete_2_of_4_after_restart() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from(1), NodeId::from(2), NodeId::from(3), NodeId::from(4)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
		session.on_consensus_message(&NodeId::from(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);

		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);

		session.on_consensus_message(&NodeId::from(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();

		assert_eq!(session.on_node_error(&NodeId::from(2)).unwrap(), true);
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);

		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);

		assert_eq!(session.on_node_error(&NodeId::from(3)).unwrap(), false);
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);

		session.on_consensus_message(&NodeId::from(4), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);

		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);

		session.on_job_response(&NodeId::from(4), 16).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::Finished);
		assert_eq!(session.result(), Ok(20));
	}
}
162
secret_store/src/key_server_cluster/jobs/decryption_job.rs
Normal file
@ -0,0 +1,162 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{BTreeSet, BTreeMap};
use ethkey::{Public, Secret};
use ethcrypto::ecies::encrypt;
use ethcrypto::DEFAULT_MAC;
use key_server_cluster::{Error, NodeId, DocumentKeyShare, EncryptedDocumentKeyShadow};
use key_server_cluster::math;
use key_server_cluster::jobs::job_session::{JobPartialRequestAction, JobPartialResponseAction, JobExecutor};

/// Decryption job.
pub struct DecryptionJob {
	/// This node id.
	self_node_id: NodeId,
	/// Access key.
	access_key: Secret,
	/// Requester public key.
	requester: Public,
	/// Key share.
	key_share: DocumentKeyShare,
	/// Request id.
	request_id: Option<Secret>,
	/// Is shadow decryption requested.
	is_shadow_decryption: Option<bool>,
}

/// Decryption job partial request.
pub struct PartialDecryptionRequest {
	/// Request id.
	pub id: Secret,
	/// Is shadow decryption requested.
	pub is_shadow_decryption: bool,
	/// Ids of other nodes participating in decryption.
	pub other_nodes_ids: BTreeSet<NodeId>,
}

/// Decryption job partial response.
pub struct PartialDecryptionResponse {
	/// Request id.
	pub request_id: Secret,
	/// Shadow point.
	pub shadow_point: Public,
	/// Decryption shadow coefficient, if requested.
	pub decrypt_shadow: Option<Vec<u8>>,
}

impl DecryptionJob {
	pub fn new_on_slave(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare) -> Result<Self, Error> {
		debug_assert!(key_share.common_point.is_some() && key_share.encrypted_point.is_some());
		Ok(DecryptionJob {
			self_node_id: self_node_id,
			access_key: access_key,
			requester: requester,
			key_share: key_share,
			request_id: None,
			is_shadow_decryption: None,
		})
	}

	pub fn new_on_master(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare, is_shadow_decryption: bool) -> Result<Self, Error> {
		debug_assert!(key_share.common_point.is_some() && key_share.encrypted_point.is_some());
		Ok(DecryptionJob {
			self_node_id: self_node_id,
			access_key: access_key,
			requester: requester,
			key_share: key_share,
			request_id: Some(math::generate_random_scalar()?),
			is_shadow_decryption: Some(is_shadow_decryption),
		})
	}
}

impl JobExecutor for DecryptionJob {
	type PartialJobRequest = PartialDecryptionRequest;
	type PartialJobResponse = PartialDecryptionResponse;
	type JobResponse = EncryptedDocumentKeyShadow;

	fn prepare_partial_request(&self, node: &NodeId, nodes: &BTreeSet<NodeId>) -> Result<PartialDecryptionRequest, Error> {
		debug_assert!(nodes.len() == self.key_share.threshold + 1);

		let request_id = self.request_id.as_ref()
.expect("prepare_partial_request is only called on master nodes; request_id is filed in constructor on master nodes; qed");
|
||||
let is_shadow_decryption = self.is_shadow_decryption
|
||||
.expect("prepare_partial_request is only called on master nodes; is_shadow_decryption is filed in constructor on master nodes; qed");
|
||||
		let mut other_nodes_ids = nodes.clone();
		other_nodes_ids.remove(node);

		Ok(PartialDecryptionRequest {
			id: request_id.clone(),
			is_shadow_decryption: is_shadow_decryption,
			other_nodes_ids: other_nodes_ids,
		})
	}

	fn process_partial_request(&self, partial_request: PartialDecryptionRequest) -> Result<JobPartialRequestAction<PartialDecryptionResponse>, Error> {
		if partial_request.other_nodes_ids.len() != self.key_share.threshold
			|| partial_request.other_nodes_ids.contains(&self.self_node_id)
			|| partial_request.other_nodes_ids.iter().any(|n| !self.key_share.id_numbers.contains_key(n)) {
			return Err(Error::InvalidMessage);
		}

		let self_id_number = &self.key_share.id_numbers[&self.self_node_id];
		let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &self.key_share.id_numbers[n]);
		let node_shadow = math::compute_node_shadow(&self.key_share.secret_share, &self_id_number, other_id_numbers)?;
		let decrypt_shadow = if partial_request.is_shadow_decryption { Some(math::generate_random_scalar()?) } else { None };
		let common_point = self.key_share.common_point.as_ref().expect("DecryptionJob is only created when common_point is known; qed");
		let (shadow_point, decrypt_shadow) = math::compute_node_shadow_point(&self.access_key, &common_point, &node_shadow, decrypt_shadow)?;
		Ok(JobPartialRequestAction::Respond(PartialDecryptionResponse {
			request_id: partial_request.id,
			shadow_point: shadow_point,
			decrypt_shadow: match decrypt_shadow {
				None => None,
				Some(decrypt_shadow) => Some(encrypt(&self.requester, &DEFAULT_MAC, &**decrypt_shadow)?),
			},
		}))
	}

	fn check_partial_response(&self, partial_response: &PartialDecryptionResponse) -> Result<JobPartialResponseAction, Error> {
		if Some(&partial_response.request_id) != self.request_id.as_ref() {
			return Ok(JobPartialResponseAction::Ignore);
		}
		if self.is_shadow_decryption != Some(partial_response.decrypt_shadow.is_some()) {
			return Ok(JobPartialResponseAction::Reject);
		}
		Ok(JobPartialResponseAction::Accept)
	}

	fn compute_response(&self, partial_responses: &BTreeMap<NodeId, PartialDecryptionResponse>) -> Result<EncryptedDocumentKeyShadow, Error> {
		let is_shadow_decryption = self.is_shadow_decryption
.expect("compute_response is only called on master nodes; is_shadow_decryption is filed in constructor on master nodes; qed");
|
||||
		let common_point = self.key_share.common_point.as_ref().expect("DecryptionJob is only created when common_point is known; qed");
		let encrypted_point = self.key_share.encrypted_point.as_ref().expect("DecryptionJob is only created when encrypted_point is known; qed");
		let joint_shadow_point = math::compute_joint_shadow_point(partial_responses.values().map(|s| &s.shadow_point))?;
		let decrypted_secret = math::decrypt_with_joint_shadow(self.key_share.threshold, &self.access_key, encrypted_point, &joint_shadow_point)?;
		Ok(EncryptedDocumentKeyShadow {
			decrypted_secret: decrypted_secret,
			common_point: if is_shadow_decryption {
				Some(math::make_common_shadow_point(self.key_share.threshold, common_point.clone())?)
			} else { None },
			decrypt_shadows: if is_shadow_decryption {
				Some(partial_responses.values().map(|r| r.decrypt_shadow.as_ref()
					.expect("is_shadow_decryption == true; decrypt_shadow.is_some() is checked in check_partial_response; qed")
					.clone())
					.collect())
			} else { None },
		})
	}
}
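
The threshold arithmetic above — exactly `threshold` other nodes in each partial request, hence `threshold + 1` participants overall — is the standard secret-sharing bound: a degree-t polynomial over a field is uniquely determined by t + 1 points. The `math` module performs this over secp256k1 points and scalars; the self-contained toy below (hypothetical names, a tiny prime field, plain integers instead of curve points) demonstrates the same reconstruction principle:

// Toy Shamir reconstruction over GF(97); illustrative only, unrelated to the
// secp256k1 arithmetic in key_server_cluster::math.
const P: i64 = 97;

// Modular exponentiation by squaring.
fn mod_pow(mut base: i64, mut exp: i64, modulus: i64) -> i64 {
	let mut result = 1;
	base %= modulus;
	while exp > 0 {
		if exp & 1 == 1 { result = result * base % modulus; }
		base = base * base % modulus;
		exp >>= 1;
	}
	result
}

// Modular inverse via Fermat's little theorem (modulus is prime).
fn mod_inv(a: i64, modulus: i64) -> i64 {
	mod_pow(a.rem_euclid(modulus), modulus - 2, modulus)
}

// Recover f(0) from (x, f(x)) shares via Lagrange interpolation at zero;
// needs degree + 1 = threshold + 1 shares.
fn interpolate_at_zero(shares: &[(i64, i64)]) -> i64 {
	let mut secret = 0;
	for &(xi, yi) in shares {
		let mut basis = 1; // Lagrange basis polynomial l_i evaluated at 0
		for &(xj, _) in shares {
			if xi != xj {
				basis = basis * (-xj).rem_euclid(P) % P * mod_inv(xi - xj, P) % P;
			}
		}
		secret = (secret + yi * basis) % P;
	}
	secret.rem_euclid(P)
}

fn main() {
	// Secret 42 shared with threshold 1 via f(x) = 42 + 7x: any two of the
	// four shares reconstruct it, mirroring the t + 1 rule above.
	let shares: Vec<(i64, i64)> = (1..5).map(|x| (x, (42 + 7 * x) % P)).collect();
	assert_eq!(interpolate_at_zero(&shares[0..2]), 42);
	assert_eq!(interpolate_at_zero(&shares[2..4]), 42);
}
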
536
secret_store/src/key_server_cluster/jobs/job_session.rs
Normal file
@ -0,0 +1,536 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{BTreeSet, BTreeMap};
use key_server_cluster::{Error, NodeId, SessionMeta};

#[derive(Debug, Clone, Copy, PartialEq)]
/// Partial response action.
pub enum JobPartialResponseAction {
	/// Ignore this response.
	Ignore,
	/// Mark this response as reject.
	Reject,
	/// Accept this response.
	Accept,
}

#[derive(Debug, Clone, Copy, PartialEq)]
/// Partial request action.
pub enum JobPartialRequestAction<PartialJobResponse> {
/// Repond with reject.
|
||||
Reject(PartialJobResponse),
|
||||
/// Respond with this response.
|
||||
Respond(PartialJobResponse),
|
||||
}
|
||||
|
||||
/// Job executor.
|
||||
pub trait JobExecutor {
|
||||
type PartialJobRequest;
|
||||
type PartialJobResponse;
|
||||
type JobResponse;
|
||||
|
||||
/// Prepare job request for given node.
|
||||
fn prepare_partial_request(&self, node: &NodeId, nodes: &BTreeSet<NodeId>) -> Result<Self::PartialJobRequest, Error>;
|
||||
/// Process partial request.
|
||||
fn process_partial_request(&self, partial_request: Self::PartialJobRequest) -> Result<JobPartialRequestAction<Self::PartialJobResponse>, Error>;
|
||||
/// Check partial response of given node.
|
||||
fn check_partial_response(&self, partial_response: &Self::PartialJobResponse) -> Result<JobPartialResponseAction, Error>;
|
||||
/// Compute final job response.
|
||||
fn compute_response(&self, partial_responses: &BTreeMap<NodeId, Self::PartialJobResponse>) -> Result<Self::JobResponse, Error>;
|
||||
}
|
||||
|
||||
/// Jobs transport.
|
||||
pub trait JobTransport {
|
||||
type PartialJobRequest;
|
||||
type PartialJobResponse;
|
||||
|
||||
/// Send partial request to given node.
|
||||
fn send_partial_request(&self, node: &NodeId, request: Self::PartialJobRequest) -> Result<(), Error>;
|
||||
/// Send partial request to given node.
|
||||
fn send_partial_response(&self, node: &NodeId, response: Self::PartialJobResponse) -> Result<(), Error>;
|
||||
}
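
// NOTE (editorial sketch, not part of the commit): a JobSession is parameterized by an
// executor (the computation) and a transport (the wire). On the master node the typical
// flow, assuming some `executor` and `transport` values, is roughly:
//
//	let mut session = JobSession::new(meta, executor, transport);
//	session.initialize(nodes)?;                    // sends partial requests to slaves
//	session.on_partial_response(&node, response)?; // repeat until threshold + 1 accepts
//	let result = session.result()?;                // folds responses via compute_response
//
// See SquaredSumJobExecutor and DummyJobTransport in the tests below for a concrete pair.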

#[derive(Debug, Clone, Copy, PartialEq)]
/// Current state of job session.
pub enum JobSessionState {
	/// Session is inactive.
	Inactive,
	/// Session is active.
	Active,
	/// Session is finished.
	Finished,
	/// Session has failed.
	Failed,
}

/// Basic request-response session on a set of nodes.
pub struct JobSession<Executor: JobExecutor, Transport> where Transport: JobTransport<PartialJobRequest = Executor::PartialJobRequest, PartialJobResponse = Executor::PartialJobResponse> {
	/// Session meta.
	meta: SessionMeta,
	/// Job executor.
	executor: Executor,
	/// Jobs transport.
	transport: Transport,
	/// Session data.
	data: JobSessionData<Executor::PartialJobResponse>,
	//// PartialJobRequest dummy.
	// dummy: PhantomData<PartialJobRequest>,
}

/// Data of job session.
struct JobSessionData<PartialJobResponse> {
	/// Session state.
	state: JobSessionState,
	/// Mutable session data.
	active_data: Option<ActiveJobSessionData<PartialJobResponse>>,
}

/// Active job session data.
struct ActiveJobSessionData<PartialJobResponse> {
	/// Active partial requests.
	requests: BTreeSet<NodeId>,
	/// Rejects to partial requests.
	rejects: BTreeSet<NodeId>,
	/// Received partial responses.
	responses: BTreeMap<NodeId, PartialJobResponse>,
}

impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExecutor, Transport: JobTransport<PartialJobRequest = Executor::PartialJobRequest, PartialJobResponse = Executor::PartialJobResponse> {
	/// Create new session.
	pub fn new(meta: SessionMeta, executor: Executor, transport: Transport) -> Self {
		JobSession {
			meta: meta,
			executor: executor,
			transport: transport,
			data: JobSessionData {
				state: JobSessionState::Inactive,
				active_data: None,
			},
		}
	}

	#[cfg(test)]
	/// Get transport reference.
	pub fn transport(&self) -> &Transport {
		&self.transport
	}

	/// Get job state.
	pub fn state(&self) -> JobSessionState {
		self.data.state
	}

	#[cfg(test)]
	/// Get rejects.
	pub fn rejects(&self) -> &BTreeSet<NodeId> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);

		&self.data.active_data.as_ref()
			.expect("rejects is only called on master nodes after initialization; on master nodes active_data is filled during initialization; qed")
			.rejects
	}

	/// Get active requests.
	pub fn requests(&self) -> &BTreeSet<NodeId> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);

		&self.data.active_data.as_ref()
			.expect("requests is only called on master nodes after initialization; on master nodes active_data is filled during initialization; qed")
			.requests
	}

	#[cfg(test)]
	/// Get responses.
	pub fn responses(&self) -> &BTreeMap<NodeId, Executor::PartialJobResponse> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);

		&self.data.active_data.as_ref()
			.expect("responses is only called on master nodes after initialization; on master nodes active_data is filled during initialization; qed")
			.responses
	}

	/// Get job result.
	pub fn result(&self) -> Result<Executor::JobResponse, Error> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);

		if self.data.state != JobSessionState::Finished {
			return Err(Error::InvalidStateForRequest);
		}

		self.executor.compute_response(&self.data.active_data.as_ref()
			.expect("result is only called on master nodes; on master nodes active_data is filled during initialization; qed")
			.responses)
	}

	/// Initialize.
	pub fn initialize(&mut self, nodes: BTreeSet<NodeId>) -> Result<(), Error> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);
		debug_assert!(nodes.len() >= self.meta.threshold + 1);

		if self.data.state != JobSessionState::Inactive {
			return Err(Error::InvalidStateForRequest);
		}

		// send requests to slave nodes
		let mut waits_for_self = false;
		let active_data = ActiveJobSessionData {
			requests: nodes,
			rejects: BTreeSet::new(),
			responses: BTreeMap::new(),
		};
		for node in &active_data.requests {
			if node != &self.meta.self_node_id {
				self.transport.send_partial_request(&node, self.executor.prepare_partial_request(node, &active_data.requests)?)?;
			} else {
				waits_for_self = true;
			}
		}

		// result from self
		let self_response = if waits_for_self {
			let partial_request = self.executor.prepare_partial_request(&self.meta.self_node_id, &active_data.requests)?;
			Some(self.executor.process_partial_request(partial_request)?)
		} else {
			None
		};

		// update state
		self.data.active_data = Some(active_data);
		self.data.state = JobSessionState::Active;

		// if we are waiting for response from self => do it
		if let Some(self_response) = self_response {
			let self_node_id = self.meta.self_node_id.clone();
			match self_response {
				JobPartialRequestAction::Respond(self_response) => self.on_partial_response(&self_node_id, self_response)?,
				JobPartialRequestAction::Reject(self_response) => self.on_partial_response(&self_node_id, self_response)?,
			}
		}

		Ok(())
	}

	/// When partial request is received by slave node.
	pub fn on_partial_request(&mut self, node: &NodeId, request: Executor::PartialJobRequest) -> Result<(), Error> {
		if node != &self.meta.master_node_id {
			return Err(Error::InvalidMessage);
		}
		if self.meta.self_node_id == self.meta.master_node_id {
			return Err(Error::InvalidMessage);
		}
		if self.data.state != JobSessionState::Inactive && self.data.state != JobSessionState::Finished {
			return Err(Error::InvalidStateForRequest);
		}

		let partial_response = match self.executor.process_partial_request(request)? {
			JobPartialRequestAction::Respond(partial_response) => {
				self.data.state = JobSessionState::Finished;
				partial_response
			},
			JobPartialRequestAction::Reject(partial_response) => {
				self.data.state = JobSessionState::Failed;
				partial_response
			},
		};
		self.transport.send_partial_response(node, partial_response)
	}

	/// When partial response is received by master node.
	pub fn on_partial_response(&mut self, node: &NodeId, response: Executor::PartialJobResponse) -> Result<(), Error> {
		if self.meta.self_node_id != self.meta.master_node_id {
			return Err(Error::InvalidMessage);
		}
		if self.data.state != JobSessionState::Active && self.data.state != JobSessionState::Finished {
			return Err(Error::InvalidStateForRequest);
		}

		let active_data = self.data.active_data.as_mut()
			.expect("on_partial_response is only called on master nodes; on master nodes active_data is filled during initialization; qed");
		if !active_data.requests.remove(node) {
			return Err(Error::InvalidNodeForRequest);
		}

		match self.executor.check_partial_response(&response)? {
			JobPartialResponseAction::Ignore => Ok(()),
			JobPartialResponseAction::Reject => {
				active_data.rejects.insert(node.clone());
				if active_data.requests.len() + active_data.responses.len() >= self.meta.threshold + 1 {
					return Ok(());
				}

				self.data.state = JobSessionState::Failed;
				Err(Error::ConsensusUnreachable)
			},
			JobPartialResponseAction::Accept => {
				active_data.responses.insert(node.clone(), response);

				if active_data.responses.len() < self.meta.threshold + 1 {
					return Ok(());
				}

				self.data.state = JobSessionState::Finished;
				Ok(())
			},
		}
	}

	/// When error from node is received.
	pub fn on_node_error(&mut self, node: &NodeId) -> Result<(), Error> {
		if self.meta.self_node_id != self.meta.master_node_id {
			if node != &self.meta.master_node_id {
				return Ok(());
			}

			self.data.state = JobSessionState::Failed;
			return Err(Error::ConsensusUnreachable);
		}

		let active_data = self.data.active_data.as_mut()
			.expect("we have checked that we are on master node; on master nodes active_data is filled during initialization; qed");
		if active_data.rejects.contains(node) {
			return Ok(());
		}
		if active_data.requests.remove(node) || active_data.responses.remove(node).is_some() {
			active_data.rejects.insert(node.clone());
			if self.data.state == JobSessionState::Finished && active_data.responses.len() < self.meta.threshold + 1 {
				self.data.state = JobSessionState::Active;
			}
			if active_data.requests.len() + active_data.responses.len() >= self.meta.threshold + 1 {
				return Ok(());
			}

			self.data.state = JobSessionState::Failed;
			return Err(Error::ConsensusUnreachable);
		}

		Ok(())
	}

	/// When session times out.
	pub fn on_session_timeout(&mut self) -> Result<(), Error> {
		if self.data.state == JobSessionState::Finished || self.data.state == JobSessionState::Failed {
			return Ok(());
		}

		self.data.state = JobSessionState::Failed;
		Err(Error::ConsensusUnreachable)
	}
}
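
// NOTE (editorial, not part of the commit): the session state machine is
// Inactive -> Active -> Finished | Failed. A Finished master session may drop back to
// Active in on_node_error, when a disconnecting node takes the accepted-response count
// below threshold + 1, so consensus can be re-established with the remaining nodes.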

#[cfg(test)]
pub mod tests {
	use std::collections::{VecDeque, BTreeMap, BTreeSet};
	use parking_lot::Mutex;
	use ethkey::Public;
	use key_server_cluster::{Error, NodeId, SessionId, SessionMeta};
	use super::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor, JobTransport, JobSession, JobSessionState};

	pub struct SquaredSumJobExecutor;

	impl JobExecutor for SquaredSumJobExecutor {
		type PartialJobRequest = u32;
		type PartialJobResponse = u32;
		type JobResponse = u32;

		fn prepare_partial_request(&self, _n: &NodeId, _nodes: &BTreeSet<NodeId>) -> Result<u32, Error> { Ok(2) }
		fn process_partial_request(&self, r: u32) -> Result<JobPartialRequestAction<u32>, Error> { if r <= 10 { Ok(JobPartialRequestAction::Respond(r * r)) } else { Err(Error::InvalidMessage) } }
		fn check_partial_response(&self, r: &u32) -> Result<JobPartialResponseAction, Error> { if r % 2 == 0 { Ok(JobPartialResponseAction::Accept) } else { Ok(JobPartialResponseAction::Reject) } }
		fn compute_response(&self, r: &BTreeMap<NodeId, u32>) -> Result<u32, Error> { Ok(r.values().fold(0, |v1, v2| v1 + v2)) }
	}

	#[derive(Default)]
	pub struct DummyJobTransport<T, U> {
		pub requests: Mutex<VecDeque<(NodeId, T)>>,
		pub responses: Mutex<VecDeque<(NodeId, U)>>,
	}

	impl<T, U> DummyJobTransport<T, U> {
		pub fn response(&self) -> (NodeId, U) {
			self.responses.lock().pop_front().unwrap()
		}
	}

	impl<T, U> JobTransport for DummyJobTransport<T, U> {
		type PartialJobRequest = T;
		type PartialJobResponse = U;

		fn send_partial_request(&self, node: &NodeId, request: T) -> Result<(), Error> { self.requests.lock().push_back((node.clone(), request)); Ok(()) }
		fn send_partial_response(&self, node: &NodeId, response: U) -> Result<(), Error> { self.responses.lock().push_back((node.clone(), response)); Ok(()) }
	}

	pub fn make_master_session_meta(threshold: usize) -> SessionMeta {
		SessionMeta { id: SessionId::default(), master_node_id: NodeId::from(1), self_node_id: NodeId::from(1), threshold: threshold }
	}

	pub fn make_slave_session_meta(threshold: usize) -> SessionMeta {
		SessionMeta { id: SessionId::default(), master_node_id: NodeId::from(1), self_node_id: NodeId::from(2), threshold: threshold }
	}

	#[test]
	fn job_initialize_fails_if_not_inactive() {
		let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from(1)].into_iter().collect()).unwrap();
		assert_eq!(job.initialize(vec![Public::from(1)].into_iter().collect()).unwrap_err(), Error::InvalidStateForRequest);
	}

	#[test]
	fn job_initialization_leads_to_finish_if_single_node_is_required() {
		let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from(1)].into_iter().collect()).unwrap();
		assert_eq!(job.state(), JobSessionState::Finished);
		assert_eq!(job.result(), Ok(4));
	}

	#[test]
	fn job_initialization_does_not_leads_to_finish_if_single_other_node_is_required() {
		let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from(2)].into_iter().collect()).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
	}

	#[test]
	fn job_request_fails_if_comes_from_non_master_node() {
		let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		assert_eq!(job.on_partial_request(&NodeId::from(3), 2).unwrap_err(), Error::InvalidMessage);
	}

	#[test]
	fn job_request_fails_if_comes_to_master_node() {
		let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		assert_eq!(job.on_partial_request(&NodeId::from(1), 2).unwrap_err(), Error::InvalidMessage);
	}

	#[test]
	fn job_request_fails_if_comes_to_failed_state() {
		let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		job.on_session_timeout().unwrap_err();
		assert_eq!(job.on_partial_request(&NodeId::from(1), 2).unwrap_err(), Error::InvalidStateForRequest);
	}

	#[test]
	fn job_request_succeeds_if_comes_to_finished_state() {
		let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		job.on_partial_request(&NodeId::from(1), 2).unwrap();
		assert_eq!(job.transport().response(), (NodeId::from(1), 4));
		assert_eq!(job.state(), JobSessionState::Finished);
		job.on_partial_request(&NodeId::from(1), 3).unwrap();
		assert_eq!(job.transport().response(), (NodeId::from(1), 9));
		assert_eq!(job.state(), JobSessionState::Finished);
	}

	#[test]
	fn job_response_fails_if_comes_to_slave_node() {
		let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		assert_eq!(job.on_partial_response(&NodeId::from(1), 2).unwrap_err(), Error::InvalidMessage);
	}

	#[test]
	fn job_response_fails_if_comes_to_failed_state() {
		let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from(2)].into_iter().collect()).unwrap();
		job.on_session_timeout().unwrap_err();
		assert_eq!(job.on_partial_response(&NodeId::from(2), 2).unwrap_err(), Error::InvalidStateForRequest);
	}

	#[test]
	fn job_response_fails_if_comes_from_unknown_node() {
		let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from(2)].into_iter().collect()).unwrap();
		assert_eq!(job.on_partial_response(&NodeId::from(3), 2).unwrap_err(), Error::InvalidNodeForRequest);
	}

	#[test]
	fn job_response_leads_to_failure_if_too_few_nodes_left() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect()).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		assert_eq!(job.on_partial_response(&NodeId::from(2), 3).unwrap_err(), Error::ConsensusUnreachable);
		assert_eq!(job.state(), JobSessionState::Failed);
	}

	#[test]
	fn job_response_succeeds() {
		let mut job = JobSession::new(make_master_session_meta(2), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from(1), Public::from(2), Public::from(3)].into_iter().collect()).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		job.on_partial_response(&NodeId::from(2), 2).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
	}

	#[test]
	fn job_response_leads_to_finish() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect()).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		job.on_partial_response(&NodeId::from(2), 2).unwrap();
		assert_eq!(job.state(), JobSessionState::Finished);
	}

	#[test]
	fn job_node_error_ignored_when_slave_disconnects_from_slave() {
		let mut job = JobSession::new(make_slave_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		assert_eq!(job.state(), JobSessionState::Inactive);
		job.on_node_error(&NodeId::from(3)).unwrap();
		assert_eq!(job.state(), JobSessionState::Inactive);
	}

	#[test]
	fn job_node_error_leads_to_fail_when_slave_disconnects_from_master() {
		let mut job = JobSession::new(make_slave_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		assert_eq!(job.state(), JobSessionState::Inactive);
		assert_eq!(job.on_node_error(&NodeId::from(1)).unwrap_err(), Error::ConsensusUnreachable);
		assert_eq!(job.state(), JobSessionState::Failed);
	}

	#[test]
	fn job_node_error_ignored_when_disconnects_from_rejected() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from(1), Public::from(2), Public::from(3)].into_iter().collect()).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		job.on_partial_response(&NodeId::from(2), 3).unwrap();
		job.on_node_error(&NodeId::from(2)).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
	}

	#[test]
	fn job_node_error_ignored_when_disconnects_from_unknown() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect()).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		job.on_node_error(&NodeId::from(3)).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
	}

	#[test]
	fn job_node_error_ignored_when_disconnects_from_requested_and_enough_nodes_left() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from(1), Public::from(2), Public::from(3)].into_iter().collect()).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		job.on_node_error(&NodeId::from(3)).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
	}

	#[test]
	fn job_node_error_leads_to_fail_when_disconnects_from_requested_and_not_enough_nodes_left() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect()).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		assert_eq!(job.on_node_error(&NodeId::from(2)).unwrap_err(), Error::ConsensusUnreachable);
		assert_eq!(job.state(), JobSessionState::Failed);
	}
}
73 secret_store/src/key_server_cluster/jobs/key_access_job.rs Normal file
@ -0,0 +1,73 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::collections::{BTreeSet, BTreeMap};
use ethkey::{Signature, recover};
use key_server_cluster::{Error, NodeId, SessionId, AclStorage};
use key_server_cluster::jobs::job_session::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor};

/// The purpose of this job is to construct the set of nodes which have agreed to provide access to the given key for the given requestor.
pub struct KeyAccessJob {
	/// Key id.
	id: SessionId,
	/// ACL storage.
	acl_storage: Arc<AclStorage>,
	/// Requester signature.
	signature: Option<Signature>,
}

impl KeyAccessJob {
	pub fn new_on_slave(id: SessionId, acl_storage: Arc<AclStorage>) -> Self {
		KeyAccessJob {
			id: id,
			acl_storage: acl_storage,
			signature: None,
		}
	}

	pub fn new_on_master(id: SessionId, acl_storage: Arc<AclStorage>, signature: Signature) -> Self {
		KeyAccessJob {
			id: id,
			acl_storage: acl_storage,
			signature: Some(signature),
		}
	}
}

impl JobExecutor for KeyAccessJob {
	type PartialJobRequest = Signature;
	type PartialJobResponse = bool;
	type JobResponse = BTreeSet<NodeId>;

	fn prepare_partial_request(&self, _node: &NodeId, _nodes: &BTreeSet<NodeId>) -> Result<Signature, Error> {
		Ok(self.signature.as_ref().expect("prepare_partial_request is only called on master nodes; new_on_master fills the signature; qed").clone())
	}

	fn process_partial_request(&self, partial_request: Signature) -> Result<JobPartialRequestAction<bool>, Error> {
		self.acl_storage.check(&recover(&partial_request, &self.id)?, &self.id)
			.map_err(|_| Error::AccessDenied)
			.map(|is_confirmed| if is_confirmed { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) })
	}

	fn check_partial_response(&self, partial_response: &bool) -> Result<JobPartialResponseAction, Error> {
		Ok(if *partial_response { JobPartialResponseAction::Accept } else { JobPartialResponseAction::Reject })
	}

	fn compute_response(&self, partial_responses: &BTreeMap<NodeId, bool>) -> Result<BTreeSet<NodeId>, Error> {
		Ok(partial_responses.keys().cloned().collect())
	}
}
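
// NOTE (editorial, not part of the commit): each node independently recovers the
// requestor's public key from the signature over the key id and asks its ACL storage
// whether that key may be accessed; the final job response is simply the set of nodes
// that confirmed access, which then serves as the consensus group for the actual
// decryption or signing job.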
21 secret_store/src/key_server_cluster/jobs/mod.rs Normal file
@ -0,0 +1,21 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

pub mod consensus_session;
pub mod decryption_job;
pub mod job_session;
pub mod key_access_job;
pub mod signing_job;
145 secret_store/src/key_server_cluster/jobs/signing_job.rs Normal file
@ -0,0 +1,145 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{BTreeSet, BTreeMap};
use ethkey::{Public, Secret};
use util::H256;
use key_server_cluster::{Error, NodeId, DocumentKeyShare};
use key_server_cluster::math;
use key_server_cluster::jobs::job_session::{JobPartialRequestAction, JobPartialResponseAction, JobExecutor};

/// Signing job.
pub struct SigningJob {
	/// This node id.
	self_node_id: NodeId,
	/// Key share.
	key_share: DocumentKeyShare,
	/// Session public key.
	session_public: Public,
	/// Session secret coefficient.
	session_secret_coeff: Secret,
	/// Request id.
	request_id: Option<Secret>,
	/// Message hash.
	message_hash: Option<H256>,
}

/// Signing job partial request.
pub struct PartialSigningRequest {
	/// Request id.
	pub id: Secret,
	/// Message hash.
	pub message_hash: H256,
	/// Ids of other nodes participating in signing.
	pub other_nodes_ids: BTreeSet<NodeId>,
}

/// Signing job partial response.
pub struct PartialSigningResponse {
	/// Request id.
	pub request_id: Secret,
	/// Partial signature.
	pub partial_signature: Secret,
}

impl SigningJob {
	pub fn new_on_slave(self_node_id: NodeId, key_share: DocumentKeyShare, session_public: Public, session_secret_coeff: Secret) -> Result<Self, Error> {
		Ok(SigningJob {
			self_node_id: self_node_id,
			key_share: key_share,
			session_public: session_public,
			session_secret_coeff: session_secret_coeff,
			request_id: None,
			message_hash: None,
		})
	}

	pub fn new_on_master(self_node_id: NodeId, key_share: DocumentKeyShare, session_public: Public, session_secret_coeff: Secret, message_hash: H256) -> Result<Self, Error> {
		Ok(SigningJob {
			self_node_id: self_node_id,
			key_share: key_share,
			session_public: session_public,
			session_secret_coeff: session_secret_coeff,
			request_id: Some(math::generate_random_scalar()?),
			message_hash: Some(message_hash),
		})
	}
}

impl JobExecutor for SigningJob {
	type PartialJobRequest = PartialSigningRequest;
	type PartialJobResponse = PartialSigningResponse;
	type JobResponse = (Secret, Secret);

	fn prepare_partial_request(&self, node: &NodeId, nodes: &BTreeSet<NodeId>) -> Result<PartialSigningRequest, Error> {
		debug_assert!(nodes.len() == self.key_share.threshold + 1);

		let request_id = self.request_id.as_ref()
			.expect("prepare_partial_request is only called on master nodes; request_id is filled in constructor on master nodes; qed");
		let message_hash = self.message_hash.as_ref()
			.expect("prepare_partial_request is only called on master nodes; message_hash is filled in constructor on master nodes; qed");
		let mut other_nodes_ids = nodes.clone();
		other_nodes_ids.remove(node);

		Ok(PartialSigningRequest {
			id: request_id.clone(),
			message_hash: message_hash.clone(),
			other_nodes_ids: other_nodes_ids,
		})
	}

	fn process_partial_request(&self, partial_request: PartialSigningRequest) -> Result<JobPartialRequestAction<PartialSigningResponse>, Error> {
		if partial_request.other_nodes_ids.len() != self.key_share.threshold
			|| partial_request.other_nodes_ids.contains(&self.self_node_id)
			|| partial_request.other_nodes_ids.iter().any(|n| !self.key_share.id_numbers.contains_key(n)) {
			return Err(Error::InvalidMessage);
		}

		let self_id_number = &self.key_share.id_numbers[&self.self_node_id];
		let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &self.key_share.id_numbers[n]);
		let combined_hash = math::combine_message_hash_with_public(&partial_request.message_hash, &self.session_public)?;
		Ok(JobPartialRequestAction::Respond(PartialSigningResponse {
			request_id: partial_request.id,
			partial_signature: math::compute_signature_share(
				self.key_share.threshold,
				&combined_hash,
				&self.session_secret_coeff,
				&self.key_share.secret_share,
				self_id_number,
				other_id_numbers
			)?,
		}))
	}

	fn check_partial_response(&self, partial_response: &PartialSigningResponse) -> Result<JobPartialResponseAction, Error> {
		if Some(&partial_response.request_id) != self.request_id.as_ref() {
			return Ok(JobPartialResponseAction::Ignore);
		}
		// TODO: check_signature_share()

		Ok(JobPartialResponseAction::Accept)
	}

	fn compute_response(&self, partial_responses: &BTreeMap<NodeId, PartialSigningResponse>) -> Result<(Secret, Secret), Error> {
		let message_hash = self.message_hash.as_ref()
			.expect("compute_response is only called on master nodes; message_hash is filled in constructor on master nodes; qed");

		let signature_c = math::combine_message_hash_with_public(message_hash, &self.session_public)?;
		let signature_s = math::compute_signature(partial_responses.values().map(|r| &r.partial_signature))?;

		Ok((signature_c, signature_s))
	}
}
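
// NOTE (editorial, not part of the commit): here session_public is the joint public key
// of the one-time nonce generated by a per-session DKG, and session_secret_coeff is this
// node's share of that nonce. The job response (c, s) is a Schnorr signature over the
// message hash: c = hash(message_hash || nonce_public.x) and s is the sum of the t + 1
// partial signatures (see full_signature_math_session in math.rs below).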
@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use ethkey::{Public, Secret, Random, Generator, math};
use util::{U256, H256, Hashable};
use key_server_cluster::Error;

#[derive(Debug)]
@ -36,6 +37,48 @@ pub fn generate_random_point() -> Result<Public, Error> {
	Ok(Random.generate()?.public().clone())
}

/// Compute publics sum.
pub fn compute_public_sum<'a, I>(mut publics: I) -> Result<Public, Error> where I: Iterator<Item=&'a Public> {
	let mut sum = publics.next().expect("compute_public_sum is called when there's at least one public; qed").clone();
	while let Some(public) = publics.next() {
		math::public_add(&mut sum, &public)?;
	}
	Ok(sum)
}

/// Compute secrets sum.
pub fn compute_secret_sum<'a, I>(mut secrets: I) -> Result<Secret, Error> where I: Iterator<Item=&'a Secret> {
	let mut sum = secrets.next().expect("compute_secret_sum is called when there's at least one secret; qed").clone();
	while let Some(secret) = secrets.next() {
		sum.add(secret)?;
	}
	Ok(sum)
}

/// Compute secrets 'shadow' multiplication: coeff * multiplication(s[j] / (s[i] - s[j])) for every i != j
pub fn compute_shadow_mul<'a, I>(coeff: &Secret, self_secret: &Secret, mut other_secrets: I) -> Result<Secret, Error> where I: Iterator<Item=&'a Secret> {
	// when there are no other secrets, only coeff is left
	let other_secret = match other_secrets.next() {
		Some(other_secret) => other_secret,
		None => return Ok(coeff.clone()),
	};

	let mut shadow_mul = self_secret.clone();
	shadow_mul.sub(other_secret)?;
	shadow_mul.inv()?;
	shadow_mul.mul(other_secret)?;
	while let Some(other_secret) = other_secrets.next() {
		let mut shadow_mul_element = self_secret.clone();
		shadow_mul_element.sub(other_secret)?;
		shadow_mul_element.inv()?;
		shadow_mul_element.mul(other_secret)?;
		shadow_mul.mul(&shadow_mul_element)?;
	}

	shadow_mul.mul(coeff)?;
	Ok(shadow_mul)
}
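
// NOTE (editorial, not part of the commit): compute_shadow_mul returns the Lagrange-style
// product coeff * prod_j s[j] / (s[i] - s[j]) over the other nodes' id numbers s[j]. The
// textbook interpolation coefficient at x = 0 is prod_j s[j] / (s[j] - s[i]), which
// differs by a factor of (-1)^t for t other nodes; that parity is exactly what the
// `threshold % 2` branch in compute_signature_share below compensates for.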

/// Update point by multiplying by a random scalar
pub fn update_random_point(point: &mut Public) -> Result<(), Error> {
	Ok(math::public_mul_secret(point, &generate_random_scalar()?)?)
@ -43,12 +86,9 @@ pub fn update_random_point(point: &mut Public) -> Result<(), Error> {

/// Generate random polynom of threshold degree
pub fn generate_random_polynom(threshold: usize) -> Result<Vec<Secret>, Error> {
	let mut polynom: Vec<_> = Vec::with_capacity(threshold + 1);
	for _ in 0..threshold + 1 {
		polynom.push(generate_random_scalar()?);
	}
	debug_assert_eq!(polynom.len(), threshold + 1);
	Ok(polynom)
	(0..threshold + 1)
		.map(|_| generate_random_scalar())
		.collect()
}

/// Compute value of polynom, using `node_number` as argument
@ -125,12 +165,8 @@ pub fn keys_verification(threshold: usize, derived_point: &Public, number_id: &S
}

/// Compute secret share.
pub fn compute_secret_share<'a, I>(mut secret_values: I) -> Result<Secret, Error> where I: Iterator<Item=&'a Secret> {
	let mut secret_share = secret_values.next().expect("compute_secret_share is called when cluster has at least one node; qed").clone();
	while let Some(secret_value) = secret_values.next() {
		secret_share.add(secret_value)?;
	}
	Ok(secret_share)
pub fn compute_secret_share<'a, I>(secret_values: I) -> Result<Secret, Error> where I: Iterator<Item=&'a Secret> {
	compute_secret_sum(secret_values)
}

/// Compute public key share.
@ -141,22 +177,14 @@ pub fn compute_public_share(self_secret_value: &Secret) -> Result<Public, Error>
}

/// Compute joint public key.
pub fn compute_joint_public<'a, I>(mut public_shares: I) -> Result<Public, Error> where I: Iterator<Item=&'a Public> {
	let mut joint_public = public_shares.next().expect("compute_joint_public is called when cluster has at least one node; qed").clone();
	while let Some(public_share) = public_shares.next() {
		math::public_add(&mut joint_public, &public_share)?;
	}
	Ok(joint_public)
pub fn compute_joint_public<'a, I>(public_shares: I) -> Result<Public, Error> where I: Iterator<Item=&'a Public> {
	compute_public_sum(public_shares)
}

#[cfg(test)]
/// Compute joint secret key.
pub fn compute_joint_secret<'a, I>(mut secret_coeffs: I) -> Result<Secret, Error> where I: Iterator<Item=&'a Secret> {
	let mut joint_secret = secret_coeffs.next().expect("compute_joint_private is called when cluster has at least one node; qed").clone();
	while let Some(secret_coeff) = secret_coeffs.next() {
		joint_secret.add(secret_coeff)?;
	}
	Ok(joint_secret)
pub fn compute_joint_secret<'a, I>(secret_coeffs: I) -> Result<Secret, Error> where I: Iterator<Item=&'a Secret> {
	compute_secret_sum(secret_coeffs)
}

/// Encrypt secret with joint public key.
@ -180,26 +208,8 @@ pub fn encrypt_secret(secret: &Public, joint_public: &Public) -> Result<Encrypte
}

/// Compute shadow for the node.
pub fn compute_node_shadow<'a, I>(node_number: &Secret, node_secret_share: &Secret, mut other_nodes_numbers: I) -> Result<Secret, Error> where I: Iterator<Item=&'a Secret> {
	let other_node_number = match other_nodes_numbers.next() {
		Some(other_node_number) => other_node_number,
		None => return Ok(node_secret_share.clone()),
	};

	let mut shadow = node_number.clone();
	shadow.sub(other_node_number)?;
	shadow.inv()?;
	shadow.mul(other_node_number)?;
	while let Some(other_node_number) = other_nodes_numbers.next() {
		let mut shadow_element = node_number.clone();
		shadow_element.sub(other_node_number)?;
		shadow_element.inv()?;
		shadow_element.mul(other_node_number)?;
		shadow.mul(&shadow_element)?;
	}

	shadow.mul(&node_secret_share)?;
	Ok(shadow)
pub fn compute_node_shadow<'a, I>(node_secret_share: &Secret, node_number: &Secret, other_nodes_numbers: I) -> Result<Secret, Error> where I: Iterator<Item=&'a Secret> {
	compute_shadow_mul(node_secret_share, node_number, other_nodes_numbers)
}

/// Compute shadow point for the node.
@ -224,21 +234,14 @@ pub fn compute_node_shadow_point(access_key: &Secret, common_point: &Public, nod
}

/// Compute joint shadow point.
pub fn compute_joint_shadow_point<'a, I>(mut nodes_shadow_points: I) -> Result<Public, Error> where I: Iterator<Item=&'a Public> {
	let mut joint_shadow_point = nodes_shadow_points.next().expect("compute_joint_shadow_point is called when at least two nodes are required to decrypt secret; qed").clone();
	while let Some(node_shadow_point) = nodes_shadow_points.next() {
		math::public_add(&mut joint_shadow_point, &node_shadow_point)?;
	}
	Ok(joint_shadow_point)
pub fn compute_joint_shadow_point<'a, I>(nodes_shadow_points: I) -> Result<Public, Error> where I: Iterator<Item=&'a Public> {
	compute_public_sum(nodes_shadow_points)
}

#[cfg(test)]
/// Compute joint shadow point (version for tests).
pub fn compute_joint_shadow_point_test<'a, I>(access_key: &Secret, common_point: &Public, mut nodes_shadows: I) -> Result<Public, Error> where I: Iterator<Item=&'a Secret> {
	let mut joint_shadow = nodes_shadows.next().expect("compute_joint_shadow_point_test is called when at least two nodes are required to decrypt secret; qed").clone();
	while let Some(node_shadow) = nodes_shadows.next() {
		joint_shadow.add(node_shadow)?;
	}
pub fn compute_joint_shadow_point_test<'a, I>(access_key: &Secret, common_point: &Public, nodes_shadows: I) -> Result<Public, Error> where I: Iterator<Item=&'a Secret> {
	let mut joint_shadow = compute_secret_sum(nodes_shadows)?;
	joint_shadow.mul(access_key)?;

	let mut joint_shadow_point = common_point.clone();
@ -277,10 +280,7 @@ pub fn make_common_shadow_point(threshold: usize, mut common_point: Public) -> R
#[cfg(test)]
/// Decrypt shadow-encrypted secret.
pub fn decrypt_with_shadow_coefficients(mut decrypted_shadow: Public, mut common_shadow_point: Public, shadow_coefficients: Vec<Secret>) -> Result<Public, Error> {
	let mut shadow_coefficients_sum = shadow_coefficients[0].clone();
	for shadow_coefficient in shadow_coefficients.iter().skip(1) {
		shadow_coefficients_sum.add(shadow_coefficient)?;
	}
	let shadow_coefficients_sum = compute_secret_sum(shadow_coefficients.iter())?;
	math::public_mul_secret(&mut common_shadow_point, &shadow_coefficients_sum)?;
	math::public_add(&mut decrypted_shadow, &common_shadow_point)?;
	Ok(decrypted_shadow)
@ -298,11 +298,139 @@ pub fn decrypt_with_joint_secret(encrypted_point: &Public, common_point: &Public
	Ok(decrypted_point)
}

/// Combine message hash with public key X coordinate.
pub fn combine_message_hash_with_public(message_hash: &H256, public: &Public) -> Result<Secret, Error> {
	// buffer is just [message_hash | public.x]
	let mut buffer = [0; 64];
	buffer[0..32].copy_from_slice(&message_hash[0..32]);
	buffer[32..64].copy_from_slice(&public[0..32]);

	// calculate hash of buffer
	let hash = (&buffer[..]).sha3();

	// map hash to EC finite field value
	let hash: U256 = hash.into();
	let hash: H256 = (hash % math::curve_order()).into();
	let hash = Secret::from_slice(&*hash);
	hash.check_validity()?;

	Ok(hash)
}
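
// NOTE (editorial, not part of the commit): this computes the Schnorr challenge
// c = sha3(message_hash || R.x) mod n, where R is the one-time nonce public key and n is
// the secp256k1 group order. Only the x coordinate of R enters the hash, so verification
// just has to reconstruct a point with the same x coordinate (see verify_signature below).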

/// Compute signature share.
pub fn compute_signature_share<'a, I>(threshold: usize, combined_hash: &Secret, one_time_secret_coeff: &Secret, node_secret_share: &Secret, node_number: &Secret, other_nodes_numbers: I)
	-> Result<Secret, Error> where I: Iterator<Item=&'a Secret> {
	let mut sum = one_time_secret_coeff.clone();
	let mut subtrahend = compute_shadow_mul(combined_hash, node_number, other_nodes_numbers)?;
	subtrahend.mul(node_secret_share)?;
	if threshold % 2 == 0 {
		sum.sub(&subtrahend)?;
	} else {
		sum.add(&subtrahend)?;
	}
	Ok(sum)
}
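
// NOTE (editorial, not part of the commit): both branches compute the same value,
// s[i] = k[i] - c * lagrange_coeff(i) * x[i]; the sign flip merely undoes the (-1)^threshold
// factor picked up by compute_shadow_mul (see the note above). Interpolating t + 1 such
// shares via compute_signature yields s = k - c * x, the standard Schnorr response for the
// joint secret x and joint nonce k.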

/// Check signature share.
pub fn _check_signature_share<'a, I>(_combined_hash: &Secret, _signature_share: &Secret, _public_share: &Public, _one_time_public_share: &Public, _node_numbers: I)
	-> Result<bool, Error> where I: Iterator<Item=&'a Secret> {
	// TODO: in paper partial signature is checked using comparison:
	//   sig[i] * T = r[i] - c * lagrange_coeff(i) * y[i]
	//   => (k[i] - c * lagrange_coeff(i) * s[i]) * T = r[i] - c * lagrange_coeff(i) * y[i]
	//   => k[i] * T - c * lagrange_coeff(i) * s[i] * T = k[i] * T - c * lagrange_coeff(i) * y[i]
	//   => this means that y[i] = s[i] * T
	// but when verifying signature (for t = 1), nonce public (r) is restored using following expression:
	//   r = (sig[0] + sig[1]) * T - c * y
	//   r = (k[0] - c * lagrange_coeff(0) * s[0] + k[1] - c * lagrange_coeff(1) * s[1]) * T - c * y
	//   r = (k[0] + k[1]) * T - c * (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T - c * y
	//   r = r - c * (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T - c * y
	//   => -c * y = c * (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T
	//   => -y = (lagrange_coeff(0) * s[0] + lagrange_coeff(1) * s[1]) * T
	//   => y[i] != s[i] * T
	//   => some other way is required
	Ok(true)
}

/// Compute signature.
pub fn compute_signature<'a, I>(signature_shares: I) -> Result<Secret, Error> where I: Iterator<Item=&'a Secret> {
	compute_secret_sum(signature_shares)
}

#[cfg(test)]
/// Locally compute Schnorr signature as described in https://en.wikipedia.org/wiki/Schnorr_signature#Signing.
pub fn local_compute_signature(nonce: &Secret, secret: &Secret, message_hash: &Secret) -> Result<(Secret, Secret), Error> {
	let mut nonce_public = math::generation_point();
	math::public_mul_secret(&mut nonce_public, &nonce).unwrap();

	let combined_hash = combine_message_hash_with_public(message_hash, &nonce_public)?;

	let mut sig_subtrahend = combined_hash.clone();
	sig_subtrahend.mul(secret)?;
	let mut sig = nonce.clone();
	sig.sub(&sig_subtrahend)?;

	Ok((combined_hash, sig))
}
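
// NOTE (editorial, not part of the commit): this is the textbook single-party scheme the
// threshold version must reproduce: R = k * G, c = hash(m || R.x), s = k - c * x, with the
// signature being (c, s). full_signature_math_session below asserts that the combined
// t-of-n shares produce exactly this pair.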

#[cfg(test)]
/// Verify signature as described in https://en.wikipedia.org/wiki/Schnorr_signature#Verifying.
pub fn verify_signature(public: &Public, signature: &(Secret, Secret), message_hash: &H256) -> Result<bool, Error> {
	let mut addendum = math::generation_point();
	math::public_mul_secret(&mut addendum, &signature.1)?;
	let mut nonce_public = public.clone();
	math::public_mul_secret(&mut nonce_public, &signature.0)?;
	math::public_add(&mut nonce_public, &addendum)?;

	let combined_hash = combine_message_hash_with_public(message_hash, &nonce_public)?;
	Ok(combined_hash == signature.0)
}
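
// NOTE (editorial, not part of the commit): verification reconstructs the nonce point as
// R' = s * G + c * P; since s = k - c * x and P = x * G, R' = k * G = R, so
// hash(m || R'.x) must equal c for a valid signature.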

#[cfg(test)]
pub mod tests {
	use std::iter::once;
	use ethkey::KeyPair;
	use super::*;

	#[derive(Clone)]
	struct KeyGenerationArtifacts {
		id_numbers: Vec<Secret>,
		polynoms1: Vec<Vec<Secret>>,
		secrets1: Vec<Vec<Secret>>,
		public_shares: Vec<Public>,
		secret_shares: Vec<Secret>,
		joint_public: Public,
	}

	fn run_key_generation(t: usize, n: usize, id_numbers: Option<Vec<Secret>>) -> KeyGenerationArtifacts {
		// === PART1: DKG ===

		// data, gathered during initialization
		let id_numbers: Vec<_> = match id_numbers {
			Some(id_numbers) => id_numbers,
			None => (0..n).map(|_| generate_random_scalar().unwrap()).collect(),
		};

		// data, generated during keys dissemination
		let polynoms1: Vec<_> = (0..n).map(|_| generate_random_polynom(t).unwrap()).collect();
		let secrets1: Vec<_> = (0..n).map(|i| (0..n).map(|j| compute_polynom(&polynoms1[i], &id_numbers[j]).unwrap()).collect::<Vec<_>>()).collect();

		// data, generated during keys generation
		let public_shares: Vec<_> = (0..n).map(|i| compute_public_share(&polynoms1[i][0]).unwrap()).collect();
		let secret_shares: Vec<_> = (0..n).map(|i| compute_secret_share(secrets1.iter().map(|s| &s[i])).unwrap()).collect();

		// joint public key, as a result of DKG
		let joint_public = compute_joint_public(public_shares.iter()).unwrap();

		KeyGenerationArtifacts {
			id_numbers: id_numbers,
			polynoms1: polynoms1,
			secrets1: secrets1,
			public_shares: public_shares,
			secret_shares: secret_shares,
			joint_public: joint_public,
		}
	}

	pub fn do_encryption_and_decryption(t: usize, joint_public: &Public, id_numbers: &[Secret], secret_shares: &[Secret], joint_secret: Option<&Secret>, document_secret_plain: Public) -> (Public, Public) {
		// === PART2: encryption using joint public key ===

@ -316,7 +444,7 @@ pub mod tests {

		// use t + 1 nodes to compute joint shadow point
		let nodes_shadows: Vec<_> = (0..t + 1).map(|i|
			compute_node_shadow(&id_numbers[i], &secret_shares[i], id_numbers.iter()
			compute_node_shadow(&secret_shares[i], &id_numbers[i], id_numbers.iter()
				.enumerate()
				.filter(|&(j, _)| j != i)
				.take(t)
@ -349,39 +477,108 @@ pub mod tests {
		let test_cases = [(0, 2), (1, 2), (1, 3), (2, 3), (1, 4), (2, 4), (3, 4), (1, 5), (2, 5), (3, 5), (4, 5),
			(1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10)];
		for &(t, n) in &test_cases {
			// === PART1: DKG ===

			// data, gathered during initialization
			let id_numbers: Vec<_> = (0..n).map(|_| generate_random_scalar().unwrap()).collect();

			// data, generated during keys dissemination
			let polynoms1: Vec<_> = (0..n).map(|_| generate_random_polynom(t).unwrap()).collect();
			let secrets1: Vec<_> = (0..n).map(|i| (0..n).map(|j| compute_polynom(&polynoms1[i], &id_numbers[j]).unwrap()).collect::<Vec<_>>()).collect();

			// data, generated during keys generation
			let public_shares: Vec<_> = (0..n).map(|i| compute_public_share(&polynoms1[i][0]).unwrap()).collect();
			let secret_shares: Vec<_> = (0..n).map(|i| compute_secret_share(secrets1.iter().map(|s| &s[i])).unwrap()).collect();

			// joint public key, as a result of DKG
			let joint_public = compute_joint_public(public_shares.iter()).unwrap();
			let artifacts = run_key_generation(t, n, None);

			// compute joint private key [just for test]
			let joint_secret = compute_joint_secret(polynoms1.iter().map(|p| &p[0])).unwrap();
			let joint_secret = compute_joint_secret(artifacts.polynoms1.iter().map(|p| &p[0])).unwrap();
			let joint_key_pair = KeyPair::from_secret(joint_secret.clone()).unwrap();
			assert_eq!(&joint_public, joint_key_pair.public());
			assert_eq!(&artifacts.joint_public, joint_key_pair.public());

			// check secret shares computation [just for test]
			let secret_shares_polynom: Vec<_> = (0..t + 1).map(|k| compute_secret_share(polynoms1.iter().map(|p| &p[k])).unwrap()).collect();
			let secret_shares_calculated_from_polynom: Vec<_> = id_numbers.iter().map(|id_number| compute_polynom(&*secret_shares_polynom, id_number).unwrap()).collect();
			assert_eq!(secret_shares, secret_shares_calculated_from_polynom);
			let secret_shares_polynom: Vec<_> = (0..t + 1).map(|k| compute_secret_share(artifacts.polynoms1.iter().map(|p| &p[k])).unwrap()).collect();
			let secret_shares_calculated_from_polynom: Vec<_> = artifacts.id_numbers.iter().map(|id_number| compute_polynom(&*secret_shares_polynom, id_number).unwrap()).collect();
			assert_eq!(artifacts.secret_shares, secret_shares_calculated_from_polynom);

			// now encrypt and decrypt data
			let document_secret_plain = generate_random_point().unwrap();
			let (document_secret_decrypted, document_secret_decrypted_test) =
				do_encryption_and_decryption(t, &joint_public, &id_numbers, &secret_shares, Some(&joint_secret), document_secret_plain.clone());
				do_encryption_and_decryption(t, &artifacts.joint_public, &artifacts.id_numbers, &artifacts.secret_shares, Some(&joint_secret), document_secret_plain.clone());

			assert_eq!(document_secret_plain, document_secret_decrypted_test);
			assert_eq!(document_secret_plain, document_secret_decrypted);
		}
	}

	#[test]
	fn local_signature_works() {
		let key_pair = Random.generate().unwrap();
		let message_hash = "0000000000000000000000000000000000000000000000000000000000000042".parse().unwrap();
		let nonce = generate_random_scalar().unwrap();
		let signature = local_compute_signature(&nonce, key_pair.secret(), &message_hash).unwrap();
		assert_eq!(verify_signature(key_pair.public(), &signature, &message_hash), Ok(true));
	}

	#[test]
	fn full_signature_math_session() {
		let test_cases = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (1, 4), (2, 4), (3, 4), (1, 5), (2, 5), (3, 5), (4, 5),
			(1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10)];
		for &(t, n) in &test_cases {
			// hash of the message to be signed
			let message_hash: Secret = "0000000000000000000000000000000000000000000000000000000000000042".parse().unwrap();

			// === MiDS-S algorithm ===
			// setup: all nodes share master secret key && every node knows master public key
			let artifacts = run_key_generation(t, n, None);

			// in this gap (not related to math):
			// master node should ask every other node if it is able to do a signing
			// if fewer than t+1 nodes are able to sign => error
			// select t+1 nodes for signing session
			// all steps below are for this subset of nodes
			let n = t + 1;

			// step 1: run DKG to generate one-time secret key (nonce)
			let id_numbers = artifacts.id_numbers.iter().cloned().take(n).collect();
			let one_time_artifacts = run_key_generation(t, n, Some(id_numbers));

			// step 2: message hash && x coordinate of one-time public value are combined
			let combined_hash = combine_message_hash_with_public(&message_hash, &one_time_artifacts.joint_public).unwrap();

			// step 3: compute signature shares
			let partial_signatures: Vec<_> = (0..n)
				.map(|i| compute_signature_share(
					t,
					&combined_hash,
					&one_time_artifacts.polynoms1[i][0],
					&artifacts.secret_shares[i],
					&artifacts.id_numbers[i],
					artifacts.id_numbers.iter()
						.enumerate()
						.filter(|&(j, _)| i != j)
						.map(|(_, n)| n)
						.take(t)
				).unwrap())
				.collect();

			// step 4: receive and verify signature shares from other nodes
			let received_signatures: Vec<Vec<_>> = (0..n)
				.map(|i| (0..n)
					.filter(|j| i != *j)
					.map(|j| {
						let signature_share = partial_signatures[j].clone();
						assert!(_check_signature_share(&combined_hash,
							&signature_share,
							&artifacts.public_shares[j],
							&one_time_artifacts.public_shares[j],
							artifacts.id_numbers.iter().take(t)).unwrap_or(false));
						signature_share
					})
					.collect())
				.collect();

			// step 5: compute signature
			let signatures: Vec<_> = (0..n)
				.map(|i| (combined_hash.clone(), compute_signature(received_signatures[i].iter().chain(once(&partial_signatures[i]))).unwrap()))
				.collect();

			// === verify signature ===
			let master_secret = compute_joint_secret(artifacts.polynoms1.iter().map(|p| &p[0])).unwrap();
			let nonce = compute_joint_secret(one_time_artifacts.polynoms1.iter().map(|p| &p[0])).unwrap();
			let local_signature = local_compute_signature(&nonce, &master_secret, &message_hash).unwrap();
			for signature in &signatures {
				assert_eq!(signature, &local_signature);
				assert_eq!(verify_signature(&artifacts.joint_public, signature, &message_hash), Ok(true));
			}
		}
	}
}
@ -18,7 +18,7 @@ use std::fmt;
use std::collections::{BTreeSet, BTreeMap};
use ethkey::Secret;
use key_server_cluster::SessionId;
use super::{SerializableH256, SerializablePublic, SerializableSecret, SerializableSignature};
use super::{SerializableH256, SerializablePublic, SerializableSecret, SerializableSignature, SerializableMessageHash};

pub type MessageSessionId = SerializableH256;
pub type MessageNodeId = SerializablePublic;
@ -28,10 +28,14 @@ pub type MessageNodeId = SerializablePublic;
pub enum Message {
	/// Cluster message.
	Cluster(ClusterMessage),
	/// Key generation message.
	Generation(GenerationMessage),
	/// Encryption message.
	Encryption(EncryptionMessage),
	/// Decryption message.
	Decryption(DecryptionMessage),
	/// Signing message.
	Signing(SigningMessage),
}

#[derive(Clone, Debug)]
@ -47,9 +51,9 @@ pub enum ClusterMessage {
	KeepAliveResponse(KeepAliveResponse),
}

#[derive(Clone, Debug)]
/// All possible messages that can be sent during encryption session.
pub enum EncryptionMessage {
#[derive(Clone, Debug, Serialize, Deserialize)]
/// All possible messages that can be sent during key generation session.
pub enum GenerationMessage {
	/// Initialize new DKG session.
	InitializeSession(InitializeSession),
	/// Confirm DKG session initialization.
@ -66,16 +70,34 @@ pub enum EncryptionMessage {
	SessionCompleted(SessionCompleted),
}

#[derive(Clone, Debug)]
/// All possible messages that can be sent during encryption session.
pub enum EncryptionMessage {
	/// Initialize encryption session.
	InitializeEncryptionSession(InitializeEncryptionSession),
	/// Confirm/reject encryption session initialization.
	ConfirmEncryptionInitialization(ConfirmEncryptionInitialization),
	/// When encryption session error has occurred.
	EncryptionSessionError(EncryptionSessionError),
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// All possible messages that can be sent during consensus establishing.
pub enum ConsensusMessage {
	/// Initialize consensus session.
	InitializeConsensusSession(InitializeConsensusSession),
	/// Confirm/reject consensus session initialization.
	ConfirmConsensusInitialization(ConfirmConsensusInitialization),
}

#[derive(Clone, Debug)]
/// All possible messages that can be sent during decryption session.
pub enum DecryptionMessage {
	/// Initialize decryption session.
	InitializeDecryptionSession(InitializeDecryptionSession),
	/// Confirm/reject decryption session initialization.
	ConfirmDecryptionInitialization(ConfirmDecryptionInitialization),
	/// Consensus establishing message.
	DecryptionConsensusMessage(DecryptionConsensusMessage),
	/// Request partial decryption from node.
	RequestPartialDecryption(RequestPartialDecryption),
	/// Partial decryption is completed
	/// Partial decryption is completed.
	PartialDecryption(PartialDecryption),
	/// When decryption session error has occurred.
	DecryptionSessionError(DecryptionSessionError),
@ -83,6 +105,23 @@ pub enum DecryptionMessage {
	DecryptionSessionCompleted(DecryptionSessionCompleted),
}

#[derive(Clone, Debug)]
/// All possible messages that can be sent during signing session.
pub enum SigningMessage {
	/// Consensus establishing message.
	SigningConsensusMessage(SigningConsensusMessage),
	/// Session key generation message.
	SigningGenerationMessage(SigningGenerationMessage),
	/// Request partial signature from node.
	RequestPartialSignature(RequestPartialSignature),
	/// Partial signature is generated.
	PartialSignature(PartialSignature),
	/// Signing error occurred.
	SigningSessionError(SigningSessionError),
	/// Signing session completed.
	SigningSessionCompleted(SigningSessionCompleted),
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// Introduce node public key.
pub struct NodePublicKey {
@ -115,6 +154,13 @@ pub struct KeepAliveResponse {
pub struct InitializeSession {
	/// Session Id.
	pub session: MessageSessionId,
	/// Session author.
	pub author: SerializablePublic,
	/// All session participants along with their identification numbers.
	pub nodes: BTreeMap<MessageNodeId, SerializableSecret>,
	/// Decryption threshold. During decryption threshold-of-route.len() nodes must come to
	/// consensus to successfully decrypt message.
	pub threshold: usize,
	/// Derived generation point. Starting from originator, every node must multiply this
	/// point by random scalar (unknown by other nodes). At the end of initialization
	/// `point` will be some (k1 * k2 * ... * kn) * G = `point` where `(k1 * k2 * ... * kn)`
@ -136,11 +182,6 @@ pub struct ConfirmInitialization {
pub struct CompleteInitialization {
	/// Session Id.
	pub session: MessageSessionId,
	/// All session participants along with their identification numbers.
	pub nodes: BTreeMap<MessageNodeId, SerializableSecret>,
	/// Decryption threshold. During decryption threshold-of-route.len() nodes must come to
	/// consensus to successfully decrypt message.
	pub threshold: usize,
	/// Derived generation point.
	pub derived_point: SerializablePublic,
}
@ -181,37 +222,132 @@ pub struct SessionError {
pub struct SessionCompleted {
	/// Session Id.
	pub session: MessageSessionId,
	/// Common (shared) encryption point.
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// Node is requested to prepare for saving encrypted data.
pub struct InitializeEncryptionSession {
	/// Encryption session Id.
	pub session: MessageSessionId,
	/// Requestor signature.
	pub requestor_signature: SerializableSignature,
	/// Common point.
	pub common_point: SerializablePublic,
	/// Encrypted point.
	/// Encrypted data.
	pub encrypted_point: SerializablePublic,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// Node is requested to decrypt data, encrypted in given session.
pub struct InitializeDecryptionSession {
/// Node is responding to encryption initialization request.
pub struct ConfirmEncryptionInitialization {
	/// Encryption session Id.
	pub session: MessageSessionId,
	/// Decryption session Id.
	pub sub_session: SerializableSecret,
	/// Requestor signature.
	pub requestor_signature: SerializableSignature,
	/// Is shadow decryption requested? When true, decryption result
	/// will be visible to the owner of requestor public key only.
	pub is_shadow_decryption: bool,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// Node is responding to decryption request.
pub struct ConfirmDecryptionInitialization {
/// When encryption session error has occurred.
pub struct EncryptionSessionError {
	/// Encryption session Id.
	pub session: MessageSessionId,
	/// Decryption session Id.
	pub sub_session: SerializableSecret,
	/// Is node confirmed to make a decryption?
	/// Error message.
	pub error: String,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// Node is asked to be part of consensus group.
pub struct InitializeConsensusSession {
	/// Requestor signature.
	pub requestor_signature: SerializableSignature,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// Node is responding to consensus initialization request.
pub struct ConfirmConsensusInitialization {
	/// Whether the node confirmed consensus participation.
	pub is_confirmed: bool,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// Consensus-related signing message.
pub struct SigningConsensusMessage {
	/// Generation session Id.
	pub session: MessageSessionId,
	/// Signing session Id.
	pub sub_session: SerializableSecret,
	/// Consensus message.
	pub message: ConsensusMessage,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// Session key generation message.
pub struct SigningGenerationMessage {
	/// Generation session Id.
	pub session: MessageSessionId,
	/// Signing session Id.
	pub sub_session: SerializableSecret,
	/// Generation message.
	pub message: GenerationMessage,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// Request partial signature.
pub struct RequestPartialSignature {
	/// Generation session Id.
	pub session: MessageSessionId,
	/// Signing session Id.
	pub sub_session: SerializableSecret,
	/// Request id.
	pub request_id: SerializableSecret,
	/// Message hash.
	pub message_hash: SerializableMessageHash,
	/// Selected nodes.
	pub nodes: BTreeSet<MessageNodeId>,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// Partial signature.
pub struct PartialSignature {
	/// Generation session Id.
	pub session: MessageSessionId,
	/// Signing session Id.
	pub sub_session: SerializableSecret,
	/// Request id.
	pub request_id: SerializableSecret,
	/// S part of signature.
	pub partial_signature: SerializableSecret,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// When signing session error has occurred.
pub struct SigningSessionError {
	/// Generation session Id.
	pub session: MessageSessionId,
	/// Signing session Id.
	pub sub_session: SerializableSecret,
	/// Error description.
	pub error: String,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// Signing session completed.
pub struct SigningSessionCompleted {
	/// Generation session Id.
	pub session: MessageSessionId,
	/// Signing session Id.
	pub sub_session: SerializableSecret,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// Consensus-related decryption message.
pub struct DecryptionConsensusMessage {
	/// Generation session Id.
	pub session: MessageSessionId,
	/// Decryption session Id.
	pub sub_session: SerializableSecret,
	/// Consensus message.
	pub message: ConsensusMessage,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
/// Node is requested to do a partial decryption.
pub struct RequestPartialDecryption {
@ -219,6 +355,11 @@ pub struct RequestPartialDecryption {
	pub session: MessageSessionId,
	/// Decryption session Id.
	pub sub_session: SerializableSecret,
	/// Request id.
	pub request_id: SerializableSecret,
	/// Is shadow decryption requested? When true, decryption result
	/// will be visible to the owner of requestor public key only.
	pub is_shadow_decryption: bool,
	/// Nodes that have agreed to do a decryption.
	pub nodes: BTreeSet<MessageNodeId>,
}
@ -230,6 +371,8 @@ pub struct PartialDecryption {
	pub session: MessageSessionId,
	/// Decryption session Id.
	pub sub_session: SerializableSecret,
	/// Request id.
	pub request_id: SerializableSecret,
	/// Partially decrypted secret.
	pub shadow_point: SerializablePublic,
	/// Decrypt shadow coefficient (if requested), encrypted with requestor public.
@ -256,16 +399,26 @@ pub struct DecryptionSessionCompleted {
	pub sub_session: SerializableSecret,
}

impl GenerationMessage {
	pub fn session_id(&self) -> &SessionId {
		match *self {
			GenerationMessage::InitializeSession(ref msg) => &msg.session,
			GenerationMessage::ConfirmInitialization(ref msg) => &msg.session,
			GenerationMessage::CompleteInitialization(ref msg) => &msg.session,
			GenerationMessage::KeysDissemination(ref msg) => &msg.session,
			GenerationMessage::PublicKeyShare(ref msg) => &msg.session,
			GenerationMessage::SessionError(ref msg) => &msg.session,
			GenerationMessage::SessionCompleted(ref msg) => &msg.session,
		}
	}
}

impl EncryptionMessage {
	pub fn session_id(&self) -> &SessionId {
		match *self {
			EncryptionMessage::InitializeSession(ref msg) => &msg.session,
			EncryptionMessage::ConfirmInitialization(ref msg) => &msg.session,
			EncryptionMessage::CompleteInitialization(ref msg) => &msg.session,
			EncryptionMessage::KeysDissemination(ref msg) => &msg.session,
			EncryptionMessage::PublicKeyShare(ref msg) => &msg.session,
			EncryptionMessage::SessionError(ref msg) => &msg.session,
			EncryptionMessage::SessionCompleted(ref msg) => &msg.session,
			EncryptionMessage::InitializeEncryptionSession(ref msg) => &msg.session,
			EncryptionMessage::ConfirmEncryptionInitialization(ref msg) => &msg.session,
			EncryptionMessage::EncryptionSessionError(ref msg) => &msg.session,
		}
	}
}
@ -273,8 +426,7 @@ impl EncryptionMessage {
impl DecryptionMessage {
	pub fn session_id(&self) -> &SessionId {
		match *self {
			DecryptionMessage::InitializeDecryptionSession(ref msg) => &msg.session,
			DecryptionMessage::ConfirmDecryptionInitialization(ref msg) => &msg.session,
			DecryptionMessage::DecryptionConsensusMessage(ref msg) => &msg.session,
			DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.session,
			DecryptionMessage::PartialDecryption(ref msg) => &msg.session,
			DecryptionMessage::DecryptionSessionError(ref msg) => &msg.session,
@ -284,8 +436,7 @@ impl DecryptionMessage {

	pub fn sub_session_id(&self) -> &Secret {
		match *self {
			DecryptionMessage::InitializeDecryptionSession(ref msg) => &msg.sub_session,
			DecryptionMessage::ConfirmDecryptionInitialization(ref msg) => &msg.sub_session,
			DecryptionMessage::DecryptionConsensusMessage(ref msg) => &msg.sub_session,
			DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.sub_session,
			DecryptionMessage::PartialDecryption(ref msg) => &msg.sub_session,
			DecryptionMessage::DecryptionSessionError(ref msg) => &msg.sub_session,
@ -294,12 +445,38 @@ impl DecryptionMessage {
	}
}

impl SigningMessage {
	pub fn session_id(&self) -> &SessionId {
		match *self {
			SigningMessage::SigningConsensusMessage(ref msg) => &msg.session,
			SigningMessage::SigningGenerationMessage(ref msg) => &msg.session,
			SigningMessage::RequestPartialSignature(ref msg) => &msg.session,
			SigningMessage::PartialSignature(ref msg) => &msg.session,
			SigningMessage::SigningSessionError(ref msg) => &msg.session,
			SigningMessage::SigningSessionCompleted(ref msg) => &msg.session,
		}
	}

	pub fn sub_session_id(&self) -> &Secret {
		match *self {
			SigningMessage::SigningConsensusMessage(ref msg) => &msg.sub_session,
			SigningMessage::SigningGenerationMessage(ref msg) => &msg.sub_session,
			SigningMessage::RequestPartialSignature(ref msg) => &msg.sub_session,
			SigningMessage::PartialSignature(ref msg) => &msg.sub_session,
			SigningMessage::SigningSessionError(ref msg) => &msg.sub_session,
			SigningMessage::SigningSessionCompleted(ref msg) => &msg.sub_session,
		}
	}
}

impl fmt::Display for Message {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match *self {
			Message::Cluster(ref message) => write!(f, "Cluster.{}", message),
			Message::Generation(ref message) => write!(f, "Generation.{}", message),
			Message::Encryption(ref message) => write!(f, "Encryption.{}", message),
			Message::Decryption(ref message) => write!(f, "Decryption.{}", message),
			Message::Signing(ref message) => write!(f, "Signing.{}", message),
		}
	}
}
@ -315,16 +492,35 @@ impl fmt::Display for ClusterMessage {
	}
}

impl fmt::Display for GenerationMessage {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match *self {
			GenerationMessage::InitializeSession(_) => write!(f, "InitializeSession"),
			GenerationMessage::ConfirmInitialization(_) => write!(f, "ConfirmInitialization"),
			GenerationMessage::CompleteInitialization(_) => write!(f, "CompleteInitialization"),
			GenerationMessage::KeysDissemination(_) => write!(f, "KeysDissemination"),
			GenerationMessage::PublicKeyShare(_) => write!(f, "PublicKeyShare"),
			GenerationMessage::SessionError(ref msg) => write!(f, "SessionError({})", msg.error),
			GenerationMessage::SessionCompleted(_) => write!(f, "SessionCompleted"),
		}
	}
}

impl fmt::Display for EncryptionMessage {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match *self {
			EncryptionMessage::InitializeSession(_) => write!(f, "InitializeSession"),
			EncryptionMessage::ConfirmInitialization(_) => write!(f, "ConfirmInitialization"),
			EncryptionMessage::CompleteInitialization(_) => write!(f, "CompleteInitialization"),
			EncryptionMessage::KeysDissemination(_) => write!(f, "KeysDissemination"),
			EncryptionMessage::PublicKeyShare(_) => write!(f, "PublicKeyShare"),
			EncryptionMessage::SessionError(ref msg) => write!(f, "SessionError({})", msg.error),
			EncryptionMessage::SessionCompleted(_) => write!(f, "SessionCompleted"),
			EncryptionMessage::InitializeEncryptionSession(_) => write!(f, "InitializeEncryptionSession"),
			EncryptionMessage::ConfirmEncryptionInitialization(_) => write!(f, "ConfirmEncryptionInitialization"),
			EncryptionMessage::EncryptionSessionError(ref msg) => write!(f, "EncryptionSessionError({})", msg.error),
		}
	}
}

impl fmt::Display for ConsensusMessage {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match *self {
			ConsensusMessage::InitializeConsensusSession(_) => write!(f, "InitializeConsensusSession"),
			ConsensusMessage::ConfirmConsensusInitialization(_) => write!(f, "ConfirmConsensusInitialization"),
		}
	}
}
@ -332,8 +528,7 @@ impl fmt::Display for EncryptionMessage {
impl fmt::Display for DecryptionMessage {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match *self {
			DecryptionMessage::InitializeDecryptionSession(_) => write!(f, "InitializeDecryptionSession"),
			DecryptionMessage::ConfirmDecryptionInitialization(_) => write!(f, "ConfirmDecryptionInitialization"),
			DecryptionMessage::DecryptionConsensusMessage(ref m) => write!(f, "DecryptionConsensusMessage.{}", m.message),
			DecryptionMessage::RequestPartialDecryption(_) => write!(f, "RequestPartialDecryption"),
			DecryptionMessage::PartialDecryption(_) => write!(f, "PartialDecryption"),
			DecryptionMessage::DecryptionSessionError(_) => write!(f, "DecryptionSessionError"),
@ -341,3 +536,16 @@ impl fmt::Display for DecryptionMessage {
		}
	}
}

impl fmt::Display for SigningMessage {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match *self {
			SigningMessage::SigningConsensusMessage(ref m) => write!(f, "SigningConsensusMessage.{}", m.message),
			SigningMessage::SigningGenerationMessage(ref m) => write!(f, "SigningGenerationMessage.{}", m.message),
			SigningMessage::RequestPartialSignature(_) => write!(f, "RequestPartialSignature"),
			SigningMessage::PartialSignature(_) => write!(f, "PartialSignature"),
			SigningMessage::SigningSessionError(_) => write!(f, "SigningSessionError"),
			SigningMessage::SigningSessionCompleted(_) => write!(f, "SigningSessionCompleted"),
		}
	}
}
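One detail worth seeing concretely: signing and decryption messages carry their consensus payload nested two enums deep, so a single wire message is assembled like the sketch below. The transports near the end of this change build exactly this shape; `session_id`, `access_key` and `signature` are placeholder values here.

	// Sketch: nesting of a signing consensus request (placeholder values).
	let message = Message::Signing(SigningMessage::SigningConsensusMessage(SigningConsensusMessage {
		session: session_id.into(),        // MessageSessionId of the key
		sub_session: access_key.into(),    // per-signing-session access key
		message: ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
			requestor_signature: signature.into(),   // requestor's signature
		}),
	}));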
@ -18,13 +18,14 @@ use std::fmt;
use std::io::Error as IoError;
use ethkey;
use ethcrypto;
use super::types::all::DocumentAddress;
use super::types::all::ServerKeyId;

pub use super::types::all::{NodeId, DocumentEncryptedKeyShadow};
pub use super::types::all::{NodeId, EncryptedDocumentKeyShadow};
pub use super::acl_storage::AclStorage;
pub use super::key_storage::{KeyStorage, DocumentKeyShare};
pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic};
pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic, SerializableMessageHash};
pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient};
pub use self::generation_session::Session as GenerationSession;
pub use self::encryption_session::Session as EncryptionSession;
pub use self::decryption_session::Session as DecryptionSession;

@ -33,7 +34,20 @@ pub use super::key_storage::tests::DummyKeyStorage;
#[cfg(test)]
pub use super::acl_storage::tests::DummyAclStorage;

pub type SessionId = DocumentAddress;
pub type SessionId = ServerKeyId;

#[derive(Debug, Clone)]
/// Session metadata.
pub struct SessionMeta {
	/// Key id.
	pub id: SessionId,
	/// Id of the node which started this session.
	pub master_node_id: NodeId,
	/// Id of the node on which this session is running.
	pub self_node_id: NodeId,
	/// Session threshold.
	pub threshold: usize,
}

#[derive(Clone, Debug, PartialEq)]
/// Errors which can occur during encryption/decryption sessions.
@ -44,6 +58,10 @@ pub enum Error {
	InvalidNodeId,
	/// Session with the given id already exists.
	DuplicateSessionId,
	/// Session with the same id is already completed.
	CompletedSessionId,
	/// Session is not ready to start yet (required data is not ready).
	NotStartedSessionId,
	/// Session with the given id is unknown.
	InvalidSessionId,
	/// Invalid number of nodes.
@ -61,6 +79,8 @@ pub enum Error {
	/// Current state of encryption/decryption session does not allow processing this request.
	/// This means that either there is some comm-failure or the node is misbehaving/cheating.
	InvalidStateForRequest,
	/// Request cannot be sent to/received from this node.
	InvalidNodeForRequest,
	/// Message or some data in the message was recognized as invalid.
	/// This means that the node is misbehaving/cheating.
	InvalidMessage,
@ -74,6 +94,8 @@ pub enum Error {
	Serde(String),
	/// Key storage error.
	KeyStorage(String),
	/// Consensus is unreachable.
	ConsensusUnreachable,
	/// ACL storage error.
	AccessDenied,
}
@ -102,18 +124,22 @@ impl fmt::Display for Error {
			Error::InvalidNodeAddress => write!(f, "invalid node address has been passed"),
			Error::InvalidNodeId => write!(f, "invalid node id has been passed"),
			Error::DuplicateSessionId => write!(f, "session with the same id is already registered"),
			Error::CompletedSessionId => write!(f, "session with the same id is already completed"),
			Error::NotStartedSessionId => write!(f, "not enough data to start session with the given id"),
			Error::InvalidSessionId => write!(f, "invalid session id has been passed"),
			Error::InvalidNodesCount => write!(f, "invalid nodes count"),
			Error::InvalidNodesConfiguration => write!(f, "invalid nodes configuration"),
			Error::InvalidThreshold => write!(f, "invalid threshold value has been passed"),
			Error::TooEarlyForRequest => write!(f, "session is not yet ready to process this request"),
			Error::InvalidStateForRequest => write!(f, "session is in invalid state for processing this request"),
			Error::InvalidNodeForRequest => write!(f, "invalid node for this request"),
			Error::InvalidMessage => write!(f, "invalid message is received"),
			Error::NodeDisconnected => write!(f, "node required for this operation is currently disconnected"),
			Error::EthKey(ref e) => write!(f, "cryptographic error {}", e),
			Error::Io(ref e) => write!(f, "i/o error {}", e),
			Error::Serde(ref e) => write!(f, "serde error {}", e),
			Error::KeyStorage(ref e) => write!(f, "key storage error {}", e),
			Error::ConsensusUnreachable => write!(f, "Consensus unreachable"),
			Error::AccessDenied => write!(f, "Access denied"),
		}
	}
@ -126,9 +152,13 @@ impl Into<String> for Error {
}

mod cluster;
mod cluster_sessions;
mod decryption_session;
mod encryption_session;
mod generation_session;
mod io;
mod math;
mod jobs;
pub mod math;
mod message;
mod signing_session;
mod net;
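To make the threshold convention concrete: `threshold` is the degree of the sharing polynomial, so `threshold + 1` cooperating nodes are required, not `threshold`. A hypothetical metadata value for a session over five nodes that needs three cooperating signers, with placeholder node ids:

	// Sketch only: node_a is a hypothetical NodeId, the key id is a placeholder.
	let meta = SessionMeta {
		id: SessionId::default(),   // placeholder key id
		master_node_id: node_a,     // hypothetical node id
		self_node_id: node_a,
		threshold: 2,               // t = 2 => t + 1 = 3 nodes must cooperate
	};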
706  secret_store/src/key_server_cluster/signing_session.rs  Normal file
@ -0,0 +1,706 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::collections::BTreeSet;
use std::sync::Arc;
use parking_lot::{Mutex, Condvar};
use ethkey::{Public, Secret, Signature};
use util::H256;
use key_server_cluster::{Error, NodeId, SessionId, SessionMeta, AclStorage, DocumentKeyShare};
use key_server_cluster::cluster::{Cluster};
use key_server_cluster::cluster_sessions::ClusterSession;
use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionParams as GenerationSessionParams,
	Session as GenerationSessionApi, SessionState as GenerationSessionState};
use key_server_cluster::message::{Message, SigningMessage, SigningConsensusMessage, SigningGenerationMessage,
	RequestPartialSignature, PartialSignature, SigningSessionCompleted, GenerationMessage, ConsensusMessage, SigningSessionError,
	InitializeConsensusSession, ConfirmConsensusInitialization};
use key_server_cluster::jobs::job_session::JobTransport;
use key_server_cluster::jobs::signing_job::{PartialSigningRequest, PartialSigningResponse, SigningJob};
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};

pub use key_server_cluster::decryption_session::DecryptionSessionId as SigningSessionId;

/// Signing session API.
pub trait Session: Send + Sync + 'static {
	/// Wait until session is completed. Returns the computed signature.
	fn wait(&self) -> Result<(Secret, Secret), Error>;
}

/// Distributed signing session.
/// Based on "Efficient Multi-Party Digital Signature using Adaptive Secret Sharing for Low-Power Devices in Wireless Network" paper.
/// Brief overview:
/// 1) initialization: master node (which has received request for signing the message) requests all other nodes to sign the message
/// 2) ACL check: all nodes which have received the request query the ACL-contract to check if the requestor has access to the private key
/// 3) partial signing: every node which has successfully checked access for the requestor does partial signing
/// 4) signing: master node receives all partial signatures of the secret and computes the signature
pub struct SessionImpl {
	/// Session core.
	core: SessionCore,
	/// Session data.
	data: Mutex<SessionData>,
}
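The four steps in the overview play out as the message exchange below (an informal sketch inferred from the handlers later in this file; M is the master node, S any other node of the selected consensus group, and the generation exchange repeats for as many rounds as the session-key DKG needs):

	// M -> S:  SigningConsensusMessage(InitializeConsensusSession)      ACL check on every node
	// S -> M:  SigningConsensusMessage(ConfirmConsensusInitialization)
	// M <-> S: SigningGenerationMessage(GenerationMessage::*)           one-time session key DKG
	// M -> S:  RequestPartialSignature { request_id, message_hash, nodes }
	// S -> M:  PartialSignature { request_id, partial_signature }
	// M -> S:  SigningSessionCompleted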

/// Immutable session data.
struct SessionCore {
	/// Session metadata.
	pub meta: SessionMeta,
	/// Signing session access key.
	pub access_key: Secret,
	/// Key share.
	pub key_share: DocumentKeyShare,
	/// Cluster which allows this node to send messages to other nodes in the cluster.
	pub cluster: Arc<Cluster>,
	/// SessionImpl completion condvar.
	pub completed: Condvar,
}

/// Signing consensus session type.
type SigningConsensusSession = ConsensusSession<SigningConsensusTransport, SigningJob, SigningJobTransport>;

/// Mutable session data.
struct SessionData {
	/// Session state.
	pub state: SessionState,
	/// Message hash.
	pub message_hash: Option<H256>,
	/// Consensus-based signing session.
	pub consensus_session: SigningConsensusSession,
	/// Session key generation session.
	pub generation_session: Option<GenerationSession>,
	/// Signing result.
	pub result: Option<Result<(Secret, Secret), Error>>,
}

/// Signing session state.
#[derive(Debug, PartialEq)]
pub enum SessionState {
	/// State when consensus is being established.
	ConsensusEstablishing,
	/// State when session key is being generated.
	SessionKeyGeneration,
	/// State when signature is being computed.
	SignatureComputing,
}

/// Session creation parameters.
pub struct SessionParams {
	/// Session metadata.
	pub meta: SessionMeta,
	/// Session access key.
	pub access_key: Secret,
	/// Key share.
	pub key_share: DocumentKeyShare,
	/// ACL storage.
	pub acl_storage: Arc<AclStorage>,
	/// Cluster.
	pub cluster: Arc<Cluster>,
}

/// Signing consensus transport.
struct SigningConsensusTransport {
	/// Session id.
	id: SessionId,
	/// Session access key.
	access_key: Secret,
	/// Cluster.
	cluster: Arc<Cluster>,
}

/// Signing key generation transport.
struct SessionKeyGenerationTransport {
	/// Session access key.
	access_key: Secret,
	/// Cluster.
	cluster: Arc<Cluster>,
	/// Other nodes ids.
	other_nodes_ids: BTreeSet<NodeId>,
}

/// Signing job transport.
struct SigningJobTransport {
	/// Session id.
	id: SessionId,
	/// Session access key.
	access_key: Secret,
	/// Cluster.
	cluster: Arc<Cluster>,
}

impl SessionImpl {
	/// Create new signing session.
	pub fn new(params: SessionParams, requester_signature: Option<Signature>) -> Result<Self, Error> {
		debug_assert_eq!(params.meta.threshold, params.key_share.threshold);
		debug_assert_eq!(params.meta.self_node_id == params.meta.master_node_id, requester_signature.is_some());

		use key_server_cluster::generation_session::{check_cluster_nodes, check_threshold};

		// check nodes and threshold
		let nodes = params.key_share.id_numbers.keys().cloned().collect();
		check_cluster_nodes(&params.meta.self_node_id, &nodes)?;
		check_threshold(params.key_share.threshold, &nodes)?;

		let consensus_transport = SigningConsensusTransport {
			id: params.meta.id.clone(),
			access_key: params.access_key.clone(),
			cluster: params.cluster.clone(),
		};

		Ok(SessionImpl {
			core: SessionCore {
				meta: params.meta.clone(),
				access_key: params.access_key,
				key_share: params.key_share,
				cluster: params.cluster,
				completed: Condvar::new(),
			},
			data: Mutex::new(SessionData {
				state: SessionState::ConsensusEstablishing,
				message_hash: None,
				consensus_session: match requester_signature {
					Some(requester_signature) => ConsensusSession::new_on_master(ConsensusSessionParams {
						meta: params.meta,
						acl_storage: params.acl_storage.clone(),
						consensus_transport: consensus_transport,
					}, requester_signature)?,
					None => ConsensusSession::new_on_slave(ConsensusSessionParams {
						meta: params.meta,
						acl_storage: params.acl_storage.clone(),
						consensus_transport: consensus_transport,
					})?,
				},
				generation_session: None,
				result: None,
			}),
		})
	}

	/// Initialize signing session on master node.
	pub fn initialize(&self, message_hash: H256) -> Result<(), Error> {
		let mut data = self.data.lock();
		data.message_hash = Some(message_hash);
		data.consensus_session.initialize(self.core.key_share.id_numbers.keys().cloned().collect())?;

		if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished {
			let generation_session = GenerationSession::new(GenerationSessionParams {
				id: self.core.meta.id.clone(),
				self_node_id: self.core.meta.self_node_id.clone(),
				key_storage: None,
				cluster: Arc::new(SessionKeyGenerationTransport {
					access_key: self.core.access_key.clone(),
					cluster: self.core.cluster.clone(),
					other_nodes_ids: BTreeSet::new()
				}),
			});
			generation_session.initialize(Public::default(), 0, vec![self.core.meta.self_node_id.clone()].into_iter().collect())?;

			debug_assert_eq!(generation_session.state(), GenerationSessionState::WaitingForGenerationConfirmation);
			let joint_public_and_secret = generation_session
				.joint_public_and_secret()
				.expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?;
			data.generation_session = Some(generation_session);
			data.state = SessionState::SignatureComputing;

			self.core.disseminate_jobs(&mut data.consensus_session, joint_public_and_secret.0, joint_public_and_secret.1, message_hash)?;

			debug_assert!(data.consensus_session.state() == ConsensusSessionState::Finished);
			data.result = Some(Ok(data.consensus_session.result()?));
			self.core.completed.notify_all();
		}

		Ok(())
	}

	/// Process signing message.
	pub fn process_message(&self, sender: &NodeId, message: &SigningMessage) -> Result<(), Error> {
		match message {
			&SigningMessage::SigningConsensusMessage(ref message) =>
				self.on_consensus_message(sender, message),
			&SigningMessage::SigningGenerationMessage(ref message) =>
				self.on_generation_message(sender, message),
			&SigningMessage::RequestPartialSignature(ref message) =>
				self.on_partial_signature_requested(sender, message),
			&SigningMessage::PartialSignature(ref message) =>
				self.on_partial_signature(sender, message),
			&SigningMessage::SigningSessionError(ref message) =>
				self.on_session_error(sender, message),
			&SigningMessage::SigningSessionCompleted(ref message) =>
				self.on_session_completed(sender, message),
		}
	}

	/// When consensus-related message is received.
	pub fn on_consensus_message(&self, sender: &NodeId, message: &SigningConsensusMessage) -> Result<(), Error> {
		debug_assert!(self.core.meta.id == *message.session);
		debug_assert!(self.core.access_key == *message.sub_session);
		debug_assert!(sender != &self.core.meta.self_node_id);

		let mut data = self.data.lock();
		let is_establishing_consensus = data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus;
		data.consensus_session.on_consensus_message(&sender, &message.message)?;

		let is_consensus_established = data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished;
		if self.core.meta.self_node_id != self.core.meta.master_node_id || !is_establishing_consensus || !is_consensus_established {
			return Ok(());
		}

		let consensus_group = data.consensus_session.select_consensus_group()?.clone();
		let mut other_consensus_group_nodes = consensus_group.clone();
		other_consensus_group_nodes.remove(&self.core.meta.self_node_id);

		let generation_session = GenerationSession::new(GenerationSessionParams {
			id: self.core.meta.id.clone(),
			self_node_id: self.core.meta.self_node_id.clone(),
			key_storage: None,
			cluster: Arc::new(SessionKeyGenerationTransport {
				access_key: self.core.access_key.clone(),
				cluster: self.core.cluster.clone(),
				other_nodes_ids: other_consensus_group_nodes,
			}),
		});
		generation_session.initialize(Public::default(), self.core.key_share.threshold, consensus_group)?;
		data.generation_session = Some(generation_session);
		data.state = SessionState::SessionKeyGeneration;

		Ok(())
	}

	/// When session key related message is received.
	pub fn on_generation_message(&self, sender: &NodeId, message: &SigningGenerationMessage) -> Result<(), Error> {
		debug_assert!(self.core.meta.id == *message.session);
		debug_assert!(self.core.access_key == *message.sub_session);
		debug_assert!(sender != &self.core.meta.self_node_id);

		let mut data = self.data.lock();

		if let &GenerationMessage::InitializeSession(ref message) = &message.message {
			if &self.core.meta.master_node_id != sender {
				return Err(Error::InvalidMessage);
			}

			let consensus_group: BTreeSet<NodeId> = message.nodes.keys().cloned().map(Into::into).collect();
			let mut other_consensus_group_nodes = consensus_group.clone();
			other_consensus_group_nodes.remove(&self.core.meta.self_node_id);

			let generation_session = GenerationSession::new(GenerationSessionParams {
				id: self.core.meta.id.clone(),
				self_node_id: self.core.meta.self_node_id.clone(),
				key_storage: None,
				cluster: Arc::new(SessionKeyGenerationTransport {
					access_key: self.core.access_key.clone(),
					cluster: self.core.cluster.clone(),
					other_nodes_ids: other_consensus_group_nodes
				}),
			});
			data.generation_session = Some(generation_session);
			data.state = SessionState::SessionKeyGeneration;
		}

		// process generation message in a short scope, so that data can be mutated afterwards
		{
			let generation_session = data.generation_session.as_ref().ok_or(Error::InvalidStateForRequest)?;
			let is_key_generating = generation_session.state() != GenerationSessionState::Finished;
			generation_session.process_message(sender, &message.message)?;

			let is_key_generated = generation_session.state() == GenerationSessionState::Finished;
			if !is_key_generating || !is_key_generated {
				return Ok(());
			}
		}

		data.state = SessionState::SignatureComputing;
		if self.core.meta.master_node_id != self.core.meta.self_node_id {
			return Ok(());
		}

		let message_hash = data.message_hash
			.expect("we are on master node; on master node message_hash is filled in initialize(); on_generation_message follows initialize; qed");
		let joint_public_and_secret = data.generation_session.as_ref()
			.expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")
			.joint_public_and_secret()
			.expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?;
		self.core.disseminate_jobs(&mut data.consensus_session, joint_public_and_secret.0, joint_public_and_secret.1, message_hash)
	}

	/// When partial signature is requested.
	pub fn on_partial_signature_requested(&self, sender: &NodeId, message: &RequestPartialSignature) -> Result<(), Error> {
		debug_assert!(self.core.meta.id == *message.session);
		debug_assert!(self.core.access_key == *message.sub_session);
		debug_assert!(sender != &self.core.meta.self_node_id);

		let mut data = self.data.lock();

		if sender != &self.core.meta.master_node_id {
			return Err(Error::InvalidMessage);
		}
		if data.state != SessionState::SignatureComputing {
			return Err(Error::InvalidStateForRequest);
		}

		let joint_public_and_secret = data.generation_session.as_ref()
			.expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")
			.joint_public_and_secret()
			.expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?;
		let signing_job = SigningJob::new_on_slave(self.core.meta.self_node_id.clone(), self.core.key_share.clone(), joint_public_and_secret.0, joint_public_and_secret.1)?;
		let signing_transport = self.core.signing_transport();

		data.consensus_session.on_job_request(sender, PartialSigningRequest {
			id: message.request_id.clone().into(),
			message_hash: message.message_hash.clone().into(),
			other_nodes_ids: message.nodes.iter().cloned().map(Into::into).collect(),
		}, signing_job, signing_transport)
	}

	/// When partial signature is received.
	pub fn on_partial_signature(&self, sender: &NodeId, message: &PartialSignature) -> Result<(), Error> {
		debug_assert!(self.core.meta.id == *message.session);
		debug_assert!(self.core.access_key == *message.sub_session);
		debug_assert!(sender != &self.core.meta.self_node_id);

		let mut data = self.data.lock();
		data.consensus_session.on_job_response(sender, PartialSigningResponse {
			request_id: message.request_id.clone().into(),
			partial_signature: message.partial_signature.clone().into(),
		})?;

		if data.consensus_session.state() != ConsensusSessionState::Finished {
			return Ok(());
		}

		self.core.cluster.broadcast(Message::Signing(SigningMessage::SigningSessionCompleted(SigningSessionCompleted {
			session: self.core.meta.id.clone().into(),
			sub_session: self.core.access_key.clone().into(),
		})))?;

		data.result = Some(Ok(data.consensus_session.result()?));
		self.core.completed.notify_all();

		Ok(())
	}

	/// When session is completed.
	pub fn on_session_completed(&self, sender: &NodeId, message: &SigningSessionCompleted) -> Result<(), Error> {
		debug_assert!(self.core.meta.id == *message.session);
		debug_assert!(self.core.access_key == *message.sub_session);
		debug_assert!(sender != &self.core.meta.self_node_id);

		self.data.lock().consensus_session.on_session_completed(sender)
	}

	/// When error has occurred on another node.
	pub fn on_session_error(&self, sender: &NodeId, message: &SigningSessionError) -> Result<(), Error> {
		self.process_node_error(Some(&sender), &message.error)
	}

	/// Process error from another node.
	fn process_node_error(&self, node: Option<&NodeId>, error: &String) -> Result<(), Error> {
		let mut data = self.data.lock();
		match {
			match node {
				Some(node) => data.consensus_session.on_node_error(node),
				None => data.consensus_session.on_session_timeout(),
			}
		} {
			Ok(false) => Ok(()),
			Ok(true) => {
				let message_hash = data.message_hash.as_ref().cloned()
					.expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed");
				let joint_public_and_secret = data.generation_session.as_ref()
					.expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed")
					.joint_public_and_secret()
					.expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed")?;
				let disseminate_result = self.core.disseminate_jobs(&mut data.consensus_session, joint_public_and_secret.0, joint_public_and_secret.1, message_hash);
				match disseminate_result {
					Ok(()) => Ok(()),
					Err(err) => {
						warn!("{}: signing session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node);

						data.result = Some(Err(err.clone()));
						self.core.completed.notify_all();
						Err(err)
					}
				}
			},
			Err(err) => {
				warn!("{}: signing session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node);

				data.result = Some(Err(err.clone()));
				self.core.completed.notify_all();
				Err(err)
			},
		}
	}
}

impl ClusterSession for SessionImpl {
	fn is_finished(&self) -> bool {
		let data = self.data.lock();
		data.consensus_session.state() == ConsensusSessionState::Failed
			|| data.consensus_session.state() == ConsensusSessionState::Finished
	}

	fn on_node_timeout(&self, node: &NodeId) {
		// ignore error, only state matters
		let _ = self.process_node_error(Some(node), &Error::NodeDisconnected.into());
	}

	fn on_session_timeout(&self) {
		// ignore error, only state matters
		let _ = self.process_node_error(None, &Error::NodeDisconnected.into());
	}
}

impl Session for SessionImpl {
	fn wait(&self) -> Result<(Secret, Secret), Error> {
		let mut data = self.data.lock();
		if !data.result.is_some() {
			self.core.completed.wait(&mut data);
		}

		data.result.as_ref()
			.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
			.clone()
	}
}

impl SessionKeyGenerationTransport {
	fn map_message(&self, message: Message) -> Result<Message, Error> {
		match message {
			Message::Generation(message) => Ok(Message::Signing(SigningMessage::SigningGenerationMessage(SigningGenerationMessage {
				session: message.session_id().clone().into(),
				sub_session: self.access_key.clone().into(),
				message: message,
			}))),
			_ => Err(Error::InvalidMessage),
		}
	}
}

impl Cluster for SessionKeyGenerationTransport {
	fn broadcast(&self, message: Message) -> Result<(), Error> {
		let message = self.map_message(message)?;
		for to in &self.other_nodes_ids {
			self.cluster.send(to, message.clone())?;
		}
		Ok(())
	}

	fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> {
		debug_assert!(self.other_nodes_ids.contains(to));
		self.cluster.send(to, self.map_message(message)?)
	}
}

impl SessionCore {
	pub fn signing_transport(&self) -> SigningJobTransport {
		SigningJobTransport {
			id: self.meta.id.clone(),
			access_key: self.access_key.clone(),
			cluster: self.cluster.clone()
		}
	}

	pub fn disseminate_jobs(&self, consensus_session: &mut SigningConsensusSession, session_public: Public, session_secret_share: Secret, message_hash: H256) -> Result<(), Error> {
		let signing_job = SigningJob::new_on_master(self.meta.self_node_id.clone(), self.key_share.clone(), session_public, session_secret_share, message_hash)?;
		consensus_session.disseminate_jobs(signing_job, self.signing_transport())
	}
}

impl JobTransport for SigningConsensusTransport {
	type PartialJobRequest = Signature;
	type PartialJobResponse = bool;

	fn send_partial_request(&self, node: &NodeId, request: Signature) -> Result<(), Error> {
		self.cluster.send(node, Message::Signing(SigningMessage::SigningConsensusMessage(SigningConsensusMessage {
			session: self.id.clone().into(),
			sub_session: self.access_key.clone().into(),
			message: ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
				requestor_signature: request.into(),
			})
		})))
	}

	fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> {
		self.cluster.send(node, Message::Signing(SigningMessage::SigningConsensusMessage(SigningConsensusMessage {
			session: self.id.clone().into(),
			sub_session: self.access_key.clone().into(),
			message: ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
				is_confirmed: response,
			})
		})))
	}
}

impl JobTransport for SigningJobTransport {
	type PartialJobRequest = PartialSigningRequest;
	type PartialJobResponse = PartialSigningResponse;

	fn send_partial_request(&self, node: &NodeId, request: PartialSigningRequest) -> Result<(), Error> {
		self.cluster.send(node, Message::Signing(SigningMessage::RequestPartialSignature(RequestPartialSignature {
			session: self.id.clone().into(),
			sub_session: self.access_key.clone().into(),
			request_id: request.id.into(),
			message_hash: request.message_hash.into(),
			nodes: request.other_nodes_ids.into_iter().map(Into::into).collect(),
		})))
	}

	fn send_partial_response(&self, node: &NodeId, response: PartialSigningResponse) -> Result<(), Error> {
		self.cluster.send(node, Message::Signing(SigningMessage::PartialSignature(PartialSignature {
			session: self.id.clone().into(),
			sub_session: self.access_key.clone().into(),
			request_id: response.request_id.into(),
			partial_signature: response.partial_signature.into(),
		})))
	}
}

#[cfg(test)]
mod tests {
	use std::sync::Arc;
	use std::collections::{BTreeMap, VecDeque};
	use ethkey::{self, Random, Generator, Public};
	use util::H256;
	use super::super::super::acl_storage::tests::DummyAclStorage;
	use key_server_cluster::{NodeId, SessionId, SessionMeta, Error, KeyStorage};
	use key_server_cluster::cluster::tests::DummyCluster;
	use key_server_cluster::generation_session::{Session as GenerationSession};
	use key_server_cluster::generation_session::tests::MessageLoop as KeyGenerationMessageLoop;
	use key_server_cluster::math;
	use key_server_cluster::message::{Message, SigningMessage};
	use key_server_cluster::signing_session::{Session, SessionImpl, SessionParams};

	struct Node {
		pub node_id: NodeId,
		pub cluster: Arc<DummyCluster>,
		pub session: SessionImpl,
	}

	struct MessageLoop {
		pub session_id: SessionId,
		pub nodes: BTreeMap<NodeId, Node>,
		pub queue: VecDeque<(NodeId, NodeId, Message)>,
	}

	impl MessageLoop {
		pub fn new(gl: &KeyGenerationMessageLoop) -> Self {
			let mut nodes = BTreeMap::new();
			let session_id = gl.session_id.clone();
			let requester = Random.generate().unwrap();
			let signature = Some(ethkey::sign(requester.secret(), &SessionId::default()).unwrap());
			let master_node_id = gl.nodes.keys().nth(0).unwrap().clone();
			for (i, (gl_node_id, gl_node)) in gl.nodes.iter().enumerate() {
				let acl_storage = Arc::new(DummyAclStorage::default());
				let cluster = Arc::new(DummyCluster::new(gl_node_id.clone()));
				let session = SessionImpl::new(SessionParams {
					meta: SessionMeta {
						id: session_id.clone(),
						self_node_id: gl_node_id.clone(),
						master_node_id: master_node_id.clone(),
						threshold: gl_node.key_storage.get(&session_id).unwrap().threshold,
					},
					access_key: "834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec".parse().unwrap(),
					key_share: gl_node.key_storage.get(&session_id).unwrap(),
					acl_storage: acl_storage,
					cluster: cluster.clone(),
				}, if i == 0 { signature.clone() } else { None }).unwrap();
				nodes.insert(gl_node_id.clone(), Node { node_id: gl_node_id.clone(), cluster: cluster, session: session });
			}

			let nodes_ids: Vec<_> = nodes.keys().cloned().collect();
			for node in nodes.values() {
				for node_id in &nodes_ids {
					node.cluster.add_node(node_id.clone());
				}
			}

			MessageLoop {
				session_id: session_id,
				nodes: nodes,
				queue: VecDeque::new(),
			}
		}

		pub fn master(&self) -> &SessionImpl {
			&self.nodes.values().nth(0).unwrap().session
		}

		pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> {
			self.nodes.values()
				.filter_map(|n| n.cluster.take_message().map(|m| (n.node_id.clone(), m.0, m.1)))
				.nth(0)
				.or_else(|| self.queue.pop_front())
		}

		pub fn process_message(&mut self, mut msg: (NodeId, NodeId, Message)) -> Result<(), Error> {
			let mut is_queued_message = false;
			loop {
				match {
					match msg.2 {
						Message::Signing(SigningMessage::SigningConsensusMessage(ref message)) => self.nodes[&msg.1].session.on_consensus_message(&msg.0, &message),
						Message::Signing(SigningMessage::SigningGenerationMessage(ref message)) => self.nodes[&msg.1].session.on_generation_message(&msg.0, &message),
						Message::Signing(SigningMessage::RequestPartialSignature(ref message)) => self.nodes[&msg.1].session.on_partial_signature_requested(&msg.0, &message),
						Message::Signing(SigningMessage::PartialSignature(ref message)) => self.nodes[&msg.1].session.on_partial_signature(&msg.0, &message),
						Message::Signing(SigningMessage::SigningSessionCompleted(ref message)) => self.nodes[&msg.1].session.on_session_completed(&msg.0, &message),
						_ => panic!("unexpected"),
					}
				} {
					Ok(_) => {
						// message processed: drain previously deferred messages before returning
						if let Some(message) = self.queue.pop_front() {
							msg = message;
							is_queued_message = true;
							continue;
						}
						return Ok(());
					},
					Err(Error::TooEarlyForRequest) => {
						// session is not yet ready for this message: defer it, preserving
						// the relative order of already-deferred messages
						if is_queued_message {
							self.queue.push_front(msg);
						} else {
							self.queue.push_back(msg);
						}
						return Ok(());
					},
					Err(err) => return Err(err),
				}
			}
		}
	}

	#[test]
	fn complete_gen_sign_session() {
		let test_cases = [(0, 1), (0, 5), (2, 5), (3, 5)];
		for &(threshold, num_nodes) in &test_cases {
			// run key generation sessions
			let mut gl = KeyGenerationMessageLoop::new(num_nodes);
			gl.master().initialize(Public::default(), threshold, gl.nodes.keys().cloned().collect()).unwrap();
			while let Some((from, to, message)) = gl.take_message() {
				gl.process_message((from, to, message)).unwrap();
			}

			// run signing session
			let message_hash = H256::from(777);
			let mut sl = MessageLoop::new(&gl);
			sl.master().initialize(message_hash).unwrap();
			while let Some((from, to, message)) = sl.take_message() {
				sl.process_message((from, to, message)).unwrap();
			}

			// verify signature
			let public = gl.master().joint_public_and_secret().unwrap().unwrap().0;
			let signature = sl.master().wait().unwrap();
			assert!(math::verify_signature(&public, &signature, &message_hash).unwrap());
		}
	}
}
@ -19,12 +19,17 @@ use std::collections::BTreeMap;
|
||||
use serde_json;
|
||||
use ethkey::{Secret, Public};
|
||||
use util::Database;
|
||||
use types::all::{Error, ServiceConfiguration, DocumentAddress, NodeId};
|
||||
use types::all::{Error, ServiceConfiguration, ServerKeyId, NodeId};
|
||||
use serialization::{SerializablePublic, SerializableSecret};
|
||||
|
||||
/// Key of version value.
|
||||
const DB_META_KEY_VERSION: &'static [u8; 7] = b"version";
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
/// Encrypted key share, stored by key storage on the single key server.
|
||||
pub struct DocumentKeyShare {
|
||||
/// Author of the entry.
|
||||
pub author: Public,
|
||||
/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
|
||||
pub threshold: usize,
|
||||
/// Nodes ids numbers.
|
||||
@ -32,19 +37,21 @@ pub struct DocumentKeyShare {
	/// Node secret share.
	pub secret_share: Secret,
	/// Common (shared) encryption point.
	pub common_point: Public,
	pub common_point: Option<Public>,
	/// Encrypted point.
	pub encrypted_point: Public,
	pub encrypted_point: Option<Public>,
}
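
With both points now `Option`s, a single `DocumentKeyShare` can describe either a bare server key (no document key bound yet) or a fully bound one. A minimal sketch of the two states, using the same `Random`/`Generator` helpers as the tests below (all field values are placeholders):

	// Sketch: a share right after server key generation, before any document key exists.
	let server_key_only = DocumentKeyShare {
		author: Public::default(),
		threshold: 1,
		id_numbers: vec![(Random.generate().unwrap().public().clone(),
			Random.generate().unwrap().secret().clone())].into_iter().collect(),
		secret_share: Random.generate().unwrap().secret().clone(),
		common_point: None,      // no document key bound yet
		encrypted_point: None,
	};

	// Sketch: the same share once a document key has been stored against it.
	let bound = DocumentKeyShare {
		common_point: Some(Random.generate().unwrap().public().clone()),
		encrypted_point: Some(Random.generate().unwrap().public().clone()),
		..server_key_only
	};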

/// Document encryption keys storage
pub trait KeyStorage: Send + Sync {
	/// Insert document encryption key
	fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error>;
	fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error>;
	/// Update document encryption key
	fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error>;
	/// Get document encryption key
	fn get(&self, document: &DocumentAddress) -> Result<DocumentKeyShare, Error>;
	fn get(&self, document: &ServerKeyId) -> Result<DocumentKeyShare, Error>;
	/// Check if storage contains document encryption key
	fn contains(&self, document: &DocumentAddress) -> bool;
	fn contains(&self, document: &ServerKeyId) -> bool;
}
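
`update` is intentionally just an upsert in both implementations that follow (last write wins). A minimal usage sketch, assuming the `DummyKeyStorage` test helper defined later in this file and some already-built `share`/`updated_share` values (both placeholders):

	// Sketch: exercise the trait through the in-memory test storage.
	let storage = DummyKeyStorage::default();
	let key_id = ServerKeyId::from(42);
	storage.insert(key_id.clone(), share).unwrap();
	assert!(storage.contains(&key_id));
	storage.update(key_id.clone(), updated_share).unwrap(); // overwrites the stored share
	let restored = storage.get(&key_id).unwrap();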

/// Persistent document encryption keys storage
@ -53,8 +60,8 @@ pub struct PersistentKeyStorage {
}

#[derive(Serialize, Deserialize)]
/// Encrypted key share, as it is stored by key storage on the single key server.
struct SerializableDocumentKeyShare {
/// V0 of encrypted key share, as it is stored by key storage on the single key server.
struct SerializableDocumentKeyShareV0 {
	/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
	pub threshold: usize,
	/// Nodes' id numbers.
@ -67,6 +74,23 @@ struct SerializableDocumentKeyShare {
	pub encrypted_point: SerializablePublic,
}

#[derive(Serialize, Deserialize)]
/// V1 of encrypted key share, as it is stored by key storage on the single key server.
struct SerializableDocumentKeyShareV1 {
	/// Author of the entry.
	pub author: SerializablePublic,
	/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
	pub threshold: usize,
	/// Nodes' id numbers.
	pub id_numbers: BTreeMap<SerializablePublic, SerializableSecret>,
	/// Node secret share.
	pub secret_share: SerializableSecret,
	/// Common (shared) encryption point.
	pub common_point: Option<SerializablePublic>,
	/// Encrypted point.
	pub encrypted_point: Option<SerializablePublic>,
}

impl PersistentKeyStorage {
	/// Create new persistent document encryption keys storage
	pub fn new(config: &ServiceConfiguration) -> Result<Self, Error> {
@ -74,57 +98,96 @@ impl PersistentKeyStorage {
		db_path.push("db");
		let db_path = db_path.to_str().ok_or(Error::Database("Invalid secretstore path".to_owned()))?;

		let db = Database::open_default(&db_path).map_err(Error::Database)?;
		let db = upgrade_db(db)?;

		Ok(PersistentKeyStorage {
			db: Database::open_default(&db_path).map_err(Error::Database)?,
			db: db,
		})
	}
}

fn upgrade_db(db: Database) -> Result<Database, Error> {
	let version = db.get(None, DB_META_KEY_VERSION).map_err(Error::Database)?;
	let version = version.and_then(|v| v.get(0).cloned()).unwrap_or(0);
	match version {
		0 => {
			let mut batch = db.transaction();
			batch.put(None, DB_META_KEY_VERSION, &[1]);
			for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner) {
				let v0_key = serde_json::from_slice::<SerializableDocumentKeyShareV0>(&db_value).map_err(|e| Error::Database(e.to_string()))?;
				let v1_key = SerializableDocumentKeyShareV1 {
					// author is used in separate generation + encryption sessions;
					// in v0 there were only combined generation-and-encryption (GenEnc)
					// sessions, so no author was recorded; default it.
					author: Public::default().into(),
					threshold: v0_key.threshold,
					id_numbers: v0_key.id_numbers,
					secret_share: v0_key.secret_share,
					common_point: Some(v0_key.common_point),
					encrypted_point: Some(v0_key.encrypted_point),
				};
				let db_value = serde_json::to_vec(&v1_key).map_err(|e| Error::Database(e.to_string()))?;
				batch.put(None, &*db_key, &*db_value);
			}
			db.write(batch).map_err(Error::Database)?;
			Ok(db)
		},
		1 => Ok(db),
		_ => Err(Error::Database(format!("unsupported SecretStore database version: {}", version))),
	}
}
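
The meta key makes migrations cheap to extend: each arm handles exactly one on-disk version, and the latest version falls through untouched. If another format revision were ever needed, a chained variant keeps every step single-purpose; a hypothetical sketch (`upgrade_v0_to_v1` would be the v0 arm above, factored out; no v2 exists in this change):

	// Hypothetical sketch: chain one-step migrations, recursing until current.
	fn upgrade_db_chained(db: Database) -> Result<Database, Error> {
		let version = db.get(None, DB_META_KEY_VERSION).map_err(Error::Database)?
			.and_then(|v| v.get(0).cloned()).unwrap_or(0);
		match version {
			0 => upgrade_db_chained(upgrade_v0_to_v1(db)?), // re-check after each step
			1 => Ok(db),                                    // current version
			_ => Err(Error::Database(format!("unsupported SecretStore database version: {}", version))),
		}
	}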

impl KeyStorage for PersistentKeyStorage {
	fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error> {
		let key: SerializableDocumentKeyShare = key.into();
	fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> {
		let key: SerializableDocumentKeyShareV1 = key.into();
		let key = serde_json::to_vec(&key).map_err(|e| Error::Database(e.to_string()))?;
		let mut batch = self.db.transaction();
		batch.put(None, &document, &key);
		self.db.write(batch).map_err(Error::Database)
	}

	fn get(&self, document: &DocumentAddress) -> Result<DocumentKeyShare, Error> {
	fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> {
		self.insert(document, key)
	}

	fn get(&self, document: &ServerKeyId) -> Result<DocumentKeyShare, Error> {
		self.db.get(None, document)
			.map_err(Error::Database)?
			.ok_or(Error::DocumentNotFound)
			.map(|key| key.into_vec())
			.and_then(|key| serde_json::from_slice::<SerializableDocumentKeyShare>(&key).map_err(|e| Error::Database(e.to_string())))
			.and_then(|key| serde_json::from_slice::<SerializableDocumentKeyShareV1>(&key).map_err(|e| Error::Database(e.to_string())))
			.map(Into::into)
	}

	fn contains(&self, document: &DocumentAddress) -> bool {
	fn contains(&self, document: &ServerKeyId) -> bool {
		self.db.get(None, document)
			.map(|k| k.is_some())
			.unwrap_or(false)
	}
}

impl From<DocumentKeyShare> for SerializableDocumentKeyShare {
impl From<DocumentKeyShare> for SerializableDocumentKeyShareV1 {
	fn from(key: DocumentKeyShare) -> Self {
		SerializableDocumentKeyShare {
		SerializableDocumentKeyShareV1 {
			author: key.author.into(),
			threshold: key.threshold,
			id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
			secret_share: key.secret_share.into(),
			common_point: key.common_point.into(),
			encrypted_point: key.encrypted_point.into(),
			common_point: key.common_point.map(Into::into),
			encrypted_point: key.encrypted_point.map(Into::into),
		}
	}
}

impl From<SerializableDocumentKeyShare> for DocumentKeyShare {
	fn from(key: SerializableDocumentKeyShare) -> Self {
impl From<SerializableDocumentKeyShareV1> for DocumentKeyShare {
	fn from(key: SerializableDocumentKeyShareV1) -> Self {
		DocumentKeyShare {
			author: key.author.into(),
			threshold: key.threshold,
			id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
			secret_share: key.secret_share.into(),
			common_point: key.common_point.into(),
			encrypted_point: key.encrypted_point.into(),
			common_point: key.common_point.map(Into::into),
			encrypted_point: key.encrypted_point.map(Into::into),
		}
	}
}
@ -133,28 +196,36 @@ impl From<SerializableDocumentKeyShare> for DocumentKeyShare {
pub mod tests {
	use std::collections::{BTreeMap, HashMap};
	use parking_lot::RwLock;
	use serde_json;
	use devtools::RandomTempPath;
	use ethkey::{Random, Generator};
	use super::super::types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, DocumentAddress};
	use super::{KeyStorage, PersistentKeyStorage, DocumentKeyShare};
	use ethkey::{Random, Generator, Public, Secret};
	use util::Database;
	use super::super::types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, ServerKeyId};
	use super::{DB_META_KEY_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare,
		SerializableDocumentKeyShareV0, SerializableDocumentKeyShareV1, upgrade_db};

	#[derive(Default)]
	/// In-memory document encryption keys storage
	pub struct DummyKeyStorage {
		keys: RwLock<HashMap<DocumentAddress, DocumentKeyShare>>,
		keys: RwLock<HashMap<ServerKeyId, DocumentKeyShare>>,
	}

	impl KeyStorage for DummyKeyStorage {
		fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error> {
		fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> {
			self.keys.write().insert(document, key);
			Ok(())
		}

		fn get(&self, document: &DocumentAddress) -> Result<DocumentKeyShare, Error> {
		fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> {
			self.keys.write().insert(document, key);
			Ok(())
		}

		fn get(&self, document: &ServerKeyId) -> Result<DocumentKeyShare, Error> {
			self.keys.read().get(document).cloned().ok_or(Error::DocumentNotFound)
		}

		fn contains(&self, document: &DocumentAddress) -> bool {
		fn contains(&self, document: &ServerKeyId) -> bool {
			self.keys.read().contains_key(document)
		}
	}
@ -180,27 +251,29 @@ pub mod tests {
			},
		};

		let key1 = DocumentAddress::from(1);
		let key1 = ServerKeyId::from(1);
		let value1 = DocumentKeyShare {
			author: Public::default(),
			threshold: 100,
			id_numbers: vec![
				(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone())
			].into_iter().collect(),
			secret_share: Random.generate().unwrap().secret().clone(),
			common_point: Random.generate().unwrap().public().clone(),
			encrypted_point: Random.generate().unwrap().public().clone(),
			common_point: Some(Random.generate().unwrap().public().clone()),
			encrypted_point: Some(Random.generate().unwrap().public().clone()),
		};
		let key2 = DocumentAddress::from(2);
		let key2 = ServerKeyId::from(2);
		let value2 = DocumentKeyShare {
			author: Public::default(),
			threshold: 200,
			id_numbers: vec![
				(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone())
			].into_iter().collect(),
			secret_share: Random.generate().unwrap().secret().clone(),
			common_point: Random.generate().unwrap().public().clone(),
			encrypted_point: Random.generate().unwrap().public().clone(),
			common_point: Some(Random.generate().unwrap().public().clone()),
			encrypted_point: Some(Random.generate().unwrap().public().clone()),
		};
		let key3 = DocumentAddress::from(3);
		let key3 = ServerKeyId::from(3);

		let key_storage = PersistentKeyStorage::new(&config).unwrap();
		key_storage.insert(key1.clone(), value1.clone()).unwrap();
@ -215,4 +288,43 @@ pub mod tests {
		assert_eq!(key_storage.get(&key2), Ok(value2));
		assert_eq!(key_storage.get(&key3), Err(Error::DocumentNotFound));
	}

	#[test]
	fn upgrade_db_0_to_1() {
		let db_path = RandomTempPath::create_dir();
		let db = Database::open_default(db_path.as_str()).unwrap();

		// prepare v0 database
		{
			let key = serde_json::to_vec(&SerializableDocumentKeyShareV0 {
				threshold: 777,
				id_numbers: vec![(
					"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(),
					"281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse::<Secret>().unwrap().into(),
				)].into_iter().collect(),
				secret_share: "00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::<Secret>().unwrap().into(),
				common_point: "99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".into(),
				encrypted_point: "7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".into(),
			}).unwrap();
			let mut batch = db.transaction();
			batch.put(None, &[7], &key);
			db.write(batch).unwrap();
		}

		// upgrade database
		let db = upgrade_db(db).unwrap();

		// check upgrade
		assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], 1);
		let key = serde_json::from_slice::<SerializableDocumentKeyShareV1>(&db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap()).unwrap();
		assert_eq!(Public::default(), key.author.clone().into());
		assert_eq!(777, key.threshold);
		assert_eq!(vec![(
			"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse::<Public>().unwrap(),
			"281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse::<Secret>().unwrap(),
		)], key.id_numbers.clone().into_iter().map(|(k, v)| (k.into(), v.into())).collect::<Vec<(Public, Secret)>>());
		assert_eq!("00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::<Secret>().unwrap(), key.secret_share.into());
		assert_eq!(Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".parse::<Public>().unwrap()), key.common_point.clone().map(Into::into));
		assert_eq!(Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".parse::<Public>().unwrap()), key.encrypted_point.clone().map(Into::into));
	}
}

@ -60,7 +60,7 @@ mod serialization;
use std::sync::Arc;
use ethcore::client::Client;

pub use types::all::{DocumentAddress, DocumentKey, DocumentEncryptedKey, RequestSignature, Public,
pub use types::all::{ServerKeyId, EncryptedDocumentKey, RequestSignature, Public,
	Error, NodeAddress, ServiceConfiguration, ClusterConfiguration};
pub use traits::{KeyServer};
@ -23,9 +23,12 @@ use serde::de::{Visitor, Error as SerdeError};
use ethkey::{Public, Secret, Signature};
use util::{H256, Bytes};

/// Serializable message hash.
pub type SerializableMessageHash = SerializableH256;

#[derive(Clone, Debug, Serialize, Deserialize)]
/// Serializable shadow decryption result.
pub struct SerializableDocumentEncryptedKeyShadow {
pub struct SerializableEncryptedDocumentKeyShadow {
	/// Decrypted secret point. It is partially decrypted if shadow decryption was requested.
	pub decrypted_secret: SerializablePublic,
	/// Shared common point.
@ -14,21 +14,63 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey, DocumentEncryptedKeyShadow};
use types::all::{Error, Public, ServerKeyId, MessageHash, EncryptedMessageSignature, RequestSignature, EncryptedDocumentKey,
	EncryptedDocumentKeyShadow};

#[ipc(client_ident="RemoteKeyServer")]
/// Secret store key server
pub trait KeyServer: Send + Sync {
	/// Generate encryption key for given document.
	fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result<DocumentEncryptedKey, Error>;
	/// Request encryption key of given document for given requestor
	fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result<DocumentEncryptedKey, Error>;
	/// Request encryption key of given document for given requestor.
	/// This method does not reveal document_key to any KeyServer, but it requires additional actions on client.
	/// To calculate decrypted key on client:
/// Server key (SK) generator.
pub trait ServerKeyGenerator {
	/// Generate new SK.
	/// `key_id` is the caller-provided identifier of the generated SK.
	/// `signature` is `key_id`, signed with the caller public key.
	/// `threshold + 1` is the minimal number of nodes required to restore the private key.
	/// Result is the public portion of the SK.
	fn generate_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<Public, Error>;
}

/// Document key (DK) server.
pub trait DocumentKeyServer: ServerKeyGenerator {
	/// Store externally generated DK.
	/// `key_id` is the identifier of a previously generated SK.
	/// `signature` is `key_id`, signed with the caller public key. Caller must be the same as in the `generate_key` call.
	/// `common_point` is the result of the `k * T` expression, where `T` is the generation point and `k` is a random scalar in the EC field.
	/// `encrypted_document_key` is the result of the `M + k * y` expression, where `M` is the unencrypted document key (a point on the EC),
	/// `k` is the same scalar used in the `common_point` calculation and `y` is the previously generated public part of the SK.
	fn store_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, common_point: Public, encrypted_document_key: Public) -> Result<(), Error>;
	/// Generate and store both SK and DK. This is a shortcut for sequential calls of `generate_key` and `store_document_key`.
	/// The only difference is that DK is generated by DocumentKeyServer (which might be considered unsafe).
	/// `key_id` is the caller-provided identifier of the generated SK.
	/// `signature` is `key_id`, signed with the caller public key.
	/// `threshold + 1` is the minimal number of nodes required to restore the private key.
	/// Result is a DK, encrypted with the caller public key.
	fn generate_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<EncryptedDocumentKey, Error>;
	/// Restore previously stored DK.
	/// DK is decrypted on the key server (which might be considered unsafe), and then encrypted with the caller public key.
	/// `key_id` is the identifier of a previously generated SK.
	/// `signature` is `key_id`, signed with the caller public key. Caller must be in the ACL for this function to succeed.
	/// Result is a DK, encrypted with the caller public key.
	fn restore_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKey, Error>;
	/// Restore previously stored DK.
	/// To decrypt DK on client:
	/// 1) use requestor secret key to decrypt secret coefficients from result.decrypt_shadows
	/// 2) calculate decrypt_shadows_sum = sum of all secrets from (1)
	/// 3) calculate decrypt_shadow_point: decrypt_shadows_sum * result.common_point
	/// 4) calculate decrypted_secret: result.decrypted_secret + decrypt_shadow_point
	fn document_key_shadow(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result<DocumentEncryptedKeyShadow, Error>;
	/// Result is a DK shadow.
	fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKeyShadow, Error>;
}
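
The `k * T` / `M + k * y` expressions above are plain EC ElGamal, and steps 1-4 are its distributed inverse. A client-side sketch of both directions; `random_scalar`, `generation_point`, `public_mul_secret`, `public_add`, `secret_sum` and `ecies_decrypt` are assumed helper names, not actual ethkey API:

	// Hypothetical sketch only; helper names are assumptions.
	// Encryption, before store_document_key (server_public is y from generate_key):
	let k = random_scalar();                                       // ephemeral scalar
	let common_point = public_mul_secret(&generation_point(), &k); // k * T
	let encrypted_document_key = public_add(&document_key,         // M + k * y
		&public_mul_secret(&server_public, &k));

	// Decryption, after restore_document_key_shadow (steps 1-4 above):
	let shadows: Vec<Secret> = result.decrypt_shadows.iter()
		.map(|shadow| ecies_decrypt(&requestor_secret, shadow))    // 1) decrypt coefficients
		.collect();
	let shadows_sum = secret_sum(&shadows);                        // 2) sum them
	let shadow_point = public_mul_secret(&result.common_point, &shadows_sum); // 3)
	let document_key = public_add(&result.decrypted_secret, &shadow_point);   // 4) recovered M

In plain ElGamal terms this recovers `M = (M + k*y) - x*(k*T)` for `y = x*T`; the sum-of-shadows construction lets the nodes contribute their part of that term jointly, without any node revealing its individual share of `x`.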

/// Message signer.
pub trait MessageSigner: ServerKeyGenerator {
	/// Sign message with previously generated SK.
	/// `key_id` is the caller-provided identifier of the generated SK.
	/// `signature` is `key_id`, signed with the caller public key.
	/// `message` is the message to be signed.
	/// Result is a signed message, encrypted with the caller public key.
	fn sign_message(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result<EncryptedMessageSignature, Error>;
}
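
End to end, the signing contract is one `generate_key` per key id followed by any number of `sign_message` calls against it; the result is only readable by the requestor. A hypothetical usage sketch against some `KeyServer` implementation `server` (`ecies_decrypt` is an assumed helper, `sign` is ethkey's signing function):

	// Hypothetical sketch of the requestor-side flow.
	let key_id = ServerKeyId::from(1);
	let requestor = Random.generate().unwrap();
	let signature = sign(requestor.secret(), &key_id).unwrap();   // proves ownership of key_id

	let _server_key: Public = server.generate_key(&key_id, &signature, 1).unwrap();
	let encrypted = server.sign_message(&key_id, &signature, MessageHash::from(777)).unwrap();
	let message_signature = ecies_decrypt(requestor.secret(), &encrypted); // requestor-only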


#[ipc(client_ident="RemoteKeyServer")]
/// Key server.
pub trait KeyServer: DocumentKeyServer + MessageSigner + Send + Sync {
}

@ -24,12 +24,14 @@ use key_server_cluster;

/// Node id.
pub type NodeId = ethkey::Public;
/// Document address type.
pub type DocumentAddress = util::H256;
/// Document key type.
pub type DocumentKey = util::Bytes;
/// Encrypted key type.
pub type DocumentEncryptedKey = util::Bytes;
/// Server key id. When the key is used to encrypt a document, this may be the hash of the document contents.
pub type ServerKeyId = util::H256;
/// Encrypted document key type.
pub type EncryptedDocumentKey = util::Bytes;
/// Message hash.
pub type MessageHash = util::H256;
/// Message signature.
pub type EncryptedMessageSignature = util::Bytes;
/// Request signature type.
pub type RequestSignature = ethkey::Signature;
/// Public key type.
@ -95,7 +97,7 @@ pub struct ClusterConfiguration {
#[derive(Clone, Debug, PartialEq)]
#[binary]
/// Shadow decryption result.
pub struct DocumentEncryptedKeyShadow {
pub struct EncryptedDocumentKeyShadow {
	/// Decrypted secret point. It is partially decrypted if shadow decryption was requested.
	pub decrypted_secret: ethkey::Public,
	/// Shared common point.