SecretStore: versioned keys (#6910)
* SecretStore: first key versions flush
* SecretStore: key versions in encryption session
* SecretStore: flush key versions negotiation session
* SecretStore: connected key version negotiation session to cluster
* SecretStore: cluster sessions container refactoring
* SecretStore: flush
* SecretStore: flush key versions
* SecretStore: flush
* SecretStore: delegation proto
* SecretStore: decryption_session_is_delegated_when_node_does_not_have_key_share
* SecretStore: fixed version in decryption session
* SecretStore: signing_session_is_delegated_when_node_does_not_have_key_share
* SecretStore: started restoring admin sessions
* SecretStore: restoring admin sessions
* SecretStore: removed obsolete ShareRemove && ShareMove sessions
* SecretStore: ShareAdd math tests only require old_t+1 nodes
* SecretStore: ShareAdd revamp using new math backend
* SecretStore: do not include isolated nodes into consensus_group
* SecretStore: ServersSetChange + ShareAdd revamp
* removed debug printlns
* SecretStore: key version negotiation tests
* SecretStore: removed debug/merge artifacts
* SecretStore: fixed master node selection
* SecretStore: cleanup + tests + fixes
* SecretStore: uncommented tests
* SecretStore: cleaning up
* SecretStore: cleaning up + tests
* SecretStore: cleaning up
* SecretStore: cleaning up && tests
* SecretStore: fixing TODOs
* SecretStore: fixing TODOs + cleanup
* SecretStore: fixing TODOs
* SecretStore: nodes_add_to_the_node_with_obsolete_version
* SecretStore: nodes_add_fails_when_not_enough_share_owners_are_connected
* SecretStore: tests
* SecretStore: signing && delegation tests
* SecretStore: signing && decryption tests when some nodes are isolated
* SecretStore: sessions_are_removed_when_initialization_fails
* SecretStore: cleaning up
* SecretStore: removed obsolete comments
* SecretStore: signing_session_completes_if_node_does_not_have_a_share
Parent: 713bba00ac
Commit: 7703cd226b
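Context for the diff below, as a reading aid rather than part of the commit: with this change a key share no longer holds a single share, but a list of versions, and decryption, signing and servers-set-change sessions first negotiate which version (and which master node) to use. A minimal sketch of that data model, with field names taken from the DocumentKeyShareVersion literal in the tests further down and simplified stand-in types, might look like this:

use std::collections::BTreeMap;

// Simplified stand-ins for the real H256 / NodeId / Secret types.
type H256 = [u8; 32];
type NodeId = u64;
type Secret = [u8; 32];

/// One version of a key share (field names follow the DocumentKeyShareVersion
/// literal in the tests below).
struct DocumentKeyShareVersion {
	/// Version identifier.
	hash: H256,
	/// Id numbers of the nodes that hold a share of this version.
	id_numbers: BTreeMap<NodeId, Secret>,
	/// This node's secret share for the version.
	secret_share: Secret,
}

/// A key share now carries every version this node knows about; decryption,
/// signing and share-change sessions first agree on which version to use.
struct DocumentKeyShare {
	threshold: usize,
	versions: Vec<DocumentKeyShareVersion>,
}

The negotiation result is a (version hash, master node) pair, which is exactly what the new key version negotiation session introduced by this commit computes.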
@@ -106,7 +106,7 @@ impl DocumentKeyServer for KeyServerImpl {
 			.map_err(|_| Error::BadSignature)?;

 		// decrypt document key
-		let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), signature.clone(), false)?;
+		let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), signature.clone(), None, false)?;
 		let document_key = decryption_session.wait()?.decrypted_secret;

 		// encrypt document key with requestor public key
@@ -116,7 +116,7 @@ impl DocumentKeyServer for KeyServerImpl {
 	}

 	fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKeyShadow, Error> {
-		let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), signature.clone(), true)?;
+		let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), signature.clone(), None, true)?;
 		decryption_session.wait().map_err(Into::into)
 	}
 }
@@ -128,7 +128,7 @@ impl MessageSigner for KeyServerImpl {
 			.map_err(|_| Error::BadSignature)?;

 		// sign message
-		let signing_session = self.data.lock().cluster.new_signing_session(key_id.clone(), signature.clone(), message)?;
+		let signing_session = self.data.lock().cluster.new_signing_session(key_id.clone(), signature.clone(), None, message)?;
 		let message_signature = signing_session.wait()?;

 		// compose two message signature components into single one
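The only change in the three hunks above is one extra argument threaded into the cluster session constructors, passed as None at these call sites. A plausible reading of the new signatures (an assumption for illustration; the trait definition is not part of this excerpt) is that the extra parameter selects a key share version, with None meaning "negotiate one":

use std::sync::Arc;

// Hypothetical stand-in types; the real ones come from ethkey / key_server_cluster.
type ServerKeyId = [u8; 32];
type RequestSignature = Vec<u8>;
type H256 = [u8; 32];
type Error = String;
struct DecryptionSession;
struct SigningSession;

/// Assumed shape of the cluster client API after this change: the new third
/// argument picks a key share version, `None` triggers key version negotiation.
trait ClusterClient {
	fn new_decryption_session(&self, key_id: ServerKeyId, signature: RequestSignature,
		version: Option<H256>, is_shadow_decryption: bool) -> Result<Arc<DecryptionSession>, Error>;
	fn new_signing_session(&self, key_id: ServerKeyId, signature: RequestSignature,
		version: Option<H256>, message_hash: H256) -> Result<Arc<SigningSession>, Error>;
}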
@@ -396,4 +396,52 @@ pub mod tests {
 		assert_eq!(math::verify_signature(&server_public, &(signature_c, signature_s), &message_hash), Ok(true));
 	}
+
+	#[test]
+	fn decryption_session_is_delegated_when_node_does_not_have_key_share() {
+		//::logger::init_log();
+		let key_servers = make_key_servers(6110, 3);
+
+		// generate document key
+		let threshold = 0;
+		let document = Random.generate().unwrap().secret().clone();
+		let secret = Random.generate().unwrap().secret().clone();
+		let signature = ethkey::sign(&secret, &document).unwrap();
+		let generated_key = key_servers[0].generate_document_key(&document, &signature, threshold).unwrap();
+		let generated_key = ethcrypto::ecies::decrypt(&secret, &ethcrypto::DEFAULT_MAC, &generated_key).unwrap();
+
+		// remove key from node0
+		key_servers[0].cluster().key_storage().remove(&document).unwrap();
+
+		// now let's try to retrieve key back by requesting it from node0, so that session must be delegated
+		let retrieved_key = key_servers[0].restore_document_key(&document, &signature).unwrap();
+		let retrieved_key = ethcrypto::ecies::decrypt(&secret, &ethcrypto::DEFAULT_MAC, &retrieved_key).unwrap();
+		assert_eq!(retrieved_key, generated_key);
+	}
+
+	#[test]
+	fn signing_session_is_delegated_when_node_does_not_have_key_share() {
+		//::logger::init_log();
+		let key_servers = make_key_servers(6114, 3);
+		let threshold = 1;
+
+		// generate server key
+		let server_key_id = Random.generate().unwrap().secret().clone();
+		let requestor_secret = Random.generate().unwrap().secret().clone();
+		let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap();
+		let server_public = key_servers[0].generate_key(&server_key_id, &signature, threshold).unwrap();
+
+		// remove key from node0
+		key_servers[0].cluster().key_storage().remove(&server_key_id).unwrap();
+
+		// sign message
+		let message_hash = H256::from(42);
+		let combined_signature = key_servers[0].sign_message(&server_key_id, &signature, message_hash.clone()).unwrap();
+		let combined_signature = ethcrypto::ecies::decrypt(&requestor_secret, &ethcrypto::DEFAULT_MAC, &combined_signature).unwrap();
+		let signature_c = Secret::from_slice(&combined_signature[..32]);
+		let signature_s = Secret::from_slice(&combined_signature[32..]);
+
+		// check signature
+		assert_eq!(math::verify_signature(&server_public, &(signature_c, signature_s), &message_hash), Ok(true));
+	}
 }
@@ -0,0 +1,725 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::collections::{BTreeSet, BTreeMap};
use bigint::hash::H256;
use ethkey::Secret;
use parking_lot::{Mutex, Condvar};
use key_server_cluster::{Error, SessionId, NodeId, DocumentKeyShare};
use key_server_cluster::cluster::Cluster;
use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession};
use key_server_cluster::decryption_session::SessionImpl as DecryptionSession;
use key_server_cluster::signing_session::SessionImpl as SigningSession;
use key_server_cluster::message::{Message, KeyVersionNegotiationMessage, RequestKeyVersions, KeyVersions};
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;

// TODO: optimizations: change sessions so that versions are sent by chunks.
/// Number of versions sent in single message.
const VERSIONS_PER_MESSAGE: usize = 32;

/// Key version negotiation session API.
pub trait Session: Send + Sync + 'static {
	/// Set continue action.
	fn set_continue_action(&self, action: ContinueAction);
	/// Get continue action.
	fn continue_action(&self) -> Option<ContinueAction>;
	/// Wait until session is completed.
	fn wait(&self) -> Result<(H256, NodeId), Error>;
}

/// Key version negotiation transport.
pub trait SessionTransport {
	/// Send message to given node.
	fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error>;
}

/// Key version negotiation result computer.
pub trait SessionResultComputer: Send + Sync {
	/// Compute result of session, if possible.
	fn compute_result(&self, threshold: Option<usize>, confirmations: &BTreeSet<NodeId>, versions: &BTreeMap<H256, BTreeSet<NodeId>>) -> Option<Result<(H256, NodeId), Error>>;
}

/// Key discovery session API.
pub struct SessionImpl<T: SessionTransport> {
	/// Session core.
	core: SessionCore<T>,
	/// Session data.
	data: Mutex<SessionData>,
}

/// Action after key version is negotiated.
#[derive(Clone)]
pub enum ContinueAction {
	/// Decryption session + is_shadow_decryption.
	Decrypt(Arc<DecryptionSession>, bool),
	/// Signing session + message hash.
	Sign(Arc<SigningSession>, H256),
}

/// Immutable session data.
struct SessionCore<T: SessionTransport> {
	/// Session meta.
	pub meta: ShareChangeSessionMeta,
	/// Sub-session id.
	pub sub_session: Secret,
	/// Key share.
	pub key_share: Option<DocumentKeyShare>,
	/// Session result computer.
	pub result_computer: Arc<SessionResultComputer>,
	/// Session transport.
	pub transport: T,
	/// Session nonce.
	pub nonce: u64,
	/// SessionImpl completion condvar.
	pub completed: Condvar,
}

/// Mutable session data.
struct SessionData {
	/// Session state.
	pub state: SessionState,
	/// Initialization confirmations.
	pub confirmations: Option<BTreeSet<NodeId>>,
	/// Key threshold.
	pub threshold: Option<usize>,
	/// { Version => Nodes }
	pub versions: Option<BTreeMap<H256, BTreeSet<NodeId>>>,
	/// Session result.
	pub result: Option<Result<(H256, NodeId), Error>>,
	/// Continue action.
	pub continue_with: Option<ContinueAction>,
}

/// SessionImpl creation parameters
pub struct SessionParams<T: SessionTransport> {
	/// Session meta.
	pub meta: ShareChangeSessionMeta,
	/// Sub-session id.
	pub sub_session: Secret,
	/// Key share.
	pub key_share: Option<DocumentKeyShare>,
	/// Session result computer.
	pub result_computer: Arc<SessionResultComputer>,
	/// Session transport to communicate to other cluster nodes.
	pub transport: T,
	/// Session nonce.
	pub nonce: u64,
}

/// Signing session state.
#[derive(Debug, PartialEq)]
enum SessionState {
	/// Waiting for initialization.
	WaitingForInitialization,
	/// Waiting for responses.
	WaitingForResponses,
	/// Session is completed.
	Finished,
}

/// Isolated session transport.
pub struct IsolatedSessionTransport {
	/// Cluster.
	pub cluster: Arc<Cluster>,
	/// Key id.
	pub key_id: SessionId,
	/// Sub session id.
	pub sub_session: Secret,
	/// Session-level nonce.
	pub nonce: u64,
}

/// Fastest session result computer. Computes first possible version that can be recovered on this node.
/// If there's no such version, selects version with the most support.
pub struct FastestResultComputer {
	/// This node id.
	self_node_id: NodeId,
	/// Threshold (if known).
	threshold: Option<usize>,
}

/// Selects version with most support, waiting for responses from all nodes.
pub struct LargestSupportResultComputer;

impl<T> SessionImpl<T> where T: SessionTransport {
	/// Create new session.
	pub fn new(params: SessionParams<T>) -> Self {
		SessionImpl {
			core: SessionCore {
				meta: params.meta,
				sub_session: params.sub_session,
				key_share: params.key_share,
				result_computer: params.result_computer,
				transport: params.transport,
				nonce: params.nonce,
				completed: Condvar::new(),
			},
			data: Mutex::new(SessionData {
				state: SessionState::WaitingForInitialization,
				confirmations: None,
				threshold: None,
				versions: None,
				result: None,
				continue_with: None,
			})
		}
	}

	/// Return session meta.
	pub fn meta(&self) -> &ShareChangeSessionMeta {
		&self.core.meta
	}

	/// Return key threshold.
	pub fn key_threshold(&self) -> Result<usize, Error> {
		Ok(self.data.lock().threshold.clone().ok_or(Error::InvalidStateForRequest)?)
	}

	/// Return result computer reference.
	pub fn version_holders(&self, version: &H256) -> Result<BTreeSet<NodeId>, Error> {
		Ok(self.data.lock().versions.as_ref().ok_or(Error::InvalidStateForRequest)?
			.get(version).ok_or(Error::KeyStorage("key version not found".into()))?
			.clone())
	}

	/// Initialize session.
	pub fn initialize(&self, connected_nodes: BTreeSet<NodeId>) -> Result<(), Error> {
		// check state
		let mut data = self.data.lock();
		if data.state != SessionState::WaitingForInitialization {
			return Err(Error::InvalidStateForRequest);
		}

		// update state
		let mut confirmations = connected_nodes;
		let mut versions: BTreeMap<H256, BTreeSet<NodeId>> = BTreeMap::new();
		let received_own_confirmation = confirmations.remove(&self.core.meta.self_node_id);
		if received_own_confirmation {
			if let Some(key_share) = self.core.key_share.as_ref() {
				for version in &key_share.versions {
					versions.entry(version.hash.clone())
						.or_insert_with(Default::default)
						.insert(self.core.meta.self_node_id.clone());
				}
			}
		}

		// update state
		let no_confirmations_required = confirmations.is_empty();
		data.state = SessionState::WaitingForResponses;
		data.confirmations = Some(confirmations);
		data.versions = Some(versions);

		// try to complete session
		Self::try_complete(&self.core, &mut *data);
		if no_confirmations_required && data.state != SessionState::Finished {
			return Err(Error::ConsensusUnreachable);
		} else if data.state == SessionState::Finished {
			return Ok(());
		}

		// send requests
		let confirmations = data.confirmations.as_ref().expect("filled couple of lines above; qed");
		for connected_node in confirmations {
			self.core.transport.send(connected_node, KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions {
				session: self.core.meta.id.clone().into(),
				sub_session: self.core.sub_session.clone().into(),
				session_nonce: self.core.nonce,
			}))?;
		}

		Ok(())
	}

	/// Process single message.
	pub fn process_message(&self, sender: &NodeId, message: &KeyVersionNegotiationMessage) -> Result<(), Error> {
		if self.core.nonce != message.session_nonce() {
			return Err(Error::ReplayProtection);
		}

		match message {
			&KeyVersionNegotiationMessage::RequestKeyVersions(ref message) =>
				self.on_key_versions_request(sender, message),
			&KeyVersionNegotiationMessage::KeyVersions(ref message) =>
				self.on_key_versions(sender, message),
			&KeyVersionNegotiationMessage::KeyVersionsError(ref message) => {
				self.on_session_error(sender, Error::Io(message.error.clone()));
				Ok(())
			},
		}
	}

	/// Process key versions request.
	pub fn on_key_versions_request(&self, sender: &NodeId, _message: &RequestKeyVersions) -> Result<(), Error> {
		debug_assert!(sender != &self.core.meta.self_node_id);

		// check message
		if *sender != self.core.meta.master_node_id {
			return Err(Error::InvalidMessage);
		}

		// check state
		let mut data = self.data.lock();
		if data.state != SessionState::WaitingForInitialization {
			return Err(Error::InvalidStateForRequest);
		}

		// send response
		self.core.transport.send(sender, KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
			session: self.core.meta.id.clone().into(),
			sub_session: self.core.sub_session.clone().into(),
			session_nonce: self.core.nonce,
			threshold: self.core.key_share.as_ref().map(|key_share| key_share.threshold),
			versions: self.core.key_share.as_ref().map(|key_share|
				key_share.versions.iter().rev()
					.filter(|v| v.id_numbers.contains_key(sender))
					.chain(key_share.versions.iter().rev().filter(|v| !v.id_numbers.contains_key(sender)))
					.map(|v| v.hash.clone().into())
					.take(VERSIONS_PER_MESSAGE)
					.collect())
				.unwrap_or_else(|| Default::default())
		}))?;

		// update state
		data.state = SessionState::Finished;

		Ok(())
	}

	/// Process key versions response.
	pub fn on_key_versions(&self, sender: &NodeId, message: &KeyVersions) -> Result<(), Error> {
		debug_assert!(sender != &self.core.meta.self_node_id);

		// check state
		let mut data = self.data.lock();
		if data.state != SessionState::WaitingForResponses && data.state != SessionState::Finished {
			return Err(Error::InvalidStateForRequest);
		}
		let reason = "this field is filled on master node when initializing; this is initialized master node; qed";
		if !data.confirmations.as_mut().expect(reason).remove(sender) {
			return Err(Error::InvalidMessage);
		}

		// remember versions that sender have
		{
			match message.threshold.clone() {
				Some(threshold) if data.threshold.is_none() => {
					data.threshold = Some(threshold);
				},
				Some(threshold) if data.threshold.as_ref() == Some(&threshold) => (),
				Some(_) => return Err(Error::InvalidMessage),
				None if message.versions.is_empty() => (),
				None => return Err(Error::InvalidMessage),
			}

			let versions = data.versions.as_mut().expect(reason);
			for version in &message.versions {
				versions.entry(version.clone().into())
					.or_insert_with(Default::default)
					.insert(sender.clone());
			}
		}

		// try to compute result
		if data.state != SessionState::Finished {
			Self::try_complete(&self.core, &mut *data);
		}

		Ok(())
	}

	/// Try to complete result && finish session.
	fn try_complete(core: &SessionCore<T>, data: &mut SessionData) {
		let reason = "this field is filled on master node when initializing; try_complete is only called on initialized master node; qed";
		let confirmations = data.confirmations.as_ref().expect(reason);
		let versions = data.versions.as_ref().expect(reason);
		if let Some(result) = core.result_computer.compute_result(data.threshold.clone(), confirmations, versions) {
			data.state = SessionState::Finished;
			data.result = Some(result);
			core.completed.notify_all();
		}
	}
}

impl<T> Session for SessionImpl<T> where T: SessionTransport + Send + Sync + 'static {
	fn set_continue_action(&self, action: ContinueAction) {
		self.data.lock().continue_with = Some(action);
	}

	fn continue_action(&self) -> Option<ContinueAction> {
		self.data.lock().continue_with.clone()
	}

	fn wait(&self) -> Result<(H256, NodeId), Error> {
		let mut data = self.data.lock();
		if !data.result.is_some() {
			self.core.completed.wait(&mut data);
		}

		data.result.as_ref()
			.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
			.clone()
	}
}

impl<T> ClusterSession for SessionImpl<T> where T: SessionTransport {
	type Id = SessionIdWithSubSession;

	fn type_name() -> &'static str {
		"version negotiation"
	}

	fn id(&self) -> SessionIdWithSubSession {
		SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.sub_session.clone())
	}

	fn is_finished(&self) -> bool {
		self.data.lock().state == SessionState::Finished
	}

	fn on_session_timeout(&self) {
		let mut data = self.data.lock();

		if data.confirmations.is_some() {
			data.confirmations.as_mut().expect("checked a line above; qed").clear();
			Self::try_complete(&self.core, &mut *data);
			if data.state != SessionState::Finished {
				warn!("{}: key version negotiation session failed with timeout", self.core.meta.self_node_id);

				data.result = Some(Err(Error::ConsensusUnreachable));
				self.core.completed.notify_all();
			}
		}
	}

	fn on_node_timeout(&self, node: &NodeId) {
		self.on_session_error(node, Error::NodeDisconnected)
	}

	fn on_session_error(&self, node: &NodeId, error: Error) {
		let mut data = self.data.lock();

		if data.confirmations.is_some() {
			let is_waiting_for_confirmation = data.confirmations.as_mut().expect("checked a line above; qed").remove(node);
			if is_waiting_for_confirmation {
				Self::try_complete(&self.core, &mut *data);
				if data.state != SessionState::Finished {
					warn!("{}: key version negotiation session failed because {} connection has timeouted", self.core.meta.self_node_id, node);

					data.state = SessionState::Finished;
					data.result = Some(Err(error));
					self.core.completed.notify_all();
				}
			}
		}
	}

	fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
		match *message {
			Message::KeyVersionNegotiation(ref message) => self.process_message(sender, message),
			_ => unreachable!("cluster checks message to be correct before passing; qed"),
		}
	}
}

impl SessionTransport for IsolatedSessionTransport {
	fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> {
		self.cluster.send(node, Message::KeyVersionNegotiation(message))
	}
}

impl FastestResultComputer {
	pub fn new(self_node_id: NodeId, key_share: Option<&DocumentKeyShare>) -> Self {
		let threshold = key_share.map(|ks| ks.threshold);
		FastestResultComputer {
			self_node_id: self_node_id,
			threshold: threshold,
		}
	}}

impl SessionResultComputer for FastestResultComputer {
	fn compute_result(&self, threshold: Option<usize>, confirmations: &BTreeSet<NodeId>, versions: &BTreeMap<H256, BTreeSet<NodeId>>) -> Option<Result<(H256, NodeId), Error>> {
		match self.threshold.or(threshold) {
			// if we have key share on this node
			Some(threshold) => {
				// select version this node have, with enough participants
				let has_key_share = self.threshold.is_some();
				let version = versions.iter().find(|&(_, ref n)| !has_key_share || n.contains(&self.self_node_id) && n.len() >= threshold + 1);
				// if there's no such version, wait for more confirmations
				match version {
					Some((version, nodes)) => Some(Ok((version.clone(), if has_key_share { self.self_node_id.clone() } else { nodes.iter().cloned().nth(0)
						.expect("version is only inserted when there's at least one owner; qed") }))),
					None if !confirmations.is_empty() => None,
					// otherwise - try to find any version
					None => Some(versions.iter()
						.find(|&(_, ref n)| n.len() >= threshold + 1)
						.map(|(version, nodes)| Ok((version.clone(), nodes.iter().cloned().nth(0)
							.expect("version is only inserted when there's at least one owner; qed"))))
						.unwrap_or(Err(Error::ConsensusUnreachable))),
				}
			},
			// if we do not have share, then wait for all confirmations
			None if !confirmations.is_empty() => None,
			// ...and select version with largest support
			None => Some(versions.iter()
				.max_by_key(|&(_, ref n)| n.len())
				.map(|(version, nodes)| Ok((version.clone(), nodes.iter().cloned().nth(0)
					.expect("version is only inserted when there's at least one owner; qed"))))
				.unwrap_or(Err(Error::ConsensusUnreachable))),
		}
	}
}

impl SessionResultComputer for LargestSupportResultComputer {
	fn compute_result(&self, _threshold: Option<usize>, confirmations: &BTreeSet<NodeId>, versions: &BTreeMap<H256, BTreeSet<NodeId>>) -> Option<Result<(H256, NodeId), Error>> {
		if !confirmations.is_empty() {
			return None;
		}

		versions.iter()
			.max_by_key(|&(_, ref n)| n.len())
			.map(|(version, nodes)| Ok((version.clone(), nodes.iter().cloned().nth(0)
				.expect("version is only inserted when there's at least one owner; qed"))))
	}
}

#[cfg(test)]
mod tests {
	use std::sync::Arc;
	use std::collections::{VecDeque, BTreeMap, BTreeSet};
	use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage, DocumentKeyShare, DocumentKeyShareVersion};
	use key_server_cluster::math;
	use key_server_cluster::cluster::Cluster;
	use key_server_cluster::cluster::tests::DummyCluster;
	use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
	use key_server_cluster::message::{Message, KeyVersionNegotiationMessage, RequestKeyVersions, KeyVersions};
	use super::{SessionImpl, SessionTransport, SessionParams, FastestResultComputer, SessionState};

	struct DummyTransport {
		cluster: Arc<DummyCluster>,
	}

	impl SessionTransport for DummyTransport {
		fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> {
			self.cluster.send(node, Message::KeyVersionNegotiation(message))
		}
	}

	struct Node {
		pub cluster: Arc<DummyCluster>,
		pub key_storage: Arc<DummyKeyStorage>,
		pub session: SessionImpl<DummyTransport>,
	}

	struct MessageLoop {
		pub session_id: SessionId,
		pub nodes: BTreeMap<NodeId, Node>,
		pub queue: VecDeque<(NodeId, NodeId, Message)>,
	}

	impl MessageLoop {
		pub fn prepare_nodes(nodes_num: usize) -> BTreeMap<NodeId, Arc<DummyKeyStorage>> {
			(0..nodes_num).map(|_| (math::generate_random_point().unwrap(),
				Arc::new(DummyKeyStorage::default()))).collect()
		}

		pub fn empty(nodes_num: usize) -> Self {
			Self::new(Self::prepare_nodes(nodes_num))
		}

		pub fn new(nodes: BTreeMap<NodeId, Arc<DummyKeyStorage>>) -> Self {
			let master_node_id = nodes.keys().cloned().nth(0).unwrap();
			let sub_sesion = math::generate_random_scalar().unwrap();
			let all_nodes_ids: BTreeSet<_> = nodes.keys().cloned().collect();
			MessageLoop {
				session_id: Default::default(),
				nodes: nodes.iter().map(|(node_id, key_storage)| {
					let cluster = Arc::new(DummyCluster::new(node_id.clone()));
					cluster.add_nodes(all_nodes_ids.iter().cloned());
					(node_id.clone(), Node {
						cluster: cluster.clone(),
						key_storage: key_storage.clone(),
						session: SessionImpl::new(SessionParams {
							meta: ShareChangeSessionMeta {
								id: Default::default(),
								self_node_id: node_id.clone(),
								master_node_id: master_node_id.clone(),
							},
							sub_session: sub_sesion.clone(),
							key_share: key_storage.get(&Default::default()).unwrap(),
							result_computer: Arc::new(FastestResultComputer::new(
								node_id.clone(),
								key_storage.get(&Default::default()).unwrap().as_ref(),
							)),
							transport: DummyTransport {
								cluster: cluster,
							},
							nonce: 0,
						}),
					})
				}).collect(),
				queue: VecDeque::new(),
			}
		}

		pub fn node_id(&self, idx: usize) -> &NodeId {
			self.nodes.keys().nth(idx).unwrap()
		}

		pub fn session(&self, idx: usize) -> &SessionImpl<DummyTransport> {
			&self.nodes.values().nth(idx).unwrap().session
		}
	}

	#[test]
	fn negotiation_fails_if_initialized_twice() {
		let ml = MessageLoop::empty(1);
		assert_eq!(ml.session(0).initialize(BTreeSet::new()), Ok(()));
		assert_eq!(ml.session(0).initialize(BTreeSet::new()), Err(Error::InvalidStateForRequest));
	}

	#[test]
	fn negotiation_fails_if_message_contains_wrong_nonce() {
		let ml = MessageLoop::empty(2);
		assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 100,
		})), Err(Error::ReplayProtection));
	}

	#[test]
	fn negotiation_fails_if_versions_request_received_from_non_master() {
		let ml = MessageLoop::empty(3);
		assert_eq!(ml.session(2).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
		})), Err(Error::InvalidMessage));
	}

	#[test]
	fn negotiation_fails_if_versions_request_received_twice() {
		let ml = MessageLoop::empty(2);
		assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
		})), Ok(()));
		assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
		})), Err(Error::InvalidStateForRequest));
	}

	#[test]
	fn negotiation_fails_if_versions_received_before_initialization() {
		let ml = MessageLoop::empty(2);
		assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
			threshold: Some(10),
			versions: Vec::new(),
		})), Err(Error::InvalidStateForRequest));
	}

	#[test]
	fn negotiation_does_not_fails_if_versions_received_after_completion() {
		let ml = MessageLoop::empty(3);
		ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();
		assert_eq!(ml.session(0).data.lock().state, SessionState::WaitingForResponses);

		let version_id = (*math::generate_random_scalar().unwrap()).clone();
		assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
			threshold: Some(0),
			versions: vec![version_id.clone().into()]
		})), Ok(()));
		assert_eq!(ml.session(0).data.lock().state, SessionState::Finished);

		assert_eq!(ml.session(0).process_message(ml.node_id(2), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
			threshold: Some(0),
			versions: vec![version_id.clone().into()]
		})), Ok(()));
		assert_eq!(ml.session(0).data.lock().state, SessionState::Finished);
	}

	#[test]
	fn negotiation_fails_if_wrong_threshold_sent() {
		let ml = MessageLoop::empty(3);
		ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();

		let version_id = (*math::generate_random_scalar().unwrap()).clone();
		assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
			threshold: Some(1),
			versions: vec![version_id.clone().into()]
		})), Ok(()));
		assert_eq!(ml.session(0).process_message(ml.node_id(2), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
			threshold: Some(2),
			versions: vec![version_id.clone().into()]
		})), Err(Error::InvalidMessage));
	}

	#[test]
	fn negotiation_fails_if_threshold_empty_when_versions_are_not_empty() {
		let ml = MessageLoop::empty(2);
		ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();

		let version_id = (*math::generate_random_scalar().unwrap()).clone();
		assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
			threshold: None,
			versions: vec![version_id.clone().into()]
		})), Err(Error::InvalidMessage));
	}

	#[test]
	fn fast_negotiation_does_not_completes_instantly_when_enough_share_owners_are_connected() {
		let nodes = MessageLoop::prepare_nodes(2);
		let version_id = (*math::generate_random_scalar().unwrap()).clone();
		nodes.values().nth(0).unwrap().insert(Default::default(), DocumentKeyShare {
			author: Default::default(),
			threshold: 1,
			common_point: None,
			encrypted_point: None,
			versions: vec![DocumentKeyShareVersion {
				hash: version_id,
				id_numbers: vec![(nodes.keys().cloned().nth(0).unwrap(), math::generate_random_scalar().unwrap())].into_iter().collect(),
				secret_share: math::generate_random_scalar().unwrap(),
			}],
		}).unwrap();
		let ml = MessageLoop::new(nodes);
		ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();
		// we can't be sure that node has given key version because previous ShareAdd session could fail
		assert!(ml.session(0).data.lock().state != SessionState::Finished);
	}
}
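Stepping back from the full module above: the master requests key versions from every connected node, each node answers with the versions its key share holds, and the master folds the answers into a version-to-holders map until the result computer can pick a (version, master) pair. The toy, single-process model below restates that flow under obvious simplifications (no transport, nonces, or error handling, and plain integers instead of H256/NodeId), so it is an illustration, not the module's API:

use std::collections::{BTreeMap, BTreeSet};

type NodeId = u64;
type VersionHash = u32;

/// One node's reply to `RequestKeyVersions`: the versions its key share contains.
struct KeyVersionsReply {
	node: NodeId,
	versions: Vec<VersionHash>,
}

/// Toy master-side accumulator, loosely mirroring SessionData::{confirmations, versions}.
struct Negotiation {
	awaiting: BTreeSet<NodeId>,
	versions: BTreeMap<VersionHash, BTreeSet<NodeId>>,
	threshold: usize,
}

impl Negotiation {
	fn new(connected: BTreeSet<NodeId>, threshold: usize) -> Self {
		Negotiation { awaiting: connected, versions: BTreeMap::new(), threshold: threshold }
	}

	/// Fold in one reply; return Some((version, holder)) once some version has
	/// threshold + 1 holders, or once all replies arrived (largest support wins).
	fn on_reply(&mut self, reply: KeyVersionsReply) -> Option<(VersionHash, NodeId)> {
		self.awaiting.remove(&reply.node);
		for version in reply.versions {
			self.versions.entry(version).or_insert_with(BTreeSet::new).insert(reply.node);
		}
		self.versions.iter()
			.find(|&(_, holders)| holders.len() >= self.threshold + 1)
			.or_else(|| if self.awaiting.is_empty() {
				self.versions.iter().max_by_key(|&(_, holders)| holders.len())
			} else {
				None
			})
			.map(|(version, holders)| (*version, *holders.iter().next().expect("non-empty holder set")))
	}
}

fn main() {
	let connected: BTreeSet<NodeId> = [1, 2, 3].iter().cloned().collect();
	let mut negotiation = Negotiation::new(connected, 1);
	// first holder of version 7: not enough yet (threshold + 1 = 2 holders needed)
	assert_eq!(negotiation.on_reply(KeyVersionsReply { node: 1, versions: vec![7] }), None);
	// second holder arrives: version 7 is selected, its first holder becomes the master
	assert_eq!(negotiation.on_reply(KeyVersionsReply { node: 2, versions: vec![7] }), Some((7, 1)));
}

FastestResultComputer above additionally prefers a version that this node itself holds, so decryption or signing can start without waiting for every reply; LargestSupportResultComputer always waits for all confirmations before choosing.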
@@ -14,11 +14,10 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

+pub mod key_version_negotiation_session;
 pub mod servers_set_change_session;
 pub mod share_add_session;
 pub mod share_change_session;
-pub mod share_move_session;
-pub mod share_remove_session;

 mod sessions_queue;
@@ -20,22 +20,25 @@ use std::collections::btree_map::Entry;
 use parking_lot::{Mutex, Condvar};
 use ethkey::{Public, Signature};
 use key_server_cluster::{Error, NodeId, SessionId, KeyStorage};
+use key_server_cluster::math;
 use key_server_cluster::cluster::Cluster;
 use key_server_cluster::cluster_sessions::ClusterSession;
 use key_server_cluster::message::{Message, ServersSetChangeMessage,
 	ConsensusMessageWithServersSet, InitializeConsensusSessionWithServersSet,
 	ServersSetChangeConsensusMessage, ConfirmConsensusInitialization, UnknownSessionsRequest, UnknownSessions,
 	ServersSetChangeShareAddMessage, ServersSetChangeError, ServersSetChangeCompleted,
-	ServersSetChangeShareMoveMessage, ServersSetChangeShareRemoveMessage,
 	ServersSetChangeDelegate, ServersSetChangeDelegateResponse, InitializeShareChangeSession,
-	ConfirmShareChangeSessionInitialization};
+	ConfirmShareChangeSessionInitialization, KeyVersionNegotiationMessage, ShareChangeKeyVersionNegotiation};
 use key_server_cluster::share_change_session::{ShareChangeSession, ShareChangeSessionParams, ShareChangeSessionPlan,
 	prepare_share_change_session_plan};
+use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl,
+	SessionParams as KeyVersionNegotiationSessionParams, LargestSupportResultComputer,
+	SessionTransport as KeyVersionNegotiationTransport, Session as KeyVersionNegotiationSession};
 use key_server_cluster::jobs::job_session::JobTransport;
 use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest};
 use key_server_cluster::jobs::unknown_sessions_job::{UnknownSessionsJob};
 use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
-use key_server_cluster::admin_sessions::sessions_queue::{SessionsQueue, QueuedSession};
+use key_server_cluster::admin_sessions::sessions_queue::SessionsQueue;
 use key_server_cluster::admin_sessions::ShareChangeSessionMeta;

 /// Maximal number of active share change sessions.
@@ -110,6 +113,8 @@ struct SessionData {
 	pub new_nodes_set: Option<BTreeSet<NodeId>>,
 	/// Share change sessions queue (valid on master nodes only).
 	pub sessions_queue: Option<SessionsQueue>,
+	/// Share change sessions key version negotiation.
+	pub negotiation_sessions: BTreeMap<SessionId, KeyVersionNegotiationSessionImpl<ServersSetChangeKeyVersionNegotiationTransport>>,
 	/// Share change sessions initialization state (valid on master nodes only).
 	pub sessions_initialization_state: BTreeMap<SessionId, SessionInitializationData>,
 	/// Sessions delegated to other nodes (valid on master node only).
@@ -164,6 +169,16 @@ struct UnknownSessionsJobTransport {
 	cluster: Arc<Cluster>,
 }

+/// Key version negotiation transport.
+struct ServersSetChangeKeyVersionNegotiationTransport {
+	/// Session id.
+	id: SessionId,
+	/// Session-level nonce.
+	nonce: u64,
+	/// Cluster.
+	cluster: Arc<Cluster>,
+}
+
 impl SessionImpl {
 	/// Create new servers set change session.
 	pub fn new(params: SessionParams) -> Result<Self, Error> {
@@ -182,6 +197,7 @@ impl SessionImpl {
 			consensus_session: None,
 			new_nodes_set: None,
 			sessions_queue: None,
+			negotiation_sessions: BTreeMap::new(),
 			sessions_initialization_state: BTreeMap::new(),
 			delegated_key_sessions: BTreeMap::new(),
 			active_key_sessions: BTreeMap::new(),
@@ -207,7 +223,6 @@ impl SessionImpl {
 		let mut consensus_session = ConsensusSession::new(ConsensusSessionParams {
 			meta: self.core.meta.clone().into_consensus_meta(self.core.all_nodes_set.len())?,
 			consensus_executor: ServersSetChangeAccessJob::new_on_master(self.core.admin_public.clone(),
-				self.core.all_nodes_set.clone(),
 				self.core.all_nodes_set.clone(),
 				new_nodes_set.clone(),
 				all_set_signature,
@@ -240,6 +255,8 @@ impl SessionImpl {
 				self.on_unknown_sessions_requested(sender, message),
 			&ServersSetChangeMessage::UnknownSessions(ref message) =>
 				self.on_unknown_sessions(sender, message),
+			&ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ref message) =>
+				self.on_key_version_negotiation(sender, message),
 			&ServersSetChangeMessage::InitializeShareChangeSession(ref message) =>
 				self.on_initialize_share_change_session(sender, message),
 			&ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref message) =>
@@ -250,12 +267,10 @@ impl SessionImpl {
 				self.on_delegated_session_completed(sender, message),
 			&ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref message) =>
 				self.on_share_add_message(sender, message),
-			&ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ref message) =>
-				self.on_share_move_message(sender, message),
-			&ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ref message) =>
-				self.on_share_remove_message(sender, message),
-			&ServersSetChangeMessage::ServersSetChangeError(ref message) =>
-				self.on_session_error(sender, message),
+			&ServersSetChangeMessage::ServersSetChangeError(ref message) => {
+				self.on_session_error(sender, Error::Io(message.error.clone()));
+				Ok(())
+			},
 			&ServersSetChangeMessage::ServersSetChangeCompleted(ref message) =>
 				self.on_session_completed(sender, message),
 		}
@@ -278,9 +293,7 @@ impl SessionImpl {
 			&ConsensusMessageWithServersSet::InitializeConsensusSession(_) => {
 				data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams {
 					meta: self.core.meta.clone().into_consensus_meta(self.core.all_nodes_set.len())?,
-					consensus_executor: ServersSetChangeAccessJob::new_on_slave(self.core.admin_public.clone(),
-						self.core.all_nodes_set.clone(),
-					),
+					consensus_executor: ServersSetChangeAccessJob::new_on_slave(self.core.admin_public.clone()),
 					consensus_transport: ServersSetChangeConsensusTransport {
 						id: self.core.meta.id.clone(),
 						nonce: self.core.nonce,
@@ -367,12 +380,69 @@ impl SessionImpl {

 		// initialize sessions queue
 		data.state = SessionState::RunningShareChangeSessions;
-		data.sessions_queue = Some(SessionsQueue::new(self.core.key_storage.clone(), unknown_sessions));
+		data.sessions_queue = Some(SessionsQueue::new(&self.core.key_storage, unknown_sessions.keys().cloned().collect()));

 		// and disseminate session initialization requests
 		Self::disseminate_session_initialization_requests(&self.core, &mut *data)
 	}

+	/// When key version negotiation message is received.
+	pub fn on_key_version_negotiation(&self, sender: &NodeId, message: &ShareChangeKeyVersionNegotiation) -> Result<(), Error> {
+		debug_assert!(self.core.meta.id == *message.session);
+		debug_assert!(sender != &self.core.meta.self_node_id);
+
+		// check state
+		let mut data = self.data.lock();
+		if data.state != SessionState::RunningShareChangeSessions {
+			return Err(Error::InvalidStateForRequest);
+		}
+
+		// process message
+		match &message.message {
+			&KeyVersionNegotiationMessage::RequestKeyVersions(ref message) if sender == &self.core.meta.master_node_id => {
+				let key_id = message.session.clone().into();
+				let key_share = self.core.key_storage.get(&key_id).map_err(|e| Error::KeyStorage(e.into()))?;
+				let negotiation_session = KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams {
+					meta: ShareChangeSessionMeta {
+						id: key_id.clone(),
+						self_node_id: self.core.meta.self_node_id.clone(),
+						master_node_id: sender.clone(),
+					},
+					sub_session: message.sub_session.clone().into(),
+					key_share: key_share,
+					result_computer: Arc::new(LargestSupportResultComputer {}),
+					transport: ServersSetChangeKeyVersionNegotiationTransport {
+						id: key_id,
+						nonce: self.core.nonce,
+						cluster: self.core.cluster.clone(),
+					},
+					nonce: message.session_nonce,
+				});
+				negotiation_session.on_key_versions_request(sender, message)?;
+				debug_assert!(negotiation_session.is_finished());
+				Ok(())
+			},
+			&KeyVersionNegotiationMessage::KeyVersions(ref message) if self.core.meta.self_node_id == self.core.meta.master_node_id => {
+				let key_id = message.session.clone().into();
+				{
+					let negotiation_session = data.negotiation_sessions.get(&key_id).ok_or(Error::InvalidMessage)?;
+					negotiation_session.on_key_versions(sender, message)?;
+					if !negotiation_session.is_finished() {
+						return Ok(());
+					}
+				}
+
+				// else prepare plan && start share change session
+				if !Self::initialize_share_change_session(&self.core, &mut *data, key_id)? {
+					Self::disseminate_session_initialization_requests(&self.core, &mut *data)?;
+				}
+
+				Ok(())
+			},
+			_ => Err(Error::InvalidMessage),
+		}
+	}
+
 	/// When share change session initialization is requested.
 	pub fn on_initialize_share_change_session(&self, sender: &NodeId, message: &InitializeShareChangeSession) -> Result<(), Error> {
 		debug_assert!(self.core.meta.id == *message.session);
@@ -395,10 +465,9 @@ impl SessionImpl {
 			true => return Err(Error::InvalidMessage),
 			false => {
 				let master_plan = ShareChangeSessionPlan {
-					isolated_nodes: message.isolated_nodes.iter().cloned().map(Into::into).collect(),
-					nodes_to_add: message.shares_to_add.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(),
-					nodes_to_move: message.shares_to_move.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(),
-					nodes_to_remove: message.shares_to_remove.iter().cloned().map(Into::into).collect(),
+					key_version: message.version.clone().into(),
+					consensus_group: message.consensus_group.iter().cloned().map(Into::into).collect(),
+					new_nodes_map: message.new_nodes_map.iter().map(|(k, v)| (k.clone().into(), v.clone().map(Into::into))).collect(),
 				};

 				// if master plan is empty, it is cheating
@@ -406,24 +475,29 @@ impl SessionImpl {
 					return Err(Error::InvalidMessage);
 				}

-				// on nodes, which have their own key share, we could check if master node plan is correct
-				if let Ok(key_share) = self.core.key_storage.get(&key_id) {
-					let new_nodes_set = data.new_nodes_set.as_ref()
-						.expect("new_nodes_set is filled during consensus establishing; change sessions are running after this; qed");
-					let local_plan = prepare_share_change_session_plan(&self.core.all_nodes_set, &key_share.id_numbers.keys().cloned().collect(), new_nodes_set)?;
-					if local_plan.isolated_nodes != master_plan.isolated_nodes
-						|| local_plan.nodes_to_add.keys().any(|n| !local_plan.nodes_to_add.contains_key(n))
-						|| local_plan.nodes_to_add.keys().any(|n| !master_plan.nodes_to_add.contains_key(n))
-						|| local_plan.nodes_to_move != master_plan.nodes_to_move
-						|| local_plan.nodes_to_remove != master_plan.nodes_to_remove {
-						return Err(Error::InvalidMessage);
+				// on nodes, holding selected key share version, we could check if master node plan is correct
+				let master_node_id = message.master_node_id.clone().into();
+				if let Some(key_share) = self.core.key_storage.get(&key_id).map_err(|e| Error::KeyStorage(e.into()))? {
+					let version = message.version.clone().into();
+					if let Ok(key_version) = key_share.version(&version) {
+						let key_share_owners = key_version.id_numbers.keys().cloned().collect();
+						let new_nodes_set = data.new_nodes_set.as_ref()
+							.expect("new_nodes_set is filled during consensus establishing; change sessions are running after this; qed");
+						let local_plan = prepare_share_change_session_plan(
+							&self.core.all_nodes_set,
+							key_share.threshold,
+							version,
+							&master_node_id,
+							&key_share_owners,
+							new_nodes_set)?;
+
+						if local_plan.new_nodes_map.keys().collect::<BTreeSet<_>>() != master_plan.new_nodes_map.keys().collect::<BTreeSet<_>>() {
+							return Err(Error::InvalidMessage);
+						}
 					}
 				}

-				let session = Self::create_share_change_session(&self.core, key_id,
-					message.master_node_id.clone().into(),
-					message.old_shares_set.iter().cloned().map(Into::into).collect(),
-					master_plan)?;
+				let session = Self::create_share_change_session(&self.core, key_id, master_node_id, master_plan)?;
 				if !session.is_finished() {
 					data.active_key_sessions.insert(key_id.clone(), session);
 				}
@@ -551,31 +625,6 @@ impl SessionImpl {
session.on_share_add_message(sender, &message.message))
}

-/// When share move message is received.
-pub fn on_share_move_message(&self, sender: &NodeId, message: &ServersSetChangeShareMoveMessage) -> Result<(), Error> {
-self.on_share_change_message(message.message.session_id().clone().into(), |session|
-session.on_share_move_message(sender, &message.message))
-}
-
-/// When share remove message is received.
-pub fn on_share_remove_message(&self, sender: &NodeId, message: &ServersSetChangeShareRemoveMessage) -> Result<(), Error> {
-self.on_share_change_message(message.message.session_id().clone().into(), |session|
-session.on_share_remove_message(sender, &message.message))
-}
-
-/// When error has occured on another node.
-pub fn on_session_error(&self, sender: &NodeId, message: &ServersSetChangeError) -> Result<(), Error> {
-let mut data = self.data.lock();
-
-warn!("{}: servers set change session failed with error: {} from {}", self.core.meta.self_node_id, message.error, sender);
-
-data.state = SessionState::Finished;
-data.result = Some(Err(Error::Io(message.error.clone())));
-self.core.completed.notify_all();
-
-Ok(())
-}
-
/// When session completion message is received.
pub fn on_session_completed(&self, sender: &NodeId, message: &ServersSetChangeCompleted) -> Result<(), Error> {
debug_assert!(self.core.meta.id == *message.session);
@@ -591,6 +640,13 @@ impl SessionImpl {
return Err(Error::TooEarlyForRequest);
}

+// if we are on the set of nodes that are being removed from the cluster, let's clear database
+if !data.new_nodes_set.as_ref()
+.expect("new_nodes_set is filled during initialization; session is completed after initialization; qed")
+.contains(&self.core.meta.self_node_id) {
+self.core.key_storage.clear().map_err(|e| Error::KeyStorage(e.into()))?;
+}
+
data.state = SessionState::Finished;
self.core.completed.notify_all();

@@ -629,7 +685,7 @@ impl SessionImpl {
}

/// Create share change session.
-fn create_share_change_session(core: &SessionCore, key_id: SessionId, master_node_id: NodeId, old_nodes_set: BTreeSet<NodeId>, session_plan: ShareChangeSessionPlan) -> Result<ShareChangeSession, Error> {
+fn create_share_change_session(core: &SessionCore, key_id: SessionId, master_node_id: NodeId, session_plan: ShareChangeSessionPlan) -> Result<ShareChangeSession, Error> {
ShareChangeSession::new(ShareChangeSessionParams {
session_id: key_id.clone(),
nonce: core.nonce,
@@ -640,8 +696,6 @@ impl SessionImpl {
},
cluster: core.cluster.clone(),
key_storage: core.key_storage.clone(),
-old_nodes_set: old_nodes_set,
-cluster_nodes_set: core.all_nodes_set.clone(),
plan: session_plan,
})
}
@@ -649,77 +703,43 @@ impl SessionImpl {
/// Disseminate session initialization requests.
fn disseminate_session_initialization_requests(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> {
debug_assert_eq!(core.meta.self_node_id, core.meta.master_node_id);
-if let Some(sessions_queue) = data.sessions_queue.as_mut() {
-let mut number_of_sessions_to_start = MAX_ACTIVE_KEY_SESSIONS.saturating_sub(data.active_key_sessions.len() + data.delegated_key_sessions.len());
-let new_nodes_set = data.new_nodes_set.as_ref()
-.expect("this method is called after consensus estabished; new_nodes_set is a result of consensus session; qed");
+if data.sessions_queue.is_some() {
+let number_of_sessions_active = data.active_key_sessions.len()
++ data.delegated_key_sessions.len()
++ data.negotiation_sessions.len();
+let mut number_of_sessions_to_start = MAX_ACTIVE_KEY_SESSIONS.saturating_sub(number_of_sessions_active);
while number_of_sessions_to_start > 0 {
-let queued_session = match sessions_queue.next() {
+let key_id = match data.sessions_queue.as_mut().expect("checked before beginning of the loop; qed").next() {
None => break, // complete session
Some(Err(e)) => return Err(e),
-Some(Ok(session)) => session,
+Some(Ok(key_id)) => key_id,
};

-// prepare session change plan && check if something needs to be changed
-let old_nodes_set = queued_session.nodes();
-let session_plan = prepare_share_change_session_plan(&core.all_nodes_set, &old_nodes_set, new_nodes_set)?;
-if session_plan.is_empty() {
+let key_share = core.key_storage.get(&key_id).map_err(|e| Error::KeyStorage(e.into()))?;
+let negotiation_session = KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams {
+meta: ShareChangeSessionMeta {
+id: key_id,
+self_node_id: core.meta.self_node_id.clone(),
+master_node_id: core.meta.self_node_id.clone(),
+},
+sub_session: math::generate_random_scalar()?,
+key_share: key_share,
+result_computer: Arc::new(LargestSupportResultComputer {}), // TODO: optimizations: could use modified Fast version
+transport: ServersSetChangeKeyVersionNegotiationTransport {
+id: key_id,
+nonce: core.nonce,
+cluster: core.cluster.clone(),
+},
+nonce: 0,
+});
+negotiation_session.initialize(core.cluster.nodes())?;
+if !negotiation_session.is_finished() {
+data.negotiation_sessions.insert(key_id, negotiation_session);
continue;
}

-// select master for this session
-let session_master = match &queued_session {
-&QueuedSession::Known(_, _) => core.meta.self_node_id.clone(),
-&QueuedSession::Unknown(_, ref nodes) => nodes.iter().cloned().nth(0)
-.expect("unknown session is received is reported by at least one node; qed"),
-};
-
-// send key session initialization requests
-let key_id = queued_session.id().clone();
-let mut confirmations: BTreeSet<_> = old_nodes_set.iter().cloned()
-.chain(session_plan.nodes_to_add.keys().cloned())
-.chain(session_plan.nodes_to_move.keys().cloned())
-.filter(|n| core.all_nodes_set.contains(n))
-.collect();
-let need_create_session = confirmations.remove(&core.meta.self_node_id);
-let initialization_message = Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(InitializeShareChangeSession {
-session: core.meta.id.clone().into(),
-session_nonce: core.nonce,
-key_id: key_id.clone().into(),
-master_node_id: session_master.clone().into(),
-old_shares_set: old_nodes_set.iter().cloned().map(Into::into).collect(),
-isolated_nodes: session_plan.isolated_nodes.iter().cloned().map(Into::into).collect(),
-shares_to_add: session_plan.nodes_to_add.iter()
-.map(|(n, nid)| (n.clone().into(), nid.clone().into()))
-.collect(),
-shares_to_move: session_plan.nodes_to_move.iter()
-.map(|(source, target)| (source.clone().into(), target.clone().into()))
-.collect(),
-shares_to_remove: session_plan.nodes_to_remove.iter().cloned().map(Into::into).collect(),
-}));
-for node in &confirmations {
-core.cluster.send(&node, initialization_message.clone())?;
-}
-
-// create session on this node if required
-if need_create_session {
-data.active_key_sessions.insert(key_id.clone(), Self::create_share_change_session(core, key_id,
-session_master.clone(),
-queued_session.nodes(),
-session_plan)?);
-}
-
-// initialize session if required
-let wait_for_confirmations = !confirmations.is_empty();
-if !wait_for_confirmations {
-data.active_key_sessions.get_mut(&key_id)
-.expect("!wait_for_confirmations is true only if this is the only session participant; if this is session participant, session is created above; qed")
-.initialize()?;
-} else {
-data.sessions_initialization_state.insert(key_id, SessionInitializationData {
-master: session_master,
-confirmations: confirmations,
-});
+if !Self::initialize_share_change_session(core, data, key_id)? {
+continue;
}

number_of_sessions_to_start = number_of_sessions_to_start - 1;
@@ -734,7 +754,9 @@ impl SessionImpl {
// iteration is finished => complete session
if data.state != SessionState::Finished {
data.sessions_queue = None;
-if data.active_key_sessions.len() == 0 && data.delegated_key_sessions.len() == 0 {
+if data.active_key_sessions.len() == 0 &&
+data.delegated_key_sessions.len() == 0 &&
+data.negotiation_sessions.len() == 0 {
Self::complete_session(core, data)?;
}
}
@@ -742,6 +764,65 @@ impl SessionImpl {
Ok(())
}

+/// Initialize share change session.
+fn initialize_share_change_session(core: &SessionCore, data: &mut SessionData, key_id: SessionId) -> Result<bool, Error> {
+// get selected version && old nodes set from key negotiation session
+let negotiation_session = data.negotiation_sessions.remove(&key_id)
+.expect("share change session is only initialized when negotiation is completed; qed");
+let (selected_version, selected_master) = negotiation_session.wait()?;
+let selected_version_holders = negotiation_session.version_holders(&selected_version)?;
+let selected_version_threshold = negotiation_session.key_threshold()?;
+
+// prepare session change plan && check if something needs to be changed
+let old_nodes_set = selected_version_holders;
+let new_nodes_set = data.new_nodes_set.as_ref()
+.expect("this method is called after consensus estabished; new_nodes_set is a result of consensus session; qed");
+let session_plan = prepare_share_change_session_plan(&core.all_nodes_set, selected_version_threshold, selected_version.clone(), &selected_master, &old_nodes_set, new_nodes_set)?;
+if session_plan.is_empty() {
+return Ok(false);
+}
+
+// send key session initialization requests
+let mut confirmations: BTreeSet<_> = session_plan.new_nodes_map.keys().cloned().collect();
+let need_create_session = confirmations.remove(&core.meta.self_node_id);
+let initialization_message = Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(InitializeShareChangeSession {
+session: core.meta.id.clone().into(),
+session_nonce: core.nonce,
+key_id: key_id.clone().into(),
+version: selected_version.into(),
+master_node_id: selected_master.clone().into(),
+consensus_group: session_plan.consensus_group.iter().cloned().map(Into::into).collect(),
+new_nodes_map: session_plan.new_nodes_map.iter()
+.map(|(n, nid)| (n.clone().into(), nid.clone().map(Into::into)))
+.collect(),
+}));
+for node in &confirmations {
+core.cluster.send(&node, initialization_message.clone())?;
+}
+
+// create session on this node if required
+if need_create_session {
+data.active_key_sessions.insert(key_id.clone(), Self::create_share_change_session(core, key_id,
+selected_master.clone(),
+session_plan)?);
+}
+
+// initialize session if required
+let wait_for_confirmations = !confirmations.is_empty();
+if !wait_for_confirmations {
+data.active_key_sessions.get_mut(&key_id)
+.expect("!wait_for_confirmations is true only if this is the only session participant; if this is session participant, session is created above; qed")
+.initialize()?;
+} else {
+data.sessions_initialization_state.insert(key_id, SessionInitializationData {
+master: selected_master,
+confirmations: confirmations,
+});
+}
+
+Ok(true)
+}
+
/// Return delegated session to master.
fn return_delegated_session(core: &SessionCore, key_id: &SessionId) -> Result<(), Error> {
assert!(core.meta.self_node_id != core.meta.master_node_id);
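For orientation, here is a compressed sketch of the control flow that the new master-node helper follows once key version negotiation has finished (standalone Rust with simplified stand-in types; `Plan`, `Outcome` and the numeric `NodeId` are illustrative only, not the SecretStore API):

```rust
use std::collections::BTreeSet;

// Stand-in types for the sketch.
type NodeId = u64;
struct Plan { participants: BTreeSet<NodeId> }

enum Outcome {
    NothingToDo,                              // plan is empty => Ok(false) above
    RunLocallyNow,                            // we are the only participant
    WaitForConfirmations(BTreeSet<NodeId>),   // wait for InitializeShareChangeSession confirmations
}

fn initialize_share_change(self_node: NodeId, plan: Plan) -> Outcome {
    if plan.participants.is_empty() {
        return Outcome::NothingToDo;
    }
    // every participant except ourselves must confirm the initialization request
    let mut confirmations = plan.participants;
    let need_local_session = confirmations.remove(&self_node);
    if confirmations.is_empty() {
        assert!(need_local_session, "a non-empty plan with no remote participants must include this node");
        Outcome::RunLocallyNow
    } else {
        Outcome::WaitForConfirmations(confirmations)
    }
}

fn main() {
    let plan = Plan { participants: [1, 2, 3].into_iter().collect() };
    match initialize_share_change(1, plan) {
        Outcome::WaitForConfirmations(nodes) => assert_eq!(nodes.len(), 2),
        _ => unreachable!(),
    }
}
```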
@@ -800,29 +881,55 @@ impl Session for SessionImpl {
}

impl ClusterSession for SessionImpl {
+type Id = SessionId;
+
+fn type_name() -> &'static str {
+"servers set change"
+}
+
+fn id(&self) -> SessionId {
+self.core.meta.id.clone()
+}
+
fn is_finished(&self) -> bool {
self.data.lock().state == SessionState::Finished
}

fn on_session_timeout(&self) {
-let mut data = self.data.lock();
-
-warn!("{}: servers set change session failed with timeout", self.core.meta.self_node_id);
-
-data.state = SessionState::Finished;
-data.result = Some(Err(Error::NodeDisconnected));
-self.core.completed.notify_all();
+self.on_session_error(&self.core.meta.self_node_id, Error::NodeDisconnected);
}

fn on_node_timeout(&self, node: &NodeId) {
+self.on_session_error(node, Error::NodeDisconnected);
+}
+
+fn on_session_error(&self, node: &NodeId, error: Error) {
+// error in generation session is considered fatal
+// => broadcast error if error occured on this node
+if *node == self.core.meta.self_node_id {
+// do not bother processing send error, as we already processing error
+let _ = self.core.cluster.broadcast(Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(ServersSetChangeError {
+session: self.core.meta.id.clone().into(),
+session_nonce: self.core.nonce,
+error: error.clone().into(),
+})));
+}
+
let mut data = self.data.lock();

-warn!("{}: servers set change session failed because {} connection has timeouted", self.core.meta.self_node_id, node);
+warn!("{}: servers set change session failed: {} on {}", self.core.meta.self_node_id, error, node);

data.state = SessionState::Finished;
-data.result = Some(Err(Error::NodeDisconnected));
+data.result = Some(Err(error));
self.core.completed.notify_all();
}

+fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
+match *message {
+Message::ServersSetChange(ref message) => self.process_message(sender, message),
+_ => unreachable!("cluster checks message to be correct before passing; qed"),
+}
+}
}

impl JobTransport for ServersSetChangeConsensusTransport {
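The reworked `ClusterSession` impl funnels both timeouts into a single error path, which broadcasts locally-originated errors before marking the session finished. A minimal stand-alone illustration of that shape (hypothetical `Cluster` trait and simplified types, not the real interfaces):

```rust
type NodeId = u64;

#[derive(Clone, Debug, PartialEq)]
enum Error { NodeDisconnected }

trait Cluster {
    // best-effort broadcast; send failures are deliberately ignored by the caller
    fn broadcast(&self, error: &Error);
}

struct Session<C: Cluster> { self_node: NodeId, cluster: C, result: Option<Result<(), Error>> }

impl<C: Cluster> Session<C> {
    fn on_session_timeout(&mut self) {
        let self_node = self.self_node;
        self.on_session_error(self_node, Error::NodeDisconnected);
    }

    fn on_node_timeout(&mut self, node: NodeId) {
        self.on_session_error(node, Error::NodeDisconnected);
    }

    fn on_session_error(&mut self, node: NodeId, error: Error) {
        if node == self.self_node {
            // the error originated here => tell the other nodes before giving up
            self.cluster.broadcast(&error);
        }
        self.result = Some(Err(error));
    }
}

struct NoopCluster;
impl Cluster for NoopCluster { fn broadcast(&self, _error: &Error) {} }

fn main() {
    let mut session = Session { self_node: 1, cluster: NoopCluster, result: None };
    session.on_node_timeout(2);
    assert_eq!(session.result, Some(Err(Error::NodeDisconnected)));
}
```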
@@ -873,6 +980,16 @@ impl JobTransport for UnknownSessionsJobTransport {
}
}

+impl KeyVersionNegotiationTransport for ServersSetChangeKeyVersionNegotiationTransport {
+fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> {
+self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ShareChangeKeyVersionNegotiation {
+session: self.id.clone().into(),
+session_nonce: self.nonce,
+message: message,
+})))
+}
+}
+
fn check_nodes_set(all_nodes_set: &BTreeSet<NodeId>, new_nodes_set: &BTreeSet<NodeId>) -> Result<(), Error> {
// all new nodes must be a part of all nodes set
match new_nodes_set.iter().any(|n| !all_nodes_set.contains(n)) {
@@ -891,7 +1008,6 @@ pub mod tests {
use key_server_cluster::cluster_sessions::ClusterSession;
use key_server_cluster::cluster::tests::DummyCluster;
use key_server_cluster::generation_session::tests::{MessageLoop as GenerationMessageLoop, Node as GenerationNode, generate_nodes_ids};
-use key_server_cluster::math;
use key_server_cluster::message::Message;
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
use key_server_cluster::admin_sessions::share_add_session::tests::check_secret_is_preserved;
@@ -946,11 +1062,7 @@ pub mod tests {
let admin_public = admin_key_pair.public().clone();

// compute original secret key
-let original_secret = math::compute_joint_secret(gml.nodes.values()
-.map(|nd| nd.key_storage.get(&SessionId::default()).unwrap().polynom1[0].clone())
-.collect::<Vec<_>>()
-.iter()).unwrap();
-let original_key_pair = KeyPair::from_secret(original_secret).unwrap();
+let original_key_pair = gml.compute_key_pair(1);

// all active nodes set
let mut all_nodes_set: BTreeSet<_> = gml.nodes.keys()
@@ -1108,7 +1220,7 @@ pub mod tests {
.collect());

// check that all removed nodes do not own key share
-assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).is_err()));
+assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_none()));

// check that all sessions have finished
assert!(ml.nodes.values().all(|n| n.session.is_finished()));
@@ -1134,7 +1246,7 @@ pub mod tests {
.collect());

// check that all removed nodes do not own key share
-assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).is_err()));
+assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_none()));

// check that all sessions have finished
assert!(ml.nodes.values().all(|n| n.session.is_finished()));
@@ -1160,7 +1272,7 @@ pub mod tests {
.collect());

// check that all isolated nodes still OWN key share
-assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_isolate.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).is_ok()));
+assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_isolate.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_some()));

// check that all sessions have finished
assert!(ml.nodes.iter().filter(|&(k, _)| !nodes_to_isolate.contains(k)).all(|(_, v)| v.session.is_finished()));
@@ -15,35 +15,24 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
-use std::collections::{VecDeque, BTreeSet, BTreeMap};
-use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare};
+use std::collections::{VecDeque, BTreeSet};
+use key_server_cluster::{Error, SessionId, KeyStorage};

-/// Session, queued for change.
-pub enum QueuedSession {
-/// Session is known on this node.
-Known(SessionId, DocumentKeyShare),
-/// Session is unknown on this node.
-Unknown(SessionId, BTreeSet<NodeId>),
-}
-
/// Queue of share change sessions.
pub struct SessionsQueue {
-/// Key storage.
-key_storage: Arc<KeyStorage>,
/// Sessions, known on this node.
known_sessions: VecDeque<SessionId>,
/// Unknown sessions.
-unknown_sessions: VecDeque<(SessionId, BTreeSet<NodeId>)>,
+unknown_sessions: VecDeque<SessionId>,
}

impl SessionsQueue {
/// Create new sessions queue.
-pub fn new(key_storage: Arc<KeyStorage>, unknown_sessions: BTreeMap<SessionId, BTreeSet<NodeId>>) -> Self {
+pub fn new(key_storage: &Arc<KeyStorage>, unknown_sessions: BTreeSet<SessionId>) -> Self {
// TODO: optimizations:
// 1) known sessions - change to iter
// 2) unknown sesions - request chunk-by-chunk
SessionsQueue {
-key_storage: key_storage.clone(),
known_sessions: key_storage.iter().map(|(k, _)| k).collect(),
unknown_sessions: unknown_sessions.into_iter().collect(),
}
@@ -51,37 +40,17 @@ impl SessionsQueue {
}

impl Iterator for SessionsQueue {
-type Item = Result<QueuedSession, Error>;
+type Item = Result<SessionId, Error>;

fn next(&mut self) -> Option<Self::Item> {
if let Some(known_session) = self.known_sessions.pop_front() {
-return Some(self.key_storage.get(&known_session)
-.map(|session| QueuedSession::Known(known_session, session))
-.map_err(|e| Error::KeyStorage(e.into())));
+return Some(Ok(known_session));
}

if let Some(unknown_session) = self.unknown_sessions.pop_front() {
-return Some(Ok(QueuedSession::Unknown(unknown_session.0, unknown_session.1)));
+return Some(Ok(unknown_session));
}

None
}
}
-
-impl QueuedSession {
-/// Queued session (key) id.
-pub fn id(&self) -> &SessionId {
-match *self {
-QueuedSession::Known(ref session_id, _) => session_id,
-QueuedSession::Unknown(ref session_id, _) => session_id,
-}
-}
-
-/// OWners of key shares (aka session nodes).
-pub fn nodes(&self) -> BTreeSet<NodeId> {
-match *self {
-QueuedSession::Known(_, ref key_share) => key_share.id_numbers.keys().cloned().collect(),
-QueuedSession::Unknown(_, ref nodes) => nodes.clone(),
-}
-}
-}
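With `QueuedSession` gone, the queue is just an iterator over key ids: sessions known locally are yielded first, then the ones reported by other nodes. A self-contained sketch of that behaviour (simplified types; the real iterator yields `Result<SessionId, Error>`):

```rust
use std::collections::VecDeque;

type SessionId = u64;

struct SessionsQueue {
    known_sessions: VecDeque<SessionId>,
    unknown_sessions: VecDeque<SessionId>,
}

impl Iterator for SessionsQueue {
    type Item = SessionId;
    fn next(&mut self) -> Option<SessionId> {
        // known (locally stored) sessions first, then sessions only other nodes know about
        self.known_sessions.pop_front().or_else(|| self.unknown_sessions.pop_front())
    }
}

fn main() {
    let queue = SessionsQueue {
        known_sessions: vec![1, 2].into(),
        unknown_sessions: vec![3].into(),
    };
    assert_eq!(queue.collect::<Vec<_>>(), vec![1, 2, 3]);
}
```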
(File diff suppressed because it is too large.)
@@ -16,6 +16,7 @@

use std::sync::Arc;
use std::collections::{BTreeSet, BTreeMap};
+use bigint::hash::H256;
use ethkey::Secret;
use key_server_cluster::{Error, NodeId, SessionId, KeyStorage};
use key_server_cluster::cluster::Cluster;
@@ -23,15 +24,10 @@ use key_server_cluster::cluster_sessions::ClusterSession;
use key_server_cluster::math;
use key_server_cluster::jobs::servers_set_change_access_job::ServersSetChangeAccessRequest;
use key_server_cluster::jobs::job_session::JobTransport;
-use key_server_cluster::message::{Message, ServersSetChangeMessage, ServersSetChangeShareAddMessage, ServersSetChangeShareMoveMessage,
-ServersSetChangeShareRemoveMessage};
+use key_server_cluster::message::{Message, ServersSetChangeMessage, ServersSetChangeShareAddMessage};
use key_server_cluster::share_add_session::{SessionTransport as ShareAddSessionTransport,
SessionImpl as ShareAddSessionImpl, SessionParams as ShareAddSessionParams};
-use key_server_cluster::share_move_session::{SessionTransport as ShareMoveSessionTransport,
-SessionImpl as ShareMoveSessionImpl, SessionParams as ShareMoveSessionParams};
-use key_server_cluster::share_remove_session::{SessionTransport as ShareRemoveSessionTransport,
-SessionImpl as ShareRemoveSessionImpl, SessionParams as ShareRemoveSessionParams};
-use key_server_cluster::message::{ShareAddMessage, ShareMoveMessage, ShareRemoveMessage};
+use key_server_cluster::message::ShareAddMessage;
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;

/// Single session meta-change session. Brief overview:
@@ -50,22 +46,14 @@ pub struct ShareChangeSession {
cluster: Arc<Cluster>,
/// Key storage.
key_storage: Arc<KeyStorage>,
-/// Old nodes set.
-old_nodes_set: BTreeSet<NodeId>,
-/// All cluster nodes set.
-cluster_nodes_set: BTreeSet<NodeId>,
+/// Key version.
+key_version: H256,
+/// Consensus group to use in ShareAdd session.
+consensus_group: Option<BTreeSet<NodeId>>,
/// Nodes to add shares for.
-nodes_to_add: Option<BTreeMap<NodeId, Secret>>,
-/// Nodes to move shares from/to.
-nodes_to_move: Option<BTreeMap<NodeId, NodeId>>,
-/// Nodes to remove shares from.
-nodes_to_remove: Option<BTreeSet<NodeId>>,
+new_nodes_map: Option<BTreeMap<NodeId, Option<Secret>>>,
/// Share add session.
share_add_session: Option<ShareAddSessionImpl<ShareChangeTransport>>,
-/// Share move session.
-share_move_session: Option<ShareMoveSessionImpl<ShareChangeTransport>>,
-/// Share remove session.
-share_remove_session: Option<ShareRemoveSessionImpl<ShareChangeTransport>>,
/// Is finished.
is_finished: bool,
}
@@ -73,14 +61,12 @@ pub struct ShareChangeSession {
/// Share change session plan.
#[derive(Debug)]
pub struct ShareChangeSessionPlan {
-/// Nodes that are isolated and need to be removed before share addition.
-pub isolated_nodes: BTreeSet<NodeId>,
+/// Key version that plan is valid for.
+pub key_version: H256,
+/// Consensus group to use in ShareAdd session.
+pub consensus_group: BTreeSet<NodeId>,
/// Nodes to add shares for.
-pub nodes_to_add: BTreeMap<NodeId, Secret>,
-/// Nodes to move shares from/to (keys = target nodes, values = source nodes).
-pub nodes_to_move: BTreeMap<NodeId, NodeId>,
-/// Nodes to remove shares from.
-pub nodes_to_remove: BTreeSet<NodeId>,
+pub new_nodes_map: BTreeMap<NodeId, Option<Secret>>,
}

/// Session parameters.
@@ -95,10 +81,6 @@ pub struct ShareChangeSessionParams {
pub cluster: Arc<Cluster>,
/// Keys storage.
pub key_storage: Arc<KeyStorage>,
-/// All cluster nodes set.
-pub cluster_nodes_set: BTreeSet<NodeId>,
-/// Old nodes set.
-pub old_nodes_set: BTreeSet<NodeId>,
/// Session plan.
pub plan: ShareChangeSessionPlan,
}
@@ -118,33 +100,22 @@ impl ShareChangeSession {
/// Create new share change session.
pub fn new(params: ShareChangeSessionParams) -> Result<Self, Error> {
// we can't create sessions right now, because key share is read when session is created, but it can change in previous session
-let isolated_nodes = if !params.plan.isolated_nodes.is_empty() { Some(params.plan.isolated_nodes) } else { None };
-let nodes_to_add = if !params.plan.nodes_to_add.is_empty() { Some(params.plan.nodes_to_add) } else { None };
-let nodes_to_remove = if !params.plan.nodes_to_remove.is_empty() { Some(params.plan.nodes_to_remove) } else { None };
-let nodes_to_move = if !params.plan.nodes_to_move.is_empty() { Some(params.plan.nodes_to_move) } else { None };
-debug_assert!(isolated_nodes.is_some() || nodes_to_add.is_some() || nodes_to_move.is_some() || nodes_to_remove.is_some());
-
-// if it is degenerated session (only isolated nodes are removed && no network communication required)
-// => remove isolated nodes && finish session
-if let Some(isolated_nodes) = isolated_nodes {
-Self::remove_isolated_nodes(&params.meta, &params.key_storage, isolated_nodes)?;
-}
-
-let is_finished = nodes_to_add.is_none() && nodes_to_remove.is_none() && nodes_to_move.is_none();
+let key_version = params.plan.key_version;
+let consensus_group = if !params.plan.consensus_group.is_empty() { Some(params.plan.consensus_group) } else { None };
+let new_nodes_map = if !params.plan.new_nodes_map.is_empty() { Some(params.plan.new_nodes_map) } else { None };
+debug_assert!(new_nodes_map.is_some());
+
+let is_finished = new_nodes_map.is_none();
Ok(ShareChangeSession {
session_id: params.session_id,
nonce: params.nonce,
meta: params.meta,
cluster: params.cluster,
key_storage: params.key_storage,
-old_nodes_set: params.old_nodes_set,
-cluster_nodes_set: params.cluster_nodes_set,
-nodes_to_add: nodes_to_add,
-nodes_to_remove: nodes_to_remove,
-nodes_to_move: nodes_to_move,
+key_version: key_version,
+consensus_group: consensus_group,
+new_nodes_map: new_nodes_map,
share_add_session: None,
-share_move_session: None,
-share_remove_session: None,
is_finished: is_finished,
})
}
@@ -184,52 +155,10 @@ impl ShareChangeSession {
Ok(())
}

-/// When share-move message is received.
-pub fn on_share_move_message(&mut self, sender: &NodeId, message: &ShareMoveMessage) -> Result<(), Error> {
-if self.share_move_session.is_none() {
-self.create_share_move_session()?;
-}
-
-let change_state_needed = self.share_move_session.as_ref()
-.map(|share_move_session| {
-let was_finished = share_move_session.is_finished();
-share_move_session.process_message(sender, message)
-.map(|_| share_move_session.is_finished() && !was_finished)
-})
-.unwrap_or(Err(Error::InvalidMessage))?;
-if change_state_needed {
-self.proceed_to_next_state()?;
-}
-
-Ok(())
-}
-
-/// When share-remove message is received.
-pub fn on_share_remove_message(&mut self, sender: &NodeId, message: &ShareRemoveMessage) -> Result<(), Error> {
-if self.share_remove_session.is_none() {
-self.create_share_remove_session()?;
-}
-
-let change_state_needed = self.share_remove_session.as_ref()
-.map(|share_remove_session| {
-let was_finished = share_remove_session.is_finished();
-share_remove_session.process_message(sender, message)
-.map(|_| share_remove_session.is_finished() && !was_finished)
-})
-.unwrap_or(Err(Error::InvalidMessage))?;
-if change_state_needed {
-self.proceed_to_next_state()?;
-}
-
-Ok(())
-}
-
/// Create new share add session.
fn create_share_add_session(&mut self) -> Result<(), Error> {
-let nodes_to_add = self.nodes_to_add.take().ok_or(Error::InvalidStateForRequest)?;
-let new_nodes_set = self.old_nodes_set.iter().map(|n| (n.clone(), None))
-.chain(nodes_to_add.clone().into_iter().map(|(k, v)| (k, Some(v))))
-.collect();
+let consensus_group = self.consensus_group.take().ok_or(Error::InvalidStateForRequest)?;
+let new_nodes_map = self.new_nodes_map.take().ok_or(Error::InvalidStateForRequest)?;
let share_add_session = ShareAddSessionImpl::new(ShareAddSessionParams {
meta: self.meta.clone(),
nonce: self.nonce,
@@ -237,88 +166,31 @@ impl ShareChangeSession {
key_storage: self.key_storage.clone(),
admin_public: None,
})?;
-share_add_session.set_consensus_output(self.old_nodes_set.clone(), new_nodes_set)?;
+share_add_session.set_consensus_output(&self.key_version, consensus_group, new_nodes_map)?;
self.share_add_session = Some(share_add_session);
Ok(())
}

-/// Create new share move session.
-fn create_share_move_session(&mut self) -> Result<(), Error> {
-let nodes_to_move = self.nodes_to_move.take().ok_or(Error::InvalidStateForRequest)?;
-let share_move_session = ShareMoveSessionImpl::new(ShareMoveSessionParams {
-meta: self.meta.clone(),
-nonce: self.nonce,
-transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()),
-key_storage: self.key_storage.clone(),
-admin_public: None,
-})?;
-share_move_session.set_consensus_output(nodes_to_move)?;
-self.share_move_session = Some(share_move_session);
-Ok(())
-}
-
-/// Create new share remove session.
-fn create_share_remove_session(&mut self) -> Result<(), Error> {
-let nodes_to_remove = self.nodes_to_remove.take().ok_or(Error::InvalidStateForRequest)?;
-let share_remove_session = ShareRemoveSessionImpl::new(ShareRemoveSessionParams {
-meta: self.meta.clone(),
-nonce: self.nonce,
-cluster_nodes_set: self.cluster_nodes_set.clone(),
-transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()),
-key_storage: self.key_storage.clone(),
-admin_public: None,
-})?;
-share_remove_session.set_consensus_output(nodes_to_remove)?;
-self.share_remove_session = Some(share_remove_session);
-Ok(())
-}
-
/// Proceed to the next state.
fn proceed_to_next_state(&mut self) -> Result<(), Error> {
if self.meta.self_node_id != self.meta.master_node_id {
-if self.nodes_to_add.is_none() && self.nodes_to_move.is_none() && self.nodes_to_remove.is_none() {
+if self.new_nodes_map.is_none() {
self.is_finished = true;
}
return Ok(());
}

-if self.nodes_to_add.is_some() {
+if self.new_nodes_map.is_some() {
self.create_share_add_session()?;
return self.share_add_session.as_ref()
.expect("either create_share_add_session fails, or session is created; qed")
-.initialize(None, None, None);
-}
-
-if self.nodes_to_move.is_some() {
-self.create_share_move_session()?;
-return self.share_move_session.as_ref()
-.expect("either create_share_move_session fails, or session is created; qed")
-.initialize(None, None, None);
-}
-
-if self.nodes_to_remove.is_some() {
-self.create_share_remove_session()?;
-return self.share_remove_session.as_ref()
-.expect("either create_share_remove_session fails, or session is created; qed")
-.initialize(None, None, None);
+.initialize(None, None, None, None);
}

self.is_finished = true;

Ok(())
}

-/// Remove isolated nodes from key share.
-fn remove_isolated_nodes(meta: &ShareChangeSessionMeta, key_storage: &Arc<KeyStorage>, isolated_nodes: BTreeSet<NodeId>) -> Result<(), Error> {
-let mut key_share = key_storage.get(&meta.id).map_err(|e| Error::KeyStorage(e.into()))?;
-for isolated_node in &isolated_nodes {
-key_share.id_numbers.remove(isolated_node);
-}
-if key_share.id_numbers.len() < key_share.threshold + 1 {
-return Err(Error::InvalidNodesConfiguration);
-}
-key_storage.update(meta.id.clone(), key_share).map_err(|e| Error::KeyStorage(e.into()))
-}
}

impl ShareChangeTransport {
@@ -345,7 +217,11 @@ impl JobTransport for ShareChangeTransport {
}

impl ShareAddSessionTransport for ShareChangeTransport {
-fn set_id_numbers(&mut self, _id_numbers: BTreeMap<NodeId, Secret>) {
+fn nodes(&self) -> BTreeSet<NodeId> {
+self.cluster.nodes()
+}
+
+fn set_master_data(&mut self, _consensus_group: BTreeSet<NodeId>, _id_numbers: BTreeMap<NodeId, Option<Secret>>) {
unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed")
}

@@ -358,69 +234,72 @@ impl ShareAddSessionTransport for ShareChangeTransport {
}
}

-impl ShareMoveSessionTransport for ShareChangeTransport {
-fn set_shares_to_move_reversed(&mut self, _shares_to_move: BTreeMap<NodeId, NodeId>) {
-unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed")
+/// Prepare share change plan for moving from old `old_key_version_owners` to `new_nodes_set`.
+pub fn prepare_share_change_session_plan(cluster_nodes: &BTreeSet<NodeId>, threshold: usize, key_version: H256, master: &NodeId, old_key_version_owners: &BTreeSet<NodeId>, new_nodes_set: &BTreeSet<NodeId>) -> Result<ShareChangeSessionPlan, Error> {
+// make new nodes map, so that:
+// all non-isolated old nodes will have their id number preserved
+// all new nodes will have new id number
+let mut new_nodes_map = new_nodes_set.difference(&old_key_version_owners)
+.map(|n| math::generate_random_scalar().map(|id| (n.clone(), Some(id))))
+.collect::<Result<BTreeMap<_, _>, _>>()?;
+if !new_nodes_map.is_empty() {
+for old_node in old_key_version_owners.iter().filter(|n| cluster_nodes.contains(n)) {
+new_nodes_map.insert(old_node.clone(), None);
+}
}

-fn send(&self, node: &NodeId, message: ShareMoveMessage) -> Result<(), Error> {
-self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ServersSetChangeShareMoveMessage {
-session: self.session_id.clone().into(),
-session_nonce: self.nonce,
-message: message,
-})))
-}
-}
-
-impl ShareRemoveSessionTransport for ShareChangeTransport {
-fn send(&self, node: &NodeId, message: ShareRemoveMessage) -> Result<(), Error> {
-self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ServersSetChangeShareRemoveMessage {
-session: self.session_id.clone().into(),
-session_nonce: self.nonce,
-message: message,
-})))
-}
-}
-
-/// Prepare share change plan for moving from old `session_nodes` to `new_nodes_set`.
-pub fn prepare_share_change_session_plan(cluster_nodes_set: &BTreeSet<NodeId>, session_nodes: &BTreeSet<NodeId>, new_nodes_set: &BTreeSet<NodeId>) -> Result<ShareChangeSessionPlan, Error> {
-let mut nodes_to_add: BTreeSet<_> = new_nodes_set.difference(&session_nodes).cloned().collect();
-let mut nodes_to_move = BTreeMap::new();
-// isolated nodes are the nodes that are not currently in cluster + that are in new nodes set
-let isolated_nodes: BTreeSet<_> = session_nodes.difference(&cluster_nodes_set)
-.filter(|n| !new_nodes_set.contains(n))
-.cloned()
-.collect();
-// removed nodes are all old session nodes, except nodes that are in new set + except isolated nodes
-let mut nodes_to_remove: BTreeSet<_> = session_nodes.difference(&new_nodes_set)
-.filter(|n| !isolated_nodes.contains(n))
-.cloned()
-.collect();
-
-while !nodes_to_remove.is_empty() && !nodes_to_add.is_empty() {
-let source_node = nodes_to_remove.iter().cloned().nth(0).expect("nodes_to_remove.is_empty is checked in while condition; qed");
-let target_node = nodes_to_add.iter().cloned().nth(0).expect("nodes_to_add.is_empty is checked in while condition; qed");
-nodes_to_remove.remove(&source_node);
-nodes_to_add.remove(&target_node);
-nodes_to_move.insert(target_node, source_node);
-}
+// select consensus group if there are some nodes to add
+let consensus_group = if !new_nodes_map.is_empty() {
+::std::iter::once(master.clone())
+.chain(old_key_version_owners.iter()
+.filter(|n| *n != master && cluster_nodes.contains(*n))
+.take(threshold)
+.cloned())
+.collect()
+} else {
+BTreeSet::new()
+};

Ok(ShareChangeSessionPlan {
-isolated_nodes: isolated_nodes,
-nodes_to_add: nodes_to_add.into_iter()
-.map(|n| math::generate_random_scalar().map(|s| (n, s)))
-.collect::<Result<BTreeMap<_, _>, _>>()?,
-nodes_to_move: nodes_to_move,
-nodes_to_remove: nodes_to_remove,
+key_version: key_version,
+consensus_group: consensus_group,
+new_nodes_map: new_nodes_map,
})
}

impl ShareChangeSessionPlan {
/// Is empty (nothing-to-do) plan?
pub fn is_empty(&self) -> bool {
-self.isolated_nodes.is_empty()
-&& self.nodes_to_add.is_empty()
-&& self.nodes_to_move.is_empty()
-&& self.nodes_to_remove.is_empty()
+self.new_nodes_map.is_empty()
+}
+}
+
+#[cfg(test)]
+mod tests {
+use key_server_cluster::math;
+use super::prepare_share_change_session_plan;
+
+#[test]
+fn share_change_plan_creates_empty_plan() {
+let cluster_nodes: Vec<_> = (0..3).map(|_| math::generate_random_point().unwrap()).collect();
+let master = cluster_nodes[0].clone();
+let old_key_version_owners = cluster_nodes.iter().cloned().collect();
+let new_nodes_set = cluster_nodes.iter().cloned().collect();
+let plan = prepare_share_change_session_plan(&cluster_nodes.iter().cloned().collect(), 1, Default::default(), &master, &old_key_version_owners, &new_nodes_set).unwrap();
+
+assert!(plan.is_empty());
+}
+
+#[test]
+fn share_change_plan_adds_new_nodes() {
+let cluster_nodes: Vec<_> = (0..3).map(|_| math::generate_random_point().unwrap()).collect();
+let master = cluster_nodes[0].clone();
+let old_key_version_owners = cluster_nodes[0..2].iter().cloned().collect();
+let new_nodes_set = cluster_nodes.iter().cloned().collect();
+let plan = prepare_share_change_session_plan(&cluster_nodes.iter().cloned().collect(), 1, Default::default(), &master, &old_key_version_owners, &new_nodes_set).unwrap();
+
+assert!(!plan.is_empty());
+assert_eq!(old_key_version_owners, plan.consensus_group);
+assert_eq!(new_nodes_set, plan.new_nodes_map.keys().cloned().collect());
}
}
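A standalone restatement of the planning rule that the new `prepare_share_change_session_plan` and its tests encode: every node entering the set gets a fresh id number, current owners keep theirs, and the consensus group is the master plus `threshold` other reachable owners (simplified stand-in types and a counter instead of `math::generate_random_scalar`):

```rust
use std::collections::{BTreeMap, BTreeSet};

type NodeId = u64;

struct Plan {
    consensus_group: BTreeSet<NodeId>,
    // Some(id) = node receives a new share with this id number, None = node keeps its share
    new_nodes_map: BTreeMap<NodeId, Option<u64>>,
}

fn prepare_plan(cluster: &BTreeSet<NodeId>, threshold: usize, master: NodeId,
    old_owners: &BTreeSet<NodeId>, new_set: &BTreeSet<NodeId>) -> Plan {
    let mut next_id = 1u64; // stand-in for a random scalar
    let mut new_nodes_map: BTreeMap<_, _> = new_set.difference(old_owners)
        .map(|n| { let id = next_id; next_id += 1; (*n, Some(id)) })
        .collect();
    if !new_nodes_map.is_empty() {
        // reachable old owners keep their existing id numbers
        for old in old_owners.iter().filter(|n| cluster.contains(*n)) {
            new_nodes_map.insert(*old, None);
        }
    }
    let consensus_group = if new_nodes_map.is_empty() {
        BTreeSet::new()
    } else {
        std::iter::once(master)
            .chain(old_owners.iter().filter(|n| **n != master && cluster.contains(*n)).take(threshold).cloned())
            .collect()
    };
    Plan { consensus_group, new_nodes_map }
}

fn main() {
    let cluster: BTreeSet<NodeId> = (1u64..=3).collect();
    let old: BTreeSet<NodeId> = [1u64, 2].into_iter().collect();
    let plan = prepare_plan(&cluster, 1, 1, &old, &cluster);
    assert_eq!(plan.consensus_group, old);   // master + one other current owner
    assert_eq!(plan.new_nodes_map.len(), 3); // node 3 is added, nodes 1 and 2 keep their shares
}
```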
|
@ -1,828 +0,0 @@
|
|||||||
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
|
||||||
// This file is part of Parity.
|
|
||||||
|
|
||||||
// Parity is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
|
|
||||||
// Parity is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU General Public License for more details.
|
|
||||||
|
|
||||||
// You should have received a copy of the GNU General Public License
|
|
||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::collections::BTreeSet;
|
|
||||||
use parking_lot::{Mutex, Condvar};
|
|
||||||
use ethkey::{Public, Signature};
|
|
||||||
use key_server_cluster::{Error, NodeId, SessionId, DocumentKeyShare, KeyStorage};
|
|
||||||
use key_server_cluster::cluster::Cluster;
|
|
||||||
use key_server_cluster::cluster_sessions::ClusterSession;
|
|
||||||
use key_server_cluster::message::{Message, ShareRemoveMessage, ShareRemoveConsensusMessage, ConsensusMessageWithServersSet,
|
|
||||||
ShareRemoveRequest, ShareRemoveConfirm, ShareRemoveError, InitializeConsensusSessionWithServersSet,
|
|
||||||
ConfirmConsensusInitialization};
|
|
||||||
use key_server_cluster::jobs::job_session::JobTransport;
|
|
||||||
use key_server_cluster::jobs::dummy_job::{DummyJob, DummyJobTransport};
|
|
||||||
use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest};
|
|
||||||
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
|
|
||||||
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
|
||||||
|
|
||||||
/// Share remove session API.
|
|
||||||
pub trait Session: Send + Sync + 'static {
|
|
||||||
/// Wait until session is completed.
|
|
||||||
fn wait(&self) -> Result<(), Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Share remove session transport.
|
|
||||||
pub trait SessionTransport: Clone + JobTransport<PartialJobRequest=ServersSetChangeAccessRequest, PartialJobResponse=bool> {
|
|
||||||
/// Send message to given node.
|
|
||||||
fn send(&self, node: &NodeId, message: ShareRemoveMessage) -> Result<(), Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Share remove session.
|
|
||||||
pub struct SessionImpl<T: SessionTransport> {
|
|
||||||
/// Session core.
|
|
||||||
core: SessionCore<T>,
|
|
||||||
/// Session data.
|
|
||||||
data: Mutex<SessionData<T>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Immutable session data.
|
|
||||||
struct SessionCore<T: SessionTransport> {
|
|
||||||
/// Session metadata.
|
|
||||||
pub meta: ShareChangeSessionMeta,
|
|
||||||
/// Session-level nonce.
|
|
||||||
pub nonce: u64,
|
|
||||||
/// Original key share.
|
|
||||||
pub key_share: DocumentKeyShare,
|
|
||||||
/// All known cluster nodes.
|
|
||||||
pub cluster_nodes_set: BTreeSet<NodeId>,
|
|
||||||
/// Session transport to communicate to other cluster nodes.
|
|
||||||
pub transport: T,
|
|
||||||
/// Key storage.
|
|
||||||
pub key_storage: Arc<KeyStorage>,
|
|
||||||
/// Administrator public key.
|
|
||||||
pub admin_public: Option<Public>,
|
|
||||||
/// SessionImpl completion condvar.
|
|
||||||
pub completed: Condvar,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Share remove consensus session type.
|
|
||||||
type ShareRemoveChangeConsensusSession<T> = ConsensusSession<ServersSetChangeAccessJob, T, DummyJob, DummyJobTransport>;
|
|
||||||
|
|
||||||
/// Mutable session data.
|
|
||||||
struct SessionData<T: SessionTransport> {
|
|
||||||
/// Session state.
|
|
||||||
pub state: SessionState,
|
|
||||||
/// Consensus session.
|
|
||||||
pub consensus_session: Option<ShareRemoveChangeConsensusSession<T>>,
|
|
||||||
/// Shares to remove.
|
|
||||||
pub shares_to_remove: Option<BTreeSet<NodeId>>,
|
|
||||||
/// Remove confirmations to receive.
|
|
||||||
pub remove_confirmations_to_receive: Option<BTreeSet<NodeId>>,
|
|
||||||
/// Share remove change result.
|
|
||||||
pub result: Option<Result<(), Error>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// SessionImpl creation parameters
|
|
||||||
pub struct SessionParams<T: SessionTransport> {
|
|
||||||
/// Session meta.
|
|
||||||
pub meta: ShareChangeSessionMeta,
|
|
||||||
/// Session nonce.
|
|
||||||
pub nonce: u64,
|
|
||||||
/// All known cluster nodes.
|
|
||||||
pub cluster_nodes_set: BTreeSet<NodeId>,
|
|
||||||
/// Session transport to communicate to other cluster nodes.
|
|
||||||
pub transport: T,
|
|
||||||
/// Key storage.
|
|
||||||
pub key_storage: Arc<KeyStorage>,
|
|
||||||
/// Administrator public key.
|
|
||||||
pub admin_public: Option<Public>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Share move session state.
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
enum SessionState {
|
|
||||||
/// State when consensus is establishing.
|
|
||||||
ConsensusEstablishing,
|
|
||||||
/// Waiting for remove confirmation.
|
|
||||||
WaitingForRemoveConfirmation,
|
|
||||||
/// Session is finished.
|
|
||||||
Finished,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Isolated ShareRemove session transport.
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct IsolatedSessionTransport {
|
|
||||||
/// Key id.
|
|
||||||
session: SessionId,
|
|
||||||
/// Session-level nonce.
|
|
||||||
nonce: u64,
|
|
||||||
/// Cluster.
|
|
||||||
cluster: Arc<Cluster>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> SessionImpl<T> where T: SessionTransport {
|
|
||||||
/// Create new share remove session.
|
|
||||||
pub fn new(params: SessionParams<T>) -> Result<Self, Error> {
|
|
||||||
Ok(SessionImpl {
|
|
||||||
core: SessionCore {
|
|
||||||
meta: params.meta.clone(),
|
|
||||||
nonce: params.nonce,
|
|
||||||
key_share: params.key_storage.get(¶ms.meta.id).map_err(|e| Error::KeyStorage(e.into()))?,
|
|
||||||
cluster_nodes_set: params.cluster_nodes_set,
|
|
||||||
transport: params.transport,
|
|
||||||
key_storage: params.key_storage,
|
|
||||||
admin_public: params.admin_public,
|
|
||||||
completed: Condvar::new(),
|
|
||||||
},
|
|
||||||
data: Mutex::new(SessionData {
|
|
||||||
state: SessionState::ConsensusEstablishing,
|
|
||||||
consensus_session: None,
|
|
||||||
shares_to_remove: None,
|
|
||||||
remove_confirmations_to_receive: None,
|
|
||||||
result: None,
|
|
||||||
}),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set pre-established consensus data.
|
|
||||||
pub fn set_consensus_output(&self, shares_to_remove: BTreeSet<NodeId>) -> Result<(), Error> {
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
|
|
||||||
// check state
|
|
||||||
if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() {
|
|
||||||
return Err(Error::InvalidStateForRequest);
|
|
||||||
}
|
|
||||||
|
|
||||||
check_shares_to_remove(&self.core, &shares_to_remove)?;
|
|
||||||
|
|
||||||
let remove_confirmations_to_receive: BTreeSet<NodeId> = shares_to_remove.iter()
|
|
||||||
.filter(|n| self.core.cluster_nodes_set.contains(n))
|
|
||||||
.cloned()
|
|
||||||
.collect();
|
|
||||||
let need_wait_for_confirmations = !remove_confirmations_to_receive.is_empty();
|
|
||||||
data.shares_to_remove = Some(shares_to_remove);
|
|
||||||
data.remove_confirmations_to_receive = Some(remove_confirmations_to_receive);
|
|
||||||
|
|
||||||
// on slave nodes it can happen that all nodes being removed are isolated
|
|
||||||
// => there's no need to wait for confirmations
|
|
||||||
if !need_wait_for_confirmations {
|
|
||||||
Self::complete_session(&self.core, &mut *data)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
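// Illustrative note (not part of the original source; node names are hypothetical): if the
// key is shared by {A, B, C} and the node being removed, C, is already isolated from the
// cluster, then `remove_confirmations_to_receive` stays empty on slave nodes and
// `complete_session` runs immediately, without waiting for any confirmation from C.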
|
|
||||||
|
|
||||||
/// Initialize share remove session on master node.
|
|
||||||
pub fn initialize(&self, shares_to_remove: Option<BTreeSet<NodeId>>, old_set_signature: Option<Signature>, new_set_signature: Option<Signature>) -> Result<(), Error> {
|
|
||||||
debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id);
|
|
||||||
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
// check state
|
|
||||||
if data.state == SessionState::Finished {
|
|
||||||
// probably all nodes being removed are isolated => the session was already completed when consensus output was set
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() {
|
|
||||||
return Err(Error::InvalidStateForRequest);
|
|
||||||
}
|
|
||||||
|
|
||||||
// if consensus is not yet established => start consensus session
|
|
||||||
let is_consensus_pre_established = data.shares_to_remove.is_some();
|
|
||||||
if !is_consensus_pre_established {
|
|
||||||
let shares_to_remove = shares_to_remove.ok_or(Error::InvalidMessage)?;
|
|
||||||
check_shares_to_remove(&self.core, &shares_to_remove)?;
|
|
||||||
|
|
||||||
let old_set_signature = old_set_signature.ok_or(Error::InvalidMessage)?;
|
|
||||||
let new_set_signature = new_set_signature.ok_or(Error::InvalidMessage)?;
|
|
||||||
let old_nodes_set: BTreeSet<_> = self.core.key_share.id_numbers.keys().cloned().collect();
|
|
||||||
let new_nodes_set: BTreeSet<_> = old_nodes_set.iter().cloned().filter(|n| !shares_to_remove.contains(&n)).collect();
|
|
||||||
let mut active_nodes_set = old_nodes_set.clone();
|
|
||||||
let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?;
|
|
||||||
|
|
||||||
// if some session nodes were removed from the cluster (we treat this as a failure, or as an 'improper' removal)
|
|
||||||
// => do not require these nodes to be connected
|
|
||||||
for isolated_node in old_nodes_set.difference(&self.core.cluster_nodes_set) {
|
|
||||||
active_nodes_set.remove(&isolated_node);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut consensus_session = ConsensusSession::new(ConsensusSessionParams {
|
|
||||||
meta: self.core.meta.clone().into_consensus_meta(active_nodes_set.len())?,
|
|
||||||
consensus_executor: ServersSetChangeAccessJob::new_on_master(admin_public,
|
|
||||||
old_nodes_set.clone(),
|
|
||||||
old_nodes_set,
|
|
||||||
new_nodes_set,
|
|
||||||
old_set_signature,
|
|
||||||
new_set_signature),
|
|
||||||
consensus_transport: self.core.transport.clone(),
|
|
||||||
})?;
|
|
||||||
consensus_session.initialize(active_nodes_set)?;
|
|
||||||
data.consensus_session = Some(consensus_session);
|
|
||||||
data.remove_confirmations_to_receive = Some(shares_to_remove.clone());
|
|
||||||
data.shares_to_remove = Some(shares_to_remove);
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
// otherwise => start sending ShareRemove-specific messages
|
|
||||||
Self::on_consensus_established(&self.core, &mut *data)
|
|
||||||
}
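// Hedged usage sketch (mirrors the tests below; names are illustrative): the master node
// drives the session roughly as
//
//     session.initialize(Some(shares_to_remove), Some(old_set_signature), Some(new_set_signature))?;
//     session.wait()?;
//
// while slave nodes never call `initialize` and only feed incoming messages into
// `process_message`.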
|
|
||||||
|
|
||||||
/// Process single message.
|
|
||||||
pub fn process_message(&self, sender: &NodeId, message: &ShareRemoveMessage) -> Result<(), Error> {
|
|
||||||
if self.core.nonce != message.session_nonce() {
|
|
||||||
return Err(Error::ReplayProtection);
|
|
||||||
}
|
|
||||||
|
|
||||||
match message {
|
|
||||||
&ShareRemoveMessage::ShareRemoveConsensusMessage(ref message) =>
|
|
||||||
self.on_consensus_message(sender, message),
|
|
||||||
&ShareRemoveMessage::ShareRemoveRequest(ref message) =>
|
|
||||||
self.on_share_remove_request(sender, message),
|
|
||||||
&ShareRemoveMessage::ShareRemoveConfirm(ref message) =>
|
|
||||||
self.on_share_remove_confirmation(sender, message),
|
|
||||||
&ShareRemoveMessage::ShareRemoveError(ref message) =>
|
|
||||||
self.on_session_error(sender, message),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// When consensus-related message is received.
|
|
||||||
pub fn on_consensus_message(&self, sender: &NodeId, message: &ShareRemoveConsensusMessage) -> Result<(), Error> {
|
|
||||||
debug_assert!(self.core.meta.id == *message.session);
|
|
||||||
debug_assert!(sender != &self.core.meta.self_node_id);
|
|
||||||
|
|
||||||
// start slave consensus session if needed
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
if data.consensus_session.is_none() && sender == &self.core.meta.master_node_id {
|
|
||||||
match &message.message {
|
|
||||||
&ConsensusMessageWithServersSet::InitializeConsensusSession(ref message) => {
|
|
||||||
let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?;
|
|
||||||
let current_nodes_set = self.core.key_share.id_numbers.keys().cloned().collect();
|
|
||||||
data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams {
|
|
||||||
meta: self.core.meta.clone().into_consensus_meta(message.old_nodes_set.len())?,
|
|
||||||
consensus_executor: ServersSetChangeAccessJob::new_on_slave(admin_public, current_nodes_set),
|
|
||||||
consensus_transport: self.core.transport.clone(),
|
|
||||||
})?);
|
|
||||||
},
|
|
||||||
_ => return Err(Error::InvalidStateForRequest),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let (is_establishing_consensus, is_consensus_established, shares_to_remove) = {
|
|
||||||
let consensus_session = data.consensus_session.as_mut().ok_or(Error::InvalidMessage)?;
|
|
||||||
let is_establishing_consensus = consensus_session.state() == ConsensusSessionState::EstablishingConsensus;
|
|
||||||
let shares_to_remove = match &message.message {
|
|
||||||
&ConsensusMessageWithServersSet::InitializeConsensusSession(ref message) => {
|
|
||||||
consensus_session.on_consensus_partial_request(sender, ServersSetChangeAccessRequest::from(message))?;
|
|
||||||
let shares_to_remove = message.old_nodes_set.difference(&message.new_nodes_set).cloned().map(Into::into).collect::<BTreeSet<_>>();
|
|
||||||
check_shares_to_remove(&self.core, &shares_to_remove)?;
|
|
||||||
Some(shares_to_remove)
|
|
||||||
},
|
|
||||||
&ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ref message) => {
|
|
||||||
consensus_session.on_consensus_partial_response(sender, message.is_confirmed)?;
|
|
||||||
None
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
(
|
|
||||||
is_establishing_consensus,
|
|
||||||
consensus_session.state() == ConsensusSessionState::ConsensusEstablished,
|
|
||||||
shares_to_remove
|
|
||||||
)
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(shares_to_remove) = shares_to_remove {
|
|
||||||
data.remove_confirmations_to_receive = Some(shares_to_remove.clone());
|
|
||||||
data.shares_to_remove = Some(shares_to_remove);
|
|
||||||
}
|
|
||||||
if self.core.meta.self_node_id != self.core.meta.master_node_id || !is_establishing_consensus || !is_consensus_established {
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
Self::on_consensus_established(&self.core, &mut *data)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// When share remove request is received.
|
|
||||||
pub fn on_share_remove_request(&self, sender: &NodeId, message: &ShareRemoveRequest) -> Result<(), Error> {
|
|
||||||
debug_assert!(self.core.meta.id == *message.session);
|
|
||||||
debug_assert!(sender != &self.core.meta.self_node_id);
|
|
||||||
|
|
||||||
// awaiting this message from master node only
|
|
||||||
if sender != &self.core.meta.master_node_id {
|
|
||||||
return Err(Error::InvalidMessage);
|
|
||||||
}
|
|
||||||
|
|
||||||
// check state
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
if data.state == SessionState::ConsensusEstablishing && data.shares_to_remove.is_some() {
|
|
||||||
data.state = SessionState::WaitingForRemoveConfirmation;
|
|
||||||
} else if data.state != SessionState::WaitingForRemoveConfirmation {
|
|
||||||
return Err(Error::InvalidStateForRequest);
|
|
||||||
}
|
|
||||||
// only process if we are waiting for this request
|
|
||||||
{
|
|
||||||
let shares_to_remove = data.shares_to_remove.as_ref()
|
|
||||||
.expect("shares_to_remove is filled when consensus is established; we only process share move request after consensus is established; qed");
|
|
||||||
if !shares_to_remove.contains(&self.core.meta.self_node_id) {
|
|
||||||
return Err(Error::InvalidMessage);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// remove share
|
|
||||||
Self::complete_session(&self.core, &mut *data)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// When share remove confirmation is received.
|
|
||||||
pub fn on_share_remove_confirmation(&self, sender: &NodeId, message: &ShareRemoveConfirm) -> Result<(), Error> {
|
|
||||||
debug_assert!(self.core.meta.id == *message.session);
|
|
||||||
debug_assert!(sender != &self.core.meta.self_node_id);
|
|
||||||
|
|
||||||
// check state
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
if data.state == SessionState::ConsensusEstablishing && data.shares_to_remove.is_some() {
|
|
||||||
data.state = SessionState::WaitingForRemoveConfirmation;
|
|
||||||
} else if data.state != SessionState::WaitingForRemoveConfirmation {
|
|
||||||
return Err(Error::InvalidStateForRequest);
|
|
||||||
}
|
|
||||||
// find share source
|
|
||||||
{
|
|
||||||
let remove_confirmations_to_receive = data.remove_confirmations_to_receive.as_mut()
|
|
||||||
.expect("remove_confirmations_to_receive is filled when consensus is established; we only process share move confirmations after consensus is established; qed");
|
|
||||||
if !remove_confirmations_to_receive.remove(sender) {
|
|
||||||
return Err(Error::InvalidMessage);
|
|
||||||
}
|
|
||||||
|
|
||||||
if !remove_confirmations_to_receive.is_empty() {
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Self::complete_session(&self.core, &mut *data)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// When error has occurred on another node.
|
|
||||||
pub fn on_session_error(&self, sender: &NodeId, message: &ShareRemoveError) -> Result<(), Error> {
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
|
|
||||||
warn!("{}: share remove session failed with error: {} from {}", self.core.meta.self_node_id, message.error, sender);
|
|
||||||
|
|
||||||
data.state = SessionState::Finished;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Start sending ShareRemove-specific messages when consensus is established.
|
|
||||||
fn on_consensus_established(core: &SessionCore<T>, data: &mut SessionData<T>) -> Result<(), Error> {
|
|
||||||
// update state
|
|
||||||
data.state = SessionState::WaitingForRemoveConfirmation;
|
|
||||||
|
|
||||||
// send share remove requests to every required node
|
|
||||||
Self::disseminate_share_remove_requests(core, data)?;
|
|
||||||
|
|
||||||
{
|
|
||||||
let shares_to_remove = data.shares_to_remove.as_ref()
|
|
||||||
.expect("shares_to_remove is filled when consensus is established; on_consensus_established is called after consensus is established; qed");
|
|
||||||
let remove_confirmations_to_receive: BTreeSet<_> = shares_to_remove.iter()
|
|
||||||
.filter(|n| core.cluster_nodes_set.contains(n))
|
|
||||||
.cloned()
|
|
||||||
.collect();
|
|
||||||
if !shares_to_remove.contains(&core.meta.self_node_id) && !remove_confirmations_to_receive.is_empty() {
|
|
||||||
// remember remove confirmations to receive
|
|
||||||
data.remove_confirmations_to_receive = Some(remove_confirmations_to_receive);
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// complete session if share is lost
|
|
||||||
Self::complete_session(core, data)
|
|
||||||
}
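// Descriptive note: the master waits for ShareRemoveConfirm messages only when its own
// share is kept and at least one removed node is still connected; otherwise (its own share
// is being removed, or all removed nodes are isolated) it completes the session right away.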
|
|
||||||
|
|
||||||
/// Disseminate share remove requests.
|
|
||||||
fn disseminate_share_remove_requests(core: &SessionCore<T>, data: &mut SessionData<T>) -> Result<(), Error> {
|
|
||||||
let shares_to_remove = data.shares_to_remove.as_ref()
|
|
||||||
.expect("shares_to_remove is filled when consensus is established; disseminate_share_remove_requests is called after consensus is established; qed");
|
|
||||||
for node in shares_to_remove.iter().filter(|n| **n != core.meta.self_node_id && core.cluster_nodes_set.contains(n)) {
|
|
||||||
core.transport.send(node, ShareRemoveMessage::ShareRemoveRequest(ShareRemoveRequest {
|
|
||||||
session: core.meta.id.clone().into(),
|
|
||||||
session_nonce: core.nonce,
|
|
||||||
}))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Complete session on this node.
|
|
||||||
fn complete_session(core: &SessionCore<T>, data: &mut SessionData<T>) -> Result<(), Error> {
|
|
||||||
// update state
|
|
||||||
data.state = SessionState::Finished;
|
|
||||||
|
|
||||||
// if our share is being removed => remove it from storage
|
|
||||||
let shares_to_remove = data.shares_to_remove.as_ref()
|
|
||||||
.expect("shares_to_remove is filled when consensus is established; complete_session is called after consensus is established; qed");
|
|
||||||
if shares_to_remove.contains(&core.meta.self_node_id) {
|
|
||||||
// send confirmation to all other nodes
|
|
||||||
let new_nodes_set = core.key_share.id_numbers.keys().filter(|n| !shares_to_remove.contains(n)).collect::<Vec<_>>();
|
|
||||||
for node in new_nodes_set.into_iter().filter(|n| **n != core.meta.self_node_id && core.cluster_nodes_set.contains(n)) {
|
|
||||||
core.transport.send(&node, ShareRemoveMessage::ShareRemoveConfirm(ShareRemoveConfirm {
|
|
||||||
session: core.meta.id.clone().into(),
|
|
||||||
session_nonce: core.nonce,
|
|
||||||
}))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
return core.key_storage.remove(&core.meta.id)
|
|
||||||
.map_err(|e| Error::KeyStorage(e.into()));
|
|
||||||
}
|
|
||||||
|
|
||||||
// else => remove the removed nodes from our copy of key_share.id_numbers
|
|
||||||
let mut key_share = core.key_share.clone();
|
|
||||||
for share_to_remove in shares_to_remove {
|
|
||||||
key_share.id_numbers.remove(share_to_remove);
|
|
||||||
}
|
|
||||||
|
|
||||||
// ... and update key share in storage
|
|
||||||
core.key_storage.update(core.meta.id.clone(), key_share)
|
|
||||||
.map_err(|e| Error::KeyStorage(e.into()))
|
|
||||||
}
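// Worked example (illustrative only; node names are hypothetical): removing C from the
// share set {A, B, C} makes C delete its share from key storage and broadcast
// ShareRemoveConfirm, while A and B keep their shares and merely drop C from
// `id_numbers` via `key_storage.update`.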
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Session for SessionImpl<T> where T: SessionTransport + Send + Sync + 'static {
|
|
||||||
fn wait(&self) -> Result<(), Error> {
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
if data.result.is_none() {
|
|
||||||
self.core.completed.wait(&mut data);
|
|
||||||
}
|
|
||||||
|
|
||||||
data.result.clone()
|
|
||||||
.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> ClusterSession for SessionImpl<T> where T: SessionTransport {
|
|
||||||
fn is_finished(&self) -> bool {
|
|
||||||
self.data.lock().state == SessionState::Finished
|
|
||||||
}
|
|
||||||
|
|
||||||
fn on_session_timeout(&self) {
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
|
|
||||||
warn!("{}: share remove session failed with timeout", self.core.meta.self_node_id);
|
|
||||||
|
|
||||||
data.state = SessionState::Finished;
|
|
||||||
data.result = Some(Err(Error::NodeDisconnected));
|
|
||||||
self.core.completed.notify_all();
|
|
||||||
}
|
|
||||||
|
|
||||||
fn on_node_timeout(&self, node: &NodeId) {
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
|
|
||||||
warn!("{}: share remove session failed because {} connection has timeouted", self.core.meta.self_node_id, node);
|
|
||||||
|
|
||||||
data.state = SessionState::Finished;
|
|
||||||
data.result = Some(Err(Error::NodeDisconnected));
|
|
||||||
self.core.completed.notify_all();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl IsolatedSessionTransport {
|
|
||||||
pub fn new(session_id: SessionId, nonce: u64, cluster: Arc<Cluster>) -> Self {
|
|
||||||
IsolatedSessionTransport {
|
|
||||||
session: session_id,
|
|
||||||
nonce: nonce,
|
|
||||||
cluster: cluster,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl JobTransport for IsolatedSessionTransport {
|
|
||||||
type PartialJobRequest = ServersSetChangeAccessRequest;
|
|
||||||
type PartialJobResponse = bool;
|
|
||||||
|
|
||||||
fn send_partial_request(&self, node: &NodeId, request: ServersSetChangeAccessRequest) -> Result<(), Error> {
|
|
||||||
self.cluster.send(node, Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(ShareRemoveConsensusMessage {
|
|
||||||
session: self.session.clone().into(),
|
|
||||||
session_nonce: self.nonce,
|
|
||||||
message: ConsensusMessageWithServersSet::InitializeConsensusSession(InitializeConsensusSessionWithServersSet {
|
|
||||||
old_nodes_set: request.old_servers_set.into_iter().map(Into::into).collect(),
|
|
||||||
new_nodes_set: request.new_servers_set.into_iter().map(Into::into).collect(),
|
|
||||||
old_set_signature: request.old_set_signature.into(),
|
|
||||||
new_set_signature: request.new_set_signature.into(),
|
|
||||||
}),
|
|
||||||
})))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> {
|
|
||||||
self.cluster.send(node, Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(ShareRemoveConsensusMessage {
|
|
||||||
session: self.session.clone().into(),
|
|
||||||
session_nonce: self.nonce,
|
|
||||||
message: ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
|
|
||||||
is_confirmed: response,
|
|
||||||
}),
|
|
||||||
})))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SessionTransport for IsolatedSessionTransport {
|
|
||||||
fn send(&self, node: &NodeId, message: ShareRemoveMessage) -> Result<(), Error> {
|
|
||||||
self.cluster.send(node, Message::ShareRemove(message))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn check_shares_to_remove<T: SessionTransport>(core: &SessionCore<T>, shares_to_remove: &BTreeSet<NodeId>) -> Result<(), Error> {
|
|
||||||
// shares to remove must not be empty
|
|
||||||
if shares_to_remove.is_empty() {
|
|
||||||
return Err(Error::InvalidMessage);
|
|
||||||
}
|
|
||||||
|
|
||||||
// all shares_to_remove nodes must be old nodes of the session
|
|
||||||
if shares_to_remove.iter().any(|n| !core.key_share.id_numbers.contains_key(n)) {
|
|
||||||
return Err(Error::InvalidNodesConfiguration);
|
|
||||||
}
|
|
||||||
|
|
||||||
// do not allow removing more shares than possible
|
|
||||||
let nodes_left = core.key_share.id_numbers.len() - shares_to_remove.len();
|
|
||||||
if core.key_share.threshold + 1 > nodes_left {
|
|
||||||
return Err(Error::InvalidNodesConfiguration);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
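// Worked example (illustrative only): with 5 share owners and threshold t = 1 the key needs
// at least t + 1 = 2 surviving shares, so at most 3 shares may be removed; removing 4 would
// leave nodes_left = 1 < t + 1 and fail with InvalidNodesConfiguration.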
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::collections::{VecDeque, BTreeMap, BTreeSet};
|
|
||||||
use ethkey::{Random, Generator, Public, Signature, KeyPair, sign};
|
|
||||||
use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage};
|
|
||||||
use key_server_cluster::cluster_sessions::ClusterSession;
|
|
||||||
use key_server_cluster::cluster::tests::DummyCluster;
|
|
||||||
use key_server_cluster::generation_session::tests::{Node as GenerationNode, generate_nodes_ids};
|
|
||||||
use key_server_cluster::math;
|
|
||||||
use key_server_cluster::message::Message;
|
|
||||||
use key_server_cluster::servers_set_change_session::tests::generate_key;
|
|
||||||
use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash;
|
|
||||||
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
|
||||||
use key_server_cluster::admin_sessions::share_add_session::tests::check_secret_is_preserved;
|
|
||||||
use super::{SessionImpl, SessionParams, IsolatedSessionTransport};
|
|
||||||
|
|
||||||
struct Node {
|
|
||||||
pub cluster: Arc<DummyCluster>,
|
|
||||||
pub key_storage: Arc<DummyKeyStorage>,
|
|
||||||
pub session: SessionImpl<IsolatedSessionTransport>,
|
|
||||||
}
|
|
||||||
|
|
||||||
struct MessageLoop {
|
|
||||||
pub admin_key_pair: KeyPair,
|
|
||||||
pub original_key_pair: KeyPair,
|
|
||||||
pub old_nodes_set: BTreeSet<NodeId>,
|
|
||||||
pub new_nodes_set: BTreeSet<NodeId>,
|
|
||||||
pub old_set_signature: Signature,
|
|
||||||
pub new_set_signature: Signature,
|
|
||||||
pub nodes: BTreeMap<NodeId, Node>,
|
|
||||||
pub queue: VecDeque<(NodeId, NodeId, Message)>,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn create_session(mut meta: ShareChangeSessionMeta, admin_public: Public, self_node_id: NodeId, cluster: Arc<DummyCluster>, key_storage: Arc<KeyStorage>, all_cluster_nodes: BTreeSet<NodeId>) -> SessionImpl<IsolatedSessionTransport> {
|
|
||||||
let session_id = meta.id.clone();
|
|
||||||
meta.self_node_id = self_node_id;
|
|
||||||
SessionImpl::new(SessionParams {
|
|
||||||
meta: meta.clone(),
|
|
||||||
transport: IsolatedSessionTransport::new(session_id, 1, cluster),
|
|
||||||
key_storage: key_storage,
|
|
||||||
admin_public: Some(admin_public),
|
|
||||||
cluster_nodes_set: all_cluster_nodes,
|
|
||||||
nonce: 1,
|
|
||||||
}).unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn create_node(meta: ShareChangeSessionMeta, admin_public: Public, node: GenerationNode, all_nodes_set: BTreeSet<NodeId>) -> Node {
|
|
||||||
Node {
|
|
||||||
cluster: node.cluster.clone(),
|
|
||||||
key_storage: node.key_storage.clone(),
|
|
||||||
session: create_session(meta, admin_public, node.session.node().clone(), node.cluster, node.key_storage, all_nodes_set),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl MessageLoop {
|
|
||||||
pub fn new(t: usize, master_node_id: NodeId, old_nodes_set: BTreeSet<NodeId>, shares_to_remove: BTreeSet<NodeId>) -> Self {
|
|
||||||
// generate admin key pair
|
|
||||||
let admin_key_pair = Random.generate().unwrap();
|
|
||||||
let admin_public = admin_key_pair.public().clone();
|
|
||||||
|
|
||||||
// run initial generation session
|
|
||||||
let gml = generate_key(t, old_nodes_set.clone());
|
|
||||||
let original_secret = math::compute_joint_secret(gml.nodes.values()
|
|
||||||
.map(|nd| nd.key_storage.get(&SessionId::default()).unwrap().polynom1[0].clone())
|
|
||||||
.collect::<Vec<_>>()
|
|
||||||
.iter()).unwrap();
|
|
||||||
let original_key_pair = KeyPair::from_secret(original_secret).unwrap();
|
|
||||||
|
|
||||||
// prepare sessions on all nodes
|
|
||||||
let meta = ShareChangeSessionMeta {
|
|
||||||
id: SessionId::default(),
|
|
||||||
self_node_id: NodeId::default(),
|
|
||||||
master_node_id: master_node_id,
|
|
||||||
};
|
|
||||||
let new_nodes_set: BTreeSet<_> = old_nodes_set.iter()
|
|
||||||
.filter(|n| !shares_to_remove.contains(n))
|
|
||||||
.cloned()
|
|
||||||
.collect();
|
|
||||||
let nodes = gml.nodes.into_iter().map(|gn| create_node(meta.clone(), admin_public.clone(), gn.1, old_nodes_set.clone()));
|
|
||||||
let nodes = nodes.map(|n| (n.session.core.meta.self_node_id.clone(), n)).collect();
|
|
||||||
|
|
||||||
let old_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&old_nodes_set)).unwrap();
|
|
||||||
let new_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap();
|
|
||||||
MessageLoop {
|
|
||||||
admin_key_pair: admin_key_pair,
|
|
||||||
original_key_pair: original_key_pair,
|
|
||||||
old_nodes_set: old_nodes_set.clone(),
|
|
||||||
new_nodes_set: new_nodes_set.clone(),
|
|
||||||
old_set_signature: old_set_signature,
|
|
||||||
new_set_signature: new_set_signature,
|
|
||||||
nodes: nodes,
|
|
||||||
queue: Default::default(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn run(&mut self) {
|
|
||||||
while let Some((from, to, message)) = self.take_message() {
|
|
||||||
self.process_message((from, to, message)).unwrap();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> {
|
|
||||||
self.nodes.values()
|
|
||||||
.filter_map(|n| n.cluster.take_message().map(|m| (n.session.core.meta.self_node_id.clone(), m.0, m.1)))
|
|
||||||
.nth(0)
|
|
||||||
.or_else(|| self.queue.pop_front())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> {
|
|
||||||
match { match msg.2 {
|
|
||||||
Message::ShareRemove(ref message) =>
|
|
||||||
self.nodes[&msg.1].session.process_message(&msg.0, message),
|
|
||||||
_ => unreachable!("only servers set change messages are expected"),
|
|
||||||
} } {
|
|
||||||
Ok(_) => Ok(()),
|
|
||||||
Err(Error::TooEarlyForRequest) => {
|
|
||||||
self.queue.push_back(msg);
|
|
||||||
Ok(())
|
|
||||||
},
|
|
||||||
Err(err) => Err(err),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
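// Descriptive note: `run` repeatedly drains every node's outgoing queue and delivers each
// message to the target session via `process_message`; messages rejected with
// `TooEarlyForRequest` are pushed back onto `queue` and retried later.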
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn remove_session_fails_if_no_nodes_are_removed() {
|
|
||||||
let (t, n) = (1, 3);
|
|
||||||
let old_nodes_set = generate_nodes_ids(n);
|
|
||||||
let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
|
|
||||||
let nodes_to_remove = BTreeSet::new();
|
|
||||||
let ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());
|
|
||||||
assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
|
|
||||||
Some(ml.old_set_signature.clone()),
|
|
||||||
Some(ml.new_set_signature.clone())), Err(Error::InvalidMessage));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn remove_session_fails_if_foreign_nodes_are_removed() {
|
|
||||||
let (t, n) = (1, 3);
|
|
||||||
let old_nodes_set = generate_nodes_ids(n);
|
|
||||||
let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
|
|
||||||
let nodes_to_remove: BTreeSet<_> = vec![math::generate_random_point().unwrap()].into_iter().collect();
|
|
||||||
let ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());
|
|
||||||
assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
|
|
||||||
Some(ml.old_set_signature.clone()),
|
|
||||||
Some(ml.new_set_signature.clone())), Err(Error::InvalidNodesConfiguration));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn remove_session_fails_if_too_many_nodes_are_removed() {
|
|
||||||
let (t, n) = (1, 3);
|
|
||||||
let old_nodes_set = generate_nodes_ids(n);
|
|
||||||
let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
|
|
||||||
let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().take(2).collect();
|
|
||||||
let ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());
|
|
||||||
assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
|
|
||||||
Some(ml.old_set_signature.clone()),
|
|
||||||
Some(ml.new_set_signature.clone())), Err(Error::InvalidNodesConfiguration));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn nodes_removed_using_share_remove_from_master_node() {
|
|
||||||
let t = 1;
|
|
||||||
let test_cases = vec![(3, 1), (5, 3)];
|
|
||||||
for (n, nodes_to_remove) in test_cases {
|
|
||||||
// generate key && prepare ShareRemove sessions
|
|
||||||
let old_nodes_set = generate_nodes_ids(n);
|
|
||||||
let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
|
|
||||||
let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().take(nodes_to_remove).collect();
|
|
||||||
let mut ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());
|
|
||||||
|
|
||||||
// initialize session on master node && run to completion
|
|
||||||
ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
|
|
||||||
Some(ml.old_set_signature.clone()),
|
|
||||||
Some(ml.new_set_signature.clone())).unwrap();
|
|
||||||
ml.run();
|
|
||||||
|
|
||||||
// check that session has completed on all nodes
|
|
||||||
assert!(ml.nodes.values().all(|n| n.session.is_finished()));
|
|
||||||
|
|
||||||
// check that the secret is still the same as before removing the shares
|
|
||||||
check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter()
|
|
||||||
.filter(|&(k, _)| !nodes_to_remove.contains(k))
|
|
||||||
.map(|(k, v)| (k.clone(), v.key_storage.clone()))
|
|
||||||
.collect());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn nodes_removed_using_share_remove_from_non_master_node() {
|
|
||||||
let t = 1;
|
|
||||||
let test_cases = vec![(3, 1), (5, 3)];
|
|
||||||
for (n, nodes_to_remove) in test_cases {
|
|
||||||
// generate key && prepare ShareRemove sessions
|
|
||||||
let old_nodes_set = generate_nodes_ids(n);
|
|
||||||
let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
|
|
||||||
let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().skip(1).take(nodes_to_remove).collect();
|
|
||||||
let mut ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());
|
|
||||||
|
|
||||||
// initialize session on master node && run to completion
|
|
||||||
ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
|
|
||||||
Some(ml.old_set_signature.clone()),
|
|
||||||
Some(ml.new_set_signature.clone())).unwrap();
|
|
||||||
ml.run();
|
|
||||||
|
|
||||||
// check that session has completed on all nodes
|
|
||||||
assert!(ml.nodes.values().all(|n| n.session.is_finished()));
|
|
||||||
|
|
||||||
// check that the secret is still the same as before removing the shares
|
|
||||||
check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter()
|
|
||||||
.filter(|&(k, _)| !nodes_to_remove.contains(k))
|
|
||||||
.map(|(k, v)| (k.clone(), v.key_storage.clone()))
|
|
||||||
.collect());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn nodes_are_removed_even_if_some_other_nodes_are_isolated_from_cluster() {
|
|
||||||
let t = 1;
|
|
||||||
let (n, nodes_to_remove, nodes_to_isolate) = (5, 1, 2);
|
|
||||||
|
|
||||||
// generate key && prepare ShareRemove sessions
|
|
||||||
let old_nodes_set = generate_nodes_ids(n);
|
|
||||||
let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
|
|
||||||
let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().skip(1).take(nodes_to_remove).collect();
|
|
||||||
let nodes_to_isolate: BTreeSet<_> = old_nodes_set.iter().cloned().skip(1 + nodes_to_remove.len()).take(nodes_to_isolate).collect();
|
|
||||||
let mut ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());
|
|
||||||
|
|
||||||
// simulate node failure - isolate nodes (they are removed from the cluster completely, but are still part of the session)
|
|
||||||
for node_to_isolate in &nodes_to_isolate {
|
|
||||||
ml.nodes.remove(node_to_isolate);
|
|
||||||
}
|
|
||||||
for node in ml.nodes.values_mut() {
|
|
||||||
for node_to_isolate in &nodes_to_isolate {
|
|
||||||
node.session.core.cluster_nodes_set.remove(node_to_isolate);
|
|
||||||
node.cluster.remove_node(node_to_isolate);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// initialize session on master node && run to completion
|
|
||||||
ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
|
|
||||||
Some(ml.old_set_signature.clone()),
|
|
||||||
Some(ml.new_set_signature.clone())).unwrap();
|
|
||||||
ml.run();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn nodes_are_removed_even_if_isolated_from_cluster() {
|
|
||||||
let t = 1;
|
|
||||||
let (n, nodes_to_isolate_and_remove) = (5, 3);
|
|
||||||
|
|
||||||
// generate key && prepare ShareRemove sessions
|
|
||||||
let old_nodes_set = generate_nodes_ids(n);
|
|
||||||
let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
|
|
||||||
let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().skip(1).take(nodes_to_isolate_and_remove).collect();
|
|
||||||
let mut ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());
|
|
||||||
|
|
||||||
// simulate node failure - isolate nodes (they are removed from the cluster completely, but are still part of the session)
|
|
||||||
for node_to_isolate in &nodes_to_remove {
|
|
||||||
ml.nodes.remove(node_to_isolate);
|
|
||||||
}
|
|
||||||
for node in ml.nodes.values_mut() {
|
|
||||||
for node_to_isolate in &nodes_to_remove {
|
|
||||||
node.session.core.cluster_nodes_set.remove(node_to_isolate);
|
|
||||||
node.cluster.remove_node(node_to_isolate);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// initialize session on master node && run to completion
|
|
||||||
ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
|
|
||||||
Some(ml.old_set_signature.clone()),
|
|
||||||
Some(ml.new_set_signature.clone())).unwrap();
|
|
||||||
ml.run();
|
|
||||||
}
|
|
||||||
}
|
|
@ -14,16 +14,16 @@
|
|||||||
// You should have received a copy of the GNU General Public License
|
// You should have received a copy of the GNU General Public License
|
||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
use std::cmp::{Ord, PartialOrd, Ordering};
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use parking_lot::{Mutex, Condvar};
|
use parking_lot::{Mutex, Condvar};
|
||||||
|
use bigint::hash::H256;
|
||||||
use ethkey::{Secret, Signature};
|
use ethkey::{Secret, Signature};
|
||||||
use key_server_cluster::{Error, AclStorage, DocumentKeyShare, NodeId, SessionId, EncryptedDocumentKeyShadow, SessionMeta};
|
use key_server_cluster::{Error, AclStorage, DocumentKeyShare, NodeId, SessionId, EncryptedDocumentKeyShadow, SessionMeta};
|
||||||
use key_server_cluster::cluster::Cluster;
|
use key_server_cluster::cluster::Cluster;
|
||||||
use key_server_cluster::cluster_sessions::ClusterSession;
|
use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession};
|
||||||
use key_server_cluster::message::{Message, DecryptionMessage, DecryptionConsensusMessage, RequestPartialDecryption,
|
use key_server_cluster::message::{Message, DecryptionMessage, DecryptionConsensusMessage, RequestPartialDecryption,
|
||||||
PartialDecryption, DecryptionSessionError, DecryptionSessionCompleted, ConsensusMessage, InitializeConsensusSession,
|
PartialDecryption, DecryptionSessionError, DecryptionSessionCompleted, ConsensusMessage, InitializeConsensusSession,
|
||||||
ConfirmConsensusInitialization};
|
ConfirmConsensusInitialization, DecryptionSessionDelegation, DecryptionSessionDelegationCompleted};
|
||||||
use key_server_cluster::jobs::job_session::JobTransport;
|
use key_server_cluster::jobs::job_session::JobTransport;
|
||||||
use key_server_cluster::jobs::key_access_job::KeyAccessJob;
|
use key_server_cluster::jobs::key_access_job::KeyAccessJob;
|
||||||
use key_server_cluster::jobs::decryption_job::{PartialDecryptionRequest, PartialDecryptionResponse, DecryptionJob};
|
use key_server_cluster::jobs::decryption_job::{PartialDecryptionRequest, PartialDecryptionResponse, DecryptionJob};
|
||||||
@ -57,7 +57,7 @@ struct SessionCore {
|
|||||||
/// Decryption session access key.
|
/// Decryption session access key.
|
||||||
pub access_key: Secret,
|
pub access_key: Secret,
|
||||||
/// Key share.
|
/// Key share.
|
||||||
pub key_share: DocumentKeyShare,
|
pub key_share: Option<DocumentKeyShare>,
|
||||||
/// Cluster which allows this node to send messages to other nodes in the cluster.
|
/// Cluster which allows this node to send messages to other nodes in the cluster.
|
||||||
pub cluster: Arc<Cluster>,
|
pub cluster: Arc<Cluster>,
|
||||||
/// Session-level nonce.
|
/// Session-level nonce.
|
||||||
@ -71,23 +71,18 @@ type DecryptionConsensusSession = ConsensusSession<KeyAccessJob, DecryptionConse
|
|||||||
|
|
||||||
/// Mutable session data.
|
/// Mutable session data.
|
||||||
struct SessionData {
|
struct SessionData {
|
||||||
|
/// Key version to use for decryption.
|
||||||
|
pub version: Option<H256>,
|
||||||
/// Consensus-based decryption session.
|
/// Consensus-based decryption session.
|
||||||
pub consensus_session: DecryptionConsensusSession,
|
pub consensus_session: DecryptionConsensusSession,
|
||||||
/// Is shadow decryption requested?
|
/// Is shadow decryption requested?
|
||||||
pub is_shadow_decryption: Option<bool>,
|
pub is_shadow_decryption: Option<bool>,
|
||||||
|
/// Delegation status.
|
||||||
|
pub delegation_status: Option<DelegationStatus>,
|
||||||
/// Decryption result.
|
/// Decryption result.
|
||||||
pub result: Option<Result<EncryptedDocumentKeyShadow, Error>>,
|
pub result: Option<Result<EncryptedDocumentKeyShadow, Error>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Decryption session Id.
|
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
|
||||||
pub struct DecryptionSessionId {
|
|
||||||
/// Encryption session id.
|
|
||||||
pub id: SessionId,
|
|
||||||
/// Decryption session access key.
|
|
||||||
pub access_key: Secret,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// SessionImpl creation parameters
|
/// SessionImpl creation parameters
|
||||||
pub struct SessionParams {
|
pub struct SessionParams {
|
||||||
/// Session metadata.
|
/// Session metadata.
|
||||||
@ -95,7 +90,7 @@ pub struct SessionParams {
|
|||||||
/// Session access key.
|
/// Session access key.
|
||||||
pub access_key: Secret,
|
pub access_key: Secret,
|
||||||
/// Key share.
|
/// Key share.
|
||||||
pub key_share: DocumentKeyShare,
|
pub key_share: Option<DocumentKeyShare>,
|
||||||
/// ACL storage.
|
/// ACL storage.
|
||||||
pub acl_storage: Arc<AclStorage>,
|
pub acl_storage: Arc<AclStorage>,
|
||||||
/// Cluster.
|
/// Cluster.
|
||||||
@ -112,6 +107,8 @@ struct DecryptionConsensusTransport {
|
|||||||
access_key: Secret,
|
access_key: Secret,
|
||||||
/// Session-level nonce.
|
/// Session-level nonce.
|
||||||
nonce: u64,
|
nonce: u64,
|
||||||
|
/// Selected key version (on master node).
|
||||||
|
version: Option<H256>,
|
||||||
/// Cluster.
|
/// Cluster.
|
||||||
cluster: Arc<Cluster>,
|
cluster: Arc<Cluster>,
|
||||||
}
|
}
|
||||||
@ -128,28 +125,32 @@ struct DecryptionJobTransport {
|
|||||||
cluster: Arc<Cluster>,
|
cluster: Arc<Cluster>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Session delegation status.
|
||||||
|
enum DelegationStatus {
|
||||||
|
/// Delegated to other node.
|
||||||
|
DelegatedTo(NodeId),
|
||||||
|
/// Delegated from other node.
|
||||||
|
DelegatedFrom(NodeId, u64),
|
||||||
|
}
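// Descriptive note: a node that lacks the requested key share version delegates the session
// to a node that has it (`DelegatedTo`), while the delegate records the origin and nonce
// (`DelegatedFrom`) so the final result can be sent back in `set_decryption_result` via
// `DecryptionSessionDelegationCompleted`.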
|
||||||
|
|
||||||
impl SessionImpl {
|
impl SessionImpl {
|
||||||
/// Create new decryption session.
|
/// Create new decryption session.
|
||||||
pub fn new(params: SessionParams, requester_signature: Option<Signature>) -> Result<Self, Error> {
|
pub fn new(params: SessionParams, requester_signature: Option<Signature>) -> Result<Self, Error> {
|
||||||
debug_assert_eq!(params.meta.threshold, params.key_share.threshold);
|
debug_assert_eq!(params.meta.threshold, params.key_share.as_ref().map(|ks| ks.threshold).unwrap_or_default());
|
||||||
debug_assert_eq!(params.meta.self_node_id == params.meta.master_node_id, requester_signature.is_some());
|
|
||||||
|
|
||||||
use key_server_cluster::generation_session::{check_cluster_nodes, check_threshold};
|
|
||||||
|
|
||||||
// check that common_point and encrypted_point are already set
|
// check that common_point and encrypted_point are already set
|
||||||
if params.key_share.common_point.is_none() || params.key_share.encrypted_point.is_none() {
|
if let Some(key_share) = params.key_share.as_ref() {
|
||||||
return Err(Error::NotStartedSessionId);
|
// encrypted data must be set
|
||||||
|
if key_share.common_point.is_none() || key_share.encrypted_point.is_none() {
|
||||||
|
return Err(Error::NotStartedSessionId);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// check nodes and threshold
|
|
||||||
let nodes = params.key_share.id_numbers.keys().cloned().collect();
|
|
||||||
check_cluster_nodes(¶ms.meta.self_node_id, &nodes)?;
|
|
||||||
check_threshold(params.key_share.threshold, &nodes)?;
|
|
||||||
|
|
||||||
let consensus_transport = DecryptionConsensusTransport {
|
let consensus_transport = DecryptionConsensusTransport {
|
||||||
id: params.meta.id.clone(),
|
id: params.meta.id.clone(),
|
||||||
access_key: params.access_key.clone(),
|
access_key: params.access_key.clone(),
|
||||||
nonce: params.nonce,
|
nonce: params.nonce,
|
||||||
|
version: None,
|
||||||
cluster: params.cluster.clone(),
|
cluster: params.cluster.clone(),
|
||||||
};
|
};
|
||||||
let consensus_session = ConsensusSession::new(ConsensusSessionParams {
|
let consensus_session = ConsensusSession::new(ConsensusSessionParams {
|
||||||
@ -171,8 +172,10 @@ impl SessionImpl {
|
|||||||
completed: Condvar::new(),
|
completed: Condvar::new(),
|
||||||
},
|
},
|
||||||
data: Mutex::new(SessionData {
|
data: Mutex::new(SessionData {
|
||||||
|
version: None,
|
||||||
consensus_session: consensus_session,
|
consensus_session: consensus_session,
|
||||||
is_shadow_decryption: None,
|
is_shadow_decryption: None,
|
||||||
|
delegation_status: None,
|
||||||
result: None,
|
result: None,
|
||||||
}),
|
}),
|
||||||
})
|
})
|
||||||
@ -202,18 +205,59 @@ impl SessionImpl {
|
|||||||
self.data.lock().result.clone()
|
self.data.lock().result.clone()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Initialize decryption session on master node.
|
/// Delegate session to other node.
|
||||||
pub fn initialize(&self, is_shadow_decryption: bool) -> Result<(), Error> {
|
pub fn delegate(&self, master: NodeId, version: H256, is_shadow_decryption: bool) -> Result<(), Error> {
|
||||||
|
if self.core.meta.master_node_id != self.core.meta.self_node_id {
|
||||||
|
return Err(Error::InvalidStateForRequest);
|
||||||
|
}
|
||||||
|
|
||||||
let mut data = self.data.lock();
|
let mut data = self.data.lock();
|
||||||
|
if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization || data.delegation_status.is_some() {
|
||||||
|
return Err(Error::InvalidStateForRequest);
|
||||||
|
}
|
||||||
|
|
||||||
|
data.consensus_session.consensus_job_mut().executor_mut().set_has_key_share(false);
|
||||||
|
self.core.cluster.send(&master, Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(DecryptionSessionDelegation {
|
||||||
|
session: self.core.meta.id.clone().into(),
|
||||||
|
sub_session: self.core.access_key.clone().into(),
|
||||||
|
session_nonce: self.core.nonce,
|
||||||
|
requestor_signature: data.consensus_session.consensus_job().executor().requester_signature()
|
||||||
|
.expect("signature is passed to master node on creation; session can be delegated from master node only; qed")
|
||||||
|
.clone().into(),
|
||||||
|
version: version.into(),
|
||||||
|
is_shadow_decryption: is_shadow_decryption,
|
||||||
|
})))?;
|
||||||
|
data.delegation_status = Some(DelegationStatus::DelegatedTo(master));
|
||||||
|
Ok(())
|
||||||
|
}
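// Descriptive note: after delegating, this node marks its own consensus job as having no key
// share and only relays the request; the actual decryption runs on `master`, and its result
// is accepted in `on_session_delegation_completed`.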
|
||||||
|
|
||||||
|
/// Initialize decryption session on master node.
|
||||||
|
pub fn initialize(&self, version: H256, is_shadow_decryption: bool) -> Result<(), Error> {
|
||||||
|
debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id);
|
||||||
|
|
||||||
|
// check if version exists
|
||||||
|
let key_version = match self.core.key_share.as_ref() {
|
||||||
|
None => return Err(Error::InvalidMessage),
|
||||||
|
Some(key_share) => key_share.version(&version).map_err(|e| Error::KeyStorage(e.into()))?,
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
let non_isolated_nodes = self.core.cluster.nodes();
|
||||||
|
data.consensus_session.consensus_job_mut().transport_mut().version = Some(version.clone());
|
||||||
|
data.version = Some(version.clone());
|
||||||
data.is_shadow_decryption = Some(is_shadow_decryption);
|
data.is_shadow_decryption = Some(is_shadow_decryption);
|
||||||
data.consensus_session.initialize(self.core.key_share.id_numbers.keys().cloned().collect())?;
|
data.consensus_session.initialize(key_version.id_numbers.keys()
|
||||||
|
.filter(|n| non_isolated_nodes.contains(*n))
|
||||||
|
.cloned()
|
||||||
|
.chain(::std::iter::once(self.core.meta.self_node_id.clone()))
|
||||||
|
.collect())?;
|
||||||
|
|
||||||
if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished {
|
if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished {
|
||||||
self.core.disseminate_jobs(&mut data.consensus_session, is_shadow_decryption)?;
|
self.core.disseminate_jobs(&mut data.consensus_session, &version, is_shadow_decryption)?;
|
||||||
|
|
||||||
debug_assert!(data.consensus_session.state() == ConsensusSessionState::Finished);
|
debug_assert!(data.consensus_session.state() == ConsensusSessionState::Finished);
|
||||||
data.result = Some(Ok(data.consensus_session.result()?));
|
let result = data.consensus_session.result()?;
|
||||||
self.core.completed.notify_all();
|
Self::set_decryption_result(&self.core, &mut *data, Ok(result));
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@ -233,12 +277,58 @@ impl SessionImpl {
|
|||||||
&DecryptionMessage::PartialDecryption(ref message) =>
|
&DecryptionMessage::PartialDecryption(ref message) =>
|
||||||
self.on_partial_decryption(sender, message),
|
self.on_partial_decryption(sender, message),
|
||||||
&DecryptionMessage::DecryptionSessionError(ref message) =>
|
&DecryptionMessage::DecryptionSessionError(ref message) =>
|
||||||
self.on_session_error(sender, message),
|
self.process_node_error(Some(&sender), Error::Io(message.error.clone())),
|
||||||
&DecryptionMessage::DecryptionSessionCompleted(ref message) =>
|
&DecryptionMessage::DecryptionSessionCompleted(ref message) =>
|
||||||
self.on_session_completed(sender, message),
|
self.on_session_completed(sender, message),
|
||||||
|
&DecryptionMessage::DecryptionSessionDelegation(ref message) =>
|
||||||
|
self.on_session_delegated(sender, message),
|
||||||
|
&DecryptionMessage::DecryptionSessionDelegationCompleted(ref message) =>
|
||||||
|
self.on_session_delegation_completed(sender, message),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// When session is delegated to this node.
|
||||||
|
pub fn on_session_delegated(&self, sender: &NodeId, message: &DecryptionSessionDelegation) -> Result<(), Error> {
|
||||||
|
debug_assert!(self.core.meta.id == *message.session);
|
||||||
|
debug_assert!(self.core.access_key == *message.sub_session);
|
||||||
|
|
||||||
|
{
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization || data.delegation_status.is_some() {
|
||||||
|
return Err(Error::InvalidStateForRequest);
|
||||||
|
}
|
||||||
|
|
||||||
|
data.consensus_session.consensus_job_mut().executor_mut().set_requester_signature(message.requestor_signature.clone().into());
|
||||||
|
data.delegation_status = Some(DelegationStatus::DelegatedFrom(sender.clone(), message.session_nonce));
|
||||||
|
}
|
||||||
|
|
||||||
|
self.initialize(message.version.clone().into(), message.is_shadow_decryption)
|
||||||
|
}
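// Descriptive note: the delegate takes over as master here: it stores the origin as
// `DelegatedFrom` and then runs its own `initialize` with the requested key version and
// shadow-decryption flag.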
|
||||||
|
|
||||||
|
/// When delegated session is completed on other node.
|
||||||
|
pub fn on_session_delegation_completed(&self, sender: &NodeId, message: &DecryptionSessionDelegationCompleted) -> Result<(), Error> {
|
||||||
|
debug_assert!(self.core.meta.id == *message.session);
|
||||||
|
debug_assert!(self.core.access_key == *message.sub_session);
|
||||||
|
|
||||||
|
if self.core.meta.master_node_id != self.core.meta.self_node_id {
|
||||||
|
return Err(Error::InvalidStateForRequest);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
match data.delegation_status.as_ref() {
|
||||||
|
Some(&DelegationStatus::DelegatedTo(ref node)) if node == sender => (),
|
||||||
|
_ => return Err(Error::InvalidMessage),
|
||||||
|
}
|
||||||
|
|
||||||
|
Self::set_decryption_result(&self.core, &mut *data, Ok(EncryptedDocumentKeyShadow {
|
||||||
|
decrypted_secret: message.decrypted_secret.clone().into(),
|
||||||
|
common_point: message.common_point.clone().map(Into::into),
|
||||||
|
decrypt_shadows: message.decrypt_shadows.clone().map(Into::into),
|
||||||
|
}));
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
/// When consensus-related message is received.
|
/// When consensus-related message is received.
|
||||||
pub fn on_consensus_message(&self, sender: &NodeId, message: &DecryptionConsensusMessage) -> Result<(), Error> {
|
pub fn on_consensus_message(&self, sender: &NodeId, message: &DecryptionConsensusMessage) -> Result<(), Error> {
|
||||||
debug_assert!(self.core.meta.id == *message.session);
|
debug_assert!(self.core.meta.id == *message.session);
|
||||||
@ -246,6 +336,14 @@ impl SessionImpl {
|
|||||||
|
|
||||||
let mut data = self.data.lock();
|
let mut data = self.data.lock();
|
||||||
let is_establishing_consensus = data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus;
|
let is_establishing_consensus = data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus;
|
||||||
|
if let &ConsensusMessage::InitializeConsensusSession(ref msg) = &message.message {
|
||||||
|
let version = msg.version.clone().into();
|
||||||
|
let has_key_share = self.core.key_share.as_ref()
|
||||||
|
.map(|ks| ks.version(&version).is_ok())
|
||||||
|
.unwrap_or(false);
|
||||||
|
data.consensus_session.consensus_job_mut().executor_mut().set_has_key_share(has_key_share);
|
||||||
|
data.version = Some(version);
|
||||||
|
}
|
||||||
data.consensus_session.on_consensus_message(&sender, &message.message)?;
|
data.consensus_session.on_consensus_message(&sender, &message.message)?;
|
||||||
|
|
||||||
let is_consensus_established = data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished;
|
let is_consensus_established = data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished;
|
||||||
@ -253,9 +351,10 @@ impl SessionImpl {
|
|||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone();
|
||||||
let is_shadow_decryption = data.is_shadow_decryption
|
let is_shadow_decryption = data.is_shadow_decryption
|
||||||
.expect("we are on master node; on master node is_shadow_decryption is filled in initialize(); on_consensus_message follows initialize (state check in consensus_session); qed");
|
.expect("we are on master node; on master node is_shadow_decryption is filled in initialize(); on_consensus_message follows initialize (state check in consensus_session); qed");
|
||||||
self.core.disseminate_jobs(&mut data.consensus_session, is_shadow_decryption)
|
self.core.disseminate_jobs(&mut data.consensus_session, &version, is_shadow_decryption)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// When partial decryption is requested.
|
/// When partial decryption is requested.
|
||||||
@ -264,9 +363,16 @@ impl SessionImpl {
|
|||||||
debug_assert!(self.core.access_key == *message.sub_session);
|
debug_assert!(self.core.access_key == *message.sub_session);
|
||||||
debug_assert!(sender != &self.core.meta.self_node_id);
|
debug_assert!(sender != &self.core.meta.self_node_id);
|
||||||
|
|
||||||
|
let key_share = match self.core.key_share.as_ref() {
|
||||||
|
None => return Err(Error::InvalidMessage),
|
||||||
|
Some(key_share) => key_share,
|
||||||
|
};
|
||||||
|
|
||||||
let mut data = self.data.lock();
|
let mut data = self.data.lock();
|
||||||
|
let key_version = key_share.version(data.version.as_ref().ok_or(Error::InvalidMessage)?)
|
||||||
|
.map_err(|e| Error::KeyStorage(e.into()))?.hash.clone();
|
||||||
let requester = data.consensus_session.consensus_job().executor().requester()?.ok_or(Error::InvalidStateForRequest)?.clone();
|
let requester = data.consensus_session.consensus_job().executor().requester()?.ok_or(Error::InvalidStateForRequest)?.clone();
|
||||||
let decryption_job = DecryptionJob::new_on_slave(self.core.meta.self_node_id.clone(), self.core.access_key.clone(), requester, self.core.key_share.clone())?;
|
let decryption_job = DecryptionJob::new_on_slave(self.core.meta.self_node_id.clone(), self.core.access_key.clone(), requester, key_share.clone(), key_version)?;
|
||||||
let decryption_transport = self.core.decryption_transport();
|
let decryption_transport = self.core.decryption_transport();
|
||||||
|
|
||||||
data.consensus_session.on_job_request(&sender, PartialDecryptionRequest {
|
data.consensus_session.on_job_request(&sender, PartialDecryptionRequest {
|
||||||
@ -302,8 +408,8 @@ impl SessionImpl {
|
|||||||
})))?;
|
})))?;
|
||||||
}
|
}
|
||||||
|
|
||||||
data.result = Some(Ok(data.consensus_session.result()?));
|
let result = data.consensus_session.result()?;
|
||||||
self.core.completed.notify_all();
|
Self::set_decryption_result(&self.core, &mut *data, Ok(result));
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -317,14 +423,16 @@ impl SessionImpl {
|
|||||||
self.data.lock().consensus_session.on_session_completed(sender)
|
self.data.lock().consensus_session.on_session_completed(sender)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// When error has occured on another node.
|
|
||||||
pub fn on_session_error(&self, sender: &NodeId, message: &DecryptionSessionError) -> Result<(), Error> {
|
|
||||||
self.process_node_error(Some(&sender), &message.error)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Process error from the other node.
|
/// Process error from the other node.
|
||||||
fn process_node_error(&self, node: Option<&NodeId>, error: &String) -> Result<(), Error> {
|
fn process_node_error(&self, node: Option<&NodeId>, error: Error) -> Result<(), Error> {
|
||||||
let mut data = self.data.lock();
|
let mut data = self.data.lock();
|
||||||
|
let is_self_node_error = node.map(|n| n == &self.core.meta.self_node_id).unwrap_or(false);
|
||||||
|
// error is always fatal if coming from this node
|
||||||
|
if is_self_node_error {
|
||||||
|
Self::set_decryption_result(&self.core, &mut *data, Err(error.clone()));
|
||||||
|
return Err(error);
|
||||||
|
}
|
||||||
|
|
||||||
match {
|
match {
|
||||||
match node {
|
match node {
|
||||||
Some(node) => data.consensus_session.on_node_error(node),
|
Some(node) => data.consensus_session.on_node_error(node),
|
||||||
@@ -333,15 +441,15 @@ impl SessionImpl {
 		} {
 			Ok(false) => Ok(()),
 			Ok(true) => {
+				let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone();
 				let is_shadow_decryption = data.is_shadow_decryption.expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when is_shadow_decryption.is_some(); qed");
-				let disseminate_result = self.core.disseminate_jobs(&mut data.consensus_session, is_shadow_decryption);
+				let disseminate_result = self.core.disseminate_jobs(&mut data.consensus_session, &version, is_shadow_decryption);
 				match disseminate_result {
 					Ok(()) => Ok(()),
 					Err(err) => {
 						warn!("{}: decryption session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node);
 
-						data.result = Some(Err(err.clone()));
-						self.core.completed.notify_all();
+						Self::set_decryption_result(&self.core, &mut *data, Err(err.clone()));
 						Err(err)
 					}
 				}
@@ -349,29 +457,92 @@ impl SessionImpl {
 			Err(err) => {
 				warn!("{}: decryption session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node);
 
-				data.result = Some(Err(err.clone()));
-				self.core.completed.notify_all();
+				Self::set_decryption_result(&self.core, &mut *data, Err(err.clone()));
 				Err(err)
 			},
 		}
 	}
 
+	/// Set decryption result.
+	fn set_decryption_result(core: &SessionCore, data: &mut SessionData, result: Result<EncryptedDocumentKeyShadow, Error>) {
+		if let Some(DelegationStatus::DelegatedFrom(master, nonce)) = data.delegation_status.take() {
+			// error means can't communicate => ignore it
+			let _ = match result.as_ref() {
+				Ok(document_key) => core.cluster.send(&master, Message::Decryption(DecryptionMessage::DecryptionSessionDelegationCompleted(DecryptionSessionDelegationCompleted {
+					session: core.meta.id.clone().into(),
+					sub_session: core.access_key.clone().into(),
+					session_nonce: nonce,
+					decrypted_secret: document_key.decrypted_secret.clone().into(),
+					common_point: document_key.common_point.clone().map(Into::into),
+					decrypt_shadows: document_key.decrypt_shadows.clone(),
+				}))),
+				Err(error) => core.cluster.send(&master, Message::Decryption(DecryptionMessage::DecryptionSessionError(DecryptionSessionError {
+					session: core.meta.id.clone().into(),
+					sub_session: core.access_key.clone().into(),
+					session_nonce: nonce,
+					error: error.clone().into(),
+				}))),
+			};
+		}
+
+		data.result = Some(result);
+		core.completed.notify_all();
+	}
 }
 
 impl ClusterSession for SessionImpl {
+	type Id = SessionIdWithSubSession;
+
+	fn type_name() -> &'static str {
+		"decryption"
+	}
+
+	fn id(&self) -> SessionIdWithSubSession {
+		SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.access_key.clone())
+	}
+
 	fn is_finished(&self) -> bool {
-		let data = self.data.lock();
-		data.consensus_session.state() == ConsensusSessionState::Failed
-			|| data.consensus_session.state() == ConsensusSessionState::Finished
+		self.data.lock().result.is_some()
 	}
 
 	fn on_node_timeout(&self, node: &NodeId) {
 		// ignore error, only state matters
-		let _ = self.process_node_error(Some(node), &Error::NodeDisconnected.into());
+		let _ = self.process_node_error(Some(node), Error::NodeDisconnected);
 	}
 
 	fn on_session_timeout(&self) {
 		// ignore error, only state matters
-		let _ = self.process_node_error(None, &Error::NodeDisconnected.into());
+		let _ = self.process_node_error(None, Error::NodeDisconnected);
+	}
+
+	fn on_session_error(&self, node: &NodeId, error: Error) {
+		let is_fatal = self.process_node_error(Some(node), error.clone()).is_err();
+		let is_this_node_error = *node == self.core.meta.self_node_id;
+		if is_fatal || is_this_node_error {
+			// error in signing session is non-fatal, if occurs on slave node
+			// => either respond with error
+			// => or broadcast error
+			let message = Message::Decryption(DecryptionMessage::DecryptionSessionError(DecryptionSessionError {
+				session: self.core.meta.id.clone().into(),
+				sub_session: self.core.access_key.clone().into(),
+				session_nonce: self.core.nonce,
+				error: error.clone().into(),
+			}));
+
+			// do not bother processing send error, as we already processing error
+			let _ = if self.core.meta.master_node_id == self.core.meta.self_node_id {
+				self.core.cluster.broadcast(message)
+			} else {
+				self.core.cluster.send(&self.core.meta.master_node_id, message)
+			};
+		}
+	}
+
+	fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
+		match *message {
+			Message::Decryption(ref message) => self.process_message(sender, message),
+			_ => unreachable!("cluster checks message to be correct before passing; qed"),
+		}
 	}
 }
 
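Note on the hunk above: `set_decryption_result` becomes the single completion point of a decryption session. If the session was started on behalf of another node, the result (or error) is first reported back to the delegating master via `DecryptionSessionDelegationCompleted` / `DecryptionSessionError`, and only then stored locally and signalled through `completed`. A minimal sketch of the delegation marker it matches on follows; the real definition lives elsewhere in this change set, the `DelegatedTo` variant and the stand-in type are assumptions.

	// Sketch only; stand-in type, not quoted from the patch.
	type NodeId = [u8; 64];

	enum DelegationStatus {
		/// This node delegated its own request to a share-owning node.
		DelegatedTo(NodeId),
		/// This node serves a request on behalf of `master`, under the given session nonce.
		DelegatedFrom(NodeId, u64),
	}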
@@ -398,9 +569,15 @@ impl SessionCore {
 		}
 	}
 
-	pub fn disseminate_jobs(&self, consensus_session: &mut DecryptionConsensusSession, is_shadow_decryption: bool) -> Result<(), Error> {
+	pub fn disseminate_jobs(&self, consensus_session: &mut DecryptionConsensusSession, version: &H256, is_shadow_decryption: bool) -> Result<(), Error> {
+		let key_share = match self.key_share.as_ref() {
+			None => return Err(Error::InvalidMessage),
+			Some(key_share) => key_share,
+		};
+
+		let key_version = key_share.version(version).map_err(|e| Error::KeyStorage(e.into()))?.hash.clone();
 		let requester = consensus_session.consensus_job().executor().requester()?.ok_or(Error::InvalidStateForRequest)?.clone();
-		let decryption_job = DecryptionJob::new_on_master(self.meta.self_node_id.clone(), self.access_key.clone(), requester, self.key_share.clone(), is_shadow_decryption)?;
+		let decryption_job = DecryptionJob::new_on_master(self.meta.self_node_id.clone(), self.access_key.clone(), requester, key_share.clone(), key_version, is_shadow_decryption)?;
 		consensus_session.disseminate_jobs(decryption_job, self.decryption_transport())
 	}
 }
@@ -410,12 +587,15 @@ impl JobTransport for DecryptionConsensusTransport {
 	type PartialJobResponse=bool;
 
 	fn send_partial_request(&self, node: &NodeId, request: Signature) -> Result<(), Error> {
+		let version = self.version.as_ref()
+			.expect("send_partial_request is called on initialized master node only; version is filled in before initialization starts on master node; qed");
 		self.cluster.send(node, Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(DecryptionConsensusMessage {
 			session: self.id.clone().into(),
 			sub_session: self.access_key.clone().into(),
 			session_nonce: self.nonce,
 			message: ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
 				requestor_signature: request.into(),
+				version: version.clone().into(),
 			})
 		})))
 	}
@@ -459,38 +639,13 @@ impl JobTransport for DecryptionJobTransport {
 	}
 }
 
-impl DecryptionSessionId {
-	/// Create new decryption session Id.
-	pub fn new(session_id: SessionId, sub_session_id: Secret) -> Self {
-		DecryptionSessionId {
-			id: session_id,
-			access_key: sub_session_id,
-		}
-	}
-}
-
-impl PartialOrd for DecryptionSessionId {
-	fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-		Some(self.cmp(other))
-	}
-}
-
-impl Ord for DecryptionSessionId {
-	fn cmp(&self, other: &Self) -> Ordering {
-		match self.id.cmp(&other.id) {
-			Ordering::Equal => self.access_key.cmp(&other.access_key),
-			r @ _ => r,
-		}
-	}
-}
-
 #[cfg(test)]
 mod tests {
 	use std::sync::Arc;
 	use std::collections::BTreeMap;
 	use acl_storage::DummyAclStorage;
 	use ethkey::{self, KeyPair, Random, Generator, Public, Secret};
-	use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, Error, EncryptedDocumentKeyShadow, SessionMeta};
+	use key_server_cluster::{NodeId, DocumentKeyShare, DocumentKeyShareVersion, SessionId, Error, EncryptedDocumentKeyShadow, SessionMeta};
 	use key_server_cluster::cluster::tests::DummyCluster;
 	use key_server_cluster::cluster_sessions::ClusterSession;
 	use key_server_cluster::decryption_session::{SessionImpl, SessionParams};
@@ -528,11 +683,13 @@ mod tests {
 		let encrypted_datas: Vec<_> = (0..5).map(|i| DocumentKeyShare {
 			author: Public::default(),
 			threshold: 3,
-			id_numbers: id_numbers.clone().into_iter().collect(),
-			secret_share: secret_shares[i].clone(),
-			polynom1: Vec::new(),
 			common_point: Some(common_point.clone()),
 			encrypted_point: Some(encrypted_point.clone()),
+			versions: vec![DocumentKeyShareVersion {
+				hash: Default::default(),
+				id_numbers: id_numbers.clone().into_iter().collect(),
+				secret_share: secret_shares[i].clone(),
+			}],
 		}).collect();
 		let acl_storages: Vec<_> = (0..5).map(|_| Arc::new(DummyAclStorage::default())).collect();
 		let clusters: Vec<_> = (0..5).map(|i| {
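The fixtures above no longer put `id_numbers`/`secret_share`/`polynom1` directly on `DocumentKeyShare`; a share now carries a list of versions. A rough sketch of the versioned layout these fixtures rely on — the field set mirrors what the diff constructs, the concrete types are stand-in assumptions:

	use std::collections::BTreeMap;

	// Stand-ins for the crate's NodeId / Secret / H256 types (assumptions).
	type NodeId = [u8; 64];
	type Secret = [u8; 32];
	type H256 = [u8; 32];

	/// One version of a key share: its own id-number map and secret share.
	struct DocumentKeyShareVersion {
		hash: H256,                           // version identifier
		id_numbers: BTreeMap<NodeId, Secret>, // node => id-number in this version
		secret_share: Secret,                 // this node's share in this version
	}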
@@ -552,7 +709,7 @@ mod tests {
 				threshold: encrypted_datas[i].threshold,
 			},
 			access_key: access_key.clone(),
-			key_share: encrypted_datas[i].clone(),
+			key_share: Some(encrypted_datas[i].clone()),
 			acl_storage: acl_storages[i].clone(),
 			cluster: clusters[i].clone(),
 			nonce: 0,
@@ -594,15 +751,17 @@ mod tests {
 				threshold: 0,
 			},
 			access_key: Random.generate().unwrap().secret().clone(),
-			key_share: DocumentKeyShare {
+			key_share: Some(DocumentKeyShare {
 				author: Public::default(),
 				threshold: 0,
-				id_numbers: nodes,
-				secret_share: Random.generate().unwrap().secret().clone(),
-				polynom1: Vec::new(),
 				common_point: Some(Random.generate().unwrap().public().clone()),
 				encrypted_point: Some(Random.generate().unwrap().public().clone()),
-			},
+				versions: vec![DocumentKeyShareVersion {
+					hash: Default::default(),
+					id_numbers: nodes,
+					secret_share: Random.generate().unwrap().secret().clone(),
+				}],
+			}),
 			acl_storage: Arc::new(DummyAclStorage::default()),
 			cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
 			nonce: 0,
@@ -613,12 +772,9 @@ mod tests {
 	}
 
 	#[test]
-	fn fails_to_construct_if_not_a_part_of_cluster() {
-		let mut nodes = BTreeMap::new();
+	fn fails_to_initialize_if_does_not_have_a_share() {
 		let self_node_id = Random.generate().unwrap().public().clone();
-		nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
-		nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
-		match SessionImpl::new(SessionParams {
+		let session = SessionImpl::new(SessionParams {
 			meta: SessionMeta {
 				id: SessionId::default(),
 				self_node_id: self_node_id.clone(),
@@ -626,31 +782,21 @@ mod tests {
 				threshold: 0,
 			},
 			access_key: Random.generate().unwrap().secret().clone(),
-			key_share: DocumentKeyShare {
-				author: Public::default(),
-				threshold: 0,
-				id_numbers: nodes,
-				secret_share: Random.generate().unwrap().secret().clone(),
-				polynom1: Vec::new(),
-				common_point: Some(Random.generate().unwrap().public().clone()),
-				encrypted_point: Some(Random.generate().unwrap().public().clone()),
-			},
+			key_share: None,
 			acl_storage: Arc::new(DummyAclStorage::default()),
 			cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
 			nonce: 0,
-		}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) {
-			Err(Error::InvalidNodesConfiguration) => (),
-			_ => panic!("unexpected"),
-		}
+		}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())).unwrap();
+		assert_eq!(session.initialize(Default::default(), false), Err(Error::InvalidMessage));
 	}
 
 	#[test]
-	fn fails_to_construct_if_threshold_is_wrong() {
+	fn fails_to_initialize_if_threshold_is_wrong() {
 		let mut nodes = BTreeMap::new();
 		let self_node_id = Random.generate().unwrap().public().clone();
 		nodes.insert(self_node_id.clone(), Random.generate().unwrap().secret().clone());
 		nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
-		match SessionImpl::new(SessionParams {
+		let session = SessionImpl::new(SessionParams {
 			meta: SessionMeta {
 				id: SessionId::default(),
 				self_node_id: self_node_id.clone(),
@@ -658,41 +804,42 @@ mod tests {
 				threshold: 2,
 			},
 			access_key: Random.generate().unwrap().secret().clone(),
-			key_share: DocumentKeyShare {
+			key_share: Some(DocumentKeyShare {
 				author: Public::default(),
 				threshold: 2,
-				id_numbers: nodes,
-				secret_share: Random.generate().unwrap().secret().clone(),
-				polynom1: Vec::new(),
 				common_point: Some(Random.generate().unwrap().public().clone()),
 				encrypted_point: Some(Random.generate().unwrap().public().clone()),
-			},
+				versions: vec![DocumentKeyShareVersion {
+					hash: Default::default(),
+					id_numbers: nodes,
+					secret_share: Random.generate().unwrap().secret().clone(),
+				}],
+			}),
 			acl_storage: Arc::new(DummyAclStorage::default()),
 			cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
 			nonce: 0,
-		}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) {
-			Err(Error::InvalidThreshold) => (),
-			_ => panic!("unexpected"),
-		}
+		}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())).unwrap();
+		assert_eq!(session.initialize(Default::default(), false), Err(Error::ConsensusUnreachable));
 	}
 
 	#[test]
 	fn fails_to_initialize_when_already_initialized() {
 		let (_, _, _, sessions) = prepare_decryption_sessions();
-		assert_eq!(sessions[0].initialize(false).unwrap(), ());
-		assert_eq!(sessions[0].initialize(false).unwrap_err(), Error::InvalidStateForRequest);
+		assert_eq!(sessions[0].initialize(Default::default(), false).unwrap(), ());
+		assert_eq!(sessions[0].initialize(Default::default(), false).unwrap_err(), Error::InvalidStateForRequest);
 	}
 
 	#[test]
 	fn fails_to_accept_initialization_when_already_initialized() {
 		let (_, _, _, sessions) = prepare_decryption_sessions();
-		assert_eq!(sessions[0].initialize(false).unwrap(), ());
+		assert_eq!(sessions[0].initialize(Default::default(), false).unwrap(), ());
 		assert_eq!(sessions[0].on_consensus_message(sessions[1].node(), &message::DecryptionConsensusMessage {
 			session: SessionId::default().into(),
 			sub_session: sessions[0].access_key().clone().into(),
 			session_nonce: 0,
 			message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession {
 				requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
+				version: Default::default(),
 			}),
 		}).unwrap_err(), Error::InvalidMessage);
 	}
@@ -706,6 +853,7 @@ mod tests {
 			session_nonce: 0,
 			message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession {
 				requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
+				version: Default::default(),
 			}),
 		}).unwrap(), ());
 		assert_eq!(sessions[1].on_partial_decryption_requested(sessions[2].node(), &message::RequestPartialDecryption {
@@ -727,6 +875,7 @@ mod tests {
 			session_nonce: 0,
 			message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession {
 				requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
+				version: Default::default(),
 			}),
 		}).unwrap(), ());
 		assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node(), &message::RequestPartialDecryption {
@@ -755,7 +904,7 @@ mod tests {
 	#[test]
 	fn fails_to_accept_partial_decrypt_twice() {
 		let (_, clusters, _, sessions) = prepare_decryption_sessions();
-		sessions[0].initialize(false).unwrap();
+		sessions[0].initialize(Default::default(), false).unwrap();
 
 		let mut pd_from = None;
 		let mut pd_msg = None;
@@ -783,7 +932,7 @@ mod tests {
 	#[test]
 	fn node_is_marked_rejected_when_timed_out_during_initialization_confirmation() {
 		let (_, _, _, sessions) = prepare_decryption_sessions();
-		sessions[0].initialize(false).unwrap();
+		sessions[0].initialize(Default::default(), false).unwrap();
 
 		// 1 node disconnects => we still can recover secret
 		sessions[0].on_node_timeout(sessions[1].node());
@@ -801,7 +950,7 @@ mod tests {
 		let key_pair = Random.generate().unwrap();
 
 		acl_storages[1].prohibit(key_pair.public().clone(), SessionId::default());
-		sessions[0].initialize(false).unwrap();
+		sessions[0].initialize(Default::default(), false).unwrap();
 
 		do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap();
 
@@ -813,7 +962,7 @@ mod tests {
 	#[test]
 	fn session_does_not_fail_if_requested_node_disconnects() {
 		let (_, clusters, _, sessions) = prepare_decryption_sessions();
-		sessions[0].initialize(false).unwrap();
+		sessions[0].initialize(Default::default(), false).unwrap();
 
 		do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap();
 
@@ -829,7 +978,7 @@ mod tests {
 	#[test]
 	fn session_does_not_fail_if_node_with_shadow_point_disconnects() {
 		let (_, clusters, _, sessions) = prepare_decryption_sessions();
-		sessions[0].initialize(false).unwrap();
+		sessions[0].initialize(Default::default(), false).unwrap();
 
 		do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults
 			&& sessions[0].data.lock().consensus_session.computation_job().responses().len() == 2).unwrap();
@@ -846,7 +995,7 @@ mod tests {
 	#[test]
 	fn session_restarts_if_confirmed_node_disconnects() {
 		let (_, clusters, _, sessions) = prepare_decryption_sessions();
-		sessions[0].initialize(false).unwrap();
+		sessions[0].initialize(Default::default(), false).unwrap();
 
 		do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap();
 
@@ -861,7 +1010,7 @@ mod tests {
 	#[test]
 	fn session_does_not_fail_if_non_master_node_disconnects_from_non_master_node() {
 		let (_, clusters, _, sessions) = prepare_decryption_sessions();
-		sessions[0].initialize(false).unwrap();
+		sessions[0].initialize(Default::default(), false).unwrap();
 
 		do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap();
 
@@ -876,7 +1025,7 @@ mod tests {
 		let (_, clusters, _, sessions) = prepare_decryption_sessions();
 
 		// now let's try to do a decryption
-		sessions[0].initialize(false).unwrap();
+		sessions[0].initialize(Default::default(), false).unwrap();
 
 		do_messages_exchange(&clusters, &sessions).unwrap();
 
@@ -898,7 +1047,7 @@ mod tests {
 		let (key_pair, clusters, _, sessions) = prepare_decryption_sessions();
 
 		// now let's try to do a decryption
-		sessions[0].initialize(true).unwrap();
+		sessions[0].initialize(Default::default(), true).unwrap();
 
 		do_messages_exchange(&clusters, &sessions).unwrap();
 
@@ -929,7 +1078,7 @@ mod tests {
 		let (key_pair, clusters, acl_storages, sessions) = prepare_decryption_sessions();
 
 		// now let's try to do a decryption
-		sessions[0].initialize(false).unwrap();
+		sessions[0].initialize(Default::default(), false).unwrap();
 
 		// we need 4 out of 5 nodes to agree to do a decryption
 		// let's say that 2 of these nodes are disagree
@@ -952,7 +1101,7 @@ mod tests {
 		acl_storages[0].prohibit(key_pair.public().clone(), SessionId::default());
 
 		// now let's try to do a decryption
-		sessions[0].initialize(false).unwrap();
+		sessions[0].initialize(Default::default(), false).unwrap();
 
 		do_messages_exchange(&clusters, &sessions).unwrap();
 
@@ -979,4 +1128,52 @@ mod tests {
 			}
 		)), Err(Error::ReplayProtection));
 	}
+
+	#[test]
+	fn decryption_works_when_delegated_to_other_node() {
+		let (_, clusters, _, mut sessions) = prepare_decryption_sessions();
+
+		// let's say node1 doesn't have a share && delegates decryption request to node0
+		// initially session is created on node1 => node1 is master for itself, but for other nodes node0 is still master
+		sessions[1].core.meta.master_node_id = sessions[1].core.meta.self_node_id.clone();
+		sessions[1].data.lock().consensus_session.consensus_job_mut().executor_mut().set_requester_signature(
+			sessions[0].data.lock().consensus_session.consensus_job().executor().requester_signature().unwrap().clone()
+		);
+
+		// now let's try to do a decryption
+		sessions[1].delegate(sessions[0].core.meta.self_node_id.clone(), Default::default(), false).unwrap();
+		do_messages_exchange(&clusters, &sessions).unwrap();
+
+		// now check that:
+		// 1) 4 of 5 sessions are in Finished state
+		assert_eq!(sessions.iter().filter(|s| s.state() == ConsensusSessionState::Finished).count(), 5);
+		// 2) 1 session has decrypted key value
+		assert_eq!(sessions[1].decrypted_secret().unwrap().unwrap(), EncryptedDocumentKeyShadow {
+			decrypted_secret: SECRET_PLAIN.into(),
+			common_point: None,
+			decrypt_shadows: None,
+		});
+	}
+
+	#[test]
+	fn decryption_works_when_share_owners_are_isolated() {
+		let (_, clusters, _, sessions) = prepare_decryption_sessions();
+
+		// we need 4 out of 5 nodes to agree to do a decryption
+		// let's say that 1 of these nodes (master) is isolated
+		let isolated_node_id = sessions[4].core.meta.self_node_id.clone();
+		for cluster in &clusters {
+			cluster.remove_node(&isolated_node_id);
+		}
+
+		// now let's try to do a decryption
+		sessions[0].initialize(Default::default(), false).unwrap();
+		do_messages_exchange(&clusters, &sessions).unwrap();
+
+		assert_eq!(sessions[0].decrypted_secret().unwrap().unwrap(), EncryptedDocumentKeyShadow {
+			decrypted_secret: SECRET_PLAIN.into(),
+			common_point: None,
+			decrypt_shadows: None,
+		});
+	}
 }
@@ -48,7 +48,7 @@ pub struct SessionImpl {
 	/// Public identifier of this node.
 	self_node_id: NodeId,
 	/// Encrypted data.
-	encrypted_data: DocumentKeyShare,
+	encrypted_data: Option<DocumentKeyShare>,
 	/// Key storage.
 	key_storage: Arc<KeyStorage>,
 	/// Cluster which allows this node to send messages to other nodes in the cluster.
@@ -68,7 +68,7 @@ pub struct SessionParams {
 	/// Id of node, on which this session is running.
 	pub self_node_id: Public,
 	/// Encrypted data (result of running generation_session::SessionImpl).
-	pub encrypted_data: DocumentKeyShare,
+	pub encrypted_data: Option<DocumentKeyShare>,
 	/// Key storage.
 	pub key_storage: Arc<KeyStorage>,
 	/// Cluster
@@ -115,7 +115,7 @@ pub enum SessionState {
 impl SessionImpl {
 	/// Create new encryption session.
 	pub fn new(params: SessionParams) -> Result<Self, Error> {
-		check_encrypted_data(&params.self_node_id, &params.encrypted_data)?;
+		check_encrypted_data(&params.encrypted_data)?;
 
 		Ok(SessionImpl {
 			id: params.id,
@@ -147,31 +147,31 @@ impl SessionImpl {
 			return Err(Error::InvalidStateForRequest);
 		}
 
-		// check that the requester is the author of the encrypted data
-		let requestor_public = ethkey::recover(&requestor_signature, &self.id)?;
-		if self.encrypted_data.author != requestor_public {
-			return Err(Error::AccessDenied);
-		}
-
 		// update state
 		data.state = SessionState::WaitingForInitializationConfirm;
-		for node_id in self.encrypted_data.id_numbers.keys() {
-			data.nodes.insert(node_id.clone(), NodeData {
-				initialization_confirmed: node_id == self.node(),
-			});
-		}
+		data.nodes.extend(self.cluster.nodes().into_iter().map(|n| (n, NodeData {
+			initialization_confirmed: &n == self.node(),
+		})));
 
-		// TODO: there could be situation when some nodes have failed to store encrypted data
-		// => potential problems during restore. some confirmation step is needed?
-		// save encryption data
-		let mut encrypted_data = self.encrypted_data.clone();
-		encrypted_data.common_point = Some(common_point.clone());
-		encrypted_data.encrypted_point = Some(encrypted_point.clone());
-		self.key_storage.update(self.id.clone(), encrypted_data)
-			.map_err(|e| Error::KeyStorage(e.into()))?;
+		// TODO: id signature is not enough here, as it was already used in key generation
+		// TODO: there could be situation when some nodes have failed to store encrypted data
+		// => potential problems during restore. some confirmation step is needed (2pc)?
+		// save encryption data
+		if let Some(mut encrypted_data) = self.encrypted_data.clone() {
+			// check that the requester is the author of the encrypted data
+			let requestor_public = ethkey::recover(&requestor_signature, &self.id)?;
+			if encrypted_data.author != requestor_public {
+				return Err(Error::AccessDenied);
+			}
+
+			encrypted_data.common_point = Some(common_point.clone());
+			encrypted_data.encrypted_point = Some(encrypted_point.clone());
+			self.key_storage.update(self.id.clone(), encrypted_data)
+				.map_err(|e| Error::KeyStorage(e.into()))?;
+		}
 
 		// start initialization
-		if self.encrypted_data.id_numbers.len() > 1 {
+		if data.nodes.len() > 1 {
 			self.cluster.broadcast(Message::Encryption(EncryptionMessage::InitializeEncryptionSession(InitializeEncryptionSession {
 				session: self.id.clone().into(),
 				session_nonce: self.nonce,
@@ -193,8 +193,6 @@ impl SessionImpl {
 		debug_assert!(self.id == *message.session);
 		debug_assert!(&sender != self.node());
 
-		self.check_nonce(message.session_nonce)?;
-
 		let mut data = self.data.lock();
 
 		// check state
@@ -203,17 +201,18 @@ impl SessionImpl {
 		}
 
 		// check that the requester is the author of the encrypted data
-		let requestor_public = ethkey::recover(&message.requestor_signature.clone().into(), &self.id)?;
-		if self.encrypted_data.author != requestor_public {
-			return Err(Error::AccessDenied);
-		}
-
-		// save encryption data
-		let mut encrypted_data = self.encrypted_data.clone();
-		encrypted_data.common_point = Some(message.common_point.clone().into());
-		encrypted_data.encrypted_point = Some(message.encrypted_point.clone().into());
-		self.key_storage.update(self.id.clone(), encrypted_data)
-			.map_err(|e| Error::KeyStorage(e.into()))?;
+		if let Some(mut encrypted_data) = self.encrypted_data.clone() {
+			let requestor_public = ethkey::recover(&message.requestor_signature.clone().into(), &self.id)?;
+			if encrypted_data.author != requestor_public {
+				return Err(Error::AccessDenied);
+			}
+
+			// save encryption data
+			encrypted_data.common_point = Some(message.common_point.clone().into());
+			encrypted_data.encrypted_point = Some(message.encrypted_point.clone().into());
+			self.key_storage.update(self.id.clone(), encrypted_data)
+				.map_err(|e| Error::KeyStorage(e.into()))?;
+		}
 
 		// update state
 		data.state = SessionState::Finished;
@@ -230,8 +229,6 @@ impl SessionImpl {
 		debug_assert!(self.id == *message.session);
 		debug_assert!(&sender != self.node());
 
-		self.check_nonce(message.session_nonce)?;
-
 		let mut data = self.data.lock();
 		debug_assert!(data.nodes.contains_key(&sender));
 
@@ -250,32 +247,19 @@ impl SessionImpl {
 
 		Ok(())
 	}
-
-	/// When error has occured on another node.
-	pub fn on_session_error(&self, sender: &NodeId, message: &EncryptionSessionError) -> Result<(), Error> {
-		self.check_nonce(message.session_nonce)?;
-
-		let mut data = self.data.lock();
-
-		warn!("{}: encryption session failed with error: {} from {}", self.node(), message.error, sender);
-
-		data.state = SessionState::Failed;
-		data.result = Some(Err(Error::Io(message.error.clone())));
-		self.completed.notify_all();
-
-		Ok(())
-	}
-
-	/// Check session nonce.
-	fn check_nonce(&self, message_session_nonce: u64) -> Result<(), Error> {
-		match self.nonce == message_session_nonce {
-			true => Ok(()),
-			false => Err(Error::ReplayProtection),
-		}
-	}
 }
 
 impl ClusterSession for SessionImpl {
+	type Id = SessionId;
+
+	fn type_name() -> &'static str {
+		"encryption"
+	}
+
+	fn id(&self) -> SessionId {
+		self.id.clone()
+	}
+
 	fn is_finished(&self) -> bool {
 		let data = self.data.lock();
 		data.state == SessionState::Failed
@@ -301,6 +285,47 @@ impl ClusterSession for SessionImpl {
 		data.result = Some(Err(Error::NodeDisconnected));
 		self.completed.notify_all();
 	}
+
+	fn on_session_error(&self, node: &NodeId, error: Error) {
+		// error in encryption session is considered fatal
+		// => broadcast error if error occured on this node
+		if *node == self.self_node_id {
+			// do not bother processing send error, as we already processing error
+			let _ = self.cluster.broadcast(Message::Encryption(EncryptionMessage::EncryptionSessionError(EncryptionSessionError {
+				session: self.id.clone().into(),
+				session_nonce: self.nonce,
+				error: error.clone().into(),
+			})));
+		}
+
+		let mut data = self.data.lock();
+
+		warn!("{}: encryption session failed with error: {} from {}", self.node(), error, node);
+
+		data.state = SessionState::Failed;
+		data.result = Some(Err(error));
+		self.completed.notify_all();
+	}
+
+	fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
+		if Some(self.nonce) != message.session_nonce() {
+			return Err(Error::ReplayProtection);
+		}
+
+		match message {
+			&Message::Encryption(ref message) => match message {
+				&EncryptionMessage::InitializeEncryptionSession(ref message) =>
+					self.on_initialize_session(sender.clone(), message),
+				&EncryptionMessage::ConfirmEncryptionInitialization(ref message) =>
+					self.on_confirm_initialization(sender.clone(), message),
+				&EncryptionMessage::EncryptionSessionError(ref message) => {
+					self.on_session_error(sender, Error::Io(message.error.clone().into()));
+					Ok(())
+				},
+			},
+			_ => unreachable!("cluster checks message to be correct before passing; qed"),
+		}
+	}
 }
 
 impl Session for SessionImpl {
@@ -329,15 +354,13 @@ impl Debug for SessionImpl {
 	}
 }
 
-fn check_encrypted_data(self_node_id: &Public, encrypted_data: &DocumentKeyShare) -> Result<(), Error> {
-	use key_server_cluster::generation_session::{check_cluster_nodes, check_threshold};
-
-	// check that common_point and encrypted_point are still not set yet
-	if encrypted_data.common_point.is_some() || encrypted_data.encrypted_point.is_some() {
-		return Err(Error::CompletedSessionId);
-	}
+fn check_encrypted_data(encrypted_data: &Option<DocumentKeyShare>) -> Result<(), Error> {
+	if let &Some(ref encrypted_data) = encrypted_data {
+		// check that common_point and encrypted_point are still not set yet
+		if encrypted_data.common_point.is_some() || encrypted_data.encrypted_point.is_some() {
+			return Err(Error::CompletedSessionId);
+		}
+	}
 
-	let nodes = encrypted_data.id_numbers.keys().cloned().collect();
-	check_cluster_nodes(self_node_id, &nodes)?;
-	check_threshold(encrypted_data.threshold, &nodes)
+	Ok(())
 }
@@ -20,7 +20,7 @@ use std::time;
 use std::sync::Arc;
 use parking_lot::{Condvar, Mutex};
 use ethkey::{Public, Secret};
-use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare};
+use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare, DocumentKeyShareVersion};
 use key_server_cluster::math;
 use key_server_cluster::cluster::Cluster;
 use key_server_cluster::cluster_sessions::ClusterSession;
@@ -291,8 +291,10 @@ impl SessionImpl {
 				self.on_keys_dissemination(sender.clone(), message),
 			&GenerationMessage::PublicKeyShare(ref message) =>
 				self.on_public_key_share(sender.clone(), message),
-			&GenerationMessage::SessionError(ref message) =>
-				self.on_session_error(sender, message),
+			&GenerationMessage::SessionError(ref message) => {
+				self.on_session_error(sender, Error::Io(message.error.clone().into()));
+				Ok(())
+			},
 			&GenerationMessage::SessionCompleted(ref message) =>
 				self.on_session_completed(sender.clone(), message),
 		}
@@ -504,11 +506,12 @@ impl SessionImpl {
 		let encrypted_data = DocumentKeyShare {
 			author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(),
 			threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"),
-			id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(),
-			secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(),
-			polynom1: data.polynom1.as_ref().expect("polynom1 is filled in KG phase; we are at the end of KG phase; qed").clone(),
 			common_point: None,
 			encrypted_point: None,
+			versions: vec![DocumentKeyShareVersion::new(
+				data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(),
+				data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(),
+			)],
 		};
 
 		if let Some(ref key_storage) = self.key_storage {
@@ -546,20 +549,6 @@ impl SessionImpl {
 		Ok(())
 	}
 
-	/// When error has occured on another node.
-	pub fn on_session_error(&self, sender: &NodeId, message: &SessionError) -> Result<(), Error> {
-		let mut data = self.data.lock();
-
-		warn!("{}: generation session failed with error: {} from {}", self.node(), message.error, sender);
-
-		data.state = SessionState::Failed;
-		data.key_share = Some(Err(Error::Io(message.error.clone())));
-		data.joint_public_and_secret = Some(Err(Error::Io(message.error.clone())));
-		self.completed.notify_all();
-
-		Ok(())
-	}
-
 	/// Complete initialization (when all other nodex has responded with confirmation)
 	fn complete_initialization(&self, mut derived_point: Public) -> Result<(), Error> {
 		// update point once again to make sure that derived point is not generated by last node
@@ -683,11 +672,12 @@ impl SessionImpl {
 		let encrypted_data = DocumentKeyShare {
 			author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(),
 			threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"),
-			id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(),
-			secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(),
-			polynom1: data.polynom1.as_ref().expect("polynom1 is filled in KG phase; we are at the end of KG phase; qed").clone(),
 			common_point: None,
 			encrypted_point: None,
+			versions: vec![DocumentKeyShareVersion::new(
+				data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(),
+				data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(),
+			)],
 		};
 
 		// if we are at the slave node - wait for session completion
@@ -725,6 +715,16 @@ impl SessionImpl {
 }
 
 impl ClusterSession for SessionImpl {
+	type Id = SessionId;
+
+	fn type_name() -> &'static str {
+		"generation"
+	}
+
+	fn id(&self) -> SessionId {
+		self.id.clone()
+	}
+
 	fn is_finished(&self) -> bool {
 		let data = self.data.lock();
 		data.state == SessionState::Failed
@@ -754,6 +754,32 @@ impl ClusterSession for SessionImpl {
 		data.joint_public_and_secret = Some(Err(Error::NodeDisconnected));
 		self.completed.notify_all();
 	}
+
+	fn on_session_error(&self, node: &NodeId, error: Error) {
+		// error in generation session is considered fatal
+		// => broadcast error if error occured on this node
+		if *node == self.self_node_id {
+			// do not bother processing send error, as we already processing error
+			let _ = self.cluster.broadcast(Message::Generation(GenerationMessage::SessionError(SessionError {
+				session: self.id.clone().into(),
+				session_nonce: self.nonce,
+				error: error.clone().into(),
+			})));
+		}
+
+		let mut data = self.data.lock();
+		data.state = SessionState::Failed;
+		data.key_share = Some(Err(error.clone()));
+		data.joint_public_and_secret = Some(Err(error));
+		self.completed.notify_all();
+	}
+
+	fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
+		match *message {
+			Message::Generation(ref message) => self.process_message(sender, message),
+			_ => unreachable!("cluster checks message to be correct before passing; qed"),
+		}
+	}
 }
 
 impl Session for SessionImpl {
@@ -852,8 +878,8 @@ pub mod tests {
 	use std::sync::Arc;
 	use std::collections::{BTreeSet, BTreeMap, VecDeque};
 	use tokio_core::reactor::Core;
-	use ethkey::{Random, Generator, Public};
-	use key_server_cluster::{NodeId, SessionId, Error, DummyKeyStorage};
+	use ethkey::{Random, Generator, Public, KeyPair};
+	use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage};
 	use key_server_cluster::message::{self, Message, GenerationMessage};
 	use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until, all_connections_established};
 	use key_server_cluster::cluster_sessions::ClusterSession;
@@ -956,6 +982,26 @@ pub mod tests {
 			let msg = self.take_message().unwrap();
 			self.process_message(msg)
 		}
+
+		pub fn compute_key_pair(&self, t: usize) -> KeyPair {
+			let secret_shares = self.nodes.values()
+				.map(|nd| nd.key_storage.get(&SessionId::default()).unwrap().unwrap().last_version().unwrap().secret_share.clone())
+				.take(t + 1)
+				.collect::<Vec<_>>();
+			let secret_shares = secret_shares.iter().collect::<Vec<_>>();
+			let id_numbers = self.nodes.iter()
+				.map(|(n, nd)| nd.key_storage.get(&SessionId::default()).unwrap().unwrap().last_version().unwrap().id_numbers[n].clone())
+				.take(t + 1)
+				.collect::<Vec<_>>();
+			let id_numbers = id_numbers.iter().collect::<Vec<_>>();
+			let joint_secret1 = math::compute_joint_secret_from_shares(t, &secret_shares, &id_numbers).unwrap();
+
+			let secret_values: Vec<_> = self.nodes.values().map(|s| s.session.joint_public_and_secret().unwrap().unwrap().1).collect();
+			let joint_secret2 = math::compute_joint_secret(secret_values.iter()).unwrap();
+			assert_eq!(joint_secret1, joint_secret2);
+
+			KeyPair::from_secret(joint_secret1).unwrap()
+		}
 	}
 
 	fn make_simple_cluster(threshold: usize, num_nodes: usize) -> Result<(SessionId, NodeId, NodeId, MessageLoop), Error> {
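The `compute_key_pair` helper above cross-checks two recoveries of the joint secret: interpolation from any `t + 1` stored shares (`compute_joint_secret_from_shares`) against the sum of the per-node session secrets. For orientation only, the interpolation is ordinary Shamir/Lagrange reconstruction at zero — stated here as background, not quoted from the code:

	\[ s \;=\; \sum_{i=0}^{t} s_i \prod_{\substack{j=0 \\ j \neq i}}^{t} \frac{x_j}{x_j - x_i} \]

where \(s_i\) is the share stored under id-number \(x_i\).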
|
@@ -21,19 +21,17 @@ use ethkey::{Public, Secret, Signature};
 use bigint::hash::H256;
 use key_server_cluster::{Error, NodeId, SessionId, SessionMeta, AclStorage, DocumentKeyShare};
 use key_server_cluster::cluster::{Cluster};
-use key_server_cluster::cluster_sessions::ClusterSession;
+use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession};
 use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionParams as GenerationSessionParams,
 	Session as GenerationSessionApi, SessionState as GenerationSessionState};
 use key_server_cluster::message::{Message, SigningMessage, SigningConsensusMessage, SigningGenerationMessage,
 	RequestPartialSignature, PartialSignature, SigningSessionCompleted, GenerationMessage, ConsensusMessage, SigningSessionError,
-	InitializeConsensusSession, ConfirmConsensusInitialization};
+	InitializeConsensusSession, ConfirmConsensusInitialization, SigningSessionDelegation, SigningSessionDelegationCompleted};
 use key_server_cluster::jobs::job_session::JobTransport;
 use key_server_cluster::jobs::key_access_job::KeyAccessJob;
 use key_server_cluster::jobs::signing_job::{PartialSigningRequest, PartialSigningResponse, SigningJob};
 use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
 
-pub use key_server_cluster::decryption_session::DecryptionSessionId as SigningSessionId;
-
 /// Signing session API.
 pub trait Session: Send + Sync + 'static {
 	/// Wait until session is completed. Returns signed message.
@@ -61,7 +59,7 @@ struct SessionCore {
 	/// Signing session access key.
 	pub access_key: Secret,
 	/// Key share.
-	pub key_share: DocumentKeyShare,
+	pub key_share: Option<DocumentKeyShare>,
 	/// Cluster which allows this node to send messages to other nodes in the cluster.
 	pub cluster: Arc<Cluster>,
 	/// Session-level nonce.
@@ -79,10 +77,14 @@ struct SessionData {
 	pub state: SessionState,
 	/// Message hash.
 	pub message_hash: Option<H256>,
+	/// Key version to use for decryption.
+	pub version: Option<H256>,
 	/// Consensus-based signing session.
 	pub consensus_session: SigningConsensusSession,
 	/// Session key generation session.
 	pub generation_session: Option<GenerationSession>,
+	/// Delegation status.
+	pub delegation_status: Option<DelegationStatus>,
 	/// Decryption result.
 	pub result: Option<Result<(Secret, Secret), Error>>,
 }
@@ -106,7 +108,7 @@ pub struct SessionParams {
 	/// Session access key.
 	pub access_key: Secret,
 	/// Key share.
-	pub key_share: DocumentKeyShare,
+	pub key_share: Option<DocumentKeyShare>,
 	/// ACL storage.
 	pub acl_storage: Arc<AclStorage>,
 	/// Cluster
@@ -123,6 +125,8 @@ struct SigningConsensusTransport {
 	access_key: Secret,
 	/// Session-level nonce.
 	nonce: u64,
+	/// Selected key version (on master node).
+	version: Option<H256>,
 	/// Cluster.
 	cluster: Arc<Cluster>,
 }
@@ -151,23 +155,24 @@ struct SigningJobTransport {
 	cluster: Arc<Cluster>,
 }
 
+/// Session delegation status.
+enum DelegationStatus {
+	/// Delegated to other node.
+	DelegatedTo(NodeId),
+	/// Delegated from other node.
+	DelegatedFrom(NodeId, u64),
+}
+
 impl SessionImpl {
 	/// Create new signing session.
 	pub fn new(params: SessionParams, requester_signature: Option<Signature>) -> Result<Self, Error> {
-		debug_assert_eq!(params.meta.threshold, params.key_share.threshold);
-		debug_assert_eq!(params.meta.self_node_id == params.meta.master_node_id, requester_signature.is_some());
-
-		use key_server_cluster::generation_session::{check_cluster_nodes, check_threshold};
-
-		// check nodes and threshold
-		let nodes = params.key_share.id_numbers.keys().cloned().collect();
-		check_cluster_nodes(&params.meta.self_node_id, &nodes)?;
-		check_threshold(params.key_share.threshold, &nodes)?;
+		debug_assert_eq!(params.meta.threshold, params.key_share.as_ref().map(|ks| ks.threshold).unwrap_or_default());
 
 		let consensus_transport = SigningConsensusTransport {
 			id: params.meta.id.clone(),
 			access_key: params.access_key.clone(),
 			nonce: params.nonce,
+			version: None,
 			cluster: params.cluster.clone(),
 		};
 		let consensus_session = ConsensusSession::new(ConsensusSessionParams {
@@ -191,8 +196,10 @@ impl SessionImpl {
 			data: Mutex::new(SessionData {
 				state: SessionState::ConsensusEstablishing,
 				message_hash: None,
+				version: None,
 				consensus_session: consensus_session,
 				generation_session: None,
+				delegation_status: None,
 				result: None,
 			}),
 		})
@@ -204,11 +211,53 @@ impl SessionImpl {
 		self.data.lock().state
 	}
 
-	/// Initialize signing session on master node.
-	pub fn initialize(&self, message_hash: H256) -> Result<(), Error> {
+	/// Delegate session to other node.
+	pub fn delegate(&self, master: NodeId, version: H256, message_hash: H256) -> Result<(), Error> {
+		if self.core.meta.master_node_id != self.core.meta.self_node_id {
+			return Err(Error::InvalidStateForRequest);
+		}
+
 		let mut data = self.data.lock();
+		if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization || data.delegation_status.is_some() {
+			return Err(Error::InvalidStateForRequest);
+		}
+
+		data.consensus_session.consensus_job_mut().executor_mut().set_has_key_share(false);
+		self.core.cluster.send(&master, Message::Signing(SigningMessage::SigningSessionDelegation(SigningSessionDelegation {
+			session: self.core.meta.id.clone().into(),
+			sub_session: self.core.access_key.clone().into(),
+			session_nonce: self.core.nonce,
+			requestor_signature: data.consensus_session.consensus_job().executor().requester_signature()
+				.expect("signature is passed to master node on creation; session can be delegated from master node only; qed")
+				.clone().into(),
+			version: version.into(),
+			message_hash: message_hash.into(),
+		})))?;
+		data.delegation_status = Some(DelegationStatus::DelegatedTo(master));
+		Ok(())
+
+	}
+
+	/// Initialize signing session on master node.
+	pub fn initialize(&self, version: H256, message_hash: H256) -> Result<(), Error> {
+		debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id);
+
+		// check if version exists
+		let key_version = match self.core.key_share.as_ref() {
+			None => return Err(Error::InvalidMessage),
+			Some(key_share) => key_share.version(&version).map_err(|e| Error::KeyStorage(e.into()))?,
+		};
+
+		let mut data = self.data.lock();
+		let non_isolated_nodes = self.core.cluster.nodes();
+		data.consensus_session.consensus_job_mut().transport_mut().version = Some(version.clone());
+		data.version = Some(version.clone());
 		data.message_hash = Some(message_hash);
-		data.consensus_session.initialize(self.core.key_share.id_numbers.keys().cloned().collect())?;
+		data.consensus_session.initialize(key_version.id_numbers.keys()
+			.filter(|n| non_isolated_nodes.contains(*n))
+			.cloned()
+			.chain(::std::iter::once(self.core.meta.self_node_id.clone()))
+			.collect())?;
 
 		if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished {
 			let generation_session = GenerationSession::new(GenerationSessionParams {
@@ -232,11 +281,11 @@ impl SessionImpl {
 			data.generation_session = Some(generation_session);
 			data.state = SessionState::SignatureComputing;
 
-			self.core.disseminate_jobs(&mut data.consensus_session, joint_public_and_secret.0, joint_public_and_secret.1, message_hash)?;
+			self.core.disseminate_jobs(&mut data.consensus_session, &version, joint_public_and_secret.0, joint_public_and_secret.1, message_hash)?;
 
 			debug_assert!(data.consensus_session.state() == ConsensusSessionState::Finished);
-			data.result = Some(Ok(data.consensus_session.result()?));
-			self.core.completed.notify_all();
+			let result = data.consensus_session.result()?;
+			Self::set_signing_result(&self.core, &mut *data, Ok(result));
 		}
 
 		Ok(())
@@ -258,12 +307,55 @@ impl SessionImpl {
 			&SigningMessage::PartialSignature(ref message) =>
 				self.on_partial_signature(sender, message),
 			&SigningMessage::SigningSessionError(ref message) =>
-				self.on_session_error(sender, message),
+				self.process_node_error(Some(&sender), Error::Io(message.error.clone())),
 			&SigningMessage::SigningSessionCompleted(ref message) =>
 				self.on_session_completed(sender, message),
+			&SigningMessage::SigningSessionDelegation(ref message) =>
+				self.on_session_delegated(sender, message),
+			&SigningMessage::SigningSessionDelegationCompleted(ref message) =>
+				self.on_session_delegation_completed(sender, message),
 		}
 	}
 
+	/// When session is delegated to this node.
+	pub fn on_session_delegated(&self, sender: &NodeId, message: &SigningSessionDelegation) -> Result<(), Error> {
+		debug_assert!(self.core.meta.id == *message.session);
+		debug_assert!(self.core.access_key == *message.sub_session);
+
+		{
+			let mut data = self.data.lock();
+			if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization || data.delegation_status.is_some() {
+				return Err(Error::InvalidStateForRequest);
+			}
+
+			data.consensus_session.consensus_job_mut().executor_mut().set_requester_signature(message.requestor_signature.clone().into());
+			data.delegation_status = Some(DelegationStatus::DelegatedFrom(sender.clone(), message.session_nonce));
+		}
+
+		self.initialize(message.version.clone().into(), message.message_hash.clone().into())
+	}
+
+	/// When delegated session is completed on other node.
+	pub fn on_session_delegation_completed(&self, sender: &NodeId, message: &SigningSessionDelegationCompleted) -> Result<(), Error> {
+		debug_assert!(self.core.meta.id == *message.session);
+		debug_assert!(self.core.access_key == *message.sub_session);
+
+		if self.core.meta.master_node_id != self.core.meta.self_node_id {
+			return Err(Error::InvalidStateForRequest);
+		}
+
+		let mut data = self.data.lock();
+		match data.delegation_status.as_ref() {
+			Some(&DelegationStatus::DelegatedTo(ref node)) if node == sender => (),
+			_ => return Err(Error::InvalidMessage),
+		}
+
+		Self::set_signing_result(&self.core, &mut *data, Ok((message.signature_c.clone().into(), message.signature_s.clone().into())));
+
+		Ok(())
+	}
+
+
 	/// When consensus-related message is received.
 	pub fn on_consensus_message(&self, sender: &NodeId, message: &SigningConsensusMessage) -> Result<(), Error> {
 		debug_assert!(self.core.meta.id == *message.session);
@@ -272,6 +364,15 @@ impl SessionImpl {
 
 		let mut data = self.data.lock();
 		let is_establishing_consensus = data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus;
+
+		if let &ConsensusMessage::InitializeConsensusSession(ref msg) = &message.message {
+			let version = msg.version.clone().into();
+			let has_key_share = self.core.key_share.as_ref()
+				.map(|ks| ks.version(&version).is_ok())
+				.unwrap_or(false);
+			data.consensus_session.consensus_job_mut().executor_mut().set_has_key_share(has_key_share);
+			data.version = Some(version);
+		}
 		data.consensus_session.on_consensus_message(&sender, &message.message)?;
 
 		let is_consensus_established = data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished;
@@ -283,6 +384,11 @@ impl SessionImpl {
 			let mut other_consensus_group_nodes = consensus_group.clone();
 			other_consensus_group_nodes.remove(&self.core.meta.self_node_id);
 
+			let key_share = match self.core.key_share.as_ref() {
+				None => return Err(Error::InvalidMessage),
+				Some(key_share) => key_share,
+			};
+
 			let generation_session = GenerationSession::new(GenerationSessionParams {
 				id: self.core.meta.id.clone(),
 				self_node_id: self.core.meta.self_node_id.clone(),
@@ -295,7 +401,7 @@ impl SessionImpl {
 				}),
 				nonce: None,
 			});
-			generation_session.initialize(Public::default(), self.core.key_share.threshold, consensus_group)?;
+			generation_session.initialize(Public::default(), key_share.threshold, consensus_group)?;
 			data.generation_session = Some(generation_session);
 			data.state = SessionState::SessionKeyGeneration;
 
@@ -312,7 +418,10 @@ impl SessionImpl {
 
 		if let &GenerationMessage::InitializeSession(ref message) = &message.message {
 			if &self.core.meta.master_node_id != sender {
-				return Err(Error::InvalidMessage);
+				match data.delegation_status.as_ref() {
+					Some(&DelegationStatus::DelegatedTo(s)) if s == *sender => (),
+					_ => return Err(Error::InvalidMessage),
+				}
 			}
 
 			let consensus_group: BTreeSet<NodeId> = message.nodes.keys().cloned().map(Into::into).collect();
@@ -351,13 +460,14 @@ impl SessionImpl {
 			return Ok(());
 		}
 
+		let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone();
 		let message_hash = data.message_hash
 			.expect("we are on master node; on master node message_hash is filled in initialize(); on_generation_message follows initialize; qed");
 		let joint_public_and_secret = data.generation_session.as_ref()
 			.expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")
 			.joint_public_and_secret()
 			.expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?;
-		self.core.disseminate_jobs(&mut data.consensus_session, joint_public_and_secret.0, joint_public_and_secret.1, message_hash)
+		self.core.disseminate_jobs(&mut data.consensus_session, &version, joint_public_and_secret.0, joint_public_and_secret.1, message_hash)
 	}
 
 	/// When partial signature is requested.
@@ -366,6 +476,11 @@ impl SessionImpl {
 		debug_assert!(self.core.access_key == *message.sub_session);
 		debug_assert!(sender != &self.core.meta.self_node_id);
 
+		let key_share = match self.core.key_share.as_ref() {
+			None => return Err(Error::InvalidMessage),
+			Some(key_share) => key_share,
+		};
+
 		let mut data = self.data.lock();
 
 		if sender != &self.core.meta.master_node_id {
@@ -379,7 +494,9 @@ impl SessionImpl {
 			.expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")
 			.joint_public_and_secret()
 			.expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?;
-		let signing_job = SigningJob::new_on_slave(self.core.meta.self_node_id.clone(), self.core.key_share.clone(), joint_public_and_secret.0, joint_public_and_secret.1)?;
+		let key_version = key_share.version(data.version.as_ref().ok_or(Error::InvalidMessage)?)
+			.map_err(|e| Error::KeyStorage(e.into()))?.hash.clone();
+		let signing_job = SigningJob::new_on_slave(self.core.meta.self_node_id.clone(), key_share.clone(), key_version, joint_public_and_secret.0, joint_public_and_secret.1)?;
 		let signing_transport = self.core.signing_transport();
 
 		data.consensus_session.on_job_request(sender, PartialSigningRequest {
@@ -414,8 +531,8 @@ impl SessionImpl {
 			})))?;
 		}
 
-		data.result = Some(Ok(data.consensus_session.result()?));
-		self.core.completed.notify_all();
+		let result = data.consensus_session.result()?;
+		Self::set_signing_result(&self.core, &mut *data, Ok(result));
 
 		Ok(())
 	}
@@ -429,66 +546,130 @@ impl SessionImpl {
 		self.data.lock().consensus_session.on_session_completed(sender)
 	}
 
-	/// When error has occured on another node.
-	pub fn on_session_error(&self, sender: &NodeId, message: &SigningSessionError) -> Result<(), Error> {
-		self.process_node_error(Some(&sender), &message.error)
-	}
-
 	/// Process error from the other node.
-	fn process_node_error(&self, node: Option<&NodeId>, error: &String) -> Result<(), Error> {
+	fn process_node_error(&self, node: Option<&NodeId>, error: Error) -> Result<(), Error> {
 		let mut data = self.data.lock();
+		let is_self_node_error = node.map(|n| n == &self.core.meta.self_node_id).unwrap_or(false);
+		// error is always fatal if coming from this node
+		if is_self_node_error {
+			Self::set_signing_result(&self.core, &mut *data, Err(error.clone()));
+			return Err(error);
+		}
+
 		match {
 			match node {
 				Some(node) => data.consensus_session.on_node_error(node),
 				None => data.consensus_session.on_session_timeout(),
 			}
 		} {
-			Ok(false) => Ok(()),
+			Ok(false) => {
+				Ok(())
+			},
 			Ok(true) => {
+				let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone();
 				let message_hash = data.message_hash.as_ref().cloned()
 					.expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed");
 				let joint_public_and_secret = data.generation_session.as_ref()
 					.expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed")
 					.joint_public_and_secret()
 					.expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed")?;
-				let disseminate_result = self.core.disseminate_jobs(&mut data.consensus_session, joint_public_and_secret.0, joint_public_and_secret.1, message_hash);
+				let disseminate_result = self.core.disseminate_jobs(&mut data.consensus_session, &version, joint_public_and_secret.0, joint_public_and_secret.1, message_hash);
 				match disseminate_result {
 					Ok(()) => Ok(()),
 					Err(err) => {
 						warn!("{}: signing session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node);
-						data.result = Some(Err(err.clone()));
-						self.core.completed.notify_all();
+						Self::set_signing_result(&self.core, &mut *data, Err(err.clone()));
 						Err(err)
 					}
 				}
 			},
 			Err(err) => {
 				warn!("{}: signing session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node);
-				data.result = Some(Err(err.clone()));
-				self.core.completed.notify_all();
+				Self::set_signing_result(&self.core, &mut *data, Err(err.clone()));
 				Err(err)
 			},
 		}
 	}
 
+	/// Set signing session result.
+	fn set_signing_result(core: &SessionCore, data: &mut SessionData, result: Result<(Secret, Secret), Error>) {
+		if let Some(DelegationStatus::DelegatedFrom(master, nonce)) = data.delegation_status.take() {
+			// error means can't communicate => ignore it
+			let _ = match result.as_ref() {
+				Ok(signature) => core.cluster.send(&master, Message::Signing(SigningMessage::SigningSessionDelegationCompleted(SigningSessionDelegationCompleted {
+					session: core.meta.id.clone().into(),
+					sub_session: core.access_key.clone().into(),
+					session_nonce: nonce,
+					signature_c: signature.0.clone().into(),
+					signature_s: signature.1.clone().into(),
+				}))),
+				Err(error) => core.cluster.send(&master, Message::Signing(SigningMessage::SigningSessionError(SigningSessionError {
+					session: core.meta.id.clone().into(),
+					sub_session: core.access_key.clone().into(),
+					session_nonce: nonce,
+					error: error.clone().into(),
+				}))),
+			};
+		}
+
+		data.result = Some(result);
+		core.completed.notify_all();
+	}
 }
 
 impl ClusterSession for SessionImpl {
+	type Id = SessionIdWithSubSession;
+
+	fn type_name() -> &'static str {
+		"signing"
+	}
+
+	fn id(&self) -> SessionIdWithSubSession {
+		SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.access_key.clone())
+	}
+
 	fn is_finished(&self) -> bool {
-		let data = self.data.lock();
-		data.consensus_session.state() == ConsensusSessionState::Failed
-			|| data.consensus_session.state() == ConsensusSessionState::Finished
+		self.data.lock().result.is_some()
 	}
 
 	fn on_node_timeout(&self, node: &NodeId) {
 		// ignore error, only state matters
-		let _ = self.process_node_error(Some(node), &Error::NodeDisconnected.into());
+		let _ = self.process_node_error(Some(node), Error::NodeDisconnected);
 	}
 
 	fn on_session_timeout(&self) {
 		// ignore error, only state matters
-		let _ = self.process_node_error(None, &Error::NodeDisconnected.into());
+		let _ = self.process_node_error(None, Error::NodeDisconnected);
+	}
+
+	fn on_session_error(&self, node: &NodeId, error: Error) {
+		let is_fatal = self.process_node_error(Some(node), error.clone()).is_err();
+		let is_this_node_error = *node == self.core.meta.self_node_id;
+		if is_fatal || is_this_node_error {
+			// error in signing session is non-fatal, if occurs on slave node
+			// => either respond with error
+			// => or broadcast error
+			let message = Message::Signing(SigningMessage::SigningSessionError(SigningSessionError {
+				session: self.core.meta.id.clone().into(),
+				sub_session: self.core.access_key.clone().into(),
+				session_nonce: self.core.nonce,
+				error: error.clone().into(),
+			}));
+
+			// do not bother processing send error, as we already processing error
+			let _ = if self.core.meta.master_node_id == self.core.meta.self_node_id {
+				self.core.cluster.broadcast(message)
+			} else {
+				self.core.cluster.send(&self.core.meta.master_node_id, message)
+			};
+		}
+	}
+
+	fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
+		match *message {
+			Message::Signing(ref message) => self.process_message(sender, message),
+			_ => unreachable!("cluster checks message to be correct before passing; qed"),
+		}
 	}
 }
 
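The `set_signing_result` helper added above routes the session outcome differently depending on whether this session was delegated. A minimal self-contained sketch of that routing decision is given below; all types, node ids, and message names are simplified stand-ins for illustration only, not the crate's actual API:

```rust
// Illustrative mirror of the delegation-aware result routing in set_signing_result.
#[allow(dead_code)]
#[derive(Debug)]
enum DelegationStatus {
    DelegatedTo(u32),        // session was handed off to this node
    DelegatedFrom(u32, u64), // (master node, session nonce) that handed it to us
}

fn route_result(status: Option<DelegationStatus>, result: Result<&'static str, &'static str>) -> String {
    match status {
        // a delegated session reports its (possibly failed) result back to its master
        Some(DelegationStatus::DelegatedFrom(master, nonce)) => match result {
            Ok(sig) => format!("send DelegationCompleted({}) to node {} (nonce {})", sig, master, nonce),
            Err(e) => format!("send SessionError({}) to node {} (nonce {})", e, master, nonce),
        },
        // otherwise the result is stored locally and waiters are notified
        _ => format!("store locally: {:?}", result),
    }
}

fn main() {
    println!("{}", route_result(Some(DelegationStatus::DelegatedFrom(1, 7)), Ok("(c, s)")));
    println!("{}", route_result(None, Err("ConsensusUnreachable")));
}
```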
@@ -552,8 +733,14 @@ impl SessionCore {
 		}
 	}
 
-	pub fn disseminate_jobs(&self, consensus_session: &mut SigningConsensusSession, session_public: Public, session_secret_share: Secret, message_hash: H256) -> Result<(), Error> {
-		let signing_job = SigningJob::new_on_master(self.meta.self_node_id.clone(), self.key_share.clone(), session_public, session_secret_share, message_hash)?;
+	pub fn disseminate_jobs(&self, consensus_session: &mut SigningConsensusSession, version: &H256, session_public: Public, session_secret_share: Secret, message_hash: H256) -> Result<(), Error> {
+		let key_share = match self.key_share.as_ref() {
+			None => return Err(Error::InvalidMessage),
+			Some(key_share) => key_share,
+		};
+
+		let key_version = key_share.version(version).map_err(|e| Error::KeyStorage(e.into()))?.hash.clone();
+		let signing_job = SigningJob::new_on_master(self.meta.self_node_id.clone(), key_share.clone(), key_version, session_public, session_secret_share, message_hash)?;
 		consensus_session.disseminate_jobs(signing_job, self.signing_transport())
 	}
 }
@@ -563,12 +750,15 @@ impl JobTransport for SigningConsensusTransport {
 	type PartialJobResponse=bool;
 
 	fn send_partial_request(&self, node: &NodeId, request: Signature) -> Result<(), Error> {
+		let version = self.version.as_ref()
+			.expect("send_partial_request is called on initialized master node only; version is filled in before initialization starts on master node; qed");
 		self.cluster.send(node, Message::Signing(SigningMessage::SigningConsensusMessage(SigningConsensusMessage {
 			session: self.id.clone().into(),
 			sub_session: self.access_key.clone().into(),
 			session_nonce: self.nonce,
 			message: ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
 				requestor_signature: request.into(),
+				version: version.clone().into(),
 			})
 		})))
 	}
@@ -619,7 +809,8 @@ mod tests {
 	use bigint::hash::H256;
 	use ethkey::{self, Random, Generator, Public, Secret, KeyPair};
 	use acl_storage::DummyAclStorage;
-	use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, SessionMeta, Error, KeyStorage};
+	use key_server_cluster::{NodeId, DummyKeyStorage, DocumentKeyShare, DocumentKeyShareVersion, SessionId, SessionMeta, Error, KeyStorage};
+	use key_server_cluster::cluster_sessions::ClusterSession;
 	use key_server_cluster::cluster::tests::DummyCluster;
 	use key_server_cluster::generation_session::{Session as GenerationSession};
 	use key_server_cluster::generation_session::tests::MessageLoop as KeyGenerationMessageLoop;
@@ -631,6 +822,7 @@ mod tests {
 	struct Node {
 		pub node_id: NodeId,
 		pub cluster: Arc<DummyCluster>,
+		pub key_storage: Arc<DummyKeyStorage>,
 		pub session: SessionImpl,
 	}
 
@@ -640,10 +832,12 @@ mod tests {
 		pub nodes: BTreeMap<NodeId, Node>,
 		pub queue: VecDeque<(NodeId, NodeId, Message)>,
 		pub acl_storages: Vec<Arc<DummyAclStorage>>,
+		pub version: H256,
 	}
 
 	impl MessageLoop {
 		pub fn new(gl: &KeyGenerationMessageLoop) -> Self {
+			let version = gl.nodes.values().nth(0).unwrap().key_storage.get(&Default::default()).unwrap().unwrap().versions.iter().last().unwrap().hash;
 			let mut nodes = BTreeMap::new();
 			let session_id = gl.session_id.clone();
 			let requester = Random.generate().unwrap();
@@ -659,15 +853,15 @@ mod tests {
 					id: session_id.clone(),
 					self_node_id: gl_node_id.clone(),
 					master_node_id: master_node_id.clone(),
-					threshold: gl_node.key_storage.get(&session_id).unwrap().threshold,
+					threshold: gl_node.key_storage.get(&session_id).unwrap().unwrap().threshold,
 				},
 				access_key: "834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec".parse().unwrap(),
-				key_share: gl_node.key_storage.get(&session_id).unwrap(),
+				key_share: Some(gl_node.key_storage.get(&session_id).unwrap().unwrap()),
 				acl_storage: acl_storage,
 				cluster: cluster.clone(),
 				nonce: 0,
 			}, if i == 0 { signature.clone() } else { None }).unwrap();
-			nodes.insert(gl_node_id.clone(), Node { node_id: gl_node_id.clone(), cluster: cluster, session: session });
+			nodes.insert(gl_node_id.clone(), Node { node_id: gl_node_id.clone(), cluster: cluster, key_storage: gl_node.key_storage.clone(), session: session });
 		}
 
 		let nodes_ids: Vec<_> = nodes.keys().cloned().collect();
@@ -683,6 +877,7 @@ mod tests {
 			nodes: nodes,
 			queue: VecDeque::new(),
 			acl_storages: acl_storages,
+			version: version,
 		}
 	}
 
@@ -700,16 +895,7 @@ mod tests {
 	pub fn process_message(&mut self, mut msg: (NodeId, NodeId, Message)) -> Result<(), Error> {
 		let mut is_queued_message = false;
 		loop {
-			match {
-				match msg.2 {
-					Message::Signing(SigningMessage::SigningConsensusMessage(ref message)) => self.nodes[&msg.1].session.on_consensus_message(&msg.0, &message),
-					Message::Signing(SigningMessage::SigningGenerationMessage(ref message)) => self.nodes[&msg.1].session.on_generation_message(&msg.0, &message),
-					Message::Signing(SigningMessage::RequestPartialSignature(ref message)) => self.nodes[&msg.1].session.on_partial_signature_requested(&msg.0, &message),
-					Message::Signing(SigningMessage::PartialSignature(ref message)) => self.nodes[&msg.1].session.on_partial_signature(&msg.0, &message),
-					Message::Signing(SigningMessage::SigningSessionCompleted(ref message)) => self.nodes[&msg.1].session.on_session_completed(&msg.0, &message),
-					_ => panic!("unexpected"),
-				}
-			} {
+			match self.nodes[&msg.1].session.on_message(&msg.0, &msg.2) {
 				Ok(_) => {
 					if let Some(message) = self.queue.pop_front() {
 						msg = message;
@@ -765,7 +951,7 @@ mod tests {
 
 		// run signing session
 		let message_hash = H256::from(777);
-		sl.master().initialize(message_hash).unwrap();
+		sl.master().initialize(sl.version.clone(), message_hash).unwrap();
 		while let Some((from, to, message)) = sl.take_message() {
 			sl.process_message((from, to, message)).unwrap();
 		}
@@ -790,15 +976,17 @@ mod tests {
 				threshold: 0,
 			},
 			access_key: Random.generate().unwrap().secret().clone(),
-			key_share: DocumentKeyShare {
+			key_share: Some(DocumentKeyShare {
 				author: Public::default(),
 				threshold: 0,
-				id_numbers: nodes,
-				secret_share: Random.generate().unwrap().secret().clone(),
-				polynom1: Vec::new(),
 				common_point: Some(Random.generate().unwrap().public().clone()),
 				encrypted_point: Some(Random.generate().unwrap().public().clone()),
-			},
+				versions: vec![DocumentKeyShareVersion {
+					hash: Default::default(),
+					id_numbers: nodes,
+					secret_share: Random.generate().unwrap().secret().clone(),
+				}],
+			}),
 			acl_storage: Arc::new(DummyAclStorage::default()),
 			cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
 			nonce: 0,
@@ -809,12 +997,9 @@ mod tests {
 	}
 
 	#[test]
-	fn fails_to_construct_if_not_a_part_of_cluster() {
+	fn fails_to_initialize_if_does_not_have_a_share() {
-		let mut nodes = BTreeMap::new();
 		let self_node_id = Random.generate().unwrap().public().clone();
-		nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
-		nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
-		match SessionImpl::new(SessionParams {
+		let session = SessionImpl::new(SessionParams {
 			meta: SessionMeta {
 				id: SessionId::default(),
 				self_node_id: self_node_id.clone(),
@@ -822,31 +1007,21 @@ mod tests {
 				threshold: 0,
 			},
 			access_key: Random.generate().unwrap().secret().clone(),
-			key_share: DocumentKeyShare {
-				author: Public::default(),
-				threshold: 0,
-				id_numbers: nodes,
-				secret_share: Random.generate().unwrap().secret().clone(),
-				polynom1: Vec::new(),
-				common_point: Some(Random.generate().unwrap().public().clone()),
-				encrypted_point: Some(Random.generate().unwrap().public().clone()),
-			},
+			key_share: None,
 			acl_storage: Arc::new(DummyAclStorage::default()),
 			cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
 			nonce: 0,
-		}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) {
-			Err(Error::InvalidNodesConfiguration) => (),
-			_ => panic!("unexpected"),
-		}
+		}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())).unwrap();
+		assert_eq!(session.initialize(Default::default(), Default::default()), Err(Error::InvalidMessage));
 	}
 
 	#[test]
-	fn fails_to_construct_if_threshold_is_wrong() {
+	fn fails_to_initialize_if_threshold_is_wrong() {
 		let mut nodes = BTreeMap::new();
 		let self_node_id = Random.generate().unwrap().public().clone();
 		nodes.insert(self_node_id.clone(), Random.generate().unwrap().secret().clone());
 		nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
-		match SessionImpl::new(SessionParams {
+		let session = SessionImpl::new(SessionParams {
 			meta: SessionMeta {
 				id: SessionId::default(),
 				self_node_id: self_node_id.clone(),
@@ -854,35 +1029,35 @@ mod tests {
 				threshold: 2,
 			},
 			access_key: Random.generate().unwrap().secret().clone(),
-			key_share: DocumentKeyShare {
+			key_share: Some(DocumentKeyShare {
 				author: Public::default(),
 				threshold: 2,
-				id_numbers: nodes,
-				secret_share: Random.generate().unwrap().secret().clone(),
-				polynom1: Vec::new(),
 				common_point: Some(Random.generate().unwrap().public().clone()),
 				encrypted_point: Some(Random.generate().unwrap().public().clone()),
-			},
+				versions: vec![DocumentKeyShareVersion {
+					hash: Default::default(),
+					id_numbers: nodes,
+					secret_share: Random.generate().unwrap().secret().clone(),
+				}],
+			}),
 			acl_storage: Arc::new(DummyAclStorage::default()),
 			cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
 			nonce: 0,
-		}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) {
-			Err(Error::InvalidThreshold) => (),
-			_ => panic!("unexpected"),
-		}
+		}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())).unwrap();
+		assert_eq!(session.initialize(Default::default(), Default::default()), Err(Error::ConsensusUnreachable));
 	}
 
 	#[test]
 	fn fails_to_initialize_when_already_initialized() {
 		let (_, sl) = prepare_signing_sessions(1, 3);
-		assert_eq!(sl.master().initialize(777.into()), Ok(()));
-		assert_eq!(sl.master().initialize(777.into()), Err(Error::InvalidStateForRequest));
+		assert_eq!(sl.master().initialize(sl.version.clone(), 777.into()), Ok(()));
+		assert_eq!(sl.master().initialize(sl.version.clone(), 777.into()), Err(Error::InvalidStateForRequest));
 	}
 
 	#[test]
 	fn does_not_fail_when_consensus_message_received_after_consensus_established() {
 		let (_, mut sl) = prepare_signing_sessions(1, 3);
-		sl.master().initialize(777.into()).unwrap();
+		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();
 		// consensus is established
 		sl.run_until(|sl| sl.master().state() == SessionState::SessionKeyGeneration).unwrap();
 		// but 3rd node continues to send its messages
@@ -929,7 +1104,7 @@ mod tests {
 	#[test]
 	fn fails_when_generation_sesson_is_initialized_by_slave_node() {
 		let (_, mut sl) = prepare_signing_sessions(1, 3);
-		sl.master().initialize(777.into()).unwrap();
+		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();
 		sl.run_until(|sl| sl.master().state() == SessionState::SessionKeyGeneration).unwrap();
 
 		let slave2_id = sl.nodes.keys().nth(2).unwrap().clone();
@@ -980,7 +1155,7 @@ mod tests {
 	#[test]
 	fn failed_signing_session() {
 		let (_, mut sl) = prepare_signing_sessions(1, 3);
-		sl.master().initialize(777.into()).unwrap();
+		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();
 
 		// we need at least 2-of-3 nodes to agree to reach consensus
 		// let's say 2 of 3 nodes disagee
@@ -994,7 +1169,7 @@ mod tests {
 	#[test]
 	fn complete_signing_session_with_single_node_failing() {
 		let (_, mut sl) = prepare_signing_sessions(1, 3);
-		sl.master().initialize(777.into()).unwrap();
+		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();
 
 		// we need at least 2-of-3 nodes to agree to reach consensus
 		// let's say 1 of 3 nodes disagee
@@ -1015,7 +1190,7 @@ mod tests {
 	#[test]
 	fn complete_signing_session_with_acl_check_failed_on_master() {
 		let (_, mut sl) = prepare_signing_sessions(1, 3);
-		sl.master().initialize(777.into()).unwrap();
+		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();
 
 		// we need at least 2-of-3 nodes to agree to reach consensus
 		// let's say 1 of 3 nodes disagee
@@ -1047,4 +1222,55 @@ mod tests {
 			}),
 		})), Err(Error::ReplayProtection));
 	}
+
+	#[test]
+	fn signing_works_when_delegated_to_other_node() {
+		let (_, mut sl) = prepare_signing_sessions(1, 3);
+
+		// let's say node1 doesn't have a share && delegates decryption request to node0
+		// initially session is created on node1 => node1 is master for itself, but for other nodes node0 is still master
+		let actual_master = sl.nodes.keys().nth(0).cloned().unwrap();
+		let requested_node = sl.nodes.keys().skip(1).nth(0).cloned().unwrap();
+		let version = sl.nodes[&actual_master].key_storage.get(&Default::default()).unwrap().unwrap().last_version().unwrap().hash.clone();
+		sl.nodes[&requested_node].key_storage.remove(&Default::default()).unwrap();
+		sl.nodes.get_mut(&requested_node).unwrap().session.core.key_share = None;
+		sl.nodes.get_mut(&requested_node).unwrap().session.core.meta.master_node_id = sl.nodes[&requested_node].session.core.meta.self_node_id.clone();
+		sl.nodes[&requested_node].session.data.lock().consensus_session.consensus_job_mut().executor_mut().set_requester_signature(
+			sl.nodes[&actual_master].session.data.lock().consensus_session.consensus_job().executor().requester_signature().unwrap().clone()
+		);
+
+		// now let's try to do a decryption
+		sl.nodes[&requested_node].session.delegate(actual_master, version, Default::default()).unwrap();
+
+		// then consensus reachable, but single node will disagree
+		while let Some((from, to, message)) = sl.take_message() {
+			sl.process_message((from, to, message)).unwrap();
+		}
+	}
+
+	#[test]
+	fn signing_works_when_share_owners_are_isolated() {
+		let (_, mut sl) = prepare_signing_sessions(1, 3);
+
+		// we need 2 out of 3 nodes to agree to do a decryption
+		// let's say that 1 of these nodes (master) is isolated
+		let isolated_node_id = sl.nodes.keys().skip(2).nth(0).cloned().unwrap();
+		for node in sl.nodes.values() {
+			node.cluster.remove_node(&isolated_node_id);
+		}
+
+		// now let's try to do a signing
+		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();
+
+		// then consensus reachable, but single node will disagree
+		while let Some((from, to, message)) = sl.take_message() {
+			sl.process_message((from, to, message)).unwrap();
+		}
+
+		let data = sl.master().data.lock();
+		match data.result {
+			Some(Ok(_)) => (),
+			_ => unreachable!(),
+		}
+	}
 }

(File diff suppressed because it is too large)
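The tests above pick the share version a session runs against via `last_version()` and validate a requested version with `key_share.version(&hash)`. A tiny self-contained illustration of that convention follows; the types here are simplified stand-ins, not the crate's real `DocumentKeyShare`:

```rust
// Simplified stand-in for a versioned key share: an append-only list of versions,
// where sessions either take the latest entry or look a requested hash up first.
#[derive(Debug, PartialEq)]
struct ShareVersion { hash: u64 }

#[derive(Debug)]
struct Share { versions: Vec<ShareVersion> }

impl Share {
    // mirrors last_version(): the newest entry is the one appended last
    fn last_version(&self) -> Option<&ShareVersion> {
        self.versions.last()
    }

    // mirrors key_share.version(&hash): fail if the requested version is unknown
    fn version(&self, hash: u64) -> Result<&ShareVersion, String> {
        self.versions.iter().find(|v| v.hash == hash).ok_or_else(|| format!("unknown version {}", hash))
    }
}

fn main() {
    let share = Share { versions: vec![ShareVersion { hash: 1 }, ShareVersion { hash: 2 }] };
    assert_eq!(share.last_version().map(|v| v.hash), Some(2));
    assert!(share.version(2).is_ok());
    assert!(share.version(5).is_err());
}
```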
@ -16,31 +16,28 @@

 use std::time;
 use std::sync::{Arc, Weak};
-use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::sync::atomic::AtomicBool;
-use std::collections::{VecDeque, BTreeSet, BTreeMap};
+use std::collections::{VecDeque, BTreeMap};
 use parking_lot::{Mutex, RwLock};
+use bigint::hash::H256;
 use ethkey::{Public, Secret, Signature};
-use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, DocumentKeyShare, EncryptedDocumentKeyShadow, SessionMeta};
+use key_server_cluster::{Error, NodeId, SessionId, EncryptedDocumentKeyShadow};
-use key_server_cluster::cluster::{Cluster, ClusterData, ClusterConfiguration};
+use key_server_cluster::cluster::{Cluster, ClusterData, ClusterConfiguration, ClusterView};
-use key_server_cluster::message::{self, Message, GenerationMessage, EncryptionMessage, DecryptionMessage, SigningMessage,
-	ShareAddMessage, ShareMoveMessage, ShareRemoveMessage, ServersSetChangeMessage};
+use key_server_cluster::message::{self, Message};
 use key_server_cluster::generation_session::{Session as GenerationSession, SessionImpl as GenerationSessionImpl,
-	SessionParams as GenerationSessionParams, SessionState as GenerationSessionState};
+	SessionState as GenerationSessionState};
-use key_server_cluster::decryption_session::{Session as DecryptionSession, SessionImpl as DecryptionSessionImpl,
-	DecryptionSessionId, SessionParams as DecryptionSessionParams};
+use key_server_cluster::decryption_session::{Session as DecryptionSession, SessionImpl as DecryptionSessionImpl};
 use key_server_cluster::encryption_session::{Session as EncryptionSession, SessionImpl as EncryptionSessionImpl,
-	SessionParams as EncryptionSessionParams, SessionState as EncryptionSessionState};
+	SessionState as EncryptionSessionState};
-use key_server_cluster::signing_session::{Session as SigningSession, SessionImpl as SigningSessionImpl,
-	SigningSessionId, SessionParams as SigningSessionParams};
+use key_server_cluster::signing_session::{Session as SigningSession, SessionImpl as SigningSessionImpl};
 use key_server_cluster::share_add_session::{Session as ShareAddSession, SessionImpl as ShareAddSessionImpl,
-	SessionParams as ShareAddSessionParams, IsolatedSessionTransport as ShareAddTransport};
+	IsolatedSessionTransport as ShareAddTransport};
-use key_server_cluster::share_move_session::{Session as ShareMoveSession, SessionImpl as ShareMoveSessionImpl,
+use key_server_cluster::servers_set_change_session::{Session as ServersSetChangeSession, SessionImpl as ServersSetChangeSessionImpl};
-	SessionParams as ShareMoveSessionParams, IsolatedSessionTransport as ShareMoveTransport};
+use key_server_cluster::key_version_negotiation_session::{Session as KeyVersionNegotiationSession, SessionImpl as KeyVersionNegotiationSessionImpl,
-use key_server_cluster::share_remove_session::{Session as ShareRemoveSession, SessionImpl as ShareRemoveSessionImpl,
+	IsolatedSessionTransport as VersionNegotiationTransport, ContinueAction};
-	SessionParams as ShareRemoveSessionParams, IsolatedSessionTransport as ShareRemoveTransport};
-use key_server_cluster::servers_set_change_session::{Session as ServersSetChangeSession, SessionImpl as ServersSetChangeSessionImpl,
+use key_server_cluster::cluster_sessions_creator::{GenerationSessionCreator, EncryptionSessionCreator, DecryptionSessionCreator, SigningSessionCreator,
-	SessionParams as ServersSetChangeSessionParams};
+	KeyVersionNegotiationSessionCreator, AdminSessionCreator, SessionCreatorCore, ClusterSessionCreator};
-use key_server_cluster::admin_sessions::ShareChangeSessionMeta;

 /// When there are no session-related messages for SESSION_TIMEOUT_INTERVAL seconds,
 /// we must treat this session as stalled && finish it with an error.
@ -52,81 +49,91 @@ const SESSION_KEEP_ALIVE_INTERVAL: u64 = 30;
|
|||||||
|
|
||||||
lazy_static! {
|
lazy_static! {
|
||||||
/// Servers set change session id (there could be at most 1 session => hardcoded id).
|
/// Servers set change session id (there could be at most 1 session => hardcoded id).
|
||||||
static ref SERVERS_SET_CHANGE_SESSION_ID: SessionId = "10b7af423bb551d5dc8645db754163a2145d37d78d468fa7330435ed77064c1c"
|
pub static ref SERVERS_SET_CHANGE_SESSION_ID: SessionId = "10b7af423bb551d5dc8645db754163a2145d37d78d468fa7330435ed77064c1c"
|
||||||
.parse()
|
.parse()
|
||||||
.expect("hardcoded id should parse without errors; qed");
|
.expect("hardcoded id should parse without errors; qed");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Session id with sub session.
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
|
pub struct SessionIdWithSubSession {
|
||||||
|
/// Key id.
|
||||||
|
pub id: SessionId,
|
||||||
|
/// Sub session id.
|
||||||
|
pub access_key: Secret,
|
||||||
|
}
|
||||||
|
|
||||||
/// Generic cluster session.
|
/// Generic cluster session.
|
||||||
pub trait ClusterSession {
|
pub trait ClusterSession {
|
||||||
|
/// Session identifier type.
|
||||||
|
type Id: Ord + Clone;
|
||||||
|
|
||||||
|
/// Session type name.
|
||||||
|
fn type_name() -> &'static str;
|
||||||
|
/// Get session id.
|
||||||
|
fn id(&self) -> Self::Id;
|
||||||
/// If session is finished (either with success or not).
|
/// If session is finished (either with success or not).
|
||||||
fn is_finished(&self) -> bool;
|
fn is_finished(&self) -> bool;
|
||||||
/// When it takes too much time to complete session.
|
/// When it takes too much time to complete session.
|
||||||
fn on_session_timeout(&self);
|
fn on_session_timeout(&self);
|
||||||
/// When it takes too much time to receive response from the node.
|
/// When it takes too much time to receive response from the node.
|
||||||
fn on_node_timeout(&self, node_id: &NodeId);
|
fn on_node_timeout(&self, node_id: &NodeId);
|
||||||
|
/// Process error that has occurred during session + propagate this error to required nodes.
|
||||||
|
fn on_session_error(&self, sender: &NodeId, error: Error);
|
||||||
|
/// Process session message.
|
||||||
|
fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error>;
|
||||||
}
|
}
|
||||||
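Every session type handled by the cluster implements this trait, so the generic container defined below can start, drive and tear sessions down without knowing their concrete type. A minimal sketch of an implementor (not part of this diff), assuming a toy no-op session and only the trait surface shown above; real sessions carry consensus state, key shares, and transports:

use std::sync::atomic::{AtomicBool, Ordering};

struct NoOpSession { id: SessionId, finished: AtomicBool }

impl ClusterSession for NoOpSession {
	type Id = SessionId;

	fn type_name() -> &'static str { "no-op" }
	fn id(&self) -> SessionId { self.id.clone() }
	fn is_finished(&self) -> bool { self.finished.load(Ordering::Relaxed) }
	// Every failure path simply marks the toy session as finished.
	fn on_session_timeout(&self) { self.finished.store(true, Ordering::Relaxed) }
	fn on_node_timeout(&self, _node: &NodeId) { self.finished.store(true, Ordering::Relaxed) }
	fn on_session_error(&self, _sender: &NodeId, _error: Error) { self.finished.store(true, Ordering::Relaxed) }
	fn on_message(&self, _sender: &NodeId, _message: &Message) -> Result<(), Error> { Ok(()) }
}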
|
|
||||||
/// Administrative session.
|
/// Administrative session.
|
||||||
pub enum AdminSession {
|
pub enum AdminSession {
|
||||||
/// Share add session.
|
/// Share add session.
|
||||||
ShareAdd(ShareAddSessionImpl<ShareAddTransport>),
|
ShareAdd(ShareAddSessionImpl<ShareAddTransport>),
|
||||||
/// Share move session.
|
|
||||||
ShareMove(ShareMoveSessionImpl<ShareMoveTransport>),
|
|
||||||
/// Share remove session.
|
|
||||||
ShareRemove(ShareRemoveSessionImpl<ShareRemoveTransport>),
|
|
||||||
/// Servers set change session.
|
/// Servers set change session.
|
||||||
ServersSetChange(ServersSetChangeSessionImpl),
|
ServersSetChange(ServersSetChangeSessionImpl),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Administrative session creation data.
|
||||||
|
pub enum AdminSessionCreationData {
|
||||||
|
/// Share add session.
|
||||||
|
ShareAdd(H256),
|
||||||
|
/// Servers set change session.
|
||||||
|
ServersSetChange,
|
||||||
|
}
|
||||||
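The creation data is what the master node (or an incoming initialization message) hands to the admin session creator to pick the concrete admin session: ShareAdd carries an H256 value (presumably the key version involved, given the versioned-keys changes in this PR), while ServersSetChange needs nothing extra. A hedged usage sketch, mirroring the container insert call used in the tests at the end of this file; everything except the enum and the insert signature is illustrative:

// Share-add: exclusive admin session bound to one key id (inside a fn returning Result<_, Error>).
let data = AdminSessionCreationData::ShareAdd(version);
let _session = sessions.admin_sessions.insert(cluster.clone(), master, key_id, None, true, Some(data))?;

// Servers-set-change: no extra data; the hardcoded SERVERS_SET_CHANGE_SESSION_ID serves as session id.
let _data = AdminSessionCreationData::ServersSetChange;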
|
|
||||||
/// Active sessions on this cluster.
|
/// Active sessions on this cluster.
|
||||||
pub struct ClusterSessions {
|
pub struct ClusterSessions {
|
||||||
/// Key generation sessions.
|
/// Key generation sessions.
|
||||||
pub generation_sessions: ClusterSessionsContainer<SessionId, GenerationSessionImpl, GenerationMessage>,
|
pub generation_sessions: ClusterSessionsContainer<GenerationSessionImpl, GenerationSessionCreator, ()>,
|
||||||
/// Encryption sessions.
|
/// Encryption sessions.
|
||||||
pub encryption_sessions: ClusterSessionsContainer<SessionId, EncryptionSessionImpl, EncryptionMessage>,
|
pub encryption_sessions: ClusterSessionsContainer<EncryptionSessionImpl, EncryptionSessionCreator, ()>,
|
||||||
/// Decryption sessions.
|
/// Decryption sessions.
|
||||||
pub decryption_sessions: ClusterSessionsContainer<DecryptionSessionId, DecryptionSessionImpl, DecryptionMessage>,
|
pub decryption_sessions: ClusterSessionsContainer<DecryptionSessionImpl, DecryptionSessionCreator, Signature>,
|
||||||
/// Signing sessions.
|
/// Signing sessions.
|
||||||
pub signing_sessions: ClusterSessionsContainer<SigningSessionId, SigningSessionImpl, SigningMessage>,
|
pub signing_sessions: ClusterSessionsContainer<SigningSessionImpl, SigningSessionCreator, Signature>,
|
||||||
|
/// Key version negotiation sessions.
|
||||||
|
pub negotiation_sessions: ClusterSessionsContainer<KeyVersionNegotiationSessionImpl<VersionNegotiationTransport>, KeyVersionNegotiationSessionCreator, ()>,
|
||||||
/// Administrative sessions.
|
/// Administrative sessions.
|
||||||
pub admin_sessions: ClusterSessionsContainer<SessionId, AdminSession, Message>,
|
pub admin_sessions: ClusterSessionsContainer<AdminSession, AdminSessionCreator, AdminSessionCreationData>,
|
||||||
/// Self node id.
|
/// Self node id.
|
||||||
self_node_id: NodeId,
|
self_node_id: NodeId,
|
||||||
/// All nodes ids.
|
/// Creator core.
|
||||||
nodes: BTreeSet<NodeId>,
|
creator_core: Arc<SessionCreatorCore>,
|
||||||
/// Reference to key storage
|
|
||||||
key_storage: Arc<KeyStorage>,
|
|
||||||
/// Reference to ACL storage
|
|
||||||
acl_storage: Arc<AclStorage>,
|
|
||||||
/// Administrator public.
|
|
||||||
admin_public: Option<Public>,
|
|
||||||
/// Make faulty generation sessions.
|
|
||||||
make_faulty_generation_sessions: AtomicBool,
|
|
||||||
/// Always-increasing sessions counter. Is used as session nonce to prevent replay attacks:
|
|
||||||
/// 1) during handshake, KeyServers generate new random key to encrypt messages
|
|
||||||
/// => there's no way to use messages from previous connections for replay attacks
|
|
||||||
/// 2) when session (of any type) is started, master node increases its own session counter and broadcasts it
|
|
||||||
/// 3) when slave KeyServer receives session initialization message, it checks that new nonce is larger than previous (from the same master)
|
|
||||||
/// => there's no way to use messages from previous sessions for replay attacks
|
|
||||||
/// 4) KeyServer checks that each session message contains the same nonce that initialization message
|
|
||||||
/// Given that: (A) handshake is secure and (B) session itself is initially replay-protected
|
|
||||||
/// => this guarantees that sessions are replay-protected.
|
|
||||||
session_counter: AtomicUsize,
|
|
||||||
/// Maximal session nonce, received from given connection.
|
|
||||||
max_nonce: RwLock<BTreeMap<NodeId, u64>>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Active sessions container.
|
/// Active sessions container.
|
||||||
pub struct ClusterSessionsContainer<K, V, M> {
|
pub struct ClusterSessionsContainer<S: ClusterSession, SC: ClusterSessionCreator<S, D>, D> {
|
||||||
|
/// Sessions creator.
|
||||||
|
pub creator: SC,
|
||||||
/// Active sessions.
|
/// Active sessions.
|
||||||
pub sessions: RwLock<BTreeMap<K, QueuedSession<V, M>>>,
|
sessions: RwLock<BTreeMap<S::Id, QueuedSession<S>>>,
|
||||||
/// Sessions container state.
|
/// Sessions container state.
|
||||||
container_state: Arc<Mutex<ClusterSessionsContainerState>>
|
container_state: Arc<Mutex<ClusterSessionsContainerState>>,
|
||||||
|
/// Phantom data.
|
||||||
|
_pd: ::std::marker::PhantomData<D>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Session and its message queue.
|
/// Session and its message queue.
|
||||||
pub struct QueuedSession<V, M> {
|
pub struct QueuedSession<S> {
|
||||||
/// Session master.
|
/// Session master.
|
||||||
pub master: NodeId,
|
pub master: NodeId,
|
||||||
/// Cluster view.
|
/// Cluster view.
|
||||||
@ -136,9 +143,9 @@ pub struct QueuedSession<V, M> {
|
|||||||
/// Last received message time.
|
/// Last received message time.
|
||||||
pub last_message_time: time::Instant,
|
pub last_message_time: time::Instant,
|
||||||
/// Cluster session.
|
/// Cluster session.
|
||||||
pub session: Arc<V>,
|
pub session: Arc<S>,
|
||||||
/// Messages queue.
|
/// Messages queue.
|
||||||
pub queue: VecDeque<(NodeId, M)>,
|
pub queue: VecDeque<(NodeId, Message)>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Cluster sessions container state.
|
/// Cluster sessions container state.
|
||||||
@ -177,7 +184,7 @@ pub struct DecryptionSessionWrapper {
|
|||||||
/// Wrapped session.
|
/// Wrapped session.
|
||||||
session: Arc<DecryptionSession>,
|
session: Arc<DecryptionSession>,
|
||||||
/// Session Id.
|
/// Session Id.
|
||||||
session_id: DecryptionSessionId,
|
session_id: SessionIdWithSubSession,
|
||||||
/// Cluster data reference.
|
/// Cluster data reference.
|
||||||
cluster: Weak<ClusterData>,
|
cluster: Weak<ClusterData>,
|
||||||
}
|
}
|
||||||
@ -187,7 +194,7 @@ pub struct SigningSessionWrapper {
|
|||||||
/// Wrapped session.
|
/// Wrapped session.
|
||||||
session: Arc<SigningSession>,
|
session: Arc<SigningSession>,
|
||||||
/// Session Id.
|
/// Session Id.
|
||||||
session_id: SigningSessionId,
|
session_id: SessionIdWithSubSession,
|
||||||
/// Cluster data reference.
|
/// Cluster data reference.
|
||||||
cluster: Weak<ClusterData>,
|
cluster: Weak<ClusterData>,
|
||||||
}
|
}
|
||||||
@ -202,30 +209,50 @@ pub struct AdminSessionWrapper {
|
|||||||
cluster: Weak<ClusterData>,
|
cluster: Weak<ClusterData>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Key server version negotiation session implementation, which removes session from cluster on drop.
|
||||||
|
pub struct KeyNegotiationSessionWrapper {
|
||||||
|
/// Wrapped session.
|
||||||
|
session: Arc<KeyVersionNegotiationSession>,
|
||||||
|
/// Session Id.
|
||||||
|
session_id: SessionIdWithSubSession,
|
||||||
|
/// Cluster data reference.
|
||||||
|
cluster: Weak<ClusterData>,
|
||||||
|
}
|
||||||
|
|
||||||
impl ClusterSessions {
|
impl ClusterSessions {
|
||||||
/// Create new cluster sessions container.
|
/// Create new cluster sessions container.
|
||||||
pub fn new(config: &ClusterConfiguration) -> Self {
|
pub fn new(config: &ClusterConfiguration) -> Self {
|
||||||
let container_state = Arc::new(Mutex::new(ClusterSessionsContainerState::Idle));
|
let container_state = Arc::new(Mutex::new(ClusterSessionsContainerState::Idle));
|
||||||
|
let creator_core = Arc::new(SessionCreatorCore::new(config));
|
||||||
ClusterSessions {
|
ClusterSessions {
|
||||||
self_node_id: config.self_key_pair.public().clone(),
|
self_node_id: config.self_key_pair.public().clone(),
|
||||||
nodes: config.key_server_set.get().keys().cloned().collect(),
|
generation_sessions: ClusterSessionsContainer::new(GenerationSessionCreator {
|
||||||
acl_storage: config.acl_storage.clone(),
|
core: creator_core.clone(),
|
||||||
key_storage: config.key_storage.clone(),
|
make_faulty_generation_sessions: AtomicBool::new(false),
|
||||||
admin_public: config.admin_public.clone(),
|
}, container_state.clone()),
|
||||||
generation_sessions: ClusterSessionsContainer::new(container_state.clone()),
|
encryption_sessions: ClusterSessionsContainer::new(EncryptionSessionCreator {
|
||||||
encryption_sessions: ClusterSessionsContainer::new(container_state.clone()),
|
core: creator_core.clone(),
|
||||||
decryption_sessions: ClusterSessionsContainer::new(container_state.clone()),
|
}, container_state.clone()),
|
||||||
signing_sessions: ClusterSessionsContainer::new(container_state.clone()),
|
decryption_sessions: ClusterSessionsContainer::new(DecryptionSessionCreator {
|
||||||
admin_sessions: ClusterSessionsContainer::new(container_state),
|
core: creator_core.clone(),
|
||||||
make_faulty_generation_sessions: AtomicBool::new(false),
|
}, container_state.clone()),
|
||||||
session_counter: AtomicUsize::new(0),
|
signing_sessions: ClusterSessionsContainer::new(SigningSessionCreator {
|
||||||
max_nonce: RwLock::new(BTreeMap::new()),
|
core: creator_core.clone(),
|
||||||
|
}, container_state.clone()),
|
||||||
|
negotiation_sessions: ClusterSessionsContainer::new(KeyVersionNegotiationSessionCreator {
|
||||||
|
core: creator_core.clone(),
|
||||||
|
}, container_state.clone()),
|
||||||
|
admin_sessions: ClusterSessionsContainer::new(AdminSessionCreator {
|
||||||
|
core: creator_core.clone(),
|
||||||
|
admin_public: config.admin_public.clone(),
|
||||||
|
}, container_state),
|
||||||
|
creator_core: creator_core,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
pub fn make_faulty_generation_sessions(&self) {
|
pub fn make_faulty_generation_sessions(&self) {
|
||||||
self.make_faulty_generation_sessions.store(true, Ordering::Relaxed);
|
self.generation_sessions.creator.make_faulty_generation_sessions();
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Send session-level keep-alive messages.
|
/// Send session-level keep-alive messages.
|
||||||
@ -240,296 +267,13 @@ impl ClusterSessions {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Create new generation session.
|
|
||||||
pub fn new_generation_session(&self, master: NodeId, session_id: SessionId, nonce: Option<u64>, cluster: Arc<Cluster>) -> Result<Arc<GenerationSessionImpl>, Error> {
|
|
||||||
// check that there's no finished encryption session with the same id
|
|
||||||
if self.key_storage.contains(&session_id) {
|
|
||||||
return Err(Error::DuplicateSessionId);
|
|
||||||
}
|
|
||||||
|
|
||||||
// communicating to all other nodes is crucial for encryption session
|
|
||||||
// => check that we have connections to all cluster nodes
|
|
||||||
if self.nodes.iter().any(|n| !cluster.is_connected(n)) {
|
|
||||||
return Err(Error::NodeDisconnected);
|
|
||||||
}
|
|
||||||
|
|
||||||
// check that there's no active encryption session with the same id
|
|
||||||
let nonce = self.check_session_nonce(&master, nonce)?;
|
|
||||||
self.generation_sessions.insert(master, session_id, cluster.clone(), false, move ||
|
|
||||||
Ok(GenerationSessionImpl::new(GenerationSessionParams {
|
|
||||||
id: session_id.clone(),
|
|
||||||
self_node_id: self.self_node_id.clone(),
|
|
||||||
key_storage: Some(self.key_storage.clone()),
|
|
||||||
cluster: cluster,
|
|
||||||
nonce: Some(nonce),
|
|
||||||
})))
|
|
||||||
.map(|session| {
|
|
||||||
if self.make_faulty_generation_sessions.load(Ordering::Relaxed) {
|
|
||||||
session.simulate_faulty_behaviour();
|
|
||||||
}
|
|
||||||
session
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Send generation session error.
|
|
||||||
pub fn respond_with_generation_error(&self, session_id: &SessionId, error: message::SessionError) {
|
|
||||||
self.generation_sessions.sessions.read().get(session_id)
|
|
||||||
.map(|s| {
|
|
||||||
// error in generation session is considered fatal
|
|
||||||
// => broadcast error
|
|
||||||
|
|
||||||
// do not bother processing send error, as we already processing error
|
|
||||||
let _ = s.cluster_view.broadcast(Message::Generation(GenerationMessage::SessionError(error)));
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create new encryption session.
|
|
||||||
pub fn new_encryption_session(&self, master: NodeId, session_id: SessionId, nonce: Option<u64>, cluster: Arc<Cluster>) -> Result<Arc<EncryptionSessionImpl>, Error> {
|
|
||||||
let encrypted_data = self.read_key_share(&session_id, &cluster)?;
|
|
||||||
let nonce = self.check_session_nonce(&master, nonce)?;
|
|
||||||
|
|
||||||
self.encryption_sessions.insert(master, session_id, cluster.clone(), false, move || EncryptionSessionImpl::new(EncryptionSessionParams {
|
|
||||||
id: session_id.clone(),
|
|
||||||
self_node_id: self.self_node_id.clone(),
|
|
||||||
encrypted_data: encrypted_data,
|
|
||||||
key_storage: self.key_storage.clone(),
|
|
||||||
cluster: cluster,
|
|
||||||
nonce: nonce,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Send encryption session error.
|
|
||||||
pub fn respond_with_encryption_error(&self, session_id: &SessionId, error: message::EncryptionSessionError) {
|
|
||||||
self.encryption_sessions.sessions.read().get(session_id)
|
|
||||||
.map(|s| {
|
|
||||||
// error in encryption session is considered fatal
|
|
||||||
// => broadcast error
|
|
||||||
|
|
||||||
// do not bother processing send error, as we already processing error
|
|
||||||
let _ = s.cluster_view.broadcast(Message::Encryption(EncryptionMessage::EncryptionSessionError(error)));
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create new decryption session.
|
|
||||||
pub fn new_decryption_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, nonce: Option<u64>, cluster: Arc<Cluster>, requester_signature: Option<Signature>) -> Result<Arc<DecryptionSessionImpl>, Error> {
|
|
||||||
let session_id = DecryptionSessionId::new(session_id, sub_session_id);
|
|
||||||
let encrypted_data = self.read_key_share(&session_id.id, &cluster)?;
|
|
||||||
let nonce = self.check_session_nonce(&master, nonce)?;
|
|
||||||
|
|
||||||
self.decryption_sessions.insert(master, session_id.clone(), cluster.clone(), false, move || DecryptionSessionImpl::new(DecryptionSessionParams {
|
|
||||||
meta: SessionMeta {
|
|
||||||
id: session_id.id,
|
|
||||||
self_node_id: self.self_node_id.clone(),
|
|
||||||
master_node_id: master,
|
|
||||||
threshold: encrypted_data.threshold,
|
|
||||||
},
|
|
||||||
access_key: session_id.access_key,
|
|
||||||
key_share: encrypted_data,
|
|
||||||
acl_storage: self.acl_storage.clone(),
|
|
||||||
cluster: cluster,
|
|
||||||
nonce: nonce,
|
|
||||||
}, requester_signature))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Send decryption session error.
|
|
||||||
pub fn respond_with_decryption_error(&self, session_id: &SessionId, sub_session_id: &Secret, to: &NodeId, error: message::DecryptionSessionError) {
|
|
||||||
let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone());
|
|
||||||
self.decryption_sessions.sessions.read().get(&session_id)
|
|
||||||
.map(|s| {
|
|
||||||
// error in decryption session is non-fatal, if occurs on slave node
|
|
||||||
// => either respond with error
|
|
||||||
// => or broadcast error
|
|
||||||
|
|
||||||
// do not bother processing send error, as we already processing error
|
|
||||||
if s.master == self.self_node_id {
|
|
||||||
let _ = s.cluster_view.broadcast(Message::Decryption(DecryptionMessage::DecryptionSessionError(error)));
|
|
||||||
} else {
|
|
||||||
let _ = s.cluster_view.send(to, Message::Decryption(DecryptionMessage::DecryptionSessionError(error)));
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create new signing session.
|
|
||||||
pub fn new_signing_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, nonce: Option<u64>, cluster: Arc<Cluster>, requester_signature: Option<Signature>) -> Result<Arc<SigningSessionImpl>, Error> {
|
|
||||||
let session_id = SigningSessionId::new(session_id, sub_session_id);
|
|
||||||
let encrypted_data = self.read_key_share(&session_id.id, &cluster)?;
|
|
||||||
let nonce = self.check_session_nonce(&master, nonce)?;
|
|
||||||
|
|
||||||
self.signing_sessions.insert(master, session_id.clone(), cluster.clone(), false, move || SigningSessionImpl::new(SigningSessionParams {
|
|
||||||
meta: SessionMeta {
|
|
||||||
id: session_id.id,
|
|
||||||
self_node_id: self.self_node_id.clone(),
|
|
||||||
master_node_id: master,
|
|
||||||
threshold: encrypted_data.threshold,
|
|
||||||
},
|
|
||||||
access_key: session_id.access_key,
|
|
||||||
key_share: encrypted_data,
|
|
||||||
acl_storage: self.acl_storage.clone(),
|
|
||||||
cluster: cluster,
|
|
||||||
nonce: nonce,
|
|
||||||
}, requester_signature))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Send signing session error.
|
|
||||||
pub fn respond_with_signing_error(&self, session_id: &SessionId, sub_session_id: &Secret, to: &NodeId, error: message::SigningSessionError) {
|
|
||||||
let session_id = SigningSessionId::new(session_id.clone(), sub_session_id.clone());
|
|
||||||
self.signing_sessions.sessions.read().get(&session_id)
|
|
||||||
.map(|s| {
|
|
||||||
// error in signing session is non-fatal, if occurs on slave node
|
|
||||||
// => either respond with error
|
|
||||||
// => or broadcast error
|
|
||||||
|
|
||||||
// do not bother processing send error, as we already processing error
|
|
||||||
if s.master == self.self_node_id {
|
|
||||||
let _ = s.cluster_view.broadcast(Message::Signing(SigningMessage::SigningSessionError(error)));
|
|
||||||
} else {
|
|
||||||
let _ = s.cluster_view.send(to, Message::Signing(SigningMessage::SigningSessionError(error)));
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create new share add session.
|
|
||||||
pub fn new_share_add_session(&self, master: NodeId, session_id: SessionId, nonce: Option<u64>, cluster: Arc<Cluster>) -> Result<Arc<AdminSession>, Error> {
|
|
||||||
let nonce = self.check_session_nonce(&master, nonce)?;
|
|
||||||
let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?;
|
|
||||||
|
|
||||||
self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), false, move || ShareAddSessionImpl::new(ShareAddSessionParams {
|
|
||||||
meta: ShareChangeSessionMeta {
|
|
||||||
id: session_id,
|
|
||||||
self_node_id: self.self_node_id.clone(),
|
|
||||||
master_node_id: master,
|
|
||||||
},
|
|
||||||
transport: ShareAddTransport::new(session_id.clone(), nonce, cluster),
|
|
||||||
key_storage: self.key_storage.clone(),
|
|
||||||
admin_public: Some(admin_public),
|
|
||||||
nonce: nonce,
|
|
||||||
}).map(AdminSession::ShareAdd))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Send share add session error.
|
|
||||||
pub fn respond_with_share_add_error(&self, session_id: &SessionId, error: message::ShareAddError) {
|
|
||||||
self.admin_sessions.sessions.read().get(&session_id)
|
|
||||||
.map(|s| {
|
|
||||||
// error in any share change session is considered fatal
|
|
||||||
// => broadcast error
|
|
||||||
|
|
||||||
// do not bother processing send error, as we already processing error
|
|
||||||
let _ = s.cluster_view.broadcast(Message::ShareAdd(ShareAddMessage::ShareAddError(error)));
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create new share move session.
|
|
||||||
pub fn new_share_move_session(&self, master: NodeId, session_id: SessionId, nonce: Option<u64>, cluster: Arc<Cluster>) -> Result<Arc<AdminSession>, Error> {
|
|
||||||
let nonce = self.check_session_nonce(&master, nonce)?;
|
|
||||||
let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?;
|
|
||||||
|
|
||||||
self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), false, move || ShareMoveSessionImpl::new(ShareMoveSessionParams {
|
|
||||||
meta: ShareChangeSessionMeta {
|
|
||||||
id: session_id,
|
|
||||||
self_node_id: self.self_node_id.clone(),
|
|
||||||
master_node_id: master,
|
|
||||||
},
|
|
||||||
transport: ShareMoveTransport::new(session_id.clone(), nonce, cluster),
|
|
||||||
key_storage: self.key_storage.clone(),
|
|
||||||
admin_public: Some(admin_public),
|
|
||||||
nonce: nonce,
|
|
||||||
}).map(AdminSession::ShareMove))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Send share move session error.
|
|
||||||
pub fn respond_with_share_move_error(&self, session_id: &SessionId, error: message::ShareMoveError) {
|
|
||||||
self.admin_sessions.sessions.read().get(&session_id)
|
|
||||||
.map(|s| {
|
|
||||||
// error in any share change session is considered fatal
|
|
||||||
// => broadcast error
|
|
||||||
|
|
||||||
// do not bother processing send error, as we already processing error
|
|
||||||
let _ = s.cluster_view.broadcast(Message::ShareMove(ShareMoveMessage::ShareMoveError(error)));
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create new share remove session.
|
|
||||||
pub fn new_share_remove_session(&self, master: NodeId, session_id: SessionId, nonce: Option<u64>, cluster: Arc<Cluster>, all_nodes_set: BTreeSet<NodeId>) -> Result<Arc<AdminSession>, Error> {
|
|
||||||
let nonce = self.check_session_nonce(&master, nonce)?;
|
|
||||||
let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?;
|
|
||||||
|
|
||||||
self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), false, move || ShareRemoveSessionImpl::new(ShareRemoveSessionParams {
|
|
||||||
meta: ShareChangeSessionMeta {
|
|
||||||
id: session_id,
|
|
||||||
self_node_id: self.self_node_id.clone(),
|
|
||||||
master_node_id: master,
|
|
||||||
},
|
|
||||||
cluster_nodes_set: all_nodes_set,
|
|
||||||
transport: ShareRemoveTransport::new(session_id.clone(), nonce, cluster),
|
|
||||||
key_storage: self.key_storage.clone(),
|
|
||||||
admin_public: Some(admin_public),
|
|
||||||
nonce: nonce,
|
|
||||||
}).map(AdminSession::ShareRemove))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Send share remove session error.
|
|
||||||
pub fn respond_with_share_remove_error(&self, session_id: &SessionId, error: message::ShareRemoveError) {
|
|
||||||
self.admin_sessions.sessions.read().get(&session_id)
|
|
||||||
.map(|s| {
|
|
||||||
// error in any share change session is considered fatal
|
|
||||||
// => broadcast error
|
|
||||||
|
|
||||||
// do not bother processing send error, as we already processing error
|
|
||||||
let _ = s.cluster_view.broadcast(Message::ShareRemove(ShareRemoveMessage::ShareRemoveError(error)));
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create new servers set change session.
|
|
||||||
pub fn new_servers_set_change_session(&self, master: NodeId, session_id: Option<SessionId>, nonce: Option<u64>, cluster: Arc<Cluster>, all_nodes_set: BTreeSet<NodeId>) -> Result<Arc<AdminSession>, Error> {
|
|
||||||
// communicating to all other nodes is crucial for ServersSetChange session
|
|
||||||
// => check that we have connections to all cluster nodes
|
|
||||||
if self.nodes.iter().any(|n| !cluster.is_connected(n)) {
|
|
||||||
return Err(Error::NodeDisconnected);
|
|
||||||
}
|
|
||||||
|
|
||||||
let session_id = match session_id {
|
|
||||||
Some(session_id) => if session_id == *SERVERS_SET_CHANGE_SESSION_ID {
|
|
||||||
session_id
|
|
||||||
} else {
|
|
||||||
return Err(Error::InvalidMessage)
|
|
||||||
},
|
|
||||||
None => (*SERVERS_SET_CHANGE_SESSION_ID).clone(),
|
|
||||||
};
|
|
||||||
let nonce = self.check_session_nonce(&master, nonce)?;
|
|
||||||
let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?;
|
|
||||||
|
|
||||||
self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), true, move || ServersSetChangeSessionImpl::new(ServersSetChangeSessionParams {
|
|
||||||
meta: ShareChangeSessionMeta {
|
|
||||||
id: session_id,
|
|
||||||
self_node_id: self.self_node_id.clone(),
|
|
||||||
master_node_id: master,
|
|
||||||
},
|
|
||||||
cluster: cluster,
|
|
||||||
key_storage: self.key_storage.clone(),
|
|
||||||
admin_public: admin_public,
|
|
||||||
nonce: nonce,
|
|
||||||
all_nodes_set: all_nodes_set,
|
|
||||||
}).map(AdminSession::ServersSetChange))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Send share remove session error.
|
|
||||||
pub fn respond_with_servers_set_change_error(&self, session_id: &SessionId, error: message::ServersSetChangeError) {
|
|
||||||
self.admin_sessions.sessions.read().get(&session_id)
|
|
||||||
.map(|s| {
|
|
||||||
// error in any share change session is considered fatal
|
|
||||||
// => broadcast error
|
|
||||||
|
|
||||||
// do not bother processing send error, as we already processing error
|
|
||||||
let _ = s.cluster_view.broadcast(Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(error)));
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Stop sessions that are stalling.
|
/// Stop sessions that are stalling.
|
||||||
pub fn stop_stalled_sessions(&self) {
|
pub fn stop_stalled_sessions(&self) {
|
||||||
self.generation_sessions.stop_stalled_sessions();
|
self.generation_sessions.stop_stalled_sessions();
|
||||||
self.encryption_sessions.stop_stalled_sessions();
|
self.encryption_sessions.stop_stalled_sessions();
|
||||||
self.decryption_sessions.stop_stalled_sessions();
|
self.decryption_sessions.stop_stalled_sessions();
|
||||||
self.signing_sessions.stop_stalled_sessions();
|
self.signing_sessions.stop_stalled_sessions();
|
||||||
|
self.negotiation_sessions.stop_stalled_sessions();
|
||||||
self.admin_sessions.stop_stalled_sessions();
|
self.admin_sessions.stop_stalled_sessions();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -539,44 +283,19 @@ impl ClusterSessions {
|
|||||||
self.encryption_sessions.on_connection_timeout(node_id);
|
self.encryption_sessions.on_connection_timeout(node_id);
|
||||||
self.decryption_sessions.on_connection_timeout(node_id);
|
self.decryption_sessions.on_connection_timeout(node_id);
|
||||||
self.signing_sessions.on_connection_timeout(node_id);
|
self.signing_sessions.on_connection_timeout(node_id);
|
||||||
|
self.negotiation_sessions.on_connection_timeout(node_id);
|
||||||
self.admin_sessions.on_connection_timeout(node_id);
|
self.admin_sessions.on_connection_timeout(node_id);
|
||||||
self.max_nonce.write().remove(node_id);
|
self.creator_core.on_connection_timeout(node_id);
|
||||||
}
|
|
||||||
|
|
||||||
/// Read key share && remove disconnected nodes.
|
|
||||||
fn read_key_share(&self, key_id: &SessionId, cluster: &Arc<Cluster>) -> Result<DocumentKeyShare, Error> {
|
|
||||||
let mut encrypted_data = self.key_storage.get(key_id).map_err(|e| Error::KeyStorage(e.into()))?;
|
|
||||||
|
|
||||||
// some of nodes, which were encrypting secret may be down
|
|
||||||
// => do not use these in session
|
|
||||||
let disconnected_nodes: BTreeSet<_> = encrypted_data.id_numbers.keys().cloned().collect();
|
|
||||||
for disconnected_node in disconnected_nodes.difference(&cluster.nodes()) {
|
|
||||||
encrypted_data.id_numbers.remove(&disconnected_node);
|
|
||||||
}
|
|
||||||
Ok(encrypted_data)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Check or generate new session nonce.
|
|
||||||
fn check_session_nonce(&self, master: &NodeId, nonce: Option<u64>) -> Result<u64, Error> {
|
|
||||||
// if we're master node of the session, then nonce should be generated
|
|
||||||
// if we're slave node of the session, then nonce should be passed from outside
|
|
||||||
debug_assert!((master == &self.self_node_id) == nonce.is_none());
|
|
||||||
|
|
||||||
match nonce {
|
|
||||||
Some(nonce) => match nonce > *self.max_nonce.write().entry(master.clone()).or_insert(0) {
|
|
||||||
true => Ok(nonce),
|
|
||||||
false => Err(Error::ReplayProtection),
|
|
||||||
},
|
|
||||||
None => Ok(self.session_counter.fetch_add(1, Ordering::Relaxed) as u64 + 1),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: ClusterSession {
|
impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: ClusterSessionCreator<S, D> {
|
||||||
pub fn new(container_state: Arc<Mutex<ClusterSessionsContainerState>>) -> Self {
|
pub fn new(creator: SC, container_state: Arc<Mutex<ClusterSessionsContainerState>>) -> Self {
|
||||||
ClusterSessionsContainer {
|
ClusterSessionsContainer {
|
||||||
|
creator: creator,
|
||||||
sessions: RwLock::new(BTreeMap::new()),
|
sessions: RwLock::new(BTreeMap::new()),
|
||||||
container_state: container_state,
|
container_state: container_state,
|
||||||
|
_pd: Default::default(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -584,7 +303,7 @@ impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: Cluster
|
|||||||
self.sessions.read().is_empty()
|
self.sessions.read().is_empty()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get(&self, session_id: &K, update_last_message_time: bool) -> Option<Arc<V>> {
|
pub fn get(&self, session_id: &S::Id, update_last_message_time: bool) -> Option<Arc<S>> {
|
||||||
let mut sessions = self.sessions.write();
|
let mut sessions = self.sessions.write();
|
||||||
sessions.get_mut(session_id)
|
sessions.get_mut(session_id)
|
||||||
.map(|s| {
|
.map(|s| {
|
||||||
@ -595,14 +314,21 @@ impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: Cluster
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn insert<F: FnOnce() -> Result<V, Error>>(&self, master: NodeId, session_id: K, cluster: Arc<Cluster>, is_exclusive_session: bool, session: F) -> Result<Arc<V>, Error> {
|
#[cfg(test)]
|
||||||
|
pub fn first(&self) -> Option<Arc<S>> {
|
||||||
|
self.sessions.read().values().nth(0).map(|s| s.session.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn insert(&self, cluster: Arc<Cluster>, master: NodeId, session_id: S::Id, session_nonce: Option<u64>, is_exclusive_session: bool, creation_data: Option<D>) -> Result<Arc<S>, Error> {
|
||||||
let mut sessions = self.sessions.write();
|
let mut sessions = self.sessions.write();
|
||||||
if sessions.contains_key(&session_id) {
|
if sessions.contains_key(&session_id) {
|
||||||
return Err(Error::DuplicateSessionId);
|
return Err(Error::DuplicateSessionId);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// create cluster
|
||||||
|
// let cluster = create_cluster_view(data, requires_all_connections)?;
|
||||||
// create session
|
// create session
|
||||||
let session = Arc::new(session()?);
|
let session = self.creator.create(cluster.clone(), master.clone(), session_nonce, session_id.clone(), creation_data)?;
|
||||||
// check if session can be started
|
// check if session can be started
|
||||||
self.container_state.lock().on_session_starting(is_exclusive_session)?;
|
self.container_state.lock().on_session_starting(is_exclusive_session)?;
|
||||||
|
|
||||||
@ -619,19 +345,19 @@ impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: Cluster
|
|||||||
Ok(session)
|
Ok(session)
|
||||||
}
|
}
|
||||||
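Compared with the old per-type new_*_session methods removed above, a caller now builds a cluster view and lets the typed container do the rest; the creator encapsulates the nonce check and key-share loading. A condensed caller-side sketch (assumption: the call sites live in cluster.rs, which is not shown in this hunk), using the insert signature above and the create_cluster_view helper defined later in this file:

// Generation must reach every configured node, so request a strict view first.
let cluster = create_cluster_view(&data, true)?;
// Master node: no externally supplied nonce and no extra creation data.
let session = data.sessions().generation_sessions.insert(cluster, master, session_id, None, false, None)?;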
|
|
||||||
pub fn remove(&self, session_id: &K) {
|
pub fn remove(&self, session_id: &S::Id) {
|
||||||
if self.sessions.write().remove(session_id).is_some() {
|
if self.sessions.write().remove(session_id).is_some() {
|
||||||
self.container_state.lock().on_session_completed();
|
self.container_state.lock().on_session_completed();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn enqueue_message(&self, session_id: &K, sender: NodeId, message: M, is_queued_message: bool) {
|
pub fn enqueue_message(&self, session_id: &S::Id, sender: NodeId, message: Message, is_queued_message: bool) {
|
||||||
self.sessions.write().get_mut(session_id)
|
self.sessions.write().get_mut(session_id)
|
||||||
.map(|session| if is_queued_message { session.queue.push_front((sender, message)) }
|
.map(|session| if is_queued_message { session.queue.push_front((sender, message)) }
|
||||||
else { session.queue.push_back((sender, message)) });
|
else { session.queue.push_back((sender, message)) });
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn dequeue_message(&self, session_id: &K) -> Option<(NodeId, M)> {
|
pub fn dequeue_message(&self, session_id: &S::Id) -> Option<(NodeId, Message)> {
|
||||||
self.sessions.write().get_mut(session_id)
|
self.sessions.write().get_mut(session_id)
|
||||||
.and_then(|session| session.queue.pop_front())
|
.and_then(|session| session.queue.pop_front())
|
||||||
}
|
}
|
||||||
@ -670,8 +396,8 @@ impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: Cluster
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: ClusterSession, SessionId: From<K> {
|
impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: ClusterSessionCreator<S, D>, SessionId: From<S::Id> {
|
||||||
pub fn send_keep_alive(&self, session_id: &K, self_node_id: &NodeId) {
|
pub fn send_keep_alive(&self, session_id: &S::Id, self_node_id: &NodeId) {
|
||||||
if let Some(session) = self.sessions.write().get_mut(session_id) {
|
if let Some(session) = self.sessions.write().get_mut(session_id) {
|
||||||
let now = time::Instant::now();
|
let now = time::Instant::now();
|
||||||
if self_node_id == &session.master && now - session.last_keep_alive_time > time::Duration::from_secs(SESSION_KEEP_ALIVE_INTERVAL) {
|
if self_node_id == &session.master && now - session.last_keep_alive_time > time::Duration::from_secs(SESSION_KEEP_ALIVE_INTERVAL) {
|
||||||
@ -686,7 +412,7 @@ impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: Cluster
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn on_keep_alive(&self, session_id: &K, sender: &NodeId) {
|
pub fn on_keep_alive(&self, session_id: &S::Id, sender: &NodeId) {
|
||||||
if let Some(session) = self.sessions.write().get_mut(session_id) {
|
if let Some(session) = self.sessions.write().get_mut(session_id) {
|
||||||
let now = time::Instant::now();
|
let now = time::Instant::now();
|
||||||
// we only accept keep alive from master node of ServersSetChange session
|
// we only accept keep alive from master node of ServersSetChange session
|
||||||
@ -736,28 +462,32 @@ impl ClusterSessionsContainerState {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl SessionIdWithSubSession {
|
||||||
|
/// Create new decryption session Id.
|
||||||
|
pub fn new(session_id: SessionId, sub_session_id: Secret) -> Self {
|
||||||
|
SessionIdWithSubSession {
|
||||||
|
id: session_id,
|
||||||
|
access_key: sub_session_id,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PartialOrd for SessionIdWithSubSession {
|
||||||
|
fn partial_cmp(&self, other: &Self) -> Option<::std::cmp::Ordering> {
|
||||||
|
Some(self.cmp(other))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Ord for SessionIdWithSubSession {
|
||||||
|
fn cmp(&self, other: &Self) -> ::std::cmp::Ordering {
|
||||||
|
match self.id.cmp(&other.id) {
|
||||||
|
::std::cmp::Ordering::Equal => self.access_key.cmp(&other.access_key),
|
||||||
|
r @ _ => r,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
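Decryption, signing and key-version-negotiation sessions are now keyed by this composite id, so several sub-sessions may run against the same key concurrently; the impls above order by key id first and fall back to the access key only on a tie. A small illustration, with id1/id2/access_a/access_b standing in for real SessionId and Secret values:

let a = SessionIdWithSubSession::new(id1.clone(), access_a.clone());
let b = SessionIdWithSubSession::new(id1.clone(), access_b.clone());
let c = SessionIdWithSubSession::new(id2.clone(), access_a.clone());

assert!(a != b);                       // same key, two distinct sub-sessions coexist in one map
assert_eq!(a.cmp(&c), id1.cmp(&id2));  // the key id dominates the ordering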
impl AdminSession {
|
impl AdminSession {
|
||||||
pub fn as_share_add(&self) -> Option<&ShareAddSessionImpl<ShareAddTransport>> {
|
|
||||||
match *self {
|
|
||||||
AdminSession::ShareAdd(ref session) => Some(session),
|
|
||||||
_ => None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn as_share_move(&self) -> Option<&ShareMoveSessionImpl<ShareMoveTransport>> {
|
|
||||||
match *self {
|
|
||||||
AdminSession::ShareMove(ref session) => Some(session),
|
|
||||||
_ => None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn as_share_remove(&self) -> Option<&ShareRemoveSessionImpl<ShareRemoveTransport>> {
|
|
||||||
match *self {
|
|
||||||
AdminSession::ShareRemove(ref session) => Some(session),
|
|
||||||
_ => None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn as_servers_set_change(&self) -> Option<&ServersSetChangeSessionImpl> {
|
pub fn as_servers_set_change(&self) -> Option<&ServersSetChangeSessionImpl> {
|
||||||
match *self {
|
match *self {
|
||||||
AdminSession::ServersSetChange(ref session) => Some(session),
|
AdminSession::ServersSetChange(ref session) => Some(session),
|
||||||
@ -767,11 +497,22 @@ impl AdminSession {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl ClusterSession for AdminSession {
|
impl ClusterSession for AdminSession {
|
||||||
|
type Id = SessionId;
|
||||||
|
|
||||||
|
fn type_name() -> &'static str {
|
||||||
|
"admin"
|
||||||
|
}
|
||||||
|
|
||||||
|
fn id(&self) -> SessionId {
|
||||||
|
match *self {
|
||||||
|
AdminSession::ShareAdd(ref session) => session.id().clone(),
|
||||||
|
AdminSession::ServersSetChange(ref session) => session.id().clone(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn is_finished(&self) -> bool {
|
fn is_finished(&self) -> bool {
|
||||||
match *self {
|
match *self {
|
||||||
AdminSession::ShareAdd(ref session) => session.is_finished(),
|
AdminSession::ShareAdd(ref session) => session.is_finished(),
|
||||||
AdminSession::ShareMove(ref session) => session.is_finished(),
|
|
||||||
AdminSession::ShareRemove(ref session) => session.is_finished(),
|
|
||||||
AdminSession::ServersSetChange(ref session) => session.is_finished(),
|
AdminSession::ServersSetChange(ref session) => session.is_finished(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -779,8 +520,6 @@ impl ClusterSession for AdminSession {
|
|||||||
fn on_session_timeout(&self) {
|
fn on_session_timeout(&self) {
|
||||||
match *self {
|
match *self {
|
||||||
AdminSession::ShareAdd(ref session) => session.on_session_timeout(),
|
AdminSession::ShareAdd(ref session) => session.on_session_timeout(),
|
||||||
AdminSession::ShareMove(ref session) => session.on_session_timeout(),
|
|
||||||
AdminSession::ShareRemove(ref session) => session.on_session_timeout(),
|
|
||||||
AdminSession::ServersSetChange(ref session) => session.on_session_timeout(),
|
AdminSession::ServersSetChange(ref session) => session.on_session_timeout(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -788,11 +527,23 @@ impl ClusterSession for AdminSession {
|
|||||||
fn on_node_timeout(&self, node_id: &NodeId) {
|
fn on_node_timeout(&self, node_id: &NodeId) {
|
||||||
match *self {
|
match *self {
|
||||||
AdminSession::ShareAdd(ref session) => session.on_node_timeout(node_id),
|
AdminSession::ShareAdd(ref session) => session.on_node_timeout(node_id),
|
||||||
AdminSession::ShareMove(ref session) => session.on_node_timeout(node_id),
|
|
||||||
AdminSession::ShareRemove(ref session) => session.on_node_timeout(node_id),
|
|
||||||
AdminSession::ServersSetChange(ref session) => session.on_node_timeout(node_id),
|
AdminSession::ServersSetChange(ref session) => session.on_node_timeout(node_id),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn on_session_error(&self, node: &NodeId, error: Error) {
|
||||||
|
match *self {
|
||||||
|
AdminSession::ShareAdd(ref session) => session.on_session_error(node, error),
|
||||||
|
AdminSession::ServersSetChange(ref session) => session.on_session_error(node, error),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
|
||||||
|
match *self {
|
||||||
|
AdminSession::ShareAdd(ref session) => session.on_message(sender, message),
|
||||||
|
AdminSession::ServersSetChange(ref session) => session.on_message(sender, message),
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GenerationSessionWrapper {
|
impl GenerationSessionWrapper {
|
||||||
@ -856,7 +607,7 @@ impl Drop for EncryptionSessionWrapper {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl DecryptionSessionWrapper {
|
impl DecryptionSessionWrapper {
|
||||||
pub fn new(cluster: Weak<ClusterData>, session_id: DecryptionSessionId, session: Arc<DecryptionSession>) -> Arc<Self> {
|
pub fn new(cluster: Weak<ClusterData>, session_id: SessionIdWithSubSession, session: Arc<DecryptionSession>) -> Arc<Self> {
|
||||||
Arc::new(DecryptionSessionWrapper {
|
Arc::new(DecryptionSessionWrapper {
|
||||||
session: session,
|
session: session,
|
||||||
session_id: session_id,
|
session_id: session_id,
|
||||||
@ -880,7 +631,7 @@ impl Drop for DecryptionSessionWrapper {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl SigningSessionWrapper {
|
impl SigningSessionWrapper {
|
||||||
pub fn new(cluster: Weak<ClusterData>, session_id: SigningSessionId, session: Arc<SigningSession>) -> Arc<Self> {
|
pub fn new(cluster: Weak<ClusterData>, session_id: SessionIdWithSubSession, session: Arc<SigningSession>) -> Arc<Self> {
|
||||||
Arc::new(SigningSessionWrapper {
|
Arc::new(SigningSessionWrapper {
|
||||||
session: session,
|
session: session,
|
||||||
session_id: session_id,
|
session_id: session_id,
|
||||||
@ -922,24 +673,6 @@ impl ShareAddSession for AdminSessionWrapper {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ShareMoveSession for AdminSessionWrapper {
|
|
||||||
fn wait(&self) -> Result<(), Error> {
|
|
||||||
match *self.session {
|
|
||||||
AdminSession::ShareMove(ref session) => session.wait(),
|
|
||||||
_ => Err(Error::InvalidMessage),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ShareRemoveSession for AdminSessionWrapper {
|
|
||||||
fn wait(&self) -> Result<(), Error> {
|
|
||||||
match *self.session {
|
|
||||||
AdminSession::ShareRemove(ref session) => session.wait(),
|
|
||||||
_ => Err(Error::InvalidMessage),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ServersSetChangeSession for AdminSessionWrapper {
|
impl ServersSetChangeSession for AdminSessionWrapper {
|
||||||
fn wait(&self) -> Result<(), Error> {
|
fn wait(&self) -> Result<(), Error> {
|
||||||
match *self.session {
|
match *self.session {
|
||||||
@ -957,15 +690,60 @@ impl Drop for AdminSessionWrapper {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn create_cluster_view(data: &Arc<ClusterData>, requires_all_connections: bool) -> Result<Arc<Cluster>, Error> {
|
||||||
|
if requires_all_connections {
|
||||||
|
if !data.connections.disconnected_nodes().is_empty() {
|
||||||
|
return Err(Error::NodeDisconnected);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut connected_nodes = data.connections.connected_nodes();
|
||||||
|
connected_nodes.insert(data.self_key_pair.public().clone());
|
||||||
|
|
||||||
|
Ok(Arc::new(ClusterView::new(data.clone(), connected_nodes)))
|
||||||
|
}
|
||||||
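create_cluster_view is now the single place where a session obtains its node set: with requires_all_connections it refuses to start anything while a configured node is offline (the old inline checks in new_generation_session and new_servers_set_change_session did the same), otherwise it hands back whatever is currently connected, always including the local node. A short usage sketch under those assumptions:

// Strict view, e.g. for generation or servers-set-change style sessions.
let strict = create_cluster_view(&data, true)?;     // Err(Error::NodeDisconnected) if any node is missing

// Relaxed view: decryption/signing style sessions proceed with the reachable subset.
let relaxed = create_cluster_view(&data, false)?;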
|
|
||||||
|
impl KeyNegotiationSessionWrapper {
|
||||||
|
pub fn new(cluster: Weak<ClusterData>, session_id: SessionIdWithSubSession, session: Arc<KeyVersionNegotiationSession>) -> Arc<Self> {
|
||||||
|
Arc::new(KeyNegotiationSessionWrapper {
|
||||||
|
session: session,
|
||||||
|
session_id: session_id,
|
||||||
|
cluster: cluster,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl KeyVersionNegotiationSession for KeyNegotiationSessionWrapper {
|
||||||
|
fn set_continue_action(&self, action: ContinueAction) {
|
||||||
|
self.session.set_continue_action(action)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn continue_action(&self) -> Option<ContinueAction> {
|
||||||
|
self.session.continue_action()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn wait(&self) -> Result<(H256, NodeId), Error> {
|
||||||
|
self.session.wait()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for KeyNegotiationSessionWrapper {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
if let Some(cluster) = self.cluster.upgrade() {
|
||||||
|
cluster.sessions().negotiation_sessions.remove(&self.session_id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::collections::BTreeSet;
|
|
||||||
use ethkey::{Random, Generator};
|
use ethkey::{Random, Generator};
|
||||||
use key_server_cluster::{Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet, PlainNodeKeyPair};
|
use key_server_cluster::{Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet, PlainNodeKeyPair};
|
||||||
use key_server_cluster::cluster::ClusterConfiguration;
|
use key_server_cluster::cluster::ClusterConfiguration;
|
||||||
use key_server_cluster::cluster::tests::DummyCluster;
|
use key_server_cluster::cluster::tests::DummyCluster;
|
||||||
use super::ClusterSessions;
|
use super::{ClusterSessions, AdminSessionCreationData};
|
||||||
|
|
||||||
pub fn make_cluster_sessions() -> ClusterSessions {
|
pub fn make_cluster_sessions() -> ClusterSessions {
|
||||||
let key_pair = Random.generate().unwrap();
|
let key_pair = Random.generate().unwrap();
|
||||||
@ -985,9 +763,8 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn cluster_session_cannot_be_started_if_exclusive_session_is_active() {
|
fn cluster_session_cannot_be_started_if_exclusive_session_is_active() {
|
||||||
let sessions = make_cluster_sessions();
|
let sessions = make_cluster_sessions();
|
||||||
|
sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None).unwrap();
|
||||||
sessions.new_generation_session(Default::default(), Default::default(), Some(1), Arc::new(DummyCluster::new(sessions.self_node_id.clone()))).unwrap();
|
match sessions.admin_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, true, Some(AdminSessionCreationData::ShareAdd(Default::default()))) {
|
||||||
match sessions.new_servers_set_change_session(Default::default(), None, Some(1), Arc::new(DummyCluster::new(sessions.self_node_id.clone())), BTreeSet::new()) {
|
|
||||||
Err(Error::HasActiveSessions) => (),
|
Err(Error::HasActiveSessions) => (),
|
||||||
Err(e) => unreachable!(format!("{}", e)),
|
Err(e) => unreachable!(format!("{}", e)),
|
||||||
Ok(_) => unreachable!("OK"),
|
Ok(_) => unreachable!("OK"),
|
||||||
@ -998,8 +775,8 @@ mod tests {
|
|||||||
fn exclusive_session_cannot_be_started_if_other_session_is_active() {
|
fn exclusive_session_cannot_be_started_if_other_session_is_active() {
|
||||||
let sessions = make_cluster_sessions();
|
let sessions = make_cluster_sessions();
|
||||||
|
|
||||||
sessions.new_servers_set_change_session(Default::default(), None, Some(1), Arc::new(DummyCluster::new(sessions.self_node_id.clone())), BTreeSet::new()).unwrap();
|
sessions.admin_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, true, Some(AdminSessionCreationData::ShareAdd(Default::default()))).unwrap();
|
||||||
match sessions.new_generation_session(Default::default(), Default::default(), Some(1), Arc::new(DummyCluster::new(sessions.self_node_id.clone()))) {
|
match sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None) {
|
||||||
Err(Error::ExclusiveSessionActive) => (),
|
Err(Error::ExclusiveSessionActive) => (),
|
||||||
Err(e) => unreachable!(format!("{}", e)),
|
Err(e) => unreachable!(format!("{}", e)),
|
||||||
Ok(_) => unreachable!("OK"),
|
Ok(_) => unreachable!("OK"),
|
||||||
secret_store/src/key_server_cluster/cluster_sessions_creator.rs (new file, 423 lines)
@ -0,0 +1,423 @@
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
use parking_lot::RwLock;
|
||||||
|
use ethkey::{Public, Signature};
|
||||||
|
use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, DocumentKeyShare, SessionMeta};
|
||||||
|
use key_server_cluster::cluster::{Cluster, ClusterConfiguration};
|
||||||
|
use key_server_cluster::cluster_sessions::{ClusterSession, SessionIdWithSubSession, AdminSession, AdminSessionCreationData};
|
||||||
|
use key_server_cluster::message::{self, Message, DecryptionMessage, SigningMessage, ConsensusMessageOfShareAdd,
|
||||||
|
ShareAddMessage, ServersSetChangeMessage, ConsensusMessage, ConsensusMessageWithServersSet};
|
||||||
|
use key_server_cluster::generation_session::{SessionImpl as GenerationSessionImpl, SessionParams as GenerationSessionParams};
|
||||||
|
use key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl,
|
||||||
|
SessionParams as DecryptionSessionParams};
|
||||||
|
use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl, SessionParams as EncryptionSessionParams};
|
||||||
|
use key_server_cluster::signing_session::{SessionImpl as SigningSessionImpl,
|
||||||
|
SessionParams as SigningSessionParams};
|
||||||
|
use key_server_cluster::share_add_session::{SessionImpl as ShareAddSessionImpl,
|
||||||
|
SessionParams as ShareAddSessionParams, IsolatedSessionTransport as ShareAddTransport};
|
||||||
|
use key_server_cluster::servers_set_change_session::{SessionImpl as ServersSetChangeSessionImpl,
|
||||||
|
SessionParams as ServersSetChangeSessionParams};
|
||||||
|
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl,
|
||||||
|
SessionParams as KeyVersionNegotiationSessionParams, IsolatedSessionTransport as VersionNegotiationTransport,
|
||||||
|
FastestResultComputer as FastestResultKeyVersionsResultComputer};
|
||||||
|
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
||||||
|
|
||||||
|
/// Generic cluster session creator.
|
||||||
|
pub trait ClusterSessionCreator<S: ClusterSession, D> {
|
||||||
|
/// Get creation data from message.
|
||||||
|
fn creation_data_from_message(_message: &Message) -> Result<Option<D>, Error> {
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Prepare error message.
|
||||||
|
fn make_error_message(sid: S::Id, nonce: u64, err: Error) -> Message;
|
||||||
|
|
||||||
|
/// Create cluster session.
|
||||||
|
fn create(&self, cluster: Arc<Cluster>, master: NodeId, nonce: Option<u64>, id: S::Id, creation_data: Option<D>) -> Result<Arc<S>, Error>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Message with session id.
|
||||||
|
pub trait IntoSessionId<K> {
|
||||||
|
/// Get session id.
|
||||||
|
fn into_session_id(&self) -> Result<K, Error>;
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct SessionCreatorCore {
|
||||||
|
/// Self node id.
|
||||||
|
self_node_id: NodeId,
|
||||||
|
/// Reference to key storage
|
||||||
|
key_storage: Arc<KeyStorage>,
|
||||||
|
/// Reference to ACL storage
|
||||||
|
acl_storage: Arc<AclStorage>,
|
||||||
|
/// Always-increasing sessions counter. Is used as session nonce to prevent replay attacks:
|
||||||
|
/// 1) during handshake, KeyServers generate new random key to encrypt messages
|
||||||
|
/// => there's no way to use messages from previous connections for replay attacks
|
||||||
|
/// 2) when session (of any type) is started, master node increases its own session counter and broadcasts it
|
||||||
|
/// 3) when slave KeyServer receives session initialization message, it checks that new nonce is larger than previous (from the same master)
|
||||||
|
/// => there's no way to use messages from previous sessions for replay attacks
|
||||||
|
/// 4) KeyServer checks that each session message contains the same nonce as the initialization message
|
||||||
|
/// Given that: (A) handshake is secure and (B) session itself is initially replay-protected
|
||||||
|
/// => this guarantees that sessions are replay-protected.
|
||||||
|
session_counter: AtomicUsize,
|
||||||
|
/// Maximal session nonce, received from given connection.
|
||||||
|
max_nonce: RwLock<BTreeMap<NodeId, u64>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SessionCreatorCore {
|
||||||
|
/// Create new session creator core.
|
||||||
|
pub fn new(config: &ClusterConfiguration) -> Self {
|
||||||
|
SessionCreatorCore {
|
||||||
|
self_node_id: config.self_key_pair.public().clone(),
|
||||||
|
acl_storage: config.acl_storage.clone(),
|
||||||
|
key_storage: config.key_storage.clone(),
|
||||||
|
session_counter: AtomicUsize::new(0),
|
||||||
|
max_nonce: RwLock::new(BTreeMap::new()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// When node has teimtouted.
|
||||||
|
pub fn on_connection_timeout(&self, node_id: &NodeId) {
|
||||||
|
self.max_nonce.write().remove(node_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check or generate new session nonce.
|
||||||
|
fn check_session_nonce(&self, master: &NodeId, nonce: Option<u64>) -> Result<u64, Error> {
|
||||||
|
// if we're master node of the session, then nonce should be generated
|
||||||
|
// if we're slave node of the session, then nonce should be passed from outside
|
||||||
|
match nonce {
|
||||||
|
Some(nonce) => match nonce > *self.max_nonce.write().entry(master.clone()).or_insert(0) {
|
||||||
|
true => Ok(nonce),
|
||||||
|
false => Err(Error::ReplayProtection),
|
||||||
|
},
|
||||||
|
None => Ok(self.session_counter.fetch_add(1, Ordering::Relaxed) as u64 + 1),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Read key share && remove disconnected nodes.
|
||||||
|
fn read_key_share(&self, key_id: &SessionId) -> Result<Option<DocumentKeyShare>, Error> {
|
||||||
|
self.key_storage.get(key_id).map_err(|e| Error::KeyStorage(e.into()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Generation session creator.
pub struct GenerationSessionCreator {
	/// True if generation sessions must fail.
	pub make_faulty_generation_sessions: AtomicBool,
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl GenerationSessionCreator {
	#[cfg(test)]
	pub fn make_faulty_generation_sessions(&self) {
		self.make_faulty_generation_sessions.store(true, Ordering::Relaxed);
	}
}

impl ClusterSessionCreator<GenerationSessionImpl, ()> for GenerationSessionCreator {
	fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message {
		message::Message::Generation(message::GenerationMessage::SessionError(message::SessionError {
			session: sid.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(&self, cluster: Arc<Cluster>, master: NodeId, nonce: Option<u64>, id: SessionId, _creation_data: Option<()>) -> Result<Arc<GenerationSessionImpl>, Error> {
		// check that there's no finished encryption session with the same id
		if self.core.key_storage.contains(&id) {
			return Err(Error::DuplicateSessionId);
		}

		let nonce = self.core.check_session_nonce(&master, nonce)?;
		Ok(GenerationSessionImpl::new(GenerationSessionParams {
			id: id.clone(),
			self_node_id: self.core.self_node_id.clone(),
			key_storage: Some(self.core.key_storage.clone()),
			cluster: cluster,
			nonce: Some(nonce),
		}))
		.map(|session| {
			if self.make_faulty_generation_sessions.load(Ordering::Relaxed) {
				session.simulate_faulty_behaviour();
			}
			session
		})
		.map(Arc::new)
	}
}

/// Encryption session creator.
pub struct EncryptionSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl ClusterSessionCreator<EncryptionSessionImpl, ()> for EncryptionSessionCreator {
	fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message {
		message::Message::Encryption(message::EncryptionMessage::EncryptionSessionError(message::EncryptionSessionError {
			session: sid.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(&self, cluster: Arc<Cluster>, master: NodeId, nonce: Option<u64>, id: SessionId, _creation_data: Option<()>) -> Result<Arc<EncryptionSessionImpl>, Error> {
		let encrypted_data = self.core.read_key_share(&id)?;
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		Ok(Arc::new(EncryptionSessionImpl::new(EncryptionSessionParams {
			id: id,
			self_node_id: self.core.self_node_id.clone(),
			encrypted_data: encrypted_data,
			key_storage: self.core.key_storage.clone(),
			cluster: cluster,
			nonce: nonce,
		})?))
	}
}

/// Decryption session creator.
pub struct DecryptionSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl ClusterSessionCreator<DecryptionSessionImpl, Signature> for DecryptionSessionCreator {
	fn creation_data_from_message(message: &Message) -> Result<Option<Signature>, Error> {
		match *message {
			Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(ref message)) => match &message.message {
				&ConsensusMessage::InitializeConsensusSession(ref message) => Ok(Some(message.requestor_signature.clone().into())),
				_ => Err(Error::InvalidMessage),
			},
			Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(ref message)) => Ok(Some(message.requestor_signature.clone().into())),
			_ => Err(Error::InvalidMessage),
		}
	}

	fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message {
		message::Message::Decryption(message::DecryptionMessage::DecryptionSessionError(message::DecryptionSessionError {
			session: sid.id.into(),
			sub_session: sid.access_key.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(&self, cluster: Arc<Cluster>, master: NodeId, nonce: Option<u64>, id: SessionIdWithSubSession, requester_signature: Option<Signature>) -> Result<Arc<DecryptionSessionImpl>, Error> {
		let encrypted_data = self.core.read_key_share(&id.id)?;
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		Ok(Arc::new(DecryptionSessionImpl::new(DecryptionSessionParams {
			meta: SessionMeta {
				id: id.id,
				self_node_id: self.core.self_node_id.clone(),
				master_node_id: master,
				threshold: encrypted_data.as_ref().map(|ks| ks.threshold).unwrap_or_default(),
			},
			access_key: id.access_key,
			key_share: encrypted_data,
			acl_storage: self.core.acl_storage.clone(),
			cluster: cluster,
			nonce: nonce,
		}, requester_signature)?))
	}
}

/// Signing session creator.
pub struct SigningSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl ClusterSessionCreator<SigningSessionImpl, Signature> for SigningSessionCreator {
	fn creation_data_from_message(message: &Message) -> Result<Option<Signature>, Error> {
		match *message {
			Message::Signing(SigningMessage::SigningConsensusMessage(ref message)) => match &message.message {
				&ConsensusMessage::InitializeConsensusSession(ref message) => Ok(Some(message.requestor_signature.clone().into())),
				_ => Err(Error::InvalidMessage),
			},
			Message::Signing(SigningMessage::SigningSessionDelegation(ref message)) => Ok(Some(message.requestor_signature.clone().into())),
			_ => Err(Error::InvalidMessage),
		}
	}

	fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message {
		message::Message::Signing(message::SigningMessage::SigningSessionError(message::SigningSessionError {
			session: sid.id.into(),
			sub_session: sid.access_key.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(&self, cluster: Arc<Cluster>, master: NodeId, nonce: Option<u64>, id: SessionIdWithSubSession, requester_signature: Option<Signature>) -> Result<Arc<SigningSessionImpl>, Error> {
		let encrypted_data = self.core.read_key_share(&id.id)?;
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		Ok(Arc::new(SigningSessionImpl::new(SigningSessionParams {
			meta: SessionMeta {
				id: id.id,
				self_node_id: self.core.self_node_id.clone(),
				master_node_id: master,
				threshold: encrypted_data.as_ref().map(|ks| ks.threshold).unwrap_or_default(),
			},
			access_key: id.access_key,
			key_share: encrypted_data,
			acl_storage: self.core.acl_storage.clone(),
			cluster: cluster,
			nonce: nonce,
		}, requester_signature)?))
	}
}
/// Key version negotiation session creator.
pub struct KeyVersionNegotiationSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl ClusterSessionCreator<KeyVersionNegotiationSessionImpl<VersionNegotiationTransport>, ()> for KeyVersionNegotiationSessionCreator {
	fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message {
		message::Message::KeyVersionNegotiation(message::KeyVersionNegotiationMessage::KeyVersionsError(message::KeyVersionsError {
			session: sid.id.into(),
			sub_session: sid.access_key.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(&self, cluster: Arc<Cluster>, master: NodeId, nonce: Option<u64>, id: SessionIdWithSubSession, _creation_data: Option<()>) -> Result<Arc<KeyVersionNegotiationSessionImpl<VersionNegotiationTransport>>, Error> {
		let encrypted_data = self.core.read_key_share(&id.id)?;
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		let computer = Arc::new(FastestResultKeyVersionsResultComputer::new(self.core.self_node_id.clone(), encrypted_data.as_ref()));
		Ok(Arc::new(KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams {
			meta: ShareChangeSessionMeta {
				id: id.id.clone(),
				self_node_id: self.core.self_node_id.clone(),
				master_node_id: master,
			},
			sub_session: id.access_key.clone(),
			key_share: encrypted_data,
			result_computer: computer,
			transport: VersionNegotiationTransport {
				cluster: cluster,
				key_id: id.id,
				sub_session: id.access_key.clone(),
				nonce: nonce,
			},
			nonce: nonce,
		})))
	}
}

/// Administrative session creator.
pub struct AdminSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
	/// Administrator public.
	pub admin_public: Option<Public>,
}

impl ClusterSessionCreator<AdminSession, AdminSessionCreationData> for AdminSessionCreator {
	fn creation_data_from_message(message: &Message) -> Result<Option<AdminSessionCreationData>, Error> {
		match *message {
			Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref message)) => match &message.message {
				&ConsensusMessageWithServersSet::InitializeConsensusSession(_) => Ok(Some(AdminSessionCreationData::ServersSetChange)),
				_ => Err(Error::InvalidMessage),
			},
			Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(ref message)) => match &message.message {
				&ConsensusMessageOfShareAdd::InitializeConsensusSession(ref message) => Ok(Some(AdminSessionCreationData::ShareAdd(message.version.clone().into()))),
				_ => Err(Error::InvalidMessage),
			},
			_ => Err(Error::InvalidMessage),
		}
	}

	fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message {
		message::Message::ServersSetChange(message::ServersSetChangeMessage::ServersSetChangeError(message::ServersSetChangeError {
			session: sid.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(&self, cluster: Arc<Cluster>, master: NodeId, nonce: Option<u64>, id: SessionId, creation_data: Option<AdminSessionCreationData>) -> Result<Arc<AdminSession>, Error> {
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?;
		Ok(Arc::new(match creation_data {
			Some(AdminSessionCreationData::ShareAdd(version)) => {
				AdminSession::ShareAdd(ShareAddSessionImpl::new(ShareAddSessionParams {
					meta: ShareChangeSessionMeta {
						id: id.clone(),
						self_node_id: self.core.self_node_id.clone(),
						master_node_id: master,
					},
					transport: ShareAddTransport::new(id.clone(), Some(version), nonce, cluster),
					key_storage: self.core.key_storage.clone(),
					nonce: nonce,
					admin_public: Some(admin_public),
				})?)
			},
			Some(AdminSessionCreationData::ServersSetChange) => {
				AdminSession::ServersSetChange(ServersSetChangeSessionImpl::new(ServersSetChangeSessionParams {
					meta: ShareChangeSessionMeta {
						id: id.clone(),
						self_node_id: self.core.self_node_id.clone(),
						master_node_id: master,
					},
					cluster: cluster.clone(),
					key_storage: self.core.key_storage.clone(),
					nonce: nonce,
					all_nodes_set: cluster.nodes(),
					admin_public: admin_public,
				})?)
			},
			None => unreachable!("expected to call with non-empty creation data; qed"),
		}))
	}
}

impl IntoSessionId<SessionId> for Message {
	fn into_session_id(&self) -> Result<SessionId, Error> {
		match *self {
			Message::Generation(ref message) => Ok(message.session_id().clone()),
			Message::Encryption(ref message) => Ok(message.session_id().clone()),
			Message::Decryption(_) => Err(Error::InvalidMessage),
			Message::Signing(_) => Err(Error::InvalidMessage),
			Message::ServersSetChange(ref message) => Ok(message.session_id().clone()),
			Message::ShareAdd(ref message) => Ok(message.session_id().clone()),
			Message::KeyVersionNegotiation(_) => Err(Error::InvalidMessage),
			Message::Cluster(_) => Err(Error::InvalidMessage),
		}
	}
}

impl IntoSessionId<SessionIdWithSubSession> for Message {
	fn into_session_id(&self) -> Result<SessionIdWithSubSession, Error> {
		match *self {
			Message::Generation(_) => Err(Error::InvalidMessage),
			Message::Encryption(_) => Err(Error::InvalidMessage),
			Message::Decryption(ref message) => Ok(SessionIdWithSubSession::new(message.session_id().clone(), message.sub_session_id().clone())),
			Message::Signing(ref message) => Ok(SessionIdWithSubSession::new(message.session_id().clone(), message.sub_session_id().clone())),
			Message::ServersSetChange(_) => Err(Error::InvalidMessage),
			Message::ShareAdd(_) => Err(Error::InvalidMessage),
			Message::KeyVersionNegotiation(ref message) => Ok(SessionIdWithSubSession::new(message.session_id().clone(), message.sub_session_id().clone())),
			Message::Cluster(_) => Err(Error::InvalidMessage),
		}
	}
}
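The nonce handling in SessionCreatorCore above comes down to one rule: a master hands out strictly increasing nonces from its own counter, and a slave accepts a session only if its nonce is larger than every nonce it has already accepted from that master. The following is a minimal, self-contained sketch of that rule using only std types; the NonceTracker name and the plain u64 node ids are illustrative and not part of the SecretStore API.

use std::collections::BTreeMap;

/// Tracks the highest session nonce accepted per master node (illustrative sketch).
struct NonceTracker {
	max_nonce: BTreeMap<u64, u64>, // node id -> highest accepted nonce
}

impl NonceTracker {
	fn new() -> Self {
		NonceTracker { max_nonce: BTreeMap::new() }
	}

	/// Accept the nonce only if it is strictly greater than every nonce previously
	/// accepted from the same master; otherwise treat the message as a replay.
	fn check(&mut self, master: u64, nonce: u64) -> Result<u64, &'static str> {
		let max = self.max_nonce.entry(master).or_insert(0);
		if nonce > *max {
			*max = nonce;
			Ok(nonce)
		} else {
			Err("replay protection: nonce is not increasing")
		}
	}
}

fn main() {
	let mut tracker = NonceTracker::new();
	assert_eq!(tracker.check(1, 1), Ok(1));
	assert_eq!(tracker.check(1, 2), Ok(2));
	// replaying an old initialization message from the same master is rejected
	assert!(tracker.check(1, 2).is_err());
}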
@@ -26,8 +26,7 @@ use bigint::prelude::U256;
use bigint::hash::H256;
use key_server_cluster::Error;
use key_server_cluster::message::{Message, ClusterMessage, GenerationMessage, EncryptionMessage,
-	DecryptionMessage, SigningMessage, ServersSetChangeMessage, ShareAddMessage, ShareMoveMessage,
-	ShareRemoveMessage};
+	DecryptionMessage, SigningMessage, ServersSetChangeMessage, ShareAddMessage, KeyVersionNegotiationMessage};

/// Size of serialized header.
pub const MESSAGE_HEADER_SIZE: usize = 18;

@@ -88,6 +87,9 @@ pub fn serialize_message(message: Message) -> Result<SerializedMessage, Error> {
		Message::Decryption(DecryptionMessage::PartialDecryption(payload)) => (152, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::DecryptionSessionError(payload)) => (153, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(payload)) => (154, serde_json::to_vec(&payload)),
+		Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(payload)) => (155, serde_json::to_vec(&payload)),
+		Message::Decryption(DecryptionMessage::DecryptionSessionDelegationCompleted(payload))
+			=> (156, serde_json::to_vec(&payload)),

		Message::Signing(SigningMessage::SigningConsensusMessage(payload)) => (200, serde_json::to_vec(&payload)),
		Message::Signing(SigningMessage::SigningGenerationMessage(payload)) => (201, serde_json::to_vec(&payload)),

@@ -95,45 +97,40 @@ pub fn serialize_message(message: Message) -> Result<SerializedMessage, Error> {
		Message::Signing(SigningMessage::PartialSignature(payload)) => (203, serde_json::to_vec(&payload)),
		Message::Signing(SigningMessage::SigningSessionError(payload)) => (204, serde_json::to_vec(&payload)),
		Message::Signing(SigningMessage::SigningSessionCompleted(payload)) => (205, serde_json::to_vec(&payload)),
+		Message::Signing(SigningMessage::SigningSessionDelegation(payload)) => (206, serde_json::to_vec(&payload)),
+		Message::Signing(SigningMessage::SigningSessionDelegationCompleted(payload)) => (207, serde_json::to_vec(&payload)),

		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(payload))
			=> (250, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest(payload)) => (251, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(payload)) => (252, serde_json::to_vec(&payload)),
-		Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(payload))
+		Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(payload))
			=> (253, serde_json::to_vec(&payload)),
-		Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(payload))
+		Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(payload))
			=> (254, serde_json::to_vec(&payload)),
-		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(payload))
+		Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(payload))
			=> (255, serde_json::to_vec(&payload)),
-		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(payload))
+		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(payload))
			=> (256, serde_json::to_vec(&payload)),
-		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(payload))
+		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(payload))
			=> (257, serde_json::to_vec(&payload)),
-		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareMoveMessage(payload))
+		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(payload))
			=> (258, serde_json::to_vec(&payload)),
-		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(payload))
-			=> (259, serde_json::to_vec(&payload)),
-		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(payload)) => (260, serde_json::to_vec(&payload)),
+		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(payload)) => (261, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(payload))
-			=> (261, serde_json::to_vec(&payload)),
+			=> (262, serde_json::to_vec(&payload)),

		Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(payload)) => (300, serde_json::to_vec(&payload)),
		Message::ShareAdd(ShareAddMessage::KeyShareCommon(payload)) => (301, serde_json::to_vec(&payload)),
-		Message::ShareAdd(ShareAddMessage::NewAbsoluteTermShare(payload)) => (302, serde_json::to_vec(&payload)),
-		Message::ShareAdd(ShareAddMessage::NewKeysDissemination(payload)) => (303, serde_json::to_vec(&payload)),
-		Message::ShareAdd(ShareAddMessage::ShareAddError(payload)) => (304, serde_json::to_vec(&payload)),
+		Message::ShareAdd(ShareAddMessage::NewKeysDissemination(payload)) => (302, serde_json::to_vec(&payload)),
+		Message::ShareAdd(ShareAddMessage::ShareAddError(payload)) => (303, serde_json::to_vec(&payload)),

-		Message::ShareMove(ShareMoveMessage::ShareMoveConsensusMessage(payload)) => (350, serde_json::to_vec(&payload)),
-		Message::ShareMove(ShareMoveMessage::ShareMoveRequest(payload)) => (351, serde_json::to_vec(&payload)),
-		Message::ShareMove(ShareMoveMessage::ShareMove(payload)) => (352, serde_json::to_vec(&payload)),
-		Message::ShareMove(ShareMoveMessage::ShareMoveConfirm(payload)) => (353, serde_json::to_vec(&payload)),
-		Message::ShareMove(ShareMoveMessage::ShareMoveError(payload)) => (354, serde_json::to_vec(&payload)),
-
-		Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(payload)) => (400, serde_json::to_vec(&payload)),
-		Message::ShareRemove(ShareRemoveMessage::ShareRemoveRequest(payload)) => (401, serde_json::to_vec(&payload)),
-		Message::ShareRemove(ShareRemoveMessage::ShareRemoveConfirm(payload)) => (402, serde_json::to_vec(&payload)),
-		Message::ShareRemove(ShareRemoveMessage::ShareRemoveError(payload)) => (403, serde_json::to_vec(&payload)),
+		Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::RequestKeyVersions(payload))
+			=> (450, serde_json::to_vec(&payload)),
+		Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersions(payload))
+			=> (451, serde_json::to_vec(&payload)),
+		Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersionsError(payload))
+			=> (452, serde_json::to_vec(&payload)),
	};

	let payload = payload.map_err(|err| Error::Serde(err.to_string()))?;
@@ -169,6 +166,8 @@ pub fn deserialize_message(header: &MessageHeader, payload: Vec<u8>) -> Result<M
		152 => Message::Decryption(DecryptionMessage::PartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		153 => Message::Decryption(DecryptionMessage::DecryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		154 => Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		155 => Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		156 => Message::Decryption(DecryptionMessage::DecryptionSessionDelegationCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		200 => Message::Signing(SigningMessage::SigningConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		201 => Message::Signing(SigningMessage::SigningGenerationMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

@@ -176,36 +175,29 @@ pub fn deserialize_message(header: &MessageHeader, payload: Vec<u8>) -> Result<M
		203 => Message::Signing(SigningMessage::PartialSignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		204 => Message::Signing(SigningMessage::SigningSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		205 => Message::Signing(SigningMessage::SigningSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		206 => Message::Signing(SigningMessage::SigningSessionDelegation(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		207 => Message::Signing(SigningMessage::SigningSessionDelegationCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		250 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		251 => Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		252 => Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		253 => Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		254 => Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		255 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		256 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		257 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		258 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareMoveMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		259 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		260 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		261 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		253 => Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		254 => Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		255 => Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		256 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		257 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		258 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		261 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		262 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		300 => Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		301 => Message::ShareAdd(ShareAddMessage::KeyShareCommon(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		302 => Message::ShareAdd(ShareAddMessage::NewAbsoluteTermShare(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		303 => Message::ShareAdd(ShareAddMessage::NewKeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		304 => Message::ShareAdd(ShareAddMessage::ShareAddError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		302 => Message::ShareAdd(ShareAddMessage::NewKeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		303 => Message::ShareAdd(ShareAddMessage::ShareAddError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

-		350 => Message::ShareMove(ShareMoveMessage::ShareMoveConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		351 => Message::ShareMove(ShareMoveMessage::ShareMoveRequest(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		352 => Message::ShareMove(ShareMoveMessage::ShareMove(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		353 => Message::ShareMove(ShareMoveMessage::ShareMoveConfirm(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		354 => Message::ShareMove(ShareMoveMessage::ShareMoveError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-
-		400 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		401 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveRequest(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		402 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveConfirm(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
-		403 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		450 => Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::RequestKeyVersions(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		451 => Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersions(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		452 => Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersionsError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		_ => return Err(Error::Serde(format!("unknown message type {}", header.kind))),
	})
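The framing above is a numeric kind tag followed by a serde_json payload: serialize_message picks the tag per variant, deserialize_message dispatches on it and rejects unknown kinds, and the new delegation and key-version-negotiation variants simply claim fresh tags (155-156, 206-207, 450-452) while the retired ShareMove/ShareRemove ranges disappear. Below is a minimal sketch of the same kind-tag-plus-JSON idea, assuming the serde (with derive) and serde_json crates; the Ping/Pong types are illustrative and not the SecretStore wire format.

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Ping { id: u64 }

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Pong { id: u64 }

#[derive(Debug, PartialEq)]
enum Msg { Ping(Ping), Pong(Pong) }

/// Serialize: a numeric kind tag followed by the JSON payload.
fn serialize(msg: &Msg) -> Result<(u64, Vec<u8>), serde_json::Error> {
	Ok(match msg {
		Msg::Ping(p) => (1, serde_json::to_vec(p)?),
		Msg::Pong(p) => (2, serde_json::to_vec(p)?),
	})
}

/// Deserialize: dispatch on the kind tag, reject unknown kinds.
fn deserialize(kind: u64, payload: &[u8]) -> Result<Msg, String> {
	Ok(match kind {
		1 => Msg::Ping(serde_json::from_slice(payload).map_err(|e| e.to_string())?),
		2 => Msg::Pong(serde_json::from_slice(payload).map_err(|e| e.to_string())?),
		_ => return Err(format!("unknown message type {}", kind)),
	})
}

fn main() {
	let (kind, payload) = serialize(&Msg::Ping(Ping { id: 7 })).unwrap();
	assert_eq!(deserialize(kind, &payload).unwrap(), Msg::Ping(Ping { id: 7 }));
}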
@@ -44,7 +44,6 @@ pub fn write_encrypted_message<A>(a: A, key: &KeyPair, message: Message) -> Writ
		Err(error) => (Some(error), write_all(a, Vec::new())),
	};

-
	WriteMessage {
		error: error,
		future: future,
@@ -98,6 +98,11 @@ impl<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTran
		&self.consensus_job
	}

+	/// Get mutable consensus job reference.
+	pub fn consensus_job_mut(&mut self) -> &mut JobSession<ConsensusExecutor, ConsensusTransport> {
+		&mut self.consensus_job
+	}
+
	/// Get all nodes, which has not rejected consensus request.
	pub fn consensus_non_rejected_nodes(&self) -> BTreeSet<NodeId> {
		self.consensus_job.responses().iter()

@@ -231,8 +236,9 @@ impl<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTran
		let (is_restart_needed, timeout_result) = match self.state {
			ConsensusSessionState::WaitingForInitialization if is_self_master => {
				// it is strange to receive error before session is initialized && slave doesn't know access_key
-				// => ignore this error for now
-				(false, Ok(()))
+				// => fatal error
+				self.state = ConsensusSessionState::Failed;
+				(false, Err(Error::ConsensusUnreachable))
			}
			ConsensusSessionState::WaitingForInitialization if is_node_master => {
				// can not establish consensus
@@ -496,6 +502,7 @@ mod tests {
		assert_eq!(session.state(), ConsensusSessionState::WaitingForInitialization);
		session.on_consensus_message(&NodeId::from(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
			requestor_signature: sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
+			version: Default::default(),
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		assert_eq!(session.on_job_request(&NodeId::from(1), 20, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidMessage);

@@ -508,6 +515,7 @@ mod tests {
		assert_eq!(session.state(), ConsensusSessionState::WaitingForInitialization);
		session.on_consensus_message(&NodeId::from(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
			requestor_signature: sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
+			version: Default::default(),
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.on_job_request(&NodeId::from(1), 2, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();

@@ -537,15 +545,17 @@ mod tests {
		let mut session = make_slave_consensus_session(0, None);
		session.on_consensus_message(&NodeId::from(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
			requestor_signature: sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
+			version: Default::default(),
		})).unwrap();
		session.on_session_completed(&NodeId::from(1)).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::Finished);
	}

	#[test]
-	fn consensus_session_continues_if_node_error_received_by_uninitialized_master() {
+	fn consensus_session_fails_if_node_error_received_by_uninitialized_master() {
		let mut session = make_master_consensus_session(0, None, None);
-		assert_eq!(session.on_node_error(&NodeId::from(2)), Ok(false));
+		assert_eq!(session.on_node_error(&NodeId::from(2)), Err(Error::ConsensusUnreachable));
+		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
@@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{BTreeSet, BTreeMap};
+use bigint::hash::H256;
use ethkey::{Public, Secret};
use ethcrypto::ecies::encrypt;
use ethcrypto::DEFAULT_MAC;

@@ -32,6 +33,8 @@ pub struct DecryptionJob {
	requester: Public,
	/// Key share.
	key_share: DocumentKeyShare,
+	/// Key version.
+	key_version: H256,
	/// Request id.
	request_id: Option<Secret>,
	/// Is shadow decryption requested.

@@ -59,25 +62,27 @@ pub struct PartialDecryptionResponse {
}

impl DecryptionJob {
-	pub fn new_on_slave(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare) -> Result<Self, Error> {
+	pub fn new_on_slave(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare, key_version: H256) -> Result<Self, Error> {
		debug_assert!(key_share.common_point.is_some() && key_share.encrypted_point.is_some());
		Ok(DecryptionJob {
			self_node_id: self_node_id,
			access_key: access_key,
			requester: requester,
			key_share: key_share,
+			key_version: key_version,
			request_id: None,
			is_shadow_decryption: None,
		})
	}

-	pub fn new_on_master(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare, is_shadow_decryption: bool) -> Result<Self, Error> {
+	pub fn new_on_master(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare, key_version: H256, is_shadow_decryption: bool) -> Result<Self, Error> {
		debug_assert!(key_share.common_point.is_some() && key_share.encrypted_point.is_some());
		Ok(DecryptionJob {
			self_node_id: self_node_id,
			access_key: access_key,
			requester: requester,
			key_share: key_share,
+			key_version: key_version,
			request_id: Some(math::generate_random_scalar()?),
			is_shadow_decryption: Some(is_shadow_decryption),
		})

@@ -107,15 +112,16 @@ impl JobExecutor for DecryptionJob {
	}

	fn process_partial_request(&mut self, partial_request: PartialDecryptionRequest) -> Result<JobPartialRequestAction<PartialDecryptionResponse>, Error> {
+		let key_version = self.key_share.version(&self.key_version).map_err(|e| Error::KeyStorage(e.into()))?;
		if partial_request.other_nodes_ids.len() != self.key_share.threshold
			|| partial_request.other_nodes_ids.contains(&self.self_node_id)
-			|| partial_request.other_nodes_ids.iter().any(|n| !self.key_share.id_numbers.contains_key(n)) {
+			|| partial_request.other_nodes_ids.iter().any(|n| !key_version.id_numbers.contains_key(n)) {
			return Err(Error::InvalidMessage);
		}

-		let self_id_number = &self.key_share.id_numbers[&self.self_node_id];
-		let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &self.key_share.id_numbers[n]);
-		let node_shadow = math::compute_node_shadow(&self.key_share.secret_share, &self_id_number, other_id_numbers)?;
+		let self_id_number = &key_version.id_numbers[&self.self_node_id];
+		let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &key_version.id_numbers[n]);
+		let node_shadow = math::compute_node_shadow(&key_version.secret_share, &self_id_number, other_id_numbers)?;
		let decrypt_shadow = if partial_request.is_shadow_decryption { Some(math::generate_random_scalar()?) } else { None };
		let common_point = self.key_share.common_point.as_ref().expect("DecryptionJob is only created when common_point is known; qed");
		let (shadow_point, decrypt_shadow) = math::compute_node_shadow_point(&self.access_key, &common_point, &node_shadow, decrypt_shadow)?;

@@ -129,7 +135,7 @@ impl JobExecutor for DecryptionJob {
		}))
	}

-	fn check_partial_response(&self, partial_response: &PartialDecryptionResponse) -> Result<JobPartialResponseAction, Error> {
+	fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &PartialDecryptionResponse) -> Result<JobPartialResponseAction, Error> {
		if Some(&partial_response.request_id) != self.request_id.as_ref() {
			return Ok(JobPartialResponseAction::Ignore);
		}
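The DecryptionJob changes above resolve all share material through the requested key version: the id numbers and the secret share now come from key_share.version(&self.key_version) rather than from the share itself, so every node contributes data consistent with the single version selected for the session. Below is a minimal sketch of such a versioned lookup; the KeyVersion and VersionedShare types and the plain integer hashes are purely illustrative, not the real DocumentKeyShare API.

use std::collections::BTreeMap;

type NodeId = u64;
type VersionHash = u64;

/// One version of a key share: per-node id numbers plus this node's secret share (illustrative).
struct KeyVersion {
	id_numbers: BTreeMap<NodeId, u64>,
	secret_share: u64,
}

/// A share that may carry several versions of the same key (illustrative).
struct VersionedShare {
	versions: BTreeMap<VersionHash, KeyVersion>,
}

impl VersionedShare {
	/// Resolve a specific version; jobs fail early if the requested version is unknown.
	fn version(&self, hash: &VersionHash) -> Result<&KeyVersion, &'static str> {
		self.versions.get(hash).ok_or("unknown key version")
	}
}

fn main() {
	let mut versions = BTreeMap::new();
	versions.insert(1, KeyVersion { id_numbers: BTreeMap::new(), secret_share: 42 });
	let share = VersionedShare { versions };
	assert!(share.version(&1).is_ok());
	assert!(share.version(&2).is_err());
}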
|
@ -34,7 +34,7 @@ impl JobExecutor for DummyJob {
|
|||||||
unreachable!("dummy job methods are never called")
|
unreachable!("dummy job methods are never called")
|
||||||
}
|
}
|
||||||
|
|
||||||
fn check_partial_response(&self, _r: &()) -> Result<JobPartialResponseAction, Error> {
|
fn check_partial_response(&mut self, _s: &NodeId, _r: &()) -> Result<JobPartialResponseAction, Error> {
|
||||||
unreachable!("dummy job methods are never called")
|
unreachable!("dummy job methods are never called")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -48,7 +48,7 @@ pub trait JobExecutor {
	/// Process partial request.
	fn process_partial_request(&mut self, partial_request: Self::PartialJobRequest) -> Result<JobPartialRequestAction<Self::PartialJobResponse>, Error>;
	/// Check partial response of given node.
-	fn check_partial_response(&self, partial_response: &Self::PartialJobResponse) -> Result<JobPartialResponseAction, Error>;
+	fn check_partial_response(&mut self, sender: &NodeId, partial_response: &Self::PartialJobResponse) -> Result<JobPartialResponseAction, Error>;
	/// Compute final job response.
	fn compute_response(&self, partial_responses: &BTreeMap<NodeId, Self::PartialJobResponse>) -> Result<Self::JobResponse, Error>;
}

@@ -127,11 +127,21 @@ impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExe
		&self.transport
	}

+	/// Get mutable transport reference.
+	pub fn transport_mut(&mut self) -> &mut Transport {
+		&mut self.transport
+	}
+
	/// Get executor reference.
	pub fn executor(&self) -> &Executor {
		&self.executor
	}

+	/// Get mutable executor reference.
+	pub fn executor_mut(&mut self) -> &mut Executor {
+		&mut self.executor
+	}
+
	/// Get job state.
	pub fn state(&self) -> JobSessionState {
		self.data.state

@@ -181,7 +191,10 @@ impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExe
	/// Initialize.
	pub fn initialize(&mut self, nodes: BTreeSet<NodeId>) -> Result<(), Error> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);
-		debug_assert!(nodes.len() >= self.meta.threshold + 1);
+
+		if nodes.len() < self.meta.threshold + 1 {
+			return Err(Error::ConsensusUnreachable);
+		}

		if self.data.state != JobSessionState::Inactive {
			return Err(Error::InvalidStateForRequest);

@@ -266,7 +279,7 @@ impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExe
			return Err(Error::InvalidNodeForRequest);
		}

-		match self.executor.check_partial_response(&response)? {
+		match self.executor.check_partial_response(node, &response)? {
			JobPartialResponseAction::Ignore => Ok(()),
			JobPartialResponseAction::Reject => {
				active_data.rejects.insert(node.clone());

@@ -279,7 +292,6 @@ impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExe
			},
			JobPartialResponseAction::Accept => {
				active_data.responses.insert(node.clone(), response);
-
				if active_data.responses.len() < self.meta.threshold + 1 {
					return Ok(());
				}

@@ -351,7 +363,7 @@ pub mod tests {

		fn prepare_partial_request(&self, _n: &NodeId, _nodes: &BTreeSet<NodeId>) -> Result<u32, Error> { Ok(2) }
		fn process_partial_request(&mut self, r: u32) -> Result<JobPartialRequestAction<u32>, Error> { if r <= 10 { Ok(JobPartialRequestAction::Respond(r * r)) } else { Err(Error::InvalidMessage) } }
-		fn check_partial_response(&self, r: &u32) -> Result<JobPartialResponseAction, Error> { if r % 2 == 0 { Ok(JobPartialResponseAction::Accept) } else { Ok(JobPartialResponseAction::Reject) } }
+		fn check_partial_response(&mut self, _s: &NodeId, r: &u32) -> Result<JobPartialResponseAction, Error> { if r % 2 == 0 { Ok(JobPartialResponseAction::Accept) } else { Ok(JobPartialResponseAction::Reject) } }
		fn compute_response(&self, r: &BTreeMap<NodeId, u32>) -> Result<u32, Error> { Ok(r.values().fold(0, |v1, v2| v1 + v2)) }
	}

@@ -24,6 +24,8 @@ use key_server_cluster::jobs::job_session::{JobPartialResponseAction, JobPartial
pub struct KeyAccessJob {
	/// Key id.
	id: SessionId,
+	/// Has key share?
+	has_key_share: bool,
	/// ACL storage.
	acl_storage: Arc<AclStorage>,
	/// Requester signature.
@@ -34,6 +36,7 @@ impl KeyAccessJob {
	pub fn new_on_slave(id: SessionId, acl_storage: Arc<AclStorage>) -> Self {
		KeyAccessJob {
			id: id,
+			has_key_share: true,
			acl_storage: acl_storage,
			signature: None,
		}
@@ -42,11 +45,24 @@ impl KeyAccessJob {
	pub fn new_on_master(id: SessionId, acl_storage: Arc<AclStorage>, signature: Signature) -> Self {
		KeyAccessJob {
			id: id,
+			has_key_share: true,
			acl_storage: acl_storage,
			signature: Some(signature),
		}
	}

+	pub fn set_has_key_share(&mut self, has_key_share: bool) {
+		self.has_key_share = has_key_share;
+	}
+
+	pub fn set_requester_signature(&mut self, signature: Signature) {
+		self.signature = Some(signature);
+	}
+
+	pub fn requester_signature(&self) -> Option<&Signature> {
+		self.signature.as_ref()
+	}
+
	pub fn requester(&self) -> Result<Option<Public>, Error> {
		match self.signature.as_ref() {
			Some(signature) => Ok(Some(recover(signature, &self.id)?)),
@@ -65,13 +81,17 @@ impl JobExecutor for KeyAccessJob {
	}

	fn process_partial_request(&mut self, partial_request: Signature) -> Result<JobPartialRequestAction<bool>, Error> {
+		if !self.has_key_share {
+			return Ok(JobPartialRequestAction::Reject(false));
+		}
+
		self.signature = Some(partial_request.clone());
		self.acl_storage.check(&recover(&partial_request, &self.id)?, &self.id)
			.map_err(|_| Error::AccessDenied)
			.map(|is_confirmed| if is_confirmed { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) })
	}

-	fn check_partial_response(&self, partial_response: &bool) -> Result<JobPartialResponseAction, Error> {
+	fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &bool) -> Result<JobPartialResponseAction, Error> {
		Ok(if *partial_response { JobPartialResponseAction::Accept } else { JobPartialResponseAction::Reject })
	}

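The `has_key_share` flag added above is what lets a node that owns no share for the requested key stay inside the ACL consensus without faking an access check: the session marks the job with `set_has_key_share(false)`, and every partial request such a node receives is answered with `Reject(false)` instead of an error, so the master can still assemble a consensus group from the remaining share owners. A minimal call-pattern sketch using only the methods introduced in this hunk (the wrapper function itself is hypothetical, not part of the patch; it assumes `KeyAccessJob`, `Signature` and the `JobExecutor` trait are in scope):

// Hypothetical helper sketched against the KeyAccessJob API shown above.
fn answer_access_request(job: &mut KeyAccessJob, has_local_share: bool, request: Signature)
	-> Result<JobPartialRequestAction<bool>, Error>
{
	// A node without a share for the negotiated key version rejects instead of failing.
	job.set_has_key_share(has_local_share);
	job.process_partial_request(request)
}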
@@ -18,16 +18,13 @@ use std::collections::{BTreeSet, BTreeMap};
use ethkey::{Public, Signature, recover};
use tiny_keccak::Keccak;
use key_server_cluster::{Error, NodeId, SessionId};
-use key_server_cluster::message::{InitializeConsensusSessionWithServersSet, InitializeConsensusSessionWithServersMap,
-	InitializeConsensusSessionWithServersSecretMap};
+use key_server_cluster::message::{InitializeConsensusSessionWithServersSet, InitializeConsensusSessionOfShareAdd};
use key_server_cluster::jobs::job_session::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor};

/// Purpose of this job is to check if requestor is administrator of SecretStore (i.e. it have access to change key servers set).
pub struct ServersSetChangeAccessJob {
	/// Servers set administrator public key (this could be changed to ACL-based check later).
	administrator: Public,
-	/// Current servers set (in session/cluster).
-	current_servers_set: BTreeSet<NodeId>,
	/// Old servers set.
	old_servers_set: Option<BTreeSet<NodeId>>,
	/// New servers set.
@@ -61,22 +58,11 @@ impl<'a> From<&'a InitializeConsensusSessionWithServersSet> for ServersSetChange
	}
}

-impl<'a> From<&'a InitializeConsensusSessionWithServersMap> for ServersSetChangeAccessRequest {
-	fn from(message: &InitializeConsensusSessionWithServersMap) -> Self {
-		ServersSetChangeAccessRequest {
-			old_servers_set: message.old_nodes_set.iter().cloned().map(Into::into).collect(),
-			new_servers_set: message.new_nodes_set.keys().cloned().map(Into::into).collect(),
-			old_set_signature: message.old_set_signature.clone().into(),
-			new_set_signature: message.new_set_signature.clone().into(),
-		}
-	}
-}
-
-impl<'a> From<&'a InitializeConsensusSessionWithServersSecretMap> for ServersSetChangeAccessRequest {
-	fn from(message: &InitializeConsensusSessionWithServersSecretMap) -> Self {
-		ServersSetChangeAccessRequest {
-			old_servers_set: message.old_nodes_set.iter().cloned().map(Into::into).collect(),
-			new_servers_set: message.new_nodes_set.keys().cloned().map(Into::into).collect(),
+impl<'a> From<&'a InitializeConsensusSessionOfShareAdd> for ServersSetChangeAccessRequest {
+	fn from(message: &InitializeConsensusSessionOfShareAdd) -> Self {
+		ServersSetChangeAccessRequest {
+			old_servers_set: message.old_nodes_set.iter().cloned().map(Into::into).collect(),
+			new_servers_set: message.new_nodes_map.keys().cloned().map(Into::into).collect(),
			old_set_signature: message.old_set_signature.clone().into(),
			new_set_signature: message.new_set_signature.clone().into(),
		}
@@ -84,10 +70,9 @@ impl<'a> From<&'a InitializeConsensusSessionWithServersSecretMap> for ServersSet
	}
}

impl ServersSetChangeAccessJob {
-	pub fn new_on_slave(administrator: Public, current_servers_set: BTreeSet<NodeId>) -> Self {
+	pub fn new_on_slave(administrator: Public) -> Self {
		ServersSetChangeAccessJob {
			administrator: administrator,
-			current_servers_set: current_servers_set,
			old_servers_set: None,
			new_servers_set: None,
			old_set_signature: None,
@@ -95,10 +80,9 @@ impl ServersSetChangeAccessJob {
		}
	}

-	pub fn new_on_master(administrator: Public, current_servers_set: BTreeSet<NodeId>, old_servers_set: BTreeSet<NodeId>, new_servers_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Self {
+	pub fn new_on_master(administrator: Public, old_servers_set: BTreeSet<NodeId>, new_servers_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Self {
		ServersSetChangeAccessJob {
			administrator: administrator,
-			current_servers_set: current_servers_set,
			old_servers_set: Some(old_servers_set),
			new_servers_set: Some(new_servers_set),
			old_set_signature: Some(old_set_signature),
@@ -134,11 +118,6 @@ impl JobExecutor for ServersSetChangeAccessJob {
			new_set_signature,
		} = partial_request;
-
-		// check that current set is exactly the same set as old set
-		if self.current_servers_set.symmetric_difference(&old_servers_set).next().is_some() {
-			return Ok(JobPartialRequestAction::Reject(false));
-		}

		// check old servers set signature
		let old_actual_public = recover(&old_set_signature, &ordered_nodes_hash(&old_servers_set).into())?;
		let new_actual_public = recover(&new_set_signature, &ordered_nodes_hash(&new_servers_set).into())?;
@@ -148,7 +127,7 @@ impl JobExecutor for ServersSetChangeAccessJob {
		Ok(if is_administrator { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) })
	}

-	fn check_partial_response(&self, partial_response: &bool) -> Result<JobPartialResponseAction, Error> {
+	fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &bool) -> Result<JobPartialResponseAction, Error> {
		Ok(if *partial_response { JobPartialResponseAction::Accept } else { JobPartialResponseAction::Reject })
	}

@@ -27,6 +27,8 @@ pub struct SigningJob {
	self_node_id: NodeId,
	/// Key share.
	key_share: DocumentKeyShare,
+	/// Key version.
+	key_version: H256,
	/// Session public key.
	session_public: Public,
	/// Session secret coefficient.
@@ -56,10 +58,11 @@ pub struct PartialSigningResponse {
}

impl SigningJob {
-	pub fn new_on_slave(self_node_id: NodeId, key_share: DocumentKeyShare, session_public: Public, session_secret_coeff: Secret) -> Result<Self, Error> {
+	pub fn new_on_slave(self_node_id: NodeId, key_share: DocumentKeyShare, key_version: H256, session_public: Public, session_secret_coeff: Secret) -> Result<Self, Error> {
		Ok(SigningJob {
			self_node_id: self_node_id,
			key_share: key_share,
+			key_version: key_version,
			session_public: session_public,
			session_secret_coeff: session_secret_coeff,
			request_id: None,
@@ -67,10 +70,11 @@ impl SigningJob {
		})
	}

-	pub fn new_on_master(self_node_id: NodeId, key_share: DocumentKeyShare, session_public: Public, session_secret_coeff: Secret, message_hash: H256) -> Result<Self, Error> {
+	pub fn new_on_master(self_node_id: NodeId, key_share: DocumentKeyShare, key_version: H256, session_public: Public, session_secret_coeff: Secret, message_hash: H256) -> Result<Self, Error> {
		Ok(SigningJob {
			self_node_id: self_node_id,
			key_share: key_share,
+			key_version: key_version,
			session_public: session_public,
			session_secret_coeff: session_secret_coeff,
			request_id: Some(math::generate_random_scalar()?),
@@ -102,14 +106,15 @@ impl JobExecutor for SigningJob {
	}

	fn process_partial_request(&mut self, partial_request: PartialSigningRequest) -> Result<JobPartialRequestAction<PartialSigningResponse>, Error> {
+		let key_version = self.key_share.version(&self.key_version).map_err(|e| Error::KeyStorage(e.into()))?;
		if partial_request.other_nodes_ids.len() != self.key_share.threshold
			|| partial_request.other_nodes_ids.contains(&self.self_node_id)
-			|| partial_request.other_nodes_ids.iter().any(|n| !self.key_share.id_numbers.contains_key(n)) {
+			|| partial_request.other_nodes_ids.iter().any(|n| !key_version.id_numbers.contains_key(n)) {
			return Err(Error::InvalidMessage);
		}

-		let self_id_number = &self.key_share.id_numbers[&self.self_node_id];
-		let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &self.key_share.id_numbers[n]);
+		let self_id_number = &key_version.id_numbers[&self.self_node_id];
+		let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &key_version.id_numbers[n]);
		let combined_hash = math::combine_message_hash_with_public(&partial_request.message_hash, &self.session_public)?;
		Ok(JobPartialRequestAction::Respond(PartialSigningResponse {
			request_id: partial_request.id,
@@ -117,14 +122,14 @@ impl JobExecutor for SigningJob {
				self.key_share.threshold,
				&combined_hash,
				&self.session_secret_coeff,
-				&self.key_share.secret_share,
+				&key_version.secret_share,
				self_id_number,
				other_id_numbers
			)?,
		}))
	}

-	fn check_partial_response(&self, partial_response: &PartialSigningResponse) -> Result<JobPartialResponseAction, Error> {
+	fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &PartialSigningResponse) -> Result<JobPartialResponseAction, Error> {
		if Some(&partial_response.request_id) != self.request_id.as_ref() {
			return Ok(JobPartialResponseAction::Ignore);
		}
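The pattern introduced above — resolve `self.key_share.version(&self.key_version)` first, then read `id_numbers` and `secret_share` off the returned version — is the same lookup every versioned session performs before touching share data. A compact sketch of that step, using only the accessors visible in this hunk (the helper function itself is illustrative, not from the patch):

// Illustrative only: resolve the negotiated version before using per-version share data.
fn resolve_share_for_signing(key_share: &DocumentKeyShare, version: &H256, self_node: &NodeId)
	-> Result<(Secret, Secret), Error>
{
	let key_version = key_share.version(version).map_err(|e| Error::KeyStorage(e.into()))?;
	// The per-version data carries both the id numbers of the share owners and this node's share.
	let self_id_number = key_version.id_numbers[self_node].clone();
	let secret_share = key_version.secret_share.clone();
	Ok((self_id_number, secret_share))
}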
@@ -54,12 +54,12 @@ impl JobExecutor for UnknownSessionsJob {

	fn process_partial_request(&mut self, partial_request: NodeId) -> Result<JobPartialRequestAction<BTreeSet<SessionId>>, Error> {
		Ok(JobPartialRequestAction::Respond(self.key_storage.iter()
-			.filter(|&(_, ref key_share)| !key_share.id_numbers.contains_key(&partial_request))
+			.filter(|&(_, ref key_share)| !key_share.versions.last().map(|v| v.id_numbers.contains_key(&partial_request)).unwrap_or(true))
			.map(|(id, _)| id.clone())
			.collect()))
	}

-	fn check_partial_response(&self, _partial_response: &BTreeSet<SessionId>) -> Result<JobPartialResponseAction, Error> {
+	fn check_partial_response(&mut self, _sender: &NodeId, _partial_response: &BTreeSet<SessionId>) -> Result<JobPartialResponseAction, Error> {
		Ok(JobPartialResponseAction::Accept)
	}

@@ -93,27 +93,6 @@ pub fn generate_random_polynom(threshold: usize) -> Result<Vec<Secret>, Error> {
		.collect()
}

-/// Compute absolute term of additional polynom1 when new node is added to the existing generation node set
-pub fn compute_additional_polynom1_absolute_term<'a, I>(secret_values: I) -> Result<Secret, Error> where I: Iterator<Item=&'a Secret> {
-	let mut absolute_term = compute_secret_sum(secret_values)?;
-	absolute_term.neg()?;
-	Ok(absolute_term)
-}
-
-/// Add two polynoms together (coeff = coeff1 + coeff2).
-pub fn add_polynoms(polynom1: &[Secret], polynom2: &[Secret], is_absolute_term2_zero: bool) -> Result<Vec<Secret>, Error> {
-	polynom1.iter().zip(polynom2.iter())
-		.enumerate()
-		.map(|(i, (c1, c2))| {
-			let mut sum_coeff = c1.clone();
-			if !is_absolute_term2_zero || i != 0 {
-				sum_coeff.add(c2)?;
-			}
-			Ok(sum_coeff)
-		})
-		.collect()
-}
-
/// Compute value of polynom, using `node_number` as argument
pub fn compute_polynom(polynom: &[Secret], node_number: &Secret) -> Result<Secret, Error> {
	debug_assert!(!polynom.is_empty());
@@ -160,13 +139,6 @@ pub fn public_values_generation(threshold: usize, derived_point: &Public, polyno
	Ok(publics)
}

-/// Generate refreshed public keys for other participants.
-pub fn refreshed_public_values_generation(threshold: usize, refreshed_polynom1: &[Secret]) -> Result<Vec<Public>, Error> {
-	debug_assert_eq!(refreshed_polynom1.len(), threshold + 1);
-
-	(0..threshold + 1).map(|i| compute_public_share(&refreshed_polynom1[i])).collect()
-}
-
/// Check keys passed by other participants.
pub fn keys_verification(threshold: usize, derived_point: &Public, number_id: &Secret, secret1: &Secret, secret2: &Secret, publics: &[Public]) -> Result<bool, Error> {
	// calculate left part
@@ -194,25 +166,14 @@ pub fn keys_verification(threshold: usize, derived_point: &Public, number_id: &S
	Ok(left == right)
}

-/// Check refreshed keys passed by other participants.
-pub fn refreshed_keys_verification(threshold: usize, number_id: &Secret, secret1: &Secret, publics: &[Public]) -> Result<bool, Error> {
-	// calculate left part
-	let mut left = math::generation_point();
-	math::public_mul_secret(&mut left, secret1)?;
-
-	// calculate right part
-	let mut right = publics[0].clone();
-	for i in 1..threshold + 1 {
-		let mut secret_pow = number_id.clone();
-		secret_pow.pow(i)?;
-
-		let mut public_k = publics[i].clone();
-		math::public_mul_secret(&mut public_k, &secret_pow)?;
-
-		math::public_add(&mut right, &public_k)?;
-	}
-
-	Ok(left == right)
-}
+/// Compute secret subshare from passed secret value.
+pub fn compute_secret_subshare<'a, I>(threshold: usize, secret_value: &Secret, sender_id_number: &Secret, other_id_numbers: I) -> Result<Secret, Error> where I: Iterator<Item=&'a Secret> {
+	let mut subshare = compute_shadow_mul(secret_value, sender_id_number, other_id_numbers)?;
+	if threshold % 2 != 0 {
+		subshare.neg()?;
+	}
+
+	Ok(subshare)
+}

/// Compute secret share.
@@ -232,12 +193,34 @@ pub fn compute_joint_public<'a, I>(public_shares: I) -> Result<Public, Error> wh
	compute_public_sum(public_shares)
}

-/// Compute joint secret key.
+/// Compute joint secret key from N secret coefficients.
#[cfg(test)]
pub fn compute_joint_secret<'a, I>(secret_coeffs: I) -> Result<Secret, Error> where I: Iterator<Item=&'a Secret> {
	compute_secret_sum(secret_coeffs)
}

+/// Compute joint secret key from t+1 secret shares.
+#[cfg(test)]
+pub fn compute_joint_secret_from_shares<'a>(t: usize, secret_shares: &[&'a Secret], id_numbers: &[&'a Secret]) -> Result<Secret, Error> {
+	let secret_share_0 = secret_shares[0];
+	let id_number_0 = id_numbers[0];
+	let other_nodes_numbers = id_numbers.iter().skip(1).cloned();
+	let mut result = compute_node_shadow(secret_share_0, id_number_0, other_nodes_numbers)?;
+	for i in 1..secret_shares.len() {
+		let secret_share_i = secret_shares[i];
+		let id_number_i = id_numbers[i];
+		let other_nodes_numbers = id_numbers.iter().enumerate().filter(|&(j, _)| j != i).map(|(_, n)| n).cloned();
+		let addendum = compute_node_shadow(secret_share_i, id_number_i, other_nodes_numbers)?;
+		result.add(&addendum)?;
+	}
+
+	if t % 2 != 0 {
+		result.neg()?;
+	}
+
+	Ok(result)
+}
+
/// Encrypt secret with joint public key.
pub fn encrypt_secret(secret: &Public, joint_public: &Public) -> Result<EncryptedSecret, Error> {
	// this is performed by KS-cluster client (or KS master)
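The new `compute_joint_secret_from_shares` helper above is ordinary Lagrange interpolation at zero, which is worth spelling out because it explains the final `neg()` for odd `t`. With shares s_i held at id numbers x_i, any t+1 of them determine the joint secret via the standard identity (stated here for reference, not text from the patch):

	s = \sum_{i=0}^{t} s_i \prod_{j \neq i} \frac{x_j}{x_j - x_i}

Each `compute_node_shadow` call contributes one term of this sum; the extra negation when `t` is odd compensates for the sign convention in how the t denominator factors (x_j - x_i) are formed, so the tests below can compare a secret reconstructed from shares directly against the original joint secret.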
@@ -492,94 +475,58 @@ pub mod tests {
		}
	}

-	fn run_key_share_refreshing(t: usize, n: usize, artifacts: &KeyGenerationArtifacts) -> KeyGenerationArtifacts {
-		// === share refreshing protocol from http://www.wu.ece.ufl.edu/mypapers/msig.pdf
-
-		// key refreshing distribution algorithm (KRD)
-		let refreshed_polynoms1: Vec<_> = (0..n).map(|_| generate_random_polynom(t).unwrap()).collect();
-		let refreshed_polynoms1_sum: Vec<_> = (0..n).map(|i| add_polynoms(&artifacts.polynoms1[i], &refreshed_polynoms1[i], true).unwrap()).collect();
-		let refreshed_secrets1: Vec<_> = (0..n).map(|i| (0..n).map(|j| compute_polynom(&refreshed_polynoms1_sum[i], &artifacts.id_numbers[j]).unwrap()).collect::<Vec<_>>()).collect();
-		let refreshed_publics: Vec<_> = (0..n).map(|i| {
-			(0..t+1).map(|j| compute_public_share(&refreshed_polynoms1_sum[i][j]).unwrap()).collect::<Vec<_>>()
-		}).collect();
-
-		// key refreshing verification algorithm (KRV)
-		(0..n).map(|i| (0..n).map(|j| if i != j {
-			assert!(refreshed_keys_verification(t, &artifacts.id_numbers[i], &refreshed_secrets1[j][i], &refreshed_publics[j]).unwrap())
-		}).collect::<Vec<_>>()).collect::<Vec<_>>();
-
-		// data, generated during keys generation
-		let public_shares: Vec<_> = (0..n).map(|i| compute_public_share(&refreshed_polynoms1_sum[i][0]).unwrap()).collect();
-		let secret_shares: Vec<_> = (0..n).map(|i| compute_secret_share(refreshed_secrets1.iter().map(|s| &s[i])).unwrap()).collect();
-
-		// joint public key, as a result of DKG
-		let joint_public = compute_joint_public(public_shares.iter()).unwrap();
-
-		KeyGenerationArtifacts {
-			id_numbers: artifacts.id_numbers.clone(),
-			polynoms1: refreshed_polynoms1_sum,
-			secrets1: refreshed_secrets1,
-			public_shares: public_shares,
-			secret_shares: secret_shares,
-			joint_public: joint_public,
-		}
-	}
-
-	fn run_key_share_refreshing_and_add_new_nodes(t: usize, n: usize, new_nodes: usize, artifacts: &KeyGenerationArtifacts) -> KeyGenerationArtifacts {
-		// === share refreshing protocol (with new node addition) from http://www.wu.ece.ufl.edu/mypapers/msig.pdf
-		let mut id_numbers: Vec<_> = artifacts.id_numbers.iter().cloned().collect();
-
-		// key refreshing distribution algorithm (KRD)
-		// for each new node: generate random polynom
-		let refreshed_polynoms1: Vec<_> = (0..n).map(|_| (0..new_nodes).map(|_| generate_random_polynom(t).unwrap()).collect::<Vec<_>>()).collect();
-		let mut refreshed_polynoms1_sum: Vec<_> = (0..n).map(|i| {
-			let mut refreshed_polynom1_sum = artifacts.polynoms1[i].clone();
-			for refreshed_polynom1 in &refreshed_polynoms1[i] {
-				refreshed_polynom1_sum = add_polynoms(&refreshed_polynom1_sum, refreshed_polynom1, false).unwrap();
-			}
-			refreshed_polynom1_sum
-		}).collect();
-
-		// new nodes receiving private information and generates its own polynom
-		let mut new_nodes_polynom1 = Vec::with_capacity(new_nodes);
-		for i in 0..new_nodes {
-			let mut new_polynom1 = generate_random_polynom(t).unwrap();
-			let new_polynom_absolute_term = compute_additional_polynom1_absolute_term(refreshed_polynoms1.iter().map(|polynom1| &polynom1[i][0])).unwrap();
-			new_polynom1[0] = new_polynom_absolute_term;
-			new_nodes_polynom1.push(new_polynom1);
-		}
-
-		// new nodes sends its own information to all other nodes
-		let n = n + new_nodes;
-		id_numbers.extend((0..new_nodes).map(|_| Random.generate().unwrap().secret().clone()));
-		refreshed_polynoms1_sum.extend(new_nodes_polynom1);
-
-		// the rest of protocol is the same as without new node
-		let refreshed_secrets1: Vec<_> = (0..n).map(|i| (0..n).map(|j| compute_polynom(&refreshed_polynoms1_sum[i], &id_numbers[j]).unwrap()).collect::<Vec<_>>()).collect();
-		let refreshed_publics: Vec<_> = (0..n).map(|i| {
-			(0..t+1).map(|j| compute_public_share(&refreshed_polynoms1_sum[i][j]).unwrap()).collect::<Vec<_>>()
-		}).collect();
-
-		// key refreshing verification algorithm (KRV)
-		(0..n).map(|i| (0..n).map(|j| if i != j {
-			assert!(refreshed_keys_verification(t, &id_numbers[i], &refreshed_secrets1[j][i], &refreshed_publics[j]).unwrap())
-		}).collect::<Vec<_>>()).collect::<Vec<_>>();
-
-		// data, generated during keys generation
-		let public_shares: Vec<_> = (0..n).map(|i| compute_public_share(&refreshed_polynoms1_sum[i][0]).unwrap()).collect();
-		let secret_shares: Vec<_> = (0..n).map(|i| compute_secret_share(refreshed_secrets1.iter().map(|s| &s[i])).unwrap()).collect();
-
-		// joint public key, as a result of DKG
-		let joint_public = compute_joint_public(public_shares.iter()).unwrap();
-
-		KeyGenerationArtifacts {
-			id_numbers: id_numbers,
-			polynoms1: refreshed_polynoms1_sum,
-			secrets1: refreshed_secrets1,
-			public_shares: public_shares,
-			secret_shares: secret_shares,
-			joint_public: joint_public,
-		}
-	}
+	fn run_key_share_refreshing(old_t: usize, new_t: usize, new_n: usize, old_artifacts: &KeyGenerationArtifacts) -> KeyGenerationArtifacts {
+		// === share refreshing protocol from
+		// === based on "Verifiable Secret Redistribution for Threshold Sharing Schemes"
+		// === http://www.cs.cmu.edu/~wing/publications/CMU-CS-02-114.pdf
+
+		// generate new id_numbers for new nodes
+		let new_nodes = new_n.saturating_sub(old_artifacts.id_numbers.len());
+		let id_numbers: Vec<_> = old_artifacts.id_numbers.iter().take(new_n).cloned()
+			.chain((0..new_nodes).map(|_| generate_random_scalar().unwrap()))
+			.collect();
+
+		// on every authorized node: generate random polynomial ai(j) = si + ... + ai[new_t - 1] * j^(new_t - 1)
+		let mut subshare_polynoms = Vec::new();
+		for i in 0..old_t+1 {
+			let mut subshare_polynom = generate_random_polynom(new_t).unwrap();
+			subshare_polynom[0] = old_artifacts.secret_shares[i].clone();
+			subshare_polynoms.push(subshare_polynom);
+		}
+
+		// on every authorized node: calculate subshare for every new node
+		let mut subshares = Vec::new();
+		for j in 0..new_n {
+			let mut subshares_to_j = Vec::new();
+			for i in 0..old_t+1 {
+				let subshare_from_i_to_j = compute_polynom(&subshare_polynoms[i], &id_numbers[j]).unwrap();
+				subshares_to_j.push(subshare_from_i_to_j);
+			}
+			subshares.push(subshares_to_j);
+		}
+
+		// on every new node: generate new share using Lagrange interpolation
+		// on every node: generate new share using Lagrange interpolation
+		let mut new_secret_shares = Vec::new();
+		for j in 0..new_n {
+			let mut subshares_to_j = Vec::new();
+			for i in 0..old_t+1 {
+				let subshare_from_i = &subshares[j][i];
+				let id_number_i = &id_numbers[i];
+				let other_id_numbers = (0usize..old_t+1).filter(|j| *j != i).map(|j| &id_numbers[j]);
+				let mut subshare_from_i = compute_shadow_mul(subshare_from_i, id_number_i, other_id_numbers).unwrap();
+				if old_t % 2 != 0 {
+					subshare_from_i.neg().unwrap();
+				}
+				subshares_to_j.push(subshare_from_i);
+			}
+			new_secret_shares.push(compute_secret_sum(subshares_to_j.iter()).unwrap());
+		}
+
+		let mut result = old_artifacts.clone();
+		result.id_numbers = id_numbers;
+		result.secret_shares = new_secret_shares;
+		result
+	}

	pub fn do_encryption_and_decryption(t: usize, joint_public: &Public, id_numbers: &[Secret], secret_shares: &[Secret], joint_secret: Option<&Secret>, document_secret_plain: Public) -> (Public, Public) {
@@ -737,39 +684,48 @@ pub mod tests {

	#[test]
	fn full_generation_math_session_with_refreshing_shares() {
-		// generate key using 6-of-10 session
-		let (t, n) = (5, 10);
-		let artifacts1 = run_key_generation(t, n, None);
+		let test_cases = vec![(1, 4), (6, 10)];
+		for (t, n) in test_cases {
+			// generate key using t-of-n session
+			let artifacts1 = run_key_generation(t, n, None);
+			let joint_secret1 = compute_joint_secret(artifacts1.polynoms1.iter().map(|p1| &p1[0])).unwrap();

		// let's say we want to refresh existing secret shares
		// by doing this every T seconds, and assuming that in each T-second period adversary KS is not able to collect t+1 secret shares
		// we can be sure that the scheme is secure
-		let artifacts2 = run_key_share_refreshing(t, n, &artifacts1);
-		assert_eq!(artifacts1.joint_public, artifacts2.joint_public);
+			let artifacts2 = run_key_share_refreshing(t, t, n, &artifacts1);
+			let joint_secret2 = compute_joint_secret_from_shares(t, &artifacts2.secret_shares.iter().take(t + 1).collect::<Vec<_>>(),
+				&artifacts2.id_numbers.iter().take(t + 1).collect::<Vec<_>>()).unwrap();
+			assert_eq!(joint_secret1, joint_secret2);

		// refresh again
-		let artifacts3 = run_key_share_refreshing(t, n, &artifacts2);
-		assert_eq!(artifacts1.joint_public, artifacts3.joint_public);
+			let artifacts3 = run_key_share_refreshing(t, t, n, &artifacts2);
+			let joint_secret3 = compute_joint_secret_from_shares(t, &artifacts3.secret_shares.iter().take(t + 1).collect::<Vec<_>>(),
+				&artifacts3.id_numbers.iter().take(t + 1).collect::<Vec<_>>()).unwrap();
+			assert_eq!(joint_secret1, joint_secret3);
+		}
	}

	#[test]
	fn full_generation_math_session_with_adding_new_nodes() {
-		// generate key using 6-of-10 session
-		let (t, n) = (5, 10);
-		let artifacts1 = run_key_generation(t, n, None);
-		let joint_secret1 = compute_joint_secret(artifacts1.polynoms1.iter().map(|p1| &p1[0])).unwrap();
+		let test_cases = vec![(1, 3), (1, 4), (6, 10)];
+		for (t, n) in test_cases {
+			// generate key using t-of-n session
+			let artifacts1 = run_key_generation(t, n, None);
+			let joint_secret1 = compute_joint_secret(artifacts1.polynoms1.iter().map(|p1| &p1[0])).unwrap();

-		// let's say we want to include additional server to the set
-		// so that scheme becames 6-of-11
-		let artifacts2 = run_key_share_refreshing_and_add_new_nodes(t, n, 1, &artifacts1);
-		let joint_secret2 = compute_joint_secret(artifacts2.polynoms1.iter().map(|p1| &p1[0])).unwrap();
-		assert_eq!(artifacts1.joint_public, artifacts2.joint_public);
+			// let's say we want to include additional couple of servers to the set
+			// so that scheme becames t-of-n+2
+			let artifacts2 = run_key_share_refreshing(t, t, n + 2, &artifacts1);
+			let joint_secret2 = compute_joint_secret_from_shares(t, &artifacts2.secret_shares.iter().take(t + 1).collect::<Vec<_>>(),
+				&artifacts2.id_numbers.iter().take(t + 1).collect::<Vec<_>>()).unwrap();
		assert_eq!(joint_secret1, joint_secret2);

-		// include another couple of servers (6-of-13)
-		let artifacts3 = run_key_share_refreshing_and_add_new_nodes(t, n + 1, 2, &artifacts2);
-		let joint_secret3 = compute_joint_secret(artifacts3.polynoms1.iter().map(|p1| &p1[0])).unwrap();
-		assert_eq!(artifacts1.joint_public, artifacts3.joint_public);
+			// include another server (t-of-n+3)
+			let artifacts3 = run_key_share_refreshing(t, t, n + 3, &artifacts2);
+			let joint_secret3 = compute_joint_secret_from_shares(t, &artifacts3.secret_shares.iter().take(t + 1).collect::<Vec<_>>(),
+				&artifacts3.id_numbers.iter().take(t + 1).collect::<Vec<_>>()).unwrap();
		assert_eq!(joint_secret1, joint_secret3);
+		}
	}
}
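The rewritten `run_key_share_refreshing` above follows the redistribution scheme cited in its comments: each of the old_t+1 participating share owners i picks a fresh polynomial a_i of degree new_t with a_i(0) = s_i, hands every node j the subshare a_i(x_j), and node j combines what it received with Lagrange coefficients into its new share. In standard notation (a reference sketch, not text from the patch):

	s'_j = \sum_{i} a_i(x_j) \prod_{k \neq i} \frac{x_k}{x_k - x_i}

Interpolating any new_t+1 of the resulting s'_j at zero yields the original joint secret again, which is exactly what both tests assert through `compute_joint_secret_from_shares` after refreshing the shares and after growing the server set.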
@@ -36,12 +36,10 @@ pub enum Message {
	Decryption(DecryptionMessage),
	/// Signing message.
	Signing(SigningMessage),
+	/// Key version negotiation message.
+	KeyVersionNegotiation(KeyVersionNegotiationMessage),
	/// Share add message.
	ShareAdd(ShareAddMessage),
-	/// Share move message.
-	ShareMove(ShareMoveMessage),
-	/// Share add message.
-	ShareRemove(ShareRemoveMessage),
	/// Servers set change message.
	ServersSetChange(ServersSetChangeMessage),
}
@@ -109,18 +107,9 @@ pub enum ConsensusMessageWithServersSet {

/// All possible messages that can be sent during share add consensus establishing.
#[derive(Clone, Debug, Serialize, Deserialize)]
-pub enum ConsensusMessageWithServersMap {
+pub enum ConsensusMessageOfShareAdd {
	/// Initialize consensus session.
-	InitializeConsensusSession(InitializeConsensusSessionWithServersMap),
-	/// Confirm/reject consensus session initialization.
-	ConfirmConsensusInitialization(ConfirmConsensusInitialization),
-}
-
-/// All possible messages that can be sent during share add consensus establishing.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub enum ConsensusMessageWithServersSecretMap {
-	/// Initialize consensus session.
-	InitializeConsensusSession(InitializeConsensusSessionWithServersSecretMap),
+	InitializeConsensusSession(InitializeConsensusSessionOfShareAdd),
	/// Confirm/reject consensus session initialization.
	ConfirmConsensusInitialization(ConfirmConsensusInitialization),
}
@@ -138,6 +127,10 @@ pub enum DecryptionMessage {
	DecryptionSessionError(DecryptionSessionError),
	/// When decryption session is completed.
	DecryptionSessionCompleted(DecryptionSessionCompleted),
+	/// When decryption session is delegated to another node.
+	DecryptionSessionDelegation(DecryptionSessionDelegation),
+	/// When delegated decryption session is completed.
+	DecryptionSessionDelegationCompleted(DecryptionSessionDelegationCompleted),
}

/// All possible messages that can be sent during signing session.
@@ -155,6 +148,10 @@ pub enum SigningMessage {
	SigningSessionError(SigningSessionError),
	/// Signing session completed.
	SigningSessionCompleted(SigningSessionCompleted),
+	/// When signing session is delegated to another node.
+	SigningSessionDelegation(SigningSessionDelegation),
+	/// When delegated signing session is completed.
+	SigningSessionDelegationCompleted(SigningSessionDelegationCompleted),
}

/// All possible messages that can be sent during servers set change session.
@@ -166,6 +163,8 @@ pub enum ServersSetChangeMessage {
	UnknownSessionsRequest(UnknownSessionsRequest),
	/// Unknown sessions ids.
	UnknownSessions(UnknownSessions),
+	/// Negotiating key version to use as a base for ShareAdd session.
+	ShareChangeKeyVersionNegotiation(ShareChangeKeyVersionNegotiation),
	/// Initialize share change session(s).
	InitializeShareChangeSession(InitializeShareChangeSession),
	/// Confirm share change session(s) initialization.
@@ -176,10 +175,6 @@ pub enum ServersSetChangeMessage {
	ServersSetChangeDelegateResponse(ServersSetChangeDelegateResponse),
	/// Share add message.
	ServersSetChangeShareAddMessage(ServersSetChangeShareAddMessage),
-	/// Share move message.
-	ServersSetChangeShareMoveMessage(ServersSetChangeShareMoveMessage),
-	/// Share remove message.
-	ServersSetChangeShareRemoveMessage(ServersSetChangeShareRemoveMessage),
	/// Servers set change session completed.
	ServersSetChangeError(ServersSetChangeError),
	/// Servers set change session completed.
@@ -193,40 +188,21 @@ pub enum ShareAddMessage {
	ShareAddConsensusMessage(ShareAddConsensusMessage),
	/// Common key share data is sent to new node.
	KeyShareCommon(KeyShareCommon),
-	/// Absolute term share of secret polynom is sent to new node.
-	NewAbsoluteTermShare(NewAbsoluteTermShare),
	/// Generated keys are sent to every node.
	NewKeysDissemination(NewKeysDissemination),
	/// When session error has occured.
	ShareAddError(ShareAddError),
}

-/// All possible messages that can be sent during share move session.
+/// All possible messages that can be sent during key version negotiation message.
#[derive(Clone, Debug, Serialize, Deserialize)]
-pub enum ShareMoveMessage {
-	/// Consensus establishing message.
-	ShareMoveConsensusMessage(ShareMoveConsensusMessage),
-	/// Share move request.
-	ShareMoveRequest(ShareMoveRequest),
-	/// Share move.
-	ShareMove(ShareMove),
-	/// Share move confirmation.
-	ShareMoveConfirm(ShareMoveConfirm),
+pub enum KeyVersionNegotiationMessage {
+	/// Request key versions.
+	RequestKeyVersions(RequestKeyVersions),
+	/// Key versions.
+	KeyVersions(KeyVersions),
	/// When session error has occured.
-	ShareMoveError(ShareMoveError),
-}
-
-/// All possible messages that can be sent during share remove session.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub enum ShareRemoveMessage {
-	/// Consensus establishing message.
-	ShareRemoveConsensusMessage(ShareRemoveConsensusMessage),
-	/// Share remove request.
-	ShareRemoveRequest(ShareRemoveRequest),
-	/// Share remove confirmation.
-	ShareRemoveConfirm(ShareRemoveConfirm),
-	/// When session error has occured.
-	ShareRemoveError(ShareRemoveError),
+	KeyVersionsError(KeyVersionsError),
}

/// Introduce node public key.
@@ -388,6 +364,8 @@ pub struct EncryptionSessionError {
pub struct InitializeConsensusSession {
	/// Requestor signature.
	pub requestor_signature: SerializableSignature,
+	/// Key version.
+	pub version: SerializableH256,
}

/// Node is responding to consensus initialization request.
@@ -412,24 +390,15 @@ pub struct InitializeConsensusSessionWithServersSet {

/// Node is asked to be part of servers-set consensus group.
#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct InitializeConsensusSessionWithServersSecretMap {
-	/// Old nodes set.
+pub struct InitializeConsensusSessionOfShareAdd {
+	/// Key version.
+	pub version: SerializableH256,
+	/// threshold+1 nodes from old_nodes_set selected for shares redistribution.
+	pub consensus_group: BTreeSet<MessageNodeId>,
+	/// Old nodes set: all non-isolated owners of selected key share version.
	pub old_nodes_set: BTreeSet<MessageNodeId>,
-	/// New nodes set.
-	pub new_nodes_set: BTreeMap<MessageNodeId, SerializableSecret>,
-	/// Old server set, signed by requester.
-	pub old_set_signature: SerializableSignature,
-	/// New server set, signed by requester.
-	pub new_set_signature: SerializableSignature,
-}
-
-/// Node is asked to be part of servers-set consensus group.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct InitializeConsensusSessionWithServersMap {
-	/// Old nodes set.
-	pub old_nodes_set: BTreeSet<MessageNodeId>,
-	/// New nodes set (keys() = new_nodes_set, values = old nodes [differs from new if share is moved]).
-	pub new_nodes_set: BTreeMap<MessageNodeId, MessageNodeId>,
+	/// New nodes map: node id => node id number.
+	pub new_nodes_map: BTreeMap<MessageNodeId, SerializableSecret>,
	/// Old server set, signed by requester.
	pub old_set_signature: SerializableSignature,
	/// New server set, signed by requester.
@@ -518,6 +487,38 @@ pub struct SigningSessionCompleted {
	pub session_nonce: u64,
}

+/// When signing session is delegated to another node.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct SigningSessionDelegation {
+	/// Encryption session Id.
+	pub session: MessageSessionId,
+	/// Decryption session Id.
+	pub sub_session: SerializableSecret,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Requestor signature.
+	pub requestor_signature: SerializableSignature,
+	/// Key version.
+	pub version: SerializableH256,
+	/// Message hash.
+	pub message_hash: SerializableH256,
+}
+
+/// When delegated signing session is completed.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct SigningSessionDelegationCompleted {
+	/// Encryption session Id.
+	pub session: MessageSessionId,
+	/// Decryption session Id.
+	pub sub_session: SerializableSecret,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// S-portion of signature.
+	pub signature_s: SerializableSecret,
+	/// C-portion of signature.
+	pub signature_c: SerializableSecret,
+}
+
/// Consensus-related decryption message.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct DecryptionConsensusMessage {
@@ -590,6 +591,41 @@ pub struct DecryptionSessionCompleted {
	pub session_nonce: u64,
}

+/// When decryption session is delegated to another node.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct DecryptionSessionDelegation {
+	/// Encryption session Id.
+	pub session: MessageSessionId,
+	/// Decryption session Id.
+	pub sub_session: SerializableSecret,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Requestor signature.
+	pub requestor_signature: SerializableSignature,
+	/// Key version.
+	pub version: SerializableH256,
+	/// Is shadow decryption requested? When true, decryption result
+	/// will be visible to the owner of requestor public key only.
+	pub is_shadow_decryption: bool,
+}
+
+/// When delegated decryption session is completed.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct DecryptionSessionDelegationCompleted {
+	/// Encryption session Id.
+	pub session: MessageSessionId,
+	/// Decryption session Id.
+	pub sub_session: SerializableSecret,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Decrypted secret point. It is partially decrypted if shadow decrpytion was requested.
+	pub decrypted_secret: SerializablePublic,
+	/// Shared common point.
+	pub common_point: Option<SerializablePublic>,
+	/// If shadow decryption was requested: shadow decryption coefficients, encrypted with requestor public.
+	pub decrypt_shadows: Option<Vec<Vec<u8>>>,
+}
+
/// Consensus-related servers set change message.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ServersSetChangeConsensusMessage {
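These two pairs of structs carry the delegation round trip used when the node that received a decryption or signing request holds no share for the negotiated key version: it forwards the request in a `*Delegation` message and the delegate answers with `*DelegationCompleted` carrying the final result. A construction sketch using the struct exactly as declared above (the wrapper function and its arguments are illustrative, not part of the patch):

// Illustrative wrapper: forward a decryption request to a share-owning delegate.
// All arguments are assumed to come from the running session and the key version negotiation.
fn delegate_decryption(session: MessageSessionId, sub_session: SerializableSecret, nonce: u64,
	requestor_signature: SerializableSignature, version: SerializableH256, is_shadow_decryption: bool) -> Message
{
	Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(DecryptionSessionDelegation {
		session: session,
		sub_session: sub_session,
		session_nonce: nonce,
		requestor_signature: requestor_signature,
		version: version,
		is_shadow_decryption: is_shadow_decryption,
	}))
}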
@@ -621,6 +657,17 @@ pub struct UnknownSessions {
	pub unknown_sessions: BTreeSet<MessageSessionId>,
}

+/// Key version negotiation message.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ShareChangeKeyVersionNegotiation {
+	/// Servers set change session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Key version negotiation message.
+	pub message: KeyVersionNegotiationMessage,
+}
+
/// Master node opens share initialize session on other nodes.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct InitializeShareChangeSession {
@@ -630,18 +677,14 @@ pub struct InitializeShareChangeSession {
	pub session_nonce: u64,
	/// Key id.
	pub key_id: MessageSessionId,
+	/// Key vesion to use in ShareAdd session.
+	pub version: SerializableH256,
	/// Master node.
	pub master_node_id: MessageNodeId,
-	/// Old nodes set.
-	pub old_shares_set: BTreeSet<MessageNodeId>,
-	/// Isolated nodes.
-	pub isolated_nodes: BTreeSet<MessageNodeId>,
+	/// Consensus group to use in ShareAdd session.
+	pub consensus_group: BTreeSet<MessageNodeId>,
	/// Shares to add. Values are filled for new nodes only.
-	pub shares_to_add: BTreeMap<MessageNodeId, SerializableSecret>,
-	/// Shares to move.
-	pub shares_to_move: BTreeMap<MessageNodeId, MessageNodeId>,
-	/// Shares to remove.
-	pub shares_to_remove: BTreeSet<MessageNodeId>,
+	pub new_nodes_map: BTreeMap<MessageNodeId, Option<SerializableSecret>>,
}

/// Slave node confirms session initialization.
@@ -688,28 +731,6 @@ pub struct ServersSetChangeShareAddMessage {
	pub message: ShareAddMessage,
}

-/// Servers set change share move message.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct ServersSetChangeShareMoveMessage {
-	/// Servers set change session Id.
-	pub session: MessageSessionId,
-	/// Session-level nonce.
-	pub session_nonce: u64,
-	/// Unknown session id.
-	pub message: ShareMoveMessage,
-}
-
-/// Servers set change share remove message.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct ServersSetChangeShareRemoveMessage {
-	/// Servers set change session Id.
-	pub session: MessageSessionId,
-	/// Session-level nonce.
-	pub session_nonce: u64,
-	/// Unknown session id.
-	pub message: ShareRemoveMessage,
-}
-
/// When servers set change session error has occured.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ServersSetChangeError {
@@ -738,7 +759,7 @@ pub struct ShareAddConsensusMessage {
	/// Session-level nonce.
	pub session_nonce: u64,
	/// Consensus message.
-	pub message: ConsensusMessageWithServersSecretMap,
+	pub message: ConsensusMessageOfShareAdd,
}

/// Key share common data is passed to new node.
@@ -756,19 +777,8 @@ pub struct KeyShareCommon {
	pub common_point: Option<SerializablePublic>,
	/// Encrypted point.
	pub encrypted_point: Option<SerializablePublic>,
-}
-
-/// Absolute term share is passed to new node.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct NewAbsoluteTermShare {
-	/// Generation session Id.
-	pub session: MessageSessionId,
-	/// Sender id number.
-	pub sender_id: SerializableSecret,
-	/// Session-level nonce.
-	pub session_nonce: u64,
-	/// Absolute term share.
-	pub absolute_term_share: SerializableSecret,
+	/// Selected version id numbers.
+	pub id_numbers: BTreeMap<MessageNodeId, SerializableSecret>,
}

/// Generated keys are sent to every node.
@@ -778,10 +788,8 @@ pub struct NewKeysDissemination {
	pub session: MessageSessionId,
	/// Session-level nonce.
	pub session_nonce: u64,
-	/// Refreshed secret1 value.
-	pub refreshed_secret1: SerializableSecret,
-	/// Refreshed public values.
-	pub refreshed_publics: Vec<SerializablePublic>,
+	/// Sub share of rcevier' secret share.
+	pub secret_subshare: SerializableSecret,
}

/// When share add session error has occured.
@@ -795,107 +803,98 @@ pub struct ShareAddError {
 	pub error: String,
 }
 
-/// Consensus-related share move session message.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct ShareMoveConsensusMessage {
-	/// Share move session Id.
-	pub session: MessageSessionId,
-	/// Session-level nonce.
-	pub session_nonce: u64,
-	/// Consensus message.
-	pub message: ConsensusMessageWithServersMap,
-}
-
-/// Share move is requested.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct ShareMoveRequest {
-	/// Generation session Id.
-	pub session: MessageSessionId,
-	/// Session-level nonce.
-	pub session_nonce: u64,
-}
-
-/// Share is moved from source to destination.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct ShareMove {
-	/// Generation session Id.
-	pub session: MessageSessionId,
-	/// Session-level nonce.
-	pub session_nonce: u64,
-	/// Author of the entry.
-	pub author: SerializablePublic,
-	/// Decryption threshold.
-	pub threshold: usize,
-	/// Nodes ids numbers.
-	pub id_numbers: BTreeMap<MessageNodeId, SerializableSecret>,
-	/// Polynom1.
-	pub polynom1: Vec<SerializableSecret>,
-	/// Node secret share.
-	pub secret_share: SerializableSecret,
-	/// Common (shared) encryption point.
-	pub common_point: Option<SerializablePublic>,
-	/// Encrypted point.
-	pub encrypted_point: Option<SerializablePublic>,
-}
-
-/// Share move is confirmed (destination node confirms to all other nodes).
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct ShareMoveConfirm {
-	/// Generation session Id.
-	pub session: MessageSessionId,
-	/// Session-level nonce.
-	pub session_nonce: u64,
-}
-
-/// When share move session error has occured.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct ShareMoveError {
-	/// Generation session Id.
-	pub session: MessageSessionId,
-	/// Session-level nonce.
-	pub session_nonce: u64,
-	/// Error message.
-	pub error: String,
-}
-
-/// Consensus-related share remove session message.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct ShareRemoveConsensusMessage {
-	/// Share move session Id.
-	pub session: MessageSessionId,
-	/// Session-level nonce.
-	pub session_nonce: u64,
-	/// Consensus message.
-	pub message: ConsensusMessageWithServersSet,
-}
-
-/// Share remove is requested.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct ShareRemoveRequest {
-	/// Generation session Id.
-	pub session: MessageSessionId,
-	/// Session-level nonce.
-	pub session_nonce: u64,
-}
-
-/// Share remove is confirmed (destination node confirms to all other nodes).
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct ShareRemoveConfirm {
-	/// Generation session Id.
-	pub session: MessageSessionId,
-	/// Session-level nonce.
-	pub session_nonce: u64,
-}
-
-/// When share remove session error has occured.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct ShareRemoveError {
-	/// Generation session Id.
-	pub session: MessageSessionId,
-	/// Session-level nonce.
-	pub session_nonce: u64,
-	/// Error message.
-	pub error: String,
+/// Key versions are requested.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct RequestKeyVersions {
+	/// Generation session id.
+	pub session: MessageSessionId,
+	/// Version negotiation session Id.
+	pub sub_session: SerializableSecret,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+}
+
+/// Key versions are sent.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct KeyVersions {
+	/// Generation session id.
+	pub session: MessageSessionId,
+	/// Version negotiation session Id.
+	pub sub_session: SerializableSecret,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Key threshold.
+	pub threshold: Option<usize>,
+	/// Key versions.
+	pub versions: Vec<SerializableH256>,
+}
+
+/// When key versions error has occured.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct KeyVersionsError {
+	/// Generation session id.
+	pub session: MessageSessionId,
+	/// Version negotiation session Id.
+	pub sub_session: SerializableSecret,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Error message.
+	pub error: String,
+}
+
+impl Message {
+	pub fn is_initialization_message(&self) -> bool {
+		match *self {
+			Message::Generation(GenerationMessage::InitializeSession(_)) => true,
+			Message::Encryption(EncryptionMessage::InitializeEncryptionSession(_)) => true,
+			Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(ref msg)) => match msg.message {
+				ConsensusMessage::InitializeConsensusSession(_) => true,
+				_ => false
+			},
+			Message::Signing(SigningMessage::SigningConsensusMessage(ref msg)) => match msg.message {
+				ConsensusMessage::InitializeConsensusSession(_) => true,
+				_ => false
+			},
+			Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::RequestKeyVersions(_)) => true,
+			Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(ref msg)) => match msg.message {
+				ConsensusMessageOfShareAdd::InitializeConsensusSession(_) => true,
+				_ => false
+			},
+			Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref msg)) => match msg.message {
+				ConsensusMessageWithServersSet::InitializeConsensusSession(_) => true,
+				_ => false
+			},
+			_ => false,
+		}
+	}
+
+	pub fn is_delegation_message(&self) -> bool {
+		match *self {
+			Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(_)) => true,
+			Message::Signing(SigningMessage::SigningSessionDelegation(_)) => true,
+			_ => false,
+		}
+	}
+
+	pub fn is_exclusive_session_message(&self) -> bool {
+		match *self {
+			Message::ServersSetChange(_) => true,
+			_ => false,
+		}
+	}
+
+	pub fn session_nonce(&self) -> Option<u64> {
+		match *self {
+			Message::Cluster(_) => None,
+			Message::Generation(ref message) => Some(message.session_nonce()),
+			Message::Encryption(ref message) => Some(message.session_nonce()),
+			Message::Decryption(ref message) => Some(message.session_nonce()),
+			Message::Signing(ref message) => Some(message.session_nonce()),
+			Message::ShareAdd(ref message) => Some(message.session_nonce()),
+			Message::ServersSetChange(ref message) => Some(message.session_nonce()),
+			Message::KeyVersionNegotiation(ref message) => Some(message.session_nonce()),
+		}
+	}
 }
 
 impl GenerationMessage {
@@ -950,6 +949,8 @@ impl DecryptionMessage {
 			DecryptionMessage::PartialDecryption(ref msg) => &msg.session,
 			DecryptionMessage::DecryptionSessionError(ref msg) => &msg.session,
 			DecryptionMessage::DecryptionSessionCompleted(ref msg) => &msg.session,
+			DecryptionMessage::DecryptionSessionDelegation(ref msg) => &msg.session,
+			DecryptionMessage::DecryptionSessionDelegationCompleted(ref msg) => &msg.session,
 		}
 	}
 
@@ -960,6 +961,8 @@ impl DecryptionMessage {
 			DecryptionMessage::PartialDecryption(ref msg) => &msg.sub_session,
 			DecryptionMessage::DecryptionSessionError(ref msg) => &msg.sub_session,
 			DecryptionMessage::DecryptionSessionCompleted(ref msg) => &msg.sub_session,
+			DecryptionMessage::DecryptionSessionDelegation(ref msg) => &msg.sub_session,
+			DecryptionMessage::DecryptionSessionDelegationCompleted(ref msg) => &msg.sub_session,
 		}
 	}
 
@@ -970,6 +973,8 @@ impl DecryptionMessage {
 			DecryptionMessage::PartialDecryption(ref msg) => msg.session_nonce,
 			DecryptionMessage::DecryptionSessionError(ref msg) => msg.session_nonce,
 			DecryptionMessage::DecryptionSessionCompleted(ref msg) => msg.session_nonce,
+			DecryptionMessage::DecryptionSessionDelegation(ref msg) => msg.session_nonce,
+			DecryptionMessage::DecryptionSessionDelegationCompleted(ref msg) => msg.session_nonce,
 		}
 	}
 }
@@ -983,6 +988,8 @@ impl SigningMessage {
 			SigningMessage::PartialSignature(ref msg) => &msg.session,
 			SigningMessage::SigningSessionError(ref msg) => &msg.session,
 			SigningMessage::SigningSessionCompleted(ref msg) => &msg.session,
+			SigningMessage::SigningSessionDelegation(ref msg) => &msg.session,
+			SigningMessage::SigningSessionDelegationCompleted(ref msg) => &msg.session,
 		}
 	}
 
@@ -994,6 +1001,8 @@ impl SigningMessage {
 			SigningMessage::PartialSignature(ref msg) => &msg.sub_session,
 			SigningMessage::SigningSessionError(ref msg) => &msg.sub_session,
 			SigningMessage::SigningSessionCompleted(ref msg) => &msg.sub_session,
+			SigningMessage::SigningSessionDelegation(ref msg) => &msg.sub_session,
+			SigningMessage::SigningSessionDelegationCompleted(ref msg) => &msg.sub_session,
 		}
 	}
 
@@ -1005,6 +1014,8 @@ impl SigningMessage {
 			SigningMessage::PartialSignature(ref msg) => msg.session_nonce,
 			SigningMessage::SigningSessionError(ref msg) => msg.session_nonce,
 			SigningMessage::SigningSessionCompleted(ref msg) => msg.session_nonce,
+			SigningMessage::SigningSessionDelegation(ref msg) => msg.session_nonce,
+			SigningMessage::SigningSessionDelegationCompleted(ref msg) => msg.session_nonce,
 		}
 	}
 }
@@ -1015,13 +1026,12 @@ impl ServersSetChangeMessage {
 			ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref msg) => &msg.session,
 			ServersSetChangeMessage::UnknownSessionsRequest(ref msg) => &msg.session,
 			ServersSetChangeMessage::UnknownSessions(ref msg) => &msg.session,
+			ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ref msg) => &msg.session,
 			ServersSetChangeMessage::InitializeShareChangeSession(ref msg) => &msg.session,
 			ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref msg) => &msg.session,
 			ServersSetChangeMessage::ServersSetChangeDelegate(ref msg) => &msg.session,
 			ServersSetChangeMessage::ServersSetChangeDelegateResponse(ref msg) => &msg.session,
 			ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref msg) => &msg.session,
-			ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ref msg) => &msg.session,
-			ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ref msg) => &msg.session,
 			ServersSetChangeMessage::ServersSetChangeError(ref msg) => &msg.session,
 			ServersSetChangeMessage::ServersSetChangeCompleted(ref msg) => &msg.session,
 		}
@@ -1032,13 +1042,12 @@ impl ServersSetChangeMessage {
 			ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref msg) => msg.session_nonce,
 			ServersSetChangeMessage::UnknownSessionsRequest(ref msg) => msg.session_nonce,
 			ServersSetChangeMessage::UnknownSessions(ref msg) => msg.session_nonce,
+			ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ref msg) => msg.session_nonce,
 			ServersSetChangeMessage::InitializeShareChangeSession(ref msg) => msg.session_nonce,
 			ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref msg) => msg.session_nonce,
 			ServersSetChangeMessage::ServersSetChangeDelegate(ref msg) => msg.session_nonce,
 			ServersSetChangeMessage::ServersSetChangeDelegateResponse(ref msg) => msg.session_nonce,
 			ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref msg) => msg.session_nonce,
-			ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ref msg) => msg.session_nonce,
-			ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ref msg) => msg.session_nonce,
 			ServersSetChangeMessage::ServersSetChangeError(ref msg) => msg.session_nonce,
 			ServersSetChangeMessage::ServersSetChangeCompleted(ref msg) => msg.session_nonce,
 		}
@@ -1050,7 +1059,6 @@ impl ShareAddMessage {
 		match *self {
 			ShareAddMessage::ShareAddConsensusMessage(ref msg) => &msg.session,
 			ShareAddMessage::KeyShareCommon(ref msg) => &msg.session,
-			ShareAddMessage::NewAbsoluteTermShare(ref msg) => &msg.session,
 			ShareAddMessage::NewKeysDissemination(ref msg) => &msg.session,
 			ShareAddMessage::ShareAddError(ref msg) => &msg.session,
 		}
@@ -1060,51 +1068,34 @@ impl ShareAddMessage {
 		match *self {
 			ShareAddMessage::ShareAddConsensusMessage(ref msg) => msg.session_nonce,
 			ShareAddMessage::KeyShareCommon(ref msg) => msg.session_nonce,
-			ShareAddMessage::NewAbsoluteTermShare(ref msg) => msg.session_nonce,
 			ShareAddMessage::NewKeysDissemination(ref msg) => msg.session_nonce,
 			ShareAddMessage::ShareAddError(ref msg) => msg.session_nonce,
 		}
 	}
 }
 
-impl ShareMoveMessage {
+impl KeyVersionNegotiationMessage {
 	pub fn session_id(&self) -> &SessionId {
 		match *self {
-			ShareMoveMessage::ShareMoveConsensusMessage(ref msg) => &msg.session,
-			ShareMoveMessage::ShareMoveRequest(ref msg) => &msg.session,
-			ShareMoveMessage::ShareMove(ref msg) => &msg.session,
-			ShareMoveMessage::ShareMoveConfirm(ref msg) => &msg.session,
-			ShareMoveMessage::ShareMoveError(ref msg) => &msg.session,
+			KeyVersionNegotiationMessage::RequestKeyVersions(ref msg) => &msg.session,
+			KeyVersionNegotiationMessage::KeyVersions(ref msg) => &msg.session,
+			KeyVersionNegotiationMessage::KeyVersionsError(ref msg) => &msg.session,
+		}
+	}
+
+	pub fn sub_session_id(&self) -> &Secret {
+		match *self {
+			KeyVersionNegotiationMessage::RequestKeyVersions(ref msg) => &msg.sub_session,
+			KeyVersionNegotiationMessage::KeyVersions(ref msg) => &msg.sub_session,
+			KeyVersionNegotiationMessage::KeyVersionsError(ref msg) => &msg.sub_session,
 		}
 	}
 
 	pub fn session_nonce(&self) -> u64 {
 		match *self {
-			ShareMoveMessage::ShareMoveConsensusMessage(ref msg) => msg.session_nonce,
-			ShareMoveMessage::ShareMoveRequest(ref msg) => msg.session_nonce,
-			ShareMoveMessage::ShareMove(ref msg) => msg.session_nonce,
-			ShareMoveMessage::ShareMoveConfirm(ref msg) => msg.session_nonce,
-			ShareMoveMessage::ShareMoveError(ref msg) => msg.session_nonce,
-		}
-	}
-}
-
-impl ShareRemoveMessage {
-	pub fn session_id(&self) -> &SessionId {
-		match *self {
-			ShareRemoveMessage::ShareRemoveConsensusMessage(ref msg) => &msg.session,
-			ShareRemoveMessage::ShareRemoveRequest(ref msg) => &msg.session,
-			ShareRemoveMessage::ShareRemoveConfirm(ref msg) => &msg.session,
-			ShareRemoveMessage::ShareRemoveError(ref msg) => &msg.session,
-		}
-	}
-
-	pub fn session_nonce(&self) -> u64 {
-		match *self {
-			ShareRemoveMessage::ShareRemoveConsensusMessage(ref msg) => msg.session_nonce,
-			ShareRemoveMessage::ShareRemoveRequest(ref msg) => msg.session_nonce,
-			ShareRemoveMessage::ShareRemoveConfirm(ref msg) => msg.session_nonce,
-			ShareRemoveMessage::ShareRemoveError(ref msg) => msg.session_nonce,
+			KeyVersionNegotiationMessage::RequestKeyVersions(ref msg) => msg.session_nonce,
+			KeyVersionNegotiationMessage::KeyVersions(ref msg) => msg.session_nonce,
+			KeyVersionNegotiationMessage::KeyVersionsError(ref msg) => msg.session_nonce,
 		}
 	}
 }
@@ -1119,8 +1110,7 @@ impl fmt::Display for Message {
 			Message::Signing(ref message) => write!(f, "Signing.{}", message),
 			Message::ServersSetChange(ref message) => write!(f, "ServersSetChange.{}", message),
 			Message::ShareAdd(ref message) => write!(f, "ShareAdd.{}", message),
-			Message::ShareMove(ref message) => write!(f, "ShareMove.{}", message),
-			Message::ShareRemove(ref message) => write!(f, "ShareRemove.{}", message),
+			Message::KeyVersionNegotiation(ref message) => write!(f, "KeyVersionNegotiation.{}", message),
 		}
 	}
 }
@@ -1164,7 +1154,7 @@ impl fmt::Display for ConsensusMessage {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		match *self {
 			ConsensusMessage::InitializeConsensusSession(_) => write!(f, "InitializeConsensusSession"),
-			ConsensusMessage::ConfirmConsensusInitialization(_) => write!(f, "ConfirmConsensusInitialization"),
+			ConsensusMessage::ConfirmConsensusInitialization(ref msg) => write!(f, "ConfirmConsensusInitialization({})", msg.is_confirmed),
 		}
 	}
 }
@@ -1178,20 +1168,11 @@ impl fmt::Display for ConsensusMessageWithServersSet {
 	}
 }
 
-impl fmt::Display for ConsensusMessageWithServersMap {
+impl fmt::Display for ConsensusMessageOfShareAdd {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		match *self {
-			ConsensusMessageWithServersMap::InitializeConsensusSession(_) => write!(f, "InitializeConsensusSession"),
-			ConsensusMessageWithServersMap::ConfirmConsensusInitialization(_) => write!(f, "ConfirmConsensusInitialization"),
-		}
-	}
-}
-
-impl fmt::Display for ConsensusMessageWithServersSecretMap {
-	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-		match *self {
-			ConsensusMessageWithServersSecretMap::InitializeConsensusSession(_) => write!(f, "InitializeConsensusSession"),
-			ConsensusMessageWithServersSecretMap::ConfirmConsensusInitialization(_) => write!(f, "ConfirmConsensusInitialization"),
+			ConsensusMessageOfShareAdd::InitializeConsensusSession(_) => write!(f, "InitializeConsensusSession"),
+			ConsensusMessageOfShareAdd::ConfirmConsensusInitialization(_) => write!(f, "ConfirmConsensusInitialization"),
 		}
 	}
 }
@@ -1204,6 +1185,8 @@ impl fmt::Display for DecryptionMessage {
 			DecryptionMessage::PartialDecryption(_) => write!(f, "PartialDecryption"),
 			DecryptionMessage::DecryptionSessionError(_) => write!(f, "DecryptionSessionError"),
 			DecryptionMessage::DecryptionSessionCompleted(_) => write!(f, "DecryptionSessionCompleted"),
+			DecryptionMessage::DecryptionSessionDelegation(_) => write!(f, "DecryptionSessionDelegation"),
+			DecryptionMessage::DecryptionSessionDelegationCompleted(_) => write!(f, "DecryptionSessionDelegationCompleted"),
 		}
 	}
 }
@@ -1217,6 +1200,8 @@ impl fmt::Display for SigningMessage {
 			SigningMessage::PartialSignature(_) => write!(f, "PartialSignature"),
 			SigningMessage::SigningSessionError(_) => write!(f, "SigningSessionError"),
 			SigningMessage::SigningSessionCompleted(_) => write!(f, "SigningSessionCompleted"),
+			SigningMessage::SigningSessionDelegation(_) => write!(f, "SigningSessionDelegation"),
+			SigningMessage::SigningSessionDelegationCompleted(_) => write!(f, "SigningSessionDelegationCompleted"),
 		}
 	}
 }
@@ -1227,13 +1212,12 @@ impl fmt::Display for ServersSetChangeMessage {
 			ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref m) => write!(f, "ServersSetChangeConsensusMessage.{}", m.message),
 			ServersSetChangeMessage::UnknownSessionsRequest(_) => write!(f, "UnknownSessionsRequest"),
 			ServersSetChangeMessage::UnknownSessions(_) => write!(f, "UnknownSessions"),
+			ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ref m) => write!(f, "ShareChangeKeyVersionNegotiation.{}", m.message),
 			ServersSetChangeMessage::InitializeShareChangeSession(_) => write!(f, "InitializeShareChangeSession"),
 			ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(_) => write!(f, "ConfirmShareChangeSessionInitialization"),
 			ServersSetChangeMessage::ServersSetChangeDelegate(_) => write!(f, "ServersSetChangeDelegate"),
 			ServersSetChangeMessage::ServersSetChangeDelegateResponse(_) => write!(f, "ServersSetChangeDelegateResponse"),
 			ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref m) => write!(f, "ServersSetChangeShareAddMessage.{}", m.message),
-			ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ref m) => write!(f, "ServersSetChangeShareMoveMessage.{}", m.message),
-			ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ref m) => write!(f, "ServersSetChangeShareRemoveMessage.{}", m.message),
 			ServersSetChangeMessage::ServersSetChangeError(_) => write!(f, "ServersSetChangeError"),
 			ServersSetChangeMessage::ServersSetChangeCompleted(_) => write!(f, "ServersSetChangeCompleted"),
 		}
@@ -1245,7 +1229,6 @@ impl fmt::Display for ShareAddMessage {
 		match *self {
 			ShareAddMessage::ShareAddConsensusMessage(ref m) => write!(f, "ShareAddConsensusMessage.{}", m.message),
 			ShareAddMessage::KeyShareCommon(_) => write!(f, "KeyShareCommon"),
-			ShareAddMessage::NewAbsoluteTermShare(_) => write!(f, "NewAbsoluteTermShare"),
 			ShareAddMessage::NewKeysDissemination(_) => write!(f, "NewKeysDissemination"),
 			ShareAddMessage::ShareAddError(_) => write!(f, "ShareAddError"),
 
@@ -1253,25 +1236,12 @@ impl fmt::Display for ShareAddMessage {
 		}
 	}
 }
 
-impl fmt::Display for ShareMoveMessage {
+impl fmt::Display for KeyVersionNegotiationMessage {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		match *self {
-			ShareMoveMessage::ShareMoveConsensusMessage(ref m) => write!(f, "ShareMoveConsensusMessage.{}", m.message),
-			ShareMoveMessage::ShareMoveRequest(_) => write!(f, "ShareMoveRequest"),
-			ShareMoveMessage::ShareMove(_) => write!(f, "ShareMove"),
-			ShareMoveMessage::ShareMoveConfirm(_) => write!(f, "ShareMoveConfirm"),
-			ShareMoveMessage::ShareMoveError(_) => write!(f, "ShareMoveError"),
-		}
-	}
-}
-
-impl fmt::Display for ShareRemoveMessage {
-	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-		match *self {
-			ShareRemoveMessage::ShareRemoveConsensusMessage(ref m) => write!(f, "InitializeShareRemoveSession.{}", m.message),
-			ShareRemoveMessage::ShareRemoveRequest(_) => write!(f, "ShareRemoveRequest"),
-			ShareRemoveMessage::ShareRemoveConfirm(_) => write!(f, "ShareRemoveConfirm"),
-			ShareRemoveMessage::ShareRemoveError(_) => write!(f, "ShareRemoveError"),
+			KeyVersionNegotiationMessage::RequestKeyVersions(_) => write!(f, "RequestKeyVersions"),
+			KeyVersionNegotiationMessage::KeyVersions(_) => write!(f, "KeyVersions"),
+			KeyVersionNegotiationMessage::KeyVersionsError(_) => write!(f, "KeyVersionsError"),
 		}
 	}
 }
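The three messages above replace the old share-move/share-remove traffic with a plain query/answer exchange: the session master broadcasts RequestKeyVersions for a (session, sub_session) pair, and every reachable node answers with KeyVersions listing the version hashes of the share it holds (or an empty list and no threshold if it holds none). Below is a hedged, self-contained sketch of that exchange; the types are simplified local stand-ins, not the crate's serializable messages, and the storage map is an assumption standing in for the real key storage.

// Sketch of the key-version negotiation answer a node would produce.
use std::collections::BTreeMap;

#[derive(Debug)]
struct RequestKeyVersions { session: u64, sub_session: u64, session_nonce: u64 }

#[derive(Debug)]
struct KeyVersions { session: u64, sub_session: u64, session_nonce: u64, threshold: Option<usize>, versions: Vec<[u8; 32]> }

/// Answer a version request from the versions this node knows for each key
/// (assumption: in the real code this comes from key storage, not a BTreeMap).
fn answer(req: &RequestKeyVersions, local: &BTreeMap<u64, (usize, Vec<[u8; 32]>)>) -> KeyVersions {
	let (threshold, versions) = match local.get(&req.session) {
		Some((t, v)) => (Some(*t), v.clone()),
		None => (None, Vec::new()), // a node without a share still answers
	};
	KeyVersions { session: req.session, sub_session: req.sub_session, session_nonce: req.session_nonce, threshold, versions }
}

fn main() {
	let mut local = BTreeMap::new();
	local.insert(1u64, (1usize, vec![[0u8; 32]]));
	let reply = answer(&RequestKeyVersions { session: 1, sub_session: 7, session_nonce: 42 }, &local);
	println!("node reports {} version(s), threshold {:?}", reply.versions.len(), reply.threshold);
}

The sub_session id is carried on every message, presumably so that several negotiations for the same key can run concurrently without their answers being confused.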
@@ -23,7 +23,7 @@ use super::types::all::ServerKeyId;
 pub use super::traits::NodeKeyPair;
 pub use super::types::all::{NodeId, EncryptedDocumentKeyShadow};
 pub use super::acl_storage::AclStorage;
-pub use super::key_storage::{KeyStorage, DocumentKeyShare};
+pub use super::key_storage::{KeyStorage, DocumentKeyShare, DocumentKeyShareVersion};
 pub use super::key_server_set::KeyServerSet;
 pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic, SerializableMessageHash};
 pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient};
@@ -95,6 +95,8 @@ pub enum Error {
 	ReplayProtection,
 	/// Connection to node, required for this session is not established.
 	NodeDisconnected,
+	/// Node is missing requested key share.
+	MissingKeyShare,
 	/// Cryptographic error.
 	EthKey(String),
 	/// I/O error has occured.
@@ -150,6 +152,7 @@ impl fmt::Display for Error {
 			Error::InvalidMessageVersion => write!(f, "unsupported message is received"),
 			Error::ReplayProtection => write!(f, "replay message is received"),
 			Error::NodeDisconnected => write!(f, "node required for this operation is currently disconnected"),
+			Error::MissingKeyShare => write!(f, "requested key share version is not found"),
 			Error::EthKey(ref e) => write!(f, "cryptographic error {}", e),
 			Error::Io(ref e) => write!(f, "i/o error {}", e),
 			Error::Serde(ref e) => write!(f, "serde error {}", e),
@@ -171,18 +174,19 @@ impl Into<String> for Error {
 mod admin_sessions;
 mod client_sessions;
 
+pub use self::admin_sessions::key_version_negotiation_session;
 pub use self::admin_sessions::servers_set_change_session;
 pub use self::admin_sessions::share_add_session;
 pub use self::admin_sessions::share_change_session;
-pub use self::admin_sessions::share_move_session;
-pub use self::admin_sessions::share_remove_session;
 
 pub use self::client_sessions::decryption_session;
 pub use self::client_sessions::encryption_session;
 pub use self::client_sessions::generation_session;
 pub use self::client_sessions::signing_session;
 
 mod cluster;
 mod cluster_sessions;
+mod cluster_sessions_creator;
 mod io;
 mod jobs;
 pub mod math;
@@ -17,10 +17,12 @@
 use std::path::PathBuf;
 use std::collections::BTreeMap;
 use serde_json;
+use tiny_keccak::Keccak;
+use bigint::hash::H256;
 use ethkey::{Secret, Public};
 use kvdb_rocksdb::{Database, DatabaseIterator};
 use types::all::{Error, ServiceConfiguration, ServerKeyId, NodeId};
-use serialization::{SerializablePublic, SerializableSecret};
+use serialization::{SerializablePublic, SerializableSecret, SerializableH256};
 
 /// Key of version value.
 const DB_META_KEY_VERSION: &'static [u8; 7] = b"version";
@@ -28,6 +30,8 @@ const DB_META_KEY_VERSION: &'static [u8; 7] = b"version";
 const CURRENT_VERSION: u8 = 2;
 /// Current type of serialized key shares.
 type CurrentSerializableDocumentKeyShare = SerializableDocumentKeyShareV2;
+/// Current type of serialized key shares versions.
+type CurrentSerializableDocumentKeyVersion = SerializableDocumentKeyShareVersionV2;
 
 /// Encrypted key share, stored by key storage on the single key server.
 #[derive(Debug, Clone, PartialEq)]
@@ -36,16 +40,23 @@ pub struct DocumentKeyShare {
 	pub author: Public,
 	/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
 	pub threshold: usize,
-	/// Nodes ids numbers.
-	pub id_numbers: BTreeMap<NodeId, Secret>,
-	/// Polynom1.
-	pub polynom1: Vec<Secret>,
-	/// Node secret share.
-	pub secret_share: Secret,
 	/// Common (shared) encryption point.
 	pub common_point: Option<Public>,
 	/// Encrypted point.
 	pub encrypted_point: Option<Public>,
+	/// Key share versions.
+	pub versions: Vec<DocumentKeyShareVersion>,
+}
+
+/// Versioned portion of document key share.
+#[derive(Debug, Clone, PartialEq)]
+pub struct DocumentKeyShareVersion {
+	/// Version hash (Keccak(time + id_numbers)).
+	pub hash: H256,
+	/// Nodes ids numbers.
+	pub id_numbers: BTreeMap<NodeId, Secret>,
+	/// Node secret share.
+	pub secret_share: Secret,
 }
 
 /// Document encryption keys storage
@@ -55,9 +66,11 @@ pub trait KeyStorage: Send + Sync {
 	/// Update document encryption key
 	fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error>;
 	/// Get document encryption key
-	fn get(&self, document: &ServerKeyId) -> Result<DocumentKeyShare, Error>;
+	fn get(&self, document: &ServerKeyId) -> Result<Option<DocumentKeyShare>, Error>;
 	/// Remove document encryption key
 	fn remove(&self, document: &ServerKeyId) -> Result<(), Error>;
+	/// Clears the database
+	fn clear(&self) -> Result<(), Error>;
 	/// Check if storage contains document encryption key
 	fn contains(&self, document: &ServerKeyId) -> bool;
 	/// Iterate through storage
@@ -113,16 +126,22 @@ struct SerializableDocumentKeyShareV2 {
 	pub author: SerializablePublic,
 	/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
 	pub threshold: usize,
-	/// Nodes ids numbers.
-	pub id_numbers: BTreeMap<SerializablePublic, SerializableSecret>,
-	/// Polynom1.
-	pub polynom1: Vec<SerializableSecret>,
-	/// Node secret share.
-	pub secret_share: SerializableSecret,
 	/// Common (shared) encryption point.
 	pub common_point: Option<SerializablePublic>,
 	/// Encrypted point.
 	pub encrypted_point: Option<SerializablePublic>,
+	/// Versions.
+	pub versions: Vec<SerializableDocumentKeyShareVersionV2>}
+
+/// V2 of encrypted key share version, as it is stored by key storage on the single key server.
+#[derive(Serialize, Deserialize)]
+struct SerializableDocumentKeyShareVersionV2 {
+	/// Version hash.
+	pub hash: SerializableH256,
+	/// Nodes ids numbers.
+	pub id_numbers: BTreeMap<SerializablePublic, SerializableSecret>,
+	/// Node secret share.
+	pub secret_share: SerializableSecret,
 }
 
 impl PersistentKeyStorage {
@@ -150,18 +169,20 @@ fn upgrade_db(db: Database) -> Result<Database, Error> {
 			batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]);
 			for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner).filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) {
 				let v0_key = serde_json::from_slice::<SerializableDocumentKeyShareV0>(&db_value).map_err(|e| Error::Database(e.to_string()))?;
-				let v2_key = CurrentSerializableDocumentKeyShare {
+				let current_key = CurrentSerializableDocumentKeyShare {
 					// author is used in separate generation + encrypt sessions.
 					// in v0 there have been only simultaneous GenEnc sessions.
 					author: Public::default().into(), // added in v1
 					threshold: v0_key.threshold,
-					id_numbers: v0_key.id_numbers,
-					secret_share: v0_key.secret_share,
-					polynom1: Vec::new(), // added in v2
 					common_point: Some(v0_key.common_point),
 					encrypted_point: Some(v0_key.encrypted_point),
+					versions: vec![CurrentSerializableDocumentKeyVersion {
+						hash: DocumentKeyShareVersion::data_hash(v0_key.id_numbers.iter().map(|(k, v)| (&***k, &****v))).into(),
+						id_numbers: v0_key.id_numbers,
+						secret_share: v0_key.secret_share,
+					}],
 				};
-				let db_value = serde_json::to_vec(&v2_key).map_err(|e| Error::Database(e.to_string()))?;
+				let db_value = serde_json::to_vec(&current_key).map_err(|e| Error::Database(e.to_string()))?;
 				batch.put(None, &*db_key, &*db_value);
 			}
 			db.write(batch)?;
@@ -172,16 +193,18 @@ fn upgrade_db(db: Database) -> Result<Database, Error> {
 			batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]);
 			for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner).filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) {
 				let v1_key = serde_json::from_slice::<SerializableDocumentKeyShareV1>(&db_value).map_err(|e| Error::Database(e.to_string()))?;
-				let v2_key = CurrentSerializableDocumentKeyShare {
+				let current_key = CurrentSerializableDocumentKeyShare {
 					author: v1_key.author, // added in v1
 					threshold: v1_key.threshold,
-					id_numbers: v1_key.id_numbers,
-					secret_share: v1_key.secret_share,
-					polynom1: Vec::new(), // added in v2
 					common_point: v1_key.common_point,
 					encrypted_point: v1_key.encrypted_point,
+					versions: vec![CurrentSerializableDocumentKeyVersion {
+						hash: DocumentKeyShareVersion::data_hash(v1_key.id_numbers.iter().map(|(k, v)| (&***k, &****v))).into(),
+						id_numbers: v1_key.id_numbers,
+						secret_share: v1_key.secret_share,
+					}],
 				};
-				let db_value = serde_json::to_vec(&v2_key).map_err(|e| Error::Database(e.to_string()))?;
+				let db_value = serde_json::to_vec(&current_key).map_err(|e| Error::Database(e.to_string()))?;
 				batch.put(None, &*db_key, &*db_value);
 			}
 			db.write(batch)?;
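Both upgrade arms follow the same rule: the single (id_numbers, secret_share) pair of a v0/v1 record becomes the sole entry of versions, with its hash derived from the id numbers. A hedged sketch of that invariant with local stand-in types (not the serializable ones used above, and with a toy digest in place of data_hash):

use std::collections::BTreeMap;

struct OldShare { threshold: usize, id_numbers: BTreeMap<Vec<u8>, Vec<u8>>, secret_share: Vec<u8> }
struct Version { hash: [u8; 32], id_numbers: BTreeMap<Vec<u8>, Vec<u8>>, secret_share: Vec<u8> }
struct NewShare { threshold: usize, versions: Vec<Version> }

// Stand-in for DocumentKeyShareVersion::data_hash: any stable digest over the
// (node, id_number) byte pairs works for the purposes of this sketch.
fn data_hash(pairs: &BTreeMap<Vec<u8>, Vec<u8>>) -> [u8; 32] {
	let mut acc = [0u8; 32];
	for (i, byte) in pairs.iter().flat_map(|(k, v)| k.iter().chain(v.iter())).enumerate() {
		acc[i % 32] ^= *byte;
	}
	acc
}

fn upgrade(old: OldShare) -> NewShare {
	NewShare {
		threshold: old.threshold,
		versions: vec![Version {
			hash: data_hash(&old.id_numbers),
			id_numbers: old.id_numbers,
			secret_share: old.secret_share,
		}],
	}
}

fn main() {
	let old = OldShare { threshold: 1, id_numbers: BTreeMap::new(), secret_share: vec![0x42] };
	let new = upgrade(old);
	assert_eq!(new.versions.len(), 1); // exactly one version after migration
}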
@@ -205,12 +228,16 @@ impl KeyStorage for PersistentKeyStorage {
 		self.insert(document, key)
 	}
 
-	fn get(&self, document: &ServerKeyId) -> Result<DocumentKeyShare, Error> {
-		self.db.get(None, document)?
-			.ok_or(Error::DocumentNotFound)
-			.map(|key| key.into_vec())
-			.and_then(|key| serde_json::from_slice::<CurrentSerializableDocumentKeyShare>(&key).map_err(|e| Error::Database(e.to_string())))
-			.map(Into::into)
+	fn get(&self, document: &ServerKeyId) -> Result<Option<DocumentKeyShare>, Error> {
+		self.db.get(None, document)
+			.map_err(|e| Error::Database(e.to_string()))
+			.and_then(|key| match key {
+				None => Ok(None),
+				Some(key) => serde_json::from_slice::<CurrentSerializableDocumentKeyShare>(&key)
+					.map_err(|e| Error::Database(e.to_string()))
+					.map(Into::into)
+					.map(Some),
+			})
 	}
 
 	fn remove(&self, document: &ServerKeyId) -> Result<(), Error> {
@@ -219,6 +246,15 @@ impl KeyStorage for PersistentKeyStorage {
 		self.db.write(batch).map_err(Into::into)
 	}
 
+	fn clear(&self) -> Result<(), Error> {
+		let mut batch = self.db.transaction();
+		for (key, _) in self.iter() {
+			batch.delete(None, &key);
+		}
+		self.db.write(batch)
+			.map_err(|e| Error::Database(e.to_string()))
+	}
+
 	fn contains(&self, document: &ServerKeyId) -> bool {
 		self.db.get(None, document)
 			.map(|k| k.is_some())
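With the trait change above, get now distinguishes "key is absent" (Ok(None)) from a storage failure (Err(Error::Database(..))). A hedged sketch of how a caller might consume the new contract, with simplified local types; the require_share helper is hypothetical, but MissingKeyShare mirrors the error variant added to the cluster error enum earlier in this change:

use std::collections::BTreeMap;

#[derive(Debug, PartialEq)]
enum Error { Database(String), MissingKeyShare }

#[derive(Clone, Debug, PartialEq)]
struct DocumentKeyShare { threshold: usize }

trait KeyStorage {
	fn get(&self, document: &[u8; 32]) -> Result<Option<DocumentKeyShare>, Error>;
}

struct MemoryKeyStorage { keys: BTreeMap<[u8; 32], DocumentKeyShare> }

impl KeyStorage for MemoryKeyStorage {
	fn get(&self, document: &[u8; 32]) -> Result<Option<DocumentKeyShare>, Error> {
		Ok(self.keys.get(document).cloned()) // absence is no longer an error
	}
}

/// A session that needs a local share maps the absent case to MissingKeyShare,
/// while real I/O problems keep surfacing as Error::Database.
fn require_share<S: KeyStorage>(storage: &S, id: &[u8; 32]) -> Result<DocumentKeyShare, Error> {
	storage.get(id)?.ok_or(Error::MissingKeyShare)
}

fn main() {
	let storage = MemoryKeyStorage { keys: BTreeMap::new() };
	assert_eq!(require_share(&storage, &[0u8; 32]), Err(Error::MissingKeyShare));
}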
@@ -244,29 +280,68 @@ impl<'a> Iterator for PersistentKeyStorageIterator<'a> {
 	}
 }
 
+impl DocumentKeyShare {
+	/// Get last version reference.
+	#[cfg(test)]
+	pub fn last_version(&self) -> Result<&DocumentKeyShareVersion, Error> {
+		self.versions.iter().rev()
+			.nth(0)
+			.ok_or_else(|| Error::Database("key version is not found".into()))
+	}
+
+	/// Get given version reference.
+	pub fn version(&self, version: &H256) -> Result<&DocumentKeyShareVersion, Error> {
+		self.versions.iter().rev()
+			.find(|v| &v.hash == version)
+			.ok_or_else(|| Error::Database("key version is not found".into()))
+	}
+}
+
+impl DocumentKeyShareVersion {
+	/// Create new version
+	pub fn new(id_numbers: BTreeMap<NodeId, Secret>, secret_share: Secret) -> Self {
+		DocumentKeyShareVersion {
+			hash: Self::data_hash(id_numbers.iter().map(|(k, v)| (&**k, &***v))),
+			id_numbers: id_numbers,
+			secret_share: secret_share,
+		}
+	}
+
+	/// Calculate hash of given version data.
+	pub fn data_hash<'a, I>(id_numbers: I) -> H256 where I: Iterator<Item=(&'a [u8], &'a [u8])> {
+		let mut nodes_keccak = Keccak::new_keccak256();
+
+		for (node, node_number) in id_numbers {
+			nodes_keccak.update(node);
+			nodes_keccak.update(node_number);
+		}
+
+		let mut nodes_keccak_value = [0u8; 32];
+		nodes_keccak.finalize(&mut nodes_keccak_value);
+
+		nodes_keccak_value.into()
+	}
+}
+
 impl From<DocumentKeyShare> for SerializableDocumentKeyShareV2 {
 	fn from(key: DocumentKeyShare) -> Self {
 		SerializableDocumentKeyShareV2 {
 			author: key.author.into(),
 			threshold: key.threshold,
-			id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
-			secret_share: key.secret_share.into(),
-			polynom1: key.polynom1.into_iter().map(Into::into).collect(),
 			common_point: key.common_point.map(Into::into),
 			encrypted_point: key.encrypted_point.map(Into::into),
+			versions: key.versions.into_iter().map(Into::into).collect(),
 		}
 	}
 }
 
-impl From<DocumentKeyShare> for SerializableDocumentKeyShareV1 {
-	fn from(key: DocumentKeyShare) -> Self {
-		SerializableDocumentKeyShareV1 {
-			author: key.author.into(),
-			threshold: key.threshold,
-			id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
-			secret_share: key.secret_share.into(),
-			common_point: key.common_point.map(Into::into),
-			encrypted_point: key.encrypted_point.map(Into::into),
+impl From<DocumentKeyShareVersion> for SerializableDocumentKeyShareVersionV2 {
+	fn from(version: DocumentKeyShareVersion) -> Self {
+		SerializableDocumentKeyShareVersionV2 {
+			hash: version.hash.into(),
+			id_numbers: version.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
+			secret_share: version.secret_share.into(),
 		}
 	}
 }
@@ -276,11 +351,15 @@ impl From<SerializableDocumentKeyShareV2> for DocumentKeyShare {
 		DocumentKeyShare {
 			author: key.author.into(),
 			threshold: key.threshold,
-			id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
-			secret_share: key.secret_share.into(),
-			polynom1: key.polynom1.into_iter().map(Into::into).collect(),
 			common_point: key.common_point.map(Into::into),
 			encrypted_point: key.encrypted_point.map(Into::into),
+			versions: key.versions.into_iter()
+				.map(|v| DocumentKeyShareVersion {
+					hash: v.hash.into(),
+					id_numbers: v.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
+					secret_share: v.secret_share.into(),
+				})
+				.collect(),
 		}
 	}
 }
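data_hash commits to the exact (node id, id number) pairs of a version, so two nodes compute the same version hash exactly when they record the same node set with the same id numbers; version(&hash) then picks the matching entry, searching newest-first. A small usage sketch, under the assumption that the tiny_keccak crate used here exposes the same Keccak::new_keccak256 / update / finalize calls as in the code above:

extern crate tiny_keccak;

use tiny_keccak::Keccak;

// Recompute a version hash over (node, id_number) byte pairs, mirroring
// DocumentKeyShareVersion::data_hash: keccak256 over node || id_number, in order.
fn data_hash<'a, I>(id_numbers: I) -> [u8; 32] where I: Iterator<Item = (&'a [u8], &'a [u8])> {
	let mut keccak = Keccak::new_keccak256();
	for (node, id_number) in id_numbers {
		keccak.update(node);
		keccak.update(id_number);
	}
	let mut out = [0u8; 32];
	keccak.finalize(&mut out);
	out
}

fn main() {
	let pairs: Vec<(Vec<u8>, Vec<u8>)> = vec![
		(vec![1u8; 64], vec![10u8; 32]), // (node public key bytes, id number bytes) - toy values
		(vec![2u8; 64], vec![20u8; 32]),
	];
	let hash = data_hash(pairs.iter().map(|&(ref k, ref v)| (&k[..], &v[..])));
	// The same pairs always produce the same hash, so a node can match the hash
	// of its own share against the hashes announced during version negotiation.
	assert_eq!(hash, data_hash(pairs.iter().map(|&(ref k, ref v)| (&k[..], &v[..]))));
}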
@@ -295,7 +374,7 @@ pub mod tests {
 	use kvdb_rocksdb::Database;
 	use types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, ServerKeyId};
 	use super::{DB_META_KEY_VERSION, CURRENT_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare,
-		SerializableDocumentKeyShareV0, SerializableDocumentKeyShareV1,
+		DocumentKeyShareVersion, SerializableDocumentKeyShareV0, SerializableDocumentKeyShareV1,
 		CurrentSerializableDocumentKeyShare, upgrade_db};
 
 	/// In-memory document encryption keys storage
@@ -315,8 +394,8 @@ pub mod tests {
 			Ok(())
 		}
 
-		fn get(&self, document: &ServerKeyId) -> Result<DocumentKeyShare, Error> {
-			self.keys.read().get(document).cloned().ok_or(Error::DocumentNotFound)
+		fn get(&self, document: &ServerKeyId) -> Result<Option<DocumentKeyShare>, Error> {
+			Ok(self.keys.read().get(document).cloned())
 		}
 
 		fn remove(&self, document: &ServerKeyId) -> Result<(), Error> {
@@ -324,6 +403,11 @@ pub mod tests {
 			Ok(())
 		}
 
+		fn clear(&self) -> Result<(), Error> {
+			self.keys.write().clear();
+			Ok(())
+		}
+
 		fn contains(&self, document: &ServerKeyId) -> bool {
 			self.keys.read().contains_key(document)
 		}
@@ -356,40 +440,44 @@ pub mod tests {
 		let value1 = DocumentKeyShare {
 			author: Public::default(),
 			threshold: 100,
-			id_numbers: vec![
-				(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone())
-			].into_iter().collect(),
-			secret_share: Random.generate().unwrap().secret().clone(),
-			polynom1: Vec::new(),
 			common_point: Some(Random.generate().unwrap().public().clone()),
 			encrypted_point: Some(Random.generate().unwrap().public().clone()),
+			versions: vec![DocumentKeyShareVersion {
+				hash: Default::default(),
+				id_numbers: vec![
+					(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone())
+				].into_iter().collect(),
+				secret_share: Random.generate().unwrap().secret().clone(),
+			}],
 		};
 		let key2 = ServerKeyId::from(2);
 		let value2 = DocumentKeyShare {
 			author: Public::default(),
 			threshold: 200,
-			id_numbers: vec![
-				(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone())
-			].into_iter().collect(),
-			secret_share: Random.generate().unwrap().secret().clone(),
-			polynom1: Vec::new(),
 			common_point: Some(Random.generate().unwrap().public().clone()),
 			encrypted_point: Some(Random.generate().unwrap().public().clone()),
+			versions: vec![DocumentKeyShareVersion {
+				hash: Default::default(),
+				id_numbers: vec![
+					(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone())
+				].into_iter().collect(),
+				secret_share: Random.generate().unwrap().secret().clone(),
+			}],
 		};
 		let key3 = ServerKeyId::from(3);
 
 		let key_storage = PersistentKeyStorage::new(&config).unwrap();
 		key_storage.insert(key1.clone(), value1.clone()).unwrap();
 		key_storage.insert(key2.clone(), value2.clone()).unwrap();
-		assert_eq!(key_storage.get(&key1), Ok(value1.clone()));
-		assert_eq!(key_storage.get(&key2), Ok(value2.clone()));
-		assert_eq!(key_storage.get(&key3), Err(Error::DocumentNotFound));
+		assert_eq!(key_storage.get(&key1), Ok(Some(value1.clone())));
+		assert_eq!(key_storage.get(&key2), Ok(Some(value2.clone())));
+		assert_eq!(key_storage.get(&key3), Ok(None));
 		drop(key_storage);
 
 		let key_storage = PersistentKeyStorage::new(&config).unwrap();
-		assert_eq!(key_storage.get(&key1), Ok(value1));
-		assert_eq!(key_storage.get(&key2), Ok(value2));
-		assert_eq!(key_storage.get(&key3), Err(Error::DocumentNotFound));
+		assert_eq!(key_storage.get(&key1), Ok(Some(value1)));
+		assert_eq!(key_storage.get(&key2), Ok(Some(value2)));
+		assert_eq!(key_storage.get(&key3), Ok(None));
 	}
 
 	#[test]
@@ -422,13 +510,15 @@ pub mod tests {
 		let key = serde_json::from_slice::<CurrentSerializableDocumentKeyShare>(&db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap()).unwrap();
 		assert_eq!(Public::default(), key.author.clone().into());
 		assert_eq!(777, key.threshold);
+		assert_eq!(Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".parse::<Public>().unwrap()), key.common_point.clone().map(Into::into));
+		assert_eq!(Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".parse::<Public>().unwrap()), key.encrypted_point.clone().map(Into::into));

+		assert_eq!(key.versions.len(), 1);
 		assert_eq!(vec![(
 			"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse::<Public>().unwrap(),
 			"281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse::<Secret>().unwrap(),
-		)], key.id_numbers.clone().into_iter().map(|(k, v)| (k.into(), v.into())).collect::<Vec<(Public, Secret)>>());
-		assert_eq!("00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::<Secret>().unwrap(), key.secret_share.into());
-		assert_eq!(Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".parse::<Public>().unwrap()), key.common_point.clone().map(Into::into));
-		assert_eq!(Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".parse::<Public>().unwrap()), key.encrypted_point.clone().map(Into::into));
+		)], key.versions[0].id_numbers.clone().into_iter().map(|(k, v)| (k.into(), v.into())).collect::<Vec<(Public, Secret)>>());
+		assert_eq!("00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::<Secret>().unwrap(), key.versions[0].secret_share.clone().into());
 	}

 	#[test]
@@ -462,14 +552,16 @@ pub mod tests {
 		assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], CURRENT_VERSION);
 		let key = serde_json::from_slice::<CurrentSerializableDocumentKeyShare>(&db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap()).unwrap();
 		assert_eq!(777, key.threshold);
-		assert_eq!(vec![(
-			"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse::<Public>().unwrap(),
-			"281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse::<Secret>().unwrap(),
-		)], key.id_numbers.clone().into_iter().map(|(k, v)| (k.into(), v.into())).collect::<Vec<(Public, Secret)>>());
-		assert_eq!("00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::<Secret>().unwrap(), key.secret_share.into());
 		assert_eq!(Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".parse::<Public>().unwrap()), key.common_point.clone().map(Into::into));
 		assert_eq!(Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".parse::<Public>().unwrap()), key.encrypted_point.clone().map(Into::into));
 		assert_eq!(key.author, "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into());
-		assert_eq!(key.polynom1, vec![]);
+		assert_eq!(key.versions.len(), 1);
+		assert_eq!(vec![(
+			"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse::<Public>().unwrap(),
+			"281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse::<Secret>().unwrap(),
+		)], key.versions[0].id_numbers.clone().into_iter().map(|(k, v)| (k.into(), v.into())).collect::<Vec<(Public, Secret)>>());

+		assert_eq!("00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::<Secret>().unwrap(), key.versions[0].secret_share.clone().into());
 	}
 }

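Both upgrade tests above now read id_numbers and secret_share through key.versions[0] rather than from the share itself, while author, threshold, common_point and encrypted_point stay at the top level (and the old polynom1 field disappears). A rough sketch of the layout those assertions imply; the field names follow the tests, but the concrete types are illustrative stand-ins, not the real Public/Secret types.

// Sketch only: the versioned key-share layout implied by the assertions above.
use std::collections::BTreeMap;

type DemoPublic = String; // stand-in for Public
type DemoSecret = String; // stand-in for Secret

/// One version of a document key share; a new entry is presumably appended
/// whenever the set of share owners changes.
struct DemoKeyShareVersion {
	/// Per-node id numbers used by the threshold math for this version.
	id_numbers: BTreeMap<DemoPublic, DemoSecret>,
	/// This node's secret share for this version.
	secret_share: DemoSecret,
}

/// The whole record; version-independent data stays at the top level.
struct DemoKeyShare {
	author: DemoPublic,
	threshold: usize,
	common_point: Option<DemoPublic>,
	encrypted_point: Option<DemoPublic>,
	/// Ordered versions; the upgrade tests above expect exactly one entry.
	versions: Vec<DemoKeyShareVersion>,
}

fn main() {
	let share = DemoKeyShare {
		author: "author".into(),
		threshold: 777,
		common_point: None,
		encrypted_point: None,
		versions: vec![DemoKeyShareVersion {
			id_numbers: BTreeMap::new(),
			secret_share: "secret-share".into(),
		}],
	};
	assert_eq!(share.versions.len(), 1);
	assert!(share.versions[0].id_numbers.is_empty());
}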
@@ -148,6 +148,12 @@ impl<'a> Deserialize<'a> for SerializableSignature {
 #[derive(Clone, Debug)]
 pub struct SerializableH256(pub H256);

+impl Default for SerializableH256 {
+	fn default() -> Self {
+		SerializableH256(Default::default())
+	}
+}
+
 impl<T> From<T> for SerializableH256 where H256: From<T> {
 	fn from(s: T) -> SerializableH256 {
 		SerializableH256(s.into())
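The new Default impl for SerializableH256 simply wraps the inner type's default value. A stand-alone sketch of the same shape, with a hypothetical DemoH256 standing in for the real H256:

// Sketch only: mirrors the Default impl in the hunk above using stand-in types.
#[derive(Clone, Debug, Default, PartialEq)]
struct DemoH256([u8; 32]);

#[derive(Clone, Debug, PartialEq)]
struct SerializableDemoH256(pub DemoH256);

impl Default for SerializableDemoH256 {
	fn default() -> Self {
		// Delegate to the inner type's default (an all-zero hash here).
		SerializableDemoH256(Default::default())
	}
}

fn main() {
	assert_eq!(SerializableDemoH256::default(), SerializableDemoH256(DemoH256([0u8; 32])));
}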
@@ -249,6 +255,13 @@ impl Deref for SerializableSecret {
 	}
 }

+impl AsRef<[u8]> for SerializableSecret {
+	#[inline]
+	fn as_ref(&self) -> &[u8] {
+		&*self.0
+	}
+}
+
 impl Serialize for SerializableSecret {
 	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {
 		let mut serialized = "0x".to_owned();
@@ -309,6 +322,13 @@ impl Deref for SerializablePublic {
 	}
 }

+impl AsRef<[u8]> for SerializablePublic {
+	#[inline]
+	fn as_ref(&self) -> &[u8] {
+		&*self.0
+	}
+}
+
 impl Eq for SerializablePublic { }

 impl PartialEq for SerializablePublic {
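The two AsRef<[u8]> impls above let the serializable wrappers be handed to any generic API that accepts raw bytes. A small hypothetical usage sketch; SerializableDemo and hex_encode are illustrative, not part of the crate.

// Sketch only: shows why an AsRef<[u8]> impl like the ones above is convenient.
struct SerializableDemo(Vec<u8>);

impl AsRef<[u8]> for SerializableDemo {
	#[inline]
	fn as_ref(&self) -> &[u8] {
		&self.0
	}
}

// Any byte-oriented helper can now take the wrapper (or a reference to it) directly.
fn hex_encode<T: AsRef<[u8]>>(value: T) -> String {
	value.as_ref().iter().map(|b| format!("{:02x}", b)).collect()
}

fn main() {
	let v = SerializableDemo(vec![0xde, 0xad, 0xbe, 0xef]);
	assert_eq!(hex_encode(&v), "deadbeef");
}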