SecretStore: administrative sessions prototypes (#6605)
* generate random channel encryption key on restart
* session-level nonces
* fixed warning after rebase
* session_nonce -> nonce
* full_generation_math_session_with_refreshing_shares && full_generation_math_session_with_adding_new_node
* add several secret shares at once
* SecretStore: initial ShareAdd session prototype
* SecretStore: ServersSetChange jobs
* SecretStore: servers set change session continued
* SecretStore: servers set change session continued
* SecretStore: servers set change session continued
* SecretStore: known_sessions iterator
* SecretStore: implemented SessionsQueue
* SecretStore: UnknownSessionsJobTransport
* SecretStore: node_added_using_servers_set_change almost done
* SecretStore: continue adding tests
* SecretStore: node_added_using_servers_set_change + node_added_using_share_add
* SecretStore: node_added_using_server_set_change_from_this_node
* SecretStore: node_moved_using_share_move
* SecretStore: node_moved_using_servers_set_change
* SecretStore: node_removed_using_share_remove
* SecretStore: node_removed_using_servers_set_change
* SecretStore: different folders for client && admin sessions
* SecretStore: started adding share change consensus (flush)
* SecretStore: fixed spurious test failures
* enum JobPartialRequestAction
* SecretStore: started adding consensus layer to ShareAdd session
* SecretStore: starting external consensus for ShareAdd
* SecretStore: started restoring node_added_using_servers_set_change
* SecretStore: node_added_using_servers_set_change works with external consensus
* SecretStore: node_added_using_server_set_change_from_this_node works with external consensus
* removed debug comments/printlns
* SecretStore: share move session supports consensus
* SecretStore: share remove with external consensus
* SecretStore: started adding basic ShareAdd tests
* SecretStore: added ShareAdd tests
* SecretStore: added ShareAdd session to cluster
* SecretStore: added share move && remove sessions to cluster
* SecretStore: ShareMove session tests cleanup
* SecretStore: ShareRemove session tests cleanup
* SecretStore: added check_secret_is_preserved check
* SecretStore: added servers set change to cluster
* SecretStore: cleaned up ServersSetChange session tests
* SecretStore: cleaning + added tests for ShareRemove
* SecretStore: cleaning up
* SecretStore: propagated admin_public
* SecretStore: fixed persistent_key_storage test
* SecretStore: upgrade_db_from_1
* SecretStore: fixed ServersSetChange session completion
* SecretStore: check polynom1 in ShareAdd sessions (error for pre-v2 shares)
* SecretStore: fixing TODOs
* SecretStore: fixing TODOs
* SecretStore: check share change plan on 'old' slave nodes
* SecretStore: fixing TODOs
* SecretStore: store all admin sessions in single container to avoid overlaps
* SecretStore: do not update nodes set during admin sessions
* SecretStore: moved TODOs to appropriate methods
* SecretStore: TODOs
* SecretStore: added admin_public arg && fixed warnings
* SecretStore: added shares_to_move_reversed to ShareMove session
* SecretStore: additional checks during consensus establishing
* license
* SecretStore: added TODO about starting ServersSetChange session
* SecretStore: redundant clones + docs + lost unimplemented-s
* SecretStore: generation_session_completion_signalled_if_failed_on_master
* SecretStore: updated obsolete comment
* SecretStore: added type alias for current DocumentKeyShare serialization format
* SecretStore: fixed typo
* SecretStore: fixed warnings for futures 0.1.15
* fixed warning
This commit is contained in:
parent 561e8b42a8
commit 9a086face4

Cargo.lock (generated)
@@ -832,6 +832,7 @@ dependencies = [
 "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+"tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
 "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "tokio-proto 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -591,6 +591,10 @@ usage! {
 "--secretstore-secret=[SECRET]",
 "Hex-encoded secret key of this node.",

+ARG arg_secretstore_admin_public: (Option<String>) = None, or |c: &Config| otry!(c.secretstore).admin_public.clone(),
+"--secretstore-admin-public=[PUBLIC]",
+"Hex-encoded public key of secret store administrator.",
+
 ["Sealing/Mining options"]
 FLAG flag_force_sealing: (bool) = false, or |c: &Config| otry!(c.mining).force_sealing.clone(),
 "--force-sealing",
@@ -1089,6 +1093,7 @@ struct SecretStore {
 disable_http: Option<bool>,
 disable_acl_check: Option<bool>,
 self_secret: Option<String>,
+admin_public: Option<String>,
 nodes: Option<Vec<String>>,
 interface: Option<String>,
 port: Option<u16>,
@@ -1445,6 +1450,7 @@ mod tests {
 flag_no_secretstore_http: false,
 flag_no_secretstore_acl_check: false,
 arg_secretstore_secret: None,
+arg_secretstore_admin_public: None,
 arg_secretstore_nodes: "".into(),
 arg_secretstore_interface: "local".into(),
 arg_secretstore_port: 8083u16,
@@ -1684,6 +1690,7 @@ mod tests {
 disable_http: None,
 disable_acl_check: None,
 self_secret: None,
+admin_public: None,
 nodes: None,
 interface: None,
 port: Some(8083),
|
@ -626,6 +626,7 @@ impl Configuration {
|
|||||||
http_interface: self.secretstore_http_interface(),
|
http_interface: self.secretstore_http_interface(),
|
||||||
http_port: self.args.arg_ports_shift + self.args.arg_secretstore_http_port,
|
http_port: self.args.arg_ports_shift + self.args.arg_secretstore_http_port,
|
||||||
data_path: self.directories().secretstore,
|
data_path: self.directories().secretstore,
|
||||||
|
admin_public: self.secretstore_admin_public()?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1037,6 +1038,13 @@ impl Configuration {
 }
 }

+fn secretstore_admin_public(&self) -> Result<Option<Public>, String> {
+    match self.args.arg_secretstore_admin_public.as_ref() {
+        Some(admin_public) => Ok(Some(admin_public.parse().map_err(|e| format!("Invalid secret store admin public: {}", e))?)),
+        None => Ok(None),
+    }
+}
+
 fn secretstore_nodes(&self) -> Result<BTreeMap<Public, (String, u16)>, String> {
 let mut nodes = BTreeMap::new();
 for node in self.args.arg_secretstore_nodes.split(',').filter(|n| n != &"") {
@@ -55,6 +55,8 @@ pub struct Configuration {
 pub http_port: u16,
 /// Data directory path for secret store
 pub data_path: String,
+/// Administrator public key.
+pub admin_public: Option<Public>,
 }

 /// Secret store dependencies
@@ -145,6 +147,7 @@ mod server {
 port: port,
 })).collect(),
 allow_connecting_to_higher_nodes: true,
+admin_public: conf.admin_public,
 },
 };

@@ -170,6 +173,7 @@ impl Default for Configuration {
 http_enabled: true,
 acl_check_enabled: true,
 self_secret: None,
+admin_public: None,
 nodes: BTreeMap::new(),
 interface: "127.0.0.1".to_owned(),
 port: 8083,
@@ -20,6 +20,7 @@ serde_derive = "1.0"
 futures = "0.1"
 futures-cpupool = "0.1"
 rustc-hex = "1.0"
+tiny-keccak = "1.3"
 tokio-core = "0.1.6"
 tokio-io = "0.1.0"
 tokio-service = "0.1"
@@ -153,6 +153,7 @@ impl KeyServerCore {
 allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes,
 acl_storage: acl_storage,
 key_storage: key_storage,
+admin_public: None,
 };

 let (stop, stopped) = futures::oneshot();
@@ -255,6 +256,7 @@ pub mod tests {
 port: start_port + (j as u16),
 })).collect(),
 allow_connecting_to_higher_nodes: false,
+admin_public: None,
 }).collect();
 let key_servers_set: BTreeMap<Public, SocketAddr> = configs[0].nodes.iter()
 .map(|(k, a)| (k.clone(), format!("{}:{}", a.address, a.port).parse().unwrap()))
secret_store/src/key_server_cluster/admin_sessions/mod.rs (new file, 48 lines)
@@ -0,0 +1,48 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

pub mod servers_set_change_session;
pub mod share_add_session;
pub mod share_change_session;
pub mod share_move_session;
pub mod share_remove_session;

mod sessions_queue;

use key_server_cluster::{SessionId, NodeId, SessionMeta};

/// Share change session metadata.
#[derive(Debug, Clone)]
pub struct ShareChangeSessionMeta {
    /// Key id.
    pub id: SessionId,
    /// Id of node, which has started this session.
    pub master_node_id: NodeId,
    /// Id of node, on which this session is running.
    pub self_node_id: NodeId,
}

impl ShareChangeSessionMeta {
    /// Convert to consensus session meta. `all_nodes_set` is the union of `old_nodes_set` && `new_nodes_set`.
    pub fn into_consensus_meta(self, all_nodes_set_len: usize) -> SessionMeta {
        SessionMeta {
            id: self.id,
            master_node_id: self.master_node_id,
            self_node_id: self.self_node_id,
            threshold: all_nodes_set_len - 1,
        }
    }
}
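For orientation, a minimal hedged sketch of how `into_consensus_meta` is expected to be used by the admin sessions below. The identifiers `session_id`, `master_id` and `this_node_id` are hypothetical placeholders, not values from the diff:

// Illustrative only: converting share-change metadata into consensus metadata
// for a union of old && new node sets containing 4 nodes.
let meta = ShareChangeSessionMeta {
    id: session_id,              // key id, assumed to be in scope
    master_node_id: master_id,   // node which started the session
    self_node_id: this_node_id,  // node running this code
};
let consensus_meta = meta.into_consensus_meta(4);
// threshold is all_nodes_set_len - 1, so the consensus job effectively needs
// a confirmation from every node in the union of the old && new sets.
assert_eq!(consensus_meta.threshold, 3);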
File diff suppressed because it is too large
@@ -0,0 +1,87 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::collections::{VecDeque, BTreeSet, BTreeMap};
use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare};

/// Session, queued for change.
pub enum QueuedSession {
    /// Session is known on this node.
    Known(SessionId, DocumentKeyShare),
    /// Session is unknown on this node.
    Unknown(SessionId, BTreeSet<NodeId>),
}

/// Queue of share change sessions.
pub struct SessionsQueue {
    /// Key storage.
    key_storage: Arc<KeyStorage>,
    /// Sessions, known on this node.
    known_sessions: VecDeque<SessionId>,
    /// Unknown sessions.
    unknown_sessions: VecDeque<(SessionId, BTreeSet<NodeId>)>,
}

impl SessionsQueue {
    /// Create new sessions queue.
    pub fn new(key_storage: Arc<KeyStorage>, unknown_sessions: BTreeMap<SessionId, BTreeSet<NodeId>>) -> Self {
        // TODO: optimizations:
        // 1) known sessions - change to iter
        // 2) unknown sessions - request chunk-by-chunk
        SessionsQueue {
            key_storage: key_storage.clone(),
            known_sessions: key_storage.iter().map(|(k, _)| k).collect(),
            unknown_sessions: unknown_sessions.into_iter().collect(),
        }
    }
}

impl Iterator for SessionsQueue {
    type Item = Result<QueuedSession, Error>;

    fn next(&mut self) -> Option<Self::Item> {
        if let Some(known_session) = self.known_sessions.pop_front() {
            return Some(self.key_storage.get(&known_session)
                .map(|session| QueuedSession::Known(known_session, session))
                .map_err(|e| Error::KeyStorage(e.into())));
        }

        if let Some(unknown_session) = self.unknown_sessions.pop_front() {
            return Some(Ok(QueuedSession::Unknown(unknown_session.0, unknown_session.1)));
        }

        None
    }
}

impl QueuedSession {
    /// Queued session (key) id.
    pub fn id(&self) -> &SessionId {
        match *self {
            QueuedSession::Known(ref session_id, _) => session_id,
            QueuedSession::Unknown(ref session_id, _) => session_id,
        }
    }

    /// Owners of key shares (aka session nodes).
    pub fn nodes(&self) -> BTreeSet<NodeId> {
        match *self {
            QueuedSession::Known(_, ref key_share) => key_share.id_numbers.keys().cloned().collect(),
            QueuedSession::Unknown(_, ref nodes) => nodes.clone(),
        }
    }
}
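A hedged usage sketch of the queue (not part of the diff): the `key_storage` and `unknown_sessions` bindings are assumed to already exist in the calling servers-set-change code, and the snippet is assumed to run inside a function returning `Result<(), Error>`. Known sessions are yielded first, then unknown ones, each wrapped in a `Result`:

// Illustrative only: draining the queue during a servers set change session.
let queue = SessionsQueue::new(key_storage.clone(), unknown_sessions);
for session in queue {
    match session? {
        QueuedSession::Known(_id, key_share) => {
            // key share is stored locally; its owners come from key_share.id_numbers
            let _owners: BTreeSet<NodeId> = key_share.id_numbers.keys().cloned().collect();
        },
        QueuedSession::Unknown(_id, owners) => {
            // share is held by other nodes only; `owners` came from their unknown-sessions reports
            let _ = owners;
        },
    }
}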
File diff suppressed because it is too large
@@ -0,0 +1,384 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::collections::{BTreeSet, BTreeMap};
use ethkey::Secret;
use key_server_cluster::{Error, NodeId, SessionId, KeyStorage};
use key_server_cluster::cluster::Cluster;
use key_server_cluster::cluster_sessions::ClusterSession;
use key_server_cluster::math;
use key_server_cluster::jobs::servers_set_change_access_job::ServersSetChangeAccessRequest;
use key_server_cluster::jobs::job_session::JobTransport;
use key_server_cluster::message::{Message, ServersSetChangeMessage, ServersSetChangeShareAddMessage, ServersSetChangeShareMoveMessage,
    ServersSetChangeShareRemoveMessage};
use key_server_cluster::share_add_session::{SessionTransport as ShareAddSessionTransport,
    SessionImpl as ShareAddSessionImpl, SessionParams as ShareAddSessionParams};
use key_server_cluster::share_move_session::{SessionTransport as ShareMoveSessionTransport,
    SessionImpl as ShareMoveSessionImpl, SessionParams as ShareMoveSessionParams};
use key_server_cluster::share_remove_session::{SessionTransport as ShareRemoveSessionTransport,
    SessionImpl as ShareRemoveSessionImpl, SessionParams as ShareRemoveSessionParams};
use key_server_cluster::message::{ShareAddMessage, ShareMoveMessage, ShareRemoveMessage};
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;

/// Single session meta-change session. Brief overview:
/// 1) new shares are added to the session
/// 2) shares are moved between nodes
/// 3) shares are removed from nodes
pub struct ShareChangeSession {
    /// Servers set change session id.
    session_id: SessionId,
    /// Session nonce.
    nonce: u64,
    /// Share change session meta.
    meta: ShareChangeSessionMeta,
    /// Cluster.
    cluster: Arc<Cluster>,
    /// Key storage.
    key_storage: Arc<KeyStorage>,
    /// Old nodes set.
    old_nodes_set: BTreeSet<NodeId>,
    /// Nodes to add shares for.
    nodes_to_add: Option<BTreeMap<NodeId, Secret>>,
    /// Nodes to move shares from/to.
    nodes_to_move: Option<BTreeMap<NodeId, NodeId>>,
    /// Nodes to remove shares from.
    nodes_to_remove: Option<BTreeSet<NodeId>>,
    /// Share add session.
    share_add_session: Option<ShareAddSessionImpl<ShareChangeTransport>>,
    /// Share move session.
    share_move_session: Option<ShareMoveSessionImpl<ShareChangeTransport>>,
    /// Share remove session.
    share_remove_session: Option<ShareRemoveSessionImpl<ShareChangeTransport>>,
    /// Is finished.
    is_finished: bool,
}

/// Share change session plan.
pub struct ShareChangeSessionPlan {
    /// Nodes to add shares for.
    pub nodes_to_add: BTreeMap<NodeId, Secret>,
    /// Nodes to move shares from/to (keys = target nodes, values = source nodes).
    pub nodes_to_move: BTreeMap<NodeId, NodeId>,
    /// Nodes to remove shares from.
    pub nodes_to_remove: BTreeSet<NodeId>,
}

/// Session parameters.
pub struct ShareChangeSessionParams {
    /// Servers set change session id.
    pub session_id: SessionId,
    /// Session nonce.
    pub nonce: u64,
    /// Share change session meta.
    pub meta: ShareChangeSessionMeta,
    /// Cluster.
    pub cluster: Arc<Cluster>,
    /// Keys storage.
    pub key_storage: Arc<KeyStorage>,
    /// Old nodes set.
    pub old_nodes_set: BTreeSet<NodeId>,
    /// Session plan.
    pub plan: ShareChangeSessionPlan,
}

/// Share add session transport.
#[derive(Clone)]
pub struct ShareChangeTransport {
    /// Servers set change session id.
    session_id: SessionId,
    /// Session nonce.
    nonce: u64,
    /// Cluster.
    cluster: Arc<Cluster>,
}

impl ShareChangeSession {
    /// Create new share change session.
    pub fn new(params: ShareChangeSessionParams) -> Result<Self, Error> {
        // we can't create sessions right now, because key share is read when session is created, but it can change in previous session
        let nodes_to_add = if !params.plan.nodes_to_add.is_empty() { Some(params.plan.nodes_to_add) } else { None };
        let nodes_to_remove = if !params.plan.nodes_to_remove.is_empty() { Some(params.plan.nodes_to_remove) } else { None };
        let nodes_to_move = if !params.plan.nodes_to_move.is_empty() { Some(params.plan.nodes_to_move) } else { None };
        debug_assert!(nodes_to_add.is_some() || nodes_to_move.is_some() || nodes_to_remove.is_some());

        Ok(ShareChangeSession {
            session_id: params.session_id,
            nonce: params.nonce,
            meta: params.meta,
            cluster: params.cluster,
            key_storage: params.key_storage,
            old_nodes_set: params.old_nodes_set,
            nodes_to_add: nodes_to_add,
            nodes_to_remove: nodes_to_remove,
            nodes_to_move: nodes_to_move,
            share_add_session: None,
            share_move_session: None,
            share_remove_session: None,
            is_finished: false,
        })
    }

    /// Is finished?.
    pub fn is_finished(&self) -> bool {
        self.is_finished
    }

    /// Is master node?.
    pub fn is_master(&self) -> bool {
        self.meta.self_node_id == self.meta.master_node_id
    }

    /// Initialize session (on master node).
    pub fn initialize(&mut self) -> Result<(), Error> {
        self.proceed_to_next_state()
    }

    /// When share-add message is received.
    pub fn on_share_add_message(&mut self, sender: &NodeId, message: &ShareAddMessage) -> Result<(), Error> {
        if self.share_add_session.is_none() {
            self.create_share_add_session()?;
        }

        let change_state_needed = self.share_add_session.as_ref()
            .map(|share_add_session| {
                let was_finished = share_add_session.is_finished();
                share_add_session.process_message(sender, message)
                    .map(|_| share_add_session.is_finished() && !was_finished)
            })
            .unwrap_or(Err(Error::InvalidMessage))?;
        if change_state_needed {
            self.proceed_to_next_state()?;
        }

        Ok(())
    }

    /// When share-move message is received.
    pub fn on_share_move_message(&mut self, sender: &NodeId, message: &ShareMoveMessage) -> Result<(), Error> {
        if self.share_move_session.is_none() {
            self.create_share_move_session()?;
        }

        let change_state_needed = self.share_move_session.as_ref()
            .map(|share_move_session| {
                let was_finished = share_move_session.is_finished();
                share_move_session.process_message(sender, message)
                    .map(|_| share_move_session.is_finished() && !was_finished)
            })
            .unwrap_or(Err(Error::InvalidMessage))?;
        if change_state_needed {
            self.proceed_to_next_state()?;
        }

        Ok(())
    }

    /// When share-remove message is received.
    pub fn on_share_remove_message(&mut self, sender: &NodeId, message: &ShareRemoveMessage) -> Result<(), Error> {
        if self.share_remove_session.is_none() {
            self.create_share_remove_session()?;
        }

        let change_state_needed = self.share_remove_session.as_ref()
            .map(|share_remove_session| {
                let was_finished = share_remove_session.is_finished();
                share_remove_session.process_message(sender, message)
                    .map(|_| share_remove_session.is_finished() && !was_finished)
            })
            .unwrap_or(Err(Error::InvalidMessage))?;
        if change_state_needed {
            self.proceed_to_next_state()?;
        }

        Ok(())
    }

    /// Create new share add session.
    fn create_share_add_session(&mut self) -> Result<(), Error> {
        let nodes_to_add = self.nodes_to_add.take().ok_or(Error::InvalidStateForRequest)?;
        let new_nodes_set = self.old_nodes_set.iter().map(|n| (n.clone(), None))
            .chain(nodes_to_add.clone().into_iter().map(|(k, v)| (k, Some(v))))
            .collect();
        let share_add_session = ShareAddSessionImpl::new(ShareAddSessionParams {
            meta: self.meta.clone(),
            nonce: self.nonce,
            transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()),
            key_storage: self.key_storage.clone(),
            admin_public: None,
        })?;
        share_add_session.set_consensus_output(self.old_nodes_set.clone(), new_nodes_set)?;
        self.share_add_session = Some(share_add_session);
        Ok(())
    }

    /// Create new share move session.
    fn create_share_move_session(&mut self) -> Result<(), Error> {
        let nodes_to_move = self.nodes_to_move.take().ok_or(Error::InvalidStateForRequest)?;
        let share_move_session = ShareMoveSessionImpl::new(ShareMoveSessionParams {
            meta: self.meta.clone(),
            nonce: self.nonce,
            transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()),
            key_storage: self.key_storage.clone(),
            admin_public: None,
        })?;
        share_move_session.set_consensus_output(nodes_to_move)?;
        self.share_move_session = Some(share_move_session);
        Ok(())
    }

    /// Create new share remove session.
    fn create_share_remove_session(&mut self) -> Result<(), Error> {
        let nodes_to_remove = self.nodes_to_remove.take().ok_or(Error::InvalidStateForRequest)?;
        let share_remove_session = ShareRemoveSessionImpl::new(ShareRemoveSessionParams {
            meta: self.meta.clone(),
            nonce: self.nonce,
            transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()),
            key_storage: self.key_storage.clone(),
            admin_public: None,
        })?;
        share_remove_session.set_consensus_output(nodes_to_remove)?;
        self.share_remove_session = Some(share_remove_session);
        Ok(())
    }

    /// Proceed to the next state.
    fn proceed_to_next_state(&mut self) -> Result<(), Error> {
        if self.meta.self_node_id != self.meta.master_node_id {
            if self.nodes_to_add.is_none() && self.nodes_to_move.is_none() && self.nodes_to_remove.is_none() {
                self.is_finished = true;
            }
            return Ok(());
        }

        if self.nodes_to_add.is_some() {
            self.create_share_add_session()?;
            return self.share_add_session.as_ref()
                .expect("either create_share_add_session fails, or session is created; qed")
                .initialize(None, None, None);
        }

        if self.nodes_to_move.is_some() {
            self.create_share_move_session()?;
            return self.share_move_session.as_ref()
                .expect("either create_share_move_session fails, or session is created; qed")
                .initialize(None, None, None);
        }

        if self.nodes_to_remove.is_some() {
            self.create_share_remove_session()?;
            return self.share_remove_session.as_ref()
                .expect("either create_share_remove_session fails, or session is created; qed")
                .initialize(None, None, None);
        }

        self.is_finished = true;

        Ok(())
    }
}

impl ShareChangeTransport {
    pub fn new(session_id: SessionId, nonce: u64, cluster: Arc<Cluster>) -> Self {
        ShareChangeTransport {
            session_id: session_id,
            nonce: nonce,
            cluster: cluster,
        }
    }
}

impl JobTransport for ShareChangeTransport {
    type PartialJobRequest = ServersSetChangeAccessRequest;
    type PartialJobResponse = bool;

    fn send_partial_request(&self, _node: &NodeId, _request: ServersSetChangeAccessRequest) -> Result<(), Error> {
        unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed")
    }

    fn send_partial_response(&self, _node: &NodeId, _response: bool) -> Result<(), Error> {
        unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed")
    }
}

impl ShareAddSessionTransport for ShareChangeTransport {
    fn set_id_numbers(&mut self, _id_numbers: BTreeMap<NodeId, Secret>) {
        unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed")
    }

    fn send(&self, node: &NodeId, message: ShareAddMessage) -> Result<(), Error> {
        self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(ServersSetChangeShareAddMessage {
            session: self.session_id.clone().into(),
            session_nonce: self.nonce,
            message: message,
        })))
    }
}

impl ShareMoveSessionTransport for ShareChangeTransport {
    fn set_shares_to_move_reversed(&mut self, _shares_to_move: BTreeMap<NodeId, NodeId>) {
        unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed")
    }

    fn send(&self, node: &NodeId, message: ShareMoveMessage) -> Result<(), Error> {
        self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ServersSetChangeShareMoveMessage {
            session: self.session_id.clone().into(),
            session_nonce: self.nonce,
            message: message,
        })))
    }
}

impl ShareRemoveSessionTransport for ShareChangeTransport {
    fn send(&self, node: &NodeId, message: ShareRemoveMessage) -> Result<(), Error> {
        self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ServersSetChangeShareRemoveMessage {
            session: self.session_id.clone().into(),
            session_nonce: self.nonce,
            message: message,
        })))
    }
}

/// Prepare share change plan for moving from old `session_nodes` to `new_nodes_set`.
pub fn prepare_share_change_session_plan(session_nodes: &BTreeSet<NodeId>, new_nodes_set: &BTreeSet<NodeId>) -> Result<ShareChangeSessionPlan, Error> {
    let mut nodes_to_add: BTreeSet<_> = new_nodes_set.difference(&session_nodes).cloned().collect();
    let mut nodes_to_move = BTreeMap::new();
    let mut nodes_to_remove: BTreeSet<_> = session_nodes.difference(&new_nodes_set).cloned().collect();
    while !nodes_to_remove.is_empty() && !nodes_to_add.is_empty() {
        let source_node = nodes_to_remove.iter().cloned().nth(0).expect("nodes_to_remove.is_empty is checked in while condition; qed");
        let target_node = nodes_to_add.iter().cloned().nth(0).expect("nodes_to_add.is_empty is checked in while condition; qed");
        nodes_to_remove.remove(&source_node);
        nodes_to_add.remove(&target_node);
        nodes_to_move.insert(target_node, source_node);
    }

    Ok(ShareChangeSessionPlan {
        nodes_to_add: nodes_to_add.into_iter()
            .map(|n| math::generate_random_scalar().map(|s| (n, s)))
            .collect::<Result<BTreeMap<_, _>, _>>()?,
        nodes_to_move: nodes_to_move,
        nodes_to_remove: nodes_to_remove,
    })
}

impl ShareChangeSessionPlan {
    /// Is empty (nothing-to-do) plan?
    pub fn is_empty(&self) -> bool {
        self.nodes_to_add.is_empty()
            && self.nodes_to_move.is_empty()
            && self.nodes_to_remove.is_empty()
    }
}
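A hedged worked example of `prepare_share_change_session_plan` (not part of the diff): the single-letter helpers `a() .. e()` are hypothetical constructors for distinct NodeId values, and the snippet is assumed to run inside a function returning `Result<(), Error>`.

// Illustrative only: old owners = {a, b, c}, new servers set = {b, c, d, e}.
let old_owners: BTreeSet<NodeId> = vec![a(), b(), c()].into_iter().collect();
let new_set: BTreeSet<NodeId> = vec![b(), c(), d(), e()].into_iter().collect();
let plan = prepare_share_change_session_plan(&old_owners, &new_set)?;
// The leaving node `a` is paired with one of the joining nodes => one share move.
// The other joining node gets a freshly generated id number => one share add.
// Every leaving node was paired with a joining one, so nothing is left to remove.
assert_eq!(plan.nodes_to_move.len(), 1);
assert_eq!(plan.nodes_to_add.len(), 1);
assert!(plan.nodes_to_remove.is_empty());
assert!(!plan.is_empty());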
@ -0,0 +1,829 @@
|
|||||||
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::collections::{BTreeMap, BTreeSet};
|
||||||
|
use parking_lot::{Mutex, Condvar};
|
||||||
|
use ethkey::{Public, Secret, Signature};
|
||||||
|
use key_server_cluster::{Error, NodeId, SessionId, DocumentKeyShare, KeyStorage};
|
||||||
|
use key_server_cluster::cluster::Cluster;
|
||||||
|
use key_server_cluster::cluster_sessions::ClusterSession;
|
||||||
|
use key_server_cluster::message::{Message, ShareMoveMessage, ShareMoveConsensusMessage,
|
||||||
|
ShareMoveRequest, ShareMove, ShareMoveConfirm, ShareMoveError, ConsensusMessageWithServersMap,
|
||||||
|
InitializeConsensusSessionWithServersMap, ConfirmConsensusInitialization};
|
||||||
|
use key_server_cluster::jobs::job_session::JobTransport;
|
||||||
|
use key_server_cluster::jobs::dummy_job::{DummyJob, DummyJobTransport};
|
||||||
|
use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest};
|
||||||
|
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
|
||||||
|
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
||||||
|
|
||||||
|
/// Share move session API.
|
||||||
|
pub trait Session: Send + Sync + 'static {
|
||||||
|
/// Wait until session is completed.
|
||||||
|
fn wait(&self) -> Result<(), Error>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Share move session transport.
|
||||||
|
pub trait SessionTransport: Clone + JobTransport<PartialJobRequest=ServersSetChangeAccessRequest, PartialJobResponse=bool> {
|
||||||
|
/// Send message to given node.
|
||||||
|
fn send(&self, node: &NodeId, message: ShareMoveMessage) -> Result<(), Error>;
|
||||||
|
/// Set share destinations.
|
||||||
|
fn set_shares_to_move_reversed(&mut self, shares_to_move_reversed: BTreeMap<NodeId, NodeId>);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Share move session.
|
||||||
|
pub struct SessionImpl<T: SessionTransport> {
|
||||||
|
/// Session core.
|
||||||
|
core: SessionCore<T>,
|
||||||
|
/// Session data.
|
||||||
|
data: Mutex<SessionData<T>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Immutable session data.
|
||||||
|
struct SessionCore<T: SessionTransport> {
|
||||||
|
/// Session metadata.
|
||||||
|
pub meta: ShareChangeSessionMeta,
|
||||||
|
/// Session-level nonce.
|
||||||
|
pub nonce: u64,
|
||||||
|
/// Original key share (for old nodes only).
|
||||||
|
pub key_share: Option<DocumentKeyShare>,
|
||||||
|
/// Session transport to communicate to other cluster nodes.
|
||||||
|
pub transport: T,
|
||||||
|
/// Key storage.
|
||||||
|
pub key_storage: Arc<KeyStorage>,
|
||||||
|
/// Administrator public key.
|
||||||
|
pub admin_public: Option<Public>,
|
||||||
|
/// SessionImpl completion condvar.
|
||||||
|
pub completed: Condvar,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Share move consensus session type.
|
||||||
|
type ShareMoveChangeConsensusSession<T> = ConsensusSession<ServersSetChangeAccessJob, T, DummyJob, DummyJobTransport>;
|
||||||
|
|
||||||
|
/// Mutable session data.
|
||||||
|
struct SessionData<T: SessionTransport> {
|
||||||
|
/// Session state.
|
||||||
|
pub state: SessionState,
|
||||||
|
/// Consensus session.
|
||||||
|
pub consensus_session: Option<ShareMoveChangeConsensusSession<T>>,
|
||||||
|
/// Shares to move. Keys = new nodes, Values = old nodes.
|
||||||
|
pub shares_to_move_reversed: Option<BTreeMap<NodeId, NodeId>>,
|
||||||
|
/// Reversed shares to move. Keys = old nodes, Values = new nodes.
|
||||||
|
pub shares_to_move: Option<BTreeMap<NodeId, NodeId>>,
|
||||||
|
/// Move confirmations to receive.
|
||||||
|
pub move_confirmations_to_receive: Option<BTreeSet<NodeId>>,
|
||||||
|
/// Received key share (filled on destination nodes only).
|
||||||
|
pub received_key_share: Option<DocumentKeyShare>,
|
||||||
|
/// Share move change result.
|
||||||
|
pub result: Option<Result<(), Error>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// SessionImpl creation parameters
|
||||||
|
pub struct SessionParams<T: SessionTransport> {
|
||||||
|
/// Session meta.
|
||||||
|
pub meta: ShareChangeSessionMeta,
|
||||||
|
/// Session nonce.
|
||||||
|
pub nonce: u64,
|
||||||
|
/// Session transport to communicate to other cluster nodes.
|
||||||
|
pub transport: T,
|
||||||
|
/// Key storage.
|
||||||
|
pub key_storage: Arc<KeyStorage>,
|
||||||
|
/// Administrator public key.
|
||||||
|
pub admin_public: Option<Public>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Share move session state.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
enum SessionState {
|
||||||
|
/// State when consensus is establishing.
|
||||||
|
ConsensusEstablishing,
|
||||||
|
/// Waiting for move confirmation.
|
||||||
|
WaitingForMoveConfirmation,
|
||||||
|
/// Session is completed.
|
||||||
|
Finished,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Isolated ShareMove session transport.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct IsolatedSessionTransport {
|
||||||
|
/// Key id.
|
||||||
|
session: SessionId,
|
||||||
|
/// Session-level nonce.
|
||||||
|
nonce: u64,
|
||||||
|
/// Shares to move between. Keys = new nodes, values = old nodes.
|
||||||
|
shares_to_move_reversed: Option<BTreeMap<NodeId, NodeId>>,
|
||||||
|
/// Cluster.
|
||||||
|
cluster: Arc<Cluster>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> SessionImpl<T> where T: SessionTransport {
|
||||||
|
/// Create new share move session.
|
||||||
|
pub fn new(params: SessionParams<T>) -> Result<Self, Error> {
|
||||||
|
Ok(SessionImpl {
|
||||||
|
core: SessionCore {
|
||||||
|
meta: params.meta.clone(),
|
||||||
|
nonce: params.nonce,
|
||||||
|
key_share: params.key_storage.get(¶ms.meta.id).ok(), // ignore error, it will be checked later
|
||||||
|
transport: params.transport,
|
||||||
|
key_storage: params.key_storage,
|
||||||
|
admin_public: params.admin_public,
|
||||||
|
completed: Condvar::new(),
|
||||||
|
},
|
||||||
|
data: Mutex::new(SessionData {
|
||||||
|
state: SessionState::ConsensusEstablishing,
|
||||||
|
consensus_session: None,
|
||||||
|
shares_to_move_reversed: None,
|
||||||
|
shares_to_move: None,
|
||||||
|
move_confirmations_to_receive: None,
|
||||||
|
received_key_share: None,
|
||||||
|
result: None,
|
||||||
|
}),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set pre-established consensus data.
|
||||||
|
pub fn set_consensus_output(&self, shares_to_move_reversed: BTreeMap<NodeId, NodeId>) -> Result<(), Error> {
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
|
||||||
|
// check state
|
||||||
|
if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() {
|
||||||
|
return Err(Error::InvalidStateForRequest);
|
||||||
|
}
|
||||||
|
|
||||||
|
let old_id_numbers = self.core.key_share.as_ref().map(|ks| &ks.id_numbers);
|
||||||
|
check_shares_to_move(&self.core.meta.self_node_id, &shares_to_move_reversed, old_id_numbers)?;
|
||||||
|
|
||||||
|
data.move_confirmations_to_receive = Some(shares_to_move_reversed.keys().cloned().collect());
|
||||||
|
data.shares_to_move = Some(shares_to_move_reversed.iter().map(|(k, v)| (v.clone(), k.clone())).collect());
|
||||||
|
data.shares_to_move_reversed = Some(shares_to_move_reversed);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Initialize share add session on master node.
|
||||||
|
pub fn initialize(&self, shares_to_move_reversed: Option<BTreeMap<NodeId, NodeId>>, old_set_signature: Option<Signature>, new_set_signature: Option<Signature>) -> Result<(), Error> {
|
||||||
|
debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id);
|
||||||
|
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
|
||||||
|
// check state
|
||||||
|
if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() {
|
||||||
|
return Err(Error::InvalidStateForRequest);
|
||||||
|
}
|
||||||
|
|
||||||
|
// if consensus is not yet established => start consensus session
|
||||||
|
let is_consensus_pre_established = data.shares_to_move.is_some();
|
||||||
|
if !is_consensus_pre_established {
|
||||||
|
let shares_to_move_reversed = shares_to_move_reversed.ok_or(Error::InvalidMessage)?;
|
||||||
|
let key_share = self.core.key_share.as_ref().ok_or(Error::KeyStorage("key share is not found on master node".into()))?;
|
||||||
|
check_shares_to_move(&self.core.meta.self_node_id, &shares_to_move_reversed, Some(&key_share.id_numbers))?;
|
||||||
|
|
||||||
|
let old_set_signature = old_set_signature.ok_or(Error::InvalidMessage)?;
|
||||||
|
let new_set_signature = new_set_signature.ok_or(Error::InvalidMessage)?;
|
||||||
|
let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?;
|
||||||
|
let old_nodes_set: BTreeSet<_> = key_share.id_numbers.keys().cloned().collect();
|
||||||
|
let mut all_nodes_set = old_nodes_set.clone();
|
||||||
|
let mut new_nodes_set = all_nodes_set.clone();
|
||||||
|
for (target, source) in &shares_to_move_reversed {
|
||||||
|
new_nodes_set.remove(source);
|
||||||
|
new_nodes_set.insert(target.clone());
|
||||||
|
all_nodes_set.insert(target.clone());
|
||||||
|
}
|
||||||
|
let mut consensus_transport = self.core.transport.clone();
|
||||||
|
consensus_transport.set_shares_to_move_reversed(shares_to_move_reversed.clone());
|
||||||
|
|
||||||
|
let mut consensus_session = ConsensusSession::new(ConsensusSessionParams {
|
||||||
|
meta: self.core.meta.clone().into_consensus_meta(all_nodes_set.len()),
|
||||||
|
consensus_executor: ServersSetChangeAccessJob::new_on_master(admin_public,
|
||||||
|
old_nodes_set.clone(),
|
||||||
|
old_nodes_set.clone(),
|
||||||
|
new_nodes_set,
|
||||||
|
old_set_signature,
|
||||||
|
new_set_signature),
|
||||||
|
consensus_transport: consensus_transport,
|
||||||
|
})?;
|
||||||
|
consensus_session.initialize(all_nodes_set)?;
|
||||||
|
data.consensus_session = Some(consensus_session);
|
||||||
|
data.move_confirmations_to_receive = Some(shares_to_move_reversed.keys().cloned().collect());
|
||||||
|
data.shares_to_move = Some(shares_to_move_reversed.iter().map(|(k, v)| (v.clone(), k.clone())).collect());
|
||||||
|
data.shares_to_move_reversed = Some(shares_to_move_reversed);
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// otherwise => start sending ShareMove-specific messages
|
||||||
|
Self::on_consensus_established(&self.core, &mut *data)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Process single message.
|
||||||
|
pub fn process_message(&self, sender: &NodeId, message: &ShareMoveMessage) -> Result<(), Error> {
|
||||||
|
if self.core.nonce != message.session_nonce() {
|
||||||
|
return Err(Error::ReplayProtection);
|
||||||
|
}
|
||||||
|
|
||||||
|
match message {
|
||||||
|
&ShareMoveMessage::ShareMoveConsensusMessage(ref message) =>
|
||||||
|
self.on_consensus_message(sender, message),
|
||||||
|
&ShareMoveMessage::ShareMoveRequest(ref message) =>
|
||||||
|
self.on_share_move_request(sender, message),
|
||||||
|
&ShareMoveMessage::ShareMove(ref message) =>
|
||||||
|
self.on_share_move(sender, message),
|
||||||
|
&ShareMoveMessage::ShareMoveConfirm(ref message) =>
|
||||||
|
self.on_share_move_confirmation(sender, message),
|
||||||
|
&ShareMoveMessage::ShareMoveError(ref message) =>
|
||||||
|
self.on_session_error(sender, message),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// When consensus-related message is received.
|
||||||
|
pub fn on_consensus_message(&self, sender: &NodeId, message: &ShareMoveConsensusMessage) -> Result<(), Error> {
|
||||||
|
debug_assert!(self.core.meta.id == *message.session);
|
||||||
|
debug_assert!(sender != &self.core.meta.self_node_id);
|
||||||
|
|
||||||
|
// start slave consensus session if needed
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
if data.consensus_session.is_none() && sender == &self.core.meta.master_node_id {
|
||||||
|
match &message.message {
|
||||||
|
&ConsensusMessageWithServersMap::InitializeConsensusSession(ref message) => {
|
||||||
|
let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?;
|
||||||
|
let current_nodes_set = self.core.key_share.as_ref()
|
||||||
|
.map(|ks| ks.id_numbers.keys().cloned().collect())
|
||||||
|
.unwrap_or_else(|| message.old_nodes_set.clone().into_iter().map(Into::into).collect());
|
||||||
|
let all_nodes_set_len = message.new_nodes_set.keys().chain(message.old_nodes_set.iter()).collect::<BTreeSet<_>>().len();
|
||||||
|
data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams {
|
||||||
|
meta: self.core.meta.clone().into_consensus_meta(all_nodes_set_len),
|
||||||
|
consensus_executor: ServersSetChangeAccessJob::new_on_slave(admin_public, current_nodes_set),
|
||||||
|
consensus_transport: self.core.transport.clone(),
|
||||||
|
})?);
|
||||||
|
},
|
||||||
|
_ => return Err(Error::InvalidStateForRequest),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let (is_establishing_consensus, is_consensus_established, shares_to_move_reversed) = {
|
||||||
|
let consensus_session = data.consensus_session.as_mut().ok_or(Error::InvalidMessage)?;
|
||||||
|
let is_establishing_consensus = consensus_session.state() == ConsensusSessionState::EstablishingConsensus;
|
||||||
|
let shares_to_move_reversed = match &message.message {
|
||||||
|
&ConsensusMessageWithServersMap::InitializeConsensusSession(ref message) => {
|
||||||
|
consensus_session.on_consensus_partial_request(sender, ServersSetChangeAccessRequest::from(message))?;
|
||||||
|
let shares_to_move_reversed = message.new_nodes_set.iter()
|
||||||
|
.filter(|&(old, new)| old != new)
|
||||||
|
.map(|(old, new)| (old.clone().into(), new.clone().into()))
|
||||||
|
.collect::<BTreeMap<NodeId, NodeId>>();
|
||||||
|
check_shares_to_move(&self.core.meta.self_node_id, &shares_to_move_reversed, self.core.key_share.as_ref().map(|ks| &ks.id_numbers))?;
|
||||||
|
Some(shares_to_move_reversed)
|
||||||
|
},
|
||||||
|
&ConsensusMessageWithServersMap::ConfirmConsensusInitialization(ref message) => {
|
||||||
|
consensus_session.on_consensus_partial_response(sender, message.is_confirmed)?;
|
||||||
|
None
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
(
|
||||||
|
is_establishing_consensus,
|
||||||
|
consensus_session.state() == ConsensusSessionState::ConsensusEstablished,
|
||||||
|
shares_to_move_reversed
|
||||||
|
)
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(shares_to_move_reversed) = shares_to_move_reversed {
|
||||||
|
data.move_confirmations_to_receive = Some(shares_to_move_reversed.keys().cloned().collect());
|
||||||
|
data.shares_to_move = Some(shares_to_move_reversed.iter().map(|(k, v)| (v.clone(), k.clone())).collect());
|
||||||
|
data.shares_to_move_reversed = Some(shares_to_move_reversed);
|
||||||
|
}
|
||||||
|
if self.core.meta.self_node_id != self.core.meta.master_node_id || !is_establishing_consensus || !is_consensus_established {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
Self::on_consensus_established(&self.core, &mut *data)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// When share move request is received.
|
||||||
|
pub fn on_share_move_request(&self, sender: &NodeId, message: &ShareMoveRequest) -> Result<(), Error> {
|
||||||
|
debug_assert!(self.core.meta.id == *message.session);
|
||||||
|
debug_assert!(sender != &self.core.meta.self_node_id);
|
||||||
|
|
||||||
|
// awaiting this message from master node only
|
||||||
|
if sender != &self.core.meta.master_node_id {
|
||||||
|
return Err(Error::InvalidMessage);
|
||||||
|
}
|
||||||
|
|
||||||
|
// check state
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
if data.state == SessionState::ConsensusEstablishing && data.shares_to_move.is_some() {
|
||||||
|
data.state = SessionState::WaitingForMoveConfirmation;
|
||||||
|
} else if data.state != SessionState::WaitingForMoveConfirmation {
|
||||||
|
return Err(Error::InvalidStateForRequest);
|
||||||
|
}
|
||||||
|
|
||||||
|
// move share
|
||||||
|
{
|
||||||
|
let shares_to_move = data.shares_to_move.as_ref()
|
||||||
|
.expect("shares_to_move are filled during consensus establishing; share move requests are processed after this; qed");
|
||||||
|
if let Some(share_destination) = shares_to_move.get(&self.core.meta.self_node_id) {
|
||||||
|
Self::move_share(&self.core, share_destination)?;
|
||||||
|
} else {
|
||||||
|
return Err(Error::InvalidMessage);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// and complete session
|
||||||
|
Self::complete_session(&self.core, &mut *data)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// When moving share is received.
|
||||||
|
pub fn on_share_move(&self, sender: &NodeId, message: &ShareMove) -> Result<(), Error> {
|
||||||
|
debug_assert!(self.core.meta.id == *message.session);
|
||||||
|
debug_assert!(sender != &self.core.meta.self_node_id);
|
||||||
|
|
||||||
|
// check state
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
if data.state == SessionState::ConsensusEstablishing && data.shares_to_move.is_some() {
|
||||||
|
data.state = SessionState::WaitingForMoveConfirmation;
|
||||||
|
} else if data.state != SessionState::WaitingForMoveConfirmation {
|
||||||
|
return Err(Error::InvalidStateForRequest);
|
||||||
|
}
|
||||||
|
|
||||||
|
// check that we are expecting this share
|
||||||
|
if data.shares_to_move_reversed.as_ref()
|
||||||
|
.expect("shares_to_move are filled during consensus establishing; share moves are processed after this; qed")
|
||||||
|
.get(&self.core.meta.self_node_id) != Some(sender) {
|
||||||
|
return Err(Error::InvalidMessage);
|
||||||
|
}
|
||||||
|
|
||||||
|
// update state
|
||||||
|
let is_last_confirmation = {
|
||||||
|
let move_confirmations_to_receive = data.move_confirmations_to_receive.as_mut()
|
||||||
|
.expect("move_confirmations_to_receive are filled during consensus establishing; share moves are processed after this; qed");
|
||||||
|
move_confirmations_to_receive.remove(&self.core.meta.self_node_id);
|
||||||
|
move_confirmations_to_receive.is_empty()
|
||||||
|
};
|
||||||
|
data.received_key_share = Some(DocumentKeyShare {
|
||||||
|
author: message.author.clone().into(),
|
||||||
|
threshold: message.threshold,
|
||||||
|
id_numbers: message.id_numbers.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(),
|
||||||
|
polynom1: message.polynom1.iter().cloned().map(Into::into).collect(),
|
||||||
|
secret_share: message.secret_share.clone().into(),
|
||||||
|
common_point: message.common_point.clone().map(Into::into),
|
||||||
|
encrypted_point: message.encrypted_point.clone().map(Into::into),
|
||||||
|
});
|
||||||
|
|
||||||
|
// send confirmation to all other nodes
|
||||||
|
{
|
||||||
|
let shares_to_move = data.shares_to_move.as_ref()
|
||||||
|
.expect("shares_to_move are filled during consensus establishing; share moves are processed after this; qed");
|
||||||
|
let new_nodes_set: BTreeSet<_> = shares_to_move.values().cloned()
|
||||||
|
.chain(message.id_numbers.keys().filter(|n| !shares_to_move.contains_key(n)).cloned().map(Into::into))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
for node in new_nodes_set.into_iter().filter(|n| n != &self.core.meta.self_node_id) {
|
||||||
|
self.core.transport.send(&node, ShareMoveMessage::ShareMoveConfirm(ShareMoveConfirm {
|
||||||
|
session: self.core.meta.id.clone().into(),
|
||||||
|
session_nonce: self.core.nonce,
|
||||||
|
}))?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// complete session if this was last share
|
||||||
|
if is_last_confirmation {
|
||||||
|
Self::complete_session(&self.core, &mut *data)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// When share is received from destination node.
|
||||||
|
pub fn on_share_move_confirmation(&self, sender: &NodeId, message: &ShareMoveConfirm) -> Result<(), Error> {
|
||||||
|
debug_assert!(self.core.meta.id == *message.session);
|
||||||
|
debug_assert!(sender != &self.core.meta.self_node_id);
|
||||||
|
|
||||||
|
// check state
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
if data.state == SessionState::ConsensusEstablishing && data.shares_to_move.is_some() {
|
||||||
|
data.state = SessionState::WaitingForMoveConfirmation;
|
||||||
|
} else if data.state != SessionState::WaitingForMoveConfirmation {
|
||||||
|
return Err(Error::InvalidStateForRequest);
|
||||||
|
}
|
||||||
|
|
||||||
|
// find share source
|
||||||
|
{
|
||||||
|
let mut move_confirmations_to_receive = data.move_confirmations_to_receive.as_mut()
|
||||||
|
.expect("move_confirmations_to_receive are filled during consensus establishing; move confirmations are processed after this; qed");
|
||||||
|
if !move_confirmations_to_receive.remove(sender) {
|
||||||
|
return Err(Error::InvalidMessage);
|
||||||
|
}
|
||||||
|
|
||||||
|
if !move_confirmations_to_receive.is_empty() {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Self::complete_session(&self.core, &mut *data)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// When error has occurred on another node.
|
||||||
|
pub fn on_session_error(&self, sender: &NodeId, message: &ShareMoveError) -> Result<(), Error> {
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
|
||||||
|
warn!("{}: share move session failed with error: {} from {}", self.core.meta.self_node_id, message.error, sender);
|
||||||
|
|
||||||
|
data.state = SessionState::Finished;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Start sending ShareMove-specific messages, when consensus is established.
|
||||||
|
fn on_consensus_established(core: &SessionCore<T>, data: &mut SessionData<T>) -> Result<(), Error> {
|
||||||
|
// update state
|
||||||
|
data.state = SessionState::WaitingForMoveConfirmation;
|
||||||
|
|
||||||
|
// send share move requests to every required node
|
||||||
|
Self::disseminate_share_move_requests(core, data)?;
|
||||||
|
|
||||||
|
{
|
||||||
|
let shares_to_move = data.shares_to_move.as_ref()
|
||||||
|
.expect("shares_to_move are filled during consensus establishing; this method is called after consensus established; qed");
|
||||||
|
if let Some(share_destination) = shares_to_move.get(&core.meta.self_node_id) {
|
||||||
|
// move share
|
||||||
|
Self::move_share(core, share_destination)?;
|
||||||
|
} else {
|
||||||
|
// remember move confirmations to receive
|
||||||
|
data.move_confirmations_to_receive = Some(shares_to_move.values().cloned().collect());
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// complete session if share is lost
|
||||||
|
Self::complete_session(core, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Disseminate share move requests.
|
||||||
|
fn disseminate_share_move_requests(core: &SessionCore<T>, data: &mut SessionData<T>) -> Result<(), Error> {
|
||||||
|
let shares_to_move = data.shares_to_move.as_ref()
|
||||||
|
.expect("shares_to_move are filled during consensus establishing; this method is called after consensus established; qed");
|
||||||
|
for share_source in shares_to_move.keys().filter(|n| **n != core.meta.self_node_id) {
|
||||||
|
core.transport.send(share_source, ShareMoveMessage::ShareMoveRequest(ShareMoveRequest {
|
||||||
|
session: core.meta.id.clone().into(),
|
||||||
|
session_nonce: core.nonce,
|
||||||
|
}))?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Send share move message.
|
||||||
|
fn move_share(core: &SessionCore<T>, share_destination: &NodeId) -> Result<(), Error> {
|
||||||
|
let key_share = core.key_share.as_ref()
|
||||||
|
.expect("move_share is called on nodes from shares_to_move.values(); all 'values' nodes have shares; qed");
|
||||||
|
core.transport.send(share_destination, ShareMoveMessage::ShareMove(ShareMove {
|
||||||
|
session: core.meta.id.clone().into(),
|
||||||
|
session_nonce: core.nonce,
|
||||||
|
author: key_share.author.clone().into(),
|
||||||
|
threshold: key_share.threshold,
|
||||||
|
id_numbers: key_share.id_numbers.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(),
|
||||||
|
polynom1: key_share.polynom1.iter().cloned().map(Into::into).collect(),
|
||||||
|
secret_share: key_share.secret_share.clone().into(),
|
||||||
|
common_point: key_share.common_point.clone().map(Into::into),
|
||||||
|
encrypted_point: key_share.encrypted_point.clone().map(Into::into),
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Complete session on this node.
|
||||||
|
fn complete_session(core: &SessionCore<T>, data: &mut SessionData<T>) -> Result<(), Error> {
|
||||||
|
// update state
|
||||||
|
data.state = SessionState::Finished;
|
||||||
|
|
||||||
|
// if we are source node => remove share from storage
|
||||||
|
let shares_to_move = data.shares_to_move.as_ref()
|
||||||
|
.expect("shares_to_move are filled during consensus establishing; this method is called after consensus established; qed");
|
||||||
|
if shares_to_move.contains_key(&core.meta.self_node_id) {
|
||||||
|
return core.key_storage.remove(&core.meta.id)
|
||||||
|
.map_err(|e| Error::KeyStorage(e.into()));
|
||||||
|
}
|
||||||
|
|
||||||
|
// else we need to update key_share.id_numbers.keys()
|
||||||
|
let is_old_node = data.received_key_share.is_none();
|
||||||
|
let mut key_share = data.received_key_share.take()
|
||||||
|
.unwrap_or_else(|| core.key_share.as_ref()
|
||||||
|
.expect("on target nodes received_key_share is non-empty; on old nodes key_share is not empty; qed")
|
||||||
|
.clone());
|
||||||
|
for (source_node, target_node) in shares_to_move {
|
||||||
|
let id_number = key_share.id_numbers.remove(source_node)
|
||||||
|
.expect("source_node is old node; there's entry in id_numbers for each old node; qed");
|
||||||
|
key_share.id_numbers.insert(target_node.clone(), id_number);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ... and update key share in storage
|
||||||
|
if is_old_node {
|
||||||
|
core.key_storage.update(core.meta.id.clone(), key_share)
|
||||||
|
} else {
|
||||||
|
core.key_storage.insert(core.meta.id.clone(), key_share)
|
||||||
|
}.map_err(|e| Error::KeyStorage(e.into()))
|
||||||
|
}
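// Illustrative sketch (placeholder ids `source` and `target`, not taken from the original code):
// the remapping loop in complete_session above only re-keys an existing (id_number, secret_share)
// pair from the old holder to the new one, roughly
//
//     let id_number = key_share.id_numbers.remove(&source).expect("source held a share");
//     key_share.id_numbers.insert(target.clone(), id_number);
//
// so the set of share points is unchanged and the joint secret recoverable from any
// threshold + 1 shares stays the same (see check_secret_is_preserved in the tests).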
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> Session for SessionImpl<T> where T: SessionTransport + Send + Sync + 'static {
|
||||||
|
fn wait(&self) -> Result<(), Error> {
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
if data.result.is_none() {
|
||||||
|
self.core.completed.wait(&mut data);
|
||||||
|
}
|
||||||
|
|
||||||
|
data.result.clone()
|
||||||
|
.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> ClusterSession for SessionImpl<T> where T: SessionTransport {
|
||||||
|
fn is_finished(&self) -> bool {
|
||||||
|
self.data.lock().state == SessionState::Finished
|
||||||
|
}
|
||||||
|
|
||||||
|
fn on_session_timeout(&self) {
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
|
||||||
|
warn!("{}: share move session failed with timeout", self.core.meta.self_node_id);
|
||||||
|
|
||||||
|
data.state = SessionState::Finished;
|
||||||
|
data.result = Some(Err(Error::NodeDisconnected));
|
||||||
|
self.core.completed.notify_all();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn on_node_timeout(&self, node: &NodeId) {
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
|
||||||
|
warn!("{}: share move session failed because {} connection has timeouted", self.core.meta.self_node_id, node);
|
||||||
|
|
||||||
|
data.state = SessionState::Finished;
|
||||||
|
data.result = Some(Err(Error::NodeDisconnected));
|
||||||
|
self.core.completed.notify_all();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl IsolatedSessionTransport {
|
||||||
|
pub fn new(session_id: SessionId, nonce: u64, cluster: Arc<Cluster>) -> Self {
|
||||||
|
IsolatedSessionTransport {
|
||||||
|
session: session_id,
|
||||||
|
nonce: nonce,
|
||||||
|
cluster: cluster,
|
||||||
|
shares_to_move_reversed: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl JobTransport for IsolatedSessionTransport {
|
||||||
|
type PartialJobRequest = ServersSetChangeAccessRequest;
|
||||||
|
type PartialJobResponse = bool;
|
||||||
|
|
||||||
|
fn send_partial_request(&self, node: &NodeId, request: ServersSetChangeAccessRequest) -> Result<(), Error> {
|
||||||
|
let shares_to_move_reversed = self.shares_to_move_reversed.as_ref()
|
||||||
|
.expect("partial requests are sent from master node only; on master node shares_to_move_reversed are filled during creation; qed");
|
||||||
|
self.cluster.send(node, Message::ShareMove(ShareMoveMessage::ShareMoveConsensusMessage(ShareMoveConsensusMessage {
|
||||||
|
session: self.session.clone().into(),
|
||||||
|
session_nonce: self.nonce,
|
||||||
|
message: ConsensusMessageWithServersMap::InitializeConsensusSession(InitializeConsensusSessionWithServersMap {
|
||||||
|
old_nodes_set: request.old_servers_set.into_iter().map(Into::into).collect(),
|
||||||
|
new_nodes_set: request.new_servers_set.into_iter().map(|n| (n.into(),
|
||||||
|
shares_to_move_reversed.get(&n).cloned().unwrap_or_else(|| n.clone()).into())).collect(),
|
||||||
|
old_set_signature: request.old_set_signature.into(),
|
||||||
|
new_set_signature: request.new_set_signature.into(),
|
||||||
|
}),
|
||||||
|
})))
|
||||||
|
}
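// Illustrative sketch (placeholder ids, not from the original code): if `new_node` takes over
// the share currently held by `old_node`, while `other_node` keeps its own share, the
// `new_nodes_set` map built above is
//
//     { new_node => old_node, other_node => other_node }
//
// i.e. every member of the new set is paired with the old node whose share it holds after the move.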
|
||||||
|
|
||||||
|
fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> {
|
||||||
|
self.cluster.send(node, Message::ShareMove(ShareMoveMessage::ShareMoveConsensusMessage(ShareMoveConsensusMessage {
|
||||||
|
session: self.session.clone().into(),
|
||||||
|
session_nonce: self.nonce,
|
||||||
|
message: ConsensusMessageWithServersMap::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
|
||||||
|
is_confirmed: response,
|
||||||
|
}),
|
||||||
|
})))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SessionTransport for IsolatedSessionTransport {
|
||||||
|
fn set_shares_to_move_reversed(&mut self, shares_to_move_reversed: BTreeMap<NodeId, NodeId>) {
|
||||||
|
self.shares_to_move_reversed = Some(shares_to_move_reversed);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn send(&self, node: &NodeId, message: ShareMoveMessage) -> Result<(), Error> {
|
||||||
|
self.cluster.send(node, Message::ShareMove(message))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn check_shares_to_move(self_node_id: &NodeId, shares_to_move_reversed: &BTreeMap<NodeId, NodeId>, id_numbers: Option<&BTreeMap<NodeId, Secret>>) -> Result<(), Error> {
|
||||||
|
// shares to move must not be empty
|
||||||
|
if shares_to_move_reversed.is_empty() {
|
||||||
|
return Err(Error::InvalidMessage);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(id_numbers) = id_numbers {
|
||||||
|
// all values in shares_to_move_reversed must be old nodes of the session
|
||||||
|
if shares_to_move_reversed.values().any(|n| !id_numbers.contains_key(n)) {
|
||||||
|
return Err(Error::InvalidNodesConfiguration);
|
||||||
|
}
|
||||||
|
// all keys in shares_to_move_reversed must be new nodes for the session
|
||||||
|
if shares_to_move_reversed.keys().any(|n| id_numbers.contains_key(n)) {
|
||||||
|
return Err(Error::InvalidNodesConfiguration);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// this node must NOT be in the values of shares_to_move_reversed
|
||||||
|
if shares_to_move_reversed.values().any(|n| n == self_node_id) {
|
||||||
|
return Err(Error::InvalidMessage);
|
||||||
|
}
|
||||||
|
// this node must be in the keys of shares_to_move_reversed
|
||||||
|
if !shares_to_move_reversed.contains_key(self_node_id) {
|
||||||
|
return Err(Error::InvalidMessage);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// all values of the shares_to_move must be distinct
|
||||||
|
if shares_to_move_reversed.values().collect::<BTreeSet<_>>().len() != shares_to_move_reversed.len() {
|
||||||
|
return Err(Error::InvalidNodesConfiguration);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
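// Usage sketch (hypothetical node ids, mirroring the checks above): a single move of the share
// held by `old_node` to `new_node` is described by a map from destination to source, and is
// validated on a node that does not yet hold a key share (so `id_numbers` is `None`) like this:
//
//     let mut shares_to_move_reversed = BTreeMap::new();
//     shares_to_move_reversed.insert(new_node.clone(), old_node.clone());
//     check_shares_to_move(&new_node, &shares_to_move_reversed, None)?;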
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::collections::{VecDeque, BTreeMap, BTreeSet};
|
||||||
|
use ethkey::{Random, Generator, Public, Signature, KeyPair, sign};
|
||||||
|
use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage};
|
||||||
|
use key_server_cluster::cluster::Cluster;
|
||||||
|
use key_server_cluster::cluster_sessions::ClusterSession;
|
||||||
|
use key_server_cluster::cluster::tests::DummyCluster;
|
||||||
|
use key_server_cluster::generation_session::tests::{Node as GenerationNode, generate_nodes_ids};
|
||||||
|
use key_server_cluster::math;
|
||||||
|
use key_server_cluster::message::Message;
|
||||||
|
use key_server_cluster::servers_set_change_session::tests::generate_key;
|
||||||
|
use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash;
|
||||||
|
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
||||||
|
use key_server_cluster::admin_sessions::share_add_session::tests::check_secret_is_preserved;
|
||||||
|
use super::{SessionImpl, SessionParams, IsolatedSessionTransport};
|
||||||
|
|
||||||
|
struct Node {
|
||||||
|
pub cluster: Arc<DummyCluster>,
|
||||||
|
pub key_storage: Arc<DummyKeyStorage>,
|
||||||
|
pub session: SessionImpl<IsolatedSessionTransport>,
|
||||||
|
}
|
||||||
|
|
||||||
|
struct MessageLoop {
|
||||||
|
pub admin_key_pair: KeyPair,
|
||||||
|
pub original_key_pair: KeyPair,
|
||||||
|
pub old_nodes_set: BTreeSet<NodeId>,
|
||||||
|
pub new_nodes_set: BTreeSet<NodeId>,
|
||||||
|
pub old_set_signature: Signature,
|
||||||
|
pub new_set_signature: Signature,
|
||||||
|
pub nodes: BTreeMap<NodeId, Node>,
|
||||||
|
pub queue: VecDeque<(NodeId, NodeId, Message)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_session(mut meta: ShareChangeSessionMeta, admin_public: Public, self_node_id: NodeId, cluster: Arc<Cluster>, key_storage: Arc<KeyStorage>) -> SessionImpl<IsolatedSessionTransport> {
|
||||||
|
let session_id = meta.id.clone();
|
||||||
|
meta.self_node_id = self_node_id;
|
||||||
|
SessionImpl::new(SessionParams {
|
||||||
|
meta: meta.clone(),
|
||||||
|
transport: IsolatedSessionTransport::new(session_id, 1, cluster),
|
||||||
|
key_storage: key_storage,
|
||||||
|
admin_public: Some(admin_public),
|
||||||
|
nonce: 1,
|
||||||
|
}).unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_node(meta: ShareChangeSessionMeta, admin_public: Public, node: GenerationNode) -> Node {
|
||||||
|
Node {
|
||||||
|
cluster: node.cluster.clone(),
|
||||||
|
key_storage: node.key_storage.clone(),
|
||||||
|
session: create_session(meta, admin_public, node.session.node().clone(), node.cluster, node.key_storage),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl MessageLoop {
|
||||||
|
pub fn new(t: usize, master_node_id: NodeId, old_nodes_set: BTreeSet<NodeId>, shares_to_move: BTreeMap<NodeId, NodeId>) -> Self {
|
||||||
|
// generate admin key pair
|
||||||
|
let admin_key_pair = Random.generate().unwrap();
|
||||||
|
let admin_public = admin_key_pair.public().clone();
|
||||||
|
|
||||||
|
// run initial generation session
|
||||||
|
let gml = generate_key(t, old_nodes_set.clone());
|
||||||
|
let original_secret = math::compute_joint_secret(gml.nodes.values()
|
||||||
|
.map(|nd| nd.key_storage.get(&SessionId::default()).unwrap().polynom1[0].clone())
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.iter()).unwrap();
|
||||||
|
let original_key_pair = KeyPair::from_secret(original_secret).unwrap();
|
||||||
|
|
||||||
|
// prepare sessions on all nodes
|
||||||
|
let meta = ShareChangeSessionMeta {
|
||||||
|
id: SessionId::default(),
|
||||||
|
self_node_id: NodeId::default(),
|
||||||
|
master_node_id: master_node_id,
|
||||||
|
};
|
||||||
|
let new_nodes_set: BTreeSet<_> = old_nodes_set.iter()
|
||||||
|
.filter(|n| !shares_to_move.values().any(|n2| *n == n2))
|
||||||
|
.cloned()
|
||||||
|
.chain(shares_to_move.keys().cloned())
|
||||||
|
.collect();
|
||||||
|
let new_nodes = new_nodes_set.iter()
|
||||||
|
.filter(|n| !old_nodes_set.contains(&n))
|
||||||
|
.map(|new_node_id| {
|
||||||
|
let new_node_cluster = Arc::new(DummyCluster::new(new_node_id.clone()));
|
||||||
|
let new_node_key_storage = Arc::new(DummyKeyStorage::default());
|
||||||
|
let new_node_session = create_session(meta.clone(), admin_public.clone(), new_node_id.clone(), new_node_cluster.clone(), new_node_key_storage.clone());
|
||||||
|
Node {
|
||||||
|
cluster: new_node_cluster,
|
||||||
|
key_storage: new_node_key_storage,
|
||||||
|
session: new_node_session,
|
||||||
|
}
|
||||||
|
});
|
||||||
|
let old_nodes = gml.nodes.into_iter().map(|gn| create_node(meta.clone(), admin_public.clone(), gn.1));
|
||||||
|
let nodes = old_nodes.chain(new_nodes).map(|n| (n.session.core.meta.self_node_id.clone(), n)).collect();
|
||||||
|
|
||||||
|
let old_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&old_nodes_set)).unwrap();
|
||||||
|
let new_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap();
|
||||||
|
MessageLoop {
|
||||||
|
admin_key_pair: admin_key_pair,
|
||||||
|
original_key_pair: original_key_pair,
|
||||||
|
old_nodes_set: old_nodes_set.clone(),
|
||||||
|
new_nodes_set: new_nodes_set.clone(),
|
||||||
|
old_set_signature: old_set_signature,
|
||||||
|
new_set_signature: new_set_signature,
|
||||||
|
nodes: nodes,
|
||||||
|
queue: Default::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn run(&mut self) {
|
||||||
|
while let Some((from, to, message)) = self.take_message() {
|
||||||
|
self.process_message((from, to, message)).unwrap();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> {
|
||||||
|
self.nodes.values()
|
||||||
|
.filter_map(|n| n.cluster.take_message().map(|m| (n.session.core.meta.self_node_id.clone(), m.0, m.1)))
|
||||||
|
.nth(0)
|
||||||
|
.or_else(|| self.queue.pop_front())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> {
|
||||||
|
match { match msg.2 {
|
||||||
|
Message::ShareMove(ref message) =>
|
||||||
|
self.nodes[&msg.1].session.process_message(&msg.0, message),
|
||||||
|
_ => unreachable!("only servers set change messages are expected"),
|
||||||
|
} } {
|
||||||
|
Ok(_) => Ok(()),
|
||||||
|
Err(Error::TooEarlyForRequest) => {
|
||||||
|
self.queue.push_back(msg);
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
|
Err(err) => Err(err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn nodes_moved_using_share_move_from_master_node() {
|
||||||
|
let test_cases = vec![(3, 1), (3, 3)];
|
||||||
|
for (n, nodes_to_add) in test_cases {
|
||||||
|
// generate key && prepare ShareMove sessions
|
||||||
|
let old_nodes_set = generate_nodes_ids(n);
|
||||||
|
let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
|
||||||
|
let nodes_to_add = generate_nodes_ids(nodes_to_add);
|
||||||
|
let mut shares_to_move = BTreeMap::new();
|
||||||
|
for (source, target) in old_nodes_set.iter().zip(nodes_to_add.iter()) {
|
||||||
|
shares_to_move.insert(target.clone(), source.clone());
|
||||||
|
}
|
||||||
|
let mut ml = MessageLoop::new(1, master_node_id.clone(), old_nodes_set, shares_to_move.clone());
|
||||||
|
|
||||||
|
// initialize session on master node && run to completion
|
||||||
|
ml.nodes[&master_node_id].session.initialize(Some(shares_to_move.clone()),
|
||||||
|
Some(ml.old_set_signature.clone()),
|
||||||
|
Some(ml.new_set_signature.clone())).unwrap();
|
||||||
|
ml.run();
|
||||||
|
|
||||||
|
// check that session has completed on all nodes
|
||||||
|
assert!(ml.nodes.values().all(|n| n.session.is_finished()));
|
||||||
|
|
||||||
|
// check that secret is still the same as before moving the shares
|
||||||
|
check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter()
|
||||||
|
.filter(|&(k, _)| !shares_to_move.values().any(|v| v == k))
|
||||||
|
.map(|(k, v)| (k.clone(), v.key_storage.clone()))
|
||||||
|
.collect());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
@@ -0,0 +1,740 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::collections::BTreeSet;
|
||||||
|
use parking_lot::{Mutex, Condvar};
|
||||||
|
use ethkey::{Public, Signature};
|
||||||
|
use key_server_cluster::{Error, NodeId, SessionId, DocumentKeyShare, KeyStorage};
|
||||||
|
use key_server_cluster::cluster::Cluster;
|
||||||
|
use key_server_cluster::cluster_sessions::ClusterSession;
|
||||||
|
use key_server_cluster::message::{Message, ShareRemoveMessage, ShareRemoveConsensusMessage, ConsensusMessageWithServersSet,
|
||||||
|
ShareRemoveRequest, ShareRemoveConfirm, ShareRemoveError, InitializeConsensusSessionWithServersSet,
|
||||||
|
ConfirmConsensusInitialization};
|
||||||
|
use key_server_cluster::jobs::job_session::JobTransport;
|
||||||
|
use key_server_cluster::jobs::dummy_job::{DummyJob, DummyJobTransport};
|
||||||
|
use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest};
|
||||||
|
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
|
||||||
|
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
||||||
|
|
||||||
|
/// Share remove session API.
|
||||||
|
pub trait Session: Send + Sync + 'static {
|
||||||
|
/// Wait until session is completed.
|
||||||
|
fn wait(&self) -> Result<(), Error>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Share remove session transport.
|
||||||
|
pub trait SessionTransport: Clone + JobTransport<PartialJobRequest=ServersSetChangeAccessRequest, PartialJobResponse=bool> {
|
||||||
|
/// Send message to given node.
|
||||||
|
fn send(&self, node: &NodeId, message: ShareRemoveMessage) -> Result<(), Error>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Share remove session.
|
||||||
|
pub struct SessionImpl<T: SessionTransport> {
|
||||||
|
/// Session core.
|
||||||
|
core: SessionCore<T>,
|
||||||
|
/// Session data.
|
||||||
|
data: Mutex<SessionData<T>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Immutable session data.
|
||||||
|
struct SessionCore<T: SessionTransport> {
|
||||||
|
/// Session metadata.
|
||||||
|
pub meta: ShareChangeSessionMeta,
|
||||||
|
/// Session-level nonce.
|
||||||
|
pub nonce: u64,
|
||||||
|
/// Original key share.
|
||||||
|
pub key_share: DocumentKeyShare,
|
||||||
|
/// Session transport to communicate to other cluster nodes.
|
||||||
|
pub transport: T,
|
||||||
|
/// Key storage.
|
||||||
|
pub key_storage: Arc<KeyStorage>,
|
||||||
|
/// Administrator public key.
|
||||||
|
pub admin_public: Option<Public>,
|
||||||
|
/// SessionImpl completion condvar.
|
||||||
|
pub completed: Condvar,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Share remove consensus session type.
|
||||||
|
type ShareRemoveChangeConsensusSession<T> = ConsensusSession<ServersSetChangeAccessJob, T, DummyJob, DummyJobTransport>;
|
||||||
|
|
||||||
|
/// Mutable session data.
|
||||||
|
struct SessionData<T: SessionTransport> {
|
||||||
|
/// Session state.
|
||||||
|
pub state: SessionState,
|
||||||
|
/// Consensus session.
|
||||||
|
pub consensus_session: Option<ShareRemoveChangeConsensusSession<T>>,
|
||||||
|
/// Shares to remove.
|
||||||
|
pub shares_to_remove: Option<BTreeSet<NodeId>>,
|
||||||
|
/// Remove confirmations to receive.
|
||||||
|
pub remove_confirmations_to_receive: Option<BTreeSet<NodeId>>,
|
||||||
|
/// Share remove change result.
|
||||||
|
pub result: Option<Result<(), Error>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// SessionImpl creation parameters
|
||||||
|
pub struct SessionParams<T: SessionTransport> {
|
||||||
|
/// Session meta.
|
||||||
|
pub meta: ShareChangeSessionMeta,
|
||||||
|
/// Session nonce.
|
||||||
|
pub nonce: u64,
|
||||||
|
/// Session transport to communicate to other cluster nodes.
|
||||||
|
pub transport: T,
|
||||||
|
/// Key storage.
|
||||||
|
pub key_storage: Arc<KeyStorage>,
|
||||||
|
/// Administrator public key.
|
||||||
|
pub admin_public: Option<Public>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Share remove session state.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
enum SessionState {
|
||||||
|
/// State when consensus is establishing.
|
||||||
|
ConsensusEstablishing,
|
||||||
|
/// Waiting for remove confirmation.
|
||||||
|
WaitingForRemoveConfirmation,
|
||||||
|
/// Session is finished.
|
||||||
|
Finished,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Isolated ShareRemove session transport.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct IsolatedSessionTransport {
|
||||||
|
/// Key id.
|
||||||
|
session: SessionId,
|
||||||
|
/// Session-level nonce.
|
||||||
|
nonce: u64,
|
||||||
|
/// Cluster.
|
||||||
|
cluster: Arc<Cluster>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> SessionImpl<T> where T: SessionTransport {
|
||||||
|
/// Create new share remove session.
|
||||||
|
pub fn new(params: SessionParams<T>) -> Result<Self, Error> {
|
||||||
|
Ok(SessionImpl {
|
||||||
|
core: SessionCore {
|
||||||
|
meta: params.meta.clone(),
|
||||||
|
nonce: params.nonce,
|
||||||
|
key_share: params.key_storage.get(¶ms.meta.id).map_err(|e| Error::KeyStorage(e.into()))?,
|
||||||
|
transport: params.transport,
|
||||||
|
key_storage: params.key_storage,
|
||||||
|
admin_public: params.admin_public,
|
||||||
|
completed: Condvar::new(),
|
||||||
|
},
|
||||||
|
data: Mutex::new(SessionData {
|
||||||
|
state: SessionState::ConsensusEstablishing,
|
||||||
|
consensus_session: None,
|
||||||
|
shares_to_remove: None,
|
||||||
|
remove_confirmations_to_receive: None,
|
||||||
|
result: None,
|
||||||
|
}),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set pre-established consensus data.
|
||||||
|
pub fn set_consensus_output(&self, shares_to_remove: BTreeSet<NodeId>) -> Result<(), Error> {
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
|
||||||
|
// check state
|
||||||
|
if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() {
|
||||||
|
return Err(Error::InvalidStateForRequest);
|
||||||
|
}
|
||||||
|
|
||||||
|
check_shares_to_remove(&self.core, &shares_to_remove)?;
|
||||||
|
|
||||||
|
data.remove_confirmations_to_receive = Some(shares_to_remove.clone());
|
||||||
|
data.shares_to_remove = Some(shares_to_remove);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Initialize share remove session on master node.
|
||||||
|
pub fn initialize(&self, shares_to_remove: Option<BTreeSet<NodeId>>, old_set_signature: Option<Signature>, new_set_signature: Option<Signature>) -> Result<(), Error> {
|
||||||
|
debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id);
|
||||||
|
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
// check state
|
||||||
|
if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() {
|
||||||
|
return Err(Error::InvalidStateForRequest);
|
||||||
|
}
|
||||||
|
|
||||||
|
// if consensus is not yet established => start consensus session
|
||||||
|
let is_consensus_pre_established = data.shares_to_remove.is_some();
|
||||||
|
if !is_consensus_pre_established {
|
||||||
|
// TODO: even if node was lost, it is still required for ShareRemove session to complete.
|
||||||
|
// It is wrong - if node is not in all_nodes_set, it must be excluded from consensus.
|
||||||
|
let shares_to_remove = shares_to_remove.ok_or(Error::InvalidMessage)?;
|
||||||
|
check_shares_to_remove(&self.core, &shares_to_remove)?;
|
||||||
|
|
||||||
|
let old_set_signature = old_set_signature.ok_or(Error::InvalidMessage)?;
|
||||||
|
let new_set_signature = new_set_signature.ok_or(Error::InvalidMessage)?;
|
||||||
|
let all_nodes_set: BTreeSet<_> = self.core.key_share.id_numbers.keys().cloned().collect();
|
||||||
|
let new_nodes_set: BTreeSet<_> = all_nodes_set.iter().cloned().filter(|n| !shares_to_remove.contains(&n)).collect();
|
||||||
|
let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?;
|
||||||
|
|
||||||
|
let mut consensus_session = ConsensusSession::new(ConsensusSessionParams {
|
||||||
|
meta: self.core.meta.clone().into_consensus_meta(all_nodes_set.len()),
|
||||||
|
consensus_executor: ServersSetChangeAccessJob::new_on_master(admin_public,
|
||||||
|
all_nodes_set.clone(),
|
||||||
|
all_nodes_set.clone(),
|
||||||
|
new_nodes_set,
|
||||||
|
old_set_signature,
|
||||||
|
new_set_signature),
|
||||||
|
consensus_transport: self.core.transport.clone(),
|
||||||
|
})?;
|
||||||
|
consensus_session.initialize(all_nodes_set)?;
|
||||||
|
data.consensus_session = Some(consensus_session);
|
||||||
|
data.remove_confirmations_to_receive = Some(shares_to_remove.clone());
|
||||||
|
data.shares_to_remove = Some(shares_to_remove);
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// otherwise => start sending ShareRemove-specific messages
|
||||||
|
Self::on_consensus_established(&self.core, &mut *data)
|
||||||
|
}
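// Usage sketch (placeholder values, mirroring the tests at the bottom of this file): only the
// master node calls `initialize`, passing the set of shares to remove together with the
// administrator's signatures over the old and new server sets:
//
//     session.initialize(Some(shares_to_remove), Some(old_set_signature), Some(new_set_signature))?;
//
// other nodes never call `initialize` here; they join the consensus when the corresponding
// messages arrive via `process_message`.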
|
||||||
|
|
||||||
|
/// Process single message.
|
||||||
|
pub fn process_message(&self, sender: &NodeId, message: &ShareRemoveMessage) -> Result<(), Error> {
|
||||||
|
if self.core.nonce != message.session_nonce() {
|
||||||
|
return Err(Error::ReplayProtection);
|
||||||
|
}
|
||||||
|
|
||||||
|
match message {
|
||||||
|
&ShareRemoveMessage::ShareRemoveConsensusMessage(ref message) =>
|
||||||
|
self.on_consensus_message(sender, message),
|
||||||
|
&ShareRemoveMessage::ShareRemoveRequest(ref message) =>
|
||||||
|
self.on_share_remove_request(sender, message),
|
||||||
|
&ShareRemoveMessage::ShareRemoveConfirm(ref message) =>
|
||||||
|
self.on_share_remove_confirmation(sender, message),
|
||||||
|
&ShareRemoveMessage::ShareRemoveError(ref message) =>
|
||||||
|
self.on_session_error(sender, message),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// When consensus-related message is received.
|
||||||
|
pub fn on_consensus_message(&self, sender: &NodeId, message: &ShareRemoveConsensusMessage) -> Result<(), Error> {
|
||||||
|
debug_assert!(self.core.meta.id == *message.session);
|
||||||
|
debug_assert!(sender != &self.core.meta.self_node_id);
|
||||||
|
|
||||||
|
// start slave consensus session if needed
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
if data.consensus_session.is_none() && sender == &self.core.meta.master_node_id {
|
||||||
|
match &message.message {
|
||||||
|
&ConsensusMessageWithServersSet::InitializeConsensusSession(ref message) => {
|
||||||
|
let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?;
|
||||||
|
let current_nodes_set = self.core.key_share.id_numbers.keys().cloned().collect();
|
||||||
|
data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams {
|
||||||
|
meta: self.core.meta.clone().into_consensus_meta(message.old_nodes_set.len()),
|
||||||
|
consensus_executor: ServersSetChangeAccessJob::new_on_slave(admin_public, current_nodes_set),
|
||||||
|
consensus_transport: self.core.transport.clone(),
|
||||||
|
})?);
|
||||||
|
},
|
||||||
|
_ => return Err(Error::InvalidStateForRequest),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let (is_establishing_consensus, is_consensus_established, shares_to_remove) = {
|
||||||
|
let consensus_session = data.consensus_session.as_mut().ok_or(Error::InvalidMessage)?;
|
||||||
|
let is_establishing_consensus = consensus_session.state() == ConsensusSessionState::EstablishingConsensus;
|
||||||
|
let shares_to_remove = match &message.message {
|
||||||
|
&ConsensusMessageWithServersSet::InitializeConsensusSession(ref message) => {
|
||||||
|
consensus_session.on_consensus_partial_request(sender, ServersSetChangeAccessRequest::from(message))?;
|
||||||
|
let shares_to_remove = message.old_nodes_set.difference(&message.new_nodes_set).cloned().map(Into::into).collect::<BTreeSet<_>>();
|
||||||
|
check_shares_to_remove(&self.core, &shares_to_remove)?;
|
||||||
|
Some(shares_to_remove)
|
||||||
|
},
|
||||||
|
&ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ref message) => {
|
||||||
|
consensus_session.on_consensus_partial_response(sender, message.is_confirmed)?;
|
||||||
|
None
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
(
|
||||||
|
is_establishing_consensus,
|
||||||
|
consensus_session.state() == ConsensusSessionState::ConsensusEstablished,
|
||||||
|
shares_to_remove
|
||||||
|
)
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(shares_to_remove) = shares_to_remove {
|
||||||
|
data.remove_confirmations_to_receive = Some(shares_to_remove.clone());
|
||||||
|
data.shares_to_remove = Some(shares_to_remove);
|
||||||
|
}
|
||||||
|
if self.core.meta.self_node_id != self.core.meta.master_node_id || !is_establishing_consensus || !is_consensus_established {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
Self::on_consensus_established(&self.core, &mut *data)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// When share remove request is received.
|
||||||
|
pub fn on_share_remove_request(&self, sender: &NodeId, message: &ShareRemoveRequest) -> Result<(), Error> {
|
||||||
|
debug_assert!(self.core.meta.id == *message.session);
|
||||||
|
debug_assert!(sender != &self.core.meta.self_node_id);
|
||||||
|
|
||||||
|
// awaiting this message from master node only
|
||||||
|
if sender != &self.core.meta.master_node_id {
|
||||||
|
return Err(Error::InvalidMessage);
|
||||||
|
}
|
||||||
|
|
||||||
|
// check state
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
if data.state == SessionState::ConsensusEstablishing && data.shares_to_remove.is_some() {
|
||||||
|
data.state = SessionState::WaitingForRemoveConfirmation;
|
||||||
|
} else if data.state != SessionState::WaitingForRemoveConfirmation {
|
||||||
|
return Err(Error::InvalidStateForRequest);
|
||||||
|
}
|
||||||
|
// only process if we are waiting for this request
|
||||||
|
{
|
||||||
|
let shares_to_remove = data.shares_to_remove.as_ref()
|
||||||
|
.expect("shares_to_remove is filled when consensus is established; we only process share move request after consensus is established; qed");
|
||||||
|
if !shares_to_remove.contains(&self.core.meta.self_node_id) {
|
||||||
|
return Err(Error::InvalidMessage);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// remove share
|
||||||
|
Self::complete_session(&self.core, &mut *data)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// When share remove confirmation is received.
|
||||||
|
pub fn on_share_remove_confirmation(&self, sender: &NodeId, message: &ShareRemoveConfirm) -> Result<(), Error> {
|
||||||
|
debug_assert!(self.core.meta.id == *message.session);
|
||||||
|
debug_assert!(sender != &self.core.meta.self_node_id);
|
||||||
|
|
||||||
|
// check state
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
if data.state == SessionState::ConsensusEstablishing && data.shares_to_remove.is_some() {
|
||||||
|
data.state = SessionState::WaitingForRemoveConfirmation;
|
||||||
|
} else if data.state != SessionState::WaitingForRemoveConfirmation {
|
||||||
|
return Err(Error::InvalidStateForRequest);
|
||||||
|
}
|
||||||
|
// process remove confirmation
|
||||||
|
{
|
||||||
|
let remove_confirmations_to_receive = data.remove_confirmations_to_receive.as_mut()
|
||||||
|
.expect("remove_confirmations_to_receive is filled when consensus is established; we only process share move confirmations after consensus is established; qed");
|
||||||
|
if !remove_confirmations_to_receive.remove(sender) {
|
||||||
|
return Err(Error::InvalidMessage);
|
||||||
|
}
|
||||||
|
|
||||||
|
if !remove_confirmations_to_receive.is_empty() {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Self::complete_session(&self.core, &mut *data)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// When error has occurred on another node.
|
||||||
|
pub fn on_session_error(&self, sender: &NodeId, message: &ShareRemoveError) -> Result<(), Error> {
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
|
||||||
|
warn!("{}: share remove session failed with error: {} from {}", self.core.meta.self_node_id, message.error, sender);
|
||||||
|
|
||||||
|
data.state = SessionState::Finished;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Start sending ShareRemove-specific messages when consensus is established.
|
||||||
|
fn on_consensus_established(core: &SessionCore<T>, data: &mut SessionData<T>) -> Result<(), Error> {
|
||||||
|
// update state
|
||||||
|
data.state = SessionState::WaitingForRemoveConfirmation;
|
||||||
|
|
||||||
|
// send share remove requests to every required node
|
||||||
|
Self::disseminate_share_remove_requests(core, data)?;
|
||||||
|
|
||||||
|
{
|
||||||
|
let shares_to_remove = data.shares_to_remove.as_ref()
|
||||||
|
.expect("shares_to_remove is filled when consensus is established; on_consensus_established is called after consensus is established; qed");
|
||||||
|
if !shares_to_remove.contains(&core.meta.self_node_id) {
|
||||||
|
// remember remove confirmations to receive
|
||||||
|
data.remove_confirmations_to_receive = Some(shares_to_remove.iter().cloned().collect());
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// complete session if share is lost
|
||||||
|
Self::complete_session(core, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Disseminate share remove requests.
|
||||||
|
fn disseminate_share_remove_requests(core: &SessionCore<T>, data: &mut SessionData<T>) -> Result<(), Error> {
|
||||||
|
let shares_to_remove = data.shares_to_remove.as_ref()
|
||||||
|
.expect("shares_to_remove is filled when consensus is established; disseminate_share_remove_requests is called after consensus is established; qed");
|
||||||
|
for node in shares_to_remove.iter().filter(|n| **n != core.meta.self_node_id) {
|
||||||
|
core.transport.send(node, ShareRemoveMessage::ShareRemoveRequest(ShareRemoveRequest {
|
||||||
|
session: core.meta.id.clone().into(),
|
||||||
|
session_nonce: core.nonce,
|
||||||
|
}))?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
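// Message-flow sketch for this session (derived from the functions above and below): after
// consensus, the master sends `ShareRemoveRequest` to every node whose share is being removed;
// each such node erases its share and sends `ShareRemoveConfirm` to each remaining node, and the
// remaining nodes finish once every expected confirmation has been received.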
|
||||||
|
|
||||||
|
/// Complete session on this node.
|
||||||
|
fn complete_session(core: &SessionCore<T>, data: &mut SessionData<T>) -> Result<(), Error> {
|
||||||
|
// update state
|
||||||
|
data.state = SessionState::Finished;
|
||||||
|
|
||||||
|
// if our share is being removed => remove it from storage
|
||||||
|
let shares_to_remove = data.shares_to_remove.as_ref()
|
||||||
|
.expect("shares_to_remove is filled when consensus is established; complete_session is called after consensus is established; qed");
|
||||||
|
if shares_to_remove.contains(&core.meta.self_node_id) {
|
||||||
|
// send confirmation to all other nodes
|
||||||
|
let new_nodes_set = core.key_share.id_numbers.keys().filter(|n| !shares_to_remove.contains(n)).collect::<Vec<_>>();
|
||||||
|
for node in new_nodes_set.into_iter().filter(|n| **n != core.meta.self_node_id) {
|
||||||
|
core.transport.send(&node, ShareRemoveMessage::ShareRemoveConfirm(ShareRemoveConfirm {
|
||||||
|
session: core.meta.id.clone().into(),
|
||||||
|
session_nonce: core.nonce,
|
||||||
|
}))?;
|
||||||
|
}
|
||||||
|
|
||||||
|
return core.key_storage.remove(&core.meta.id)
|
||||||
|
.map_err(|e| Error::KeyStorage(e.into()));
|
||||||
|
}
|
||||||
|
|
||||||
|
// else we need to update key_share.id_numbers.keys()
|
||||||
|
let mut key_share = core.key_share.clone();
|
||||||
|
for share_to_remove in shares_to_remove {
|
||||||
|
key_share.id_numbers.remove(share_to_remove);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ... and update key share in storage
|
||||||
|
core.key_storage.update(core.meta.id.clone(), key_share)
|
||||||
|
.map_err(|e| Error::KeyStorage(e.into()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> Session for SessionImpl<T> where T: SessionTransport + Send + Sync + 'static {
|
||||||
|
fn wait(&self) -> Result<(), Error> {
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
if data.result.is_none() {
|
||||||
|
self.core.completed.wait(&mut data);
|
||||||
|
}
|
||||||
|
|
||||||
|
data.result.clone()
|
||||||
|
.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> ClusterSession for SessionImpl<T> where T: SessionTransport {
|
||||||
|
fn is_finished(&self) -> bool {
|
||||||
|
self.data.lock().state == SessionState::Finished
|
||||||
|
}
|
||||||
|
|
||||||
|
fn on_session_timeout(&self) {
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
|
||||||
|
warn!("{}: share remove session failed with timeout", self.core.meta.self_node_id);
|
||||||
|
|
||||||
|
data.state = SessionState::Finished;
|
||||||
|
data.result = Some(Err(Error::NodeDisconnected));
|
||||||
|
self.core.completed.notify_all();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn on_node_timeout(&self, node: &NodeId) {
|
||||||
|
let mut data = self.data.lock();
|
||||||
|
|
||||||
|
warn!("{}: share remove session failed because {} connection has timeouted", self.core.meta.self_node_id, node);
|
||||||
|
|
||||||
|
data.state = SessionState::Finished;
|
||||||
|
data.result = Some(Err(Error::NodeDisconnected));
|
||||||
|
self.core.completed.notify_all();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl IsolatedSessionTransport {
|
||||||
|
pub fn new(session_id: SessionId, nonce: u64, cluster: Arc<Cluster>) -> Self {
|
||||||
|
IsolatedSessionTransport {
|
||||||
|
session: session_id,
|
||||||
|
nonce: nonce,
|
||||||
|
cluster: cluster,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl JobTransport for IsolatedSessionTransport {
|
||||||
|
type PartialJobRequest = ServersSetChangeAccessRequest;
|
||||||
|
type PartialJobResponse = bool;
|
||||||
|
|
||||||
|
fn send_partial_request(&self, node: &NodeId, request: ServersSetChangeAccessRequest) -> Result<(), Error> {
|
||||||
|
self.cluster.send(node, Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(ShareRemoveConsensusMessage {
|
||||||
|
session: self.session.clone().into(),
|
||||||
|
session_nonce: self.nonce,
|
||||||
|
message: ConsensusMessageWithServersSet::InitializeConsensusSession(InitializeConsensusSessionWithServersSet {
|
||||||
|
old_nodes_set: request.old_servers_set.into_iter().map(Into::into).collect(),
|
||||||
|
new_nodes_set: request.new_servers_set.into_iter().map(Into::into).collect(),
|
||||||
|
old_set_signature: request.old_set_signature.into(),
|
||||||
|
new_set_signature: request.new_set_signature.into(),
|
||||||
|
}),
|
||||||
|
})))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> {
|
||||||
|
self.cluster.send(node, Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(ShareRemoveConsensusMessage {
|
||||||
|
session: self.session.clone().into(),
|
||||||
|
session_nonce: self.nonce,
|
||||||
|
message: ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
|
||||||
|
is_confirmed: response,
|
||||||
|
}),
|
||||||
|
})))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SessionTransport for IsolatedSessionTransport {
|
||||||
|
fn send(&self, node: &NodeId, message: ShareRemoveMessage) -> Result<(), Error> {
|
||||||
|
self.cluster.send(node, Message::ShareRemove(message))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn check_shares_to_remove<T: SessionTransport>(core: &SessionCore<T>, shares_to_remove: &BTreeSet<NodeId>) -> Result<(), Error> {
|
||||||
|
// shares to remove must not be empty
|
||||||
|
if shares_to_remove.is_empty() {
|
||||||
|
return Err(Error::InvalidMessage);
|
||||||
|
}
|
||||||
|
|
||||||
|
// all shares_to_remove nodes must be old nodes of the session
|
||||||
|
if shares_to_remove.iter().any(|n| !core.key_share.id_numbers.contains_key(n)) {
|
||||||
|
return Err(Error::InvalidNodesConfiguration);
|
||||||
|
}
|
||||||
|
|
||||||
|
// do not allow removing more shares than possible
|
||||||
|
let nodes_left = core.key_share.id_numbers.len() - shares_to_remove.len();
|
||||||
|
if core.key_share.threshold + 1 > nodes_left {
|
||||||
|
return Err(Error::InvalidNodesConfiguration);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
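// Worked example (numbers taken from the tests below): with `threshold = 1` and three shares,
// `threshold + 1 = 2` nodes must keep a share, so removing a single share passes this check,
// while removing two of the three shares fails with `Error::InvalidNodesConfiguration`.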
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::collections::{VecDeque, BTreeMap, BTreeSet};
|
||||||
|
use ethkey::{Random, Generator, Public, Signature, KeyPair, sign};
|
||||||
|
use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage};
|
||||||
|
use key_server_cluster::cluster::Cluster;
|
||||||
|
use key_server_cluster::cluster_sessions::ClusterSession;
|
||||||
|
use key_server_cluster::cluster::tests::DummyCluster;
|
||||||
|
use key_server_cluster::generation_session::tests::{Node as GenerationNode, generate_nodes_ids};
|
||||||
|
use key_server_cluster::math;
|
||||||
|
use key_server_cluster::message::Message;
|
||||||
|
use key_server_cluster::servers_set_change_session::tests::generate_key;
|
||||||
|
use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash;
|
||||||
|
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
||||||
|
use key_server_cluster::admin_sessions::share_add_session::tests::check_secret_is_preserved;
|
||||||
|
use super::{SessionImpl, SessionParams, IsolatedSessionTransport};
|
||||||
|
|
||||||
|
struct Node {
|
||||||
|
pub cluster: Arc<DummyCluster>,
|
||||||
|
pub key_storage: Arc<DummyKeyStorage>,
|
||||||
|
pub session: SessionImpl<IsolatedSessionTransport>,
|
||||||
|
}
|
||||||
|
|
||||||
|
struct MessageLoop {
|
||||||
|
pub admin_key_pair: KeyPair,
|
||||||
|
pub original_key_pair: KeyPair,
|
||||||
|
pub old_nodes_set: BTreeSet<NodeId>,
|
||||||
|
pub new_nodes_set: BTreeSet<NodeId>,
|
||||||
|
pub old_set_signature: Signature,
|
||||||
|
pub new_set_signature: Signature,
|
||||||
|
pub nodes: BTreeMap<NodeId, Node>,
|
||||||
|
pub queue: VecDeque<(NodeId, NodeId, Message)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_session(mut meta: ShareChangeSessionMeta, admin_public: Public, self_node_id: NodeId, cluster: Arc<Cluster>, key_storage: Arc<KeyStorage>) -> SessionImpl<IsolatedSessionTransport> {
|
||||||
|
let session_id = meta.id.clone();
|
||||||
|
meta.self_node_id = self_node_id;
|
||||||
|
SessionImpl::new(SessionParams {
|
||||||
|
meta: meta.clone(),
|
||||||
|
transport: IsolatedSessionTransport::new(session_id, 1, cluster),
|
||||||
|
key_storage: key_storage,
|
||||||
|
admin_public: Some(admin_public),
|
||||||
|
nonce: 1,
|
||||||
|
}).unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_node(meta: ShareChangeSessionMeta, admin_public: Public, node: GenerationNode) -> Node {
|
||||||
|
Node {
|
||||||
|
cluster: node.cluster.clone(),
|
||||||
|
key_storage: node.key_storage.clone(),
|
||||||
|
session: create_session(meta, admin_public, node.session.node().clone(), node.cluster, node.key_storage),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl MessageLoop {
|
||||||
|
pub fn new(t: usize, master_node_id: NodeId, old_nodes_set: BTreeSet<NodeId>, shares_to_remove: BTreeSet<NodeId>) -> Self {
|
||||||
|
// generate admin key pair
|
||||||
|
let admin_key_pair = Random.generate().unwrap();
|
||||||
|
let admin_public = admin_key_pair.public().clone();
|
||||||
|
|
||||||
|
// run initial generation session
|
||||||
|
let gml = generate_key(t, old_nodes_set.clone());
|
||||||
|
let original_secret = math::compute_joint_secret(gml.nodes.values()
|
||||||
|
.map(|nd| nd.key_storage.get(&SessionId::default()).unwrap().polynom1[0].clone())
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.iter()).unwrap();
|
||||||
|
let original_key_pair = KeyPair::from_secret(original_secret).unwrap();
|
||||||
|
|
||||||
|
// prepare sessions on all nodes
|
||||||
|
let meta = ShareChangeSessionMeta {
|
||||||
|
id: SessionId::default(),
|
||||||
|
self_node_id: NodeId::default(),
|
||||||
|
master_node_id: master_node_id,
|
||||||
|
};
|
||||||
|
let new_nodes_set: BTreeSet<_> = old_nodes_set.iter()
|
||||||
|
.filter(|n| !shares_to_remove.contains(n))
|
||||||
|
.cloned()
|
||||||
|
.collect();
|
||||||
|
let nodes = gml.nodes.into_iter().map(|gn| create_node(meta.clone(), admin_public.clone(), gn.1));
|
||||||
|
let nodes = nodes.map(|n| (n.session.core.meta.self_node_id.clone(), n)).collect();
|
||||||
|
|
||||||
|
let old_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&old_nodes_set)).unwrap();
|
||||||
|
let new_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap();
|
||||||
|
MessageLoop {
|
||||||
|
admin_key_pair: admin_key_pair,
|
||||||
|
original_key_pair: original_key_pair,
|
||||||
|
old_nodes_set: old_nodes_set.clone(),
|
||||||
|
new_nodes_set: new_nodes_set.clone(),
|
||||||
|
old_set_signature: old_set_signature,
|
||||||
|
new_set_signature: new_set_signature,
|
||||||
|
nodes: nodes,
|
||||||
|
queue: Default::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn run(&mut self) {
|
||||||
|
while let Some((from, to, message)) = self.take_message() {
|
||||||
|
self.process_message((from, to, message)).unwrap();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> {
|
||||||
|
self.nodes.values()
|
||||||
|
.filter_map(|n| n.cluster.take_message().map(|m| (n.session.core.meta.self_node_id.clone(), m.0, m.1)))
|
||||||
|
.nth(0)
|
||||||
|
.or_else(|| self.queue.pop_front())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> {
|
||||||
|
match { match msg.2 {
|
||||||
|
Message::ShareRemove(ref message) =>
|
||||||
|
self.nodes[&msg.1].session.process_message(&msg.0, message),
|
||||||
|
_ => unreachable!("only servers set change messages are expected"),
|
||||||
|
} } {
|
||||||
|
Ok(_) => Ok(()),
|
||||||
|
Err(Error::TooEarlyForRequest) => {
|
||||||
|
self.queue.push_back(msg);
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
|
Err(err) => Err(err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn remove_session_fails_if_no_nodes_are_removed() {
|
||||||
|
let (t, n) = (1, 3);
|
||||||
|
let old_nodes_set = generate_nodes_ids(n);
|
||||||
|
let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
|
||||||
|
let nodes_to_remove = BTreeSet::new();
|
||||||
|
let ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());
|
||||||
|
assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
|
||||||
|
Some(ml.old_set_signature.clone()),
|
||||||
|
Some(ml.new_set_signature.clone())), Err(Error::InvalidMessage));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn remove_session_fails_if_foreign_nodes_are_removed() {
|
||||||
|
let (t, n) = (1, 3);
|
||||||
|
let old_nodes_set = generate_nodes_ids(n);
|
||||||
|
let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
|
||||||
|
let nodes_to_remove: BTreeSet<_> = vec![math::generate_random_point().unwrap()].into_iter().collect();
|
||||||
|
let ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());
|
||||||
|
assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
|
||||||
|
Some(ml.old_set_signature.clone()),
|
||||||
|
Some(ml.new_set_signature.clone())), Err(Error::InvalidNodesConfiguration));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn remove_session_fails_if_too_many_nodes_are_removed() {
|
||||||
|
let (t, n) = (1, 3);
|
||||||
|
let old_nodes_set = generate_nodes_ids(n);
|
||||||
|
let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
|
||||||
|
let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().take(2).collect();
|
||||||
|
let ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());
|
||||||
|
assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
|
||||||
|
Some(ml.old_set_signature.clone()),
|
||||||
|
Some(ml.new_set_signature.clone())), Err(Error::InvalidNodesConfiguration));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn nodes_removed_using_share_remove_from_master_node() {
|
||||||
|
let t = 1;
|
||||||
|
let test_cases = vec![(3, 1), (5, 3)];
|
||||||
|
for (n, nodes_to_remove) in test_cases {
|
||||||
|
// generate key && prepare ShareRemove sessions
|
||||||
|
let old_nodes_set = generate_nodes_ids(n);
|
||||||
|
let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
|
||||||
|
let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().take(nodes_to_remove).collect();
|
||||||
|
let mut ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());
|
||||||
|
|
||||||
|
// initialize session on master node && run to completion
|
||||||
|
ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
|
||||||
|
Some(ml.old_set_signature.clone()),
|
||||||
|
Some(ml.new_set_signature.clone())).unwrap();
|
||||||
|
ml.run();
|
||||||
|
|
||||||
|
// check that session has completed on all nodes
|
||||||
|
assert!(ml.nodes.values().all(|n| n.session.is_finished()));
|
||||||
|
|
||||||
|
// check that secret is still the same as before removing the shares
|
||||||
|
check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter()
|
||||||
|
.filter(|&(k, _)| !nodes_to_remove.contains(k))
|
||||||
|
.map(|(k, v)| (k.clone(), v.key_storage.clone()))
|
||||||
|
.collect());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn nodes_removed_using_share_remove_from_non_master_node() {
|
||||||
|
let t = 1;
|
||||||
|
let test_cases = vec![(3, 1), (5, 3)];
|
||||||
|
for (n, nodes_to_remove) in test_cases {
|
||||||
|
// generate key && prepare ShareRemove sessions
|
||||||
|
let old_nodes_set = generate_nodes_ids(n);
|
||||||
|
let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
|
||||||
|
let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().skip(1).take(nodes_to_remove).collect();
|
||||||
|
let mut ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());
|
||||||
|
|
||||||
|
// initialize session on master node && run to completion
|
||||||
|
ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
|
||||||
|
Some(ml.old_set_signature.clone()),
|
||||||
|
Some(ml.new_set_signature.clone())).unwrap();
|
||||||
|
ml.run();
|
||||||
|
|
||||||
|
// check that session has completed on all nodes
|
||||||
|
assert!(ml.nodes.values().all(|n| n.session.is_finished()));
|
||||||
|
|
||||||
|
// check that secret is still the same as before removing the shares
|
||||||
|
check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter()
|
||||||
|
.filter(|&(k, _)| !nodes_to_remove.contains(k))
|
||||||
|
.map(|(k, v)| (k.clone(), v.key_storage.clone()))
|
||||||
|
.collect());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@ -25,6 +25,7 @@ use key_server_cluster::message::{Message, DecryptionMessage, DecryptionConsensu
|
|||||||
PartialDecryption, DecryptionSessionError, DecryptionSessionCompleted, ConsensusMessage, InitializeConsensusSession,
|
PartialDecryption, DecryptionSessionError, DecryptionSessionCompleted, ConsensusMessage, InitializeConsensusSession,
|
||||||
ConfirmConsensusInitialization};
|
ConfirmConsensusInitialization};
|
||||||
use key_server_cluster::jobs::job_session::JobTransport;
|
use key_server_cluster::jobs::job_session::JobTransport;
|
||||||
|
use key_server_cluster::jobs::key_access_job::KeyAccessJob;
|
||||||
use key_server_cluster::jobs::decryption_job::{PartialDecryptionRequest, PartialDecryptionResponse, DecryptionJob};
|
use key_server_cluster::jobs::decryption_job::{PartialDecryptionRequest, PartialDecryptionResponse, DecryptionJob};
|
||||||
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
|
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
|
||||||
|
|
||||||
@ -66,7 +67,7 @@ struct SessionCore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Decryption consensus session type.
|
/// Decryption consensus session type.
|
||||||
type DecryptionConsensusSession = ConsensusSession<DecryptionConsensusTransport, DecryptionJob, DecryptionJobTransport>;
|
type DecryptionConsensusSession = ConsensusSession<KeyAccessJob, DecryptionConsensusTransport, DecryptionJob, DecryptionJobTransport>;
|
||||||
|
|
||||||
/// Mutable session data.
|
/// Mutable session data.
|
||||||
struct SessionData {
|
struct SessionData {
|
||||||
@@ -151,10 +152,18 @@ impl SessionImpl {
 			nonce: params.nonce,
 			cluster: params.cluster.clone(),
 		};
+		let consensus_session = ConsensusSession::new(ConsensusSessionParams {
+			meta: params.meta.clone(),
+			consensus_executor: match requester_signature {
+				Some(requester_signature) => KeyAccessJob::new_on_master(params.meta.id.clone(), params.acl_storage.clone(), requester_signature),
+				None => KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()),
+			},
+			consensus_transport: consensus_transport,
+		})?;
+
 		Ok(SessionImpl {
 			core: SessionCore {
-				meta: params.meta.clone(),
+				meta: params.meta,
 				access_key: params.access_key,
 				key_share: params.key_share,
 				cluster: params.cluster,
@@ -162,18 +171,7 @@ impl SessionImpl {
 				completed: Condvar::new(),
 			},
 			data: Mutex::new(SessionData {
-				consensus_session: match requester_signature {
-					Some(requester_signature) => ConsensusSession::new_on_master(ConsensusSessionParams {
-						meta: params.meta,
-						acl_storage: params.acl_storage.clone(),
-						consensus_transport: consensus_transport,
-					}, requester_signature)?,
-					None => ConsensusSession::new_on_slave(ConsensusSessionParams {
-						meta: params.meta,
-						acl_storage: params.acl_storage.clone(),
-						consensus_transport: consensus_transport,
-					})?,
-				},
+				consensus_session: consensus_session,
 				is_shadow_decryption: None,
 				result: None,
 			}),
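The constructor change above (mirrored later for the signing session) replaces the dedicated `ConsensusSession::new_on_master`/`new_on_slave` constructors with a single `ConsensusSession::new` that receives a pluggable `consensus_executor` (`KeyAccessJob`), so the same consensus machinery can be reused by admin sessions with a different access-check job. A minimal, dependency-free sketch of that shape; `AccessJob`, `MasterAccessJob`, `SlaveAccessJob` are illustrative stand-ins, not the crate's real types:

```rust
// Illustrative sketch only: a consensus session generic over "who decides access",
// mirroring how the PR turns the ACL check into a pluggable consensus_executor.

/// The part that decides whether a requester may take part in the session.
trait AccessJob {
    fn check_access(&self, requester: &str) -> bool;
}

/// Master-side job: knows the requester signature up front.
struct MasterAccessJob { requester: String }
/// Slave-side job: learns the requester from the initialization message.
struct SlaveAccessJob;

impl AccessJob for MasterAccessJob {
    fn check_access(&self, requester: &str) -> bool { self.requester == requester }
}
impl AccessJob for SlaveAccessJob {
    fn check_access(&self, _requester: &str) -> bool { true } // ACL-storage lookup in the real code
}

/// Consensus session parameterized by the access-check executor.
struct ConsensusSession<E: AccessJob> {
    consensus_executor: E,
}

impl<E: AccessJob> ConsensusSession<E> {
    fn new(consensus_executor: E) -> Self {
        ConsensusSession { consensus_executor }
    }

    fn on_initialize(&self, requester: &str) -> Result<(), String> {
        if self.consensus_executor.check_access(requester) {
            Ok(())
        } else {
            Err(format!("access denied for {}", requester))
        }
    }
}

fn main() {
    let master = ConsensusSession::new(MasterAccessJob { requester: "alice".into() });
    let slave = ConsensusSession::new(SlaveAccessJob);
    assert!(master.on_initialize("alice").is_ok());
    assert!(master.on_initialize("mallory").is_err());
    assert!(slave.on_initialize("bob").is_ok());
}
```

The point is only that the session is generic over the executor, which is what later lets the ServersSetChange/ShareAdd sessions plug in a servers-set consensus check instead of a key ACL check.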
@@ -267,7 +265,7 @@ impl SessionImpl {
 		debug_assert!(sender != &self.core.meta.self_node_id);
 
 		let mut data = self.data.lock();
-		let requester = data.consensus_session.requester()?.clone();
+		let requester = data.consensus_session.consensus_job().executor().requester()?.ok_or(Error::InvalidStateForRequest)?.clone();
 		let decryption_job = DecryptionJob::new_on_slave(self.core.meta.self_node_id.clone(), self.core.access_key.clone(), requester, self.core.key_share.clone())?;
 		let decryption_transport = self.core.decryption_transport();
 
@@ -401,7 +399,7 @@ impl SessionCore {
 	}
 
 	pub fn disseminate_jobs(&self, consensus_session: &mut DecryptionConsensusSession, is_shadow_decryption: bool) -> Result<(), Error> {
-		let requester = consensus_session.requester()?.clone();
+		let requester = consensus_session.consensus_job().executor().requester()?.ok_or(Error::InvalidStateForRequest)?.clone();
 		let decryption_job = DecryptionJob::new_on_master(self.meta.self_node_id.clone(), self.access_key.clone(), requester, self.key_share.clone(), is_shadow_decryption)?;
 		consensus_session.disseminate_jobs(decryption_job, self.decryption_transport())
 	}
@@ -532,6 +530,7 @@ mod tests {
 			threshold: 3,
 			id_numbers: id_numbers.clone().into_iter().collect(),
 			secret_share: secret_shares[i].clone(),
+			polynom1: Vec::new(),
 			common_point: Some(common_point.clone()),
 			encrypted_point: Some(encrypted_point.clone()),
 		}).collect();
@@ -600,6 +599,7 @@ mod tests {
 				threshold: 0,
 				id_numbers: nodes,
 				secret_share: Random.generate().unwrap().secret().clone(),
+				polynom1: Vec::new(),
 				common_point: Some(Random.generate().unwrap().public().clone()),
 				encrypted_point: Some(Random.generate().unwrap().public().clone()),
 			},
@@ -631,6 +631,7 @@ mod tests {
 				threshold: 0,
 				id_numbers: nodes,
 				secret_share: Random.generate().unwrap().secret().clone(),
+				polynom1: Vec::new(),
 				common_point: Some(Random.generate().unwrap().public().clone()),
 				encrypted_point: Some(Random.generate().unwrap().public().clone()),
 			},
@@ -662,6 +663,7 @@ mod tests {
 				threshold: 2,
 				id_numbers: nodes,
 				secret_share: Random.generate().unwrap().secret().clone(),
+				polynom1: Vec::new(),
 				common_point: Some(Random.generate().unwrap().public().clone()),
 				encrypted_point: Some(Random.generate().unwrap().public().clone()),
 			},
@@ -252,7 +252,7 @@ impl SessionImpl {
 	}
 
 	/// When error has occurred on another node.
-	pub fn on_session_error(&self, sender: NodeId, message: &EncryptionSessionError) -> Result<(), Error> {
+	pub fn on_session_error(&self, sender: &NodeId, message: &EncryptionSessionError) -> Result<(), Error> {
 		self.check_nonce(message.session_nonce)?;
 
 		let mut data = self.data.lock();
@@ -100,6 +100,8 @@ struct SessionData {
 	nodes: BTreeMap<NodeId, NodeData>,
 
 	// === Values, filled during KD phase ===
+	/// Polynom1.
+	polynom1: Option<Vec<Secret>>,
 	/// Value of polynom1[0], generated by this node.
 	secret_coeff: Option<Secret>,
 
@@ -121,10 +123,6 @@ struct NodeData {
 	pub id_number: Secret,
 
 	// === Values, filled during KD phase ===
-	/// Secret value1, which has been sent to this node.
-	pub secret1_sent: Option<Secret>,
-	/// Secret value2, which has been sent to this node.
-	pub secret2_sent: Option<Secret>,
 	/// Secret value1, which has been received from this node.
 	pub secret1: Option<Secret>,
 	/// Secret value2, which has been received from this node.
@@ -203,6 +201,7 @@ impl SessionImpl {
 			threshold: None,
 			derived_point: None,
 			nodes: BTreeMap::new(),
+			polynom1: None,
 			secret_coeff: None,
 			secret_share: None,
 			key_share: None,
@@ -293,7 +292,7 @@ impl SessionImpl {
 			&GenerationMessage::PublicKeyShare(ref message) =>
 				self.on_public_key_share(sender.clone(), message),
 			&GenerationMessage::SessionError(ref message) =>
-				self.on_session_error(sender.clone(), message),
+				self.on_session_error(sender, message),
 			&GenerationMessage::SessionCompleted(ref message) =>
 				self.on_session_completed(sender.clone(), message),
 		}
@@ -507,6 +506,7 @@ impl SessionImpl {
 			threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"),
 			id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(),
 			secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(),
+			polynom1: data.polynom1.as_ref().expect("polynom1 is filled in KG phase; we are at the end of KG phase; qed").clone(),
 			common_point: None,
 			encrypted_point: None,
 		};
@@ -547,7 +547,7 @@ impl SessionImpl {
 	}
 
 	/// When error has occurred on another node.
-	pub fn on_session_error(&self, sender: NodeId, message: &SessionError) -> Result<(), Error> {
+	pub fn on_session_error(&self, sender: &NodeId, message: &SessionError) -> Result<(), Error> {
 		let mut data = self.data.lock();
 
 		warn!("{}: generation session failed with error: {} from {}", self.node(), message.error, sender);
@@ -585,6 +585,7 @@ impl SessionImpl {
 		let threshold = data.threshold.expect("threshold is filled on initialization phase; KD phase follows initialization phase; qed");
 		let polynom1 = math::generate_random_polynom(threshold)?;
 		let polynom2 = math::generate_random_polynom(threshold)?;
+		data.polynom1 = Some(polynom1.clone());
 		data.secret_coeff = Some(polynom1[0].clone());
 
 		// compute t+1 public values
@@ -600,9 +601,6 @@ impl SessionImpl {
 
 			// send a message containing secret1 && secret2 to other node
 			if node != self.node() {
-				node_data.secret1_sent = Some(secret1.clone());
-				node_data.secret2_sent = Some(secret2.clone());
-
 				self.cluster.send(&node, Message::Generation(GenerationMessage::KeysDissemination(KeysDissemination {
 					session: self.id.clone().into(),
 					session_nonce: self.nonce,
@@ -687,6 +685,7 @@ impl SessionImpl {
 			threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"),
 			id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(),
 			secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(),
+			polynom1: data.polynom1.as_ref().expect("polynom1 is filled in KG phase; we are at the end of KG phase; qed").clone(),
 			common_point: None,
 			encrypted_point: None,
 		};
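The hunks above persist the whole `polynom1` vector in the key share instead of only its constant term (`secret_coeff`). This matches the commit "check polynom1 in ShareAdd sessions (error for pre-v2 shares)": adding a share for a new node later requires evaluating the stored polynomial at the new node's id number, which the constant term alone cannot provide. A toy illustration of that evaluation, using plain integers rather than the secp256k1 scalar arithmetic the crate's `math` module actually uses:

```rust
// Toy illustration: why the whole coefficient vector has to be persisted.
// Plain u64 arithmetic modulo a small prime stands in for curve-scalar math.

const P: u64 = 1_000_003; // small prime modulus for the toy field

/// Horner evaluation of `coeffs[0] + coeffs[1]*x + coeffs[2]*x^2 + ...` modulo P.
fn eval_polynom(coeffs: &[u64], x: u64) -> u64 {
    coeffs.iter().rev().fold(0u64, |acc, c| (acc * x + c) % P)
}

fn main() {
    // polynom1 as generated during the KD phase (threshold t = 2 => t+1 coefficients).
    let polynom1 = vec![42, 7, 13];

    // secret_coeff is only the constant term...
    let secret_coeff = polynom1[0];

    // ...but issuing a share for a new node means evaluating the polynomial at
    // its id number, which cannot be reconstructed from secret_coeff alone.
    let new_node_id_number = 5;
    let new_share_component = eval_polynom(&polynom1, new_node_id_number);

    println!("secret_coeff = {}", secret_coeff);
    println!("polynom1({}) = {}", new_node_id_number, new_share_component);
}
```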
@@ -810,8 +809,6 @@ impl NodeData {
 	fn with_id_number(node_id_number: Secret) -> Self {
 		NodeData {
 			id_number: node_id_number,
-			secret1_sent: None,
-			secret2_sent: None,
 			secret1: None,
 			secret2: None,
 			publics: None,
@@ -876,13 +873,19 @@ pub mod tests {
 		pub queue: VecDeque<(NodeId, NodeId, Message)>,
 	}
 
+	pub fn generate_nodes_ids(n: usize) -> BTreeSet<NodeId> {
+		(0..n).map(|_| math::generate_random_point().unwrap()).collect()
+	}
+
 	impl MessageLoop {
 		pub fn new(nodes_num: usize) -> Self {
+			Self::with_nodes_ids(generate_nodes_ids(nodes_num))
+		}
+
+		pub fn with_nodes_ids(nodes_ids: BTreeSet<NodeId>) -> Self {
 			let mut nodes = BTreeMap::new();
 			let session_id = SessionId::default();
-			for _ in 0..nodes_num {
-				let key_pair = Random.generate().unwrap();
-				let node_id = key_pair.public().clone();
+			for node_id in nodes_ids {
 				let cluster = Arc::new(DummyCluster::new(node_id.clone()));
 				let key_storage = Arc::new(DummyKeyStorage::default());
 				let session = SessionImpl::new(SessionParams {
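The test `MessageLoop` now accepts an explicit set of node ids (`with_nodes_ids`) instead of generating key pairs internally, so admin-session tests can build an "old" cluster and then reuse and extend the very same ids for a follow-up ShareAdd or ServersSetChange loop. A tiny, self-contained illustration of that fixture pattern (the types here are placeholders, not the crate's test harness):

```rust
// Tiny illustration of the test-fixture change: node ids are generated once
// and shared between the generation loop and a follow-up admin loop, instead
// of being created implicitly inside MessageLoop::new.
use std::collections::{BTreeMap, BTreeSet};

type NodeId = u64;

struct MessageLoop {
    nodes: BTreeMap<NodeId, &'static str>,
}

fn generate_nodes_ids(n: usize) -> BTreeSet<NodeId> {
    (0..n as u64).map(|i| 1000 + i).collect() // random curve points in the real helper
}

impl MessageLoop {
    fn new(nodes_num: usize) -> Self {
        Self::with_nodes_ids(generate_nodes_ids(nodes_num))
    }

    fn with_nodes_ids(nodes_ids: BTreeSet<NodeId>) -> Self {
        MessageLoop {
            nodes: nodes_ids.into_iter().map(|id| (id, "session")).collect(),
        }
    }
}

fn main() {
    // the old cluster and the follow-up loop share exactly the same ids
    let ids = generate_nodes_ids(3);
    let old_loop = MessageLoop::with_nodes_ids(ids.clone());
    let mut extended = ids.clone();
    extended.insert(2000); // the node being added
    let new_loop = MessageLoop::with_nodes_ids(extended);

    assert_eq!(old_loop.nodes.len(), 3);
    assert_eq!(new_loop.nodes.len(), 4);
    let _ = MessageLoop::new(5); // the old entry point still works
}
```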
secret_store/src/key_server_cluster/client_sessions/mod.rs (new file)
@@ -0,0 +1,20 @@
+// Copyright 2015-2017 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+pub mod decryption_session;
+pub mod encryption_session;
+pub mod generation_session;
+pub mod signing_session;
@@ -28,6 +28,7 @@ use key_server_cluster::message::{Message, SigningMessage, SigningConsensusMessa
 	RequestPartialSignature, PartialSignature, SigningSessionCompleted, GenerationMessage, ConsensusMessage, SigningSessionError,
 	InitializeConsensusSession, ConfirmConsensusInitialization};
 use key_server_cluster::jobs::job_session::JobTransport;
+use key_server_cluster::jobs::key_access_job::KeyAccessJob;
 use key_server_cluster::jobs::signing_job::{PartialSigningRequest, PartialSigningResponse, SigningJob};
 use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
 
@@ -70,7 +71,7 @@ struct SessionCore {
 }
 
 /// Signing consensus session type.
-type SigningConsensusSession = ConsensusSession<SigningConsensusTransport, SigningJob, SigningJobTransport>;
+type SigningConsensusSession = ConsensusSession<KeyAccessJob, SigningConsensusTransport, SigningJob, SigningJobTransport>;
 
 /// Mutable session data.
 struct SessionData {
@@ -169,10 +170,18 @@ impl SessionImpl {
 			nonce: params.nonce,
 			cluster: params.cluster.clone(),
 		};
+		let consensus_session = ConsensusSession::new(ConsensusSessionParams {
+			meta: params.meta.clone(),
+			consensus_executor: match requester_signature {
+				Some(requester_signature) => KeyAccessJob::new_on_master(params.meta.id.clone(), params.acl_storage.clone(), requester_signature),
+				None => KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()),
+			},
+			consensus_transport: consensus_transport,
+		})?;
+
 		Ok(SessionImpl {
 			core: SessionCore {
-				meta: params.meta.clone(),
+				meta: params.meta,
 				access_key: params.access_key,
 				key_share: params.key_share,
 				cluster: params.cluster,
@@ -182,18 +191,7 @@ impl SessionImpl {
 			data: Mutex::new(SessionData {
 				state: SessionState::ConsensusEstablishing,
 				message_hash: None,
-				consensus_session: match requester_signature {
-					Some(requester_signature) => ConsensusSession::new_on_master(ConsensusSessionParams {
-						meta: params.meta,
-						acl_storage: params.acl_storage.clone(),
-						consensus_transport: consensus_transport,
-					}, requester_signature)?,
-					None => ConsensusSession::new_on_slave(ConsensusSessionParams {
-						meta: params.meta,
-						acl_storage: params.acl_storage.clone(),
-						consensus_transport: consensus_transport,
-					})?,
-				},
+				consensus_session: consensus_session,
 				generation_session: None,
 				result: None,
 			}),
@@ -789,6 +787,7 @@ mod tests {
 				threshold: 0,
 				id_numbers: nodes,
 				secret_share: Random.generate().unwrap().secret().clone(),
+				polynom1: Vec::new(),
 				common_point: Some(Random.generate().unwrap().public().clone()),
 				encrypted_point: Some(Random.generate().unwrap().public().clone()),
 			},
@@ -820,6 +819,7 @@ mod tests {
 				threshold: 0,
 				id_numbers: nodes,
 				secret_share: Random.generate().unwrap().secret().clone(),
+				polynom1: Vec::new(),
 				common_point: Some(Random.generate().unwrap().public().clone()),
 				encrypted_point: Some(Random.generate().unwrap().public().clone()),
 			},
@@ -851,6 +851,7 @@ mod tests {
 				threshold: 2,
 				id_numbers: nodes,
 				secret_share: Random.generate().unwrap().secret().clone(),
+				polynom1: Vec::new(),
 				common_point: Some(Random.generate().unwrap().public().clone()),
 				encrypted_point: Some(Random.generate().unwrap().public().clone()),
 			},
@@ -20,7 +20,7 @@ use std::sync::Arc;
 use std::collections::{BTreeMap, BTreeSet};
 use std::collections::btree_map::Entry;
 use std::net::{SocketAddr, IpAddr};
-use futures::{finished, failed, Future, Stream, BoxFuture};
+use futures::{finished, failed, Future, Stream};
 use futures_cpupool::CpuPool;
 use parking_lot::{RwLock, Mutex};
 use tokio_io::IoFuture;
@@ -30,9 +30,10 @@ use ethkey::{Public, KeyPair, Signature, Random, Generator};
 use bigint::hash::H256;
 use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair};
 use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, GenerationSessionWrapper, EncryptionSessionWrapper,
-	DecryptionSessionWrapper, SigningSessionWrapper};
+	DecryptionSessionWrapper, SigningSessionWrapper, AdminSessionWrapper};
 use key_server_cluster::message::{self, Message, ClusterMessage, GenerationMessage, EncryptionMessage, DecryptionMessage,
-	SigningMessage, ConsensusMessage};
+	SigningMessage, ServersSetChangeMessage, ConsensusMessage, ShareAddMessage, ShareMoveMessage, ShareRemoveMessage,
+	ConsensusMessageWithServersSecretMap, ConsensusMessageWithServersMap, ConsensusMessageWithServersSet};
 use key_server_cluster::generation_session::{Session as GenerationSession, SessionState as GenerationSessionState};
 #[cfg(test)]
 use key_server_cluster::generation_session::SessionImpl as GenerationSessionImpl;
@@ -55,9 +56,8 @@ const KEEP_ALIVE_SEND_INTERVAL: u64 = 30;
 /// we must treat this node as non-responding && disconnect from it.
 const KEEP_ALIVE_DISCONNECT_INTERVAL: u64 = 60;
 
-/// Encryption sesion timeout interval. It works
 /// Empty future.
-type BoxedEmptyFuture = BoxFuture<(), ()>;
+type BoxedEmptyFuture = ::std::boxed::Box<Future<Item = (), Error = ()> + Send>;
 
 /// Cluster interface for external clients.
 pub trait ClusterClient: Send + Sync {
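This hunk is the "fixed warnings for futures 0.1.15" change from the commit list: `BoxFuture` and the `.boxed()` combinator became deprecated, so the alias is spelled out as an explicit boxed trait object and every call site below wraps its combinator chain in `Box::new(...)`. A dependency-free analogue of the same mechanical change, using `Iterator` in place of `Future` so the sketch compiles without the futures crate:

```rust
// Dependency-free analogue of the BoxFuture -> explicit Box<...> change.
// `Iterator` stands in for `Future`: the old code relied on a helper that
// returned an opaque boxed value, the new code spells the box out itself.

/// The explicit alias, mirroring
/// `type BoxedEmptyFuture = Box<Future<Item = (), Error = ()> + Send>;`
type BoxedU32Iter = Box<dyn Iterator<Item = u32> + Send>;

/// Old style: a `.boxed()`-like helper hides the allocation.
fn numbers_boxed_helper() -> BoxedU32Iter {
    fn boxed<I: Iterator<Item = u32> + Send + 'static>(i: I) -> BoxedU32Iter { Box::new(i) }
    boxed((0..4).map(|x| x * 2))
}

/// New style: wrap the combinator chain in `Box::new(...)` at the call site.
fn numbers_explicit_box() -> BoxedU32Iter {
    Box::new((0..4).map(|x| x * 2))
}

fn main() {
    assert_eq!(numbers_boxed_helper().collect::<Vec<_>>(), vec![0, 2, 4, 6]);
    assert_eq!(numbers_explicit_box().collect::<Vec<_>>(), vec![0, 2, 4, 6]);
    println!("both forms produce the same boxed trait object");
}
```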
@@ -71,6 +71,14 @@ pub trait ClusterClient: Send + Sync {
 	fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature, is_shadow_decryption: bool) -> Result<Arc<DecryptionSession>, Error>;
 	/// Start new signing session.
 	fn new_signing_session(&self, session_id: SessionId, requestor_signature: Signature, message_hash: H256) -> Result<Arc<SigningSession>, Error>;
+	/// Start new share add session.
+	fn new_share_add_session(&self, session_id: SessionId, new_nodes_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSessionWrapper>, Error>;
+	/// Start new share move session.
+	fn new_share_move_session(&self, session_id: SessionId, shares_to_move: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSessionWrapper>, Error>;
+	/// Start new share remove session.
+	fn new_share_remove_session(&self, session_id: SessionId, new_nodes_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSessionWrapper>, Error>;
+	/// Start new servers set change session.
+	fn new_servers_set_change_session(&self, session_id: Option<SessionId>, new_nodes_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSessionWrapper>, Error>;
 
 	/// Ask node to make 'faulty' generation sessions.
 	#[cfg(test)]
@@ -83,7 +91,7 @@ pub trait ClusterClient: Send + Sync {
 	fn connect(&self);
 }
 
-/// Cluster access for single encryption/decryption/signing participant.
+/// Cluster access for single session participant.
 pub trait Cluster: Send + Sync {
 	/// Broadcast message to all other nodes.
 	fn broadcast(&self, message: Message) -> Result<(), Error>;
@@ -108,6 +116,8 @@ pub struct ClusterConfiguration {
 	pub key_storage: Arc<KeyStorage>,
 	/// Reference to ACL storage
 	pub acl_storage: Arc<AclStorage>,
+	/// Administrator public key.
+	pub admin_public: Option<Public>,
 }
 
 /// Cluster state.
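`ClusterClient` grows four admin entry points (share add, share move, share remove, servers set change), each taking the new node set plus signatures of the old and new sets, and `ClusterConfiguration` gains the `admin_public` key that propagates down to those sessions (per the "propagated admin_public" commit). A compressed, simplified sketch of that API surface; `AdminSession`, `start_admin_session` and the placeholder types are illustrative, not the crate's names:

```rust
// Simplified sketch of the admin API shape: every administrative session takes
// the new server set plus signatures over the old and new sets, and the node
// only accepts them when an administrator key is configured.
use std::collections::BTreeSet;

type NodeId = u64;
type Signature = String; // placeholder for an ECDSA signature

struct ClusterConfiguration {
    /// Administrator public key; `None` disables admin sessions entirely.
    admin_public: Option<String>,
}

#[allow(dead_code)]
enum AdminSession {
    ShareAdd(BTreeSet<NodeId>),
    ShareMove(BTreeSet<NodeId>),
    ShareRemove(BTreeSet<NodeId>),
    ServersSetChange(BTreeSet<NodeId>),
}

fn start_admin_session(
    config: &ClusterConfiguration,
    session: AdminSession,
    _old_set_signature: Signature,
    _new_set_signature: Signature,
) -> Result<AdminSession, String> {
    // In the real code the signatures are verified against admin_public and the
    // serialized old/new node sets; here we only model the configuration gate.
    match config.admin_public {
        Some(_) => Ok(session),
        None => Err("admin sessions are disabled: no admin_public configured".into()),
    }
}

fn main() {
    let config = ClusterConfiguration { admin_public: Some("0xadmin".into()) };
    let new_set: BTreeSet<NodeId> = [1, 2, 3].iter().cloned().collect();
    let started = start_admin_session(
        &config,
        AdminSession::ServersSetChange(new_set),
        "sig_old".into(),
        "sig_new".into(),
    );
    assert!(started.is_ok());
}
```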
@@ -261,23 +271,21 @@ impl ClusterCore {
 	/// Connect to socket using given context and handle.
 	fn connect_future(handle: &Handle, data: Arc<ClusterData>, node_address: SocketAddr) -> BoxedEmptyFuture {
 		let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect();
-		net_connect(&node_address, handle, data.self_key_pair.clone(), disconnected_nodes)
+		Box::new(net_connect(&node_address, handle, data.self_key_pair.clone(), disconnected_nodes)
 			.then(move |result| ClusterCore::process_connection_result(data, Some(node_address), result))
-			.then(|_| finished(()))
-			.boxed()
+			.then(|_| finished(())))
 	}
 
 	/// Start listening for incoming connections.
 	fn listen(handle: &Handle, data: Arc<ClusterData>, listen_address: SocketAddr) -> Result<BoxedEmptyFuture, Error> {
-		Ok(TcpListener::bind(&listen_address, &handle)?
+		Ok(Box::new(TcpListener::bind(&listen_address, &handle)?
 			.incoming()
 			.and_then(move |(stream, node_address)| {
 				ClusterCore::accept_connection(data.clone(), stream, node_address);
 				Ok(())
 			})
 			.for_each(|_| Ok(()))
-			.then(|_| finished(()))
-			.boxed())
+			.then(|_| finished(()))))
 	}
 
 	/// Accept connection.
@@ -289,21 +297,19 @@ impl ClusterCore {
 
 	/// Accept connection future.
 	fn accept_connection_future(handle: &Handle, data: Arc<ClusterData>, stream: TcpStream, node_address: SocketAddr) -> BoxedEmptyFuture {
-		net_accept_connection(node_address, stream, handle, data.self_key_pair.clone())
+		Box::new(net_accept_connection(node_address, stream, handle, data.self_key_pair.clone())
 			.then(move |result| ClusterCore::process_connection_result(data, None, result))
-			.then(|_| finished(()))
-			.boxed()
+			.then(|_| finished(())))
 	}
 
 	/// Schedule maintain procedures.
 	fn schedule_maintain(handle: &Handle, data: Arc<ClusterData>) {
 		let d = data.clone();
-		let interval: BoxedEmptyFuture = Interval::new(time::Duration::new(MAINTAIN_INTERVAL, 0), handle)
+		let interval: BoxedEmptyFuture = Box::new(Interval::new(time::Duration::new(MAINTAIN_INTERVAL, 0), handle)
 			.expect("failed to create interval")
 			.and_then(move |_| Ok(ClusterCore::maintain(data.clone())))
 			.for_each(|_| Ok(()))
-			.then(|_| finished(()))
-			.boxed();
+			.then(|_| finished(())));
 
 		d.spawn(interval);
 	}
@@ -319,7 +325,7 @@ impl ClusterCore {
 
 	/// Called for every incoming message.
 	fn process_connection_messages(data: Arc<ClusterData>, connection: Arc<Connection>) -> IoFuture<Result<(), Error>> {
-		connection
+		Box::new(connection
 			.read_message()
 			.then(move |result|
 				match result {
@@ -327,22 +333,22 @@ impl ClusterCore {
 						ClusterCore::process_connection_message(data.clone(), connection.clone(), message);
 						// continue serving connection
 						data.spawn(ClusterCore::process_connection_messages(data.clone(), connection));
-						finished(Ok(())).boxed()
+						Box::new(finished(Ok(())))
 					},
 					Ok((_, Err(err))) => {
 						warn!(target: "secretstore_net", "{}: protocol error '{}' when reading message from node {}", data.self_key_pair.public(), err, connection.node_id());
 						// continue serving connection
 						data.spawn(ClusterCore::process_connection_messages(data.clone(), connection));
-						finished(Err(err)).boxed()
+						Box::new(finished(Err(err)))
 					},
 					Err(err) => {
 						warn!(target: "secretstore_net", "{}: network error '{}' when reading message from node {}", data.self_key_pair.public(), err, connection.node_id());
 						// close connection
 						data.connections.remove(connection.node_id(), connection.is_inbound());
-						failed(err).boxed()
+						Box::new(failed(err))
 					},
 				}
-			).boxed()
+			))
 	}
 
 	/// Send keepalive messages to every other node.
@@ -361,7 +367,13 @@ impl ClusterCore {
 
 	/// Try to connect to every disconnected node.
 	fn connect_disconnected_nodes(data: Arc<ClusterData>) {
+		// do not update nodes set if any admin session is active
+		// this could happen, but will possibly lead to admin session error
+		// => should be performed later
+		if data.sessions.admin_sessions.is_empty() {
 			data.connections.update_nodes_set();
+		}
+
 		for (node_id, node_address) in data.connections.disconnected_nodes() {
 			if data.config.allow_connecting_to_higher_nodes || data.self_key_pair.public() < &node_id {
 				ClusterCore::connect(data.clone(), node_address);
@@ -377,26 +389,26 @@ impl ClusterCore {
 				if data.connections.insert(connection.clone()) {
 					ClusterCore::process_connection_messages(data.clone(), connection)
 				} else {
-					finished(Ok(())).boxed()
+					Box::new(finished(Ok(())))
 				}
 			},
 			Ok(DeadlineStatus::Meet(Err(err))) => {
 				warn!(target: "secretstore_net", "{}: protocol error '{}' when establishing {} connection{}",
 					data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
 					outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
-				finished(Ok(())).boxed()
+				Box::new(finished(Ok(())))
 			},
 			Ok(DeadlineStatus::Timeout) => {
 				warn!(target: "secretstore_net", "{}: timeout when establishing {} connection{}",
 					data.self_key_pair.public(), if outbound_addr.is_some() { "outbound" } else { "inbound" },
 					outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
-				finished(Ok(())).boxed()
+				Box::new(finished(Ok(())))
 			},
 			Err(err) => {
 				warn!(target: "secretstore_net", "{}: network error '{}' when establishing {} connection{}",
 					data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
 					outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
-				finished(Ok(())).boxed()
+				Box::new(finished(Ok(())))
 			},
 		}
 	}
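The first hunk above implements the "do not update nodes set during admin sessions" commit: re-synchronizing the connection table with the configured server set is deferred while any administrative session is running, since reshuffling connections mid-migration would likely fail that session. A minimal sketch of the guard, with placeholder containers standing in for `ClusterSessions` and `ClusterConnections`:

```rust
// Minimal sketch of the connect_disconnected_nodes guard: the connection table
// is only re-synchronized with the configured node set when no administrative
// session is currently running.
use std::collections::{BTreeMap, BTreeSet};

struct Sessions {
    admin_sessions: BTreeMap<u64, &'static str>, // session id -> label
}

struct Connections {
    nodes: BTreeSet<u64>,
}

impl Connections {
    fn update_nodes_set(&mut self, configured: &BTreeSet<u64>) {
        self.nodes = configured.clone();
    }
}

fn connect_disconnected_nodes(sessions: &Sessions, connections: &mut Connections, configured: &BTreeSet<u64>) {
    // do not update nodes set if any admin session is active
    if sessions.admin_sessions.is_empty() {
        connections.update_nodes_set(configured);
    }
    // ...then try to reconnect to whatever is still disconnected
}

fn main() {
    let configured: BTreeSet<u64> = [1, 2, 3].iter().cloned().collect();
    let mut connections = Connections { nodes: [1, 2].iter().cloned().collect() };

    let busy = Sessions { admin_sessions: [(7, "servers set change")].iter().cloned().collect() };
    connect_disconnected_nodes(&busy, &mut connections, &configured);
    assert_eq!(connections.nodes.len(), 2); // untouched while the admin session runs

    let idle = Sessions { admin_sessions: BTreeMap::new() };
    connect_disconnected_nodes(&idle, &mut connections, &configured);
    assert_eq!(connections.nodes.len(), 3); // synchronized once it is over
}
```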
@@ -410,6 +422,10 @@ impl ClusterCore {
 			Message::Encryption(message) => ClusterCore::process_encryption_message(data, connection, message),
 			Message::Decryption(message) => ClusterCore::process_decryption_message(data, connection, message),
 			Message::Signing(message) => ClusterCore::process_signing_message(data, connection, message),
+			Message::ServersSetChange(message) => ClusterCore::process_servers_set_change_message(data, connection, message),
+			Message::ShareAdd(message) => ClusterCore::process_share_add_message(data, connection, message),
+			Message::ShareMove(message) => ClusterCore::process_share_move_message(data, connection, message),
+			Message::ShareRemove(message) => ClusterCore::process_share_remove_message(data, connection, message),
 			Message::Cluster(message) => ClusterCore::process_cluster_message(data, connection, message),
 		}
 	}
@@ -476,11 +492,13 @@ impl ClusterCore {
 			},
 			Err(err) => {
 				warn!(target: "secretstore_net", "{}: generation session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender);
-				data.sessions.respond_with_generation_error(&session_id, message::SessionError {
+				let error_message = message::SessionError {
 					session: session_id.clone().into(),
 					session_nonce: session_nonce,
 					error: format!("{:?}", err),
-				});
+				};
+				let _ = session.and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message)); // processing error => ignore error
+				data.sessions.respond_with_generation_error(&session_id, error_message);
 				if err != Error::InvalidSessionId {
 					data.sessions.generation_sessions.remove(&session_id);
 				}
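The generation/encryption/decryption/signing error arms are all reworked the same way: the error message is built once, the local session is told about it via `on_session_error` (a failure of that call is deliberately ignored, since we are already on the error path), and only then is the response sent and the session evicted unless the session id itself was invalid. A small self-contained model of that ordering (simplified error and session types, not the crate's):

```rust
// Small model of the reworked error arm: build the error once, let the local
// session observe it (errors from that notification are ignored), then respond
// to the sender and drop the session unless the id itself was invalid.
#[derive(Debug, PartialEq)]
enum Error { InvalidSessionId, Io }

struct SessionError { error: String }

struct LocalSession;
impl LocalSession {
    fn on_session_error(&self, _from_self: bool, _message: &SessionError) -> Result<(), Error> {
        Ok(()) // wake up local waiters, mark the session failed, ...
    }
}

fn handle_processing_error(
    session: Result<LocalSession, Error>,
    err: Error,
    responses: &mut Vec<String>,
    remove_session: &mut bool,
) {
    let error_message = SessionError { error: format!("{:?}", err) };
    // processing error => ignore error returned by the local notification
    let _ = session.and_then(|s| s.on_session_error(true, &error_message));
    responses.push(error_message.error.clone());
    if err != Error::InvalidSessionId {
        *remove_session = true;
    }
}

fn main() {
    let mut responses = Vec::new();
    let mut removed = false;
    handle_processing_error(Ok(LocalSession), Error::Io, &mut responses, &mut removed);
    assert_eq!(responses, vec!["Io".to_string()]);
    assert!(removed);
}
```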
@@ -529,7 +547,7 @@ impl ClusterCore {
 				EncryptionMessage::ConfirmEncryptionInitialization(ref message) =>
 					session.on_confirm_initialization(sender.clone(), message),
 				EncryptionMessage::EncryptionSessionError(ref message) =>
-					session.on_session_error(sender.clone(), message),
+					session.on_session_error(&sender, message),
 			}) {
 				Ok(_) => {
 					// if session is completed => stop
@@ -559,11 +577,13 @@ impl ClusterCore {
 			},
 			Err(err) => {
 				warn!(target: "secretstore_net", "{}: encryption session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender);
-				data.sessions.respond_with_encryption_error(&session_id, message::EncryptionSessionError {
+				let error_message = message::EncryptionSessionError {
 					session: session_id.clone().into(),
 					session_nonce: session_nonce,
 					error: format!("{:?}", err),
-				});
+				};
+				let _ = session.and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message)); // processing error => ignore error
+				data.sessions.respond_with_encryption_error(&session_id, error_message);
 				if err != Error::InvalidSessionId {
 					data.sessions.encryption_sessions.remove(&session_id);
 				}
@@ -632,12 +652,14 @@ impl ClusterCore {
 			},
 			Err(err) => {
 				warn!(target: "secretstore_net", "{}: decryption session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender);
-				data.sessions.respond_with_decryption_error(&session_id, &sub_session_id, &sender, message::DecryptionSessionError {
+				let error_message = message::DecryptionSessionError {
 					session: session_id.clone().into(),
 					sub_session: sub_session_id.clone().into(),
 					session_nonce: session_nonce,
 					error: format!("{:?}", err),
-				});
+				};
+				let _ = session.and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message)); // processing error => ignore error
+				data.sessions.respond_with_decryption_error(&session_id, &sub_session_id, &sender, error_message);
 				if err != Error::InvalidSessionId {
 					data.sessions.decryption_sessions.remove(&decryption_session_id);
 				}
@@ -712,12 +734,14 @@ impl ClusterCore {
 			},
 			Err(err) => {
 				warn!(target: "secretstore_net", "{}: signing session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender);
-				data.sessions.respond_with_signing_error(&session_id, &sub_session_id, &sender, message::SigningSessionError {
+				let error_message = message::SigningSessionError {
 					session: session_id.clone().into(),
 					sub_session: sub_session_id.clone().into(),
 					session_nonce: session_nonce,
 					error: format!("{:?}", err),
-				});
+				};
+				let _ = session.and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message)); // processing error => ignore error
+				data.sessions.respond_with_signing_error(&session_id, &sub_session_id, &sender, error_message);
 				if err != Error::InvalidSessionId {
 					data.sessions.signing_sessions.remove(&signing_session_id);
 				}
@@ -727,6 +751,330 @@ impl ClusterCore {
 		}
 	}
 
+	/// Process single servers set change message from the connection.
+	fn process_servers_set_change_message(data: Arc<ClusterData>, connection: Arc<Connection>, mut message: ServersSetChangeMessage) {
+		let session_id = message.session_id().clone();
+		let session_nonce = message.session_nonce();
+		let mut sender = connection.node_id().clone();
+		let session = match message {
+			ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref message) if match message.message {
+				ConsensusMessageWithServersSet::InitializeConsensusSession(_) => true,
+				_ => false,
+			} => {
+				let mut connected_nodes = data.connections.connected_nodes();
+				connected_nodes.insert(data.self_key_pair.public().clone());
+
+				let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes.clone()));
+				match data.sessions.new_servers_set_change_session(sender.clone(), Some(session_id.clone()), Some(session_nonce), cluster, connected_nodes) {
+					Ok(session) => Ok(session),
+					Err(err) => {
+						// this is new session => it is not yet in container
+						warn!(target: "secretstore_net", "{}: servers set change session initialization error '{}' when requested for new session from node {}", data.self_key_pair.public(), err, sender);
+						data.spawn(connection.send_message(Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(message::ServersSetChangeError {
+							session: session_id.into(),
+							session_nonce: session_nonce,
+							error: format!("{:?}", err),
+						}))));
+						return;
+					},
+				}
+			},
+			_ => {
+				data.sessions.admin_sessions.get(&session_id)
+					.ok_or(Error::InvalidSessionId)
+			},
+		};
+
+		let mut is_queued_message = false;
+		loop {
+			match session.clone().and_then(|session| session.as_servers_set_change().ok_or(Error::InvalidMessage)?.process_message(&sender, &message)) {
+				Ok(_) => {
+					// if session is completed => stop
+					let session = session.clone().expect("session.method() call finished with success; session exists; qed");
+					if session.is_finished() {
+						info!(target: "secretstore_net", "{}: servers set change session completed", data.self_key_pair.public());
+						data.sessions.admin_sessions.remove(&session_id);
+						break;
+					}
+
+					// try to dequeue message
+					match data.sessions.admin_sessions.dequeue_message(&session_id) {
+						Some((msg_sender, Message::ServersSetChange(msg))) => {
+							is_queued_message = true;
+							sender = msg_sender;
+							message = msg;
+						},
+						Some(_) => unreachable!("we only queue message of the same type; qed"),
+						None => break,
+					}
+				},
+				Err(Error::TooEarlyForRequest) => {
+					data.sessions.admin_sessions.enqueue_message(&session_id, sender, Message::ServersSetChange(message), is_queued_message);
+					break;
+				},
+				Err(err) => {
+					warn!(target: "secretstore_net", "{}: servers set change session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender);
+					let error_message = message::ServersSetChangeError {
+						session: session_id.clone().into(),
+						session_nonce: session_nonce,
+						error: format!("{:?}", err),
+					};
+					let _ = session.and_then(|s| s.as_servers_set_change()
+						.ok_or(Error::InvalidMessage)
+						.and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message))); // processing error => ignore error
+					data.sessions.respond_with_servers_set_change_error(&session_id, error_message);
+					if err != Error::InvalidSessionId {
+						data.sessions.admin_sessions.remove(&session_id);
+					}
+					break;
+				},
+			}
+		}
+	}
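All four admin message handlers in this hunk share the same control flow: process the message; on success, try to drain anything that was queued for the session; if processing reports `TooEarlyForRequest`, park the message in the session queue (passing along whether it was itself dequeued) and stop. A compact, runnable model of that loop, with a toy session and plain integers in place of the real message types:

```rust
// Compact model of the enqueue/dequeue loop used by the admin message handlers:
// messages that arrive before the session is ready are parked and replayed
// after a later message unblocks processing.
use std::collections::VecDeque;

#[derive(Debug, PartialEq)]
enum Error { TooEarlyForRequest }

struct Session {
    ready: bool,
    processed: Vec<u32>,
}

impl Session {
    fn process_message(&mut self, msg: u32) -> Result<(), Error> {
        // message 0 is the one that makes the session ready
        if msg == 0 { self.ready = true; }
        if !self.ready { return Err(Error::TooEarlyForRequest); }
        self.processed.push(msg);
        Ok(())
    }
}

fn on_message(session: &mut Session, queue: &mut VecDeque<u32>, mut message: u32) {
    let mut is_queued_message = false;
    loop {
        match session.process_message(message) {
            Ok(()) => match queue.pop_front() {
                Some(msg) => { is_queued_message = true; message = msg; }
                None => break,
            },
            Err(Error::TooEarlyForRequest) => {
                // re-queue at the front if it was already dequeued, append otherwise
                if is_queued_message { queue.push_front(message) } else { queue.push_back(message) }
                break;
            }
        }
    }
}

fn main() {
    let mut session = Session { ready: false, processed: Vec::new() };
    let mut queue = VecDeque::new();

    on_message(&mut session, &mut queue, 1); // too early -> parked
    on_message(&mut session, &mut queue, 2); // too early -> parked
    on_message(&mut session, &mut queue, 0); // unblocks and drains the queue

    assert_eq!(session.processed, vec![0, 1, 2]);
    println!("queued messages replayed in order: {:?}", session.processed);
}
```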
+
+	/// Process single share add message from the connection.
+	fn process_share_add_message(data: Arc<ClusterData>, connection: Arc<Connection>, mut message: ShareAddMessage) {
+		let session_id = message.session_id().clone();
+		let session_nonce = message.session_nonce();
+		let mut sender = connection.node_id().clone();
+		let session = match message {
+			ShareAddMessage::ShareAddConsensusMessage(ref message) if match message.message {
+				ConsensusMessageWithServersSecretMap::InitializeConsensusSession(_) => true,
+				_ => false,
+			} => {
+				let mut connected_nodes = data.connections.connected_nodes();
+				connected_nodes.insert(data.self_key_pair.public().clone());
+
+				let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes));
+				match data.sessions.new_share_add_session(sender.clone(), session_id.clone(), Some(session_nonce), cluster) {
+					Ok(session) => Ok(session),
+					Err(err) => {
+						// this is new session => it is not yet in container
+						warn!(target: "secretstore_net", "{}: share add session initialization error '{}' when requested for new session from node {}", data.self_key_pair.public(), err, sender);
+						data.spawn(connection.send_message(Message::ShareAdd(ShareAddMessage::ShareAddError(message::ShareAddError {
+							session: session_id.into(),
+							session_nonce: session_nonce,
+							error: format!("{:?}", err),
+						}))));
+						return;
+					},
+				}
+			},
+			_ => {
+				data.sessions.admin_sessions.get(&session_id)
+					.ok_or(Error::InvalidSessionId)
+			},
+		};
+
+		let mut is_queued_message = false;
+		loop {
+			match session.clone().and_then(|session| session.as_share_add().ok_or(Error::InvalidMessage)?.process_message(&sender, &message)) {
+				Ok(_) => {
+					// if session is completed => stop
+					let session = session.clone().expect("session.method() call finished with success; session exists; qed");
+					if session.is_finished() {
+						info!(target: "secretstore_net", "{}: share add session completed", data.self_key_pair.public());
+						data.sessions.admin_sessions.remove(&session_id);
+						break;
+					}
+
+					// try to dequeue message
+					match data.sessions.admin_sessions.dequeue_message(&session_id) {
+						Some((msg_sender, Message::ShareAdd(msg))) => {
+							is_queued_message = true;
+							sender = msg_sender;
+							message = msg;
+						},
+						Some(_) => unreachable!("we only queue message of the same type; qed"),
+						None => break,
+					}
+				},
+				Err(Error::TooEarlyForRequest) => {
+					data.sessions.admin_sessions.enqueue_message(&session_id, sender, Message::ShareAdd(message), is_queued_message);
+					break;
+				},
+				Err(err) => {
+					warn!(target: "secretstore_net", "{}: share add session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender);
+					let error_message = message::ShareAddError {
+						session: session_id.clone().into(),
+						session_nonce: session_nonce,
+						error: format!("{:?}", err),
+					};
+					let _ = session.and_then(|s| s.as_share_add()
+						.ok_or(Error::InvalidMessage)
+						.and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message))); // processing error => ignore error
+					data.sessions.respond_with_share_add_error(&session_id, error_message);
+					if err != Error::InvalidSessionId {
+						data.sessions.admin_sessions.remove(&session_id);
+					}
+					break;
+				},
+			}
+		}
+	}
+
+	/// Process single share move message from the connection.
+	fn process_share_move_message(data: Arc<ClusterData>, connection: Arc<Connection>, mut message: ShareMoveMessage) {
+		let session_id = message.session_id().clone();
+		let session_nonce = message.session_nonce();
+		let mut sender = connection.node_id().clone();
+		let session = match message {
+			ShareMoveMessage::ShareMoveConsensusMessage(ref message) if match message.message {
+				ConsensusMessageWithServersMap::InitializeConsensusSession(_) => true,
+				_ => false,
+			} => {
+				let mut connected_nodes = data.connections.connected_nodes();
+				connected_nodes.insert(data.self_key_pair.public().clone());
+
+				let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes));
+				match data.sessions.new_share_move_session(sender.clone(), session_id.clone(), Some(session_nonce), cluster) {
+					Ok(session) => Ok(session),
+					Err(err) => {
+						// this is new session => it is not yet in container
+						warn!(target: "secretstore_net", "{}: share move session initialization error '{}' when requested for new session from node {}", data.self_key_pair.public(), err, sender);
+						data.spawn(connection.send_message(Message::ShareMove(ShareMoveMessage::ShareMoveError(message::ShareMoveError {
+							session: session_id.into(),
+							session_nonce: session_nonce,
+							error: format!("{:?}", err),
+						}))));
+						return;
+					},
+				}
+			},
+			_ => {
+				data.sessions.admin_sessions.get(&session_id)
+					.ok_or(Error::InvalidSessionId)
+			},
+		};
+
+		let mut is_queued_message = false;
+		loop {
+			match session.clone().and_then(|session| session.as_share_move().ok_or(Error::InvalidMessage)?.process_message(&sender, &message)) {
+				Ok(_) => {
+					// if session is completed => stop
+					let session = session.clone().expect("session.method() call finished with success; session exists; qed");
+					if session.is_finished() {
+						info!(target: "secretstore_net", "{}: share move session completed", data.self_key_pair.public());
+						data.sessions.admin_sessions.remove(&session_id);
+						break;
+					}
+
+					// try to dequeue message
+					match data.sessions.admin_sessions.dequeue_message(&session_id) {
+						Some((msg_sender, Message::ShareMove(msg))) => {
+							is_queued_message = true;
+							sender = msg_sender;
+							message = msg;
+						},
+						Some(_) => unreachable!("we only queue message of the same type; qed"),
+						None => break,
+					}
+				},
+				Err(Error::TooEarlyForRequest) => {
+					data.sessions.admin_sessions.enqueue_message(&session_id, sender, Message::ShareMove(message), is_queued_message);
+					break;
+				},
+				Err(err) => {
+					warn!(target: "secretstore_net", "{}: share move session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender);
+					let error_message = message::ShareMoveError {
+						session: session_id.clone().into(),
+						session_nonce: session_nonce,
+						error: format!("{:?}", err),
+					};
+					let _ = session.and_then(|s| s.as_share_move()
+						.ok_or(Error::InvalidMessage)
+						.and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message))); // processing error => ignore error
+					data.sessions.respond_with_share_move_error(&session_id, error_message);
+					if err != Error::InvalidSessionId {
+						data.sessions.admin_sessions.remove(&session_id);
+					}
+					break;
+				},
+			}
+		}
+	}
|
|
||||||
|
/// Process single share remove message from the connection.
fn process_share_remove_message(data: Arc<ClusterData>, connection: Arc<Connection>, mut message: ShareRemoveMessage) {
	let session_id = message.session_id().clone();
	let session_nonce = message.session_nonce();
	let mut sender = connection.node_id().clone();
	let session = match message {
		ShareRemoveMessage::ShareRemoveConsensusMessage(ref message) if match message.message {
			ConsensusMessageWithServersSet::InitializeConsensusSession(_) => true,
			_ => false,
		} => {
			let mut connected_nodes = data.connections.connected_nodes();
			connected_nodes.insert(data.self_key_pair.public().clone());

			let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes));
			match data.sessions.new_share_remove_session(sender.clone(), session_id.clone(), Some(session_nonce), cluster) {
				Ok(session) => Ok(session),
				Err(err) => {
					// this is new session => it is not yet in container
					warn!(target: "secretstore_net", "{}: share remove session initialization error '{}' when requested for new session from node {}", data.self_key_pair.public(), err, sender);
					data.spawn(connection.send_message(Message::ShareRemove(ShareRemoveMessage::ShareRemoveError(message::ShareRemoveError {
						session: session_id.into(),
						session_nonce: session_nonce,
						error: format!("{:?}", err),
					}))));
					return;
				},
			}
		},
		_ => {
			data.sessions.admin_sessions.get(&session_id)
				.ok_or(Error::InvalidSessionId)
		},
	};

	let mut is_queued_message = false;
	loop {
		match session.clone().and_then(|session| session.as_share_remove().ok_or(Error::InvalidMessage)?.process_message(&sender, &message)) {
			Ok(_) => {
				// if session is completed => stop
				let session = session.clone().expect("session.method() call finished with success; session exists; qed");
				if session.is_finished() {
					info!(target: "secretstore_net", "{}: share remove session completed", data.self_key_pair.public());
					data.sessions.admin_sessions.remove(&session_id);
					break;
				}

				// try to dequeue message
				match data.sessions.admin_sessions.dequeue_message(&session_id) {
					Some((msg_sender, Message::ShareRemove(msg))) => {
						is_queued_message = true;
						sender = msg_sender;
						message = msg;
					},
					Some(_) => unreachable!("we only queue message of the same type; qed"),
					None => break,
				}
			},
			Err(Error::TooEarlyForRequest) => {
				data.sessions.admin_sessions.enqueue_message(&session_id, sender, Message::ShareRemove(message), is_queued_message);
				break;
			},
			Err(err) => {
				warn!(target: "secretstore_net", "{}: share remove session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender);
				let error_message = message::ShareRemoveError {
					session: session_id.clone().into(),
					session_nonce: session_nonce,
					error: format!("{:?}", err),
				};
				let _ = session.and_then(|s| s.as_share_remove()
					.ok_or(Error::InvalidMessage)
					.and_then(|s| s.on_session_error(data.self_key_pair.public(), &error_message))); // processing error => ignore error
				data.sessions.respond_with_share_remove_error(&session_id, error_message);
				if err != Error::InvalidSessionId {
					data.sessions.admin_sessions.remove(&session_id);
				}
				break;
			},
		}
	}
}

/// Process single cluster message from the connection.
fn process_cluster_message(data: Arc<ClusterData>, connection: Arc<Connection>, message: ClusterMessage) {
	match message {
@@ -996,7 +1344,7 @@ impl ClusterClient for ClusterClientImpl {
		let mut connected_nodes = self.data.connections.connected_nodes();
		connected_nodes.insert(self.data.self_key_pair.public().clone());

-		let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone()));
+		let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes));
		let session = self.data.sessions.new_encryption_session(self.data.self_key_pair.public().clone(), session_id, None, cluster)?;
		session.initialize(requestor_signature, common_point, encrypted_point)?;
		Ok(EncryptionSessionWrapper::new(Arc::downgrade(&self.data), session_id, session))
@@ -1007,7 +1355,7 @@ impl ClusterClient for ClusterClientImpl {
		connected_nodes.insert(self.data.self_key_pair.public().clone());

		let access_key = Random.generate()?.secret().clone();
-		let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone()));
+		let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes));
		let session = self.data.sessions.new_decryption_session(self.data.self_key_pair.public().clone(), session_id, access_key.clone(), None, cluster, Some(requestor_signature))?;
		session.initialize(is_shadow_decryption)?;
		Ok(DecryptionSessionWrapper::new(Arc::downgrade(&self.data), DecryptionSessionId::new(session_id, access_key), session))
@@ -1018,12 +1366,75 @@ impl ClusterClient for ClusterClientImpl {
		connected_nodes.insert(self.data.self_key_pair.public().clone());

		let access_key = Random.generate()?.secret().clone();
-		let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone()));
+		let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes));
		let session = self.data.sessions.new_signing_session(self.data.self_key_pair.public().clone(), session_id, access_key.clone(), None, cluster, Some(requestor_signature))?;
		session.initialize(message_hash)?;
		Ok(SigningSessionWrapper::new(Arc::downgrade(&self.data), SigningSessionId::new(session_id, access_key), session))
	}

	fn new_share_add_session(&self, session_id: SessionId, new_nodes_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSessionWrapper>, Error> {
		let mut connected_nodes = self.data.connections.connected_nodes();
		connected_nodes.insert(self.data.self_key_pair.public().clone());

		let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes));
		let session = self.data.sessions.new_share_add_session(self.data.self_key_pair.public().clone(), session_id, None, cluster)?;
		session.as_share_add()
			.expect("created 1 line above; qed")
			.initialize(Some(new_nodes_set), Some(old_set_signature), Some(new_set_signature))?;
		Ok(AdminSessionWrapper::new(Arc::downgrade(&self.data), session_id, session))
	}

	fn new_share_move_session(&self, session_id: SessionId, new_nodes_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSessionWrapper>, Error> {
		let key_share = self.data.config.key_storage.get(&session_id).map_err(|e| Error::KeyStorage(e.into()))?;
		if new_nodes_set.len() != key_share.id_numbers.len() {
			return Err(Error::InvalidNodesConfiguration);
		}

		let old_nodes_set: BTreeSet<_> = key_share.id_numbers.keys().cloned().collect();
		let nodes_to_add: BTreeSet<_> = new_nodes_set.difference(&old_nodes_set).collect();
		let mut shares_to_move = BTreeMap::new();
		for (target_node, source_node) in nodes_to_add.into_iter().zip(key_share.id_numbers.keys()) {
			shares_to_move.insert(target_node.clone(), source_node.clone());
		}

		let mut connected_nodes = self.data.connections.connected_nodes();
		connected_nodes.insert(self.data.self_key_pair.public().clone());

		let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes));
		let session = self.data.sessions.new_share_move_session(self.data.self_key_pair.public().clone(), session_id, None, cluster)?;
		session.as_share_move()
			.expect("created 1 line above; qed")
			.initialize(Some(shares_to_move), Some(old_set_signature), Some(new_set_signature))?;
		Ok(AdminSessionWrapper::new(Arc::downgrade(&self.data), session_id, session))
	}

	fn new_share_remove_session(&self, session_id: SessionId, new_nodes_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSessionWrapper>, Error> {
		let mut connected_nodes = self.data.connections.connected_nodes();
		connected_nodes.insert(self.data.self_key_pair.public().clone());

		let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes));
		let session = self.data.sessions.new_share_remove_session(self.data.self_key_pair.public().clone(), session_id, None, cluster)?;
		session.as_share_remove()
			.expect("created 1 line above; qed")
			.initialize(Some(new_nodes_set), Some(old_set_signature), Some(new_set_signature))?;
		Ok(AdminSessionWrapper::new(Arc::downgrade(&self.data), session_id, session))
	}

	fn new_servers_set_change_session(&self, session_id: Option<SessionId>, new_nodes_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSessionWrapper>, Error> {
		let mut connected_nodes = self.data.connections.connected_nodes();
		connected_nodes.insert(self.data.self_key_pair.public().clone());

		let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone()));
		let session = self.data.sessions.new_servers_set_change_session(self.data.self_key_pair.public().clone(), session_id, None, cluster, connected_nodes)?;
		let session_id = {
			let servers_set_change_session = session.as_servers_set_change().expect("created 1 line above; qed");
			servers_set_change_session.initialize(new_nodes_set, old_set_signature, new_set_signature)?;
			servers_set_change_session.id().clone()
		};
		Ok(AdminSessionWrapper::new(Arc::downgrade(&self.data), session_id, session))
	}

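The share-move constructor above pairs every node that appears only in the new set with one of the current share holders, via the zip over `key_share.id_numbers.keys()`. A simplified, standalone Rust sketch of that pairing logic (string node ids are placeholders, not SecretStore types):

use std::collections::{BTreeMap, BTreeSet};

fn plan_share_moves(old_nodes: &BTreeSet<String>, new_nodes: &BTreeSet<String>) -> BTreeMap<String, String> {
	// nodes that must receive a share: present in the new set, absent from the old one
	let nodes_to_add: Vec<&String> = new_nodes.difference(old_nodes).collect();
	// pair every joining node with some existing share holder (the source of the moved share)
	nodes_to_add.into_iter()
		.zip(old_nodes.iter())
		.map(|(target, source)| (target.clone(), source.clone()))
		.collect()
}

fn main() {
	let old: BTreeSet<String> = ["n1", "n2", "n3"].iter().map(|s| s.to_string()).collect();
	let new: BTreeSet<String> = ["n1", "n2", "n4"].iter().map(|s| s.to_string()).collect();
	// n4 takes over a share from n1 (the first key of the old set)
	println!("{:?}", plan_share_moves(&old, &new));
}
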
	#[cfg(test)]
	fn connect(&self) {
		ClusterCore::connect_disconnected_nodes(self.data.clone());
@@ -1140,6 +1551,7 @@ pub mod tests {
			allow_connecting_to_higher_nodes: false,
			key_storage: Arc::new(DummyKeyStorage::default()),
			acl_storage: Arc::new(DummyAclStorage::default()),
+			admin_public: None,
		}).collect();
		let clusters: Vec<_> = cluster_params.into_iter().enumerate()
			.map(|(_, params)| ClusterCore::new(core.handle(), params).unwrap())
@@ -1189,16 +1601,47 @@ pub mod tests {

		// start && wait for generation session to fail
		let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap();
-		loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some());
+		loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some()
+			&& clusters[0].client().generation_session(&SessionId::default()).is_none());
		assert!(session.joint_public_and_secret().unwrap().is_err());

		// check that faulty session is either removed from all nodes, or nonexistent (already removed)
-		assert!(clusters[0].client().generation_session(&SessionId::default()).is_none());
		for i in 1..3 {
			if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) {
-				loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some());
+				// wait for both session completion && session removal (session completion event is fired
+				// before session is removed from its own container by cluster)
+				loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some()
+					&& clusters[i].client().generation_session(&SessionId::default()).is_none());
+				assert!(session.joint_public_and_secret().unwrap().is_err());
+			}
+		}
+	}
+
+	#[test]
+	fn generation_session_completion_signalled_if_failed_on_master() {
+		//::logger::init_log();
+		let mut core = Core::new().unwrap();
+		let clusters = make_clusters(&core, 6023, 3);
+		run_clusters(&clusters);
+		loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established));
+
+		// ask one of nodes to produce faulty generation sessions
+		clusters[0].client().make_faulty_generation_sessions();
+
+		// start && wait for generation session to fail
+		let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap();
+		loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some()
+			&& clusters[0].client().generation_session(&SessionId::default()).is_none());
+		assert!(session.joint_public_and_secret().unwrap().is_err());
+
+		// check that faulty session is either removed from all nodes, or nonexistent (already removed)
+		for i in 1..3 {
+			if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) {
+				// wait for both session completion && session removal (session completion event is fired
+				// before session is removed from its own container by cluster)
+				loop_until(&mut core, time::Duration::from_millis(300), || session.joint_public_and_secret().is_some()
+					&& clusters[i].client().generation_session(&SessionId::default()).is_none());
				assert!(session.joint_public_and_secret().unwrap().is_err());
-				assert!(clusters[i].client().generation_session(&SessionId::default()).is_none());
			}
		}
	}
@@ -1213,18 +1656,18 @@ pub mod tests {

		// start && wait for generation session to complete
		let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap();
-		loop_until(&mut core, time::Duration::from_millis(300), || session.state() == GenerationSessionState::Finished
-			|| session.state() == GenerationSessionState::Failed);
+		loop_until(&mut core, time::Duration::from_millis(300), || (session.state() == GenerationSessionState::Finished
+			|| session.state() == GenerationSessionState::Failed)
+			&& clusters[0].client().generation_session(&SessionId::default()).is_none());
		assert!(session.joint_public_and_secret().unwrap().is_ok());

		// check that session is either removed from all nodes, or nonexistent (already removed)
-		assert!(clusters[0].client().generation_session(&SessionId::default()).is_none());
		for i in 1..3 {
			if let Some(session) = clusters[i].client().generation_session(&SessionId::default()) {
-				loop_until(&mut core, time::Duration::from_millis(300), || session.state() == GenerationSessionState::Finished
-					|| session.state() == GenerationSessionState::Failed);
+				loop_until(&mut core, time::Duration::from_millis(300), || (session.state() == GenerationSessionState::Finished
+					|| session.state() == GenerationSessionState::Failed)
+					&& clusters[i].client().generation_session(&SessionId::default()).is_none());
				assert!(session.joint_public_and_secret().unwrap().is_err());
-				assert!(clusters[i].client().generation_session(&SessionId::default()).is_none());
			}
		}
	}
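These tests drive the reactor with a `loop_until(&mut core, timeout, predicate)` helper whose body lies outside this diff. The sketch below shows one minimal shape such a helper could take for tokio-core 0.1; it is an assumption about its structure, not the PR's actual implementation:

use std::time::{Duration, Instant};
use tokio_core::reactor::Core;

/// Spin the reactor until `predicate` returns true or `timeout` elapses.
/// Hypothetical helper mirroring how the tests above call `loop_until`.
fn loop_until<F: Fn() -> bool>(core: &mut Core, timeout: Duration, predicate: F) {
	let start = Instant::now();
	while !predicate() {
		if start.elapsed() > timeout {
			panic!("no result in {:?}", timeout);
		}
		// run queued events for a short slice, then re-check the predicate
		core.turn(Some(Duration::from_millis(10)));
	}
}
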
@@ -22,7 +22,8 @@ use parking_lot::RwLock;
use ethkey::{Public, Secret, Signature};
use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, DocumentKeyShare, EncryptedDocumentKeyShadow, SessionMeta};
use key_server_cluster::cluster::{Cluster, ClusterData, ClusterView, ClusterConfiguration};
-use key_server_cluster::message::{self, Message, GenerationMessage, EncryptionMessage, DecryptionMessage, SigningMessage};
+use key_server_cluster::message::{self, Message, GenerationMessage, EncryptionMessage, DecryptionMessage, SigningMessage,
+	ShareAddMessage, ShareMoveMessage, ShareRemoveMessage, ServersSetChangeMessage};
use key_server_cluster::generation_session::{Session as GenerationSession, SessionImpl as GenerationSessionImpl,
	SessionParams as GenerationSessionParams, SessionState as GenerationSessionState};
use key_server_cluster::decryption_session::{Session as DecryptionSession, SessionImpl as DecryptionSessionImpl,
@@ -31,6 +32,15 @@ use key_server_cluster::encryption_session::{Session as EncryptionSession, Sessi
	SessionParams as EncryptionSessionParams, SessionState as EncryptionSessionState};
use key_server_cluster::signing_session::{Session as SigningSession, SessionImpl as SigningSessionImpl,
	SigningSessionId, SessionParams as SigningSessionParams};
+use key_server_cluster::share_add_session::{Session as ShareAddSession, SessionImpl as ShareAddSessionImpl,
+	SessionParams as ShareAddSessionParams, IsolatedSessionTransport as ShareAddTransport};
+use key_server_cluster::share_move_session::{Session as ShareMoveSession, SessionImpl as ShareMoveSessionImpl,
+	SessionParams as ShareMoveSessionParams, IsolatedSessionTransport as ShareMoveTransport};
+use key_server_cluster::share_remove_session::{Session as ShareRemoveSession, SessionImpl as ShareRemoveSessionImpl,
+	SessionParams as ShareRemoveSessionParams, IsolatedSessionTransport as ShareRemoveTransport};
+use key_server_cluster::servers_set_change_session::{Session as ServersSetChangeSession, SessionImpl as ServersSetChangeSessionImpl,
+	SessionParams as ServersSetChangeSessionParams};
+use key_server_cluster::admin_sessions::ShareChangeSessionMeta;

/// When there are no session-related messages for SESSION_TIMEOUT_INTERVAL seconds,
/// we must treat this session as stalled && finish it with an error.
@@ -38,6 +48,13 @@ use key_server_cluster::signing_session::{Session as SigningSession, SessionImpl
/// session messages.
const SESSION_TIMEOUT_INTERVAL: u64 = 60;
+
+lazy_static! {
+	/// Servers set change session id (there could be at most 1 session => hardcoded id).
+	static ref SERVERS_SET_CHANGE_SESSION_ID: SessionId = "10b7af423bb551d5dc8645db754163a2145d37d78d468fa7330435ed77064c1c206f4b71d62491dfb9f7dbeccc42a6c112c8bb507de7b4fcad8d646272b2c363"
+		.parse()
+		.expect("hardcoded id should parse without errors; qed");
+}

/// Generic cluster session.
pub trait ClusterSession {
	/// If session is finished (either with succcess or not).
@@ -48,6 +65,18 @@ pub trait ClusterSession {
	fn on_node_timeout(&self, node_id: &NodeId);
}
+
+/// Administrative session.
+pub enum AdminSession {
+	/// Share add session.
+	ShareAdd(ShareAddSessionImpl<ShareAddTransport>),
+	/// Share move session.
+	ShareMove(ShareMoveSessionImpl<ShareMoveTransport>),
+	/// Share remove session.
+	ShareRemove(ShareRemoveSessionImpl<ShareRemoveTransport>),
+	/// Servers set change session.
+	ServersSetChange(ServersSetChangeSessionImpl),
+}
+
/// Active sessions on this cluster.
pub struct ClusterSessions {
	/// Key generation sessions.
@@ -58,6 +87,8 @@ pub struct ClusterSessions {
	pub decryption_sessions: ClusterSessionsContainer<DecryptionSessionId, DecryptionSessionImpl, DecryptionMessage>,
	/// Signing sessions.
	pub signing_sessions: ClusterSessionsContainer<SigningSessionId, SigningSessionImpl, SigningMessage>,
+	/// Administrative sessions.
+	pub admin_sessions: ClusterSessionsContainer<SessionId, AdminSession, Message>,
	/// Self node id.
	self_node_id: NodeId,
	/// All nodes ids.
@@ -66,6 +97,8 @@ pub struct ClusterSessions {
	key_storage: Arc<KeyStorage>,
	/// Reference to ACL storage
	acl_storage: Arc<AclStorage>,
+	/// Administrator public.
+	admin_public: Option<Public>,
	/// Make faulty generation sessions.
	make_faulty_generation_sessions: AtomicBool,
	/// Always-increasing sessions counter. Is used as session nonce to prevent replay attacks:
@@ -142,6 +175,16 @@ pub struct SigningSessionWrapper {
	cluster: Weak<ClusterData>,
}
+
+/// Admin session implementation, which removes session from cluster on drop.
+pub struct AdminSessionWrapper {
+	/// Wrapped session.
+	session: Arc<AdminSession>,
+	/// Session Id.
+	session_id: SessionId,
+	/// Cluster data reference.
+	cluster: Weak<ClusterData>,
+}

impl ClusterSessions {
	/// Create new cluster sessions container.
	pub fn new(config: &ClusterConfiguration) -> Self {
@@ -150,10 +193,12 @@ impl ClusterSessions {
			nodes: config.key_server_set.get().keys().cloned().collect(),
			acl_storage: config.acl_storage.clone(),
			key_storage: config.key_storage.clone(),
+			admin_public: config.admin_public.clone(),
			generation_sessions: ClusterSessionsContainer::new(),
			encryption_sessions: ClusterSessionsContainer::new(),
			decryption_sessions: ClusterSessionsContainer::new(),
			signing_sessions: ClusterSessionsContainer::new(),
+			admin_sessions: ClusterSessionsContainer::new(),
			make_faulty_generation_sessions: AtomicBool::new(false),
			session_counter: AtomicUsize::new(0),
			max_nonce: RwLock::new(BTreeMap::new()),
@@ -313,12 +358,146 @@ impl ClusterSessions {
		});
	}
+
	/// Create new share add session.
	pub fn new_share_add_session(&self, master: NodeId, session_id: SessionId, nonce: Option<u64>, cluster: Arc<ClusterView>) -> Result<Arc<AdminSession>, Error> {
		let nonce = self.check_session_nonce(&master, nonce)?;
		let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?;

		self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), move || ShareAddSessionImpl::new(ShareAddSessionParams {
			meta: ShareChangeSessionMeta {
				id: session_id,
				self_node_id: self.self_node_id.clone(),
				master_node_id: master,
			},
			transport: ShareAddTransport::new(session_id.clone(), nonce, cluster),
			key_storage: self.key_storage.clone(),
			admin_public: Some(admin_public),
			nonce: nonce,
		}).map(AdminSession::ShareAdd))
	}

	/// Send share add session error.
	pub fn respond_with_share_add_error(&self, session_id: &SessionId, error: message::ShareAddError) {
		self.admin_sessions.sessions.read().get(&session_id)
			.map(|s| {
				// error in any share change session is considered fatal
				// => broadcast error

				// do not bother processing send error, as we already processing error
				let _ = s.cluster_view.broadcast(Message::ShareAdd(ShareAddMessage::ShareAddError(error)));
			});
	}

	/// Create new share move session.
	pub fn new_share_move_session(&self, master: NodeId, session_id: SessionId, nonce: Option<u64>, cluster: Arc<ClusterView>) -> Result<Arc<AdminSession>, Error> {
		let nonce = self.check_session_nonce(&master, nonce)?;
		let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?;

		self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), move || ShareMoveSessionImpl::new(ShareMoveSessionParams {
			meta: ShareChangeSessionMeta {
				id: session_id,
				self_node_id: self.self_node_id.clone(),
				master_node_id: master,
			},
			transport: ShareMoveTransport::new(session_id.clone(), nonce, cluster),
			key_storage: self.key_storage.clone(),
			admin_public: Some(admin_public),
			nonce: nonce,
		}).map(AdminSession::ShareMove))
	}

	/// Send share move session error.
	pub fn respond_with_share_move_error(&self, session_id: &SessionId, error: message::ShareMoveError) {
		self.admin_sessions.sessions.read().get(&session_id)
			.map(|s| {
				// error in any share change session is considered fatal
				// => broadcast error

				// do not bother processing send error, as we already processing error
				let _ = s.cluster_view.broadcast(Message::ShareMove(ShareMoveMessage::ShareMoveError(error)));
			});
	}

	/// Create new share remove session.
	pub fn new_share_remove_session(&self, master: NodeId, session_id: SessionId, nonce: Option<u64>, cluster: Arc<ClusterView>) -> Result<Arc<AdminSession>, Error> {
		let nonce = self.check_session_nonce(&master, nonce)?;
		let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?;

		self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), move || ShareRemoveSessionImpl::new(ShareRemoveSessionParams {
			meta: ShareChangeSessionMeta {
				id: session_id,
				self_node_id: self.self_node_id.clone(),
				master_node_id: master,
			},
			transport: ShareRemoveTransport::new(session_id.clone(), nonce, cluster),
			key_storage: self.key_storage.clone(),
			admin_public: Some(admin_public),
			nonce: nonce,
		}).map(AdminSession::ShareRemove))
	}

	/// Send share remove session error.
	pub fn respond_with_share_remove_error(&self, session_id: &SessionId, error: message::ShareRemoveError) {
		self.admin_sessions.sessions.read().get(&session_id)
			.map(|s| {
				// error in any share change session is considered fatal
				// => broadcast error

				// do not bother processing send error, as we already processing error
				let _ = s.cluster_view.broadcast(Message::ShareRemove(ShareRemoveMessage::ShareRemoveError(error)));
			});
	}

	/// Create new servers set change session.
	pub fn new_servers_set_change_session(&self, master: NodeId, session_id: Option<SessionId>, nonce: Option<u64>, cluster: Arc<ClusterView>, all_nodes_set: BTreeSet<NodeId>) -> Result<Arc<AdminSession>, Error> {
		// TODO: check if there's no other active sessions + do not allow to start other sessions when this session is active
		let session_id = match session_id {
			Some(session_id) => if session_id == *SERVERS_SET_CHANGE_SESSION_ID {
				session_id
			} else {
				return Err(Error::InvalidMessage)
			},
			None => (*SERVERS_SET_CHANGE_SESSION_ID).clone(),
		};
		let nonce = self.check_session_nonce(&master, nonce)?;
		let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?;

		self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), move || ServersSetChangeSessionImpl::new(ServersSetChangeSessionParams {
			meta: ShareChangeSessionMeta {
				id: session_id,
				self_node_id: self.self_node_id.clone(),
				master_node_id: master,
			},
			cluster: cluster,
			key_storage: self.key_storage.clone(),
			admin_public: admin_public,
			nonce: nonce,
			all_nodes_set: all_nodes_set,
		}).map(AdminSession::ServersSetChange))
	}

	/// Send servers set change session error.
	pub fn respond_with_servers_set_change_error(&self, session_id: &SessionId, error: message::ServersSetChangeError) {
		self.admin_sessions.sessions.read().get(&session_id)
			.map(|s| {
				// error in any share change session is considered fatal
				// => broadcast error

				// do not bother processing send error, as we already processing error
				let _ = s.cluster_view.broadcast(Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(error)));
			});
	}

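Each constructor above first passes the incoming nonce through `check_session_nonce`, whose body is outside this hunk. Conceptually it enforces the "always-increasing per-node counter" replay protection described on `ClusterSessions`. The sketch below illustrates that idea only; the struct, field and error wording are assumptions, not the PR's code:

use std::collections::BTreeMap;
use parking_lot::RwLock;

type NodeId = u64; // stand-in for the real public-key node id

struct NonceChecker {
	/// Highest nonce seen so far from every master node (nonces start at 1 in this sketch).
	max_nonce: RwLock<BTreeMap<NodeId, u64>>,
}

impl NonceChecker {
	/// Accept a nonce only if it is strictly greater than everything seen from `master` before.
	fn check_session_nonce(&self, master: &NodeId, nonce: Option<u64>) -> Result<u64, &'static str> {
		match nonce {
			Some(nonce) => {
				let mut max_nonce = self.max_nonce.write();
				let seen = max_nonce.entry(*master).or_insert(0);
				if nonce <= *seen {
					return Err("replay attempt: nonce is not increasing");
				}
				*seen = nonce;
				Ok(nonce)
			},
			// simplification: remote sessions must always carry a nonce in this sketch
			None => Err("nonce is required for remote sessions"),
		}
	}
}
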
	/// Stop sessions that are stalling.
	pub fn stop_stalled_sessions(&self) {
		self.generation_sessions.stop_stalled_sessions();
		self.encryption_sessions.stop_stalled_sessions();
		self.decryption_sessions.stop_stalled_sessions();
		self.signing_sessions.stop_stalled_sessions();
+		// TODO: servers set change session could take a lot of time
+		// && during that session some nodes could not receive messages
+		// => they could stop session as stalled. This must be handled
+		self.admin_sessions.stop_stalled_sessions();
	}

	/// When connection to node is lost.
@@ -327,6 +506,7 @@ impl ClusterSessions {
		self.encryption_sessions.on_connection_timeout(node_id);
		self.decryption_sessions.on_connection_timeout(node_id);
		self.signing_sessions.on_connection_timeout(node_id);
+		self.admin_sessions.on_connection_timeout(node_id);
		self.max_nonce.write().remove(node_id);
	}

@@ -366,6 +546,10 @@ impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: Cluster
		}
	}

+	pub fn is_empty(&self) -> bool {
+		self.sessions.read().is_empty()
+	}
+
	pub fn get(&self, session_id: &K) -> Option<Arc<V>> {
		self.sessions.read().get(session_id).map(|s| s.session.clone())
	}
@@ -437,6 +621,65 @@ impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: Cluster
		}
	}
}

impl AdminSession {
	pub fn as_share_add(&self) -> Option<&ShareAddSessionImpl<ShareAddTransport>> {
		match *self {
			AdminSession::ShareAdd(ref session) => Some(session),
			_ => None
		}
	}

	pub fn as_share_move(&self) -> Option<&ShareMoveSessionImpl<ShareMoveTransport>> {
		match *self {
			AdminSession::ShareMove(ref session) => Some(session),
			_ => None
		}
	}

	pub fn as_share_remove(&self) -> Option<&ShareRemoveSessionImpl<ShareRemoveTransport>> {
		match *self {
			AdminSession::ShareRemove(ref session) => Some(session),
			_ => None
		}
	}

	pub fn as_servers_set_change(&self) -> Option<&ServersSetChangeSessionImpl> {
		match *self {
			AdminSession::ServersSetChange(ref session) => Some(session),
			_ => None
		}
	}
}

impl ClusterSession for AdminSession {
	fn is_finished(&self) -> bool {
		match *self {
			AdminSession::ShareAdd(ref session) => session.is_finished(),
			AdminSession::ShareMove(ref session) => session.is_finished(),
			AdminSession::ShareRemove(ref session) => session.is_finished(),
			AdminSession::ServersSetChange(ref session) => session.is_finished(),
		}
	}

	fn on_session_timeout(&self) {
		match *self {
			AdminSession::ShareAdd(ref session) => session.on_session_timeout(),
			AdminSession::ShareMove(ref session) => session.on_session_timeout(),
			AdminSession::ShareRemove(ref session) => session.on_session_timeout(),
			AdminSession::ServersSetChange(ref session) => session.on_session_timeout(),
		}
	}

	fn on_node_timeout(&self, node_id: &NodeId) {
		match *self {
			AdminSession::ShareAdd(ref session) => session.on_node_timeout(node_id),
			AdminSession::ShareMove(ref session) => session.on_node_timeout(node_id),
			AdminSession::ShareRemove(ref session) => session.on_node_timeout(node_id),
			AdminSession::ServersSetChange(ref session) => session.on_node_timeout(node_id),
		}
	}
}

impl GenerationSessionWrapper {
	pub fn new(cluster: Weak<ClusterData>, session_id: SessionId, session: Arc<GenerationSession>) -> Arc<Self> {
		Arc::new(GenerationSessionWrapper {
@@ -544,3 +787,57 @@ impl Drop for SigningSessionWrapper {
		}
	}
}

impl AdminSessionWrapper {
	pub fn new(cluster: Weak<ClusterData>, session_id: SessionId, session: Arc<AdminSession>) -> Arc<Self> {
		Arc::new(AdminSessionWrapper {
			session: session,
			session_id: session_id,
			cluster: cluster,
		})
	}
}

impl ShareAddSession for AdminSessionWrapper {
	fn wait(&self) -> Result<(), Error> {
		match *self.session {
			AdminSession::ShareAdd(ref session) => session.wait(),
			_ => Err(Error::InvalidMessage),
		}
	}
}

impl ShareMoveSession for AdminSessionWrapper {
	fn wait(&self) -> Result<(), Error> {
		match *self.session {
			AdminSession::ShareMove(ref session) => session.wait(),
			_ => Err(Error::InvalidMessage),
		}
	}
}

impl ShareRemoveSession for AdminSessionWrapper {
	fn wait(&self) -> Result<(), Error> {
		match *self.session {
			AdminSession::ShareRemove(ref session) => session.wait(),
			_ => Err(Error::InvalidMessage),
		}
	}
}

impl ServersSetChangeSession for AdminSessionWrapper {
	fn wait(&self) -> Result<(), Error> {
		match *self.session {
			AdminSession::ServersSetChange(ref session) => session.wait(),
			_ => Err(Error::InvalidMessage),
		}
	}
}

impl Drop for AdminSessionWrapper {
	fn drop(&mut self) {
		if let Some(cluster) = self.cluster.upgrade() {
			cluster.sessions().admin_sessions.remove(&self.session_id);
		}
	}
}
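`AdminSessionWrapper` reuses the RAII pattern of the other wrappers: dropping the last client-facing handle evicts the session from the shared container. A generic illustration of that pattern with toy types (not the SecretStore ones):

use std::collections::BTreeMap;
use std::sync::{Arc, Mutex, Weak};

struct Registry {
	sessions: Mutex<BTreeMap<u64, Arc<String>>>,
}

struct SessionHandle {
	id: u64,
	registry: Weak<Registry>,
}

impl Drop for SessionHandle {
	fn drop(&mut self) {
		// only touch the registry if it still exists; mirrors the Weak<ClusterData>::upgrade above
		if let Some(registry) = self.registry.upgrade() {
			registry.sessions.lock().unwrap().remove(&self.id);
		}
	}
}

fn main() {
	let registry = Arc::new(Registry { sessions: Mutex::new(BTreeMap::new()) });
	registry.sessions.lock().unwrap().insert(7, Arc::new("session".into()));
	{
		let _handle = SessionHandle { id: 7, registry: Arc::downgrade(&registry) };
	} // handle dropped here => session 7 is removed
	assert!(registry.sessions.lock().unwrap().is_empty());
}
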
@@ -16,16 +16,16 @@

use std::io;
use std::time::Duration;
-use futures::{Future, Select, BoxFuture, Poll, Async};
+use futures::{Future, Select, Poll, Async};
use tokio_core::reactor::{Handle, Timeout};

-type DeadlineBox<F> = BoxFuture<DeadlineStatus<<F as Future>::Item>, <F as Future>::Error>;
+type DeadlineBox<F> = ::std::boxed::Box<Future<Item = DeadlineStatus<<F as Future>::Item>, Error = <F as Future>::Error> + Send>;

/// Complete a passed future or fail if it is not completed within timeout.
pub fn deadline<F, T>(duration: Duration, handle: &Handle, future: F) -> Result<Deadline<F>, io::Error>
	where F: Future<Item = T, Error = io::Error> + Send + 'static, T: 'static {
-	let timeout = Timeout::new(duration, handle)?.map(|_| DeadlineStatus::Timeout).boxed();
-	let future = future.map(DeadlineStatus::Meet).boxed();
+	let timeout: DeadlineBox<F> = Box::new(Timeout::new(duration, handle)?.map(|_| DeadlineStatus::Timeout));
+	let future: DeadlineBox<F> = Box::new(future.map(DeadlineStatus::Meet));
	let deadline = Deadline {
		future: timeout.select(future),
	};
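Since futures 0.1.15 deprecates `boxed()`/`BoxFuture`, `DeadlineBox` becomes an explicit boxed trait object, but callers use `deadline` exactly as before. A hedged usage sketch, assuming only the signature shown above, that `deadline` and `DeadlineStatus` are in scope, and that `Deadline` resolves to `DeadlineStatus<T>`:

use std::io;
use std::time::Duration;
use futures::future;
use tokio_core::reactor::Core;

fn run_with_deadline() -> Result<(), io::Error> {
	let mut core = Core::new()?;
	// guard an (already completed) future with a 5-second deadline
	let work = future::ok::<u32, io::Error>(42);
	let guarded = deadline(Duration::from_secs(5), &core.handle(), work)?;
	match core.run(guarded)? {
		DeadlineStatus::Meet(value) => assert_eq!(value, 42),
		DeadlineStatus::Timeout => panic!("timed out"),
	}
	Ok(())
}
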
|
@ -26,20 +26,21 @@ use bigint::prelude::U256;
|
|||||||
use bigint::hash::H256;
|
use bigint::hash::H256;
|
||||||
use key_server_cluster::Error;
|
use key_server_cluster::Error;
|
||||||
use key_server_cluster::message::{Message, ClusterMessage, GenerationMessage, EncryptionMessage,
|
use key_server_cluster::message::{Message, ClusterMessage, GenerationMessage, EncryptionMessage,
|
||||||
DecryptionMessage, SigningMessage};
|
DecryptionMessage, SigningMessage, ServersSetChangeMessage, ShareAddMessage, ShareMoveMessage,
|
||||||
|
ShareRemoveMessage};
|
||||||
|
|
||||||
/// Size of serialized header.
|
/// Size of serialized header.
|
||||||
pub const MESSAGE_HEADER_SIZE: usize = 4;
|
pub const MESSAGE_HEADER_SIZE: usize = 18;
|
||||||
/// Current header version.
|
/// Current header version.
|
||||||
pub const CURRENT_HEADER_VERSION: u8 = 1;
|
pub const CURRENT_HEADER_VERSION: u64 = 1;
|
||||||
|
|
||||||
/// Message header.
|
/// Message header.
|
||||||
#[derive(Debug, PartialEq)]
|
#[derive(Debug, PartialEq)]
|
||||||
pub struct MessageHeader {
|
pub struct MessageHeader {
|
||||||
/// Message/Header version.
|
/// Message/Header version.
|
||||||
pub version: u8,
|
pub version: u64,
|
||||||
/// Message kind.
|
/// Message kind.
|
||||||
pub kind: u8,
|
pub kind: u64,
|
||||||
/// Message payload size (without header).
|
/// Message payload size (without header).
|
||||||
pub size: u16,
|
pub size: u16,
|
||||||
}
|
}
|
||||||
@ -94,6 +95,45 @@ pub fn serialize_message(message: Message) -> Result<SerializedMessage, Error> {
|
|||||||
Message::Signing(SigningMessage::PartialSignature(payload)) => (203, serde_json::to_vec(&payload)),
|
Message::Signing(SigningMessage::PartialSignature(payload)) => (203, serde_json::to_vec(&payload)),
|
||||||
Message::Signing(SigningMessage::SigningSessionError(payload)) => (204, serde_json::to_vec(&payload)),
|
Message::Signing(SigningMessage::SigningSessionError(payload)) => (204, serde_json::to_vec(&payload)),
|
||||||
Message::Signing(SigningMessage::SigningSessionCompleted(payload)) => (205, serde_json::to_vec(&payload)),
|
Message::Signing(SigningMessage::SigningSessionCompleted(payload)) => (205, serde_json::to_vec(&payload)),
|
||||||
|
|
||||||
|
Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(payload))
|
||||||
|
=> (250, serde_json::to_vec(&payload)),
|
||||||
|
Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest(payload)) => (251, serde_json::to_vec(&payload)),
|
||||||
|
Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(payload)) => (252, serde_json::to_vec(&payload)),
|
||||||
|
Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(payload))
|
||||||
|
=> (253, serde_json::to_vec(&payload)),
|
||||||
|
Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(payload))
|
||||||
|
=> (254, serde_json::to_vec(&payload)),
|
||||||
|
Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(payload))
|
||||||
|
=> (255, serde_json::to_vec(&payload)),
|
||||||
|
Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(payload))
|
||||||
|
=> (256, serde_json::to_vec(&payload)),
|
||||||
|
Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(payload))
|
||||||
|
=> (257, serde_json::to_vec(&payload)),
|
||||||
|
Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareMoveMessage(payload))
|
||||||
|
=> (258, serde_json::to_vec(&payload)),
|
||||||
|
Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(payload))
|
||||||
|
=> (259, serde_json::to_vec(&payload)),
|
||||||
|
Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(payload)) => (260, serde_json::to_vec(&payload)),
|
||||||
|
Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(payload))
|
||||||
|
=> (261, serde_json::to_vec(&payload)),
|
||||||
|
|
||||||
|
Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(payload)) => (300, serde_json::to_vec(&payload)),
|
||||||
|
Message::ShareAdd(ShareAddMessage::KeyShareCommon(payload)) => (301, serde_json::to_vec(&payload)),
|
||||||
|
Message::ShareAdd(ShareAddMessage::NewAbsoluteTermShare(payload)) => (302, serde_json::to_vec(&payload)),
|
||||||
|
Message::ShareAdd(ShareAddMessage::NewKeysDissemination(payload)) => (303, serde_json::to_vec(&payload)),
|
||||||
|
Message::ShareAdd(ShareAddMessage::ShareAddError(payload)) => (304, serde_json::to_vec(&payload)),
|
||||||
|
|
||||||
|
Message::ShareMove(ShareMoveMessage::ShareMoveConsensusMessage(payload)) => (350, serde_json::to_vec(&payload)),
|
||||||
|
Message::ShareMove(ShareMoveMessage::ShareMoveRequest(payload)) => (351, serde_json::to_vec(&payload)),
|
||||||
|
Message::ShareMove(ShareMoveMessage::ShareMove(payload)) => (352, serde_json::to_vec(&payload)),
|
||||||
|
Message::ShareMove(ShareMoveMessage::ShareMoveConfirm(payload)) => (353, serde_json::to_vec(&payload)),
|
||||||
|
Message::ShareMove(ShareMoveMessage::ShareMoveError(payload)) => (354, serde_json::to_vec(&payload)),
|
||||||
|
|
||||||
|
Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(payload)) => (400, serde_json::to_vec(&payload)),
|
||||||
|
Message::ShareRemove(ShareRemoveMessage::ShareRemoveRequest(payload)) => (401, serde_json::to_vec(&payload)),
|
||||||
|
Message::ShareRemove(ShareRemoveMessage::ShareRemoveConfirm(payload)) => (402, serde_json::to_vec(&payload)),
|
||||||
|
Message::ShareRemove(ShareRemoveMessage::ShareRemoveError(payload)) => (403, serde_json::to_vec(&payload)),
|
||||||
};
|
};
|
||||||
|
|
||||||
let payload = payload.map_err(|err| Error::Serde(err.to_string()))?;
|
let payload = payload.map_err(|err| Error::Serde(err.to_string()))?;
|
||||||
@ -137,6 +177,36 @@ pub fn deserialize_message(header: &MessageHeader, payload: Vec<u8>) -> Result<M
|
|||||||
204 => Message::Signing(SigningMessage::SigningSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
204 => Message::Signing(SigningMessage::SigningSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||||
205 => Message::Signing(SigningMessage::SigningSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
205 => Message::Signing(SigningMessage::SigningSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||||
|
|
||||||
|
250 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||||
|
251 => Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||||
|
252 => Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||||
|
253 => Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||||
|
254 => Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||||
|
255 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||||
|
256 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||||
|
257 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||||
|
258 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareMoveMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||||
|
259 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||||
|
260 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
|
||||||
|
+		261 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+
+		300 => Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		301 => Message::ShareAdd(ShareAddMessage::KeyShareCommon(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		302 => Message::ShareAdd(ShareAddMessage::NewAbsoluteTermShare(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		303 => Message::ShareAdd(ShareAddMessage::NewKeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		304 => Message::ShareAdd(ShareAddMessage::ShareAddError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+
+		350 => Message::ShareMove(ShareMoveMessage::ShareMoveConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		351 => Message::ShareMove(ShareMoveMessage::ShareMoveRequest(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		352 => Message::ShareMove(ShareMoveMessage::ShareMove(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		353 => Message::ShareMove(ShareMoveMessage::ShareMoveConfirm(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		354 => Message::ShareMove(ShareMoveMessage::ShareMoveError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+
+		400 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		401 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveRequest(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		402 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveConfirm(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+		403 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
+
 		_ => return Err(Error::Serde(format!("unknown message type {}", header.kind))),
 	})
 }

@@ -170,8 +240,8 @@ pub fn fix_shared_key(shared_secret: &Secret) -> Result<KeyPair, Error> {
 /// Serialize message header.
 fn serialize_header(header: &MessageHeader) -> Result<Vec<u8>, Error> {
 	let mut buffer = Vec::with_capacity(MESSAGE_HEADER_SIZE);
-	buffer.write_u8(header.version)?;
-	buffer.write_u8(header.kind)?;
+	buffer.write_u64::<LittleEndian>(header.version)?;
+	buffer.write_u64::<LittleEndian>(header.kind)?;
 	buffer.write_u16::<LittleEndian>(header.size)?;
 	Ok(buffer)
 }

@@ -179,14 +249,14 @@ fn serialize_header(header: &MessageHeader) -> Result<Vec<u8>, Error> {
 /// Deserialize message header.
 pub fn deserialize_header(data: &[u8]) -> Result<MessageHeader, Error> {
 	let mut reader = Cursor::new(data);
-	let version = reader.read_u8()?;
+	let version = reader.read_u64::<LittleEndian>()?;
 	if version != CURRENT_HEADER_VERSION {
 		return Err(Error::InvalidMessageVersion);
 	}

 	Ok(MessageHeader {
 		version: version,
-		kind: reader.read_u8()?,
+		kind: reader.read_u64::<LittleEndian>()?,
 		size: reader.read_u16::<LittleEndian>()?,
 	})
 }
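For orientation, the widened header implied by the two hunks above is a fixed 8 + 8 + 2 byte little-endian layout (u64 version, u64 kind, u16 payload size); widening `kind` past a single byte is what makes room for the new 3xx/4xx admin message ids added above. A minimal standalone round-trip sketch of that layout — the constant name and the concrete values are illustrative, not the crate's actual definitions:

```rust
use std::io::Cursor;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};

// Illustrative constant; the real MESSAGE_HEADER_SIZE lives in the crate.
const HEADER_SIZE: usize = 8 + 8 + 2; // u64 version + u64 kind + u16 payload size

fn main() -> std::io::Result<()> {
	// Encode: version = 1, kind = 300 (ShareAddConsensusMessage above), payload size = 42.
	let mut buffer = Vec::with_capacity(HEADER_SIZE);
	buffer.write_u64::<LittleEndian>(1)?;
	buffer.write_u64::<LittleEndian>(300)?;
	buffer.write_u16::<LittleEndian>(42)?;
	assert_eq!(buffer.len(), HEADER_SIZE);

	// Decode in the same order and with the same widths.
	let mut reader = Cursor::new(&buffer);
	assert_eq!(reader.read_u64::<LittleEndian>()?, 1);
	assert_eq!(reader.read_u64::<LittleEndian>()?, 300);
	assert_eq!(reader.read_u16::<LittleEndian>()?, 42);
	Ok(())
}
```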
@@ -15,12 +15,10 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 use std::collections::BTreeSet;
-use std::sync::Arc;
-use ethkey::{Public, Signature, recover};
-use key_server_cluster::{Error, NodeId, SessionMeta, AclStorage};
+use ethkey::Signature;
+use key_server_cluster::{Error, NodeId, SessionMeta};
 use key_server_cluster::message::ConsensusMessage;
 use key_server_cluster::jobs::job_session::{JobSession, JobSessionState, JobTransport, JobExecutor};
-use key_server_cluster::jobs::key_access_job::KeyAccessJob;

 /// Consensus session state.
 #[derive(Debug, Clone, Copy, PartialEq)]

@@ -47,15 +45,17 @@ pub enum ConsensusSessionState {
 /// 2) master node sends partial job requests to every member of consensus group
 /// 3) slave nodes are computing partial responses
 /// 4) master node computes result from partial responses
-pub struct ConsensusSession<ConsensusTransport: JobTransport<PartialJobRequest=Signature, PartialJobResponse=bool>, ComputationExecutor: JobExecutor, ComputationTransport: JobTransport<PartialJobRequest=ComputationExecutor::PartialJobRequest, PartialJobResponse=ComputationExecutor::PartialJobResponse>> {
+pub struct ConsensusSession<ConsensusExecutor: JobExecutor<PartialJobResponse=bool>,
+	ConsensusTransport: JobTransport<PartialJobRequest=ConsensusExecutor::PartialJobRequest, PartialJobResponse=ConsensusExecutor::PartialJobResponse>,
+	ComputationExecutor: JobExecutor,
+	ComputationTransport: JobTransport<PartialJobRequest=ComputationExecutor::PartialJobRequest, PartialJobResponse=ComputationExecutor::PartialJobResponse>
+> {
 	/// Current session state.
 	state: ConsensusSessionState,
 	/// Session metadata.
 	meta: SessionMeta,
-	/// Requester, for which consensus group has allowed access.
-	requester: Option<Public>,
 	/// Consensus establish job.
-	consensus_job: JobSession<KeyAccessJob, ConsensusTransport>,
+	consensus_job: JobSession<ConsensusExecutor, ConsensusTransport>,
 	/// Consensus group.
 	consensus_group: BTreeSet<NodeId>,
 	/// Computation job.

@@ -63,38 +63,30 @@ pub struct ConsensusSession<ConsensusTransport: JobTransport<PartialJobRequest=S
 }

 /// Consensus session creation parameters.
-pub struct ConsensusSessionParams<ConsensusTransport: JobTransport<PartialJobRequest=Signature, PartialJobResponse=bool>> {
+pub struct ConsensusSessionParams<ConsensusExecutor: JobExecutor<PartialJobResponse=bool>,
+	ConsensusTransport: JobTransport<PartialJobRequest=ConsensusExecutor::PartialJobRequest, PartialJobResponse=ConsensusExecutor::PartialJobResponse>
+> {
 	/// Session metadata.
 	pub meta: SessionMeta,
 	/// ACL storage for access check.
-	pub acl_storage: Arc<AclStorage>,
+	pub consensus_executor: ConsensusExecutor,
 	/// Transport for consensus establish job.
 	pub consensus_transport: ConsensusTransport,
 }

-impl<ConsensusTransport, ComputationExecutor, ComputationTransport> ConsensusSession<ConsensusTransport, ComputationExecutor, ComputationTransport> where ConsensusTransport: JobTransport<PartialJobRequest=Signature, PartialJobResponse=bool>, ComputationExecutor: JobExecutor, ComputationTransport: JobTransport<PartialJobRequest=ComputationExecutor::PartialJobRequest, PartialJobResponse=ComputationExecutor::PartialJobResponse> {
-	/// Create new consensus session on slave node.
-	pub fn new_on_slave(params: ConsensusSessionParams<ConsensusTransport>) -> Result<Self, Error> {
-		debug_assert!(params.meta.self_node_id != params.meta.master_node_id);
-		Self::new(None, KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()), params)
-	}
-
-	/// Create new consensus session on master node.
-	pub fn new_on_master(params: ConsensusSessionParams<ConsensusTransport>, signature: Signature) -> Result<Self, Error> {
-		debug_assert!(params.meta.self_node_id == params.meta.master_node_id);
-		Self::new(Some(recover(&signature, &params.meta.id)?),
-			KeyAccessJob::new_on_master(params.meta.id.clone(), params.acl_storage.clone(), signature), params)
-	}
-
-	/// Create new consensus session.
-	fn new(requester: Option<Public>, consensus_job_executor: KeyAccessJob, params: ConsensusSessionParams<ConsensusTransport>) -> Result<Self, Error> {
-		let consensus_job = JobSession::new(params.meta.clone(), consensus_job_executor, params.consensus_transport);
+impl<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTransport> ConsensusSession<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTransport>
+	where ConsensusExecutor: JobExecutor<PartialJobResponse=bool, JobResponse=BTreeSet<NodeId>>,
+		ConsensusTransport: JobTransport<PartialJobRequest=ConsensusExecutor::PartialJobRequest, PartialJobResponse=ConsensusExecutor::PartialJobResponse>,
+		ComputationExecutor: JobExecutor,
+		ComputationTransport: JobTransport<PartialJobRequest=ComputationExecutor::PartialJobRequest, PartialJobResponse=ComputationExecutor::PartialJobResponse> {
+	/// Create new consensus session.
+	pub fn new(params: ConsensusSessionParams<ConsensusExecutor, ConsensusTransport>) -> Result<Self, Error> {
+		let consensus_job = JobSession::new(params.meta.clone(), params.consensus_executor, params.consensus_transport);
 		debug_assert!(consensus_job.state() == JobSessionState::Inactive);

 		Ok(ConsensusSession {
 			state: ConsensusSessionState::WaitingForInitialization,
 			meta: params.meta,
-			requester: requester,
 			consensus_job: consensus_job,
 			consensus_group: BTreeSet::new(),
 			computation_job: None,

@@ -102,12 +94,11 @@ impl<ConsensusTransport, ComputationExecutor, ComputationTransport> ConsensusSes
 		}
 	}

 	/// Get consensus job reference.
-	#[cfg(test)]
-	pub fn consensus_job(&self) -> &JobSession<KeyAccessJob, ConsensusTransport> {
+	pub fn consensus_job(&self) -> &JobSession<ConsensusExecutor, ConsensusTransport> {
 		&self.consensus_job
 	}

-	/// Get all nodes, which chas not rejected consensus request.
+	/// Get all nodes, which has not rejected consensus request.
 	pub fn consensus_non_rejected_nodes(&self) -> BTreeSet<NodeId> {
 		self.consensus_job.responses().iter()
 			.filter(|r| *r.1)

@@ -130,11 +121,6 @@ impl<ConsensusTransport, ComputationExecutor, ComputationTransport> ConsensusSes
 		self.state
 	}

-	/// Get requester, for which consensus has been reached.
-	pub fn requester(&self) -> Result<&Public, Error> {
-		self.requester.as_ref().ok_or(Error::InvalidStateForRequest)
-	}
-
 	/// Get computation result.
 	pub fn result(&self) -> Result<ComputationExecutor::JobResponse, Error> {
 		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);

@@ -155,17 +141,15 @@ impl<ConsensusTransport, ComputationExecutor, ComputationTransport> ConsensusSes
 		self.process_result(initialization_result)
 	}

-	/// Process consensus message.
-	pub fn on_consensus_message(&mut self, sender: &NodeId, message: &ConsensusMessage) -> Result<(), Error> {
-		let consensus_result = match message {
-			&ConsensusMessage::InitializeConsensusSession(ref message) => {
-				let signature = message.requestor_signature.clone().into();
-				self.requester = Some(recover(&signature, &self.meta.id)?);
-				self.consensus_job.on_partial_request(sender, signature)
-			},
-			&ConsensusMessage::ConfirmConsensusInitialization(ref message) =>
-				self.consensus_job.on_partial_response(sender, message.is_confirmed),
-		};
+	/// Process consensus request message.
+	pub fn on_consensus_partial_request(&mut self, sender: &NodeId, request: ConsensusExecutor::PartialJobRequest) -> Result<(), Error> {
+		let consensus_result = self.consensus_job.on_partial_request(sender, request);
+		self.process_result(consensus_result)
+	}
+
+	/// Process consensus message response.
+	pub fn on_consensus_partial_response(&mut self, sender: &NodeId, response: bool) -> Result<(), Error> {
+		let consensus_result = self.consensus_job.on_partial_response(sender, response);
 		self.process_result(consensus_result)
 	}

@@ -350,6 +334,24 @@ impl<ConsensusTransport, ComputationExecutor, ComputationTransport> ConsensusSes
 		}
 	}
 }
+
+impl<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTransport> ConsensusSession<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTransport>
+	where ConsensusExecutor: JobExecutor<PartialJobRequest=Signature, PartialJobResponse=bool, JobResponse=BTreeSet<NodeId>>,
+		ConsensusTransport: JobTransport<PartialJobRequest=ConsensusExecutor::PartialJobRequest, PartialJobResponse=ConsensusExecutor::PartialJobResponse>,
+		ComputationExecutor: JobExecutor,
+		ComputationTransport: JobTransport<PartialJobRequest=ComputationExecutor::PartialJobRequest, PartialJobResponse=ComputationExecutor::PartialJobResponse> {
+	/// Process basic consensus message.
+	pub fn on_consensus_message(&mut self, sender: &NodeId, message: &ConsensusMessage) -> Result<(), Error> {
+		let consensus_result = match message {
+			&ConsensusMessage::InitializeConsensusSession(ref message) =>
+				self.consensus_job.on_partial_request(sender, message.requestor_signature.clone().into()),
+			&ConsensusMessage::ConfirmConsensusInitialization(ref message) =>
+				self.consensus_job.on_partial_response(sender, message.is_confirmed),
+		};
+		self.process_result(consensus_result)
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use std::sync::Arc;

@@ -357,23 +359,24 @@ mod tests {
 	use key_server_cluster::{Error, NodeId, SessionId, DummyAclStorage};
 	use key_server_cluster::message::{ConsensusMessage, InitializeConsensusSession, ConfirmConsensusInitialization};
 	use key_server_cluster::jobs::job_session::tests::{make_master_session_meta, make_slave_session_meta, SquaredSumJobExecutor, DummyJobTransport};
+	use key_server_cluster::jobs::key_access_job::KeyAccessJob;
 	use super::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState};

-	type SquaredSumConsensusSession = ConsensusSession<DummyJobTransport<Signature, bool>, SquaredSumJobExecutor, DummyJobTransport<u32, u32>>;
+	type SquaredSumConsensusSession = ConsensusSession<KeyAccessJob, DummyJobTransport<Signature, bool>, SquaredSumJobExecutor, DummyJobTransport<u32, u32>>;

 	fn make_master_consensus_session(threshold: usize, requester: Option<KeyPair>, acl_storage: Option<DummyAclStorage>) -> SquaredSumConsensusSession {
 		let secret = requester.map(|kp| kp.secret().clone()).unwrap_or(Random.generate().unwrap().secret().clone());
-		SquaredSumConsensusSession::new_on_master(ConsensusSessionParams {
+		SquaredSumConsensusSession::new(ConsensusSessionParams {
 			meta: make_master_session_meta(threshold),
-			acl_storage: Arc::new(acl_storage.unwrap_or(DummyAclStorage::default())),
+			consensus_executor: KeyAccessJob::new_on_master(SessionId::default(), Arc::new(acl_storage.unwrap_or(DummyAclStorage::default())), sign(&secret, &SessionId::default()).unwrap()),
 			consensus_transport: DummyJobTransport::default(),
-		}, sign(&secret, &SessionId::default()).unwrap()).unwrap()
+		}).unwrap()
 	}

 	fn make_slave_consensus_session(threshold: usize, acl_storage: Option<DummyAclStorage>) -> SquaredSumConsensusSession {
-		SquaredSumConsensusSession::new_on_slave(ConsensusSessionParams {
+		SquaredSumConsensusSession::new(ConsensusSessionParams {
 			meta: make_slave_session_meta(threshold),
-			acl_storage: Arc::new(acl_storage.unwrap_or(DummyAclStorage::default())),
+			consensus_executor: KeyAccessJob::new_on_slave(SessionId::default(), Arc::new(acl_storage.unwrap_or(DummyAclStorage::default()))),
 			consensus_transport: DummyJobTransport::default(),
 		}).unwrap()
 	}
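Since the consensus executor is now a type parameter, the same ConsensusSession can drive both the ACL-based key-access check (client sessions) and the administrator check used by the admin sessions. A rough sketch of how the two specializations could be spelled inside the crate; the type aliases themselves are hypothetical and not part of the patch, only the type names taken from the diff are real:

```rust
// Sketch only: module paths follow the layout visible in this PR; the aliases are illustrative.
use key_server_cluster::jobs::consensus_session::ConsensusSession;
use key_server_cluster::jobs::key_access_job::KeyAccessJob;
use key_server_cluster::jobs::servers_set_change_access_job::ServersSetChangeAccessJob;

// Client sessions (decryption/signing): consensus means "the requester passes the ACL check".
type KeyAccessConsensus<CT, CE, CRT> = ConsensusSession<KeyAccessJob, CT, CE, CRT>;

// Admin sessions (servers set change): consensus means "the administrator signed both node sets".
type AdminConsensus<CT, CE, CRT> = ConsensusSession<ServersSetChangeAccessJob, CT, CE, CRT>;
```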
@@ -106,7 +106,7 @@ impl JobExecutor for DecryptionJob {
 		})
 	}

-	fn process_partial_request(&self, partial_request: PartialDecryptionRequest) -> Result<JobPartialRequestAction<PartialDecryptionResponse>, Error> {
+	fn process_partial_request(&mut self, partial_request: PartialDecryptionRequest) -> Result<JobPartialRequestAction<PartialDecryptionResponse>, Error> {
 		if partial_request.other_nodes_ids.len() != self.key_share.threshold
 			|| partial_request.other_nodes_ids.contains(&self.self_node_id)
 			|| partial_request.other_nodes_ids.iter().any(|n| !self.key_share.id_numbers.contains_key(n)) {

secret_store/src/key_server_cluster/jobs/dummy_job.rs (new file, 60 lines)
@@ -0,0 +1,60 @@
+// Copyright 2015-2017 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::{BTreeMap, BTreeSet};
+use key_server_cluster::{Error, NodeId};
+use key_server_cluster::jobs::job_session::{JobExecutor, JobTransport, JobPartialRequestAction, JobPartialResponseAction};
+
+/// No-work job to use in generics (TODO: create separate ShareChangeConsensusSession && remove this)
+pub struct DummyJob;
+
+impl JobExecutor for DummyJob {
+	type PartialJobRequest = ();
+	type PartialJobResponse = ();
+	type JobResponse = ();
+
+	fn prepare_partial_request(&self, _n: &NodeId, _nodes: &BTreeSet<NodeId>) -> Result<(), Error> {
+		unreachable!("dummy job methods are never called")
+	}
+
+	fn process_partial_request(&mut self, _r: ()) -> Result<JobPartialRequestAction<()>, Error> {
+		unreachable!("dummy job methods are never called")
+	}
+
+	fn check_partial_response(&self, _r: &()) -> Result<JobPartialResponseAction, Error> {
+		unreachable!("dummy job methods are never called")
+	}
+
+	fn compute_response(&self, _r: &BTreeMap<NodeId, ()>) -> Result<(), Error> {
+		unreachable!("dummy job methods are never called")
+	}
+}
+
+/// No-work job transport to use in generics (TODO: create separate ShareChangeConsensusSession && remove this)
+pub struct DummyJobTransport;
+
+impl JobTransport for DummyJobTransport {
+	type PartialJobRequest = ();
+	type PartialJobResponse = ();
+
+	fn send_partial_request(&self, _node: &NodeId, _request: ()) -> Result<(), Error> {
+		unreachable!("dummy transport methods are never called")
+	}
+
+	fn send_partial_response(&self, _node: &NodeId, _response: ()) -> Result<(), Error> {
+		unreachable!("dummy transport methods are never called")
+	}
+}
@@ -31,7 +31,7 @@ pub enum JobPartialResponseAction {
 /// Partial request action.
 #[derive(Debug, Clone, Copy, PartialEq)]
 pub enum JobPartialRequestAction<PartialJobResponse> {
-	/// Repond with reject.
+	/// Respond with reject.
 	Reject(PartialJobResponse),
 	/// Respond with this response.
 	Respond(PartialJobResponse),

@@ -46,7 +46,7 @@ pub trait JobExecutor {
 	/// Prepare job request for given node.
 	fn prepare_partial_request(&self, node: &NodeId, nodes: &BTreeSet<NodeId>) -> Result<Self::PartialJobRequest, Error>;
 	/// Process partial request.
-	fn process_partial_request(&self, partial_request: Self::PartialJobRequest) -> Result<JobPartialRequestAction<Self::PartialJobResponse>, Error>;
+	fn process_partial_request(&mut self, partial_request: Self::PartialJobRequest) -> Result<JobPartialRequestAction<Self::PartialJobResponse>, Error>;
 	/// Check partial response of given node.
 	fn check_partial_response(&self, partial_response: &Self::PartialJobResponse) -> Result<JobPartialResponseAction, Error>;
 	/// Compute final job response.

@@ -87,8 +87,6 @@ pub struct JobSession<Executor: JobExecutor, Transport> where Transport: JobTran
 	transport: Transport,
 	/// Session data.
 	data: JobSessionData<Executor::PartialJobResponse>,
-	//// PartialJobRequest dummy.
-	// dummy: PhantomData<PartialJobRequest>,
 }

 /// Data of job session.

@@ -129,6 +127,11 @@ impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExe
 		&self.transport
 	}

+	/// Get executor reference.
+	pub fn executor(&self) -> &Executor {
+		&self.executor
+	}
+
 	/// Get job state.
 	pub fn state(&self) -> JobSessionState {
 		self.data.state

@@ -347,7 +350,7 @@ pub mod tests {
 		type JobResponse = u32;

 		fn prepare_partial_request(&self, _n: &NodeId, _nodes: &BTreeSet<NodeId>) -> Result<u32, Error> { Ok(2) }
-		fn process_partial_request(&self, r: u32) -> Result<JobPartialRequestAction<u32>, Error> { if r <= 10 { Ok(JobPartialRequestAction::Respond(r * r)) } else { Err(Error::InvalidMessage) } }
+		fn process_partial_request(&mut self, r: u32) -> Result<JobPartialRequestAction<u32>, Error> { if r <= 10 { Ok(JobPartialRequestAction::Respond(r * r)) } else { Err(Error::InvalidMessage) } }
 		fn check_partial_response(&self, r: &u32) -> Result<JobPartialResponseAction, Error> { if r % 2 == 0 { Ok(JobPartialResponseAction::Accept) } else { Ok(JobPartialResponseAction::Reject) } }
 		fn compute_response(&self, r: &BTreeMap<NodeId, u32>) -> Result<u32, Error> { Ok(r.values().fold(0, |v1, v2| v1 + v2)) }
 	}

@@ -16,7 +16,7 @@

 use std::sync::Arc;
 use std::collections::{BTreeSet, BTreeMap};
-use ethkey::{Signature, recover};
+use ethkey::{Public, Signature, recover};
 use key_server_cluster::{Error, NodeId, SessionId, AclStorage};
 use key_server_cluster::jobs::job_session::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor};

@@ -46,6 +46,13 @@ impl KeyAccessJob {
 			signature: Some(signature),
 		}
 	}
+
+	pub fn requester(&self) -> Result<Option<Public>, Error> {
+		match self.signature.as_ref() {
+			Some(signature) => Ok(Some(recover(signature, &self.id)?)),
+			None => Ok(None),
+		}
+	}
 }

 impl JobExecutor for KeyAccessJob {

@@ -57,7 +64,8 @@ impl JobExecutor for KeyAccessJob {
 		Ok(self.signature.as_ref().expect("prepare_partial_request is only called on master nodes; new_on_master fills the signature; qed").clone())
 	}

-	fn process_partial_request(&self, partial_request: Signature) -> Result<JobPartialRequestAction<bool>, Error> {
+	fn process_partial_request(&mut self, partial_request: Signature) -> Result<JobPartialRequestAction<bool>, Error> {
+		self.signature = Some(partial_request.clone());
 		self.acl_storage.check(&recover(&partial_request, &self.id)?, &self.id)
 			.map_err(|_| Error::AccessDenied)
 			.map(|is_confirmed| if is_confirmed { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) })

@@ -16,6 +16,9 @@

 pub mod consensus_session;
 pub mod decryption_job;
+pub mod dummy_job;
 pub mod job_session;
 pub mod key_access_job;
+pub mod servers_set_change_access_job;
 pub mod signing_job;
+pub mod unknown_sessions_job;
secret_store/src/key_server_cluster/jobs/servers_set_change_access_job.rs (new file)
@@ -0,0 +1,170 @@
+// Copyright 2015-2017 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::{BTreeSet, BTreeMap};
+use ethkey::{Public, Signature, recover};
+use tiny_keccak::Keccak;
+use key_server_cluster::{Error, NodeId, SessionId};
+use key_server_cluster::message::{InitializeConsensusSessionWithServersSet, InitializeConsensusSessionWithServersMap,
+	InitializeConsensusSessionWithServersSecretMap};
+use key_server_cluster::jobs::job_session::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor};
+
+/// Purpose of this job is to check if requestor is administrator of SecretStore (i.e. it have access to change key servers set).
+pub struct ServersSetChangeAccessJob {
+	/// Servers set administrator public key (this could be changed to ACL-based check later).
+	administrator: Public,
+	/// Current servers set (in session/cluster).
+	current_servers_set: BTreeSet<NodeId>,
+	/// Old servers set.
+	old_servers_set: Option<BTreeSet<NodeId>>,
+	/// New servers set.
+	new_servers_set: Option<BTreeSet<NodeId>>,
+	/// Old servers set, signed by requester.
+	old_set_signature: Option<Signature>,
+	/// New servers set, signed by requester.
+	new_set_signature: Option<Signature>,
+}
+
+/// Servers set change job partial request.
+pub struct ServersSetChangeAccessRequest {
+	/// Old servers set.
+	pub old_servers_set: BTreeSet<NodeId>,
+	/// New servers set.
+	pub new_servers_set: BTreeSet<NodeId>,
+	/// Hash(old_servers_set), signed by requester.
+	pub old_set_signature: Signature,
+	/// Hash(new_servers_set), signed by requester.
+	pub new_set_signature: Signature,
+}
+
+impl<'a> From<&'a InitializeConsensusSessionWithServersSet> for ServersSetChangeAccessRequest {
+	fn from(message: &InitializeConsensusSessionWithServersSet) -> Self {
+		ServersSetChangeAccessRequest {
+			old_servers_set: message.old_nodes_set.iter().cloned().map(Into::into).collect(),
+			new_servers_set: message.new_nodes_set.iter().cloned().map(Into::into).collect(),
+			old_set_signature: message.old_set_signature.clone().into(),
+			new_set_signature: message.new_set_signature.clone().into(),
+		}
+	}
+}
+
+impl<'a> From<&'a InitializeConsensusSessionWithServersMap> for ServersSetChangeAccessRequest {
+	fn from(message: &InitializeConsensusSessionWithServersMap) -> Self {
+		ServersSetChangeAccessRequest {
+			old_servers_set: message.old_nodes_set.iter().cloned().map(Into::into).collect(),
+			new_servers_set: message.new_nodes_set.keys().cloned().map(Into::into).collect(),
+			old_set_signature: message.old_set_signature.clone().into(),
+			new_set_signature: message.new_set_signature.clone().into(),
+		}
+	}
+}
+
+impl<'a> From<&'a InitializeConsensusSessionWithServersSecretMap> for ServersSetChangeAccessRequest {
+	fn from(message: &InitializeConsensusSessionWithServersSecretMap) -> Self {
+		ServersSetChangeAccessRequest {
+			old_servers_set: message.old_nodes_set.iter().cloned().map(Into::into).collect(),
+			new_servers_set: message.new_nodes_set.keys().cloned().map(Into::into).collect(),
+			old_set_signature: message.old_set_signature.clone().into(),
+			new_set_signature: message.new_set_signature.clone().into(),
+		}
+	}
+}
+
+impl ServersSetChangeAccessJob {
+	pub fn new_on_slave(administrator: Public, current_servers_set: BTreeSet<NodeId>) -> Self {
+		ServersSetChangeAccessJob {
+			administrator: administrator,
+			current_servers_set: current_servers_set,
+			old_servers_set: None,
+			new_servers_set: None,
+			old_set_signature: None,
+			new_set_signature: None,
+		}
+	}
+
+	pub fn new_on_master(administrator: Public, current_servers_set: BTreeSet<NodeId>, old_servers_set: BTreeSet<NodeId>, new_servers_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Self {
+		ServersSetChangeAccessJob {
+			administrator: administrator,
+			current_servers_set: current_servers_set,
+			old_servers_set: Some(old_servers_set),
+			new_servers_set: Some(new_servers_set),
+			old_set_signature: Some(old_set_signature),
+			new_set_signature: Some(new_set_signature),
+		}
+	}
+
+	pub fn new_servers_set(&self) -> Option<&BTreeSet<NodeId>> {
+		self.new_servers_set.as_ref()
+	}
+}
+
+impl JobExecutor for ServersSetChangeAccessJob {
+	type PartialJobRequest = ServersSetChangeAccessRequest;
+	type PartialJobResponse = bool;
+	type JobResponse = BTreeSet<NodeId>;
+
+	fn prepare_partial_request(&self, _node: &NodeId, _nodes: &BTreeSet<NodeId>) -> Result<ServersSetChangeAccessRequest, Error> {
+		let explanation = "prepare_partial_request is only called on master nodes; this field is filled on master nodes in constructor; qed";
+		Ok(ServersSetChangeAccessRequest {
+			old_servers_set: self.old_servers_set.clone().expect(explanation),
+			new_servers_set: self.new_servers_set.clone().expect(explanation),
+			old_set_signature: self.old_set_signature.clone().expect(explanation),
+			new_set_signature: self.new_set_signature.clone().expect(explanation),
+		})
+	}
+
+	fn process_partial_request(&mut self, partial_request: ServersSetChangeAccessRequest) -> Result<JobPartialRequestAction<bool>, Error> {
+		let ServersSetChangeAccessRequest {
+			old_servers_set,
+			new_servers_set,
+			old_set_signature,
+			new_set_signature,
+		} = partial_request;
+
+		// check that current set is exactly the same set as old set
+		if self.current_servers_set.symmetric_difference(&old_servers_set).next().is_some() {
+			return Ok(JobPartialRequestAction::Reject(false));
+		}
+
+		// check old servers set signature
+		let old_actual_public = recover(&old_set_signature, &ordered_nodes_hash(&old_servers_set).into())?;
+		let new_actual_public = recover(&new_set_signature, &ordered_nodes_hash(&new_servers_set).into())?;
+		let is_administrator = old_actual_public == self.administrator && new_actual_public == self.administrator;
+		self.new_servers_set = Some(new_servers_set);
+
+		Ok(if is_administrator { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) })
+	}
+
+	fn check_partial_response(&self, partial_response: &bool) -> Result<JobPartialResponseAction, Error> {
+		Ok(if *partial_response { JobPartialResponseAction::Accept } else { JobPartialResponseAction::Reject })
+	}
+
+	fn compute_response(&self, partial_responses: &BTreeMap<NodeId, bool>) -> Result<BTreeSet<NodeId>, Error> {
+		Ok(partial_responses.keys().cloned().collect())
+	}
+}
+
+pub fn ordered_nodes_hash(nodes: &BTreeSet<NodeId>) -> SessionId {
+	let mut nodes_keccak = Keccak::new_keccak256();
+	for node in nodes {
+		nodes_keccak.update(&*node);
+	}
+
+	let mut nodes_keccak_value = [0u8; 32];
+	nodes_keccak.finalize(&mut nodes_keccak_value);
+
+	nodes_keccak_value.into()
+}

@@ -101,7 +101,7 @@ impl JobExecutor for SigningJob {
 		})
 	}

-	fn process_partial_request(&self, partial_request: PartialSigningRequest) -> Result<JobPartialRequestAction<PartialSigningResponse>, Error> {
+	fn process_partial_request(&mut self, partial_request: PartialSigningRequest) -> Result<JobPartialRequestAction<PartialSigningResponse>, Error> {
 		if partial_request.other_nodes_ids.len() != self.key_share.threshold
 			|| partial_request.other_nodes_ids.contains(&self.self_node_id)
 			|| partial_request.other_nodes_ids.iter().any(|n| !self.key_share.id_numbers.contains_key(n)) {
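Stepping back from the ServersSetChangeAccessJob above: the administrator signs the keccak256 hash of each ordered node set, and every node recovers the signer from those signatures and compares it with its configured admin public key (process_partial_request). A small self-contained sketch of that sign/recover flow, assuming the ethkey and tiny_keccak APIs used elsewhere in this patch and with short byte strings standing in for real 64-byte node ids:

```rust
use std::collections::BTreeSet;

use ethkey::{Generator, Random, recover, sign};
use tiny_keccak::Keccak;

// Mirrors ordered_nodes_hash above, but over illustrative byte-string ids.
fn ordered_hash(nodes: &BTreeSet<Vec<u8>>) -> [u8; 32] {
	let mut keccak = Keccak::new_keccak256();
	for node in nodes {
		keccak.update(node);
	}
	let mut hash = [0u8; 32];
	keccak.finalize(&mut hash);
	hash
}

fn main() {
	let admin = Random.generate().unwrap();
	let old_set: BTreeSet<Vec<u8>> = vec![b"node1".to_vec(), b"node2".to_vec()].into_iter().collect();
	let new_set: BTreeSet<Vec<u8>> = vec![b"node1".to_vec(), b"node2".to_vec(), b"node3".to_vec()].into_iter().collect();

	// The administrator signs the hash of each ordered node set.
	let old_sig = sign(admin.secret(), &ordered_hash(&old_set).into()).unwrap();
	let new_sig = sign(admin.secret(), &ordered_hash(&new_set).into()).unwrap();

	// Every node recovers the signer and checks it against the configured admin key,
	// which is what the access job's process_partial_request does.
	assert_eq!(&recover(&old_sig, &ordered_hash(&old_set).into()).unwrap(), admin.public());
	assert_eq!(&recover(&new_sig, &ordered_hash(&new_set).into()).unwrap(), admin.public());
}
```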
secret_store/src/key_server_cluster/jobs/unknown_sessions_job.rs (new file)
@@ -0,0 +1,80 @@
+// Copyright 2015-2017 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::sync::Arc;
+use std::collections::{BTreeSet, BTreeMap};
+use key_server_cluster::{Error, NodeId, SessionId, KeyStorage};
+use key_server_cluster::jobs::job_session::{JobPartialRequestAction, JobPartialResponseAction, JobExecutor};
+
+/// Unknown sessions report job.
+pub struct UnknownSessionsJob {
+	/// Target node id.
+	target_node_id: Option<NodeId>,
+	/// Keys storage.
+	key_storage: Arc<KeyStorage>,
+}
+
+impl UnknownSessionsJob {
+	pub fn new_on_slave(key_storage: Arc<KeyStorage>) -> Self {
+		UnknownSessionsJob {
+			target_node_id: None,
+			key_storage: key_storage,
+		}
+	}
+
+	pub fn new_on_master(key_storage: Arc<KeyStorage>, self_node_id: NodeId) -> Self {
+		UnknownSessionsJob {
+			target_node_id: Some(self_node_id),
+			key_storage: key_storage,
+		}
+	}
+}
+
+impl JobExecutor for UnknownSessionsJob {
+	type PartialJobRequest = NodeId;
+	type PartialJobResponse = BTreeSet<SessionId>;
+	type JobResponse = BTreeMap<SessionId, BTreeSet<NodeId>>;
+
+	fn prepare_partial_request(&self, _node: &NodeId, _nodes: &BTreeSet<NodeId>) -> Result<NodeId, Error> {
+		Ok(self.target_node_id.clone().expect("prepare_partial_request is only called on master nodes; this field is filled on master nodes in constructor; qed"))
+	}
+
+	fn process_partial_request(&mut self, partial_request: NodeId) -> Result<JobPartialRequestAction<BTreeSet<SessionId>>, Error> {
+		Ok(JobPartialRequestAction::Respond(self.key_storage.iter()
+			.filter(|&(_, ref key_share)| !key_share.id_numbers.contains_key(&partial_request))
+			.map(|(id, _)| id.clone())
+			.collect()))
+	}
+
+	fn check_partial_response(&self, _partial_response: &BTreeSet<SessionId>) -> Result<JobPartialResponseAction, Error> {
+		Ok(JobPartialResponseAction::Accept)
+	}
+
+	// TODO: optimizations:
+	// currently ALL unknown sessions are sent at once - it is better to limit messages by size/len => add partial-partial responses
+	fn compute_response(&self, partial_responses: &BTreeMap<NodeId, BTreeSet<SessionId>>) -> Result<BTreeMap<SessionId, BTreeSet<NodeId>>, Error> {
+		let mut result: BTreeMap<SessionId, BTreeSet<NodeId>> = BTreeMap::new();
+		for (node_id, node_sessions) in partial_responses {
+			for node_session in node_sessions {
+				result.entry(node_session.clone())
+					.or_insert_with(Default::default)
+					.insert(node_id.clone());
+			}
+		}
+
+		Ok(result)
+	}
+}
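compute_response above inverts the per-node answers (each node reports the session ids it does not hold a share for) into a per-session map of the nodes that are missing that share. A small self-contained illustration of that inversion, with plain strings standing in for NodeId and SessionId:

```rust
use std::collections::{BTreeMap, BTreeSet};

// Invert "node -> sessions unknown to it" into "session -> nodes that miss it",
// mirroring UnknownSessionsJob::compute_response above.
fn invert(partial_responses: &BTreeMap<String, BTreeSet<String>>) -> BTreeMap<String, BTreeSet<String>> {
	let mut result: BTreeMap<String, BTreeSet<String>> = BTreeMap::new();
	for (node_id, node_sessions) in partial_responses {
		for session in node_sessions {
			result.entry(session.clone()).or_insert_with(Default::default).insert(node_id.clone());
		}
	}
	result
}

fn main() {
	let mut responses: BTreeMap<String, BTreeSet<String>> = BTreeMap::new();
	responses.insert("node1".to_string(), ["key_a", "key_b"].iter().map(|s| s.to_string()).collect());
	responses.insert("node2".to_string(), ["key_b"].iter().map(|s| s.to_string()).collect());

	let by_session = invert(&responses);
	// key_b is unknown to both nodes, key_a only to node1.
	assert_eq!(by_session["key_b"].len(), 2);
	assert_eq!(by_session["key_a"].len(), 1);
}
```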
@@ -94,7 +94,6 @@ pub fn generate_random_polynom(threshold: usize) -> Result<Vec<Secret>, Error> {
 }

 /// Compute absolute term of additional polynom1 when new node is added to the existing generation node set
-#[cfg(test)]
 pub fn compute_additional_polynom1_absolute_term<'a, I>(secret_values: I) -> Result<Secret, Error> where I: Iterator<Item=&'a Secret> {
 	let mut absolute_term = compute_secret_sum(secret_values)?;
 	absolute_term.neg()?;

@@ -102,7 +101,6 @@ pub fn compute_additional_polynom1_absolute_term<'a, I>(secret_values: I) -> Res
 }

 /// Add two polynoms together (coeff = coeff1 + coeff2).
-#[cfg(test)]
 pub fn add_polynoms(polynom1: &[Secret], polynom2: &[Secret], is_absolute_term2_zero: bool) -> Result<Vec<Secret>, Error> {
 	polynom1.iter().zip(polynom2.iter())
 		.enumerate()

@@ -162,6 +160,13 @@ pub fn public_values_generation(threshold: usize, derived_point: &Public, polyno
 	Ok(publics)
 }

+/// Generate refreshed public keys for other participants.
+pub fn refreshed_public_values_generation(threshold: usize, refreshed_polynom1: &[Secret]) -> Result<Vec<Public>, Error> {
+	debug_assert_eq!(refreshed_polynom1.len(), threshold + 1);
+
+	(0..threshold + 1).map(|i| compute_public_share(&refreshed_polynom1[i])).collect()
+}
+
 /// Check keys passed by other participants.
 pub fn keys_verification(threshold: usize, derived_point: &Public, number_id: &Secret, secret1: &Secret, secret2: &Secret, publics: &[Public]) -> Result<bool, Error> {
 	// calculate left part

@@ -190,7 +195,6 @@ pub fn keys_verification(threshold: usize, derived_point: &Public, number_id: &S
 }

 /// Check refreshed keys passed by other participants.
-#[cfg(test)]
 pub fn refreshed_keys_verification(threshold: usize, number_id: &Secret, secret1: &Secret, publics: &[Public]) -> Result<bool, Error> {
 	// calculate left part
 	let mut left = math::generation_point();

@@ -545,7 +549,6 @@ pub mod tests {
 			new_nodes_polynom1.push(new_polynom1);
 		}

-
 		// new nodes sends its own information to all other nodes
 		let n = n + new_nodes;
 		id_numbers.extend((0..new_nodes).map(|_| Random.generate().unwrap().secret().clone()));

@@ -597,10 +600,12 @@ pub mod tests {
 			.filter(|&(j, _)| j != i)
 			.take(t)
 			.map(|(_, id_number)| id_number)).unwrap()).collect();
+
 		let nodes_shadow_points: Vec<_> = nodes_shadows.iter()
 			.map(|s| compute_node_shadow_point(&access_key, &encrypted_secret.common_point, s, None).unwrap())
 			.map(|sp| sp.0)
 			.collect();
+
 		assert_eq!(nodes_shadows.len(), t + 1);
 		assert_eq!(nodes_shadow_points.len(), t + 1);

@@ -752,14 +757,19 @@ pub mod tests {
 		// generate key using 6-of-10 session
 		let (t, n) = (5, 10);
 		let artifacts1 = run_key_generation(t, n, None);
+		let joint_secret1 = compute_joint_secret(artifacts1.polynoms1.iter().map(|p1| &p1[0])).unwrap();

 		// let's say we want to include additional server to the set
 		// so that scheme becames 6-of-11
 		let artifacts2 = run_key_share_refreshing_and_add_new_nodes(t, n, 1, &artifacts1);
+		let joint_secret2 = compute_joint_secret(artifacts2.polynoms1.iter().map(|p1| &p1[0])).unwrap();
 		assert_eq!(artifacts1.joint_public, artifacts2.joint_public);
+		assert_eq!(joint_secret1, joint_secret2);

 		// include another couple of servers (6-of-13)
 		let artifacts3 = run_key_share_refreshing_and_add_new_nodes(t, n + 1, 2, &artifacts2);
+		let joint_secret3 = compute_joint_secret(artifacts3.polynoms1.iter().map(|p1| &p1[0])).unwrap();
 		assert_eq!(artifacts1.joint_public, artifacts3.joint_public);
+		assert_eq!(joint_secret1, joint_secret3);
 	}
 }
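A compact way to state what the new math helpers and the joint-secret assertions above are checking (the notation below is mine, not the code's): each node j holds a degree-t polynomial f_j, and a share refresh or share add must change the individual shares while leaving the joint secret — the sum of the absolute terms — unchanged.

```latex
% Joint secret before the share change: the sum of the nodes' absolute terms.
s = \sum_j f_j(0)

% Each node moves to a refreshed polynomial f'_j = f_j + \delta_j. The negated-sum
% construction in compute_additional_polynom1_absolute_term is there to make the
% perturbations cancel in the absolute term:
\sum_j \delta_j(0) = 0
\quad\Longrightarrow\quad
s' = \sum_j f'_j(0) = \sum_j f_j(0) + \sum_j \delta_j(0) = s

% refreshed_public_values_generation then publishes commitments to the new
% coefficients, P_i = a'_i \cdot G, so peers can verify their refreshed shares
% (refreshed_keys_verification) without learning the coefficients themselves.
```

This is exactly the invariant the extended test exercises: joint_secret1 == joint_secret2 == joint_secret3 while the node count grows from 10 to 13.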
@@ -36,6 +36,14 @@ pub enum Message {
 	Decryption(DecryptionMessage),
 	/// Signing message.
 	Signing(SigningMessage),
+	/// Share add message.
+	ShareAdd(ShareAddMessage),
+	/// Share move message.
+	ShareMove(ShareMoveMessage),
+	/// Share add message.
+	ShareRemove(ShareRemoveMessage),
+	/// Servers set change message.
+	ServersSetChange(ServersSetChangeMessage),
 }

 /// All possible cluster-level messages.

@@ -90,6 +98,33 @@ pub enum ConsensusMessage {
 	ConfirmConsensusInitialization(ConfirmConsensusInitialization),
 }

+/// All possible messages that can be sent during servers-set consensus establishing.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum ConsensusMessageWithServersSet {
+	/// Initialize consensus session.
+	InitializeConsensusSession(InitializeConsensusSessionWithServersSet),
+	/// Confirm/reject consensus session initialization.
+	ConfirmConsensusInitialization(ConfirmConsensusInitialization),
+}
+
+/// All possible messages that can be sent during share add consensus establishing.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum ConsensusMessageWithServersMap {
+	/// Initialize consensus session.
+	InitializeConsensusSession(InitializeConsensusSessionWithServersMap),
+	/// Confirm/reject consensus session initialization.
+	ConfirmConsensusInitialization(ConfirmConsensusInitialization),
+}
+
+/// All possible messages that can be sent during share add consensus establishing.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum ConsensusMessageWithServersSecretMap {
+	/// Initialize consensus session.
+	InitializeConsensusSession(InitializeConsensusSessionWithServersSecretMap),
+	/// Confirm/reject consensus session initialization.
+	ConfirmConsensusInitialization(ConfirmConsensusInitialization),
+}
+
 /// All possible messages that can be sent during decryption session.
 #[derive(Clone, Debug)]
 pub enum DecryptionMessage {

@@ -122,6 +157,78 @@ pub enum SigningMessage {
 	SigningSessionCompleted(SigningSessionCompleted),
 }

+/// All possible messages that can be sent during servers set change session.
+#[derive(Clone, Debug)]
+pub enum ServersSetChangeMessage {
+	/// Consensus establishing message.
+	ServersSetChangeConsensusMessage(ServersSetChangeConsensusMessage),
+	/// Unknown sessions ids request.
+	UnknownSessionsRequest(UnknownSessionsRequest),
+	/// Unknown sessions ids.
+	UnknownSessions(UnknownSessions),
+	/// Initialize share change session(s).
+	InitializeShareChangeSession(InitializeShareChangeSession),
+	/// Confirm share change session(s) initialization.
+	ConfirmShareChangeSessionInitialization(ConfirmShareChangeSessionInitialization),
+	/// Share change session delegation.
+	ServersSetChangeDelegate(ServersSetChangeDelegate),
+	/// Share change session delegation response.
+	ServersSetChangeDelegateResponse(ServersSetChangeDelegateResponse),
+	/// Share add message.
+	ServersSetChangeShareAddMessage(ServersSetChangeShareAddMessage),
+	/// Share move message.
+	ServersSetChangeShareMoveMessage(ServersSetChangeShareMoveMessage),
+	/// Share remove message.
+	ServersSetChangeShareRemoveMessage(ServersSetChangeShareRemoveMessage),
+	/// Servers set change session completed.
+	ServersSetChangeError(ServersSetChangeError),
+	/// Servers set change session completed.
+	ServersSetChangeCompleted(ServersSetChangeCompleted),
+}
+
+/// All possible messages that can be sent during share add session.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum ShareAddMessage {
+	/// Consensus establishing message.
+	ShareAddConsensusMessage(ShareAddConsensusMessage),
+	/// Common key share data is sent to new node.
+	KeyShareCommon(KeyShareCommon),
+	/// Absolute term share of secret polynom is sent to new node.
+	NewAbsoluteTermShare(NewAbsoluteTermShare),
+	/// Generated keys are sent to every node.
+	NewKeysDissemination(NewKeysDissemination),
+	/// When session error has occured.
+	ShareAddError(ShareAddError),
+}
+
+/// All possible messages that can be sent during share move session.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum ShareMoveMessage {
+	/// Consensus establishing message.
+	ShareMoveConsensusMessage(ShareMoveConsensusMessage),
+	/// Share move request.
+	ShareMoveRequest(ShareMoveRequest),
+	/// Share move.
+	ShareMove(ShareMove),
+	/// Share move confirmation.
+	ShareMoveConfirm(ShareMoveConfirm),
+	/// When session error has occured.
+	ShareMoveError(ShareMoveError),
+}
+
+/// All possible messages that can be sent during share remove session.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum ShareRemoveMessage {
+	/// Consensus establishing message.
+	ShareRemoveConsensusMessage(ShareRemoveConsensusMessage),
+	/// Share remove request.
+	ShareRemoveRequest(ShareRemoveRequest),
+	/// Share remove confirmation.
+	ShareRemoveConfirm(ShareRemoveConfirm),
+	/// When session error has occured.
+	ShareRemoveError(ShareRemoveError),
+}
+
 /// Introduce node public key.
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct NodePublicKey {

@@ -226,7 +333,7 @@ pub struct SessionError {
 	pub session: MessageSessionId,
 	/// Session-level nonce.
 	pub session_nonce: u64,
-	/// Public key share.
+	/// Error message.
 	pub error: String,
 }

@@ -288,6 +395,45 @@ pub struct ConfirmConsensusInitialization {
 	pub is_confirmed: bool,
 }

+/// Node is asked to be part of servers-set consensus group.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct InitializeConsensusSessionWithServersSet {
+	/// Old nodes set.
+	pub old_nodes_set: BTreeSet<MessageNodeId>,
+	/// New nodes set.
+	pub new_nodes_set: BTreeSet<MessageNodeId>,
+	/// Old server set, signed by requester.
+	pub old_set_signature: SerializableSignature,
+	/// New server set, signed by requester.
+	pub new_set_signature: SerializableSignature,
+}
+
+/// Node is asked to be part of servers-set consensus group.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct InitializeConsensusSessionWithServersSecretMap {
+	/// Old nodes set.
+	pub old_nodes_set: BTreeSet<MessageNodeId>,
+	/// New nodes set.
+	pub new_nodes_set: BTreeMap<MessageNodeId, SerializableSecret>,
+	/// Old server set, signed by requester.
+	pub old_set_signature: SerializableSignature,
+	/// New server set, signed by requester.
+	pub new_set_signature: SerializableSignature,
+}
+
+/// Node is asked to be part of servers-set consensus group.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct InitializeConsensusSessionWithServersMap {
+	/// Old nodes set.
+	pub old_nodes_set: BTreeSet<MessageNodeId>,
+	/// New nodes set (keys() = new_nodes_set, values = old nodes [differs from new if share is moved]).
+	pub new_nodes_set: BTreeMap<MessageNodeId, MessageNodeId>,
+	/// Old server set, signed by requester.
+	pub old_set_signature: SerializableSignature,
+	/// New server set, signed by requester.
+	pub new_set_signature: SerializableSignature,
+}
+
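The keys/values convention documented on InitializeConsensusSessionWithServersMap is easy to misread, so here is a small hedged illustration of what the map would contain for a single share move (plain strings stand in for MessageNodeId; the real type is a serializable node id):

```rust
use std::collections::{BTreeMap, BTreeSet};

fn main() {
	// Current (old) servers set: A, B, C.
	let old_nodes_set: BTreeSet<&str> = ["A", "B", "C"].iter().cloned().collect();

	// New servers set where C's share moves to D: keys are the new set,
	// values point at the node that currently holds the share.
	let mut new_nodes_set: BTreeMap<&str, &str> = BTreeMap::new();
	new_nodes_set.insert("A", "A"); // share stays on A
	new_nodes_set.insert("B", "B"); // share stays on B
	new_nodes_set.insert("D", "C"); // share held by C moves to D

	assert!(old_nodes_set.contains("C") && !new_nodes_set.contains_key("C"));
}
```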
/// Consensus-related signing message.
|
/// Consensus-related signing message.
|
||||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||||
pub struct SigningConsensusMessage {
|
pub struct SigningConsensusMessage {
|
||||||
@@ -355,7 +501,7 @@ pub struct SigningSessionError {
 	pub sub_session: SerializableSecret,
 	/// Session-level nonce.
 	pub session_nonce: u64,
-	/// Error description.
+	/// Error message.
 	pub error: String,
 }
 
@@ -427,7 +573,7 @@ pub struct DecryptionSessionError {
 	pub sub_session: SerializableSecret,
 	/// Session-level nonce.
 	pub session_nonce: u64,
-	/// Public key share.
+	/// Error message.
 	pub error: String,
 }
 
@@ -442,6 +588,312 @@ pub struct DecryptionSessionCompleted {
 	pub session_nonce: u64,
 }
 
+/// Consensus-related servers set change message.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ServersSetChangeConsensusMessage {
+	/// Servers set change session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Consensus message.
+	pub message: ConsensusMessageWithServersSet,
+}
+
+/// Unknown sessions ids request.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct UnknownSessionsRequest {
+	/// Servers set change session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+}
+
+/// Unknown session ids.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct UnknownSessions {
+	/// Servers set change session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Unknown session ids.
+	pub unknown_sessions: BTreeSet<MessageSessionId>,
+}
+
+/// Master node opens share initialize session on other nodes.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct InitializeShareChangeSession {
+	/// Servers set change session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Key id.
+	pub key_id: MessageSessionId,
+	/// Master node.
+	pub master_node_id: MessageNodeId,
+	/// Old nodes set.
+	pub old_shares_set: BTreeSet<MessageNodeId>,
+	/// Shares to add. Values are filled for new nodes only.
+	pub shares_to_add: BTreeMap<MessageNodeId, SerializableSecret>,
+	/// Shares to move.
+	pub shares_to_move: BTreeMap<MessageNodeId, MessageNodeId>,
+	/// Shares to remove.
+	pub shares_to_remove: BTreeSet<MessageNodeId>,
+}
+
+/// Slave node confirms session initialization.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ConfirmShareChangeSessionInitialization {
+	/// Servers set change session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Sessions that are confirmed.
+	pub key_id: MessageSessionId,
+}
+
+/// Share change is requested.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ServersSetChangeDelegate {
+	/// Servers set change session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Key id.
+	pub key_id: MessageSessionId,
+}
+
+/// Share change is completed.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ServersSetChangeDelegateResponse {
+	/// Servers set change session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Key id.
+	pub key_id: MessageSessionId,
+}
+
+/// Servers set change share add message.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ServersSetChangeShareAddMessage {
+	/// Servers set change session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Share add message.
+	pub message: ShareAddMessage,
+}
+
+/// Servers set change share move message.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ServersSetChangeShareMoveMessage {
+	/// Servers set change session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Share move message.
+	pub message: ShareMoveMessage,
+}
+
+/// Servers set change share remove message.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ServersSetChangeShareRemoveMessage {
+	/// Servers set change session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Share remove message.
+	pub message: ShareRemoveMessage,
+}
+
+/// When servers set change session error has occurred.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ServersSetChangeError {
+	/// Servers set change session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Error message.
+	pub error: String,
+}
+
+/// When servers set change session is completed.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ServersSetChangeCompleted {
+	/// Servers set change session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+}
+
+/// Consensus-related share add session message.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ShareAddConsensusMessage {
+	/// Share add session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Consensus message.
+	pub message: ConsensusMessageWithServersSecretMap,
+}
+
+/// Key share common data is passed to new node.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct KeyShareCommon {
+	/// Generation session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Key threshold.
+	pub threshold: usize,
+	/// Author of key share entry.
+	pub author: SerializablePublic,
+	/// Common (shared) encryption point.
+	pub common_point: Option<SerializablePublic>,
+	/// Encrypted point.
+	pub encrypted_point: Option<SerializablePublic>,
+}
+
+/// Absolute term share is passed to new node.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct NewAbsoluteTermShare {
+	/// Generation session Id.
+	pub session: MessageSessionId,
+	/// Sender id number.
+	pub sender_id: SerializableSecret,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Absolute term share.
+	pub absolute_term_share: SerializableSecret,
+}
+
+/// Generated keys are sent to every node.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct NewKeysDissemination {
+	/// Generation session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Refreshed secret1 value.
+	pub refreshed_secret1: SerializableSecret,
+	/// Refreshed public values.
+	pub refreshed_publics: Vec<SerializablePublic>,
+}
+
+/// When share add session error has occurred.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ShareAddError {
+	/// Generation session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Error message.
+	pub error: String,
+}
+
+/// Consensus-related share move session message.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ShareMoveConsensusMessage {
+	/// Share move session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Consensus message.
+	pub message: ConsensusMessageWithServersMap,
+}
+
+/// Share move is requested.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ShareMoveRequest {
+	/// Generation session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+}
+
+/// Share is moved from source to destination.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ShareMove {
+	/// Generation session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Author of the entry.
+	pub author: SerializablePublic,
+	/// Decryption threshold.
+	pub threshold: usize,
+	/// Nodes ids numbers.
+	pub id_numbers: BTreeMap<MessageNodeId, SerializableSecret>,
+	/// Polynom1.
+	pub polynom1: Vec<SerializableSecret>,
+	/// Node secret share.
+	pub secret_share: SerializableSecret,
+	/// Common (shared) encryption point.
+	pub common_point: Option<SerializablePublic>,
+	/// Encrypted point.
+	pub encrypted_point: Option<SerializablePublic>,
+}
+
+/// Share move is confirmed (destination node confirms to all other nodes).
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ShareMoveConfirm {
+	/// Generation session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+}
+
+/// When share move session error has occurred.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ShareMoveError {
+	/// Generation session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Error message.
+	pub error: String,
+}
+
+/// Consensus-related share remove session message.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ShareRemoveConsensusMessage {
+	/// Share remove session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Consensus message.
+	pub message: ConsensusMessageWithServersSet,
+}
+
+/// Share remove is requested.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ShareRemoveRequest {
+	/// Generation session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+}
+
+/// Share remove is confirmed (destination node confirms to all other nodes).
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ShareRemoveConfirm {
+	/// Generation session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+}
+
+/// When share remove session error has occurred.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ShareRemoveError {
+	/// Generation session Id.
+	pub session: MessageSessionId,
+	/// Session-level nonce.
+	pub session_nonce: u64,
+	/// Error message.
+	pub error: String,
+}
+
 impl GenerationMessage {
 	pub fn session_id(&self) -> &SessionId {
 		match *self {
@@ -553,6 +1005,106 @@ impl SigningMessage {
 	}
 }
 
+impl ServersSetChangeMessage {
+	pub fn session_id(&self) -> &SessionId {
+		match *self {
+			ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref msg) => &msg.session,
+			ServersSetChangeMessage::UnknownSessionsRequest(ref msg) => &msg.session,
+			ServersSetChangeMessage::UnknownSessions(ref msg) => &msg.session,
+			ServersSetChangeMessage::InitializeShareChangeSession(ref msg) => &msg.session,
+			ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref msg) => &msg.session,
+			ServersSetChangeMessage::ServersSetChangeDelegate(ref msg) => &msg.session,
+			ServersSetChangeMessage::ServersSetChangeDelegateResponse(ref msg) => &msg.session,
+			ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref msg) => &msg.session,
+			ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ref msg) => &msg.session,
+			ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ref msg) => &msg.session,
+			ServersSetChangeMessage::ServersSetChangeError(ref msg) => &msg.session,
+			ServersSetChangeMessage::ServersSetChangeCompleted(ref msg) => &msg.session,
+		}
+	}
+
+	pub fn session_nonce(&self) -> u64 {
+		match *self {
+			ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref msg) => msg.session_nonce,
+			ServersSetChangeMessage::UnknownSessionsRequest(ref msg) => msg.session_nonce,
+			ServersSetChangeMessage::UnknownSessions(ref msg) => msg.session_nonce,
+			ServersSetChangeMessage::InitializeShareChangeSession(ref msg) => msg.session_nonce,
+			ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref msg) => msg.session_nonce,
+			ServersSetChangeMessage::ServersSetChangeDelegate(ref msg) => msg.session_nonce,
+			ServersSetChangeMessage::ServersSetChangeDelegateResponse(ref msg) => msg.session_nonce,
+			ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref msg) => msg.session_nonce,
+			ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ref msg) => msg.session_nonce,
+			ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ref msg) => msg.session_nonce,
+			ServersSetChangeMessage::ServersSetChangeError(ref msg) => msg.session_nonce,
+			ServersSetChangeMessage::ServersSetChangeCompleted(ref msg) => msg.session_nonce,
+		}
+	}
+}
+
+impl ShareAddMessage {
+	pub fn session_id(&self) -> &SessionId {
+		match *self {
+			ShareAddMessage::ShareAddConsensusMessage(ref msg) => &msg.session,
+			ShareAddMessage::KeyShareCommon(ref msg) => &msg.session,
+			ShareAddMessage::NewAbsoluteTermShare(ref msg) => &msg.session,
+			ShareAddMessage::NewKeysDissemination(ref msg) => &msg.session,
+			ShareAddMessage::ShareAddError(ref msg) => &msg.session,
+		}
+	}
+
+	pub fn session_nonce(&self) -> u64 {
+		match *self {
+			ShareAddMessage::ShareAddConsensusMessage(ref msg) => msg.session_nonce,
+			ShareAddMessage::KeyShareCommon(ref msg) => msg.session_nonce,
+			ShareAddMessage::NewAbsoluteTermShare(ref msg) => msg.session_nonce,
+			ShareAddMessage::NewKeysDissemination(ref msg) => msg.session_nonce,
+			ShareAddMessage::ShareAddError(ref msg) => msg.session_nonce,
+		}
+	}
+}
+
+impl ShareMoveMessage {
+	pub fn session_id(&self) -> &SessionId {
+		match *self {
+			ShareMoveMessage::ShareMoveConsensusMessage(ref msg) => &msg.session,
+			ShareMoveMessage::ShareMoveRequest(ref msg) => &msg.session,
+			ShareMoveMessage::ShareMove(ref msg) => &msg.session,
+			ShareMoveMessage::ShareMoveConfirm(ref msg) => &msg.session,
+			ShareMoveMessage::ShareMoveError(ref msg) => &msg.session,
+		}
+	}
+
+	pub fn session_nonce(&self) -> u64 {
+		match *self {
+			ShareMoveMessage::ShareMoveConsensusMessage(ref msg) => msg.session_nonce,
+			ShareMoveMessage::ShareMoveRequest(ref msg) => msg.session_nonce,
+			ShareMoveMessage::ShareMove(ref msg) => msg.session_nonce,
+			ShareMoveMessage::ShareMoveConfirm(ref msg) => msg.session_nonce,
+			ShareMoveMessage::ShareMoveError(ref msg) => msg.session_nonce,
+		}
+	}
+}
+
+impl ShareRemoveMessage {
+	pub fn session_id(&self) -> &SessionId {
+		match *self {
+			ShareRemoveMessage::ShareRemoveConsensusMessage(ref msg) => &msg.session,
+			ShareRemoveMessage::ShareRemoveRequest(ref msg) => &msg.session,
+			ShareRemoveMessage::ShareRemoveConfirm(ref msg) => &msg.session,
+			ShareRemoveMessage::ShareRemoveError(ref msg) => &msg.session,
+		}
+	}
+
+	pub fn session_nonce(&self) -> u64 {
+		match *self {
+			ShareRemoveMessage::ShareRemoveConsensusMessage(ref msg) => msg.session_nonce,
+			ShareRemoveMessage::ShareRemoveRequest(ref msg) => msg.session_nonce,
+			ShareRemoveMessage::ShareRemoveConfirm(ref msg) => msg.session_nonce,
+			ShareRemoveMessage::ShareRemoveError(ref msg) => msg.session_nonce,
+		}
+	}
+}
+
 impl fmt::Display for Message {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		match *self {
@@ -561,6 +1113,10 @@ impl fmt::Display for Message {
 			Message::Encryption(ref message) => write!(f, "Encryption.{}", message),
 			Message::Decryption(ref message) => write!(f, "Decryption.{}", message),
 			Message::Signing(ref message) => write!(f, "Signing.{}", message),
+			Message::ServersSetChange(ref message) => write!(f, "ServersSetChange.{}", message),
+			Message::ShareAdd(ref message) => write!(f, "ShareAdd.{}", message),
+			Message::ShareMove(ref message) => write!(f, "ShareMove.{}", message),
+			Message::ShareRemove(ref message) => write!(f, "ShareRemove.{}", message),
 		}
 	}
 }
@@ -609,6 +1165,33 @@ impl fmt::Display for ConsensusMessage {
 	}
 }
 
+impl fmt::Display for ConsensusMessageWithServersSet {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		match *self {
+			ConsensusMessageWithServersSet::InitializeConsensusSession(_) => write!(f, "InitializeConsensusSession"),
+			ConsensusMessageWithServersSet::ConfirmConsensusInitialization(_) => write!(f, "ConfirmConsensusInitialization"),
+		}
+	}
+}
+
+impl fmt::Display for ConsensusMessageWithServersMap {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		match *self {
+			ConsensusMessageWithServersMap::InitializeConsensusSession(_) => write!(f, "InitializeConsensusSession"),
+			ConsensusMessageWithServersMap::ConfirmConsensusInitialization(_) => write!(f, "ConfirmConsensusInitialization"),
+		}
+	}
+}
+
+impl fmt::Display for ConsensusMessageWithServersSecretMap {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		match *self {
+			ConsensusMessageWithServersSecretMap::InitializeConsensusSession(_) => write!(f, "InitializeConsensusSession"),
+			ConsensusMessageWithServersSecretMap::ConfirmConsensusInitialization(_) => write!(f, "ConfirmConsensusInitialization"),
+		}
+	}
+}
+
 impl fmt::Display for DecryptionMessage {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		match *self {
@@ -633,3 +1216,58 @@ impl fmt::Display for SigningMessage {
 		}
 	}
 }
+
+impl fmt::Display for ServersSetChangeMessage {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		match *self {
+			ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref m) => write!(f, "ServersSetChangeConsensusMessage.{}", m.message),
+			ServersSetChangeMessage::UnknownSessionsRequest(_) => write!(f, "UnknownSessionsRequest"),
+			ServersSetChangeMessage::UnknownSessions(_) => write!(f, "UnknownSessions"),
+			ServersSetChangeMessage::InitializeShareChangeSession(_) => write!(f, "InitializeShareChangeSession"),
+			ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(_) => write!(f, "ConfirmShareChangeSessionInitialization"),
+			ServersSetChangeMessage::ServersSetChangeDelegate(_) => write!(f, "ServersSetChangeDelegate"),
+			ServersSetChangeMessage::ServersSetChangeDelegateResponse(_) => write!(f, "ServersSetChangeDelegateResponse"),
+			ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref m) => write!(f, "ServersSetChangeShareAddMessage.{}", m.message),
+			ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ref m) => write!(f, "ServersSetChangeShareMoveMessage.{}", m.message),
+			ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ref m) => write!(f, "ServersSetChangeShareRemoveMessage.{}", m.message),
+			ServersSetChangeMessage::ServersSetChangeError(_) => write!(f, "ServersSetChangeError"),
+			ServersSetChangeMessage::ServersSetChangeCompleted(_) => write!(f, "ServersSetChangeCompleted"),
+		}
+	}
+}
+
+impl fmt::Display for ShareAddMessage {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		match *self {
+			ShareAddMessage::ShareAddConsensusMessage(ref m) => write!(f, "ShareAddConsensusMessage.{}", m.message),
+			ShareAddMessage::KeyShareCommon(_) => write!(f, "KeyShareCommon"),
+			ShareAddMessage::NewAbsoluteTermShare(_) => write!(f, "NewAbsoluteTermShare"),
+			ShareAddMessage::NewKeysDissemination(_) => write!(f, "NewKeysDissemination"),
+			ShareAddMessage::ShareAddError(_) => write!(f, "ShareAddError"),
+
+		}
+	}
+}
+
+impl fmt::Display for ShareMoveMessage {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		match *self {
+			ShareMoveMessage::ShareMoveConsensusMessage(ref m) => write!(f, "ShareMoveConsensusMessage.{}", m.message),
+			ShareMoveMessage::ShareMoveRequest(_) => write!(f, "ShareMoveRequest"),
+			ShareMoveMessage::ShareMove(_) => write!(f, "ShareMove"),
+			ShareMoveMessage::ShareMoveConfirm(_) => write!(f, "ShareMoveConfirm"),
+			ShareMoveMessage::ShareMoveError(_) => write!(f, "ShareMoveError"),
+		}
+	}
+}
+
+impl fmt::Display for ShareRemoveMessage {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		match *self {
+			ShareRemoveMessage::ShareRemoveConsensusMessage(ref m) => write!(f, "InitializeShareRemoveSession.{}", m.message),
+			ShareRemoveMessage::ShareRemoveRequest(_) => write!(f, "ShareRemoveRequest"),
+			ShareRemoveMessage::ShareRemoveConfirm(_) => write!(f, "ShareRemoveConfirm"),
+			ShareRemoveMessage::ShareRemoveError(_) => write!(f, "ShareRemoveError"),
+		}
+	}
+}
@@ -35,7 +35,6 @@ pub use self::decryption_session::Session as DecryptionSession;
 pub use super::node_key_pair::PlainNodeKeyPair;
 #[cfg(test)]
 pub use super::key_storage::tests::DummyKeyStorage;
-#[cfg(test)]
 pub use super::acl_storage::DummyAclStorage;
 #[cfg(test)]
 pub use super::key_server_set::tests::MapKeyServerSet;
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mod admin_sessions;
|
||||||
|
mod client_sessions;
|
||||||
|
|
||||||
|
pub use self::admin_sessions::servers_set_change_session;
|
||||||
|
pub use self::admin_sessions::share_add_session;
|
||||||
|
pub use self::admin_sessions::share_change_session;
|
||||||
|
pub use self::admin_sessions::share_move_session;
|
||||||
|
pub use self::admin_sessions::share_remove_session;
|
||||||
|
|
||||||
|
pub use self::client_sessions::decryption_session;
|
||||||
|
pub use self::client_sessions::encryption_session;
|
||||||
|
pub use self::client_sessions::generation_session;
|
||||||
|
pub use self::client_sessions::signing_session;
|
||||||
|
|
||||||
mod cluster;
|
mod cluster;
|
||||||
mod cluster_sessions;
|
mod cluster_sessions;
|
||||||
mod decryption_session;
|
|
||||||
mod encryption_session;
|
|
||||||
mod generation_session;
|
|
||||||
mod io;
|
mod io;
|
||||||
mod jobs;
|
mod jobs;
|
||||||
pub mod math;
|
pub mod math;
|
||||||
mod message;
|
mod message;
|
||||||
mod signing_session;
|
|
||||||
mod net;
|
mod net;
|
||||||
|
@@ -18,12 +18,16 @@ use std::path::PathBuf;
 use std::collections::BTreeMap;
 use serde_json;
 use ethkey::{Secret, Public};
-use util::Database;
+use util::{Database, DatabaseIterator};
 use types::all::{Error, ServiceConfiguration, ServerKeyId, NodeId};
 use serialization::{SerializablePublic, SerializableSecret};
 
 /// Key of version value.
 const DB_META_KEY_VERSION: &'static [u8; 7] = b"version";
+/// Current db version.
+const CURRENT_VERSION: u8 = 2;
+/// Current type of serialized key shares.
+type CurrentSerializableDocumentKeyShare = SerializableDocumentKeyShareV2;
 
 /// Encrypted key share, stored by key storage on the single key server.
 #[derive(Debug, Clone, PartialEq)]
@@ -34,6 +38,8 @@ pub struct DocumentKeyShare {
 	pub threshold: usize,
 	/// Nodes ids numbers.
 	pub id_numbers: BTreeMap<NodeId, Secret>,
+	/// Polynom1.
+	pub polynom1: Vec<Secret>,
 	/// Node secret share.
 	pub secret_share: Secret,
 	/// Common (shared) encryption point.
@@ -50,8 +56,12 @@ pub trait KeyStorage: Send + Sync {
 	fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error>;
 	/// Get document encryption key
 	fn get(&self, document: &ServerKeyId) -> Result<DocumentKeyShare, Error>;
+	/// Remove document encryption key
+	fn remove(&self, document: &ServerKeyId) -> Result<(), Error>;
 	/// Check if storage contains document encryption key
 	fn contains(&self, document: &ServerKeyId) -> bool;
+	/// Iterate through storage
+	fn iter<'a>(&'a self) -> Box<Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a>;
 }
 
 /// Persistent document encryption keys storage
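
For orientation, here is a minimal sketch (not part of the diff) of the kind of full-storage scan that the new `iter()` method enables. `KeyId`, `Share`, and `shares_by_threshold` are hypothetical stand-ins for `ServerKeyId`, `DocumentKeyShare`, and a caller-side helper; the real trait returns `Box<Iterator<Item = (ServerKeyId, DocumentKeyShare)>>`.

```rust
use std::collections::BTreeMap;

// Stand-ins for ServerKeyId / DocumentKeyShare; illustrative only.
type KeyId = u64;
#[derive(Clone)]
struct Share { threshold: usize }

// Hypothetical helper: group key ids by threshold while walking the storage iterator.
fn shares_by_threshold<I>(shares: I) -> BTreeMap<usize, Vec<KeyId>>
	where I: Iterator<Item = (KeyId, Share)>
{
	let mut grouped: BTreeMap<usize, Vec<KeyId>> = BTreeMap::new();
	for (key_id, share) in shares {
		grouped.entry(share.threshold).or_insert_with(Vec::new).push(key_id);
	}
	grouped
}

fn main() {
	let storage = vec![(1u64, Share { threshold: 1 }), (2, Share { threshold: 1 })];
	let grouped = shares_by_threshold(storage.into_iter());
	assert_eq!(grouped[&1], vec![1, 2]);
}
```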
@@ -59,6 +69,11 @@ pub struct PersistentKeyStorage {
 	db: Database,
 }
 
+/// Persistent document encryption keys storage iterator
+pub struct PersistentKeyStorageIterator<'a> {
+	iter: Option<DatabaseIterator<'a>>,
+}
+
 /// V0 of encrypted key share, as it is stored by key storage on the single key server.
 #[derive(Serialize, Deserialize)]
 struct SerializableDocumentKeyShareV0 {
@@ -91,6 +106,25 @@ struct SerializableDocumentKeyShareV1 {
 	pub encrypted_point: Option<SerializablePublic>,
 }
 
+/// V2 of encrypted key share, as it is stored by key storage on the single key server.
+#[derive(Serialize, Deserialize)]
+struct SerializableDocumentKeyShareV2 {
+	/// Author of the entry.
+	pub author: SerializablePublic,
+	/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
+	pub threshold: usize,
+	/// Nodes ids numbers.
+	pub id_numbers: BTreeMap<SerializablePublic, SerializableSecret>,
+	/// Polynom1.
+	pub polynom1: Vec<SerializableSecret>,
+	/// Node secret share.
+	pub secret_share: SerializableSecret,
+	/// Common (shared) encryption point.
+	pub common_point: Option<SerializablePublic>,
+	/// Encrypted point.
+	pub encrypted_point: Option<SerializablePublic>,
+}
+
 impl PersistentKeyStorage {
 	/// Create new persistent document encryption keys storage
 	pub fn new(config: &ServiceConfiguration) -> Result<Self, Error> {
@@ -113,33 +147,54 @@ fn upgrade_db(db: Database) -> Result<Database, Error> {
 	match version {
 		0 => {
 			let mut batch = db.transaction();
-			batch.put(None, DB_META_KEY_VERSION, &[1]);
+			batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]);
-			for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner) {
+			for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner).filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) {
 				let v0_key = serde_json::from_slice::<SerializableDocumentKeyShareV0>(&db_value).map_err(|e| Error::Database(e.to_string()))?;
-				let v1_key = SerializableDocumentKeyShareV1 {
+				let v2_key = CurrentSerializableDocumentKeyShare {
 					// author is used in separate generation + encrypt sessions.
 					// in v0 there have been only simultaneous GenEnc sessions.
-					author: Public::default().into(),
+					author: Public::default().into(), // added in v1
 					threshold: v0_key.threshold,
 					id_numbers: v0_key.id_numbers,
 					secret_share: v0_key.secret_share,
+					polynom1: Vec::new(), // added in v2
 					common_point: Some(v0_key.common_point),
 					encrypted_point: Some(v0_key.encrypted_point),
 				};
-				let db_value = serde_json::to_vec(&v1_key).map_err(|e| Error::Database(e.to_string()))?;
+				let db_value = serde_json::to_vec(&v2_key).map_err(|e| Error::Database(e.to_string()))?;
 				batch.put(None, &*db_key, &*db_value);
 			}
 			db.write(batch).map_err(Error::Database)?;
 			Ok(db)
 		},
-		1 => Ok(db),
-		_ => Err(Error::Database(format!("unsupported SecretStore database version:? {}", version))),
+		1 => {
+			let mut batch = db.transaction();
+			batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]);
+			for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner).filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) {
+				let v1_key = serde_json::from_slice::<SerializableDocumentKeyShareV1>(&db_value).map_err(|e| Error::Database(e.to_string()))?;
+				let v2_key = CurrentSerializableDocumentKeyShare {
+					author: v1_key.author, // added in v1
+					threshold: v1_key.threshold,
+					id_numbers: v1_key.id_numbers,
+					secret_share: v1_key.secret_share,
+					polynom1: Vec::new(), // added in v2
+					common_point: v1_key.common_point,
+					encrypted_point: v1_key.encrypted_point,
+				};
+				let db_value = serde_json::to_vec(&v2_key).map_err(|e| Error::Database(e.to_string()))?;
+				batch.put(None, &*db_key, &*db_value);
+			}
+			db.write(batch).map_err(Error::Database)?;
+			Ok(db)
+		}
+		2 => Ok(db),
+		_ => Err(Error::Database(format!("unsupported SecretStore database version: {}", version))),
 	}
 }
 
 impl KeyStorage for PersistentKeyStorage {
 	fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> {
-		let key: SerializableDocumentKeyShareV1 = key.into();
+		let key: CurrentSerializableDocumentKeyShare = key.into();
 		let key = serde_json::to_vec(&key).map_err(|e| Error::Database(e.to_string()))?;
 		let mut batch = self.db.transaction();
 		batch.put(None, &document, &key);
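
A minimal, self-contained sketch (not part of the diff) of the migration dispatch used above: every on-disk version below `CURRENT_VERSION` is rewritten straight to the current layout, the current version is a no-op, and anything else is rejected. The `migrate_*` helpers and the simplified return type are stand-ins for illustration only.

```rust
const CURRENT_VERSION: u8 = 2;

// Hypothetical stand-ins for the real per-version rewrites shown in the diff.
fn migrate_v0_to_current() {}
fn migrate_v1_to_current() {}

fn upgrade(version: u8) -> Result<(), String> {
	match version {
		0 => { migrate_v0_to_current(); Ok(()) } // rewrite V0 records as V2, bump the meta key
		1 => { migrate_v1_to_current(); Ok(()) } // rewrite V1 records as V2, bump the meta key
		CURRENT_VERSION => Ok(()),               // already up to date
		v => Err(format!("unsupported SecretStore database version: {}", v)),
	}
}

fn main() {
	assert!(upgrade(1).is_ok());
	assert!(upgrade(3).is_err());
}
```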
@@ -155,15 +210,53 @@ impl KeyStorage for PersistentKeyStorage {
 			.map_err(Error::Database)?
 			.ok_or(Error::DocumentNotFound)
 			.map(|key| key.into_vec())
-			.and_then(|key| serde_json::from_slice::<SerializableDocumentKeyShareV1>(&key).map_err(|e| Error::Database(e.to_string())))
+			.and_then(|key| serde_json::from_slice::<CurrentSerializableDocumentKeyShare>(&key).map_err(|e| Error::Database(e.to_string())))
 			.map(Into::into)
 	}
 
+	fn remove(&self, document: &ServerKeyId) -> Result<(), Error> {
+		let mut batch = self.db.transaction();
+		batch.delete(None, &document);
+		self.db.write(batch).map_err(Error::Database)
+	}
+
 	fn contains(&self, document: &ServerKeyId) -> bool {
 		self.db.get(None, document)
 			.map(|k| k.is_some())
 			.unwrap_or(false)
 	}
+
+	fn iter<'a>(&'a self) -> Box<Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a> {
+		Box::new(PersistentKeyStorageIterator {
+			iter: self.db.iter(None),
+		})
+	}
 }
 
+impl<'a> Iterator for PersistentKeyStorageIterator<'a> {
+	type Item = (ServerKeyId, DocumentKeyShare);
+
+	fn next(&mut self) -> Option<(ServerKeyId, DocumentKeyShare)> {
+		self.iter.as_mut()
+			.and_then(|iter| iter.next()
+				.and_then(|(db_key, db_val)| serde_json::from_slice::<CurrentSerializableDocumentKeyShare>(&db_val)
+					.ok()
+					.map(|key| ((*db_key).into(), key.into()))))
+	}
+}
+
+impl From<DocumentKeyShare> for SerializableDocumentKeyShareV2 {
+	fn from(key: DocumentKeyShare) -> Self {
+		SerializableDocumentKeyShareV2 {
+			author: key.author.into(),
+			threshold: key.threshold,
+			id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
+			secret_share: key.secret_share.into(),
+			polynom1: key.polynom1.into_iter().map(Into::into).collect(),
+			common_point: key.common_point.map(Into::into),
+			encrypted_point: key.encrypted_point.map(Into::into),
+		}
+	}
+}
+
 impl From<DocumentKeyShare> for SerializableDocumentKeyShareV1 {
@@ -179,13 +272,14 @@ impl From<DocumentKeyShare> for SerializableDocumentKeyShareV1 {
 	}
 }
 
-impl From<SerializableDocumentKeyShareV1> for DocumentKeyShare {
-	fn from(key: SerializableDocumentKeyShareV1) -> Self {
+impl From<SerializableDocumentKeyShareV2> for DocumentKeyShare {
+	fn from(key: SerializableDocumentKeyShareV2) -> Self {
 		DocumentKeyShare {
 			author: key.author.into(),
 			threshold: key.threshold,
 			id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
 			secret_share: key.secret_share.into(),
+			polynom1: key.polynom1.into_iter().map(Into::into).collect(),
 			common_point: key.common_point.map(Into::into),
 			encrypted_point: key.encrypted_point.map(Into::into),
 		}
@@ -201,8 +295,9 @@ pub mod tests {
 	use ethkey::{Random, Generator, Public, Secret};
 	use util::Database;
 	use types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, ServerKeyId};
-	use super::{DB_META_KEY_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare,
-		SerializableDocumentKeyShareV0, SerializableDocumentKeyShareV1, upgrade_db};
+	use super::{DB_META_KEY_VERSION, CURRENT_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare,
+		SerializableDocumentKeyShareV0, SerializableDocumentKeyShareV1,
+		CurrentSerializableDocumentKeyShare, upgrade_db};
 
 	/// In-memory document encryption keys storage
 	#[derive(Default)]
@@ -225,9 +320,18 @@ pub mod tests {
 			self.keys.read().get(document).cloned().ok_or(Error::DocumentNotFound)
 		}
 
+		fn remove(&self, document: &ServerKeyId) -> Result<(), Error> {
+			self.keys.write().remove(document);
+			Ok(())
+		}
+
 		fn contains(&self, document: &ServerKeyId) -> bool {
 			self.keys.read().contains_key(document)
 		}
+
+		fn iter<'a>(&'a self) -> Box<Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a> {
+			Box::new(self.keys.read().clone().into_iter())
+		}
 	}
 
 	#[test]
@@ -245,6 +349,7 @@ pub mod tests {
 			},
 			nodes: BTreeMap::new(),
 			allow_connecting_to_higher_nodes: false,
+			admin_public: None,
 		},
 	};
 
@@ -256,6 +361,7 @@ pub mod tests {
 			(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone())
 		].into_iter().collect(),
 		secret_share: Random.generate().unwrap().secret().clone(),
+		polynom1: Vec::new(),
 		common_point: Some(Random.generate().unwrap().public().clone()),
 		encrypted_point: Some(Random.generate().unwrap().public().clone()),
 	};
@@ -267,6 +373,7 @@ pub mod tests {
 			(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone())
 		].into_iter().collect(),
 		secret_share: Random.generate().unwrap().secret().clone(),
+		polynom1: Vec::new(),
 		common_point: Some(Random.generate().unwrap().public().clone()),
 		encrypted_point: Some(Random.generate().unwrap().public().clone()),
 	};
@@ -287,7 +394,7 @@ pub mod tests {
 	}
 
 	#[test]
-	fn upgrade_db_0_to_1() {
+	fn upgrade_db_from_0() {
 		let db_path = RandomTempPath::create_dir();
 		let db = Database::open_default(db_path.as_str()).unwrap();
 
@@ -312,8 +419,8 @@ pub mod tests {
 		let db = upgrade_db(db).unwrap();
 
 		// check upgrade
-		assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], 1);
+		assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], CURRENT_VERSION);
-		let key = serde_json::from_slice::<SerializableDocumentKeyShareV1>(&db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap()).unwrap();
+		let key = serde_json::from_slice::<CurrentSerializableDocumentKeyShare>(&db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap()).unwrap();
 		assert_eq!(Public::default(), key.author.clone().into());
 		assert_eq!(777, key.threshold);
 		assert_eq!(vec![(
@@ -324,4 +431,46 @@ pub mod tests {
 		assert_eq!(Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".parse::<Public>().unwrap()), key.common_point.clone().map(Into::into));
 		assert_eq!(Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".parse::<Public>().unwrap()), key.encrypted_point.clone().map(Into::into));
 	}
+
+	#[test]
+	fn upgrade_db_from_1() {
+		let db_path = RandomTempPath::create_dir();
+		let db = Database::open_default(db_path.as_str()).unwrap();
+
+		// prepare v1 database
+		{
+			let key = serde_json::to_vec(&SerializableDocumentKeyShareV1 {
+				author: "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(),
+				threshold: 777,
+				id_numbers: vec![(
+					"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(),
+					"281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse::<Secret>().unwrap().into(),
+				)].into_iter().collect(),
+				secret_share: "00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::<Secret>().unwrap().into(),
+				common_point: Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".into()),
+				encrypted_point: Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".into()),
+			}).unwrap();
+			let mut batch = db.transaction();
+			batch.put(None, DB_META_KEY_VERSION, &[1]);
+			batch.put(None, &[7], &key);
+			db.write(batch).unwrap();
+		}
+
+		// upgrade database
+		let db = upgrade_db(db).unwrap();
+
+		// check upgrade
+		assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], CURRENT_VERSION);
+		let key = serde_json::from_slice::<CurrentSerializableDocumentKeyShare>(&db.get(None, &[7]).unwrap().map(|key| key.to_vec()).unwrap()).unwrap();
+		assert_eq!(777, key.threshold);
+		assert_eq!(vec![(
+			"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse::<Public>().unwrap(),
+			"281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse::<Secret>().unwrap(),
+		)], key.id_numbers.clone().into_iter().map(|(k, v)| (k.into(), v.into())).collect::<Vec<(Public, Secret)>>());
+		assert_eq!("00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse::<Secret>().unwrap(), key.secret_share.into());
+		assert_eq!(Some("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".parse::<Public>().unwrap()), key.common_point.clone().map(Into::into));
+		assert_eq!(Some("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".parse::<Public>().unwrap()), key.encrypted_point.clone().map(Into::into));
+		assert_eq!(key.author, "b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into());
+		assert_eq!(key.polynom1, vec![]);
+	}
 }
|
@ -29,6 +29,7 @@ extern crate serde;
|
|||||||
extern crate serde_json;
|
extern crate serde_json;
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate serde_derive;
|
extern crate serde_derive;
|
||||||
|
extern crate tiny_keccak;
|
||||||
extern crate tokio_io;
|
extern crate tokio_io;
|
||||||
extern crate tokio_core;
|
extern crate tokio_core;
|
||||||
extern crate tokio_service;
|
extern crate tokio_service;
|
||||||
|
@@ -204,8 +204,29 @@ impl<'a> Deserialize<'a> for SerializableH256 {
 	}
 }
 
+impl PartialEq<SerializableH256> for SerializableH256 {
+	fn eq(&self, other: &Self) -> bool {
+		self.0.eq(&other.0)
+	}
+}
+
+impl Eq for SerializableH256 {
+}
+
+impl PartialOrd for SerializableH256 {
+	fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+		self.0.partial_cmp(&other.0)
+	}
+}
+
+impl Ord for SerializableH256 {
+	fn cmp(&self, other: &Self) -> Ordering {
+		self.0.cmp(&other.0)
+	}
+}
+
 /// Serializable EC scalar/secret key.
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
 pub struct SerializableSecret(pub Secret);
 
 impl<T> From<T> for SerializableSecret where Secret: From<T> {
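
The equality/ordering impls above are what allow the serializable 256-bit id wrapper (used as session/node ids in the new messages) to serve as a `BTreeSet`/`BTreeMap` key, e.g. for the `unknown_sessions` set. A self-contained sketch with a stand-in wrapper type, for illustration only:

```rust
use std::collections::BTreeSet;

// Stand-in for the serializable 256-bit id wrapper; illustrative only.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct WrappedId([u8; 32]);

fn main() {
	// Ord + Eq are required for BTreeSet keys.
	let mut unknown_sessions: BTreeSet<WrappedId> = BTreeSet::new();
	unknown_sessions.insert(WrappedId([7u8; 32]));
	assert!(unknown_sessions.contains(&WrappedId([7u8; 32])));
}
```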
@@ -93,6 +93,8 @@ pub struct ClusterConfiguration {
 	/// Allow outbound connections to 'higher' nodes.
 	/// This is useful for tests, but slower a bit for production.
 	pub allow_connecting_to_higher_nodes: bool,
+	/// Administrator public key.
+	pub admin_public: Option<Public>,
 }
 
 /// Shadow decryption result.
Reference in New Issue
Block a user