1277 lines
46 KiB
Rust
1277 lines
46 KiB
Rust
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
|
// This file is part of Parity Ethereum.
|
|
|
|
// Parity Ethereum is free software: you can redistribute it and/or modify
|
|
// it under the terms of the GNU General Public License as published by
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
// (at your option) any later version.
|
|
|
|
// Parity Ethereum is distributed in the hope that it will be useful,
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
// GNU General Public License for more details.
|
|
|
|
// You should have received a copy of the GNU General Public License
|
|
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
use std::collections::{BTreeSet, BTreeMap, VecDeque};
|
|
use std::fmt::{Debug, Formatter, Error as FmtError};
|
|
use std::time::Duration;
|
|
use std::sync::Arc;
|
|
use parking_lot::{Condvar, Mutex};
|
|
use ethereum_types::Address;
|
|
use ethkey::{Public, Secret};
|
|
use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare, DocumentKeyShareVersion};
|
|
use key_server_cluster::math;
|
|
use key_server_cluster::cluster::Cluster;
|
|
use key_server_cluster::cluster_sessions::ClusterSession;
|
|
use key_server_cluster::message::{Message, GenerationMessage, InitializeSession, ConfirmInitialization, CompleteInitialization,
|
|
KeysDissemination, PublicKeyShare, SessionError, SessionCompleted};
|
|
|
|
/// Distributed key generation session.
/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper:
/// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf
/// Brief overview:
/// 1) initialization: master node (which has received request for generating joint public + secret) initializes the session on all other nodes
/// 2) key dissemination (KD): all nodes are generating secret + public values and send these to appropriate nodes
/// 3) key verification (KV): all nodes are checking values, received for other nodes
/// 4) key generation phase (KG): nodes are exchanging with information, enough to generate joint public key
pub struct SessionImpl {
	/// Unique session id.
	id: SessionId,
	/// Public identifier of this node.
	self_node_id: NodeId,
	/// Key storage. May be absent — NOTE(review): presumably when the generated key
	/// does not need to be persisted; confirm with callers of `SessionParams`.
	key_storage: Option<Arc<KeyStorage>>,
	/// Cluster which allows this node to send messages to other nodes in the cluster.
	cluster: Arc<Cluster>,
	/// Session-level nonce, checked against every incoming message for replay protection.
	nonce: u64,
	/// SessionImpl completion condvar, notified when the session finishes or fails.
	completed: Condvar,
	/// Mutable session data, shared between message handlers.
	data: Mutex<SessionData>,
}
|
|
|
|
/// SessionImpl creation parameters
pub struct SessionParams {
	/// SessionImpl identifier.
	pub id: SessionId,
	/// Id of node, on which this session is running.
	pub self_node_id: Public,
	/// Key storage. Optional — see `SessionImpl::key_storage`.
	pub key_storage: Option<Arc<KeyStorage>>,
	/// Cluster
	pub cluster: Arc<Cluster>,
	/// Session nonce. `None` is allowed when this generation session is wrapped by
	/// another session that performs its own nonce check (see `SessionImpl::new`).
	pub nonce: Option<u64>,
}
|
|
|
|
/// Mutable data of distributed key generation session.
#[derive(Debug)]
struct SessionData {
	/// Current state of the session.
	state: SessionState,
	/// Simulate faulty behaviour? Test hook: when set, `on_keys_dissemination`
	/// fails with an internal "simulated error".
	simulate_faulty_behaviour: bool,

	// === Values, filled when session initialization just starts ===
	/// Reference to the node, which has started this session.
	master: Option<NodeId>,
	/// Address of the creator of the session.
	author: Option<Address>,

	// === Values, filled when session initialization is completed ===
	/// Session origin (if any).
	origin: Option<Address>,
	/// Is zero secret generation session? When true, this node's secret
	/// contribution (polynom1[0]) is forced to zero during KD.
	is_zero: Option<bool>,
	/// Threshold value for this DKG. Only `threshold + 1` will be able to collectively recreate joint secret,
	/// and thus - decrypt message, encrypted with joint public.
	threshold: Option<usize>,
	/// Random point, jointly generated by every node in the cluster.
	derived_point: Option<Public>,
	/// Nodes-specific data.
	nodes: BTreeMap<NodeId, NodeData>,

	// === Values, filled during KD phase ===
	/// Polynom1 (random polynom of degree `threshold`; its free coefficient is this node's secret contribution).
	polynom1: Option<Vec<Secret>>,
	/// Value of polynom1[0], generated by this node.
	secret_coeff: Option<Secret>,

	// === Values, filled during KG phase ===
	/// Secret share, which this node holds. Persistent + private.
	secret_share: Option<Secret>,

	/// === Values, filled when DKG session is completed successfully ===
	/// Key share (or the error that terminated the session).
	key_share: Option<Result<DocumentKeyShare, Error>>,
	/// Jointly generated public key, which can be used to encrypt secret. Public.
	/// Tuple is (joint public, this node's secret coefficient, this node's secret share).
	joint_public_and_secret: Option<Result<(Public, Secret, Secret), Error>>,
}
|
|
|
|
/// Mutable node-specific data.
#[derive(Debug, Clone)]
struct NodeData {
	/// Random unique scalar. Persistent.
	pub id_number: Secret,

	// === Values, filled during KD phase ===
	/// Secret value1, which has been received from this node.
	pub secret1: Option<Secret>,
	/// Secret value2, which has been received from this node.
	pub secret2: Option<Secret>,
	/// Public values, which have been received from this node
	/// (`threshold + 1` of them for a non-zero session).
	pub publics: Option<Vec<Public>>,

	// === Values, filled during KG phase ===
	/// Public share, which has been received from this node.
	pub public_share: Option<Public>,

	// === Values, filled during completion phase ===
	/// Flags marking that node has confirmed session completion (generated key is stored).
	pub completion_confirmed: bool,
}
|
|
|
|
/// Schedule for visiting other nodes of cluster.
/// Used during initialization so that every node mixes its randomness into the
/// derived point exactly once.
#[derive(Debug, Clone, PartialEq)]
pub struct EveryOtherNodeVisitor {
	/// Already visited nodes.
	visited: BTreeSet<NodeId>,
	/// Not yet visited nodes.
	unvisited: VecDeque<NodeId>,
	/// Nodes, which are currently visited.
	in_progress: BTreeSet<NodeId>,
}
|
|
|
|
/// Distributed key generation session state.
#[derive(Debug, Clone, PartialEq)]
pub enum SessionState {
	// === Initialization states ===
	/// Every node starts in this state.
	WaitingForInitialization,
	/// Master node asks every other node to confirm initialization.
	/// Derived point is generated by all nodes in the cluster.
	WaitingForInitializationConfirm(EveryOtherNodeVisitor),
	/// Slave nodes are in this state until initialization completion is reported by master node.
	WaitingForInitializationComplete,

	// === KD phase states ===
	/// Node is waiting for generated keys from every other node.
	WaitingForKeysDissemination,

	// === KG phase states ===
	/// Node is waiting for joint public key share to be received from every other node.
	WaitingForPublicKeyShare,

	// === Generation phase states ===
	/// Node is waiting for session completion/session completion confirmation.
	WaitingForGenerationConfirmation,

	// === Final states of the session ===
	/// Joint public key generation is completed.
	Finished,
	/// Joint public key generation is failed.
	Failed,
}
|
|
|
|
/// The set of nodes participating in a new session, together with the way their
/// id numbers are assigned.
pub enum InitializationNodes {
	/// Id numbers will be randomly generated for every listed node.
	RandomNumbers(BTreeSet<NodeId>),
	/// Id numbers are explicitly provided by the caller.
	SpecificNumbers(BTreeMap<NodeId, Secret>)
}
|
|
|
|
impl InitializationNodes {
|
|
pub fn set(&self) -> BTreeSet<NodeId> {
|
|
match *self {
|
|
InitializationNodes::RandomNumbers(ref nodes) => nodes.clone(),
|
|
InitializationNodes::SpecificNumbers(ref nodes) => nodes.keys().cloned().collect(),
|
|
}
|
|
}
|
|
}
|
|
|
|
impl From<BTreeSet<NodeId>> for InitializationNodes {
|
|
fn from(nodes: BTreeSet<NodeId>) -> Self {
|
|
InitializationNodes::RandomNumbers(nodes)
|
|
}
|
|
}
|
|
|
|
impl From<BTreeMap<NodeId, Secret>> for InitializationNodes {
|
|
fn from(nodes: BTreeMap<NodeId, Secret>) -> Self {
|
|
InitializationNodes::SpecificNumbers(nodes)
|
|
}
|
|
}
|
|
|
|
impl SessionImpl {
|
|
/// Create new generation session.
|
|
pub fn new(params: SessionParams) -> Self {
|
|
SessionImpl {
|
|
id: params.id,
|
|
self_node_id: params.self_node_id,
|
|
key_storage: params.key_storage,
|
|
cluster: params.cluster,
|
|
// when nonce.is_nonce(), generation session is wrapped
|
|
// => nonce is checked somewhere else && we can pass any value
|
|
nonce: params.nonce.unwrap_or_default(),
|
|
completed: Condvar::new(),
|
|
data: Mutex::new(SessionData {
|
|
state: SessionState::WaitingForInitialization,
|
|
simulate_faulty_behaviour: false,
|
|
master: None,
|
|
author: None,
|
|
origin: None,
|
|
is_zero: None,
|
|
threshold: None,
|
|
derived_point: None,
|
|
nodes: BTreeMap::new(),
|
|
polynom1: None,
|
|
secret_coeff: None,
|
|
secret_share: None,
|
|
key_share: None,
|
|
joint_public_and_secret: None,
|
|
}),
|
|
}
|
|
}
|
|
|
|
	/// Get this node Id.
	pub fn node(&self) -> &NodeId {
		&self.self_node_id
	}
|
|
|
|
/// Get derived point.
|
|
#[cfg(test)]
|
|
pub fn derived_point(&self) -> Option<Public> {
|
|
self.data.lock().derived_point.clone()
|
|
}
|
|
|
|
/// Simulate faulty generation session behaviour.
|
|
pub fn simulate_faulty_behaviour(&self) {
|
|
self.data.lock().simulate_faulty_behaviour = true;
|
|
}
|
|
|
|
/// Get session state.
|
|
pub fn state(&self) -> SessionState {
|
|
self.data.lock().state.clone()
|
|
}
|
|
|
|
/// Get session origin.
|
|
pub fn origin(&self) -> Option<Address> {
|
|
self.data.lock().origin.clone()
|
|
}
|
|
|
|
/// Wait for session completion.
|
|
pub fn wait(&self, timeout: Option<Duration>) -> Option<Result<Public, Error>> {
|
|
Self::wait_session(&self.completed, &self.data, timeout, |data| data.joint_public_and_secret.clone()
|
|
.map(|r| r.map(|r| r.0.clone())))
|
|
}
|
|
|
|
/// Get generated public and secret (if any).
|
|
pub fn joint_public_and_secret(&self) -> Option<Result<(Public, Secret, Secret), Error>> {
|
|
self.data.lock().joint_public_and_secret.clone()
|
|
}
|
|
|
|
	/// Start new session initialization. This must be called on master node.
	///
	/// Qualifies the node set, remembers per-node id numbers and either starts the
	/// derived-point visiting round (when there are other nodes), or — for a
	/// single-node cluster — runs every phase locally to completion.
	pub fn initialize(&self, origin: Option<Address>, author: Address, is_zero: bool, threshold: usize, nodes: InitializationNodes) -> Result<(), Error> {
		// sanity checks: this node must belong to the set && the threshold must be satisfiable
		check_cluster_nodes(self.node(), &nodes.set())?;
		check_threshold(threshold, &nodes.set())?;

		let mut data = self.data.lock();

		// check state
		if data.state != SessionState::WaitingForInitialization {
			return Err(Error::InvalidStateForRequest);
		}

		// update state
		data.master = Some(self.node().clone());
		data.author = Some(author.clone());
		data.origin = origin.clone();
		data.is_zero = Some(is_zero);
		data.threshold = Some(threshold);
		match nodes {
			InitializationNodes::RandomNumbers(nodes) => {
				for node_id in nodes {
					// generate node identification parameter
					let node_id_number = math::generate_random_scalar()?;
					data.nodes.insert(node_id, NodeData::with_id_number(node_id_number));
				}
			},
			InitializationNodes::SpecificNumbers(nodes) => {
				// id numbers were chosen by the caller — use them as-is
				for (node_id, node_id_number) in nodes {
					data.nodes.insert(node_id, NodeData::with_id_number(node_id_number));
				}
			},
		}

		// every other node must mix its own randomness into the derived point exactly once
		let mut visit_policy = EveryOtherNodeVisitor::new(self.node(), data.nodes.keys().cloned());
		let derived_point = math::generate_random_point()?;
		match visit_policy.next_node() {
			Some(next_node) => {
				data.state = SessionState::WaitingForInitializationConfirm(visit_policy);

				// start initialization
				self.cluster.send(&next_node, Message::Generation(GenerationMessage::InitializeSession(InitializeSession {
					session: self.id.clone().into(),
					session_nonce: self.nonce,
					origin: origin.map(Into::into),
					author: author.into(),
					nodes: data.nodes.iter().map(|(k, v)| (k.clone().into(), v.id_number.clone().into())).collect(),
					is_zero: data.is_zero.expect("is_zero is filled in initialization phase; KD phase follows initialization phase; qed"),
					threshold: data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"),
					derived_point: derived_point.into(),
				})))
			},
			None => {
				// single-node cluster: there is nobody to visit => run all phases locally.
				// the lock must be released first — the phase methods re-acquire it.
				drop(data);
				self.complete_initialization(derived_point)?;
				self.disseminate_keys()?;
				self.verify_keys()?;
				self.complete_generation()?;

				self.data.lock().state = SessionState::Finished;
				self.completed.notify_all();

				Ok(())
			}
		}
	}
|
|
|
|
/// Process single message.
|
|
pub fn process_message(&self, sender: &NodeId, message: &GenerationMessage) -> Result<(), Error> {
|
|
if self.nonce != message.session_nonce() {
|
|
return Err(Error::ReplayProtection);
|
|
}
|
|
|
|
match message {
|
|
&GenerationMessage::InitializeSession(ref message) =>
|
|
self.on_initialize_session(sender.clone(), message),
|
|
&GenerationMessage::ConfirmInitialization(ref message) =>
|
|
self.on_confirm_initialization(sender.clone(), message),
|
|
&GenerationMessage::CompleteInitialization(ref message) =>
|
|
self.on_complete_initialization(sender.clone(), message),
|
|
&GenerationMessage::KeysDissemination(ref message) =>
|
|
self.on_keys_dissemination(sender.clone(), message),
|
|
&GenerationMessage::PublicKeyShare(ref message) =>
|
|
self.on_public_key_share(sender.clone(), message),
|
|
&GenerationMessage::SessionError(ref message) => {
|
|
self.on_session_error(sender, message.error.clone());
|
|
Ok(())
|
|
},
|
|
&GenerationMessage::SessionCompleted(ref message) =>
|
|
self.on_session_completed(sender.clone(), message),
|
|
}
|
|
}
|
|
|
|
	/// When session initialization message is received.
	///
	/// A slave node validates the proposed node set, mixes its own randomness into
	/// the derived point, responds to the master with `ConfirmInitialization` and
	/// then waits for `CompleteInitialization`.
	pub fn on_initialize_session(&self, sender: NodeId, message: &InitializeSession) -> Result<(), Error> {
		debug_assert!(self.id == *message.session);
		debug_assert!(&sender != self.node());

		// check message
		let nodes_ids = message.nodes.keys().cloned().map(Into::into).collect();
		check_threshold(message.threshold, &nodes_ids)?;
		check_cluster_nodes(self.node(), &nodes_ids)?;

		let mut data = self.data.lock();

		// check state
		if data.state != SessionState::WaitingForInitialization {
			return Err(Error::InvalidStateForRequest);
		}

		// update derived point with random scalar
		let mut derived_point = message.derived_point.clone().into();
		math::update_random_point(&mut derived_point)?;

		// send confirmation back to master node
		self.cluster.send(&sender, Message::Generation(GenerationMessage::ConfirmInitialization(ConfirmInitialization {
			session: self.id.clone().into(),
			session_nonce: self.nonce,
			derived_point: derived_point.into(),
		})))?;

		// update state (only after the confirmation was successfully sent)
		data.master = Some(sender);
		data.author = Some(message.author.clone().into());
		data.state = SessionState::WaitingForInitializationComplete;
		data.nodes = message.nodes.iter().map(|(id, number)| (id.clone().into(), NodeData::with_id_number(number.clone().into()))).collect();
		data.origin = message.origin.clone().map(Into::into);
		data.is_zero = Some(message.is_zero);
		data.threshold = Some(message.threshold);

		Ok(())
	}
|
|
|
|
	/// When session initialization confirmation message is received (master node only).
	///
	/// Marks the sender as visited and forwards `InitializeSession` (carrying the
	/// updated derived point) to the next unvisited node; once every node has
	/// confirmed, completes initialization and starts keys dissemination.
	pub fn on_confirm_initialization(&self, sender: NodeId, message: &ConfirmInitialization) -> Result<(), Error> {
		debug_assert!(self.id == *message.session);
		debug_assert!(&sender != self.node());

		let mut data = self.data.lock();
		debug_assert!(data.nodes.contains_key(&sender));

		// check state && select new node to be initialized
		let next_receiver = match data.state {
			SessionState::WaitingForInitializationConfirm(ref mut visit_policy) => {
				// confirmation must come from the node we are currently visiting
				if !visit_policy.mark_visited(&sender) {
					return Err(Error::InvalidStateForRequest);
				}

				visit_policy.next_node()
			},
			_ => return Err(Error::InvalidStateForRequest),
		};

		// proceed message
		if let Some(next_receiver) = next_receiver {
			return self.cluster.send(&next_receiver, Message::Generation(GenerationMessage::InitializeSession(InitializeSession {
				session: self.id.clone().into(),
				session_nonce: self.nonce,
				origin: data.origin.clone().map(Into::into),
				author: data.author.as_ref().expect("author is filled on initialization step; confrm initialization follows initialization; qed").clone().into(),
				nodes: data.nodes.iter().map(|(k, v)| (k.clone().into(), v.id_number.clone().into())).collect(),
				is_zero: data.is_zero.expect("is_zero is filled in initialization phase; KD phase follows initialization phase; qed"),
				threshold: data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"),
				derived_point: message.derived_point.clone().into(),
			})));
		}

		// now it is time for keys dissemination (KD) phase
		// (release the lock first: the phase methods re-acquire it)
		drop(data);
		self.complete_initialization(message.derived_point.clone().into())?;
		self.disseminate_keys()
	}
|
|
|
|
/// When session initialization completion message is received.
|
|
pub fn on_complete_initialization(&self, sender: NodeId, message: &CompleteInitialization) -> Result<(), Error> {
|
|
debug_assert!(self.id == *message.session);
|
|
debug_assert!(&sender != self.node());
|
|
|
|
let mut data = self.data.lock();
|
|
|
|
// check state
|
|
if data.state != SessionState::WaitingForInitializationComplete {
|
|
return Err(Error::InvalidStateForRequest);
|
|
}
|
|
if data.master != Some(sender) {
|
|
return Err(Error::InvalidMessage);
|
|
}
|
|
|
|
// remember passed data
|
|
data.derived_point = Some(message.derived_point.clone().into());
|
|
|
|
// now it is time for keys dissemination (KD) phase
|
|
drop(data);
|
|
self.disseminate_keys()
|
|
}
|
|
|
|
	/// When keys dissemination message is received.
	///
	/// Stores the sender's `secret1`/`secret2`/`publics`; once values from every
	/// other node have arrived, proceeds to key verification (KV).
	pub fn on_keys_dissemination(&self, sender: NodeId, message: &KeysDissemination) -> Result<(), Error> {
		debug_assert!(self.id == *message.session);
		debug_assert!(&sender != self.node());

		let mut data = self.data.lock();

		// simulate failure, if required
		if data.simulate_faulty_behaviour {
			return Err(Error::Internal("simulated error".into()));
		}

		// check state: a faster node may disseminate before this node has finished
		// initialization => report TooEarlyForRequest so the message can be retried
		if data.state != SessionState::WaitingForKeysDissemination {
			match data.state {
				SessionState::WaitingForInitializationComplete | SessionState::WaitingForInitializationConfirm(_) => return Err(Error::TooEarlyForRequest),
				_ => return Err(Error::InvalidStateForRequest),
			}
		}
		debug_assert!(data.nodes.contains_key(&sender));

		// check message: a non-zero session must carry exactly `threshold + 1` public values
		let is_zero = data.is_zero.expect("is_zero is filled in initialization phase; KD phase follows initialization phase; qed");
		let threshold = data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed");
		if !is_zero && message.publics.len() != threshold + 1 {
			return Err(Error::InvalidMessage);
		}

		// update node data (only a single dissemination message per node is accepted)
		{
			let node_data = data.nodes.get_mut(&sender).ok_or(Error::InvalidMessage)?;
			if node_data.secret1.is_some() || node_data.secret2.is_some() || node_data.publics.is_some() {
				return Err(Error::InvalidStateForRequest);
			}

			node_data.secret1 = Some(message.secret1.clone().into());
			node_data.secret2 = Some(message.secret2.clone().into());
			node_data.publics = Some(message.publics.iter().cloned().map(Into::into).collect());
		}

		// check if we have received keys from every other node
		if data.nodes.iter().any(|(node_id, node_data)| node_id != self.node() && (node_data.publics.is_none() || node_data.secret1.is_none() || node_data.secret2.is_none())) {
			return Ok(())
		}

		// release the lock: verify_keys re-acquires it
		drop(data);
		self.verify_keys()
	}
|
|
|
|
/// When public key share is received.
|
|
pub fn on_public_key_share(&self, sender: NodeId, message: &PublicKeyShare) -> Result<(), Error> {
|
|
let mut data = self.data.lock();
|
|
|
|
// check state
|
|
if data.state != SessionState::WaitingForPublicKeyShare {
|
|
match data.state {
|
|
SessionState::WaitingForInitializationComplete |
|
|
SessionState::WaitingForKeysDissemination => return Err(Error::TooEarlyForRequest),
|
|
_ => return Err(Error::InvalidStateForRequest),
|
|
}
|
|
}
|
|
|
|
// update node data with received public share
|
|
{
|
|
let node_data = &mut data.nodes.get_mut(&sender).ok_or(Error::InvalidMessage)?;
|
|
if node_data.public_share.is_some() {
|
|
return Err(Error::InvalidMessage);
|
|
}
|
|
|
|
node_data.public_share = Some(message.public_share.clone().into());
|
|
}
|
|
|
|
// if there's also nodes, which has not sent us their public shares - do nothing
|
|
if data.nodes.iter().any(|(node_id, node_data)| node_id != self.node() && node_data.public_share.is_none()) {
|
|
return Ok(());
|
|
}
|
|
|
|
drop(data);
|
|
self.complete_generation()
|
|
}
|
|
|
|
	/// When session completion message is received.
	///
	/// On a slave node: computes the joint public, persists the key share (when a
	/// key storage is configured) and replies to the master with its own
	/// `SessionCompleted` confirmation. On the master node: counts confirmations
	/// and finishes the session once every node has confirmed.
	pub fn on_session_completed(&self, sender: NodeId, message: &SessionCompleted) -> Result<(), Error> {
		debug_assert!(self.id == *message.session);
		debug_assert!(&sender != self.node());

		let mut data = self.data.lock();
		debug_assert!(data.nodes.contains_key(&sender));

		// check state
		if data.state != SessionState::WaitingForGenerationConfirmation {
			match data.state {
				SessionState::WaitingForPublicKeyShare => return Err(Error::TooEarlyForRequest),
				_ => return Err(Error::InvalidStateForRequest),
			}
		}

		// if we are not masters, save result and respond with confirmation
		if data.master.as_ref() != Some(self.node()) {
			// check that we have received message from master
			if data.master.as_ref() != Some(&sender) {
				return Err(Error::InvalidMessage);
			}

			// calculate joint public key
			let is_zero = data.is_zero.expect("is_zero is filled in initialization phase; KG phase follows initialization phase; qed");
			let joint_public = if !is_zero {
				let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed"));
				math::compute_joint_public(public_shares)?
			} else {
				// zero-secret session: the joint public is the zero/default point
				Default::default()
			};

			// save encrypted data to key storage
			let encrypted_data = DocumentKeyShare {
				author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(),
				threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"),
				public: joint_public,
				common_point: None,
				encrypted_point: None,
				versions: vec![DocumentKeyShareVersion::new(
					data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(),
					data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(),
				)],
			};

			// key storage may be absent — NOTE(review): presumably when the key need not be persisted; confirm with callers
			if let Some(ref key_storage) = self.key_storage {
				key_storage.insert(self.id.clone(), encrypted_data.clone())?;
			}

			// then respond with confirmation
			data.state = SessionState::Finished;
			return self.cluster.send(&sender, Message::Generation(GenerationMessage::SessionCompleted(SessionCompleted {
				session: self.id.clone().into(),
				session_nonce: self.nonce,
			})));
		}

		// remember that we have received confirmation from sender node
		{
			let sender_node = data.nodes.get_mut(&sender).expect("node is always qualified by himself; qed");
			if sender_node.completion_confirmed {
				return Err(Error::InvalidMessage);
			}

			sender_node.completion_confirmed = true;
		}

		// check if we have received confirmations from all cluster nodes
		if data.nodes.iter().any(|(_, node_data)| !node_data.completion_confirmed) {
			return Ok(())
		}

		// we have received enough confirmations => complete session
		data.state = SessionState::Finished;
		self.completed.notify_all();

		Ok(())
	}
|
|
|
|
/// Complete initialization (when all other nodex has responded with confirmation)
|
|
fn complete_initialization(&self, mut derived_point: Public) -> Result<(), Error> {
|
|
// update point once again to make sure that derived point is not generated by last node
|
|
math::update_random_point(&mut derived_point)?;
|
|
|
|
// remember derived point
|
|
let mut data = self.data.lock();
|
|
data.derived_point = Some(derived_point.clone().into());
|
|
|
|
// broadcast derived point && other session paraeters to every other node
|
|
self.cluster.broadcast(Message::Generation(GenerationMessage::CompleteInitialization(CompleteInitialization {
|
|
session: self.id.clone().into(),
|
|
session_nonce: self.nonce,
|
|
derived_point: derived_point.into(),
|
|
})))
|
|
}
|
|
|
|
	/// Keys dissemination (KD) phase.
	///
	/// Generates two random polynoms of degree `threshold`, computes per-node
	/// secret pairs plus the public values, sends them to every other node and
	/// stores this node's own values directly.
	fn disseminate_keys(&self) -> Result<(), Error> {
		let mut data = self.data.lock();

		// pick 2t + 2 random numbers as polynomial coefficients for 2 polynoms
		let threshold = data.threshold.expect("threshold is filled on initialization phase; KD phase follows initialization phase; qed");
		let is_zero = data.is_zero.expect("is_zero is filled on initialization phase; KD phase follows initialization phase; qed");
		let mut polynom1 = math::generate_random_polynom(threshold)?;
		if is_zero {
			// zero-secret session: force this node's secret contribution to zero
			polynom1[0] = math::zero_scalar();
		}
		let polynom2 = math::generate_random_polynom(threshold)?;
		data.polynom1 = Some(polynom1.clone());
		data.secret_coeff = Some(polynom1[0].clone());

		// compute t+1 public values
		let publics = match is_zero {
			false => math::public_values_generation(threshold,
				data.derived_point.as_ref().expect("keys dissemination occurs after derived point is agreed; qed"),
				&polynom1,
				&polynom2)?,
			true => Default::default(),
		};

		// compute secret values for every other node
		for (node, node_data) in data.nodes.iter_mut() {
			let secret1 = math::compute_polynom(&polynom1, &node_data.id_number)?;
			let secret2 = math::compute_polynom(&polynom2, &node_data.id_number)?;

			// send a message containing secret1 && secret2 to other node
			if node != self.node() {
				self.cluster.send(&node, Message::Generation(GenerationMessage::KeysDissemination(KeysDissemination {
					session: self.id.clone().into(),
					session_nonce: self.nonce,
					secret1: secret1.into(),
					secret2: secret2.into(),
					publics: publics.iter().cloned().map(Into::into).collect(),
				})))?;
			} else {
				// own values are stored locally instead of being sent
				node_data.secret1 = Some(secret1);
				node_data.secret2 = Some(secret2);
				node_data.publics = Some(publics.clone());
			}
		}

		// update state
		data.state = SessionState::WaitingForKeysDissemination;

		Ok(())
	}
|
|
|
|
	/// Keys verification (KV) phase.
	///
	/// Checks that the secret values received from every other node are consistent
	/// with the public values they published, computes this node's secret && public
	/// shares and broadcasts the public share to the cluster.
	fn verify_keys(&self) -> Result<(), Error> {
		let mut data = self.data.lock();

		// key verification (KV) phase: check that other nodes have passed correct secrets
		let threshold = data.threshold.expect("threshold is filled in initialization phase; KV phase follows initialization phase; qed");
		let is_zero = data.is_zero.expect("is_zero is filled in initialization phase; KV phase follows initialization phase; qed");
		let self_public_share = {
			if !is_zero {
				let derived_point = data.derived_point.clone().expect("derived point generated on initialization phase; KV phase follows initialization phase; qed");
				let number_id = data.nodes[self.node()].id_number.clone();
				for (_ , node_data) in data.nodes.iter_mut().filter(|&(node_id, _)| node_id != self.node()) {
					let secret1 = node_data.secret1.as_ref().expect("keys received on KD phase; KV phase follows KD phase; qed");
					let secret2 = node_data.secret2.as_ref().expect("keys received on KD phase; KV phase follows KD phase; qed");
					let publics = node_data.publics.as_ref().expect("keys received on KD phase; KV phase follows KD phase; qed");
					let is_key_verification_ok = math::keys_verification(threshold, &derived_point, &number_id,
						secret1, secret2, publics)?;

					if !is_key_verification_ok {
						// node has sent us incorrect values. In original ECDKG protocol we should have sent complaint here.
						return Err(Error::InvalidMessage);
					}
				}

				// calculate public share
				let self_public_share = {
					let self_secret_coeff = data.secret_coeff.as_ref().expect("secret_coeff is generated on KD phase; KG phase follows KD phase; qed");
					math::compute_public_share(self_secret_coeff)?
				};

				self_public_share
			} else {
				// TODO [Trust]: add verification when available
				Default::default()
			}
		};

		// calculate self secret + public shares
		let self_secret_share = {
			let secret_values_iter = data.nodes.values()
				.map(|n| n.secret1.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed"));
			math::compute_secret_share(secret_values_iter)?
		};

		// update state
		data.state = SessionState::WaitingForPublicKeyShare;
		data.secret_share = Some(self_secret_share);
		let self_node = data.nodes.get_mut(self.node()).expect("node is always qualified by himself; qed");
		self_node.public_share = Some(self_public_share.clone());

		// broadcast self public key share
		self.cluster.broadcast(Message::Generation(GenerationMessage::PublicKeyShare(PublicKeyShare {
			session: self.id.clone().into(),
			session_nonce: self.nonce,
			public_share: self_public_share.into(),
		})))
	}
|
|
|
|
	/// Complete generation (KG) phase.
	///
	/// Computes the joint public key and prepares the key share; a slave node then
	/// simply waits for the master's `SessionCompleted`, while the master persists
	/// the share (when a key storage is configured) and broadcasts
	/// `SessionCompleted` to the cluster.
	fn complete_generation(&self) -> Result<(), Error> {
		let mut data = self.data.lock();

		// calculate joint public key
		let is_zero = data.is_zero.expect("is_zero is filled in initialization phase; KG phase follows initialization phase; qed");
		let joint_public = if !is_zero {
			let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed"));
			math::compute_joint_public(public_shares)?
		} else {
			// zero-secret session: the joint public is the zero/default point
			Default::default()
		};

		// prepare key data
		let secret_share = data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone();
		let encrypted_data = DocumentKeyShare {
			author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(),
			threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"),
			public: joint_public.clone(),
			common_point: None,
			encrypted_point: None,
			versions: vec![DocumentKeyShareVersion::new(
				data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(),
				secret_share.clone(),
			)],
		};

		// if we are at the slave node - wait for session completion
		let secret_coeff = data.secret_coeff.as_ref().expect("secret coeff is selected on initialization phase; current phase follows initialization; qed").clone();
		if data.master.as_ref() != Some(self.node()) {
			data.key_share = Some(Ok(encrypted_data));
			data.joint_public_and_secret = Some(Ok((joint_public, secret_coeff, secret_share)));
			data.state = SessionState::WaitingForGenerationConfirmation;
			return Ok(());
		}

		// then save encrypted data to the key storage
		// (key storage may be absent — NOTE(review): presumably when the key need not be persisted; confirm with callers)
		if let Some(ref key_storage) = self.key_storage {
			key_storage.insert(self.id.clone(), encrypted_data.clone())?;
		}

		// then distribute encrypted data to every other node
		self.cluster.broadcast(Message::Generation(GenerationMessage::SessionCompleted(SessionCompleted {
			session: self.id.clone().into(),
			session_nonce: self.nonce,
		})))?;

		// then wait for confirmation from all other nodes
		{
			let self_node = data.nodes.get_mut(self.node()).expect("node is always qualified by himself; qed");
			self_node.completion_confirmed = true;
		}
		data.key_share = Some(Ok(encrypted_data));
		data.joint_public_and_secret = Some(Ok((joint_public, secret_coeff, secret_share)));
		data.state = SessionState::WaitingForGenerationConfirmation;

		Ok(())
	}
|
|
}
|
|
|
|
impl ClusterSession for SessionImpl {
|
|
type Id = SessionId;
|
|
|
|
fn type_name() -> &'static str {
|
|
"generation"
|
|
}
|
|
|
|
fn id(&self) -> SessionId {
|
|
self.id.clone()
|
|
}
|
|
|
|
fn is_finished(&self) -> bool {
|
|
let data = self.data.lock();
|
|
data.state == SessionState::Failed
|
|
|| data.state == SessionState::Finished
|
|
}
|
|
|
|
fn on_node_timeout(&self, node: &NodeId) {
|
|
let mut data = self.data.lock();
|
|
|
|
// all nodes are required for generation session
|
|
// => fail without check
|
|
warn!("{}: generation session failed because {} connection has timeouted", self.node(), node);
|
|
|
|
data.state = SessionState::Failed;
|
|
data.key_share = Some(Err(Error::NodeDisconnected));
|
|
data.joint_public_and_secret = Some(Err(Error::NodeDisconnected));
|
|
self.completed.notify_all();
|
|
}
|
|
|
|
fn on_session_timeout(&self) {
|
|
let mut data = self.data.lock();
|
|
|
|
warn!("{}: generation session failed with timeout", self.node());
|
|
|
|
data.state = SessionState::Failed;
|
|
data.key_share = Some(Err(Error::NodeDisconnected));
|
|
data.joint_public_and_secret = Some(Err(Error::NodeDisconnected));
|
|
self.completed.notify_all();
|
|
}
|
|
|
|
fn on_session_error(&self, node: &NodeId, error: Error) {
|
|
// error in generation session is considered fatal
|
|
// => broadcast error if error occured on this node
|
|
if *node == self.self_node_id {
|
|
// do not bother processing send error, as we already processing error
|
|
let _ = self.cluster.broadcast(Message::Generation(GenerationMessage::SessionError(SessionError {
|
|
session: self.id.clone().into(),
|
|
session_nonce: self.nonce,
|
|
error: error.clone().into(),
|
|
})));
|
|
}
|
|
|
|
let mut data = self.data.lock();
|
|
data.state = SessionState::Failed;
|
|
data.key_share = Some(Err(error.clone()));
|
|
data.joint_public_and_secret = Some(Err(error));
|
|
self.completed.notify_all();
|
|
}
|
|
|
|
fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
|
|
match *message {
|
|
Message::Generation(ref message) => self.process_message(sender, message),
|
|
_ => unreachable!("cluster checks message to be correct before passing; qed"),
|
|
}
|
|
}
|
|
}
|
|
|
|
impl EveryOtherNodeVisitor {
|
|
pub fn new<I>(self_id: &NodeId, nodes: I) -> Self where I: Iterator<Item=NodeId> {
|
|
EveryOtherNodeVisitor {
|
|
visited: BTreeSet::new(),
|
|
unvisited: nodes.filter(|n| n != self_id).collect(),
|
|
in_progress: BTreeSet::new(),
|
|
}
|
|
}
|
|
|
|
pub fn next_node(&mut self) -> Option<NodeId> {
|
|
let next_node = self.unvisited.pop_front();
|
|
if let Some(ref next_node) = next_node {
|
|
self.in_progress.insert(next_node.clone());
|
|
}
|
|
next_node
|
|
}
|
|
|
|
pub fn mark_visited(&mut self, node: &NodeId) -> bool {
|
|
if !self.in_progress.remove(node) {
|
|
return false;
|
|
}
|
|
self.visited.insert(node.clone())
|
|
}
|
|
}
|
|
|
|
impl NodeData {
|
|
fn with_id_number(node_id_number: Secret) -> Self {
|
|
NodeData {
|
|
id_number: node_id_number,
|
|
secret1: None,
|
|
secret2: None,
|
|
publics: None,
|
|
public_share: None,
|
|
completion_confirmed: false,
|
|
}
|
|
}
|
|
}
|
|
|
|
impl Debug for SessionImpl {
|
|
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
|
|
write!(f, "Generation session {} on {}", self.id, self.self_node_id)
|
|
}
|
|
}
|
|
|
|
/// Checks that this node is itself a member of the session's node set.
///
/// NOTE(review): a membership violation panics via `assert!` instead of
/// returning an `Err`, unlike the sibling `check_threshold` — callers are
/// apparently expected to have qualified `self_node_id` into `nodes` before
/// calling; confirm this invariant holds at every call site.
fn check_cluster_nodes(self_node_id: &NodeId, nodes: &BTreeSet<NodeId>) -> Result<(), Error> {
	assert!(nodes.contains(self_node_id));
	Ok(())
}
|
|
|
|
fn check_threshold(threshold: usize, nodes: &BTreeSet<NodeId>) -> Result<(), Error> {
|
|
// at least threshold + 1 nodes are required to collectively decrypt message
|
|
if threshold >= nodes.len() {
|
|
return Err(Error::NotEnoughNodesForThreshold);
|
|
}
|
|
|
|
Ok(())
|
|
}
|
|
|
|
#[cfg(test)]
pub mod tests {
	//! Unit tests for the distributed key generation session. Tests drive the
	//! message loop step-by-step (take_and_process_message) so that individual
	//! protocol states can be probed between messages.

	use std::sync::Arc;
	use ethereum_types::H256;
	use ethkey::{Random, Generator, KeyPair, Secret};
	use key_server_cluster::{NodeId, Error, KeyStorage};
	use key_server_cluster::message::{self, Message, GenerationMessage, KeysDissemination,
		PublicKeyShare, ConfirmInitialization};
	use key_server_cluster::cluster::tests::{MessageLoop as ClusterMessageLoop, make_clusters_and_preserve_sessions};
	use key_server_cluster::cluster_sessions::ClusterSession;
	use key_server_cluster::generation_session::{SessionImpl, SessionState};
	use key_server_cluster::math;
	use key_server_cluster::math::tests::do_encryption_and_decryption;

	/// Test harness: wraps the generic cluster message loop, exposing
	/// generation-session-specific helpers on top of it.
	#[derive(Debug)]
	pub struct MessageLoop(pub ClusterMessageLoop);

	impl MessageLoop {
		/// Creates a message loop over `num_nodes` interconnected test clusters.
		pub fn new(num_nodes: usize) -> Self {
			MessageLoop(make_clusters_and_preserve_sessions(num_nodes))
		}

		/// Starts a generation session with the given threshold, node 0 acting
		/// as master. Returns Err if the session cannot be initialized.
		pub fn init(self, threshold: usize) -> Result<Self, Error> {
			self.0.cluster(0).client().new_generation_session(Default::default(), None, Default::default(), threshold)
				.map(|_| self)
		}

		/// Returns the (single) generation session running on node `idx`.
		pub fn session_at(&self, idx: usize) -> Arc<SessionImpl> {
			self.0.sessions(idx).generation_sessions.first().unwrap()
		}

		/// Returns the (single) generation session running on the given node.
		pub fn session_of(&self, node: &NodeId) -> Arc<SessionImpl> {
			self.0.sessions_of(node).generation_sessions.first().unwrap()
		}

		/// Takes the next queued message, panicking unless it is a
		/// ConfirmInitialization message.
		pub fn take_message_confirm_initialization(&self) -> (NodeId, NodeId, ConfirmInitialization) {
			match self.0.take_message() {
				Some((from, to, Message::Generation(GenerationMessage::ConfirmInitialization(msg)))) =>
					(from, to, msg),
				_ => panic!("unexpected"),
			}
		}

		/// Takes the next queued message, panicking unless it is a
		/// KeysDissemination message.
		pub fn take_message_keys_dissemination(&self) -> (NodeId, NodeId, KeysDissemination) {
			match self.0.take_message() {
				Some((from, to, Message::Generation(GenerationMessage::KeysDissemination(msg)))) =>
					(from, to, msg),
				_ => panic!("unexpected"),
			}
		}

		/// Takes the next queued message, panicking unless it is a
		/// PublicKeyShare message.
		pub fn take_message_public_key_share(&self) -> (NodeId, NodeId, PublicKeyShare) {
			match self.0.take_message() {
				Some((from, to, Message::Generation(GenerationMessage::PublicKeyShare(msg)))) =>
					(from, to, msg),
				_ => panic!("unexpected"),
			}
		}

		/// Returns the id numbers of all nodes as seen by node 0's session.
		pub fn nodes_id_numbers(&self) -> Vec<Secret> {
			let session = self.session_at(0);
			let session_data = session.data.lock();
			session_data.nodes.values().map(|n| n.id_number.clone()).collect()
		}

		/// Returns each node's own secret share (read from every node's session).
		pub fn nodes_secret_shares(&self) -> Vec<Secret> {
			(0..self.0.nodes().len()).map(|i| {
				let session = self.session_at(i);
				let session_data = session.data.lock();
				session_data.secret_share.as_ref().unwrap().clone()
			}).collect()
		}

		/// Recovers the joint key pair by interpolating `threshold + 1` secret
		/// shares — only possible in tests, where all shares are visible.
		pub fn compute_key_pair(&self) -> KeyPair {
			let t = self.0.key_storage(0).get(&Default::default()).unwrap().unwrap().threshold;
			let secret_shares = self.nodes_secret_shares();
			let id_numbers = self.nodes_id_numbers();
			let secret_shares = secret_shares.iter().take(t + 1).collect::<Vec<_>>();
			let id_numbers = id_numbers.iter().take(t + 1).collect::<Vec<_>>();
			let joint_secret = math::compute_joint_secret_from_shares(t, &secret_shares, &id_numbers).unwrap();

			KeyPair::from_secret(joint_secret).unwrap()
		}

		/// Returns the hash of the latest key version stored on node 0.
		pub fn key_version(&self) -> H256 {
			self.0.key_storage(0).get(&Default::default())
				.unwrap().unwrap().versions.iter().last().unwrap().hash
		}
	}

	#[test]
	fn initializes_in_cluster_of_single_node() {
		MessageLoop::new(1).init(0).unwrap();
	}

	#[test]
	fn fails_to_initialize_if_threshold_is_wrong() {
		// threshold must be strictly less than the number of nodes
		assert_eq!(MessageLoop::new(2).init(2).unwrap_err(), Error::NotEnoughNodesForThreshold);
	}

	#[test]
	fn fails_to_initialize_when_already_initialized() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		assert_eq!(
			ml.session_at(0).initialize(Default::default(), Default::default(), false, 0, ml.0.nodes().into()),
			Err(Error::InvalidStateForRequest),
		);
	}

	#[test]
	fn fails_to_accept_initialization_when_already_initialized() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		// deliver InitializeSession once, then replay the same message
		let (from, to, msg) = ml.0.take_message().unwrap();
		ml.0.process_message(from, to, msg.clone());
		assert_eq!(
			ml.session_of(&to).on_message(&from, &msg),
			Err(Error::InvalidStateForRequest),
		);
	}

	#[test]
	fn slave_updates_derived_point_on_initialization() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		// remember the derived point sent by the master ...
		let original_point = match ml.0.take_message().unwrap() {
			(from, to, Message::Generation(GenerationMessage::InitializeSession(msg))) => {
				let original_point = msg.derived_point.clone();
				let msg = Message::Generation(GenerationMessage::InitializeSession(msg));
				ml.0.process_message(from, to, msg);
				original_point
			},
			_ => panic!("unexpected"),
		};

		// ... and check that the slave has mixed in its own randomness
		match ml.0.take_message().unwrap() {
			(_, _, Message::Generation(GenerationMessage::ConfirmInitialization(msg))) =>
				assert!(original_point != msg.derived_point),
			_ => panic!("unexpected"),
		}
	}

	#[test]
	fn fails_to_accept_initialization_confirmation_if_already_accepted_from_the_same_node() {
		let ml = MessageLoop::new(3).init(0).unwrap();
		ml.0.take_and_process_message();

		// process one confirmation, then replay it directly on the session
		let (from, to, msg) = ml.take_message_confirm_initialization();
		ml.0.process_message(from, to, Message::Generation(GenerationMessage::ConfirmInitialization(msg.clone())));
		assert_eq!(ml.session_of(&to).on_confirm_initialization(from, &msg), Err(Error::InvalidStateForRequest));
	}

	#[test]
	fn fails_to_accept_initialization_confirmation_if_initialization_already_completed() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		ml.0.take_and_process_message();
		ml.0.take_and_process_message();
		assert_eq!(ml.session_at(0).on_confirm_initialization(ml.0.node(1), &message::ConfirmInitialization {
			session: Default::default(),
			session_nonce: 0,
			derived_point: math::generate_random_point().unwrap().into(),
		}), Err(Error::InvalidStateForRequest));
	}

	#[test]
	fn master_updates_derived_point_on_initialization_completion() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		ml.0.take_and_process_message();
		// capture the derived point the slave confirmed with ...
		let original_point = match ml.0.take_message().unwrap() {
			(from, to, Message::Generation(GenerationMessage::ConfirmInitialization(msg))) => {
				let original_point = msg.derived_point.clone();
				let msg = Message::Generation(GenerationMessage::ConfirmInitialization(msg));
				ml.session_of(&to).on_message(&from, &msg).unwrap();
				original_point
			},
			_ => panic!("unexpected"),
		};

		// ... and check that the master has mixed in its own randomness
		assert!(ml.session_at(0).derived_point().unwrap() != original_point.into());
	}

	#[test]
	fn fails_to_complete_initialization_if_not_waiting_for_it() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		ml.0.take_and_process_message();
		assert_eq!(ml.session_at(0).on_complete_initialization(ml.0.node(1), &message::CompleteInitialization {
			session: Default::default(),
			session_nonce: 0,
			derived_point: math::generate_random_point().unwrap().into(),
		}), Err(Error::InvalidStateForRequest));
	}

	#[test]
	fn fails_to_complete_initialization_from_non_master_node() {
		let ml = MessageLoop::new(3).init(0).unwrap();
		ml.0.take_and_process_message();
		ml.0.take_and_process_message();
		ml.0.take_and_process_message();
		ml.0.take_and_process_message();
		// CompleteInitialization may only come from the master (node 0)
		assert_eq!(ml.session_at(1).on_complete_initialization(ml.0.node(2), &message::CompleteInitialization {
			session: Default::default(),
			session_nonce: 0,
			derived_point: math::generate_random_point().unwrap().into(),
		}), Err(Error::InvalidMessage));
	}

	#[test]
	fn fails_to_accept_keys_dissemination_if_not_waiting_for_it() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		assert_eq!(ml.session_at(0).on_keys_dissemination(ml.0.node(1), &message::KeysDissemination {
			session: Default::default(),
			session_nonce: 0,
			secret1: math::generate_random_scalar().unwrap().into(),
			secret2: math::generate_random_scalar().unwrap().into(),
			publics: vec![math::generate_random_point().unwrap().into()],
		}), Err(Error::TooEarlyForRequest));
	}

	#[test]
	fn fails_to_accept_keys_dissemination_if_wrong_number_of_publics_passed() {
		let ml = MessageLoop::new(3).init(0).unwrap();
		ml.0.take_and_process_message(); // m -> s1: InitializeSession
		ml.0.take_and_process_message(); // m -> s2: InitializeSession
		ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization
		ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization
		ml.0.take_and_process_message(); // m -> s1: CompleteInitialization
		ml.0.take_and_process_message(); // m -> s2: CompleteInitialization

		// tamper with the message: threshold + 1 publics are expected
		let (from, to, mut msg) = ml.take_message_keys_dissemination();
		msg.publics.clear();
		assert_eq!(ml.session_of(&to).on_keys_dissemination(from, &msg), Err(Error::InvalidMessage));
	}

	#[test]
	fn fails_to_accept_keys_dissemination_second_time_from_the_same_node() {
		let ml = MessageLoop::new(3).init(0).unwrap();
		ml.0.take_and_process_message(); // m -> s1: InitializeSession
		ml.0.take_and_process_message(); // m -> s2: InitializeSession
		ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization
		ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization
		ml.0.take_and_process_message(); // m -> s1: CompleteInitialization
		ml.0.take_and_process_message(); // m -> s2: CompleteInitialization

		// process the dissemination once, then replay it
		let (from, to, msg) = ml.take_message_keys_dissemination();
		ml.0.process_message(from, to, Message::Generation(GenerationMessage::KeysDissemination(msg.clone())));
		assert_eq!(ml.session_of(&to).on_keys_dissemination(from, &msg), Err(Error::InvalidStateForRequest));
	}

	#[test]
	fn should_not_accept_public_key_share_when_is_not_waiting_for_it() {
		let ml = MessageLoop::new(3).init(1).unwrap();
		assert_eq!(ml.session_at(0).on_public_key_share(ml.0.node(1), &message::PublicKeyShare {
			session: Default::default(),
			session_nonce: 0,
			public_share: math::generate_random_point().unwrap().into(),
		}), Err(Error::InvalidStateForRequest));
	}

	#[test]
	fn should_not_accept_public_key_share_when_receiving_twice() {
		let ml = MessageLoop::new(3).init(0).unwrap();
		ml.0.take_and_process_message(); // m -> s1: InitializeSession
		ml.0.take_and_process_message(); // m -> s2: InitializeSession
		ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization
		ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization
		ml.0.take_and_process_message(); // m -> s1: CompleteInitialization
		ml.0.take_and_process_message(); // m -> s2: CompleteInitialization
		ml.0.take_and_process_message(); // m -> s1: KeysDissemination
		ml.0.take_and_process_message(); // m -> s2: KeysDissemination
		ml.0.take_and_process_message(); // s1 -> m: KeysDissemination
		ml.0.take_and_process_message(); // s1 -> s2: KeysDissemination
		ml.0.take_and_process_message(); // s2 -> m: KeysDissemination
		ml.0.take_and_process_message(); // s2 -> s1: KeysDissemination

		// process the public key share once, then replay it
		let (from, to, msg) = ml.take_message_public_key_share();
		ml.0.process_message(from, to, Message::Generation(GenerationMessage::PublicKeyShare(msg.clone())));
		assert_eq!(ml.session_of(&to).on_public_key_share(from, &msg), Err(Error::InvalidMessage));
	}

	#[test]
	fn encryption_fails_on_session_timeout() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		assert!(ml.session_at(0).joint_public_and_secret().is_none());
		ml.session_at(0).on_session_timeout();
		assert_eq!(ml.session_at(0).joint_public_and_secret().unwrap(), Err(Error::NodeDisconnected));
	}

	#[test]
	fn encryption_fails_on_node_timeout() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		assert!(ml.session_at(0).joint_public_and_secret().is_none());
		ml.session_at(0).on_node_timeout(&ml.0.node(1));
		assert_eq!(ml.session_at(0).joint_public_and_secret().unwrap(), Err(Error::NodeDisconnected));
	}

	#[test]
	fn complete_enc_dec_session() {
		// end-to-end: generate a joint key, then encrypt and decrypt with it
		let test_cases = [(0, 5), (2, 5), (3, 5)];
		for &(threshold, num_nodes) in &test_cases {
			let ml = MessageLoop::new(num_nodes).init(threshold).unwrap();
			ml.0.loop_until(|| ml.0.is_empty());

			// check that all nodes has finished joint public generation
			let joint_public_key = ml.session_at(0).joint_public_and_secret().unwrap().unwrap().0;
			for i in 0..num_nodes {
				let session = ml.session_at(i);
				assert_eq!(session.state(), SessionState::Finished);
				assert_eq!(session.joint_public_and_secret().map(|p| p.map(|p| p.0)), Some(Ok(joint_public_key)));
			}

			// now let's encrypt some secret (which is a point on EC)
			let document_secret_plain = Random.generate().unwrap().public().clone();
			let all_nodes_id_numbers = ml.nodes_id_numbers();
			let all_nodes_secret_shares = ml.nodes_secret_shares();
			let document_secret_decrypted = do_encryption_and_decryption(threshold, &joint_public_key,
				&all_nodes_id_numbers,
				&all_nodes_secret_shares,
				None,
				document_secret_plain.clone()
			).0;
			assert_eq!(document_secret_plain, document_secret_decrypted);
		}
	}

	#[test]
	fn generation_message_fails_when_nonce_is_wrong() {
		let ml = MessageLoop::new(2).init(0).unwrap();
		ml.0.take_and_process_message();

		// a message carrying an unexpected nonce must be rejected
		let msg = message::GenerationMessage::KeysDissemination(message::KeysDissemination {
			session: Default::default(),
			session_nonce: 10,
			secret1: math::generate_random_scalar().unwrap().into(),
			secret2: math::generate_random_scalar().unwrap().into(),
			publics: vec![math::generate_random_point().unwrap().into()],
		});
		assert_eq!(ml.session_at(1).process_message(&ml.0.node(0), &msg).unwrap_err(), Error::ReplayProtection);
	}
}
|