SecretStore: generating and retrieving decryption keys via service contract (#8029)
* SecretStore: started document keys generation via contract
* fixed Cargo.lock
* SecretStore: doc key contract gen tests
* SecretStore: fixed log parsing
* SecretStore: flush
* SecretStore: secretstore_generateDocumentKey RPC
* SecretStore: return encrypted_key from secretstore_generateDocumentKey
* prepare to GenerateDocKey -> StoreDocKey
* SecretStore: ability to identify requester via Public/Address
* SecretStore: store author address instead of public in db
* flush
* SecretStore: flush
* SecretStore: fixed test
* SecretStore: flush
* SecretStore: flush
* SecretStore: flush
* SecretStore: flush
* SecretStore: start async generation session
* SecretStore: process StoreDocumentKey service tasks
* SecretStore: flush
* SecretStore: update service contract ABI
* SecretStore: flush
* SecretStore: flush
* SecretStore: fixed event
* SecretStore: flush
* SecretStore: fixed tests
* SecretStore: fix broadcast shadows decryption
* SecretStore: finally decryption via service contract works
* SecretStore: fix for updated contract
* SecretStore: restored pending requests reading
* SecretStore: fixed some TODOs
* SecretStore: OnChainServiceContractAggregate
* SecretStore: different names for different contract types
* SecretStore: updated contracts interfaces
* SecretStore: utilize aggregate service contract
* fixed compilation
* SecretStore: fixes for updated contract
* SecretStore: service fixes after testing
* fixed cli test compilation
* SecretStore: decryption_session_origin_is_known_to_all_initialized_nodes
* SecretStore: added new contract listener tests
* SecretStore: session_listener_works
* removed optional TODO
* SecretStore: fixed KeyServer shutdown
* fixed warn + grumble
* const durations
parent 0a535bf485
commit ec96091369
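Before the hunks: this commit splits the single `service_contract` option into per-session-type options. A hedged sketch of the resulting `[secretstore]` configuration section, using only keys and defaults added below (the non-default values are purely illustrative):

    [secretstore]
    service_contract = "none"
    # new per-session-type contract address sources; each accepts
    # "none", "registry", or a contract address
    service_contract_srv_gen = "registry"
    service_contract_srv_retr = "none"
    service_contract_doc_store = "none"
    service_contract_doc_sretr = "none"

The same sources can be passed on the command line via the new --secretstore-srv-gen-contract, --secretstore-srv-retr-contract, --secretstore-doc-store-contract and --secretstore-doc-sretr-contract flags.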
@@ -573,7 +573,23 @@ usage! {
 	ARG arg_secretstore_contract: (String) = "none", or |c: &Config| c.secretstore.as_ref()?.service_contract.clone(),
 	"--secretstore-contract=[SOURCE]",
-	"Secret Store Service contract address source: none, registry (contract address is read from registry) or address.",
+	"Secret Store Service contract address source: none, registry (contract address is read from secretstore_service entry in registry) or address.",

+	ARG arg_secretstore_srv_gen_contract: (String) = "none", or |c: &Config| c.secretstore.as_ref()?.service_contract_srv_gen.clone(),
+	"--secretstore-srv-gen-contract=[SOURCE]",
+	"Secret Store Service server key generation contract address source: none, registry (contract address is read from secretstore_service_srv_gen entry in registry) or address.",
+
+	ARG arg_secretstore_srv_retr_contract: (String) = "none", or |c: &Config| c.secretstore.as_ref()?.service_contract_srv_retr.clone(),
+	"--secretstore-srv-retr-contract=[SOURCE]",
+	"Secret Store Service server key retrieval contract address source: none, registry (contract address is read from secretstore_service_srv_retr entry in registry) or address.",
+
+	ARG arg_secretstore_doc_store_contract: (String) = "none", or |c: &Config| c.secretstore.as_ref()?.service_contract_doc_store.clone(),
+	"--secretstore-doc-store-contract=[SOURCE]",
+	"Secret Store Service document key store contract address source: none, registry (contract address is read from secretstore_service_doc_store entry in registry) or address.",
+
+	ARG arg_secretstore_doc_sretr_contract: (String) = "none", or |c: &Config| c.secretstore.as_ref()?.service_contract_doc_sretr.clone(),
+	"--secretstore-doc-sretr-contract=[SOURCE]",
+	"Secret Store Service document key shadow retrieval contract address source: none, registry (contract address is read from secretstore_service_doc_sretr entry in registry) or address.",
+
 	ARG arg_secretstore_nodes: (String) = "", or |c: &Config| c.secretstore.as_ref()?.nodes.as_ref().map(|vec| vec.join(",")),
 	"--secretstore-nodes=[NODES]",
@@ -1133,6 +1149,10 @@ struct SecretStore {
 	disable_acl_check: Option<bool>,
 	disable_auto_migrate: Option<bool>,
 	service_contract: Option<String>,
+	service_contract_srv_gen: Option<String>,
+	service_contract_srv_retr: Option<String>,
+	service_contract_doc_store: Option<String>,
+	service_contract_doc_sretr: Option<String>,
 	self_secret: Option<String>,
 	admin_public: Option<String>,
 	nodes: Option<Vec<String>>,
@@ -1554,6 +1574,10 @@ mod tests {
 	flag_no_secretstore_acl_check: false,
 	flag_no_secretstore_auto_migrate: false,
 	arg_secretstore_contract: "none".into(),
+	arg_secretstore_srv_gen_contract: "none".into(),
+	arg_secretstore_srv_retr_contract: "none".into(),
+	arg_secretstore_doc_store_contract: "none".into(),
+	arg_secretstore_doc_sretr_contract: "none".into(),
 	arg_secretstore_secret: None,
 	arg_secretstore_admin_public: None,
 	arg_secretstore_nodes: "".into(),
@@ -1812,6 +1836,10 @@ mod tests {
 	disable_acl_check: None,
 	disable_auto_migrate: None,
 	service_contract: None,
+	service_contract_srv_gen: None,
+	service_contract_srv_retr: None,
+	service_contract_doc_store: None,
+	service_contract_doc_sretr: None,
 	self_secret: None,
 	admin_public: None,
 	nodes: None,
@@ -83,6 +83,10 @@ disable = false
 disable_http = false
 disable_acl_check = false
 service_contract = "none"
+service_contract_srv_gen = "none"
+service_contract_srv_retr = "none"
+service_contract_doc_store = "none"
+service_contract_doc_sretr = "none"
 nodes = []
 http_interface = "local"
 http_port = 8082
@@ -635,6 +635,10 @@ impl Configuration {
 	acl_check_enabled: self.secretstore_acl_check_enabled(),
 	auto_migrate_enabled: self.secretstore_auto_migrate_enabled(),
 	service_contract_address: self.secretstore_service_contract_address()?,
+	service_contract_srv_gen_address: self.secretstore_service_contract_srv_gen_address()?,
+	service_contract_srv_retr_address: self.secretstore_service_contract_srv_retr_address()?,
+	service_contract_doc_store_address: self.secretstore_service_contract_doc_store_address()?,
+	service_contract_doc_sretr_address: self.secretstore_service_contract_doc_sretr_address()?,
 	self_secret: self.secretstore_self_secret()?,
 	nodes: self.secretstore_nodes()?,
 	interface: self.secretstore_interface(),
@@ -1127,11 +1131,23 @@ impl Configuration {
 	}

 	fn secretstore_service_contract_address(&self) -> Result<Option<SecretStoreContractAddress>, String> {
-		Ok(match self.args.arg_secretstore_contract.as_ref() {
-			"none" => None,
-			"registry" => Some(SecretStoreContractAddress::Registry),
-			a => Some(SecretStoreContractAddress::Address(a.parse().map_err(|e| format!("{}", e))?)),
-		})
+		into_secretstore_service_contract_address(self.args.arg_secretstore_contract.as_ref())
+	}
+
+	fn secretstore_service_contract_srv_gen_address(&self) -> Result<Option<SecretStoreContractAddress>, String> {
+		into_secretstore_service_contract_address(self.args.arg_secretstore_srv_gen_contract.as_ref())
+	}
+
+	fn secretstore_service_contract_srv_retr_address(&self) -> Result<Option<SecretStoreContractAddress>, String> {
+		into_secretstore_service_contract_address(self.args.arg_secretstore_srv_retr_contract.as_ref())
+	}
+
+	fn secretstore_service_contract_doc_store_address(&self) -> Result<Option<SecretStoreContractAddress>, String> {
+		into_secretstore_service_contract_address(self.args.arg_secretstore_doc_store_contract.as_ref())
+	}
+
+	fn secretstore_service_contract_doc_sretr_address(&self) -> Result<Option<SecretStoreContractAddress>, String> {
+		into_secretstore_service_contract_address(self.args.arg_secretstore_doc_sretr_contract.as_ref())
 	}

 	fn ui_enabled(&self) -> bool {
@@ -1164,6 +1180,14 @@ impl Configuration {
 	}
 }

+fn into_secretstore_service_contract_address(s: &str) -> Result<Option<SecretStoreContractAddress>, String> {
+	match s {
+		"none" => Ok(None),
+		"registry" => Ok(Some(SecretStoreContractAddress::Registry)),
+		a => Ok(Some(SecretStoreContractAddress::Address(a.parse().map_err(|e| format!("{}", e))?))),
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use std::io::Write;
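For reference, a standalone sketch of how this source string is interpreted. It assumes a simplified enum in place of the crate's SecretStoreContractAddress, and the names ContractAddressSource/parse_source are illustrative, not from the commit:

    // Minimal sketch of the "none" / "registry" / address parsing above.
    #[derive(Debug, PartialEq)]
    enum ContractAddressSource {
        None,                 // no service contract configured
        Registry,             // address looked up in the on-chain registry
        Address(String),      // hex-encoded 20-byte address (H160 in the real code)
    }

    fn parse_source(s: &str) -> Result<ContractAddressSource, String> {
        let hex = s.trim_start_matches("0x");
        match s {
            "none" => Ok(ContractAddressSource::None),
            "registry" => Ok(ContractAddressSource::Registry),
            _ if hex.len() == 40 && hex.chars().all(|c| c.is_ascii_hexdigit()) =>
                Ok(ContractAddressSource::Address(s.to_owned())),
            other => Err(format!("invalid contract address source: {}", other)),
        }
    }

    fn main() {
        assert_eq!(parse_source("none").unwrap(), ContractAddressSource::None);
        assert_eq!(parse_source("registry").unwrap(), ContractAddressSource::Registry);
        assert!(parse_source("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").is_ok());
        assert!(parse_source("bogus").is_err());
    }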
@@ -55,6 +55,14 @@ pub struct Configuration {
 	pub auto_migrate_enabled: bool,
 	/// Service contract address.
 	pub service_contract_address: Option<ContractAddress>,
+	/// Server key generation service contract address.
+	pub service_contract_srv_gen_address: Option<ContractAddress>,
+	/// Server key retrieval service contract address.
+	pub service_contract_srv_retr_address: Option<ContractAddress>,
+	/// Document key store service contract address.
+	pub service_contract_doc_store_address: Option<ContractAddress>,
+	/// Document key shadow retrieval service contract address.
+	pub service_contract_doc_sretr_address: Option<ContractAddress>,
 	/// This node secret.
 	pub self_secret: Option<NodeSecretKey>,
 	/// Other nodes IDs + addresses.
@@ -108,6 +116,13 @@ mod server {
 	use ansi_term::Colour::Red;
 	use super::{Configuration, Dependencies, NodeSecretKey, ContractAddress};

+	fn into_service_contract_address(address: ContractAddress) -> ethcore_secretstore::ContractAddress {
+		match address {
+			ContractAddress::Registry => ethcore_secretstore::ContractAddress::Registry,
+			ContractAddress::Address(address) => ethcore_secretstore::ContractAddress::Address(address),
+		}
+	}
+
 	/// Key server
 	pub struct KeyServer {
 		_key_server: Box<ethcore_secretstore::KeyServer>,
@@ -150,10 +165,11 @@ mod server {
 			address: conf.http_interface.clone(),
 			port: conf.http_port,
 		}) } else { None },
-		service_contract_address: conf.service_contract_address.map(|c| match c {
-			ContractAddress::Registry => ethcore_secretstore::ContractAddress::Registry,
-			ContractAddress::Address(address) => ethcore_secretstore::ContractAddress::Address(address),
-		}),
+		service_contract_address: conf.service_contract_address.map(into_service_contract_address),
+		service_contract_srv_gen_address: conf.service_contract_srv_gen_address.map(into_service_contract_address),
+		service_contract_srv_retr_address: conf.service_contract_srv_retr_address.map(into_service_contract_address),
+		service_contract_doc_store_address: conf.service_contract_doc_store_address.map(into_service_contract_address),
+		service_contract_doc_sretr_address: conf.service_contract_doc_sretr_address.map(into_service_contract_address),
 		data_path: conf.data_path.clone(),
 		acl_check_enabled: conf.acl_check_enabled,
 		cluster_config: ethcore_secretstore::ClusterConfiguration {
@@ -195,6 +211,10 @@ impl Default for Configuration {
 	acl_check_enabled: true,
 	auto_migrate_enabled: true,
 	service_contract_address: None,
+	service_contract_srv_gen_address: None,
+	service_contract_srv_retr_address: None,
+	service_contract_doc_store_address: None,
+	service_contract_doc_sretr_address: None,
 	self_secret: None,
 	admin_public: None,
 	nodes: BTreeMap::new(),
@@ -1 +1,24 @@
-[{"constant":true,"inputs":[],"name":"getMigrationMaster","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getMigrationKeyServerPublic","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"id","type":"bytes32"}],"name":"startMigration","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getMigrationKeyServerAddress","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getMigrationId","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getNewKeyServers","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"id","type":"bytes32"}],"name":"confirmMigration","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"getMigrationKeyServers","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"isMigrationConfirmed","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getCurrentKeyServers","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getCurrentKeyServerPublic","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getNewKeyServerAddress","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getCurrentKeyServerAddress","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getNewKeyServerPublic","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"view","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"keyServer","type":"address"}],"name":"KeyServerAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"keyServer","type":"address"}],"name":"KeyServerRemoved","type":"event"},{"anonymous":false,"inputs":[],"name":"MigrationStarted","type":"event"},{"anonymous":false,"inputs":[],"name":"MigrationCompleted","type":"event"}]
+[
+{"constant":true,"inputs":[],"name":"getMigrationMaster","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getMigrationKeyServerPublic","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":false,"inputs":[{"name":"id","type":"bytes32"}],"name":"startMigration","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
+{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getCurrentKeyServerIndex","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getMigrationKeyServerAddress","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[],"name":"getMigrationId","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[],"name":"getNewKeyServers","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":false,"inputs":[{"name":"id","type":"bytes32"}],"name":"confirmMigration","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
+{"constant":true,"inputs":[],"name":"getMigrationKeyServers","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"isMigrationConfirmed","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[],"name":"getCurrentKeyServersCount","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[],"name":"getCurrentKeyServers","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[],"name":"getCurrentLastChange","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getCurrentKeyServerPublic","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getNewKeyServerAddress","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getCurrentKeyServerAddress","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getNewKeyServerPublic","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[{"name":"index","type":"uint8"}],"name":"getCurrentKeyServer","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},
+{"anonymous":false,"inputs":[{"indexed":false,"name":"keyServer","type":"address"}],"name":"KeyServerAdded","type":"event"},
+{"anonymous":false,"inputs":[{"indexed":false,"name":"keyServer","type":"address"}],"name":"KeyServerRemoved","type":"event"},
+{"anonymous":false,"inputs":[],"name":"MigrationStarted","type":"event"},
+{"anonymous":false,"inputs":[],"name":"MigrationCompleted","type":"event"}
+]
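The reformatted ABI is plain JSON, so it is easy to sanity-check. A quick illustrative sketch (not part of this commit, and assuming serde_json is available; the file path is hypothetical since the diff does not name the file here):

    // Parse the ABI file and list its function/event names.
    use serde_json::Value;

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let abi = std::fs::read_to_string("key_server_set.json")?; // hypothetical path
        let entries: Vec<Value> = serde_json::from_str(&abi)?;
        for entry in &entries {
            let kind = entry["type"].as_str().unwrap_or("?");
            let name = entry["name"].as_str().unwrap_or("<unnamed>");
            println!("{}: {}", kind, name);
        }
        Ok(())
    }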
@@ -1,8 +1,33 @@
 [
+{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"requireKeyServer","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},
+
 {"constant":true,"inputs":[],"name":"serverKeyGenerationRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
-{"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getServerKeyId","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},
-{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"},{"name":"v","type":"uint8"},{"name":"r","type":"bytes32"},{"name":"s","type":"bytes32"}],"name":"serverKeyGenerated","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
-{"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"getServerKeyThreshold","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
-{"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"authority","type":"address"}],"name":"getServerKeyConfirmationStatus","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},
-{"anonymous":false,"inputs":[{"indexed":true,"name":"serverKeyId","type":"bytes32"},{"indexed":true,"name":"threshold","type":"uint256"}],"name":"ServerKeyRequested","type":"event"}
+{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"serverKeyGenerationError","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
+{"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getServerKeyGenerationRequest","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"address"},{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"}],"name":"serverKeyGenerated","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
+{"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"keyServer","type":"address"}],"name":"isServerKeyGenerationResponseRequired","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},
+{"anonymous":false,"inputs":[{"indexed":false,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"author","type":"address"},{"indexed":false,"name":"threshold","type":"uint8"}],"name":"ServerKeyGenerationRequested","type":"event"},
+
+{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"serverKeyRetrievalError","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
+{"constant":true,"inputs":[],"name":"serverKeyRetrievalRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"keyServer","type":"address"}],"name":"isServerKeyRetrievalResponseRequired","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getServerKeyRetrievalRequest","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"},{"name":"threshold","type":"uint8"}],"name":"serverKeyRetrieved","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
+{"anonymous":false,"inputs":[{"indexed":false,"name":"serverKeyId","type":"bytes32"}],"name":"ServerKeyRetrievalRequested","type":"event"},
+
+{"constant":true,"inputs":[],"name":"documentKeyStoreRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"documentKeyStoreError","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
+{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"documentKeyStored","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
+{"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"keyServer","type":"address"}],"name":"isDocumentKeyStoreResponseRequired","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getDocumentKeyStoreRequest","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"address"},{"name":"","type":"bytes"},{"name":"","type":"bytes"}],"payable":false,"stateMutability":"view","type":"function"},
+{"anonymous":false,"inputs":[{"indexed":false,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"author","type":"address"},{"indexed":false,"name":"commonPoint","type":"bytes"},{"indexed":false,"name":"encryptedPoint","type":"bytes"}],"name":"DocumentKeyStoreRequested","type":"event"},
+
+{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"requester","type":"address"},{"name":"commonPoint","type":"bytes"},{"name":"threshold","type":"uint8"}],"name":"documentKeyCommonRetrieved","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
+{"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"keyServer","type":"address"},{"name":"requester","type":"address"}],"name":"isDocumentKeyShadowRetrievalResponseRequired","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"requester","type":"address"},{"name":"participants","type":"uint256"},{"name":"decryptedSecret","type":"bytes"},{"name":"shadow","type":"bytes"}],"name":"documentKeyPersonalRetrieved","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
+{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"requester","type":"address"}],"name":"documentKeyShadowRetrievalError","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
+{"constant":true,"inputs":[],"name":"documentKeyShadowRetrievalRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
+{"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getDocumentKeyShadowRetrievalRequest","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"bytes"},{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},
+{"anonymous":false,"inputs":[{"indexed":false,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"requester","type":"address"}],"name":"DocumentKeyCommonRetrievalRequested","type":"event"},
+{"anonymous":false,"inputs":[{"indexed":false,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"requesterPublic","type":"bytes"}],"name":"DocumentKeyPersonalRetrievalRequested","type":"event"}
 ]
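Each request family in this ABI follows the same cycle: a requests counter, an indexed getter for pending requests, an "is...ResponseRequired" check per key server, and success/error callbacks. A purely illustrative sketch of that cycle for server key generation, with hypothetical trait and method names standing in for the on-chain calls noted in the comments (this loop is not code from the commit):

    type ServerKeyId = [u8; 32];

    trait ServerKeyGenerationService {
        fn requests_count(&self) -> u64;                          // serverKeyGenerationRequestsCount
        fn request_at(&self, index: u64) -> ServerKeyId;          // getServerKeyGenerationRequest
        fn is_response_required(&self, id: &ServerKeyId) -> bool; // isServerKeyGenerationResponseRequired
        fn publish_key(&self, id: &ServerKeyId, public: &[u8]);   // serverKeyGenerated
        fn publish_error(&self, id: &ServerKeyId);                // serverKeyGenerationError
    }

    fn process_pending(service: &dyn ServerKeyGenerationService) {
        for index in 0..service.requests_count() {
            let id = service.request_at(index);
            if !service.is_response_required(&id) {
                continue; // this key server already responded to the request
            }
            match generate_server_key(&id) {
                Ok(public) => service.publish_key(&id, &public),
                Err(()) => service.publish_error(&id),
            }
        }
    }

    // Stand-in for the actual distributed key generation session.
    fn generate_server_key(_id: &ServerKeyId) -> Result<Vec<u8>, ()> {
        Ok(vec![0u8; 64])
    }

    struct NoPendingRequests;

    impl ServerKeyGenerationService for NoPendingRequests {
        fn requests_count(&self) -> u64 { 0 }
        fn request_at(&self, _index: u64) -> ServerKeyId { unreachable!() }
        fn is_response_required(&self, _id: &ServerKeyId) -> bool { false }
        fn publish_key(&self, _id: &ServerKeyId, _public: &[u8]) {}
        fn publish_error(&self, _id: &ServerKeyId) {}
    }

    fn main() {
        process_pending(&NoPendingRequests); // no-op on an empty queue
    }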
@@ -17,12 +17,11 @@
 use std::sync::Arc;
 use std::collections::{HashMap, HashSet};
 use parking_lot::{Mutex, RwLock};
-use ethkey::public_to_address;
 use ethcore::client::{BlockId, ChainNotify, CallContract, RegistryInfo};
 use ethereum_types::{H256, Address};
 use bytes::Bytes;
 use trusted_client::TrustedClient;
-use types::all::{Error, ServerKeyId, Public};
+use types::all::{Error, ServerKeyId};

 use_contract!(acl_storage, "AclStorage", "res/acl_storage.json");

@@ -30,8 +29,8 @@ const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checke

 /// ACL storage of Secret Store
 pub trait AclStorage: Send + Sync {
-	/// Check if requestor with `public` key can access document with hash `document`
-	fn check(&self, public: &Public, document: &ServerKeyId) -> Result<bool, Error>;
+	/// Check if requestor can access document with hash `document`
+	fn check(&self, requester: Address, document: &ServerKeyId) -> Result<bool, Error>;
 }

 /// On-chain ACL storage implementation.
@@ -53,7 +52,7 @@ struct CachedContract {
 /// Dummy ACL storage implementation (check always passed).
 #[derive(Default, Debug)]
 pub struct DummyAclStorage {
-	prohibited: RwLock<HashMap<Public, HashSet<ServerKeyId>>>,
+	prohibited: RwLock<HashMap<Address, HashSet<ServerKeyId>>>,
 }

 impl OnChainAclStorage {
@@ -70,8 +69,8 @@ impl OnChainAclStorage {
 }

 impl AclStorage for OnChainAclStorage {
-	fn check(&self, public: &Public, document: &ServerKeyId) -> Result<bool, Error> {
-		self.contract.lock().check(public, document)
+	fn check(&self, requester: Address, document: &ServerKeyId) -> Result<bool, Error> {
+		self.contract.lock().check(requester, document)
 	}
 }

@@ -104,16 +103,15 @@ impl CachedContract {
 		}
 	}

-	pub fn check(&mut self, public: &Public, document: &ServerKeyId) -> Result<bool, Error> {
+	pub fn check(&mut self, requester: Address, document: &ServerKeyId) -> Result<bool, Error> {
 		if let Some(client) = self.client.get() {
 			// call contract to check accesss
 			match self.contract_addr {
 				Some(contract_address) => {
-					let address = public_to_address(&public);
 					let do_call = |data| client.call_contract(BlockId::Latest, contract_address, data);
 					self.contract.functions()
 						.check_permissions()
-						.call(address, document.clone(), &do_call)
+						.call(requester, document.clone(), &do_call)
 						.map_err(|e| Error::Internal(e.to_string()))
 				},
 				None => Err(Error::Internal("ACL checker contract is not configured".to_owned())),
@@ -127,18 +125,18 @@ impl CachedContract {
 impl DummyAclStorage {
 	/// Prohibit given requestor access to given documents
 	#[cfg(test)]
-	pub fn prohibit(&self, public: Public, document: ServerKeyId) {
+	pub fn prohibit(&self, requester: Address, document: ServerKeyId) {
 		self.prohibited.write()
-			.entry(public)
+			.entry(requester)
 			.or_insert_with(Default::default)
 			.insert(document);
 	}
 }

 impl AclStorage for DummyAclStorage {
-	fn check(&self, public: &Public, document: &ServerKeyId) -> Result<bool, Error> {
+	fn check(&self, requester: Address, document: &ServerKeyId) -> Result<bool, Error> {
 		Ok(self.prohibited.read()
-			.get(public)
+			.get(&requester)
 			.map(|docs| !docs.contains(document))
 			.unwrap_or(true))
 	}
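The ACL trait now identifies requesters by address rather than public key. A self-contained sketch of the deny-list pattern DummyAclStorage uses, with the new address-based API; the primitive types here are simplified stand-ins (the real code uses ethereum_types::Address, parking_lot::RwLock and the crate's Error type):

    use std::collections::{HashMap, HashSet};
    use std::sync::RwLock;

    type Address = [u8; 20];
    type ServerKeyId = [u8; 32];

    #[derive(Default)]
    struct DenyListAcl {
        prohibited: RwLock<HashMap<Address, HashSet<ServerKeyId>>>,
    }

    impl DenyListAcl {
        // Prohibit the given requester from accessing the given document.
        fn prohibit(&self, requester: Address, document: ServerKeyId) {
            self.prohibited.write().unwrap()
                .entry(requester)
                .or_insert_with(Default::default)
                .insert(document);
        }

        // Access is allowed unless the (requester, document) pair was prohibited.
        fn check(&self, requester: Address, document: &ServerKeyId) -> bool {
            self.prohibited.read().unwrap()
                .get(&requester)
                .map(|docs| !docs.contains(document))
                .unwrap_or(true)
        }
    }

    fn main() {
        let acl = DenyListAcl::default();
        let (alice, key) = ([1u8; 20], [7u8; 32]);
        assert!(acl.check(alice, &key));
        acl.prohibit(alice, key);
        assert!(!acl.check(alice, &key));
    }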
@@ -22,13 +22,12 @@ use futures::{self, Future};
 use parking_lot::Mutex;
 use tokio_core::reactor::Core;
 use ethcrypto;
-use ethkey;
 use super::acl_storage::AclStorage;
 use super::key_storage::KeyStorage;
 use super::key_server_set::KeyServerSet;
 use key_server_cluster::{math, ClusterCore};
 use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer, NodeKeyPair};
-use types::all::{Error, Public, RequestSignature, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow,
+use types::all::{Error, Public, RequestSignature, Requester, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow,
 	ClusterConfiguration, MessageHash, EncryptedMessageSignature, NodeId};
 use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration};

@@ -71,39 +70,39 @@ impl AdminSessionsServer for KeyServerImpl {
 }

 impl ServerKeyGenerator for KeyServerImpl {
-	fn generate_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<Public, Error> {
+	fn generate_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result<Public, Error> {
 		// recover requestor' public key from signature
-		let public = ethkey::recover(signature, key_id)
-			.map_err(|_| Error::BadSignature)?;
+		let address = author.address(key_id).map_err(Error::InsufficientRequesterData)?;

 		// generate server key
-		let generation_session = self.data.lock().cluster.new_generation_session(key_id.clone(), public, threshold)?;
-		generation_session.wait(None).map_err(Into::into)
+		let generation_session = self.data.lock().cluster.new_generation_session(key_id.clone(), None, address, threshold)?;
+		generation_session.wait(None)
+			.expect("when wait is called without timeout it always returns Some; qed")
+			.map_err(Into::into)
 	}
 }

 impl DocumentKeyServer for KeyServerImpl {
-	fn store_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> {
+	fn store_document_key(&self, key_id: &ServerKeyId, author: &Requester, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> {
 		// store encrypted key
 		let encryption_session = self.data.lock().cluster.new_encryption_session(key_id.clone(),
-			signature.clone().into(), common_point, encrypted_document_key)?;
+			author.clone(), common_point, encrypted_document_key)?;
 		encryption_session.wait(None).map_err(Into::into)
 	}

-	fn generate_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<EncryptedDocumentKey, Error> {
+	fn generate_document_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result<EncryptedDocumentKey, Error> {
 		// recover requestor' public key from signature
-		let public = ethkey::recover(signature, key_id)
-			.map_err(|_| Error::BadSignature)?;
+		let public = author.public(key_id).map_err(Error::InsufficientRequesterData)?;

 		// generate server key
-		let server_key = self.generate_key(key_id, signature, threshold)?;
+		let server_key = self.generate_key(key_id, author, threshold)?;

 		// generate random document key
 		let document_key = math::generate_random_point()?;
 		let encrypted_document_key = math::encrypt_secret(&document_key, &server_key)?;

 		// store document key in the storage
-		self.store_document_key(key_id, signature, encrypted_document_key.common_point, encrypted_document_key.encrypted_point)?;
+		self.store_document_key(key_id, author, encrypted_document_key.common_point, encrypted_document_key.encrypted_point)?;

 		// encrypt document key with requestor public key
 		let document_key = ethcrypto::ecies::encrypt(&public, &ethcrypto::DEFAULT_MAC, &document_key)
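The `Requester` type replacing raw `RequestSignature` arguments is not defined in this diff; it lives in types::all. From its call sites here (`author.public(key_id)`, `author.address(key_id)`, the `From<RequestSignature>` conversion, `Error::InsufficientRequesterData`), a plausible reconstruction looks like the following. Everything in this sketch is hypothetical, including the dummy stand-ins for ethkey::recover and ethkey::public_to_address:

    type Signature = Vec<u8>;
    type Public = Vec<u8>;   // 64-byte uncompressed public key in the real code
    type Address = [u8; 20];
    type ServerKeyId = [u8; 32];

    #[derive(Clone)]
    enum Requester {
        Signature(Signature), // identity recovered from a signature over the key id
        Public(Public),       // identity given directly as a public key
        Address(Address),     // identity given directly as an address
    }

    impl Requester {
        fn public(&self, key_id: &ServerKeyId) -> Result<Public, String> {
            match self {
                Requester::Signature(sig) => recover(sig, key_id),
                Requester::Public(p) => Ok(p.clone()),
                // an address alone cannot be turned back into a public key
                Requester::Address(_) => Err("requester address provides no public key".into()),
            }
        }

        fn address(&self, key_id: &ServerKeyId) -> Result<Address, String> {
            match self {
                Requester::Address(a) => Ok(*a),
                _ => self.public(key_id).map(|p| public_to_address(&p)),
            }
        }
    }

    // Dummy stand-ins for the real ethkey helpers.
    fn recover(_sig: &Signature, _message: &ServerKeyId) -> Result<Public, String> {
        Ok(vec![0u8; 64])
    }
    fn public_to_address(_public: &Public) -> Address {
        [0u8; 20]
    }

    fn main() {
        let requester = Requester::Address([1u8; 20]);
        assert!(requester.public(&[0u8; 32]).is_err());  // InsufficientRequesterData analogue
        assert!(requester.address(&[0u8; 32]).is_ok());
    }

This explains why `generate_key` only needs `author.address(...)` while `generate_document_key` needs `author.public(...)`: an address-only requester suffices for key generation but cannot receive an ECIES-encrypted result.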
@@ -111,15 +110,16 @@ impl DocumentKeyServer for KeyServerImpl {
 		Ok(document_key)
 	}

-	fn restore_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKey, Error> {
+	fn restore_document_key(&self, key_id: &ServerKeyId, requester: &Requester) -> Result<EncryptedDocumentKey, Error> {
 		// recover requestor' public key from signature
-		let public = ethkey::recover(signature, key_id)
-			.map_err(|_| Error::BadSignature)?;
+		let public = requester.public(key_id).map_err(Error::InsufficientRequesterData)?;

 		// decrypt document key
 		let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(),
-			signature.clone().into(), None, false)?;
-		let document_key = decryption_session.wait()?.decrypted_secret;
+			None, requester.clone(), None, false, false)?;
+		let document_key = decryption_session.wait(None)
+			.expect("when wait is called without timeout it always returns Some; qed")?
+			.decrypted_secret;

 		// encrypt document key with requestor public key
 		let document_key = ethcrypto::ecies::encrypt(&public, &ethcrypto::DEFAULT_MAC, &document_key)
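Note the recurring pattern here: session `wait` now takes an optional timeout and returns `None` only when the timeout elapses, so `wait(None)` can always be unwrapped with a `qed` expect. A minimal standalone sketch of that assumed API shape (the real session types live in key_server_cluster):

    use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
    use std::time::Duration;

    struct Session<T> {
        result: Receiver<Result<T, String>>,
    }

    impl<T> Session<T> {
        // wait(None) blocks until completion and so always yields Some;
        // wait(Some(timeout)) may yield None when the timeout elapses first.
        fn wait(&self, timeout: Option<Duration>) -> Option<Result<T, String>> {
            match timeout {
                None => Some(self.result.recv().unwrap_or(Err("session dropped".into()))),
                Some(t) => match self.result.recv_timeout(t) {
                    Ok(result) => Some(result),
                    Err(RecvTimeoutError::Timeout) => None,
                    Err(RecvTimeoutError::Disconnected) => Some(Err("session dropped".into())),
                },
            }
        }
    }

    fn main() {
        let (tx, rx) = channel();
        tx.send(Ok(42u32)).unwrap();
        let session = Session { result: rx };
        // Mirrors the pattern used throughout this commit:
        let value = session.wait(None)
            .expect("when wait is called without timeout it always returns Some; qed")
            .unwrap();
        assert_eq!(value, 42);
    }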
@@ -127,22 +127,23 @@ impl DocumentKeyServer for KeyServerImpl {
 		Ok(document_key)
 	}

-	fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKeyShadow, Error> {
+	fn restore_document_key_shadow(&self, key_id: &ServerKeyId, requester: &Requester) -> Result<EncryptedDocumentKeyShadow, Error> {
 		let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(),
-			signature.clone().into(), None, true)?;
-		decryption_session.wait().map_err(Into::into)
+			None, requester.clone(), None, true, false)?;
+		decryption_session.wait(None)
+			.expect("when wait is called without timeout it always returns Some; qed")
+			.map_err(Into::into)
 	}
 }

 impl MessageSigner for KeyServerImpl {
-	fn sign_message_schnorr(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
+	fn sign_message_schnorr(&self, key_id: &ServerKeyId, requester: &Requester, message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
 		// recover requestor' public key from signature
-		let public = ethkey::recover(signature, key_id)
-			.map_err(|_| Error::BadSignature)?;
+		let public = requester.public(key_id).map_err(Error::InsufficientRequesterData)?;

 		// sign message
 		let signing_session = self.data.lock().cluster.new_schnorr_signing_session(key_id.clone(),
-			signature.clone().into(), None, message)?;
+			requester.clone().into(), None, message)?;
 		let message_signature = signing_session.wait()?;

 		// compose two message signature components into single one
@@ -156,14 +157,13 @@ impl MessageSigner for KeyServerImpl {
 		Ok(message_signature)
 	}

-	fn sign_message_ecdsa(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
+	fn sign_message_ecdsa(&self, key_id: &ServerKeyId, requester: &Requester, message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
 		// recover requestor' public key from signature
-		let public = ethkey::recover(signature, key_id)
-			.map_err(|_| Error::BadSignature)?;
+		let public = requester.public(key_id).map_err(Error::InsufficientRequesterData)?;

 		// sign message
 		let signing_session = self.data.lock().cluster.new_ecdsa_signing_session(key_id.clone(),
-			signature.clone().into(), None, message)?;
+			requester.clone().into(), None, message)?;
 		let message_signature = signing_session.wait()?;

 		// encrypt combined signature with requestor public key
@@ -177,7 +177,7 @@ impl KeyServerCore {
 	pub fn new(config: &ClusterConfiguration, key_server_set: Arc<KeyServerSet>, self_key_pair: Arc<NodeKeyPair>, acl_storage: Arc<AclStorage>, key_storage: Arc<KeyStorage>) -> Result<Self, Error> {
 		let config = NetClusterConfiguration {
 			threads: config.threads,
-			self_key_pair: self_key_pair,
+			self_key_pair: self_key_pair.clone(),
 			listen_address: (config.listener_address.address.clone(), config.listener_address.port),
 			key_server_set: key_server_set,
 			allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes,
@@ -189,7 +189,7 @@ impl KeyServerCore {

 		let (stop, stopped) = futures::oneshot();
 		let (tx, rx) = mpsc::channel();
-		let handle = thread::spawn(move || {
+		let handle = thread::Builder::new().name("KeyServerLoop".into()).spawn(move || {
 			let mut el = match Core::new() {
 				Ok(el) => el,
 				Err(e) => {
@@ -202,7 +202,9 @@ impl KeyServerCore {
 			let cluster_client = cluster.and_then(|c| c.run().map(|_| c.client()));
 			tx.send(cluster_client.map_err(Into::into)).expect("Rx is blocking upper thread.");
 			let _ = el.run(futures::empty().select(stopped));
-		});
+
+			trace!(target: "secretstore_net", "{}: KeyServerLoop thread stopped", self_key_pair.public());
+		}).map_err(|e| Error::Internal(format!("{}", e)))?;
 		let cluster = rx.recv().map_err(|e| Error::Internal(format!("error initializing event loop: {}", e)))??;

 		Ok(KeyServerCore {
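The swap from `thread::spawn` to `thread::Builder` gives the event-loop thread a name and turns spawn failure into a recoverable error instead of a panic. A minimal standalone illustration of that std API (independent of the commit's code):

    use std::thread;

    fn main() -> std::io::Result<()> {
        let handle = thread::Builder::new()
            .name("KeyServerLoop".into())
            .spawn(|| {
                // The name shows up in panics, debuggers and profilers.
                let name = thread::current().name().map(str::to_owned);
                println!("running on thread {:?}", name);
            })?; // Builder::spawn returns io::Result, unlike thread::spawn
        handle.join().expect("worker panicked");
        Ok(())
    }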
@@ -225,26 +227,25 @@ pub mod tests {
 	use std::collections::BTreeSet;
 	use std::time;
 	use std::sync::Arc;
-	use std::sync::atomic::{AtomicUsize, Ordering};
 	use std::net::SocketAddr;
 	use std::collections::BTreeMap;
 	use ethcrypto;
 	use ethkey::{self, Secret, Random, Generator, verify_public};
 	use acl_storage::DummyAclStorage;
+	use key_storage::KeyStorage;
 	use key_storage::tests::DummyKeyStorage;
 	use node_key_pair::PlainNodeKeyPair;
 	use key_server_set::tests::MapKeyServerSet;
 	use key_server_cluster::math;
 	use ethereum_types::{H256, H520};
 	use types::all::{Error, Public, ClusterConfiguration, NodeAddress, RequestSignature, ServerKeyId,
-		EncryptedDocumentKey, EncryptedDocumentKeyShadow, MessageHash, EncryptedMessageSignature, NodeId};
+		EncryptedDocumentKey, EncryptedDocumentKeyShadow, MessageHash, EncryptedMessageSignature,
+		Requester, NodeId};
 	use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer};
 	use super::KeyServerImpl;

 	#[derive(Default)]
-	pub struct DummyKeyServer {
-		pub generation_requests_count: AtomicUsize,
-	}
+	pub struct DummyKeyServer;

 	impl KeyServer for DummyKeyServer {}

@@ -255,41 +256,40 @@ pub mod tests {
 	}

 	impl ServerKeyGenerator for DummyKeyServer {
-		fn generate_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _threshold: usize) -> Result<Public, Error> {
-			self.generation_requests_count.fetch_add(1, Ordering::Relaxed);
-			Err(Error::Internal("test error".into()))
+		fn generate_key(&self, _key_id: &ServerKeyId, _author: &Requester, _threshold: usize) -> Result<Public, Error> {
+			unimplemented!("test-only")
 		}
 	}

 	impl DocumentKeyServer for DummyKeyServer {
-		fn store_document_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _common_point: Public, _encrypted_document_key: Public) -> Result<(), Error> {
+		fn store_document_key(&self, _key_id: &ServerKeyId, _author: &Requester, _common_point: Public, _encrypted_document_key: Public) -> Result<(), Error> {
 			unimplemented!("test-only")
 		}

-		fn generate_document_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _threshold: usize) -> Result<EncryptedDocumentKey, Error> {
+		fn generate_document_key(&self, _key_id: &ServerKeyId, _author: &Requester, _threshold: usize) -> Result<EncryptedDocumentKey, Error> {
 			unimplemented!("test-only")
 		}

-		fn restore_document_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature) -> Result<EncryptedDocumentKey, Error> {
+		fn restore_document_key(&self, _key_id: &ServerKeyId, _requester: &Requester) -> Result<EncryptedDocumentKey, Error> {
 			unimplemented!("test-only")
 		}

-		fn restore_document_key_shadow(&self, _key_id: &ServerKeyId, _signature: &RequestSignature) -> Result<EncryptedDocumentKeyShadow, Error> {
+		fn restore_document_key_shadow(&self, _key_id: &ServerKeyId, _requester: &Requester) -> Result<EncryptedDocumentKeyShadow, Error> {
 			unimplemented!("test-only")
 		}
 	}

 	impl MessageSigner for DummyKeyServer {
-		fn sign_message_schnorr(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
+		fn sign_message_schnorr(&self, _key_id: &ServerKeyId, _requester: &Requester, _message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
 			unimplemented!("test-only")
 		}

-		fn sign_message_ecdsa(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
+		fn sign_message_ecdsa(&self, _key_id: &ServerKeyId, _requester: &Requester, _message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
 			unimplemented!("test-only")
 		}
 	}

-	fn make_key_servers(start_port: u16, num_nodes: usize) -> Vec<KeyServerImpl> {
+	fn make_key_servers(start_port: u16, num_nodes: usize) -> (Vec<KeyServerImpl>, Vec<Arc<DummyKeyStorage>>) {
 		let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect();
 		let configs: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration {
 			threads: 1,
@@ -309,11 +309,12 @@ pub mod tests {
 		let key_servers_set: BTreeMap<Public, SocketAddr> = configs[0].nodes.iter()
 			.map(|(k, a)| (k.clone(), format!("{}:{}", a.address, a.port).parse().unwrap()))
 			.collect();
+		let key_storages = (0..num_nodes).map(|_| Arc::new(DummyKeyStorage::default())).collect::<Vec<_>>();
 		let key_servers: Vec<_> = configs.into_iter().enumerate().map(|(i, cfg)|
 			KeyServerImpl::new(&cfg, Arc::new(MapKeyServerSet::new(key_servers_set.clone())),
 				Arc::new(PlainNodeKeyPair::new(key_pairs[i].clone())),
 				Arc::new(DummyAclStorage::default()),
-				Arc::new(DummyKeyStorage::default())).unwrap()
+				key_storages[i].clone()).unwrap()
 		).collect();

 		// wait until connections are established. It is fast => do not bother with events here
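The test fixture now hands back each node's key storage alongside the servers so tests can assert on stored state. A self-contained sketch of that pattern with simplified stand-in types (DummyStorage/Server here are illustrative, not the crate's types):

    use std::sync::{Arc, Mutex};

    #[derive(Default)]
    struct DummyStorage {
        entries: Mutex<Vec<String>>,
    }

    struct Server {
        storage: Arc<DummyStorage>,
    }

    // Build servers and return shared handles to their storages for assertions.
    fn make_servers(n: usize) -> (Vec<Server>, Vec<Arc<DummyStorage>>) {
        let storages: Vec<_> = (0..n).map(|_| Arc::new(DummyStorage::default())).collect();
        let servers: Vec<Server> = storages.iter().map(|s| Server { storage: s.clone() }).collect();
        (servers, storages)
    }

    fn main() {
        let (servers, storages) = make_servers(3);
        servers[0].storage.entries.lock().unwrap().push("key share".into());
        assert_eq!(storages[0].entries.lock().unwrap().len(), 1);
    }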
@@ -343,25 +344,25 @@ pub mod tests {
 			}
 		}

-		key_servers
+		(key_servers, key_storages)
 	}

 	#[test]
 	fn document_key_generation_and_retrievement_works_over_network_with_single_node() {
 		//::logger::init_log();
-		let key_servers = make_key_servers(6070, 1);
+		let (key_servers, _) = make_key_servers(6070, 1);

 		// generate document key
 		let threshold = 0;
 		let document = Random.generate().unwrap().secret().clone();
 		let secret = Random.generate().unwrap().secret().clone();
 		let signature = ethkey::sign(&secret, &document).unwrap();
-		let generated_key = key_servers[0].generate_document_key(&document, &signature, threshold).unwrap();
+		let generated_key = key_servers[0].generate_document_key(&document, &signature.clone().into(), threshold).unwrap();
 		let generated_key = ethcrypto::ecies::decrypt(&secret, &ethcrypto::DEFAULT_MAC, &generated_key).unwrap();

 		// now let's try to retrieve key back
 		for key_server in key_servers.iter() {
-			let retrieved_key = key_server.restore_document_key(&document, &signature).unwrap();
+			let retrieved_key = key_server.restore_document_key(&document, &signature.clone().into()).unwrap();
 			let retrieved_key = ethcrypto::ecies::decrypt(&secret, &ethcrypto::DEFAULT_MAC, &retrieved_key).unwrap();
 			assert_eq!(retrieved_key, generated_key);
 		}
@ -370,7 +371,7 @@ pub mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn document_key_generation_and_retrievement_works_over_network_with_3_nodes() {
|
fn document_key_generation_and_retrievement_works_over_network_with_3_nodes() {
|
||||||
//::logger::init_log();
|
//::logger::init_log();
|
||||||
let key_servers = make_key_servers(6080, 3);
|
let (key_servers, key_storages) = make_key_servers(6080, 3);
|
||||||
|
|
||||||
let test_cases = [0, 1, 2];
|
let test_cases = [0, 1, 2];
|
||||||
for threshold in &test_cases {
|
for threshold in &test_cases {
|
||||||
@ -378,14 +379,18 @@ pub mod tests {
|
|||||||
let document = Random.generate().unwrap().secret().clone();
|
let document = Random.generate().unwrap().secret().clone();
|
||||||
let secret = Random.generate().unwrap().secret().clone();
|
let secret = Random.generate().unwrap().secret().clone();
|
||||||
let signature = ethkey::sign(&secret, &document).unwrap();
|
let signature = ethkey::sign(&secret, &document).unwrap();
|
||||||
let generated_key = key_servers[0].generate_document_key(&document, &signature, *threshold).unwrap();
|
let generated_key = key_servers[0].generate_document_key(&document, &signature.clone().into(), *threshold).unwrap();
|
||||||
let generated_key = ethcrypto::ecies::decrypt(&secret, ðcrypto::DEFAULT_MAC, &generated_key).unwrap();
|
let generated_key = ethcrypto::ecies::decrypt(&secret, ðcrypto::DEFAULT_MAC, &generated_key).unwrap();
|
||||||
|
|
||||||
// now let's try to retrieve key back
|
// now let's try to retrieve key back
|
||||||
for key_server in key_servers.iter() {
|
for (i, key_server) in key_servers.iter().enumerate() {
|
||||||
let retrieved_key = key_server.restore_document_key(&document, &signature).unwrap();
|
let retrieved_key = key_server.restore_document_key(&document, &signature.clone().into()).unwrap();
|
||||||
let retrieved_key = ethcrypto::ecies::decrypt(&secret, ðcrypto::DEFAULT_MAC, &retrieved_key).unwrap();
|
let retrieved_key = ethcrypto::ecies::decrypt(&secret, ðcrypto::DEFAULT_MAC, &retrieved_key).unwrap();
|
||||||
assert_eq!(retrieved_key, generated_key);
|
assert_eq!(retrieved_key, generated_key);
|
||||||
|
|
||||||
|
let key_share = key_storages[i].get(&document).unwrap().unwrap();
|
||||||
|
assert!(key_share.common_point.is_some());
|
||||||
|
assert!(key_share.encrypted_point.is_some());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -393,7 +398,7 @@ pub mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn server_key_generation_and_storing_document_key_works_over_network_with_3_nodes() {
|
fn server_key_generation_and_storing_document_key_works_over_network_with_3_nodes() {
|
||||||
//::logger::init_log();
|
//::logger::init_log();
|
||||||
let key_servers = make_key_servers(6090, 3);
|
let (key_servers, _) = make_key_servers(6090, 3);
|
||||||
|
|
||||||
let test_cases = [0, 1, 2];
|
let test_cases = [0, 1, 2];
|
||||||
for threshold in &test_cases {
|
for threshold in &test_cases {
|
||||||
@ -401,18 +406,19 @@ pub mod tests {
|
|||||||
let server_key_id = Random.generate().unwrap().secret().clone();
|
let server_key_id = Random.generate().unwrap().secret().clone();
|
||||||
let requestor_secret = Random.generate().unwrap().secret().clone();
|
let requestor_secret = Random.generate().unwrap().secret().clone();
|
||||||
let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap();
|
let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap();
|
||||||
let server_public = key_servers[0].generate_key(&server_key_id, &signature, *threshold).unwrap();
|
let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), *threshold).unwrap();
|
||||||
|
|
||||||
// generate document key (this is done by KS client so that document key is unknown to any KS)
|
// generate document key (this is done by KS client so that document key is unknown to any KS)
|
||||||
let generated_key = Random.generate().unwrap().public().clone();
|
let generated_key = Random.generate().unwrap().public().clone();
|
||||||
let encrypted_document_key = math::encrypt_secret(&generated_key, &server_public).unwrap();
|
let encrypted_document_key = math::encrypt_secret(&generated_key, &server_public).unwrap();
|
||||||
|
|
||||||
// store document key
|
// store document key
|
||||||
key_servers[0].store_document_key(&server_key_id, &signature, encrypted_document_key.common_point, encrypted_document_key.encrypted_point).unwrap();
|
key_servers[0].store_document_key(&server_key_id, &signature.clone().into(),
|
||||||
|
encrypted_document_key.common_point, encrypted_document_key.encrypted_point).unwrap();
|
||||||
|
|
||||||
// now let's try to retrieve key back
|
// now let's try to retrieve key back
|
||||||
for key_server in key_servers.iter() {
|
for key_server in key_servers.iter() {
|
||||||
let retrieved_key = key_server.restore_document_key(&server_key_id, &signature).unwrap();
|
let retrieved_key = key_server.restore_document_key(&server_key_id, &signature.clone().into()).unwrap();
|
||||||
let retrieved_key = ethcrypto::ecies::decrypt(&requestor_secret, ðcrypto::DEFAULT_MAC, &retrieved_key).unwrap();
|
let retrieved_key = ethcrypto::ecies::decrypt(&requestor_secret, ðcrypto::DEFAULT_MAC, &retrieved_key).unwrap();
|
||||||
let retrieved_key = Public::from_slice(&retrieved_key);
|
let retrieved_key = Public::from_slice(&retrieved_key);
|
||||||
assert_eq!(retrieved_key, generated_key);
|
assert_eq!(retrieved_key, generated_key);
|
||||||
@ -423,7 +429,7 @@ pub mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn server_key_generation_and_message_signing_works_over_network_with_3_nodes() {
|
fn server_key_generation_and_message_signing_works_over_network_with_3_nodes() {
|
||||||
//::logger::init_log();
|
//::logger::init_log();
|
||||||
let key_servers = make_key_servers(6100, 3);
|
let (key_servers, _) = make_key_servers(6100, 3);
|
||||||
|
|
||||||
let test_cases = [0, 1, 2];
|
let test_cases = [0, 1, 2];
|
||||||
for threshold in &test_cases {
|
for threshold in &test_cases {
|
||||||
@ -431,11 +437,11 @@ pub mod tests {
|
|||||||
let server_key_id = Random.generate().unwrap().secret().clone();
|
let server_key_id = Random.generate().unwrap().secret().clone();
|
||||||
let requestor_secret = Random.generate().unwrap().secret().clone();
|
let requestor_secret = Random.generate().unwrap().secret().clone();
|
||||||
let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap();
|
let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap();
|
||||||
let server_public = key_servers[0].generate_key(&server_key_id, &signature, *threshold).unwrap();
|
let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), *threshold).unwrap();
|
||||||
|
|
||||||
// sign message
|
// sign message
|
||||||
let message_hash = H256::from(42);
|
let message_hash = H256::from(42);
|
||||||
let combined_signature = key_servers[0].sign_message_schnorr(&server_key_id, &signature, message_hash.clone()).unwrap();
|
let combined_signature = key_servers[0].sign_message_schnorr(&server_key_id, &signature.into(), message_hash.clone()).unwrap();
|
||||||
let combined_signature = ethcrypto::ecies::decrypt(&requestor_secret, ðcrypto::DEFAULT_MAC, &combined_signature).unwrap();
|
let combined_signature = ethcrypto::ecies::decrypt(&requestor_secret, ðcrypto::DEFAULT_MAC, &combined_signature).unwrap();
|
||||||
let signature_c = Secret::from_slice(&combined_signature[..32]);
|
let signature_c = Secret::from_slice(&combined_signature[..32]);
|
||||||
let signature_s = Secret::from_slice(&combined_signature[32..]);
|
let signature_s = Secret::from_slice(&combined_signature[32..]);
|
||||||
@ -448,21 +454,21 @@ pub mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn decryption_session_is_delegated_when_node_does_not_have_key_share() {
|
fn decryption_session_is_delegated_when_node_does_not_have_key_share() {
|
||||||
//::logger::init_log();
|
//::logger::init_log();
|
||||||
let key_servers = make_key_servers(6110, 3);
|
let (key_servers, _) = make_key_servers(6110, 3);
|
||||||
|
|
||||||
// generate document key
|
// generate document key
|
||||||
let threshold = 0;
|
let threshold = 0;
|
||||||
let document = Random.generate().unwrap().secret().clone();
|
let document = Random.generate().unwrap().secret().clone();
|
||||||
let secret = Random.generate().unwrap().secret().clone();
|
let secret = Random.generate().unwrap().secret().clone();
|
||||||
let signature = ethkey::sign(&secret, &document).unwrap();
|
let signature = ethkey::sign(&secret, &document).unwrap();
|
||||||
let generated_key = key_servers[0].generate_document_key(&document, &signature, threshold).unwrap();
|
let generated_key = key_servers[0].generate_document_key(&document, &signature.clone().into(), threshold).unwrap();
|
||||||
let generated_key = ethcrypto::ecies::decrypt(&secret, ðcrypto::DEFAULT_MAC, &generated_key).unwrap();
|
let generated_key = ethcrypto::ecies::decrypt(&secret, ðcrypto::DEFAULT_MAC, &generated_key).unwrap();
|
||||||
|
|
||||||
// remove key from node0
|
// remove key from node0
|
||||||
key_servers[0].cluster().key_storage().remove(&document).unwrap();
|
key_servers[0].cluster().key_storage().remove(&document).unwrap();
|
||||||
|
|
||||||
// now let's try to retrieve key back by requesting it from node0, so that session must be delegated
|
// now let's try to retrieve key back by requesting it from node0, so that session must be delegated
|
||||||
let retrieved_key = key_servers[0].restore_document_key(&document, &signature).unwrap();
|
let retrieved_key = key_servers[0].restore_document_key(&document, &signature.into()).unwrap();
|
||||||
let retrieved_key = ethcrypto::ecies::decrypt(&secret, ðcrypto::DEFAULT_MAC, &retrieved_key).unwrap();
|
let retrieved_key = ethcrypto::ecies::decrypt(&secret, ðcrypto::DEFAULT_MAC, &retrieved_key).unwrap();
|
||||||
assert_eq!(retrieved_key, generated_key);
|
assert_eq!(retrieved_key, generated_key);
|
||||||
}
|
}
|
||||||
@ -470,21 +476,21 @@ pub mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn schnorr_signing_session_is_delegated_when_node_does_not_have_key_share() {
|
fn schnorr_signing_session_is_delegated_when_node_does_not_have_key_share() {
|
||||||
//::logger::init_log();
|
//::logger::init_log();
|
||||||
let key_servers = make_key_servers(6114, 3);
|
let (key_servers, _) = make_key_servers(6114, 3);
|
||||||
let threshold = 1;
|
let threshold = 1;
|
||||||
|
|
||||||
// generate server key
|
// generate server key
|
||||||
let server_key_id = Random.generate().unwrap().secret().clone();
|
let server_key_id = Random.generate().unwrap().secret().clone();
|
||||||
let requestor_secret = Random.generate().unwrap().secret().clone();
|
let requestor_secret = Random.generate().unwrap().secret().clone();
|
||||||
let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap();
|
let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap();
|
||||||
let server_public = key_servers[0].generate_key(&server_key_id, &signature, threshold).unwrap();
|
let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), threshold).unwrap();
|
||||||
|
|
||||||
// remove key from node0
|
// remove key from node0
|
||||||
key_servers[0].cluster().key_storage().remove(&server_key_id).unwrap();
|
key_servers[0].cluster().key_storage().remove(&server_key_id).unwrap();
|
||||||
|
|
||||||
// sign message
|
// sign message
|
||||||
let message_hash = H256::from(42);
|
let message_hash = H256::from(42);
|
||||||
let combined_signature = key_servers[0].sign_message_schnorr(&server_key_id, &signature, message_hash.clone()).unwrap();
|
let combined_signature = key_servers[0].sign_message_schnorr(&server_key_id, &signature.into(), message_hash.clone()).unwrap();
|
||||||
let combined_signature = ethcrypto::ecies::decrypt(&requestor_secret, ðcrypto::DEFAULT_MAC, &combined_signature).unwrap();
|
let combined_signature = ethcrypto::ecies::decrypt(&requestor_secret, ðcrypto::DEFAULT_MAC, &combined_signature).unwrap();
|
||||||
let signature_c = Secret::from_slice(&combined_signature[..32]);
|
let signature_c = Secret::from_slice(&combined_signature[..32]);
|
||||||
let signature_s = Secret::from_slice(&combined_signature[32..]);
|
let signature_s = Secret::from_slice(&combined_signature[32..]);
|
||||||
@ -496,21 +502,21 @@ pub mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn ecdsa_signing_session_is_delegated_when_node_does_not_have_key_share() {
|
fn ecdsa_signing_session_is_delegated_when_node_does_not_have_key_share() {
|
||||||
//::logger::init_log();
|
//::logger::init_log();
|
||||||
let key_servers = make_key_servers(6117, 4);
|
let (key_servers, _) = make_key_servers(6117, 4);
|
||||||
let threshold = 1;
|
let threshold = 1;
|
||||||
|
|
||||||
// generate server key
|
// generate server key
|
||||||
let server_key_id = Random.generate().unwrap().secret().clone();
|
let server_key_id = Random.generate().unwrap().secret().clone();
|
||||||
let requestor_secret = Random.generate().unwrap().secret().clone();
|
let requestor_secret = Random.generate().unwrap().secret().clone();
|
||||||
let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap();
|
let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap();
|
||||||
let server_public = key_servers[0].generate_key(&server_key_id, &signature, threshold).unwrap();
|
let server_public = key_servers[0].generate_key(&server_key_id, &signature.clone().into(), threshold).unwrap();
|
||||||
|
|
||||||
// remove key from node0
|
// remove key from node0
|
||||||
key_servers[0].cluster().key_storage().remove(&server_key_id).unwrap();
|
key_servers[0].cluster().key_storage().remove(&server_key_id).unwrap();
|
||||||
|
|
||||||
// sign message
|
// sign message
|
||||||
let message_hash = H256::random();
|
let message_hash = H256::random();
|
||||||
let signature = key_servers[0].sign_message_ecdsa(&server_key_id, &signature, message_hash.clone()).unwrap();
|
let signature = key_servers[0].sign_message_ecdsa(&server_key_id, &signature.into(), message_hash.clone()).unwrap();
|
||||||
let signature = ethcrypto::ecies::decrypt(&requestor_secret, ðcrypto::DEFAULT_MAC, &signature).unwrap();
|
let signature = ethcrypto::ecies::decrypt(&requestor_secret, ðcrypto::DEFAULT_MAC, &signature).unwrap();
|
||||||
let signature: H520 = signature[0..65].into();
|
let signature: H520 = signature[0..65].into();
|
||||||
|
|
||||||
|
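Note on the test-harness pattern above: make_key_servers now returns the per-node key storages alongside the servers, so tests can assert that a generated document key actually left a common_point/encrypted_point in every node's storage, and every *_key call passes &signature...into() because the API now takes a Requester rather than a raw signature. A minimal sketch of the conversion idea, with stand-in types rather than the crate's own (the real Requester, per this PR's description, can also identify a requester by public key or address):

#[derive(Clone)]
struct Signature(Vec<u8>);

#[derive(Clone)]
enum Requester {
	Signature(Signature),
	// ...Public/Address variants omitted in this sketch
}

impl From<Signature> for Requester {
	fn from(signature: Signature) -> Self {
		Requester::Signature(signature)
	}
}

fn restore_document_key(requester: &Requester) {
	// the server can now branch on how the requester identified itself
	match requester {
		Requester::Signature(_) => { /* recover public key from the signature */ },
	}
}

fn main() {
	let signature = Signature(vec![0u8; 65]);
	// the `.clone().into()` seen in the tests above: Signature -> Requester
	restore_document_key(&signature.clone().into());
}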
@@ -16,7 +16,7 @@
 
 use std::sync::Arc;
 use std::collections::{BTreeSet, BTreeMap};
-use ethereum_types::H256;
+use ethereum_types::{Address, H256};
 use ethkey::Secret;
 use parking_lot::{Mutex, Condvar};
 use key_server_cluster::{Error, SessionId, NodeId, DocumentKeyShare};
@@ -55,8 +55,8 @@ pub struct SessionImpl<T: SessionTransport> {
 /// Action after key version is negotiated.
 #[derive(Clone)]
 pub enum ContinueAction {
-	/// Decryption session + is_shadow_decryption.
-	Decrypt(Arc<DecryptionSession>, bool),
+	/// Decryption session + origin + is_shadow_decryption + is_broadcast_decryption.
+	Decrypt(Arc<DecryptionSession>, Option<Address>, bool, bool),
 	/// Schnorr signing session + message hash.
 	SchnorrSign(Arc<SchnorrSigningSession>, H256),
 	/// ECDSA signing session + message hash.
@@ -202,6 +202,7 @@ impl<T> SessionImpl<T> where T: SessionTransport {
 	/// Wait for session completion.
 	pub fn wait(&self) -> Result<(H256, NodeId), Error> {
 		Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone())
+			.expect("wait_session returns Some if called without timeout; qed")
 	}
 
 	/// Initialize session.
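ContinueAction::Decrypt grows from (session, is_shadow_decryption) to (session, origin, is_shadow_decryption, is_broadcast_decryption): when key-version negotiation finishes, the follow-up decryption must be resumed with the same service-contract origin and broadcast flag it was started with. A sketch of the dispatch, using stand-in types (the real DecryptionSession::initialize signature appears in the diff further below):

use std::sync::Arc;

type Address = [u8; 20];
type H256 = [u8; 32];

// Stand-in session type for the sketch.
struct DecryptionSession;
impl DecryptionSession {
	fn initialize(&self, _origin: Option<Address>, _version: H256,
		_is_shadow_decryption: bool, _is_broadcast_session: bool) -> Result<(), String> {
		Ok(())
	}
}

enum ContinueAction {
	/// Decryption session + origin + is_shadow_decryption + is_broadcast_decryption.
	Decrypt(Arc<DecryptionSession>, Option<Address>, bool, bool),
}

fn continue_after_negotiation(action: ContinueAction, version: H256) -> Result<(), String> {
	match action {
		// all four pieces of state survive the negotiation round-trip
		ContinueAction::Decrypt(session, origin, is_shadow, is_broadcast) =>
			session.initialize(origin, version, is_shadow, is_broadcast),
	}
}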
@@ -221,6 +221,7 @@ impl SessionImpl {
 	/// Wait for session completion.
 	pub fn wait(&self) -> Result<(), Error> {
 		Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone())
+			.expect("wait_session returns Some if called without timeout; qed")
 	}
 
 	/// Initialize servers set change session on master node.
@@ -337,7 +338,7 @@ impl SessionImpl {
 		}
 
 		let unknown_sessions_job = UnknownSessionsJob::new_on_master(self.core.key_storage.clone(), self.core.meta.self_node_id.clone());
-		consensus_session.disseminate_jobs(unknown_sessions_job, self.unknown_sessions_transport(), false)
+		consensus_session.disseminate_jobs(unknown_sessions_job, self.unknown_sessions_transport(), false).map(|_| ())
 	}
 
 	/// When unknown sessions are requested.
@@ -1166,7 +1167,7 @@ pub mod tests {
 
 	pub fn generate_key(threshold: usize, nodes_ids: BTreeSet<NodeId>) -> GenerationMessageLoop {
 		let mut gml = GenerationMessageLoop::with_nodes_ids(nodes_ids);
		gml.master().initialize(Default::default(), Default::default(), false, threshold, gml.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).unwrap();
 		while let Some((from, to, message)) = gml.take_message() {
 			gml.process_message((from, to, message)).unwrap();
 		}
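Two small API shifts above: wait_session now takes an optional timeout and returns Option<Result<..>>, so the untimed wait() wrappers unwrap it with a qed proof string, and disseminate_jobs now returns the master's own partial response, which callers that only care about side effects discard via .map(|_| ()). The Condvar pattern behind the optional timeout, sketched with std types instead of the crate's parking_lot ones:

use std::sync::{Condvar, Mutex};
use std::time::Duration;

// None => block until a result exists (so the return is always Some);
// Some(t) => give up after t and possibly return None.
fn wait_session<T: Clone>(completed: &Condvar, result: &Mutex<Option<T>>,
	timeout: Option<Duration>) -> Option<T>
{
	let mut guard = result.lock().unwrap();
	loop {
		if guard.is_some() {
			return guard.clone();
		}
		match timeout {
			None => guard = completed.wait(guard).unwrap(),
			Some(timeout) => {
				let (g, wait_result) = completed.wait_timeout(guard, timeout).unwrap();
				guard = g;
				if wait_result.timed_out() {
					return guard.clone(); // may still be None
				}
			},
		}
	}
}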
@@ -14,10 +14,11 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-use std::collections::BTreeSet;
+use std::collections::{BTreeSet, BTreeMap};
 use std::sync::Arc;
+use std::time;
 use parking_lot::{Mutex, Condvar};
-use ethereum_types::H256;
+use ethereum_types::{Address, H256};
 use ethkey::Secret;
 use key_server_cluster::{Error, AclStorage, DocumentKeyShare, NodeId, SessionId, Requester,
 	EncryptedDocumentKeyShadow, SessionMeta};
@@ -26,7 +27,7 @@ use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSessi
 use key_server_cluster::message::{Message, DecryptionMessage, DecryptionConsensusMessage, RequestPartialDecryption,
 	PartialDecryption, DecryptionSessionError, DecryptionSessionCompleted, ConsensusMessage, InitializeConsensusSession,
 	ConfirmConsensusInitialization, DecryptionSessionDelegation, DecryptionSessionDelegationCompleted};
-use key_server_cluster::jobs::job_session::{JobSession, JobTransport};
+use key_server_cluster::jobs::job_session::{JobSession, JobSessionState, JobTransport};
 use key_server_cluster::jobs::key_access_job::KeyAccessJob;
 use key_server_cluster::jobs::decryption_job::{PartialDecryptionRequest, PartialDecryptionResponse, DecryptionJob};
 use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
@@ -71,6 +72,8 @@ type BroadcastDecryptionJobSession = JobSession<DecryptionJob, DecryptionJobTran
 struct SessionData {
 	/// Key version to use for decryption.
 	pub version: Option<H256>,
+	/// Session origin (if any).
+	pub origin: Option<Address>,
 	/// Consensus-based decryption session.
 	pub consensus_session: DecryptionConsensusSession,
 	/// Broadcast decryption job.
@@ -110,6 +113,8 @@ struct DecryptionConsensusTransport {
 	access_key: Secret,
 	/// Session-level nonce.
 	nonce: u64,
+	/// Session origin (if any).
+	origin: Option<Address>,
 	/// Selected key version (on master node).
 	version: Option<H256>,
 	/// Cluster.
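The new origin: Option<Address> on SessionData and on the consensus transport is the thread that ties a decryption session back to the service contract that requested it: None suggests an ordinary (RPC-initiated) session, Some(address) that the result should be published back on-chain. A stand-in sketch of that routing decision (names and the println! stand in for the real response path, which this diff does not show):

type Address = [u8; 20];

struct SessionData {
	origin: Option<Address>,
}

fn publish_result(data: &SessionData, encrypted_result: &[u8]) {
	match data.origin {
		// session was started on behalf of a service contract =>
		// the response goes back to that contract
		Some(origin) => println!("responding on-chain to {:02x?} ({} bytes)",
			origin, encrypted_result.len()),
		// plain RPC session => nothing to publish
		None => {},
	}
}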
@@ -157,6 +162,7 @@ impl SessionImpl {
 			id: params.meta.id.clone(),
 			access_key: params.access_key.clone(),
 			nonce: params.nonce,
+			origin: None,
 			version: None,
 			cluster: params.cluster.clone(),
 		};
@@ -180,6 +186,7 @@ impl SessionImpl {
 			},
 			data: Mutex::new(SessionData {
 				version: None,
+				origin: None,
 				consensus_session: consensus_session,
 				broadcast_job_session: None,
 				is_shadow_decryption: None,
@@ -214,13 +221,42 @@ impl SessionImpl {
 		self.data.lock().result.clone()
 	}
 
+	/// Get key requester.
+	pub fn requester(&self) -> Option<Requester> {
+		self.data.lock().consensus_session.consensus_job().executor().requester().cloned()
+	}
+
+	/// Get session origin.
+	pub fn origin(&self) -> Option<Address> {
+		self.data.lock().origin.clone()
+	}
+
 	/// Wait for session completion.
-	pub fn wait(&self) -> Result<EncryptedDocumentKeyShadow, Error> {
-		Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone())
+	pub fn wait(&self, timeout: Option<time::Duration>) -> Option<Result<EncryptedDocumentKeyShadow, Error>> {
+		Self::wait_session(&self.core.completed, &self.data, timeout, |data| data.result.clone())
+	}
+
+	/// Get broadcasted shadows.
+	pub fn broadcast_shadows(&self) -> Option<BTreeMap<NodeId, Vec<u8>>> {
+		let data = self.data.lock();
+
+		if data.result.is_none() || (data.is_broadcast_session, data.is_shadow_decryption) != (Some(true), Some(true)) {
+			return None;
+		}
+
+		let proof = "data.is_shadow_decryption is true; decrypt_shadow.is_some() is checked in DecryptionJob::check_partial_response; qed";
+		Some(match self.core.meta.master_node_id == self.core.meta.self_node_id {
+			true => data.consensus_session.computation_job().responses().iter()
+				.map(|(n, r)| (n.clone(), r.decrypt_shadow.clone().expect(proof)))
+				.collect(),
+			false => data.broadcast_job_session.as_ref().expect("session completed; is_shadow_decryption == true; we're on non-master node; qed").responses().iter()
+				.map(|(n, r)| (n.clone(), r.decrypt_shadow.clone().expect(proof)))
+				.collect(),
+		})
 	}
 
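wait changing from Result<..> to Option<Result<..>> with an optional timeout is what lets a caller poll a decryption session instead of blocking a thread on it, and broadcast_shadows only yields data once a shadow broadcast session has completed on this node. A usage sketch against a stand-in wait with the same shape:

use std::time::Duration;

type EncryptedDocumentKeyShadow = Vec<u8>;

// Same contract as the new SessionImpl::wait: None while still running
// under a timeout, Some(result) once the session completed.
fn poll(wait: impl Fn(Option<Duration>) -> Option<Result<EncryptedDocumentKeyShadow, String>>) {
	match wait(Some(Duration::from_secs(10))) {
		None => println!("session still running; poll again later"),
		Some(Ok(shadow)) => println!("decryption completed: {} bytes", shadow.len()),
		Some(Err(error)) => println!("decryption failed: {}", error),
	}
}

fn main() {
	// a completed session always reports Some(..), even with a timeout
	poll(|_timeout| Some(Ok(vec![0u8; 32])));
}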
 	/// Delegate session to other node.
-	pub fn delegate(&self, master: NodeId, version: H256, is_shadow_decryption: bool, is_broadcast_session: bool) -> Result<(), Error> {
+	pub fn delegate(&self, master: NodeId, origin: Option<Address>, version: H256, is_shadow_decryption: bool, is_broadcast_session: bool) -> Result<(), Error> {
 		if self.core.meta.master_node_id != self.core.meta.self_node_id {
 			return Err(Error::InvalidStateForRequest);
 		}
@@ -235,6 +271,7 @@ impl SessionImpl {
 			session: self.core.meta.id.clone().into(),
 			sub_session: self.core.access_key.clone().into(),
 			session_nonce: self.core.nonce,
+			origin: origin.map(Into::into),
 			requester: data.consensus_session.consensus_job().executor().requester()
 				.expect("signature is passed to master node on creation; session can be delegated from master node only; qed")
 				.clone().into(),
@@ -247,7 +284,7 @@ impl SessionImpl {
 	}
 
 	/// Initialize decryption session on master node.
-	pub fn initialize(&self, version: H256, is_shadow_decryption: bool, is_broadcast_session: bool) -> Result<(), Error> {
+	pub fn initialize(&self, origin: Option<Address>, version: H256, is_shadow_decryption: bool, is_broadcast_session: bool) -> Result<(), Error> {
 		debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id);
 
 		// check if version exists
@@ -268,6 +305,8 @@ impl SessionImpl {
 		}
 
 		data.consensus_session.consensus_job_mut().transport_mut().version = Some(version.clone());
+		data.consensus_session.consensus_job_mut().transport_mut().origin = origin.clone();
+		data.origin = origin;
 		data.version = Some(version.clone());
 		data.is_shadow_decryption = Some(is_shadow_decryption);
 		data.is_broadcast_session = Some(is_broadcast_session);
@@ -323,7 +362,7 @@ impl SessionImpl {
 			data.delegation_status = Some(DelegationStatus::DelegatedFrom(sender.clone(), message.session_nonce));
 		}
 
-		self.initialize(message.version.clone().into(), message.is_shadow_decryption, message.is_broadcast_session)
+		self.initialize(message.origin.clone().map(Into::into), message.version.clone().into(), message.is_shadow_decryption, message.is_broadcast_session)
 	}
 
 	/// When delegated session is completed on other node.
@@ -364,6 +403,7 @@ impl SessionImpl {
 				.unwrap_or(false);
 			data.consensus_session.consensus_job_mut().executor_mut().set_has_key_share(has_key_share);
 			data.version = Some(version);
+			data.origin = message.origin.clone().map(Into::into);
 		}
 		data.consensus_session.on_consensus_message(&sender, &message.message)?;
 
@@ -397,13 +437,19 @@ impl SessionImpl {
 			let requester_public = data.consensus_session.consensus_job().executor().requester()
 				.ok_or(Error::InvalidStateForRequest)?
 				.public(&self.core.meta.id)
-				.ok_or(Error::InsufficientRequesterData)?;
+				.map_err(Error::InsufficientRequesterData)?;
 			let decryption_job = DecryptionJob::new_on_slave(self.core.meta.self_node_id.clone(), self.core.access_key.clone(),
 				requester_public.clone(), key_share.clone(), key_version)?;
 			let decryption_transport = self.core.decryption_transport(false);
 
+			// update flags if not on master
+			if self.core.meta.self_node_id != self.core.meta.master_node_id {
+				data.is_shadow_decryption = Some(message.is_shadow_decryption);
+				data.is_broadcast_session = Some(message.is_broadcast_session);
+			}
+
 			// respond to request
-			data.consensus_session.on_job_request(sender, PartialDecryptionRequest {
+			let partial_decryption = data.consensus_session.on_job_request(sender, PartialDecryptionRequest {
 				id: message.request_id.clone().into(),
 				is_shadow_decryption: message.is_shadow_decryption,
 				is_broadcast_session: message.is_broadcast_session,
@@ -417,7 +463,7 @@ impl SessionImpl {
 				self.core.access_key.clone(), requester_public, key_share.clone(), key_version,
 				message.is_shadow_decryption, message.is_broadcast_session)?;
 			Self::create_broadcast_decryption_job(&self.core, &mut *data, consensus_group, broadcast_decryption_job,
-				message.request_id.clone().into())?;
+				message.request_id.clone().into(), Some(partial_decryption.take_response()))?;
 		}
 
 		Ok(())
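Besides threading origin through delegate/initialize and the delegation/consensus messages, the hunk above changes two behaviors: a non-master node now records the is_shadow_decryption/is_broadcast_session flags it was asked to serve, and on_job_request hands back the node's own partial decryption so it can seed the broadcast job instead of being recomputed. The seeding idea in miniature, with stand-in types:

// Stand-in for the consensus job's outcome: it carries the response this
// very node computed while answering the request.
struct PartialDecryption {
	response: Vec<u8>,
}

impl PartialDecryption {
	fn take_response(self) -> Vec<u8> {
		self.response
	}
}

struct BroadcastJob {
	responses: Vec<Vec<u8>>,
}

impl BroadcastJob {
	// the broadcast job starts out already holding our own response, mirroring
	// create_broadcast_decryption_job(.., Some(partial_decryption.take_response()))
	fn new(self_response: Option<Vec<u8>>) -> Self {
		BroadcastJob { responses: self_response.into_iter().collect() }
	}
}

fn main() {
	let partial = PartialDecryption { response: vec![1, 2, 3] };
	let job = BroadcastJob::new(Some(partial.take_response()));
	assert_eq!(job.responses.len(), 1);
}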
@@ -430,38 +476,52 @@ impl SessionImpl {
 		debug_assert!(sender != &self.core.meta.self_node_id);
 
 		let mut data = self.data.lock();
-		if self.core.meta.self_node_id == self.core.meta.master_node_id {
+		let is_master_node = self.core.meta.self_node_id == self.core.meta.master_node_id;
+		let result = if is_master_node {
 			data.consensus_session.on_job_response(sender, PartialDecryptionResponse {
 				request_id: message.request_id.clone().into(),
 				shadow_point: message.shadow_point.clone().into(),
 				decrypt_shadow: message.decrypt_shadow.clone(),
 			})?;
+
+			if data.consensus_session.state() != ConsensusSessionState::Finished &&
+				data.consensus_session.state() != ConsensusSessionState::Failed {
+				return Ok(());
+			}
+
+			// send completion signal to all nodes, except for rejected nodes
+			if is_master_node {
+				for node in data.consensus_session.consensus_non_rejected_nodes() {
+					self.core.cluster.send(&node, Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(DecryptionSessionCompleted {
+						session: self.core.meta.id.clone().into(),
+						sub_session: self.core.access_key.clone().into(),
+						session_nonce: self.core.nonce,
+					})))?;
+				}
+			}
+
+			data.consensus_session.result()
 		} else {
 			match data.broadcast_job_session.as_mut() {
-				Some(broadcast_job_session) => broadcast_job_session.on_partial_response(sender, PartialDecryptionResponse {
-					request_id: message.request_id.clone().into(),
-					shadow_point: message.shadow_point.clone().into(),
-					decrypt_shadow: message.decrypt_shadow.clone(),
-				})?,
-				None => return Err(Error::TooEarlyForRequest),
+				Some(broadcast_job_session) => {
+					broadcast_job_session.on_partial_response(sender, PartialDecryptionResponse {
+						request_id: message.request_id.clone().into(),
+						shadow_point: message.shadow_point.clone().into(),
+						decrypt_shadow: message.decrypt_shadow.clone(),
+					})?;
+
+					if broadcast_job_session.state() != JobSessionState::Finished &&
+						broadcast_job_session.state() != JobSessionState::Failed {
+						return Ok(());
+					}
+
+					broadcast_job_session.result()
+				},
+				None => return Err(Error::InvalidMessage),
 			}
-		}
+		};
 
-		if data.consensus_session.state() != ConsensusSessionState::Finished {
-			return Ok(());
-		}
-
-		// send compeltion signal to all nodes, except for rejected nodes
-		for node in data.consensus_session.consensus_non_rejected_nodes() {
-			self.core.cluster.send(&node, Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(DecryptionSessionCompleted {
-				session: self.core.meta.id.clone().into(),
-				sub_session: self.core.access_key.clone().into(),
-				session_nonce: self.core.nonce,
-			})))?;
-		}
-
-		let result = data.consensus_session.result()?;
-		Self::set_decryption_result(&self.core, &mut *data, Ok(result));
+		Self::set_decryption_result(&self.core, &mut *data, result);
 
 		Ok(())
 	}
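The rewrite above turns two diverging exit paths into one: each branch (master consensus job, non-master broadcast job) either returns early while its job is still running or produces a result, and set_decryption_result is then called exactly once, for failures as well as successes. It also tightens the error for a partial response arriving before any broadcast job exists (InvalidMessage instead of TooEarlyForRequest) and checks Failed in addition to Finished. A skeleton of the single-exit shape, with stand-in types:

#[derive(PartialEq)]
enum JobState { InProgress, Finished, Failed }

fn on_partial_response(state: JobState, job_result: Result<u64, String>,
	recorded: &mut Option<Result<u64, String>>) -> Result<(), String>
{
	// keep waiting: neither success nor failure has been decided yet
	if state != JobState::Finished && state != JobState::Failed {
		return Ok(());
	}
	// exactly one place records the session outcome, whatever it is
	*recorded = Some(job_result);
	Ok(())
}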
@@ -543,28 +603,31 @@ impl SessionImpl {
 
 		let key_version = key_share.version(version).map_err(|e| Error::KeyStorage(e.into()))?.hash.clone();
 		let requester = data.consensus_session.consensus_job().executor().requester().ok_or(Error::InvalidStateForRequest)?.clone();
-		let requester_public = requester.public(&core.meta.id).ok_or(Error::InsufficientRequesterData)?;
+		let requester_public = requester.public(&core.meta.id).map_err(Error::InsufficientRequesterData)?;
 		let consensus_group = data.consensus_session.select_consensus_group()?.clone();
 		let decryption_job = DecryptionJob::new_on_master(core.meta.self_node_id.clone(),
 			core.access_key.clone(), requester_public.clone(), key_share.clone(), key_version,
 			is_shadow_decryption, is_broadcast_session)?;
-		let decryption_request_id = decryption_job.request_id().clone().expect("TODO");
+		let decryption_request_id = decryption_job.request_id().clone()
+			.expect("DecryptionJob always have request_id when created on master; it is created using new_on_master above; qed");
 		let decryption_transport = core.decryption_transport(false);
-		data.consensus_session.disseminate_jobs(decryption_job, decryption_transport, data.is_broadcast_session.expect("TODO"))?;
+		let is_broadcast_session = data.is_broadcast_session
+			.expect("disseminate_jobs is called on master node only; on master node is_broadcast_session is filled during initialization; qed");
+		let self_response = data.consensus_session.disseminate_jobs(decryption_job, decryption_transport, is_broadcast_session)?;
 
 		// ...and prepare decryption job session if we need to broadcast result
-		if data.is_broadcast_session.expect("TODO") {
+		if is_broadcast_session {
 			let broadcast_decryption_job = DecryptionJob::new_on_master(core.meta.self_node_id.clone(),
 				core.access_key.clone(), requester_public, key_share.clone(), key_version, is_shadow_decryption, is_broadcast_session)?;
 			Self::create_broadcast_decryption_job(&core, data, consensus_group, broadcast_decryption_job,
-				decryption_request_id)?;
+				decryption_request_id, self_response)?;
 		}
 
 		Ok(())
 	}
 
 	/// Create broadcast decryption job.
-	fn create_broadcast_decryption_job(core: &SessionCore, data: &mut SessionData, mut consensus_group: BTreeSet<NodeId>, mut job: DecryptionJob, request_id: Secret) -> Result<(), Error> {
+	fn create_broadcast_decryption_job(core: &SessionCore, data: &mut SessionData, mut consensus_group: BTreeSet<NodeId>, mut job: DecryptionJob, request_id: Secret, self_response: Option<PartialDecryptionResponse>) -> Result<(), Error> {
 		consensus_group.insert(core.meta.self_node_id.clone());
 		job.set_request_id(request_id.clone().into());
 
@@ -575,7 +638,7 @@ impl SessionImpl {
 			self_node_id: core.meta.self_node_id.clone(),
 			threshold: core.meta.threshold,
 		}, job, transport);
-		job_session.initialize(consensus_group, core.meta.self_node_id != core.meta.master_node_id)?;
+		job_session.initialize(consensus_group, self_response, core.meta.self_node_id != core.meta.master_node_id)?;
 		data.broadcast_job_session = Some(job_session);
 
 		Ok(())
@@ -691,6 +754,7 @@ impl JobTransport for DecryptionConsensusTransport {
 			session: self.id.clone().into(),
 			sub_session: self.access_key.clone().into(),
 			session_nonce: self.nonce,
+			origin: self.origin.clone().map(Into::into),
 			message: ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
 				requester: request.into(),
 				version: version.clone().into(),
@@ -703,6 +767,7 @@ impl JobTransport for DecryptionConsensusTransport {
 			session: self.id.clone().into(),
 			sub_session: self.access_key.clone().into(),
 			session_nonce: self.nonce,
+			origin: None,
 			message: ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
 				is_confirmed: response,
 			})
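Two expect("TODO") placeholders become proper qed proofs (the codebase's convention: when an Option is Some by construction, the expect message states why), and create_broadcast_decryption_job now receives the master's self_response so job_session.initialize starts with that response already counted. The proof convention in miniature, with stand-in values:

fn request_id(created_on_master: bool) -> Option<u64> {
	// mirrors DecryptionJob: only master-created jobs carry a request id
	if created_on_master { Some(42) } else { None }
}

fn master_request_id() -> u64 {
	request_id(true)
		// never panics: the call site just above constructs the job on master,
		// which is exactly the reasoning the qed message spells out
		.expect("job is created on master; master-created jobs always have request_id; qed")
}

fn main() {
	assert_eq!(master_request_id(), 42);
}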
@@ -751,7 +816,7 @@ mod tests {
 	use std::sync::Arc;
 	use std::collections::{BTreeMap, VecDeque};
 	use acl_storage::DummyAclStorage;
-	use ethkey::{self, KeyPair, Random, Generator, Public, Secret};
+	use ethkey::{self, KeyPair, Random, Generator, Public, Secret, public_to_address};
 	use key_server_cluster::{NodeId, DocumentKeyShare, DocumentKeyShareVersion, SessionId, Requester,
 		Error, EncryptedDocumentKeyShadow, SessionMeta};
 	use key_server_cluster::cluster::tests::DummyCluster;
@@ -918,7 +983,7 @@ mod tests {
 			cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
 			nonce: 0,
 		}, Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()))).unwrap();
-		assert_eq!(session.initialize(Default::default(), false, false), Err(Error::InvalidMessage));
+		assert_eq!(session.initialize(Default::default(), Default::default(), false, false), Err(Error::InvalidMessage));
 	}
 
 	#[test]
@@ -951,24 +1016,25 @@ mod tests {
 			cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
 			nonce: 0,
 		}, Some(Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()))).unwrap();
-		assert_eq!(session.initialize(Default::default(), false, false), Err(Error::ConsensusUnreachable));
+		assert_eq!(session.initialize(Default::default(), Default::default(), false, false), Err(Error::ConsensusUnreachable));
 	}
 
 	#[test]
 	fn fails_to_initialize_when_already_initialized() {
 		let (_, _, _, sessions) = prepare_decryption_sessions();
-		assert_eq!(sessions[0].initialize(Default::default(), false, false).unwrap(), ());
-		assert_eq!(sessions[0].initialize(Default::default(), false, false).unwrap_err(), Error::InvalidStateForRequest);
+		assert_eq!(sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap(), ());
+		assert_eq!(sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap_err(), Error::InvalidStateForRequest);
 	}
 
 	#[test]
 	fn fails_to_accept_initialization_when_already_initialized() {
 		let (_, _, _, sessions) = prepare_decryption_sessions();
-		assert_eq!(sessions[0].initialize(Default::default(), false, false).unwrap(), ());
+		assert_eq!(sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap(), ());
 		assert_eq!(sessions[0].on_consensus_message(sessions[1].node(), &message::DecryptionConsensusMessage {
 			session: SessionId::default().into(),
 			sub_session: sessions[0].access_key().clone().into(),
 			session_nonce: 0,
+			origin: None,
 			message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession {
 				requester: Requester::Signature(ethkey::sign(
 					Random.generate().unwrap().secret(), &SessionId::default()).unwrap()).into(),
@@ -984,6 +1050,7 @@ mod tests {
 			session: SessionId::default().into(),
 			sub_session: sessions[0].access_key().clone().into(),
 			session_nonce: 0,
+			origin: None,
 			message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession {
 				requester: Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(),
 					&SessionId::default()).unwrap()).into(),
@@ -1008,6 +1075,7 @@ mod tests {
 			session: SessionId::default().into(),
 			sub_session: sessions[0].access_key().clone().into(),
 			session_nonce: 0,
+			origin: None,
 			message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession {
 				requester: Requester::Signature(ethkey::sign(Random.generate().unwrap().secret(),
 					&SessionId::default()).unwrap()).into(),
@@ -1041,7 +1109,7 @@ mod tests {
 	#[test]
 	fn fails_to_accept_partial_decrypt_twice() {
 		let (_, clusters, _, sessions) = prepare_decryption_sessions();
-		sessions[0].initialize(Default::default(), false, false).unwrap();
+		sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap();
 
 		let mut pd_from = None;
 		let mut pd_msg = None;
@@ -1069,7 +1137,7 @@ mod tests {
 	#[test]
 	fn node_is_marked_rejected_when_timed_out_during_initialization_confirmation() {
 		let (_, _, _, sessions) = prepare_decryption_sessions();
-		sessions[0].initialize(Default::default(), false, false).unwrap();
+		sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap();
 
 		// 1 node disconnects => we still can recover secret
 		sessions[0].on_node_timeout(sessions[1].node());
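From here on, the test updates are mechanical: every initialize/delegate gains a leading Default::default() for the new origin argument, and ACL calls go through public_to_address because access is now keyed by the requester's address rather than its public key. A stand-in sketch of the key-to-address step (the real ethkey helper Keccak-hashes the public key and keeps 20 bytes of the digest; this sketch only truncates, to stay dependency-free):

type Public = [u8; 64];
type Address = [u8; 20];

// Stand-in for ethkey::public_to_address: truncation instead of Keccak-256,
// purely so the sketch compiles without the real crate.
fn public_to_address(public: &Public) -> Address {
	let mut address = [0u8; 20];
	address.copy_from_slice(&public[..20]);
	address
}

fn main() {
	let public = [0u8; 64];
	let address = public_to_address(&public);
	// ACL storage would now be keyed by `address`, not `public`
	assert_eq!(address.len(), 20);
}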
@@ -1086,8 +1154,8 @@ mod tests {
 		let (_, clusters, acl_storages, sessions) = prepare_decryption_sessions();
 		let key_pair = Random.generate().unwrap();
 
-		acl_storages[1].prohibit(key_pair.public().clone(), SessionId::default());
-		sessions[0].initialize(Default::default(), false, false).unwrap();
+		acl_storages[1].prohibit(public_to_address(key_pair.public()), SessionId::default());
+		sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap();
 
 		do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap();
 
@@ -1099,7 +1167,7 @@ mod tests {
 	#[test]
 	fn session_does_not_fail_if_requested_node_disconnects() {
 		let (_, clusters, _, sessions) = prepare_decryption_sessions();
-		sessions[0].initialize(Default::default(), false, false).unwrap();
+		sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap();
 
 		do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap();
 
@@ -1115,7 +1183,7 @@ mod tests {
 	#[test]
 	fn session_does_not_fail_if_node_with_shadow_point_disconnects() {
 		let (_, clusters, _, sessions) = prepare_decryption_sessions();
-		sessions[0].initialize(Default::default(), false, false).unwrap();
+		sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap();
 
 		do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults
 			&& sessions[0].data.lock().consensus_session.computation_job().responses().len() == 2).unwrap();
@@ -1132,7 +1200,7 @@ mod tests {
 	#[test]
 	fn session_restarts_if_confirmed_node_disconnects() {
 		let (_, clusters, _, sessions) = prepare_decryption_sessions();
-		sessions[0].initialize(Default::default(), false, false).unwrap();
+		sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap();
 
 		do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap();
 
@@ -1147,7 +1215,7 @@ mod tests {
 	#[test]
 	fn session_does_not_fail_if_non_master_node_disconnects_from_non_master_node() {
 		let (_, clusters, _, sessions) = prepare_decryption_sessions();
-		sessions[0].initialize(Default::default(), false, false).unwrap();
+		sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap();
 
 		do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap();
 
@@ -1162,7 +1230,7 @@ mod tests {
 		let (_, clusters, _, sessions) = prepare_decryption_sessions();
 
 		// now let's try to do a decryption
-		sessions[0].initialize(Default::default(), false, false).unwrap();
+		sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap();
 
 		do_messages_exchange(&clusters, &sessions).unwrap();
 
@@ -1184,7 +1252,7 @@ mod tests {
 		let (key_pair, clusters, _, sessions) = prepare_decryption_sessions();
 
 		// now let's try to do a decryption
-		sessions[0].initialize(Default::default(), true, false).unwrap();
+		sessions[0].initialize(Default::default(), Default::default(), true, false).unwrap();
 
 		do_messages_exchange(&clusters, &sessions).unwrap();
 
@@ -1215,12 +1283,12 @@ mod tests {
 		let (key_pair, clusters, acl_storages, sessions) = prepare_decryption_sessions();
 
 		// now let's try to do a decryption
-		sessions[0].initialize(Default::default(), false, false).unwrap();
+		sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap();
 
 		// we need 4 out of 5 nodes to agree to do a decryption
 		// let's say that 2 of these nodes are disagree
-		acl_storages[1].prohibit(key_pair.public().clone(), SessionId::default());
-		acl_storages[2].prohibit(key_pair.public().clone(), SessionId::default());
+		acl_storages[1].prohibit(public_to_address(key_pair.public()), SessionId::default());
+		acl_storages[2].prohibit(public_to_address(key_pair.public()), SessionId::default());
 
 		assert_eq!(do_messages_exchange(&clusters, &sessions).unwrap_err(), Error::ConsensusUnreachable);
 
@@ -1235,10 +1303,10 @@ mod tests {
 
 		// we need 4 out of 5 nodes to agree to do a decryption
 		// let's say that 1 of these nodes (master) is disagree
-		acl_storages[0].prohibit(key_pair.public().clone(), SessionId::default());
+		acl_storages[0].prohibit(public_to_address(key_pair.public()), SessionId::default());
 
 		// now let's try to do a decryption
-		sessions[0].initialize(Default::default(), false, false).unwrap();
+		sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap();
 
 		do_messages_exchange(&clusters, &sessions).unwrap();
 
@@ -1278,7 +1346,7 @@ mod tests {
 		);
 
 		// now let's try to do a decryption
-		sessions[1].delegate(sessions[0].core.meta.self_node_id.clone(), Default::default(), false, false).unwrap();
+		sessions[1].delegate(sessions[0].core.meta.self_node_id.clone(), Default::default(), Default::default(), false, false).unwrap();
 		do_messages_exchange(&clusters, &sessions).unwrap();
 
 		// now check that:
@@ -1304,7 +1372,7 @@ mod tests {
 		}
 
 		// now let's try to do a decryption
-		sessions[0].initialize(Default::default(), false, false).unwrap();
+		sessions[0].initialize(Default::default(), Default::default(), false, false).unwrap();
 		do_messages_exchange(&clusters, &sessions).unwrap();
 
 		assert_eq!(sessions[0].decrypted_secret().unwrap().unwrap(), EncryptedDocumentKeyShadow {
@@ -1317,13 +1385,52 @@ mod tests {
 	#[test]
 	fn decryption_result_restored_on_all_nodes_if_broadcast_session_is_completed() {
 		let (_, clusters, _, sessions) = prepare_decryption_sessions();
-		sessions[0].initialize(Default::default(), false, true).unwrap();
+		sessions[0].initialize(Default::default(), Default::default(), false, true).unwrap();
 		do_messages_exchange(&clusters, &sessions).unwrap();
 
 		// decryption result must be the same and available on 4 nodes
 		let result = sessions[0].decrypted_secret();
 		assert!(result.clone().unwrap().is_ok());
+		assert_eq!(result.clone().unwrap().unwrap(), EncryptedDocumentKeyShadow {
+			decrypted_secret: SECRET_PLAIN.into(),
+			common_point: None,
+			decrypt_shadows: None,
+		});
 		assert_eq!(3, sessions.iter().skip(1).filter(|s| s.decrypted_secret() == result).count());
 		assert_eq!(1, sessions.iter().skip(1).filter(|s| s.decrypted_secret().is_none()).count());
 	}
 
+	#[test]
+	fn decryption_shadows_restored_on_all_nodes_if_shadow_broadcast_session_is_completed() {
+		let (key_pair, clusters, _, sessions) = prepare_decryption_sessions();
|
||||||
|
sessions[0].initialize(Default::default(), Default::default(), true, true).unwrap();
|
||||||
|
do_messages_exchange(&clusters, &sessions).unwrap();
|
||||||
|
|
||||||
|
// decryption shadows must be the same and available on 4 nodes
|
||||||
|
let broadcast_shadows = sessions[0].broadcast_shadows();
|
||||||
|
assert!(broadcast_shadows.is_some());
|
||||||
|
assert_eq!(3, sessions.iter().skip(1).filter(|s| s.broadcast_shadows() == broadcast_shadows).count());
|
||||||
|
assert_eq!(1, sessions.iter().skip(1).filter(|s| s.broadcast_shadows().is_none()).count());
|
||||||
|
|
||||||
|
// 4 nodes must be able to recover original secret
|
||||||
|
use ethcrypto::DEFAULT_MAC;
|
||||||
|
use ethcrypto::ecies::decrypt;
|
||||||
|
let result = sessions[0].decrypted_secret().unwrap().unwrap();
|
||||||
|
assert_eq!(3, sessions.iter().skip(1).filter(|s| s.decrypted_secret() == Some(Ok(result.clone()))).count());
|
||||||
|
let decrypt_shadows: Vec<_> = result.decrypt_shadows.unwrap().into_iter()
|
||||||
|
.map(|c| Secret::from_slice(&decrypt(key_pair.secret(), &DEFAULT_MAC, &c).unwrap()))
|
||||||
|
.collect();
|
||||||
|
let decrypted_secret = math::decrypt_with_shadow_coefficients(result.decrypted_secret, result.common_point.unwrap(), decrypt_shadows).unwrap();
|
||||||
|
assert_eq!(decrypted_secret, SECRET_PLAIN.into());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn decryption_session_origin_is_known_to_all_initialized_nodes() {
|
||||||
|
let (_, clusters, _, sessions) = prepare_decryption_sessions();
|
||||||
|
sessions[0].initialize(Some(1.into()), Default::default(), true, true).unwrap();
|
||||||
|
do_messages_exchange(&clusters, &sessions).unwrap();
|
||||||
|
|
||||||
|
// all session must have origin set
|
||||||
|
assert_eq!(5, sessions.iter().filter(|s| s.origin() == Some(1.into())).count());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
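Aside: the combining step exercised by the new shadow-broadcast test is easier to see in miniature. Below is a minimal sketch using additive masking over plain integers instead of the secp256k1 point arithmetic that math::decrypt_with_shadow_coefficients actually performs; every name and constant in it is illustrative, not part of the SecretStore API.

// Toy model of shadow combining: the real code works over secp256k1 points;
// this sketch masks and unmasks with sums modulo a small prime, purely for
// illustration of "collect one shadow per node, then fold them back out".
const P: u64 = 0xffff_fffb; // illustrative prime modulus

// "Encrypt" a secret by masking it with the sum of per-node shadows.
fn mask_with_shadows(secret: u64, shadows: &[u64]) -> u64 {
	shadows.iter().fold(secret, |acc, s| (acc + s % P) % P)
}

// Recover the secret once every shadow has been revealed to the requester.
fn decrypt_with_shadows(masked: u64, shadows: &[u64]) -> u64 {
	shadows.iter().fold(masked, |acc, s| (acc + P - s % P) % P)
}

fn main() {
	let secret = 0xdead_beef % P;
	let shadows = [17, 42, 1000, 7]; // one shadow per participating node
	let masked = mask_with_shadows(secret, &shadows);
	assert_ne!(masked, secret);
	assert_eq!(decrypt_with_shadows(masked, &shadows), secret);
}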
@@ -19,8 +19,10 @@ use std::fmt::{Debug, Formatter, Error as FmtError};
 use std::time;
 use std::sync::Arc;
 use parking_lot::{Condvar, Mutex};
+use ethereum_types::Address;
 use ethkey::Public;
-use key_server_cluster::{Error, NodeId, SessionId, Requester, KeyStorage, DocumentKeyShare};
+use key_server_cluster::{Error, NodeId, SessionId, Requester, KeyStorage,
+	DocumentKeyShare, ServerKeyId};
 use key_server_cluster::cluster::Cluster;
 use key_server_cluster::cluster_sessions::ClusterSession;
 use key_server_cluster::message::{Message, EncryptionMessage, InitializeEncryptionSession,
@@ -107,7 +109,7 @@ pub enum SessionState {
 impl SessionImpl {
 	/// Create new encryption session.
 	pub fn new(params: SessionParams) -> Result<Self, Error> {
-		check_encrypted_data(&params.encrypted_data)?;
+		check_encrypted_data(params.encrypted_data.as_ref())?;

 		Ok(SessionImpl {
 			id: params.id,
@@ -133,9 +135,9 @@ impl SessionImpl {
 	/// Wait for session completion.
 	pub fn wait(&self, timeout: Option<time::Duration>) -> Result<(), Error> {
 		Self::wait_session(&self.completed, &self.data, timeout, |data| data.result.clone())
+			.expect("wait_session returns Some if called without timeout; qed")
 	}

 	/// Start new session initialization. This must be called on master node.
 	pub fn initialize(&self, requester: Requester, common_point: Public, encrypted_point: Public) -> Result<(), Error> {
 		let mut data = self.data.lock();
@@ -155,17 +157,10 @@ impl SessionImpl {
 		// TODO [Reliability]: there could be situation when some nodes have failed to store encrypted data
 		// => potential problems during restore. some confirmation step is needed (2pc)?
 		// save encryption data
-		if let Some(mut encrypted_data) = self.encrypted_data.clone() {
-			// check that the requester is the author of the encrypted data
-			let requester_address = requester.address(&self.id).ok_or(Error::InsufficientRequesterData)?;
-			if encrypted_data.author != requester_address {
-				return Err(Error::AccessDenied);
-			}
-
-			encrypted_data.common_point = Some(common_point.clone());
-			encrypted_data.encrypted_point = Some(encrypted_point.clone());
-			self.key_storage.update(self.id.clone(), encrypted_data)
-				.map_err(|e| Error::KeyStorage(e.into()))?;
+		if let Some(encrypted_data) = self.encrypted_data.clone() {
+			let requester_address = requester.address(&self.id).map_err(Error::InsufficientRequesterData)?;
+			update_encrypted_data(&self.key_storage, self.id.clone(),
+				encrypted_data, requester_address, common_point.clone(), encrypted_point.clone())?;
 		}

 		// start initialization
@@ -199,18 +194,11 @@ impl SessionImpl {
 		}

 		// check that the requester is the author of the encrypted data
-		if let Some(mut encrypted_data) = self.encrypted_data.clone() {
+		if let Some(encrypted_data) = self.encrypted_data.clone() {
 			let requester: Requester = message.requester.clone().into();
-			let requestor_address = requester.address(&self.id).ok_or(Error::InsufficientRequesterData)?;
-			if encrypted_data.author != requestor_address {
-				return Err(Error::AccessDenied);
-			}
-
-			// save encryption data
-			encrypted_data.common_point = Some(message.common_point.clone().into());
-			encrypted_data.encrypted_point = Some(message.encrypted_point.clone().into());
-			self.key_storage.update(self.id.clone(), encrypted_data)
-				.map_err(|e| Error::KeyStorage(e.into()))?;
+			let requester_address = requester.address(&self.id).map_err(Error::InsufficientRequesterData)?;
+			update_encrypted_data(&self.key_storage, self.id.clone(),
+				encrypted_data, requester_address, message.common_point.clone().into(), message.encrypted_point.clone().into())?;
 		}

 		// update state
@@ -333,13 +321,28 @@ impl Debug for SessionImpl {
 	}
 }

-fn check_encrypted_data(encrypted_data: &Option<DocumentKeyShare>) -> Result<(), Error> {
-	if let &Some(ref encrypted_data) = encrypted_data {
+/// Check that common_point and encrypted point are not yet set in key share.
+pub fn check_encrypted_data(key_share: Option<&DocumentKeyShare>) -> Result<(), Error> {
+	if let Some(key_share) = key_share {
 		// check that common_point and encrypted_point are still not set yet
-		if encrypted_data.common_point.is_some() || encrypted_data.encrypted_point.is_some() {
+		if key_share.common_point.is_some() || key_share.encrypted_point.is_some() {
 			return Err(Error::CompletedSessionId);
 		}
 	}

 	Ok(())
 }
+
+/// Update key share with encrypted document key.
+pub fn update_encrypted_data(key_storage: &Arc<KeyStorage>, key_id: ServerKeyId, mut key_share: DocumentKeyShare, author: Address, common_point: Public, encrypted_point: Public) -> Result<(), Error> {
+	// author must be the same
+	if key_share.author != author {
+		return Err(Error::AccessDenied);
+	}
+
+	// save encryption data
+	key_share.common_point = Some(common_point);
+	key_share.encrypted_point = Some(encrypted_point);
+	key_storage.update(key_id, key_share)
+		.map_err(|e| Error::KeyStorage(e.into()))
+}
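Aside: the refactoring above folds the two duplicated author checks into a single update_encrypted_data helper that refuses to overwrite a key share unless the caller is its recorded author. A minimal sketch of that guard, assuming stand-in KeyShare/Error types rather than SecretStore's own (the real helper also persists the share through KeyStorage::update):

// Simplified model of the extracted helper: verify authorship, then fill in
// the encryption points. All types here are stand-ins, not SecretStore types.
#[derive(Debug, PartialEq)]
enum Error { AccessDenied }

#[derive(Clone, Debug)]
struct KeyShare {
	author: [u8; 20],             // requester address recorded at generation time
	common_point: Option<u64>,    // stand-in for a curve point
	encrypted_point: Option<u64>, // stand-in for a curve point
}

fn update_encrypted_data(mut key_share: KeyShare, author: [u8; 20], common_point: u64, encrypted_point: u64) -> Result<KeyShare, Error> {
	// author must be the same
	if key_share.author != author {
		return Err(Error::AccessDenied);
	}
	// save encryption data
	key_share.common_point = Some(common_point);
	key_share.encrypted_point = Some(encrypted_point);
	Ok(key_share)
}

fn main() {
	let share = KeyShare { author: [1; 20], common_point: None, encrypted_point: None };
	assert!(update_encrypted_data(share.clone(), [1; 20], 10, 20).is_ok());
	assert_eq!(update_encrypted_data(share, [2; 20], 10, 20).unwrap_err(), Error::AccessDenied);
}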
@@ -82,6 +82,8 @@ struct SessionData {
 	author: Option<Address>,

 	// === Values, filled when session initialization is completed ===
+	/// Session origin (if any).
+	origin: Option<Address>,
 	/// Is zero secret generation session?
 	is_zero: Option<bool>,
 	/// Threshold value for this DKG. Only `threshold + 1` will be able to collectively recreate joint secret,
@@ -217,6 +219,7 @@ impl SessionImpl {
 				simulate_faulty_behaviour: false,
 				master: None,
 				author: None,
+				origin: None,
 				is_zero: None,
 				threshold: None,
 				derived_point: None,
@@ -251,8 +254,13 @@ impl SessionImpl {
 		self.data.lock().state.clone()
 	}

+	/// Get session origin.
+	pub fn origin(&self) -> Option<Address> {
+		self.data.lock().origin.clone()
+	}
+
 	/// Wait for session completion.
-	pub fn wait(&self, timeout: Option<Duration>) -> Result<Public, Error> {
+	pub fn wait(&self, timeout: Option<Duration>) -> Option<Result<Public, Error>> {
 		Self::wait_session(&self.completed, &self.data, timeout, |data| data.joint_public_and_secret.clone()
 			.map(|r| r.map(|r| r.0.clone())))
 	}
@@ -263,7 +271,7 @@ impl SessionImpl {
 	}

 	/// Start new session initialization. This must be called on master node.
-	pub fn initialize(&self, author: Address, is_zero: bool, threshold: usize, nodes: InitializationNodes) -> Result<(), Error> {
+	pub fn initialize(&self, origin: Option<Address>, author: Address, is_zero: bool, threshold: usize, nodes: InitializationNodes) -> Result<(), Error> {
 		check_cluster_nodes(self.node(), &nodes.set())?;
 		check_threshold(threshold, &nodes.set())?;

@@ -277,6 +285,7 @@ impl SessionImpl {
 		// update state
 		data.master = Some(self.node().clone());
 		data.author = Some(author.clone());
+		data.origin = origin.clone();
 		data.is_zero = Some(is_zero);
 		data.threshold = Some(threshold);
 		match nodes {
@@ -304,6 +313,7 @@ impl SessionImpl {
 			self.cluster.send(&next_node, Message::Generation(GenerationMessage::InitializeSession(InitializeSession {
 				session: self.id.clone().into(),
 				session_nonce: self.nonce,
+				origin: origin.map(Into::into),
 				author: author.into(),
 				nodes: data.nodes.iter().map(|(k, v)| (k.clone().into(), v.id_number.clone().into())).collect(),
 				is_zero: data.is_zero.expect("is_zero is filled in initialization phase; KD phase follows initialization phase; qed"),
@@ -380,6 +390,7 @@ impl SessionImpl {
 		data.author = Some(message.author.clone().into());
 		data.state = SessionState::WaitingForInitializationComplete;
 		data.nodes = message.nodes.iter().map(|(id, number)| (id.clone().into(), NodeData::with_id_number(number.clone().into()))).collect();
+		data.origin = message.origin.clone().map(Into::into);
 		data.is_zero = Some(message.is_zero);
 		data.threshold = Some(message.threshold);

@@ -411,6 +422,7 @@ impl SessionImpl {
 			return self.cluster.send(&next_receiver, Message::Generation(GenerationMessage::InitializeSession(InitializeSession {
 				session: self.id.clone().into(),
 				session_nonce: self.nonce,
+				origin: data.origin.clone().map(Into::into),
 				author: data.author.as_ref().expect("author is filled on initialization step; confrm initialization follows initialization; qed").clone().into(),
 				nodes: data.nodes.iter().map(|(k, v)| (k.clone().into(), v.id_number.clone().into())).collect(),
 				is_zero: data.is_zero.expect("is_zero is filled in initialization phase; KD phase follows initialization phase; qed"),
@@ -937,7 +949,7 @@ pub mod tests {
 	use std::time::Duration;
 	use tokio_core::reactor::Core;
 	use ethereum_types::Address;
-	use ethkey::{Random, Generator, Public, KeyPair};
+	use ethkey::{Random, Generator, KeyPair};
 	use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage};
 	use key_server_cluster::message::{self, Message, GenerationMessage};
 	use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until, all_connections_established};
@@ -1065,7 +1077,7 @@ pub mod tests {

 	fn make_simple_cluster(threshold: usize, num_nodes: usize) -> Result<(SessionId, NodeId, NodeId, MessageLoop), Error> {
 		let l = MessageLoop::new(num_nodes);
-		l.master().initialize(Default::default(), false, threshold, l.nodes.keys().cloned().collect::<BTreeSet<_>>().into())?;
+		l.master().initialize(Default::default(), Default::default(), false, threshold, l.nodes.keys().cloned().collect::<BTreeSet<_>>().into())?;

 		let session_id = l.session_id.clone();
 		let master_id = l.master().node().clone();
@@ -1076,7 +1088,7 @@ pub mod tests {
 	#[test]
 	fn initializes_in_cluster_of_single_node() {
 		let l = MessageLoop::new(1);
-		assert!(l.master().initialize(Default::default(), false, 0, l.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).is_ok());
+		assert!(l.master().initialize(Default::default(), Default::default(), false, 0, l.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).is_ok());
 	}

 	#[test]
@@ -1091,7 +1103,7 @@ pub mod tests {
 			nonce: Some(0),
 		});
 		let cluster_nodes: BTreeSet<_> = (0..2).map(|_| math::generate_random_point().unwrap()).collect();
-		assert_eq!(session.initialize(Default::default(), false, 0, cluster_nodes.into()).unwrap_err(), Error::InvalidNodesConfiguration);
+		assert_eq!(session.initialize(Default::default(), Default::default(), false, 0, cluster_nodes.into()).unwrap_err(), Error::InvalidNodesConfiguration);
 	}

 	#[test]
@@ -1105,7 +1117,7 @@ pub mod tests {
 	#[test]
 	fn fails_to_initialize_when_already_initialized() {
 		let (_, _, _, l) = make_simple_cluster(0, 2).unwrap();
-		assert_eq!(l.master().initialize(Default::default(), false, 0, l.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).unwrap_err(),
+		assert_eq!(l.master().initialize(Default::default(), Default::default(), false, 0, l.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).unwrap_err(),
 			Error::InvalidStateForRequest);
 	}

@@ -1185,6 +1197,7 @@ pub mod tests {
 		assert_eq!(l.first_slave().on_initialize_session(m, &message::InitializeSession {
 			session: sid.into(),
 			session_nonce: 0,
+			origin: None,
 			author: Address::default().into(),
 			nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
 			is_zero: false,
@@ -1202,6 +1215,7 @@ pub mod tests {
 		assert_eq!(l.first_slave().on_initialize_session(m, &message::InitializeSession {
 			session: sid.into(),
 			session_nonce: 0,
+			origin: None,
 			author: Address::default().into(),
 			nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
 			is_zero: false,
@@ -1345,7 +1359,7 @@ pub mod tests {
 		let test_cases = [(0, 5), (2, 5), (3, 5)];
 		for &(threshold, num_nodes) in &test_cases {
 			let mut l = MessageLoop::new(num_nodes);
-			l.master().initialize(Default::default(), false, threshold, l.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).unwrap();
+			l.master().initialize(Default::default(), Default::default(), false, threshold, l.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).unwrap();
 			assert_eq!(l.nodes.len(), num_nodes);

 			// let nodes do initialization + keys dissemination
@@ -1377,6 +1391,9 @@ pub mod tests {

 	#[test]
 	fn encryption_session_works_over_network() {
+		const CONN_TIMEOUT: Duration = Duration::from_millis(300);
+		const SESSION_TIMEOUT: Duration = Duration::from_millis(1000);
+
 		let test_cases = [(1, 3)];
 		for &(threshold, num_nodes) in &test_cases {
 			let mut core = Core::new().unwrap();
@@ -1386,12 +1403,12 @@ pub mod tests {
 			run_clusters(&clusters);

 			// establish connections
-			loop_until(&mut core, Duration::from_millis(300), || clusters.iter().all(all_connections_established));
+			loop_until(&mut core, CONN_TIMEOUT, || clusters.iter().all(all_connections_established));

 			// run session to completion
 			let session_id = SessionId::default();
-			let session = clusters[0].client().new_generation_session(session_id, Public::default(), threshold).unwrap();
-			loop_until(&mut core, Duration::from_millis(1000), || session.joint_public_and_secret().is_some());
+			let session = clusters[0].client().new_generation_session(session_id, Default::default(), Default::default(), threshold).unwrap();
+			loop_until(&mut core, SESSION_TIMEOUT, || session.joint_public_and_secret().is_some());
 		}
 	}
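Aside: the origin plumbing above follows a single path: the master stores the origin at initialize time, stamps it into every InitializeSession message, and each slave copies it back out of the message, which is what lets the new decryption_session_origin_is_known_to_all_initialized_nodes test assert it on all five nodes. A toy sketch of that flow, with stand-in types that only model the shape of the real ones:

// Toy model of origin propagation: master stamps the origin into the init
// message; every slave copies it into its own session data.
#[derive(Clone, Debug, PartialEq)]
struct Address(u64); // stand-in for ethereum_types::Address

struct InitializeSession { origin: Option<Address> }

#[derive(Default)]
struct SessionData { origin: Option<Address> }

struct Session { data: SessionData }

impl Session {
	// master-side: remember origin and broadcast it
	fn initialize(&mut self, origin: Option<Address>) -> InitializeSession {
		self.data.origin = origin.clone();
		InitializeSession { origin }
	}
	// slave-side: adopt origin from the master's message
	fn on_initialize_session(&mut self, message: &InitializeSession) {
		self.data.origin = message.origin.clone();
	}
}

fn main() {
	let mut master = Session { data: SessionData::default() };
	let mut slaves: Vec<_> = (0..4).map(|_| Session { data: SessionData::default() }).collect();

	let message = master.initialize(Some(Address(1)));
	for slave in &mut slaves {
		slave.on_initialize_session(&message);
	}

	// all sessions must have origin set, as the new test asserts
	assert!(slaves.iter().chain(Some(&master)).all(|s| s.data.origin == Some(Address(1))));
}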
@@ -222,6 +222,7 @@ impl SessionImpl {
 	/// Wait for session completion.
 	pub fn wait(&self) -> Result<Signature, Error> {
 		Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone())
+			.expect("wait_session returns Some if called without timeout; qed")
 	}

 	/// Delegate session to other node.
@@ -402,7 +403,7 @@ impl SessionImpl {
 			session_nonce: n,
 			message: m,
 		}));
-		sig_nonce_generation_session.initialize(Default::default(), false, key_share.threshold, consensus_group_map.clone().into())?;
+		sig_nonce_generation_session.initialize(Default::default(), Default::default(), false, key_share.threshold, consensus_group_map.clone().into())?;
 		data.sig_nonce_generation_session = Some(sig_nonce_generation_session);

 		// start generation of inversed nonce computation session
@@ -414,7 +415,7 @@ impl SessionImpl {
 			session_nonce: n,
 			message: m,
 		}));
-		inv_nonce_generation_session.initialize(Default::default(), false, key_share.threshold, consensus_group_map.clone().into())?;
+		inv_nonce_generation_session.initialize(Default::default(), Default::default(), false, key_share.threshold, consensus_group_map.clone().into())?;
 		data.inv_nonce_generation_session = Some(inv_nonce_generation_session);

 		// start generation of zero-secret shares for inversed nonce computation session
@@ -426,7 +427,7 @@ impl SessionImpl {
 			session_nonce: n,
 			message: m,
 		}));
-		inv_zero_generation_session.initialize(Default::default(), true, key_share.threshold * 2, consensus_group_map.clone().into())?;
+		inv_zero_generation_session.initialize(Default::default(), Default::default(), true, key_share.threshold * 2, consensus_group_map.clone().into())?;
 		data.inv_zero_generation_session = Some(inv_zero_generation_session);

 		data.state = SessionState::NoncesGenerating;
@@ -688,7 +689,7 @@ impl SessionImpl {
 			id: message.request_id.clone().into(),
 			inversed_nonce_coeff: message.inversed_nonce_coeff.clone().into(),
 			message_hash: message.message_hash.clone().into(),
-		}, signing_job, signing_transport)
+		}, signing_job, signing_transport).map(|_| ())
 	}

 	/// When partial signature is received.
@@ -989,7 +990,7 @@ impl SessionCore {

 		let key_version = key_share.version(version).map_err(|e| Error::KeyStorage(e.into()))?.hash.clone();
 		let signing_job = EcdsaSigningJob::new_on_master(key_share.clone(), key_version, nonce_public, inv_nonce_share, inversed_nonce_coeff, message_hash)?;
-		consensus_session.disseminate_jobs(signing_job, self.signing_transport(), false)
+		consensus_session.disseminate_jobs(signing_job, self.signing_transport(), false).map(|_| ())
 	}
 }

@@ -1054,7 +1055,7 @@ mod tests {
 	use std::sync::Arc;
 	use std::collections::{BTreeSet, BTreeMap, VecDeque};
 	use ethereum_types::H256;
-	use ethkey::{self, Random, Generator, KeyPair, verify_public};
+	use ethkey::{self, Random, Generator, KeyPair, verify_public, public_to_address};
 	use acl_storage::DummyAclStorage;
 	use key_server_cluster::{NodeId, DummyKeyStorage, SessionId, SessionMeta, Error, KeyStorage};
 	use key_server_cluster::cluster_sessions::ClusterSession;
@@ -1165,7 +1166,7 @@ mod tests {
 	fn prepare_signing_sessions(threshold: usize, num_nodes: usize) -> (KeyGenerationMessageLoop, MessageLoop) {
 		// run key generation sessions
 		let mut gl = KeyGenerationMessageLoop::new(num_nodes);
-		gl.master().initialize(Default::default(), false, threshold, gl.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).unwrap();
+		gl.master().initialize(Default::default(), Default::default(), false, threshold, gl.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).unwrap();
 		while let Some((from, to, message)) = gl.take_message() {
 			gl.process_message((from, to, message)).unwrap();
 		}
@@ -1214,7 +1215,7 @@ mod tests {

 		// we need at least 3-of-4 nodes to agree to reach consensus
 		// let's say 1 of 4 nodes disagee
-		sl.acl_storages[1].prohibit(sl.requester.public().clone(), SessionId::default());
+		sl.acl_storages[1].prohibit(public_to_address(sl.requester.public()), SessionId::default());

 		// then consensus reachable, but single node will disagree
 		while let Some((from, to, message)) = sl.take_message() {
@@ -1235,7 +1236,7 @@ mod tests {

 		// we need at least 3-of-4 nodes to agree to reach consensus
 		// let's say 1 of 4 nodes disagee
-		sl.acl_storages[0].prohibit(sl.requester.public().clone(), SessionId::default());
+		sl.acl_storages[0].prohibit(public_to_address(sl.requester.public()), SessionId::default());

 		// then consensus reachable, but single node will disagree
 		while let Some((from, to, message)) = sl.take_message() {
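Aside: throughout these tests the prohibit calls now key ACL entries on the requester's address (hence public_to_address) rather than its public key. A minimal sketch of an address-keyed dummy ACL, assuming toy types that merely mirror the tests' usage, not the real DummyAclStorage:

// Toy address-keyed ACL: prohibitions are recorded per (address, document),
// mirroring the tests' switch from Public-keyed to Address-keyed checks.
use std::collections::HashSet;

type Address = [u8; 20];
type DocumentId = u64;

#[derive(Default)]
struct DummyAcl { prohibited: HashSet<(Address, DocumentId)> }

impl DummyAcl {
	fn prohibit(&mut self, requester: Address, document: DocumentId) {
		self.prohibited.insert((requester, document));
	}
	fn check(&self, requester: Address, document: DocumentId) -> bool {
		!self.prohibited.contains(&(requester, document))
	}
}

fn main() {
	let mut acl = DummyAcl::default();
	acl.prohibit([1; 20], 42);
	assert!(!acl.check([1; 20], 42)); // prohibited requester is rejected
	assert!(acl.check([2; 20], 42));  // everyone else still passes
}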
@@ -209,6 +209,7 @@ impl SessionImpl {
 	/// Wait for session completion.
 	pub fn wait(&self) -> Result<(Secret, Secret), Error> {
 		Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone())
+			.expect("wait_session returns Some if called without timeout; qed")
 	}

 	/// Delegate session to other node.
@@ -277,7 +278,7 @@ impl SessionImpl {
 			}),
 			nonce: None,
 		});
-		generation_session.initialize(Default::default(), false, 0, vec![self.core.meta.self_node_id.clone()].into_iter().collect::<BTreeSet<_>>().into())?;
+		generation_session.initialize(Default::default(), Default::default(), false, 0, vec![self.core.meta.self_node_id.clone()].into_iter().collect::<BTreeSet<_>>().into())?;

 		debug_assert_eq!(generation_session.state(), GenerationSessionState::WaitingForGenerationConfirmation);
 		let joint_public_and_secret = generation_session
@@ -406,7 +407,7 @@ impl SessionImpl {
 			nonce: None,
 		});

-		generation_session.initialize(Default::default(), false, key_share.threshold, consensus_group.into())?;
+		generation_session.initialize(Default::default(), Default::default(), false, key_share.threshold, consensus_group.into())?;
 		data.generation_session = Some(generation_session);
 		data.state = SessionState::SessionKeyGeneration;

@@ -508,7 +509,7 @@ impl SessionImpl {
 			id: message.request_id.clone().into(),
 			message_hash: message.message_hash.clone().into(),
 			other_nodes_ids: message.nodes.iter().cloned().map(Into::into).collect(),
-		}, signing_job, signing_transport)
+		}, signing_job, signing_transport).map(|_| ())
 	}

 	/// When partial signature is received.
@@ -735,8 +736,9 @@ impl SessionCore {
 		};

 		let key_version = key_share.version(version).map_err(|e| Error::KeyStorage(e.into()))?.hash.clone();
-		let signing_job = SchnorrSigningJob::new_on_master(self.meta.self_node_id.clone(), key_share.clone(), key_version, session_public, session_secret_share, message_hash)?;
-		consensus_session.disseminate_jobs(signing_job, self.signing_transport(), false)
+		let signing_job = SchnorrSigningJob::new_on_master(self.meta.self_node_id.clone(), key_share.clone(), key_version,
+			session_public, session_secret_share, message_hash)?;
+		consensus_session.disseminate_jobs(signing_job, self.signing_transport(), false).map(|_| ())
 	}
 }

@@ -802,7 +804,7 @@ mod tests {
 	use std::str::FromStr;
 	use std::collections::{BTreeSet, BTreeMap, VecDeque};
 	use ethereum_types::{Address, H256};
-	use ethkey::{self, Random, Generator, Public, Secret, KeyPair};
+	use ethkey::{self, Random, Generator, Public, Secret, KeyPair, public_to_address};
 	use acl_storage::DummyAclStorage;
 	use key_server_cluster::{NodeId, DummyKeyStorage, DocumentKeyShare, DocumentKeyShareVersion, SessionId,
 		Requester, SessionMeta, Error, KeyStorage};
@@ -928,7 +930,7 @@ mod tests {
 	fn prepare_signing_sessions(threshold: usize, num_nodes: usize) -> (KeyGenerationMessageLoop, MessageLoop) {
 		// run key generation sessions
 		let mut gl = KeyGenerationMessageLoop::new(num_nodes);
-		gl.master().initialize(Default::default(), false, threshold, gl.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).unwrap();
+		gl.master().initialize(Default::default(), Default::default(), false, threshold, gl.nodes.keys().cloned().collect::<BTreeSet<_>>().into()).unwrap();
 		while let Some((from, to, message)) = gl.take_message() {
 			gl.process_message((from, to, message)).unwrap();
 		}
@@ -1114,6 +1116,7 @@ mod tests {
 			message: GenerationMessage::InitializeSession(InitializeSession {
 				session: SessionId::default().into(),
 				session_nonce: 0,
+				origin: None,
 				author: Address::default().into(),
 				nodes: BTreeMap::new(),
 				is_zero: false,
@@ -1157,8 +1160,8 @@ mod tests {

 		// we need at least 2-of-3 nodes to agree to reach consensus
 		// let's say 2 of 3 nodes disagee
-		sl.acl_storages[1].prohibit(sl.requester.public().clone(), SessionId::default());
-		sl.acl_storages[2].prohibit(sl.requester.public().clone(), SessionId::default());
+		sl.acl_storages[1].prohibit(public_to_address(sl.requester.public()), SessionId::default());
+		sl.acl_storages[2].prohibit(public_to_address(sl.requester.public()), SessionId::default());

 		// then consensus is unreachable
 		assert_eq!(sl.run_until(|_| false), Err(Error::ConsensusUnreachable));
@@ -1171,7 +1174,7 @@ mod tests {

 		// we need at least 2-of-3 nodes to agree to reach consensus
 		// let's say 1 of 3 nodes disagee
-		sl.acl_storages[1].prohibit(sl.requester.public().clone(), SessionId::default());
+		sl.acl_storages[1].prohibit(public_to_address(sl.requester.public()), SessionId::default());

 		// then consensus reachable, but single node will disagree
 		while let Some((from, to, message)) = sl.take_message() {
@@ -1192,7 +1195,7 @@ mod tests {

 		// we need at least 2-of-3 nodes to agree to reach consensus
 		// let's say 1 of 3 nodes disagee
-		sl.acl_storages[0].prohibit(sl.requester.public().clone(), SessionId::default());
+		sl.acl_storages[0].prohibit(public_to_address(sl.requester.public()), SessionId::default());

 		// then consensus reachable, but single node will disagree
 		while let Some((from, to, message)) = sl.take_message() {
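Aside: the recurring `.expect("wait_session returns Some if called without timeout; qed")` encodes a contract where the shared wait helper may return None only when a timeout was supplied. A condvar-based sketch of that contract (an illustration of the invariant under assumed types, not the actual wait_session implementation):

// Minimal model of the wait contract: None is only possible when a timeout
// was supplied; waiting without a timeout always yields Some(result).
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

fn wait_session<T: Clone>(completed: &Condvar, data: &Mutex<Option<T>>, timeout: Option<Duration>) -> Option<T> {
	let mut guard = data.lock().unwrap();
	while guard.is_none() {
		match timeout {
			None => guard = completed.wait(guard).unwrap(),
			Some(t) => {
				let (g, res) = completed.wait_timeout(guard, t).unwrap();
				guard = g;
				if res.timed_out() {
					return guard.clone(); // still None if the result never arrived
				}
			},
		}
	}
	guard.clone()
}

fn main() {
	let state = Arc::new((Mutex::new(None::<u32>), Condvar::new()));
	let worker_state = state.clone();
	thread::spawn(move || {
		thread::sleep(Duration::from_millis(50));
		*worker_state.0.lock().unwrap() = Some(7);
		worker_state.1.notify_all();
	});
	// without a timeout the helper can only return once the result exists,
	// which is what justifies the `.expect(...); qed` at the call sites
	let result = wait_session(&state.1, &state.0, None).expect("no timeout given; qed");
	assert_eq!(result, 7);
}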
@ -26,8 +26,8 @@ use parking_lot::{RwLock, Mutex};
|
|||||||
use tokio_io::IoFuture;
|
use tokio_io::IoFuture;
|
||||||
use tokio_core::reactor::{Handle, Remote, Interval};
|
use tokio_core::reactor::{Handle, Remote, Interval};
|
||||||
use tokio_core::net::{TcpListener, TcpStream};
|
use tokio_core::net::{TcpListener, TcpStream};
|
||||||
use ethkey::{Public, KeyPair, Signature, Random, Generator, public_to_address};
|
use ethkey::{Public, KeyPair, Signature, Random, Generator};
|
||||||
use ethereum_types::H256;
|
use ethereum_types::{Address, H256};
|
||||||
use key_server_cluster::{Error, NodeId, SessionId, Requester, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair};
|
use key_server_cluster::{Error, NodeId, SessionId, Requester, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair};
|
||||||
use key_server_cluster::cluster_sessions::{ClusterSession, AdminSession, ClusterSessions, SessionIdWithSubSession,
|
use key_server_cluster::cluster_sessions::{ClusterSession, AdminSession, ClusterSessions, SessionIdWithSubSession,
|
||||||
ClusterSessionsContainer, SERVERS_SET_CHANGE_SESSION_ID, create_cluster_view, AdminSessionCreationData, ClusterSessionsListener};
|
ClusterSessionsContainer, SERVERS_SET_CHANGE_SESSION_ID, create_cluster_view, AdminSessionCreationData, ClusterSessionsListener};
|
||||||
@ -66,11 +66,11 @@ pub trait ClusterClient: Send + Sync {
|
|||||||
/// Get cluster state.
|
/// Get cluster state.
|
||||||
fn cluster_state(&self) -> ClusterState;
|
fn cluster_state(&self) -> ClusterState;
|
||||||
/// Start new generation session.
|
/// Start new generation session.
|
||||||
fn new_generation_session(&self, session_id: SessionId, author: Public, threshold: usize) -> Result<Arc<GenerationSession>, Error>;
|
fn new_generation_session(&self, session_id: SessionId, origin: Option<Address>, author: Address, threshold: usize) -> Result<Arc<GenerationSession>, Error>;
|
||||||
/// Start new encryption session.
|
/// Start new encryption session.
|
||||||
fn new_encryption_session(&self, session_id: SessionId, requester: Requester, common_point: Public, encrypted_point: Public) -> Result<Arc<EncryptionSession>, Error>;
|
fn new_encryption_session(&self, session_id: SessionId, author: Requester, common_point: Public, encrypted_point: Public) -> Result<Arc<EncryptionSession>, Error>;
|
||||||
/// Start new decryption session.
|
/// Start new decryption session.
|
||||||
fn new_decryption_session(&self, session_id: SessionId, requester: Requester, version: Option<H256>, is_shadow_decryption: bool) -> Result<Arc<DecryptionSession>, Error>;
|
fn new_decryption_session(&self, session_id: SessionId, origin: Option<Address>, requester: Requester, version: Option<H256>, is_shadow_decryption: bool, is_broadcast_decryption: bool) -> Result<Arc<DecryptionSession>, Error>;
|
||||||
/// Start new Schnorr signing session.
|
/// Start new Schnorr signing session.
|
||||||
fn new_schnorr_signing_session(&self, session_id: SessionId, requester: Requester, version: Option<H256>, message_hash: H256) -> Result<Arc<SchnorrSigningSession>, Error>;
|
fn new_schnorr_signing_session(&self, session_id: SessionId, requester: Requester, version: Option<H256>, message_hash: H256) -> Result<Arc<SchnorrSigningSession>, Error>;
|
||||||
/// Start new ECDSA session.
|
/// Start new ECDSA session.
|
||||||
@ -82,6 +82,8 @@ pub trait ClusterClient: Send + Sync {
|
|||||||
|
|
||||||
/// Listen for new generation sessions.
|
/// Listen for new generation sessions.
|
||||||
fn add_generation_listener(&self, listener: Arc<ClusterSessionsListener<GenerationSession>>);
|
fn add_generation_listener(&self, listener: Arc<ClusterSessionsListener<GenerationSession>>);
|
||||||
|
/// Listen for new decryption sessions.
|
||||||
|
fn add_decryption_listener(&self, listener: Arc<ClusterSessionsListener<DecryptionSession>>);
|
||||||
|
|
||||||
/// Ask node to make 'faulty' generation sessions.
|
/// Ask node to make 'faulty' generation sessions.
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
@ -477,11 +479,11 @@ impl ClusterCore {
|
|||||||
data.sessions.negotiation_sessions.remove(&session.id());
|
data.sessions.negotiation_sessions.remove(&session.id());
|
||||||
match session.wait() {
|
match session.wait() {
|
||||||
Ok((version, master)) => match session.take_continue_action() {
|
Ok((version, master)) => match session.take_continue_action() {
|
||||||
Some(ContinueAction::Decrypt(session, is_shadow_decryption)) => {
|
Some(ContinueAction::Decrypt(session, origin, is_shadow_decryption, is_broadcast_decryption)) => {
|
||||||
let initialization_error = if data.self_key_pair.public() == &master {
|
let initialization_error = if data.self_key_pair.public() == &master {
|
||||||
session.initialize(version, is_shadow_decryption, false)
|
session.initialize(origin, version, is_shadow_decryption, is_broadcast_decryption)
|
||||||
} else {
|
} else {
|
||||||
session.delegate(master, version, is_shadow_decryption, false)
|
session.delegate(master, origin, version, is_shadow_decryption, is_broadcast_decryption)
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Err(error) = initialization_error {
|
if let Err(error) = initialization_error {
|
||||||
@ -516,7 +518,7 @@ impl ClusterCore {
|
|||||||
None => (),
|
None => (),
|
||||||
},
|
},
|
||||||
Err(error) => match session.take_continue_action() {
|
Err(error) => match session.take_continue_action() {
|
||||||
Some(ContinueAction::Decrypt(session, _)) => {
|
Some(ContinueAction::Decrypt(session, _, _, _)) => {
|
||||||
data.sessions.decryption_sessions.remove(&session.id());
|
data.sessions.decryption_sessions.remove(&session.id());
|
||||||
session.on_session_error(&meta.self_node_id, error);
|
session.on_session_error(&meta.self_node_id, error);
|
||||||
},
|
},
|
||||||
@ -901,13 +903,13 @@ impl ClusterClient for ClusterClientImpl {
|
|||||||
self.data.connections.cluster_state()
|
self.data.connections.cluster_state()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new_generation_session(&self, session_id: SessionId, author: Public, threshold: usize) -> Result<Arc<GenerationSession>, Error> {
|
fn new_generation_session(&self, session_id: SessionId, origin: Option<Address>, author: Address, threshold: usize) -> Result<Arc<GenerationSession>, Error> {
|
||||||
let mut connected_nodes = self.data.connections.connected_nodes();
|
let mut connected_nodes = self.data.connections.connected_nodes();
|
||||||
connected_nodes.insert(self.data.self_key_pair.public().clone());
|
connected_nodes.insert(self.data.self_key_pair.public().clone());
|
||||||
|
|
||||||
let cluster = create_cluster_view(&self.data, true)?;
|
let cluster = create_cluster_view(&self.data, true)?;
|
||||||
let session = self.data.sessions.generation_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?;
|
let session = self.data.sessions.generation_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?;
|
||||||
match session.initialize(public_to_address(&author), false, threshold, connected_nodes.into()) {
|
match session.initialize(origin, author, false, threshold, connected_nodes.into()) {
|
||||||
Ok(()) => Ok(session),
|
Ok(()) => Ok(session),
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
self.data.sessions.generation_sessions.remove(&session.id());
|
self.data.sessions.generation_sessions.remove(&session.id());
|
||||||
@ -931,7 +933,7 @@ impl ClusterClient for ClusterClientImpl {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new_decryption_session(&self, session_id: SessionId, requester: Requester, version: Option<H256>, is_shadow_decryption: bool) -> Result<Arc<DecryptionSession>, Error> {
|
fn new_decryption_session(&self, session_id: SessionId, origin: Option<Address>, requester: Requester, version: Option<H256>, is_shadow_decryption: bool, is_broadcast_decryption: bool) -> Result<Arc<DecryptionSession>, Error> {
|
||||||
let mut connected_nodes = self.data.connections.connected_nodes();
|
let mut connected_nodes = self.data.connections.connected_nodes();
|
||||||
connected_nodes.insert(self.data.self_key_pair.public().clone());
|
connected_nodes.insert(self.data.self_key_pair.public().clone());
|
||||||
|
|
||||||
@ -942,11 +944,11 @@ impl ClusterClient for ClusterClientImpl {
|
|||||||
session_id.clone(), None, false, Some(requester))?;
|
session_id.clone(), None, false, Some(requester))?;
|
||||||
|
|
||||||
let initialization_result = match version {
|
let initialization_result = match version {
|
||||||
Some(version) => session.initialize(version, is_shadow_decryption, false),
|
Some(version) => session.initialize(origin, version, is_shadow_decryption, is_broadcast_decryption),
|
||||||
None => {
|
None => {
|
||||||
self.create_key_version_negotiation_session(session_id.id.clone())
|
self.create_key_version_negotiation_session(session_id.id.clone())
|
||||||
.map(|version_session| {
|
.map(|version_session| {
|
||||||
version_session.set_continue_action(ContinueAction::Decrypt(session.clone(), is_shadow_decryption));
|
version_session.set_continue_action(ContinueAction::Decrypt(session.clone(), origin, is_shadow_decryption, is_broadcast_decryption));
|
||||||
ClusterCore::try_continue_session(&self.data, Some(version_session));
|
ClusterCore::try_continue_session(&self.data, Some(version_session));
|
||||||
})
|
})
|
||||||
},
|
},
|
||||||
@ -1056,6 +1058,10 @@ impl ClusterClient for ClusterClientImpl {
|
|||||||
self.data.sessions.generation_sessions.add_listener(listener);
|
self.data.sessions.generation_sessions.add_listener(listener);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn add_decryption_listener(&self, listener: Arc<ClusterSessionsListener<DecryptionSession>>) {
|
||||||
|
self.data.sessions.decryption_sessions.add_listener(listener);
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
fn connect(&self) {
|
fn connect(&self) {
|
||||||
ClusterCore::connect_disconnected_nodes(self.data.clone());
|
ClusterCore::connect_disconnected_nodes(self.data.clone());
|
||||||
@ -1085,11 +1091,12 @@ fn make_socket_address(address: &str, port: u16) -> Result<SocketAddr, Error> {
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
pub mod tests {
|
pub mod tests {
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
use std::collections::{BTreeSet, VecDeque};
|
use std::collections::{BTreeSet, VecDeque};
|
||||||
use parking_lot::Mutex;
|
use parking_lot::Mutex;
|
||||||
use tokio_core::reactor::Core;
|
use tokio_core::reactor::Core;
|
||||||
use ethereum_types::H256;
|
use ethereum_types::{Address, H256};
|
||||||
use ethkey::{Random, Generator, Public, Signature, sign};
|
use ethkey::{Random, Generator, Public, Signature, sign};
|
||||||
use key_server_cluster::{NodeId, SessionId, Requester, Error, DummyAclStorage, DummyKeyStorage,
|
use key_server_cluster::{NodeId, SessionId, Requester, Error, DummyAclStorage, DummyKeyStorage,
|
||||||
MapKeyServerSet, PlainNodeKeyPair, KeyStorage};
|
MapKeyServerSet, PlainNodeKeyPair, KeyStorage};
|
||||||
@ -1107,7 +1114,9 @@ pub mod tests {
|
|||||||
const TIMEOUT: Duration = Duration::from_millis(300);
|
const TIMEOUT: Duration = Duration::from_millis(300);

#[derive(Default)]
pub struct DummyClusterClient;
pub struct DummyClusterClient {
pub generation_requests_count: AtomicUsize,
}

#[derive(Debug)]
pub struct DummyCluster {
@ -1123,15 +1132,20 @@ pub mod tests {

impl ClusterClient for DummyClusterClient {
fn cluster_state(&self) -> ClusterState { unimplemented!("test-only") }
fn new_generation_session(&self, _session_id: SessionId, _author: Public, _threshold: usize) -> Result<Arc<GenerationSession>, Error> { unimplemented!("test-only") }
fn new_generation_session(&self, _session_id: SessionId, _origin: Option<Address>, _author: Address, _threshold: usize) -> Result<Arc<GenerationSession>, Error> {
self.generation_requests_count.fetch_add(1, Ordering::Relaxed);
Err(Error::Io("test-error".into()))
}
fn new_encryption_session(&self, _session_id: SessionId, _requester: Requester, _common_point: Public, _encrypted_point: Public) -> Result<Arc<EncryptionSession>, Error> { unimplemented!("test-only") }
fn new_decryption_session(&self, _session_id: SessionId, _requester: Requester, _version: Option<H256>, _is_shadow_decryption: bool) -> Result<Arc<DecryptionSession>, Error> { unimplemented!("test-only") }
fn new_decryption_session(&self, _session_id: SessionId, _origin: Option<Address>, _requester: Requester, _version: Option<H256>, _is_shadow_decryption: bool, _is_broadcast_session: bool) -> Result<Arc<DecryptionSession>, Error> { unimplemented!("test-only") }
fn new_schnorr_signing_session(&self, _session_id: SessionId, _requester: Requester, _version: Option<H256>, _message_hash: H256) -> Result<Arc<SchnorrSigningSession>, Error> { unimplemented!("test-only") }
fn new_ecdsa_signing_session(&self, _session_id: SessionId, _requester: Requester, _version: Option<H256>, _message_hash: H256) -> Result<Arc<EcdsaSigningSession>, Error> { unimplemented!("test-only") }

fn new_key_version_negotiation_session(&self, _session_id: SessionId) -> Result<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>, Error> { unimplemented!("test-only") }
fn new_servers_set_change_session(&self, _session_id: Option<SessionId>, _migration_id: Option<H256>, _new_nodes_set: BTreeSet<NodeId>, _old_set_signature: Signature, _new_set_signature: Signature) -> Result<Arc<AdminSession>, Error> { unimplemented!("test-only") }

fn add_generation_listener(&self, _listener: Arc<ClusterSessionsListener<GenerationSession>>) {}
fn add_decryption_listener(&self, _listener: Arc<ClusterSessionsListener<DecryptionSession>>) {}

fn make_faulty_generation_sessions(&self) { unimplemented!("test-only") }
fn generation_session(&self, _session_id: &SessionId) -> Option<Arc<GenerationSession>> { unimplemented!("test-only") }
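The stub now counts generation requests, which is what the service-contract tests assert against instead of inspecting real sessions. A minimal sketch of how such a counting double is typically driven (the argument values here are illustrative, not taken from this commit):

    // the stub bumps the counter and deliberately fails, so callers can observe retries
    let client = DummyClusterClient::default();
    let _ = client.new_generation_session(Default::default(), None, Default::default(), 1);
    assert_eq!(client.generation_requests_count.load(Ordering::Relaxed), 1);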
@ -1258,7 +1272,7 @@ pub mod tests {
let core = Core::new().unwrap();
let clusters = make_clusters(&core, 6013, 3);
clusters[0].run().unwrap();
match clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1) {
match clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1) {
Err(Error::NodeDisconnected) => (),
Err(e) => panic!("unexpected error {:?}", e),
_ => panic!("unexpected success"),
@ -1277,7 +1291,7 @@ pub mod tests {
clusters[1].client().make_faulty_generation_sessions();

// start && wait for generation session to fail
let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap();
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
loop_until(&mut core, TIMEOUT, || session.joint_public_and_secret().is_some()
&& clusters[0].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_err());
@ -1306,7 +1320,7 @@ pub mod tests {
clusters[0].client().make_faulty_generation_sessions();

// start && wait for generation session to fail
let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap();
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
loop_until(&mut core, TIMEOUT, || session.joint_public_and_secret().is_some()
&& clusters[0].client().generation_session(&SessionId::default()).is_none());
assert!(session.joint_public_and_secret().unwrap().is_err());
@ -1332,7 +1346,7 @@ pub mod tests {
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));

// start && wait for generation session to complete
let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap();
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
loop_until(&mut core, TIMEOUT, || (session.state() == GenerationSessionState::Finished
|| session.state() == GenerationSessionState::Failed)
&& clusters[0].client().generation_session(&SessionId::default()).is_none());
@ -1359,11 +1373,11 @@ pub mod tests {
// generation session
{
// try to start generation session => fails in initialization
assert_eq!(clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 100).map(|_| ()),
assert_eq!(clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 100).map(|_| ()),
Err(Error::InvalidThreshold));

// try to start generation session => fails in initialization
assert_eq!(clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 100).map(|_| ()),
assert_eq!(clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 100).map(|_| ()),
Err(Error::InvalidThreshold));

assert!(clusters[0].data.sessions.generation_sessions.is_empty());
@ -1372,11 +1386,11 @@ pub mod tests {
// decryption session
{
// try to start decryption session => fails in initialization
assert_eq!(clusters[0].client().new_decryption_session(Default::default(), Default::default(), Some(Default::default()), false).map(|_| ()),
assert_eq!(clusters[0].client().new_decryption_session(Default::default(), Default::default(), Default::default(), Some(Default::default()), false, false).map(|_| ()),
Err(Error::InvalidMessage));

// try to start decryption session => fails in initialization
assert_eq!(clusters[0].client().new_decryption_session(Default::default(), Default::default(), Some(Default::default()), false).map(|_| ()),
assert_eq!(clusters[0].client().new_decryption_session(Default::default(), Default::default(), Default::default(), Some(Default::default()), false, false).map(|_| ()),
Err(Error::InvalidMessage));

assert!(clusters[0].data.sessions.decryption_sessions.is_empty());
@ -1393,7 +1407,7 @@ pub mod tests {
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));

// start && wait for generation session to complete
let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap();
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
loop_until(&mut core, TIMEOUT, || (session.state() == GenerationSessionState::Finished
|| session.state() == GenerationSessionState::Failed)
&& clusters[0].client().generation_session(&SessionId::default()).is_none());
@ -1442,7 +1456,7 @@ pub mod tests {
loop_until(&mut core, TIMEOUT, || clusters.iter().all(all_connections_established));

// start && wait for generation session to complete
let session = clusters[0].client().new_generation_session(SessionId::default(), Public::default(), 1).unwrap();
let session = clusters[0].client().new_generation_session(SessionId::default(), Default::default(), Default::default(), 1).unwrap();
loop_until(&mut core, TIMEOUT, || (session.state() == GenerationSessionState::Finished
|| session.state() == GenerationSessionState::Failed)
&& clusters[0].client().generation_session(&SessionId::default()).is_none());

@ -84,10 +84,10 @@ pub trait ClusterSession {
fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error>;

/// 'Wait for session completion' helper.
fn wait_session<T, U, F: Fn(&U) -> Option<Result<T, Error>>>(completion_event: &Condvar, session_data: &Mutex<U>, timeout: Option<Duration>, result_reader: F) -> Result<T, Error> {
fn wait_session<T, U, F: Fn(&U) -> Option<Result<T, Error>>>(completion_event: &Condvar, session_data: &Mutex<U>, timeout: Option<Duration>, result_reader: F) -> Option<Result<T, Error>> {
let mut locked_data = session_data.lock();
match result_reader(&locked_data) {
Some(result) => result,
Some(result) => Some(result),
None => {
match timeout {
None => completion_event.wait(&mut locked_data),
@ -97,7 +97,6 @@ pub trait ClusterSession {
}

result_reader(&locked_data)
.expect("waited for completion; completion is only signaled when result.is_some(); qed")
},
}
}
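With `wait_session` returning an `Option`, a caller can distinguish a timeout (`None`) from a completed session (`Some(result)`) instead of panicking on the old `expect`. A sketch of a typical wrapper, assuming a session type whose shared data holds an optional result behind a `Mutex` plus a completion `Condvar` (the field names are assumptions):

    fn wait(&self, timeout: Option<Duration>) -> Option<Result<Public, Error>> {
        // None means the timeout elapsed before completion was signaled
        Self::wait_session(&self.completed, &self.data, timeout, |data| data.result.clone())
    }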
@ -563,12 +562,14 @@ pub fn create_cluster_view(data: &Arc<ClusterData>, requires_all_connections: bo
#[cfg(test)]
mod tests {
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use ethkey::{Random, Generator};
use key_server_cluster::{Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet, PlainNodeKeyPair};
use key_server_cluster::cluster::ClusterConfiguration;
use key_server_cluster::connection_trigger::SimpleServersSetChangeSessionCreatorConnector;
use key_server_cluster::cluster::tests::DummyCluster;
use super::{ClusterSessions, AdminSessionCreationData};
use key_server_cluster::generation_session::{SessionImpl as GenerationSession};
use super::{ClusterSessions, AdminSessionCreationData, ClusterSessionsListener};

pub fn make_cluster_sessions() -> ClusterSessions {
let key_pair = Random.generate().unwrap();
@ -610,4 +611,35 @@ mod tests {
Ok(_) => unreachable!("OK"),
}
}

#[test]
fn session_listener_works() {
#[derive(Default)]
struct GenerationSessionListener {
inserted: AtomicUsize,
removed: AtomicUsize,
}

impl ClusterSessionsListener<GenerationSession> for GenerationSessionListener {
fn on_session_inserted(&self, _session: Arc<GenerationSession>) {
self.inserted.fetch_add(1, Ordering::Relaxed);
}

fn on_session_removed(&self, _session: Arc<GenerationSession>) {
self.removed.fetch_add(1, Ordering::Relaxed);
}
}

let listener = Arc::new(GenerationSessionListener::default());
let sessions = make_cluster_sessions();
sessions.generation_sessions.add_listener(listener.clone());

sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None).unwrap();
assert_eq!(listener.inserted.load(Ordering::Relaxed), 1);
assert_eq!(listener.removed.load(Ordering::Relaxed), 0);

sessions.generation_sessions.remove(&Default::default());
assert_eq!(listener.inserted.load(Ordering::Relaxed), 1);
assert_eq!(listener.removed.load(Ordering::Relaxed), 1);
}
}
@ -17,7 +17,7 @@
use std::collections::BTreeSet;
use key_server_cluster::{Error, NodeId, SessionMeta, Requester};
use key_server_cluster::message::ConsensusMessage;
use key_server_cluster::jobs::job_session::{JobSession, JobSessionState, JobTransport, JobExecutor};
use key_server_cluster::jobs::job_session::{JobSession, JobSessionState, JobTransport, JobExecutor, JobPartialRequestAction};

/// Consensus session state.
#[derive(Debug, Clone, Copy, PartialEq)]
@ -114,7 +114,6 @@ impl<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTran
}

/// Get computation job reference.
#[cfg(test)]
pub fn computation_job(&self) -> &JobSession<ComputationExecutor, ComputationTransport> {
self.computation_job.as_ref()
.expect("computation_job must only be called on master nodes")
@ -140,15 +139,15 @@ impl<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTran
/// Initialize session on master node.
pub fn initialize(&mut self, nodes: BTreeSet<NodeId>) -> Result<(), Error> {
debug_assert!(self.meta.self_node_id == self.meta.master_node_id);
let initialization_result = self.consensus_job.initialize(nodes, false);
let initialization_result = self.consensus_job.initialize(nodes, None, false);
self.state = ConsensusSessionState::EstablishingConsensus;
self.process_result(initialization_result)
self.process_result(initialization_result.map(|_| ()))
}

/// Process consensus request message.
pub fn on_consensus_partial_request(&mut self, sender: &NodeId, request: ConsensusExecutor::PartialJobRequest) -> Result<(), Error> {
let consensus_result = self.consensus_job.on_partial_request(sender, request);
self.process_result(consensus_result)
self.process_result(consensus_result.map(|_| ()))
}

/// Process consensus message response.
@ -179,19 +178,22 @@ impl<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTran
}

/// Disseminate jobs from master node.
pub fn disseminate_jobs(&mut self, executor: ComputationExecutor, transport: ComputationTransport, broadcast_self_response: bool) -> Result<(), Error> {
pub fn disseminate_jobs(&mut self, executor: ComputationExecutor, transport: ComputationTransport, broadcast_self_response: bool) -> Result<Option<ComputationExecutor::PartialJobResponse>, Error> {
let consensus_group = self.select_consensus_group()?.clone();
self.consensus_group.clear();

let mut computation_job = JobSession::new(self.meta.clone(), executor, transport);
let computation_result = computation_job.initialize(consensus_group, broadcast_self_response);
let computation_result = computation_job.initialize(consensus_group, None, broadcast_self_response);
self.computation_job = Some(computation_job);
self.state = ConsensusSessionState::WaitingForPartialResults;
self.process_result(computation_result)
match computation_result {
Ok(computation_result) => self.process_result(Ok(())).map(|_| computation_result),
Err(error) => Err(self.process_result(Err(error)).unwrap_err()),
}
}
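`disseminate_jobs` now hands the master's own partial response back to the caller instead of swallowing it, which the broadcast decryption flow needs in order to re-send that share to other nodes. A caller sketch (the executor and transport values are placeholders):

    // on success, the master's own partial response (if it participates) comes back
    let self_response = session.disseminate_jobs(executor, transport, true)?;
    if let Some(response) = self_response {
        // e.g. broadcast it alongside the responses received from other nodes
    }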

/// Process job request on slave node.
pub fn on_job_request(&mut self, node: &NodeId, request: ComputationExecutor::PartialJobRequest, executor: ComputationExecutor, transport: ComputationTransport) -> Result<(), Error> {
pub fn on_job_request(&mut self, node: &NodeId, request: ComputationExecutor::PartialJobRequest, executor: ComputationExecutor, transport: ComputationTransport) -> Result<JobPartialRequestAction<ComputationExecutor::PartialJobResponse>, Error> {
if &self.meta.master_node_id != node {
return Err(Error::InvalidMessage);
}
@ -350,7 +352,7 @@ impl<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTran
let consensus_result = match message {
&ConsensusMessage::InitializeConsensusSession(ref message) =>
self.consensus_job.on_partial_request(sender, message.requester.clone().into()),
self.consensus_job.on_partial_request(sender, message.requester.clone().into()).map(|_| ()),
&ConsensusMessage::ConfirmConsensusInitialization(ref message) =>
self.consensus_job.on_partial_response(sender, message.is_confirmed),
};
@ -361,7 +363,7 @@ impl<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTran
#[cfg(test)]
mod tests {
use std::sync::Arc;
use ethkey::{KeyPair, Random, Generator, sign};
use ethkey::{KeyPair, Random, Generator, sign, public_to_address};
use key_server_cluster::{Error, NodeId, SessionId, Requester, DummyAclStorage};
use key_server_cluster::message::{ConsensusMessage, InitializeConsensusSession, ConfirmConsensusInitialization};
use key_server_cluster::jobs::job_session::tests::{make_master_session_meta, make_slave_session_meta, SquaredSumJobExecutor, DummyJobTransport};
@ -414,7 +416,7 @@ mod tests {
fn consensus_session_consensus_is_not_reached_when_initializes_with_zero_threshold_and_master_rejects() {
let requester = Random.generate().unwrap();
let acl_storage = DummyAclStorage::default();
acl_storage.prohibit(requester.public().clone(), SessionId::default());
acl_storage.prohibit(public_to_address(requester.public()), SessionId::default());

let mut session = make_master_consensus_session(0, Some(requester), Some(acl_storage));
session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap();
@ -429,7 +431,7 @@ mod tests {
fn consensus_session_consensus_is_failed_by_master_node() {
let requester = Random.generate().unwrap();
let acl_storage = DummyAclStorage::default();
acl_storage.prohibit(requester.public().clone(), SessionId::default());
acl_storage.prohibit(public_to_address(requester.public()), SessionId::default());

let mut session = make_master_consensus_session(1, Some(requester), Some(acl_storage));
assert_eq!(session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap_err(), Error::ConsensusUnreachable);
@ -471,7 +473,7 @@ mod tests {
fn consensus_session_job_dissemination_does_not_select_master_node_if_rejected() {
let requester = Random.generate().unwrap();
let acl_storage = DummyAclStorage::default();
acl_storage.prohibit(requester.public().clone(), SessionId::default());
acl_storage.prohibit(public_to_address(requester.public()), SessionId::default());

let mut session = make_master_consensus_session(0, Some(requester), Some(acl_storage));
session.initialize(vec![NodeId::from(1), NodeId::from(2)].into_iter().collect()).unwrap();
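These tests now prohibit by `Address` instead of `Public`, matching the switch to storing author addresses in the key store. The conversion is deterministic: `public_to_address` takes the last 20 bytes of the keccak256 hash of the public key, so a requester maps to exactly one address:

    // Address = last 20 bytes of keccak256(public key)
    let requester = Random.generate().unwrap();
    let requester_address = public_to_address(requester.public());
    acl_storage.prohibit(requester_address, SessionId::default());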
@ -44,6 +44,7 @@ pub struct DecryptionJob {
}

/// Decryption job partial request.
#[derive(Debug)]
pub struct PartialDecryptionRequest {
/// Request id.
pub id: Secret,
@ -143,10 +144,11 @@ impl JobExecutor for DecryptionJob {
let decrypt_shadow = if partial_request.is_shadow_decryption { Some(math::generate_random_scalar()?) } else { None };
let common_point = self.key_share.common_point.as_ref().expect("DecryptionJob is only created when common_point is known; qed");
let (shadow_point, decrypt_shadow) = math::compute_node_shadow_point(&self.access_key, &common_point, &node_shadow, decrypt_shadow)?;

Ok(JobPartialRequestAction::Respond(PartialDecryptionResponse {
request_id: partial_request.id,
shadow_point: shadow_point,
decrypt_shadow: match decrypt_shadow {
decrypt_shadow: match decrypt_shadow.clone() {
None => None,
Some(decrypt_shadow) => Some(encrypt(&self.requester, &DEFAULT_MAC, &**decrypt_shadow)?),
},
@ -197,7 +197,7 @@ impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExe
}

/// Initialize.
pub fn initialize(&mut self, nodes: BTreeSet<NodeId>, broadcast_self_response: bool) -> Result<(), Error> {
pub fn initialize(&mut self, nodes: BTreeSet<NodeId>, self_response: Option<Executor::PartialJobResponse>, broadcast_self_response: bool) -> Result<Option<Executor::PartialJobResponse>, Error> {
debug_assert!(self.meta.self_node_id == self.meta.master_node_id);

if nodes.len() < self.meta.threshold + 1 {
@ -215,15 +215,13 @@ impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExe
responses: BTreeMap::new(),
};
let waits_for_self = active_data.requests.contains(&self.meta.self_node_id);
let self_response = if waits_for_self {
let partial_request = self.executor.prepare_partial_request(&self.meta.self_node_id, &active_data.requests)?;
Some(self.executor.process_partial_request(partial_request)?)
} else {
None
};
let self_response = match self_response {
Some(JobPartialRequestAction::Respond(self_response)) => Some(self_response),
Some(JobPartialRequestAction::Reject(self_response)) => Some(self_response),
let self_response = match self_response {
Some(self_response) => Some(self_response),
None if waits_for_self => {
let partial_request = self.executor.prepare_partial_request(&self.meta.self_node_id, &active_data.requests)?;
let self_response = self.executor.process_partial_request(partial_request)?;
Some(self_response.take_response())
},
None => None,
};

@ -249,11 +247,11 @@ impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExe
}
}

Ok(())
Ok(self_response)
}

/// When partial request is received by slave node.
pub fn on_partial_request(&mut self, node: &NodeId, request: Executor::PartialJobRequest) -> Result<(), Error> {
pub fn on_partial_request(&mut self, node: &NodeId, request: Executor::PartialJobRequest) -> Result<JobPartialRequestAction<Executor::PartialJobResponse>, Error> {
if node != &self.meta.master_node_id {
return Err(Error::InvalidMessage);
}
@ -264,17 +262,19 @@ impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExe
return Err(Error::InvalidStateForRequest);
}

let partial_response = match self.executor.process_partial_request(request)? {
JobPartialRequestAction::Respond(partial_response) => {
self.data.state = JobSessionState::Finished;
partial_response
},
JobPartialRequestAction::Reject(partial_response) => {
self.data.state = JobSessionState::Failed;
partial_response
},
};
self.transport.send_partial_response(node, partial_response)
let partial_request_action = self.executor.process_partial_request(request)?;
let partial_response = match partial_request_action {
JobPartialRequestAction::Respond(ref partial_response) => {
self.data.state = JobSessionState::Finished;
partial_response.clone()
},
JobPartialRequestAction::Reject(ref partial_response) => {
self.data.state = JobSessionState::Failed;
partial_response.clone()
},
};
self.transport.send_partial_response(node, partial_response)?;
Ok(partial_request_action)
}

/// When partial request is received by master node.
@ -291,7 +291,7 @@ impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExe
if !active_data.requests.remove(node) {
return Err(Error::InvalidNodeForRequest);
}

match self.executor.check_partial_response(node, &response)? {
JobPartialResponseAction::Ignore => Ok(()),
JobPartialResponseAction::Reject => {
@ -358,6 +358,15 @@ impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExe
}
}

impl<PartialJobResponse> JobPartialRequestAction<PartialJobResponse> {
/// Take actual response.
pub fn take_response(self) -> PartialJobResponse {
match self {
JobPartialRequestAction::Respond(response) => response,
JobPartialRequestAction::Reject(response) => response,
}
}
}

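`take_response` collapses both arms of `JobPartialRequestAction`, which is exactly what `initialize` needs above: the master wants its own payload whether the local executor accepted or rejected the request. Usage sketch:

    let action = self.executor.process_partial_request(partial_request)?;
    // Respond(r) and Reject(r) both carry a response; unwrap it uniformly
    let response = action.take_response();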
#[cfg(test)]
pub mod tests {
@ -415,14 +424,14 @@ pub mod tests {
#[test]
fn job_initialize_fails_if_not_inactive() {
let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
job.initialize(vec![Public::from(1)].into_iter().collect(), false).unwrap();
job.initialize(vec![Public::from(1)].into_iter().collect(), None, false).unwrap();
assert_eq!(job.initialize(vec![Public::from(1)].into_iter().collect(), false).unwrap_err(), Error::InvalidStateForRequest);
assert_eq!(job.initialize(vec![Public::from(1)].into_iter().collect(), None, false).unwrap_err(), Error::InvalidStateForRequest);
}

#[test]
fn job_initialization_leads_to_finish_if_single_node_is_required() {
let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
job.initialize(vec![Public::from(1)].into_iter().collect(), false).unwrap();
job.initialize(vec![Public::from(1)].into_iter().collect(), None, false).unwrap();
assert_eq!(job.state(), JobSessionState::Finished);
assert!(job.is_result_ready());
assert_eq!(job.result(), Ok(4));
@ -431,7 +440,7 @@ pub mod tests {
#[test]
fn job_initialization_does_not_leads_to_finish_if_single_other_node_is_required() {
let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
job.initialize(vec![Public::from(2)].into_iter().collect(), false).unwrap();
job.initialize(vec![Public::from(2)].into_iter().collect(), None, false).unwrap();
assert_eq!(job.state(), JobSessionState::Active);
}

@ -474,7 +483,7 @@ pub mod tests {
#[test]
fn job_response_fails_if_comes_to_failed_state() {
let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
job.initialize(vec![Public::from(2)].into_iter().collect(), false).unwrap();
job.initialize(vec![Public::from(2)].into_iter().collect(), None, false).unwrap();
job.on_session_timeout().unwrap_err();
assert_eq!(job.on_partial_response(&NodeId::from(2), 2).unwrap_err(), Error::InvalidStateForRequest);
}
@ -482,14 +491,14 @@ pub mod tests {
#[test]
fn job_response_fails_if_comes_from_unknown_node() {
let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
job.initialize(vec![Public::from(2)].into_iter().collect(), false).unwrap();
job.initialize(vec![Public::from(2)].into_iter().collect(), None, false).unwrap();
assert_eq!(job.on_partial_response(&NodeId::from(3), 2).unwrap_err(), Error::InvalidNodeForRequest);
}

#[test]
fn job_response_leads_to_failure_if_too_few_nodes_left() {
let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), false).unwrap();
job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), None, false).unwrap();
assert_eq!(job.state(), JobSessionState::Active);
assert_eq!(job.on_partial_response(&NodeId::from(2), 3).unwrap_err(), Error::ConsensusUnreachable);
assert_eq!(job.state(), JobSessionState::Failed);
@ -498,7 +507,7 @@ pub mod tests {
#[test]
fn job_response_succeeds() {
let mut job = JobSession::new(make_master_session_meta(2), SquaredSumJobExecutor, DummyJobTransport::default());
job.initialize(vec![Public::from(1), Public::from(2), Public::from(3)].into_iter().collect(), false).unwrap();
job.initialize(vec![Public::from(1), Public::from(2), Public::from(3)].into_iter().collect(), None, false).unwrap();
assert_eq!(job.state(), JobSessionState::Active);
assert!(!job.is_result_ready());
job.on_partial_response(&NodeId::from(2), 2).unwrap();
@ -509,7 +518,7 @@ pub mod tests {
#[test]
fn job_response_leads_to_finish() {
let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), false).unwrap();
job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), None, false).unwrap();
assert_eq!(job.state(), JobSessionState::Active);
job.on_partial_response(&NodeId::from(2), 2).unwrap();
assert_eq!(job.state(), JobSessionState::Finished);
@ -534,7 +543,7 @@ pub mod tests {
#[test]
fn job_node_error_ignored_when_disconnects_from_rejected() {
let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
job.initialize(vec![Public::from(1), Public::from(2), Public::from(3)].into_iter().collect(), false).unwrap();
job.initialize(vec![Public::from(1), Public::from(2), Public::from(3)].into_iter().collect(), None, false).unwrap();
assert_eq!(job.state(), JobSessionState::Active);
job.on_partial_response(&NodeId::from(2), 3).unwrap();
job.on_node_error(&NodeId::from(2)).unwrap();
@ -544,7 +553,7 @@ pub mod tests {
#[test]
fn job_node_error_ignored_when_disconnects_from_unknown() {
let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), false).unwrap();
job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), None, false).unwrap();
assert_eq!(job.state(), JobSessionState::Active);
job.on_node_error(&NodeId::from(3)).unwrap();
assert_eq!(job.state(), JobSessionState::Active);
@ -553,7 +562,7 @@ pub mod tests {
#[test]
fn job_node_error_ignored_when_disconnects_from_requested_and_enough_nodes_left() {
let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
job.initialize(vec![Public::from(1), Public::from(2), Public::from(3)].into_iter().collect(), false).unwrap();
job.initialize(vec![Public::from(1), Public::from(2), Public::from(3)].into_iter().collect(), None, false).unwrap();
assert_eq!(job.state(), JobSessionState::Active);
job.on_node_error(&NodeId::from(3)).unwrap();
assert_eq!(job.state(), JobSessionState::Active);
@ -562,7 +571,7 @@ pub mod tests {
#[test]
fn job_node_error_leads_to_fail_when_disconnects_from_requested_and_not_enough_nodes_left() {
let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), false).unwrap();
job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), None, false).unwrap();
assert_eq!(job.state(), JobSessionState::Active);
assert_eq!(job.on_node_error(&NodeId::from(2)).unwrap_err(), Error::ConsensusUnreachable);
assert_eq!(job.state(), JobSessionState::Failed);
@ -571,7 +580,7 @@ pub mod tests {
#[test]
fn job_broadcasts_self_response() {
let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), true).unwrap();
job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), None, true).unwrap();
assert_eq!(job.state(), JobSessionState::Active);
assert_eq!(job.transport().response(), (NodeId::from(2), 4));
}
@ -579,7 +588,7 @@ pub mod tests {
#[test]
fn job_does_not_broadcasts_self_response() {
let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), false).unwrap();
job.initialize(vec![Public::from(1), Public::from(2)].into_iter().collect(), None, false).unwrap();
assert_eq!(job.state(), JobSessionState::Active);
assert!(job.transport().is_empty_response());
}
@ -78,7 +78,7 @@ impl JobExecutor for KeyAccessJob {
}

self.requester = Some(partial_request.clone());
self.acl_storage.check(&partial_request.public(&self.id).ok_or(Error::InsufficientRequesterData)?, &self.id)
self.acl_storage.check(partial_request.address(&self.id).map_err(Error::InsufficientRequesterData)?, &self.id)
.map_err(|_| Error::AccessDenied)
.map(|is_confirmed| if is_confirmed { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) })
}
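The ACL check now resolves the `Requester` to an `Address` before consulting storage, and a failed resolution carries its reason into `InsufficientRequesterData`. A sketch of the resolution this relies on (the exact method shape is an assumption; the signature arm recovers the signer from the key id):

    // assumed shape of the Requester -> Address resolution
    match requester {
        Requester::Address(address) => Ok(address),
        Requester::Public(ref public) => Ok(public_to_address(public)),
        Requester::Signature(ref signature) => recover(signature, &key_id)
            .map(|public| public_to_address(&public))
            .map_err(|err| err.to_string()),
    }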
@ -272,6 +272,8 @@ pub struct InitializeSession {
pub session: MessageSessionId,
/// Session-level nonce.
pub session_nonce: u64,
/// Session origin address (if any).
pub origin: Option<SerializableAddress>,
/// Session author.
pub author: SerializableAddress,
/// All session participants along with their identification numbers.
@ -713,6 +715,8 @@ pub struct DecryptionConsensusMessage {
pub sub_session: SerializableSecret,
/// Session-level nonce.
pub session_nonce: u64,
/// Session origin (in consensus initialization message).
pub origin: Option<SerializableAddress>,
/// Consensus message.
pub message: ConsensusMessage,
}
@ -788,6 +792,8 @@ pub struct DecryptionSessionDelegation {
pub sub_session: SerializableSecret,
/// Session-level nonce.
pub session_nonce: u64,
/// Session origin.
pub origin: Option<SerializableAddress>,
/// Requester.
pub requester: SerializableRequester,
/// Key version.
@ -115,7 +115,7 @@ pub enum Error {
/// Can't start exclusive session, because there are other active sessions.
HasActiveSessions,
/// Insufficient requester data.
InsufficientRequesterData,
InsufficientRequesterData(String),
}

impl From<ethkey::Error> for Error {
@ -164,7 +164,7 @@ impl fmt::Display for Error {
Error::AccessDenied => write!(f, "Access denied"),
Error::ExclusiveSessionActive => write!(f, "Exclusive session active"),
Error::HasActiveSessions => write!(f, "Unable to start exclusive session"),
Error::InsufficientRequesterData => write!(f, "Insufficient requester data"),
Error::InsufficientRequesterData(ref e) => write!(f, "Insufficient requester data: {}", e),
}
}
}
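Carrying a `String` inside the variant lets call sites report why the requester data was insufficient rather than emitting a bare error. Construction sketch (the message text is illustrative):

    // call sites now attach the underlying reason
    return Err(Error::InsufficientRequesterData("public key is not recoverable from signature".into()));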
@ -475,6 +475,10 @@ pub mod tests {
let config = ServiceConfiguration {
listener_address: None,
service_contract_address: None,
service_contract_srv_gen_address: None,
service_contract_srv_retr_address: None,
service_contract_doc_store_address: None,
service_contract_doc_sretr_address: None,
acl_check_enabled: true,
data_path: tempdir.path().display().to_string(),
cluster_config: ClusterConfiguration {
@ -86,26 +86,75 @@ pub fn start(client: Arc<Client>, sync: Arc<SyncProvider>, self_key_pair: Arc<No
let key_server_set = key_server_set::OnChainKeyServerSet::new(trusted_client.clone(), self_key_pair.clone(),
config.cluster_config.auto_migrate_enabled, config.cluster_config.nodes.clone())?;
let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?);
let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set.clone(), self_key_pair.clone(), acl_storage, key_storage.clone())?);
let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set.clone(), self_key_pair.clone(), acl_storage.clone(), key_storage.clone())?);
let cluster = key_server.cluster();
let key_server: Arc<KeyServer> = key_server;

// prepare listeners
// prepare HTTP listener
let http_listener = match config.listener_address {
Some(listener_address) => Some(listener::http_listener::KeyServerHttpListener::start(listener_address, key_server.clone())?),
Some(listener_address) => Some(listener::http_listener::KeyServerHttpListener::start(listener_address, Arc::downgrade(&key_server))?),
None => None,
};
let contract_listener = config.service_contract_address.map(|service_contract_address| {
let service_contract = Arc::new(listener::service_contract::OnChainServiceContract::new(trusted_client, service_contract_address, self_key_pair.clone()));
let contract_listener = listener::service_contract_listener::ServiceContractListener::new(listener::service_contract_listener::ServiceContractListenerParams {
contract: service_contract,
key_server: key_server.clone(),
self_key_pair: self_key_pair,
key_server_set: key_server_set,
cluster: cluster,
key_storage: key_storage,
});
client.add_notify(contract_listener.clone());
contract_listener
});

// prepare service contract listeners
let create_service_contract = |address, name, api_mask|
Arc::new(listener::service_contract::OnChainServiceContract::new(
api_mask,
trusted_client.clone(),
name,
address,
self_key_pair.clone()));

let mut contracts: Vec<Arc<listener::service_contract::ServiceContract>> = Vec::new();
config.service_contract_address.map(|address|
create_service_contract(address,
listener::service_contract::SERVICE_CONTRACT_REGISTRY_NAME.to_owned(),
listener::ApiMask::all()))
.map(|l| contracts.push(l));
config.service_contract_srv_gen_address.map(|address|
create_service_contract(address,
listener::service_contract::SRV_KEY_GEN_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(),
listener::ApiMask { server_key_generation_requests: true, ..Default::default() }))
.map(|l| contracts.push(l));
config.service_contract_srv_retr_address.map(|address|
create_service_contract(address,
listener::service_contract::SRV_KEY_RETR_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(),
listener::ApiMask { server_key_retrieval_requests: true, ..Default::default() }))
.map(|l| contracts.push(l));
config.service_contract_doc_store_address.map(|address|
create_service_contract(address,
listener::service_contract::DOC_KEY_STORE_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(),
listener::ApiMask { document_key_store_requests: true, ..Default::default() }))
.map(|l| contracts.push(l));
config.service_contract_doc_sretr_address.map(|address|
create_service_contract(address,
listener::service_contract::DOC_KEY_SRETR_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(),
listener::ApiMask { document_key_shadow_retrieval_requests: true, ..Default::default() }))
.map(|l| contracts.push(l));

let contract: Option<Arc<listener::service_contract::ServiceContract>> = match contracts.len() {
0 => None,
1 => Some(contracts.pop().expect("contract.len() is 1; qed")),
_ => Some(Arc::new(listener::service_contract_aggregate::OnChainServiceContractAggregate::new(contracts))),
};

let contract_listener = match contract {
Some(contract) => Some({
let listener = listener::service_contract_listener::ServiceContractListener::new(
listener::service_contract_listener::ServiceContractListenerParams {
contract: contract,
self_key_pair: self_key_pair.clone(),
key_server_set: key_server_set,
acl_storage: acl_storage,
cluster: cluster,
key_storage: key_storage,
}
)?;
client.add_notify(listener.clone());
listener
}),
None => None,
};

Ok(Box::new(listener::Listener::new(key_server, http_listener, contract_listener)))
}
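Each dedicated contract is created with an `ApiMask` that enables exactly one request family, the registry contract keeps `ApiMask::all()`, and with more than one contract configured an `OnChainServiceContractAggregate` fans requests out. A self-contained sketch of the mask-gated dispatch idea (the struct shapes below are assumptions for illustration, not the crate's actual types):

    #[derive(Default, Clone, Copy)]
    struct ApiMask {
        server_key_generation_requests: bool,
        document_key_store_requests: bool,
    }

    struct MaskedContract { name: &'static str, mask: ApiMask }

    // the aggregate forwards a request kind only to contracts whose mask enables it
    fn dispatch_generation_request(contracts: &[MaskedContract]) {
        for contract in contracts.iter().filter(|c| c.mask.server_key_generation_requests) {
            println!("forwarding server key generation request to {}", contract.name);
        }
    }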
@ -16,7 +16,7 @@

use std::collections::BTreeSet;
use std::io::Read;
use std::sync::Arc;
use std::sync::{Arc, Weak};
use hyper::header;
use hyper::uri::RequestUri;
use hyper::method::Method as HttpMethod;
@ -77,12 +77,12 @@ struct KeyServerHttpHandler {

/// Shared http handler
struct KeyServerSharedHttpHandler {
key_server: Arc<KeyServer>,
key_server: Weak<KeyServer>,
}

impl KeyServerHttpListener {
/// Start KeyServer http listener
pub fn start(listener_address: NodeAddress, key_server: Arc<KeyServer>) -> Result<Self, Error> {
pub fn start(listener_address: NodeAddress, key_server: Weak<KeyServer>) -> Result<Self, Error> {
let shared_handler = Arc::new(KeyServerSharedHttpHandler {
key_server: key_server,
});
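Storing a `Weak<KeyServer>` is the heart of the shutdown fix: the HTTP listener can no longer keep the key server alive, so dropping the server actually tears it down, and each request must `upgrade()` first. A self-contained sketch of the pattern:

    use std::sync::{Arc, Weak};

    // each request upgrades the weak handle and fails cleanly after shutdown
    fn serve(key_server: &Weak<String>) -> Result<usize, String> {
        key_server.upgrade()
            .map(|server| server.len())
            .ok_or_else(|| "KeyServer is already destroyed".to_owned())
    }

    fn main() {
        let server = Arc::new("keyserver".to_owned());
        let handle = Arc::downgrade(&server);
        assert!(serve(&handle).is_ok());
        drop(server);
        assert!(serve(&handle).is_err());
    }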
@@ -128,56 +128,72 @@ impl HttpHandler for KeyServerHttpHandler {
		match &req_uri {
			&RequestUri::AbsolutePath(ref path) => match parse_request(&req_method, &path, &req_body) {
				Request::GenerateServerKey(document, signature, threshold) => {
-					return_server_public_key(req, res, self.handler.key_server.generate_key(&document, &signature, threshold)
+					return_server_public_key(req, res, self.handler.key_server.upgrade()
+						.map(|key_server| key_server.generate_key(&document, &signature.into(), threshold))
+						.unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into())))
						.map_err(|err| {
							warn!(target: "secretstore", "GenerateServerKey request {} has failed with: {}", req_uri, err);
							err
						}));
				},
				Request::StoreDocumentKey(document, signature, common_point, encrypted_document_key) => {
-					return_empty(req, res, self.handler.key_server.store_document_key(&document, &signature, common_point, encrypted_document_key)
+					return_empty(req, res, self.handler.key_server.upgrade()
+						.map(|key_server| key_server.store_document_key(&document, &signature.into(), common_point, encrypted_document_key))
+						.unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into())))
						.map_err(|err| {
							warn!(target: "secretstore", "StoreDocumentKey request {} has failed with: {}", req_uri, err);
							err
						}));
				},
				Request::GenerateDocumentKey(document, signature, threshold) => {
-					return_document_key(req, res, self.handler.key_server.generate_document_key(&document, &signature, threshold)
+					return_document_key(req, res, self.handler.key_server.upgrade()
+						.map(|key_server| key_server.generate_document_key(&document, &signature.into(), threshold))
+						.unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into())))
						.map_err(|err| {
							warn!(target: "secretstore", "GenerateDocumentKey request {} has failed with: {}", req_uri, err);
							err
						}));
				},
				Request::GetDocumentKey(document, signature) => {
-					return_document_key(req, res, self.handler.key_server.restore_document_key(&document, &signature)
+					return_document_key(req, res, self.handler.key_server.upgrade()
+						.map(|key_server| key_server.restore_document_key(&document, &signature.into()))
+						.unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into())))
						.map_err(|err| {
							warn!(target: "secretstore", "GetDocumentKey request {} has failed with: {}", req_uri, err);
							err
						}));
				},
				Request::GetDocumentKeyShadow(document, signature) => {
-					return_document_key_shadow(req, res, self.handler.key_server.restore_document_key_shadow(&document, &signature)
+					return_document_key_shadow(req, res, self.handler.key_server.upgrade()
+						.map(|key_server| key_server.restore_document_key_shadow(&document, &signature.into()))
+						.unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into())))
						.map_err(|err| {
							warn!(target: "secretstore", "GetDocumentKeyShadow request {} has failed with: {}", req_uri, err);
							err
						}));
				},
				Request::SchnorrSignMessage(document, signature, message_hash) => {
-					return_message_signature(req, res, self.handler.key_server.sign_message_schnorr(&document, &signature, message_hash)
+					return_message_signature(req, res, self.handler.key_server.upgrade()
+						.map(|key_server| key_server.sign_message_schnorr(&document, &signature.into(), message_hash))
+						.unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into())))
						.map_err(|err| {
							warn!(target: "secretstore", "SchnorrSignMessage request {} has failed with: {}", req_uri, err);
							err
						}));
				},
				Request::EcdsaSignMessage(document, signature, message_hash) => {
-					return_message_signature(req, res, self.handler.key_server.sign_message_ecdsa(&document, &signature, message_hash)
+					return_message_signature(req, res, self.handler.key_server.upgrade()
+						.map(|key_server| key_server.sign_message_ecdsa(&document, &signature.into(), message_hash))
+						.unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into())))
						.map_err(|err| {
							warn!(target: "secretstore", "EcdsaSignMessage request {} has failed with: {}", req_uri, err);
							err
						}));
				},
				Request::ChangeServersSet(old_set_signature, new_set_signature, new_servers_set) => {
-					return_empty(req, res, self.handler.key_server.change_servers_set(old_set_signature, new_set_signature, new_servers_set)
+					return_empty(req, res, self.handler.key_server.upgrade()
+						.map(|key_server| key_server.change_servers_set(old_set_signature, new_set_signature, new_servers_set))
+						.unwrap_or(Err(Error::Internal("KeyServer is already destroyed".into())))
						.map_err(|err| {
							warn!(target: "secretstore", "ChangeServersSet request {} has failed with: {}", req_uri, err);
							err
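The repeated `upgrade()` chain above is the KeyServer shutdown fix from the commit message: the HTTP handler keeps only a `Weak<KeyServer>`, so it can never keep the key server alive, and each request either obtains a temporary strong reference or answers with an internal error. A self-contained sketch with hypothetical stand-in types:

```rust
use std::sync::{Arc, Weak};

// Stand-in for the real KeyServer trait (assumption).
trait KeyServer {
	fn generate_key(&self, threshold: usize) -> Result<Vec<u8>, String>;
}

struct Handler {
	key_server: Weak<dyn KeyServer>,
}

impl Handler {
	fn handle(&self, threshold: usize) -> Result<Vec<u8>, String> {
		self.key_server.upgrade()
			// still alive: forward the call through the temporary strong reference
			.map(|key_server| key_server.generate_key(threshold))
			// already dropped: report an error instead of blocking shutdown
			.unwrap_or(Err("KeyServer is already destroyed".into()))
	}
}

fn main() {
	struct Dummy;
	impl KeyServer for Dummy {
		fn generate_key(&self, threshold: usize) -> Result<Vec<u8>, String> {
			Ok(vec![threshold as u8])
		}
	}
	let server: Arc<dyn KeyServer> = Arc::new(Dummy);
	let handler = Handler { key_server: Arc::downgrade(&server) };
	assert!(handler.handle(1).is_ok());
	drop(server);
	// with the last strong reference gone, the handler reports the error branch
	assert!(handler.handle(1).is_err());
}
```

Dropping the last `Arc<KeyServer>` makes every subsequent request take the error branch, which is exactly what the `http_listener_successfully_drops` test below relies on.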
@@ -241,7 +257,7 @@ fn return_bytes<T: Serialize>(req: HttpRequest, mut res: HttpResponse, result: R

fn return_error(mut res: HttpResponse, err: Error) {
	match err {
-		Error::BadSignature => *res.status_mut() = HttpStatusCode::BadRequest,
+		Error::InsufficientRequesterData(_) => *res.status_mut() = HttpStatusCode::BadRequest,
		Error::AccessDenied => *res.status_mut() = HttpStatusCode::Forbidden,
		Error::DocumentNotFound => *res.status_mut() = HttpStatusCode::NotFound,
		Error::Hyper(_) => *res.status_mut() = HttpStatusCode::BadRequest,
@@ -342,15 +358,16 @@ mod tests {
	use std::sync::Arc;
	use hyper::method::Method as HttpMethod;
	use ethkey::Public;
+	use traits::KeyServer;
	use key_server::tests::DummyKeyServer;
	use types::all::NodeAddress;
	use super::{parse_request, Request, KeyServerHttpListener};

	#[test]
	fn http_listener_successfully_drops() {
-		let key_server = Arc::new(DummyKeyServer::default());
+		let key_server: Arc<KeyServer> = Arc::new(DummyKeyServer::default());
		let address = NodeAddress { address: "127.0.0.1".into(), port: 9000 };
-		let listener = KeyServerHttpListener::start(address, key_server).unwrap();
+		let listener = KeyServerHttpListener::start(address, Arc::downgrade(&key_server)).unwrap();
		drop(listener);
	}
@@ -16,6 +16,7 @@

pub mod http_listener;
pub mod service_contract;
+pub mod service_contract_aggregate;
pub mod service_contract_listener;
mod tasks_queue;
@@ -23,15 +24,42 @@ use std::collections::BTreeSet;
use std::sync::Arc;
use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, AdminSessionsServer, KeyServer};
use types::all::{Error, Public, MessageHash, EncryptedMessageSignature, RequestSignature, ServerKeyId,
-	EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId};
+	EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId, Requester};

+/// Available API mask.
+#[derive(Debug, Default)]
+pub struct ApiMask {
+	/// Accept server key generation requests.
+	pub server_key_generation_requests: bool,
+	/// Accept server key retrieval requests.
+	pub server_key_retrieval_requests: bool,
+	/// Accept document key store requests.
+	pub document_key_store_requests: bool,
+	/// Accept document key shadow retrieval requests.
+	pub document_key_shadow_retrieval_requests: bool,
+}

+/// Combined HTTP + service contract listener.
pub struct Listener {
	key_server: Arc<KeyServer>,
	_http: Option<http_listener::KeyServerHttpListener>,
	_contract: Option<Arc<service_contract_listener::ServiceContractListener>>,
}

+impl ApiMask {
+	/// Create mask that accepts all requests.
+	pub fn all() -> Self {
+		ApiMask {
+			server_key_generation_requests: true,
+			server_key_retrieval_requests: true,
+			document_key_store_requests: true,
+			document_key_shadow_retrieval_requests: true,
+		}
+	}
+}

impl Listener {
+	/// Create new listener.
	pub fn new(key_server: Arc<KeyServer>, http: Option<http_listener::KeyServerHttpListener>, contract: Option<Arc<service_contract_listener::ServiceContractListener>>) -> Self {
		Self {
			key_server: key_server,
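A standalone illustration of the mask added above, showing how struct-update syntax keeps single-purpose contract configs terse. `ApiMask` is copied from the hunk; the `main` wrapper is only for the example.

```rust
#[derive(Debug, Default)]
pub struct ApiMask {
	pub server_key_generation_requests: bool,
	pub server_key_retrieval_requests: bool,
	pub document_key_store_requests: bool,
	pub document_key_shadow_retrieval_requests: bool,
}

fn main() {
	// dedicated document-key-store contract: everything else stays false via Default
	let store_only = ApiMask { document_key_store_requests: true, ..Default::default() };
	// legacy single contract: accept every request kind (mirrors ApiMask::all)
	let all = ApiMask {
		server_key_generation_requests: true,
		server_key_retrieval_requests: true,
		document_key_store_requests: true,
		document_key_shadow_retrieval_requests: true,
	};
	println!("store-only: {:?}\nall: {:?}", store_only, all);
}
```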
@@ -44,36 +72,36 @@ impl Listener {
impl KeyServer for Listener {}

impl ServerKeyGenerator for Listener {
-	fn generate_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<Public, Error> {
-		self.key_server.generate_key(key_id, signature, threshold)
+	fn generate_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result<Public, Error> {
+		self.key_server.generate_key(key_id, author, threshold)
	}
}

impl DocumentKeyServer for Listener {
-	fn store_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> {
-		self.key_server.store_document_key(key_id, signature, common_point, encrypted_document_key)
+	fn store_document_key(&self, key_id: &ServerKeyId, author: &Requester, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> {
+		self.key_server.store_document_key(key_id, author, common_point, encrypted_document_key)
	}

-	fn generate_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<EncryptedDocumentKey, Error> {
-		self.key_server.generate_document_key(key_id, signature, threshold)
+	fn generate_document_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result<EncryptedDocumentKey, Error> {
+		self.key_server.generate_document_key(key_id, author, threshold)
	}

-	fn restore_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKey, Error> {
-		self.key_server.restore_document_key(key_id, signature)
+	fn restore_document_key(&self, key_id: &ServerKeyId, requester: &Requester) -> Result<EncryptedDocumentKey, Error> {
+		self.key_server.restore_document_key(key_id, requester)
	}

-	fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKeyShadow, Error> {
-		self.key_server.restore_document_key_shadow(key_id, signature)
+	fn restore_document_key_shadow(&self, key_id: &ServerKeyId, requester: &Requester) -> Result<EncryptedDocumentKeyShadow, Error> {
+		self.key_server.restore_document_key_shadow(key_id, requester)
	}
}

impl MessageSigner for Listener {
-	fn sign_message_schnorr(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
-		self.key_server.sign_message_schnorr(key_id, signature, message)
+	fn sign_message_schnorr(&self, key_id: &ServerKeyId, requester: &Requester, message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
+		self.key_server.sign_message_schnorr(key_id, requester, message)
	}

-	fn sign_message_ecdsa(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
-		self.key_server.sign_message_ecdsa(key_id, signature, message)
+	fn sign_message_ecdsa(&self, key_id: &ServerKeyId, requester: &Requester, message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
+		self.key_server.sign_message_ecdsa(key_id, requester, message)
	}
}
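This hunk renames `signature: &RequestSignature` to `author`/`requester: &Requester` across the delegating impls, matching the commit's "ability to identify requester via Public/Address". A hypothetical sketch of what such an enum can look like; the real variants live in the SecretStore types module and may differ:

```rust
// All type aliases below are illustrative stand-ins, not the crate's real types.
type RequestSignature = Vec<u8>;
type Public = [u8; 64];
type Address = [u8; 20];

/// Ways a requester can identify itself (hypothetical shape).
enum Requester {
	/// A signature of the server key id.
	Signature(RequestSignature),
	/// A plain public key.
	Public(Public),
	/// An Ethereum address.
	Address(Address),
}

fn requester_address(requester: &Requester) -> Option<Address> {
	match *requester {
		// recovering an address from a signature needs the signed message, elided here
		Requester::Signature(_) => None,
		// real code would derive the address by hashing the public key
		Requester::Public(_public) => None,
		Requester::Address(address) => Some(address),
	}
}

fn main() {
	let r = Requester::Address([0u8; 20]);
	assert!(requester_address(&r).is_some());
}
```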
@@ -81,4 +109,4 @@ impl AdminSessionsServer for Listener {
	fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet<NodeId>) -> Result<(), Error> {
		self.key_server.change_servers_set(old_set_signature, new_set_signature, new_servers_set)
	}
}
@@ -16,28 +16,51 @@

use std::sync::Arc;
use parking_lot::RwLock;
+use ethabi::RawLog;
use ethcore::filter::Filter;
use ethcore::client::{Client, BlockChainClient, BlockId, RegistryInfo, CallContract};
-use ethkey::{Public, Signature, public_to_address};
+use ethkey::{Public, public_to_address};
use hash::keccak;
+use bytes::Bytes;
use ethereum_types::{H256, U256, Address};
+use listener::ApiMask;
use listener::service_contract_listener::ServiceTask;
use trusted_client::TrustedClient;
use {ServerKeyId, NodeKeyPair, ContractAddress};

use_contract!(service, "Service", "res/service.json");

-/// Name of the SecretStore contract in the registry.
-const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service";
+/// Name of the general SecretStore contract in the registry.
+pub const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service";
+/// Name of the server key generation SecretStore contract in the registry.
+pub const SRV_KEY_GEN_SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service_srv_gen";
+/// Name of the server key retrieval SecretStore contract in the registry.
+pub const SRV_KEY_RETR_SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service_srv_retr";
+/// Name of the document key store SecretStore contract in the registry.
+pub const DOC_KEY_STORE_SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service_doc_store";
+/// Name of the document key retrieval SecretStore contract in the registry.
+pub const DOC_KEY_SRETR_SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service_doc_sretr";

-/// Key server has been added to the set.
-const SERVER_KEY_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyRequested(bytes32,uint256)";
+/// Server key generation has been requested.
+const SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyGenerationRequested(bytes32,address,uint8)";
+/// Server key retrieval has been requested.
+const SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyRetrievalRequested(bytes32)";
+/// Document key store has been requested.
+const DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME: &'static [u8] = &*b"DocumentKeyStoreRequested(bytes32,address,bytes,bytes)";
+/// Document key common part retrieval has been requested.
+const DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME: &'static [u8] = &*b"DocumentKeyCommonRetrievalRequested(bytes32,address)";
+/// Document key personal part retrieval has been requested.
+const DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME: &'static [u8] = &*b"DocumentKeyPersonalRetrievalRequested(bytes32,bytes)";

/// Number of confirmations required before request can be processed.
const REQUEST_CONFIRMATIONS_REQUIRED: u64 = 3;

lazy_static! {
-	static ref SERVER_KEY_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_REQUESTED_EVENT_NAME);
+	pub static ref SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME);
+	pub static ref SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME);
+	pub static ref DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME_HASH: H256 = keccak(DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME);
+	pub static ref DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH: H256 = keccak(DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME);
+	pub static ref DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH: H256 = keccak(DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME);
}

/// Service contract trait.
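The constants above exist because topic 0 of an Ethereum log is the keccak-256 hash of the event's canonical signature, so each hash is computed once and incoming logs are matched with a single `H256` comparison. A standalone sketch; the `tiny-keccak` crate here is an assumption for the example, the PR itself uses the project's own `hash::keccak`:

```rust
use tiny_keccak::{Hasher, Keccak};

/// keccak-256 of a canonical event signature == the log's topic 0.
fn event_topic(signature: &[u8]) -> [u8; 32] {
	let mut hasher = Keccak::v256();
	hasher.update(signature);
	let mut topic = [0u8; 32];
	hasher.finalize(&mut topic);
	topic
}

fn main() {
	let generation = event_topic(b"ServerKeyGenerationRequested(bytes32,address,uint8)");
	let retrieval = event_topic(b"ServerKeyRetrievalRequested(bytes32)");
	// a listener compares log.topics[0] against these precomputed hashes
	assert_ne!(generation, retrieval);
	println!("generation topic: {:02x?}", generation);
}
```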
@@ -45,61 +68,82 @@ pub trait ServiceContract: Send + Sync {
	/// Update contract when new blocks are enacted. Returns true if contract is installed && up-to-date (i.e. chain is synced).
	fn update(&self) -> bool;
	/// Read recent contract logs. Returns topics of every entry.
-	fn read_logs(&self) -> Box<Iterator<Item=Vec<H256>>>;
+	fn read_logs(&self) -> Box<Iterator<Item=ServiceTask>>;
	/// Publish generated key.
	fn read_pending_requests(&self) -> Box<Iterator<Item=(bool, ServiceTask)>>;
-	/// Publish server key.
-	fn publish_server_key(&self, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String>;
+	/// Publish generated server key.
+	fn publish_generated_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public) -> Result<(), String>;
+	/// Publish server key generation error.
+	fn publish_server_key_generation_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String>;
+	/// Publish retrieved server key.
+	fn publish_retrieved_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public, threshold: usize) -> Result<(), String>;
+	/// Publish server key retrieval error.
+	fn publish_server_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String>;
+	/// Publish stored document key.
+	fn publish_stored_document_key(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String>;
+	/// Publish document key store error.
+	fn publish_document_key_store_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String>;
+	/// Publish retrieved document key common.
+	fn publish_retrieved_document_key_common(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, common_point: Public, threshold: usize) -> Result<(), String>;
+	/// Publish retrieved document key personal.
+	fn publish_retrieved_document_key_personal(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, participants: &[Address], decrypted_secret: Public, shadow: Bytes) -> Result<(), String>;
+	/// Publish document key retrieval error.
+	fn publish_document_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address) -> Result<(), String>;
}

/// On-chain service contract.
pub struct OnChainServiceContract {
+	/// Requests mask.
+	mask: ApiMask,
	/// Blockchain client.
	client: TrustedClient,
	/// This node key pair.
	self_key_pair: Arc<NodeKeyPair>,
-	/// Contract address.
+	/// Contract registry name (if any).
+	name: String,
+	/// Contract address.
	address: ContractAddress,
	/// Contract.
+	contract: service::Service,
+	/// Contract data.
	data: RwLock<ServiceData>,
}

/// On-chain service contract data.
struct ServiceData {
-	/// Contract.
-	pub contract: service::Service,
-	/// Contract address.
+	/// Actual contract address.
	pub contract_address: Address,
	/// Last block we have read logs from.
	pub last_log_block: Option<H256>,
}

/// Pending requests iterator.
-struct PendingRequestsIterator {
-	/// Blockchain client.
-	client: Arc<Client>,
-	/// Contract.
-	contract: service::Service,
-	/// Contract address.
-	contract_address: Address,
-	/// This node key pair.
-	self_key_pair: Arc<NodeKeyPair>,
-	/// Block, this iterator is created for.
-	block: H256,
+struct PendingRequestsIterator<F: Fn(U256) -> Option<(bool, ServiceTask)>> {
+	/// Pending request read function.
+	read_request: F,
	/// Current request index.
	index: U256,
	/// Requests length.
	length: U256,
}

+/// Server key generation related functions.
+struct ServerKeyGenerationService;
+/// Server key retrieval related functions.
+struct ServerKeyRetrievalService;
+/// Document key store related functions.
+struct DocumentKeyStoreService;
+/// Document key shadow retrieval related functions.
+struct DocumentKeyShadowRetrievalService;

impl OnChainServiceContract {
	/// Create new on-chain service contract.
-	pub fn new(client: TrustedClient, address: ContractAddress, self_key_pair: Arc<NodeKeyPair>) -> Self {
+	pub fn new(mask: ApiMask, client: TrustedClient, name: String, address: ContractAddress, self_key_pair: Arc<NodeKeyPair>) -> Self {
		let contract_addr = match address {
-			ContractAddress::Registry => client.get().and_then(|c| c.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned(), BlockId::Latest)
+			ContractAddress::Registry => client.get().and_then(|c| c.registry_address(name.clone(), BlockId::Latest)
				.map(|address| {
-					trace!(target: "secretstore", "{}: installing service contract from address {}",
-						self_key_pair.public(), address);
+					trace!(target: "secretstore", "{}: installing {} service contract from address {}",
+						self_key_pair.public(), name, address);
					address
				}))
				.unwrap_or_default(),
@@ -111,16 +155,81 @@ impl OnChainServiceContract {
		};

		OnChainServiceContract {
+			mask: mask,
			client: client,
			self_key_pair: self_key_pair,
+			name: name,
			address: address,
+			contract: service::Service::default(),
			data: RwLock::new(ServiceData {
-				contract: service::Service::default(),
				contract_address: contract_addr,
				last_log_block: None,
			}),
		}
	}

+	/// Send transaction to the service contract.
+	fn send_contract_transaction<C, P>(&self, origin: &Address, server_key_id: &ServerKeyId, is_response_required: C, prepare_tx: P) -> Result<(), String>
+		where C: FnOnce(&Client, &Address, &service::Service, &ServerKeyId, &Address) -> bool,
+			P: FnOnce(&Client, &Address, &service::Service) -> Result<Bytes, String> {
+		// only publish if contract address is set && client is online
+		let client = match self.client.get() {
+			Some(client) => client,
+			None => return Err("trusted client is required to publish key".into()),
+		};
+
+		// only publish key if contract waits for publication
+		// failing is ok here - it could be that enough confirmations have been received
+		// or key has been requested using HTTP API
+		let self_address = public_to_address(self.self_key_pair.public());
+		if !is_response_required(&*client, origin, &self.contract, server_key_id, &self_address) {
+			return Ok(());
+		}
+
+		// prepare transaction data
+		let transaction_data = prepare_tx(&*client, origin, &self.contract)?;
+
+		// send transaction
+		client.transact_contract(
+			origin.clone(),
+			transaction_data
+		).map_err(|e| format!("{}", e))?;
+
+		Ok(())
+	}
+
+	/// Create task-specific pending requests iterator.
+	fn create_pending_requests_iterator<
+		C: 'static + Fn(&Client, &Address, &service::Service, &BlockId) -> Result<U256, String>,
+		R: 'static + Fn(&NodeKeyPair, &Client, &Address, &service::Service, &BlockId, U256) -> Result<(bool, ServiceTask), String>
+	>(&self, client: Arc<Client>, contract_address: &Address, block: &BlockId, get_count: C, read_item: R) -> Box<Iterator<Item=(bool, ServiceTask)>> {
+		let contract = service::Service::default();
+		get_count(&*client, contract_address, &contract, block)
+			.map(|count| {
+				let client = client.clone();
+				let self_key_pair = self.self_key_pair.clone();
+				let contract_address = contract_address.clone();
+				let block = block.clone();
+				Box::new(PendingRequestsIterator {
+					read_request: move |index| read_item(&*self_key_pair, &*client, &contract_address, &contract, &block, index)
+						.map_err(|error| {
+							warn!(target: "secretstore", "{}: reading pending request failed: {}",
+								self_key_pair.public(), error);
+							error
+						})
+						.ok(),
+					index: 0.into(),
+					length: count,
+				}) as Box<Iterator<Item=(bool, ServiceTask)>>
+			})
+			.map_err(|error| {
+				warn!(target: "secretstore", "{}: creating pending requests iterator failed: {}",
+					self.self_key_pair.public(), error);
+				error
+			})
+			.ok()
+			.unwrap_or_else(|| Box::new(::std::iter::empty()))
+	}
}

impl ServiceContract for OnChainServiceContract {
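After this refactor `PendingRequestsIterator` is a plain index walker: all chain access is behind the `read_request` closure, so every request kind can reuse the same driver with its own reader. A std-only sketch with `String` standing in for `ServiceTask`:

```rust
struct PendingRequestsIterator<F: Fn(u64) -> Option<(bool, String)>> {
	read_request: F,
	index: u64,
	length: u64,
}

impl<F> Iterator for PendingRequestsIterator<F> where F: Fn(u64) -> Option<(bool, String)> {
	type Item = (bool, String);

	fn next(&mut self) -> Option<Self::Item> {
		if self.index >= self.length {
			return None;
		}
		let index = self.index;
		self.index += 1;
		// a failed read ends the iteration here, mirroring how the real
		// reader maps errors to None via `.ok()`
		(self.read_request)(index)
	}
}

fn main() {
	let tasks: Vec<_> = PendingRequestsIterator {
		read_request: |i| Some((i % 2 == 0, format!("task-{}", i))),
		index: 0,
		length: 3,
	}.collect();
	assert_eq!(tasks.len(), 3);
}
```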
@@ -130,10 +239,10 @@ impl ServiceContract for OnChainServiceContract {
		if let &ContractAddress::Registry = &self.address {
			if let Some(client) = self.client.get() {
				// update contract address from registry
-				let service_contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned(), BlockId::Latest).unwrap_or_default();
+				let service_contract_addr = client.registry_address(self.name.clone(), BlockId::Latest).unwrap_or_default();
				if self.data.read().contract_address != service_contract_addr {
-					trace!(target: "secretstore", "{}: installing service contract from address {}",
-						self.self_key_pair.public(), service_contract_addr);
+					trace!(target: "secretstore", "{}: installing {} service contract from address {}",
+						self.self_key_pair.public(), self.name, service_contract_addr);
					self.data.write().contract_address = service_contract_addr;
				}
			}
@@ -143,7 +252,7 @@ impl ServiceContract for OnChainServiceContract {
		&& self.client.get().is_some()
	}

-	fn read_logs(&self) -> Box<Iterator<Item=Vec<H256>>> {
+	fn read_logs(&self) -> Box<Iterator<Item=ServiceTask>> {
		let client = match self.client.get() {
			Some(client) => client,
			None => {
@@ -181,16 +290,33 @@ impl ServiceContract for OnChainServiceContract {
			from_block: BlockId::Hash(first_block),
			to_block: BlockId::Hash(last_block),
			address: Some(vec![address]),
-			topics: vec![
-				Some(vec![*SERVER_KEY_REQUESTED_EVENT_NAME_HASH]),
-				None,
-				None,
-				None,
-			],
+			topics: vec![Some(mask_topics(&self.mask))],
			limit: None,
		});

-		Box::new(request_logs.into_iter().map(|log| log.entry.topics))
+		Box::new(request_logs.into_iter()
+			.filter_map(|log| {
+				let raw_log: RawLog = (log.entry.topics.into_iter().map(|t| t.0.into()).collect(), log.entry.data).into();
+				if raw_log.topics[0] == *SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME_HASH {
+					ServerKeyGenerationService::parse_log(&address, &self.contract, raw_log)
+				} else if raw_log.topics[0] == *SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME_HASH {
+					ServerKeyRetrievalService::parse_log(&address, &self.contract, raw_log)
+				} else if raw_log.topics[0] == *DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME_HASH {
+					DocumentKeyStoreService::parse_log(&address, &self.contract, raw_log)
+				} else if raw_log.topics[0] == *DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH {
+					DocumentKeyShadowRetrievalService::parse_common_request_log(&address, &self.contract, raw_log)
+				} else if raw_log.topics[0] == *DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH {
+					DocumentKeyShadowRetrievalService::parse_personal_request_log(&address, &self.contract, raw_log)
+				} else {
+					Err("unknown type of log entry".into())
+				}
+				.map_err(|error| {
+					warn!(target: "secretstore", "{}: error parsing log entry from service contract: {}",
+						self.self_key_pair.public(), error);
+					error
+				})
+				.ok()
+			}).collect::<Vec<_>>().into_iter())
	}

	fn read_pending_requests(&self) -> Box<Iterator<Item=(bool, ServiceTask)>> {
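The dispatch style used in `read_logs` above, reduced to std Rust: route each log by its first topic and drop unparseable entries via `filter_map` instead of failing the whole batch. All types here are stand-ins:

```rust
type Topic = u8;

#[derive(Debug)]
enum ServiceTask {
	Generate(u8),
	Retrieve(u8),
}

// Stand-in for the per-service parse_log helpers above.
fn parse(topic: Topic, payload: u8) -> Result<ServiceTask, String> {
	match topic {
		1 => Ok(ServiceTask::Generate(payload)),
		2 => Ok(ServiceTask::Retrieve(payload)),
		_ => Err("unknown type of log entry".into()),
	}
}

fn main() {
	let logs = vec![(1, 10), (9, 0), (2, 20)];
	let tasks: Vec<ServiceTask> = logs.into_iter()
		.filter_map(|(topic, payload)| parse(topic, payload).ok()) // bad entries are skipped
		.collect();
	assert_eq!(tasks.len(), 2);
}
```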
@@ -205,81 +331,102 @@ impl ServiceContract for OnChainServiceContract {
		match data.contract_address == Default::default() {
			true => Box::new(::std::iter::empty()),
			false => get_confirmed_block_hash(&*client, REQUEST_CONFIRMATIONS_REQUIRED + 1)
-				.and_then(|b| {
-					let contract_address = data.contract_address;
-					let do_call = |data| client.call_contract(BlockId::Hash(b), contract_address, data);
-					data.contract.functions().server_key_generation_requests_count().call(&do_call)
-						.map_err(|error| {
-							warn!(target: "secretstore", "{}: call to server_key_generation_requests_count failed: {}",
-								self.self_key_pair.public(), error);
-							error
-						})
-						.map(|l| (b, l))
-						.ok()
-				})
-				.map(|(b, l)| Box::new(PendingRequestsIterator {
-					client: client,
-					contract: service::Service::default(),
-					contract_address: data.contract_address,
-					self_key_pair: self.self_key_pair.clone(),
-					block: b,
-					index: 0.into(),
-					length: l,
-				}) as Box<Iterator<Item=(bool, ServiceTask)>>)
+				.map(|b| {
+					let block = BlockId::Hash(b);
+					let iter = match self.mask.server_key_generation_requests {
+						true => Box::new(self.create_pending_requests_iterator(client.clone(), &data.contract_address, &block,
+							&ServerKeyGenerationService::read_pending_requests_count,
+							&ServerKeyGenerationService::read_pending_request)) as Box<Iterator<Item=(bool, ServiceTask)>>,
+						false => Box::new(::std::iter::empty()),
+					};
+					let iter = match self.mask.server_key_retrieval_requests {
+						true => Box::new(iter.chain(self.create_pending_requests_iterator(client.clone(), &data.contract_address, &block,
+							&ServerKeyRetrievalService::read_pending_requests_count,
+							&ServerKeyRetrievalService::read_pending_request))),
+						false => iter,
+					};
+					let iter = match self.mask.document_key_store_requests {
+						true => Box::new(iter.chain(self.create_pending_requests_iterator(client.clone(), &data.contract_address, &block,
+							&DocumentKeyStoreService::read_pending_requests_count,
+							&DocumentKeyStoreService::read_pending_request))),
+						false => iter,
+					};
+					let iter = match self.mask.document_key_shadow_retrieval_requests {
+						true => Box::new(iter.chain(self.create_pending_requests_iterator(client, &data.contract_address, &block,
+							&DocumentKeyShadowRetrievalService::read_pending_requests_count,
+							&DocumentKeyShadowRetrievalService::read_pending_request))),
+						false => iter
+					};
+
+					iter
+				})
				.unwrap_or_else(|| Box::new(::std::iter::empty()))
		}
	}

-	fn publish_server_key(&self, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> {
-		// only publish if contract address is set && client is online
-		let data = self.data.read();
-		if data.contract_address == Default::default() {
-			// it is not an error, because key could be generated even without contract
-			return Ok(());
-		}
-
-		let client = match self.client.get() {
-			Some(client) => client,
-			None => return Err("trusted client is required to publish key".into()),
-		};
-
-		// only publish key if contract waits for publication
-		// failing is ok here - it could be that enough confirmations have been recevied
-		// or key has been requested using HTTP API
-		let contract_address = data.contract_address;
-		let do_call = |data| client.call_contract(BlockId::Latest, contract_address, data);
-		let self_address = public_to_address(self.self_key_pair.public());
-		if data.contract.functions()
-			.get_server_key_confirmation_status()
-			.call(*server_key_id, self_address, &do_call)
-			.unwrap_or(false) {
-			return Ok(());
-		}
-
-		// prepare transaction data
-		let server_key_hash = keccak(server_key);
-		let signed_server_key = self.self_key_pair.sign(&server_key_hash).map_err(|e| format!("{}", e))?;
-		let signed_server_key: Signature = signed_server_key.into_electrum().into();
-		let transaction_data = data.contract.functions()
-			.server_key_generated()
-			.input(*server_key_id,
-				server_key.to_vec(),
-				signed_server_key.v(),
-				signed_server_key.r(),
-				signed_server_key.s(),
-			);
-
-		// send transaction
-		client.transact_contract(
-			data.contract_address,
-			transaction_data
-		).map_err(|e| format!("{}", e))?;
-
-		Ok(())
-	}
+	fn publish_generated_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public) -> Result<(), String> {
+		self.send_contract_transaction(origin, server_key_id, ServerKeyGenerationService::is_response_required, |_, _, service|
+			Ok(ServerKeyGenerationService::prepare_pubish_tx_data(service, server_key_id, &server_key))
+		)
+	}
+
+	fn publish_server_key_generation_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
+		self.send_contract_transaction(origin, server_key_id, ServerKeyGenerationService::is_response_required, |_, _, service|
+			Ok(ServerKeyGenerationService::prepare_error_tx_data(service, server_key_id))
+		)
+	}
+
+	fn publish_retrieved_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public, threshold: usize) -> Result<(), String> {
+		let threshold = serialize_threshold(threshold)?;
+		self.send_contract_transaction(origin, server_key_id, ServerKeyRetrievalService::is_response_required, |_, _, service|
+			Ok(ServerKeyRetrievalService::prepare_pubish_tx_data(service, server_key_id, server_key, threshold))
+		)
+	}
+
+	fn publish_server_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
+		self.send_contract_transaction(origin, server_key_id, ServerKeyRetrievalService::is_response_required, |_, _, service|
+			Ok(ServerKeyRetrievalService::prepare_error_tx_data(service, server_key_id))
+		)
+	}
+
+	fn publish_stored_document_key(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
+		self.send_contract_transaction(origin, server_key_id, DocumentKeyStoreService::is_response_required, |_, _, service|
+			Ok(DocumentKeyStoreService::prepare_pubish_tx_data(service, server_key_id))
+		)
+	}
+
+	fn publish_document_key_store_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
+		self.send_contract_transaction(origin, server_key_id, DocumentKeyStoreService::is_response_required, |_, _, service|
+			Ok(DocumentKeyStoreService::prepare_error_tx_data(service, server_key_id))
+		)
+	}
+
+	fn publish_retrieved_document_key_common(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, common_point: Public, threshold: usize) -> Result<(), String> {
+		let threshold = serialize_threshold(threshold)?;
+		self.send_contract_transaction(origin, server_key_id, |client, contract_address, contract, server_key_id, key_server|
+			DocumentKeyShadowRetrievalService::is_response_required(client, contract_address, contract, server_key_id, requester, key_server),
+			|_, _, service|
+				Ok(DocumentKeyShadowRetrievalService::prepare_pubish_common_tx_data(service, server_key_id, requester, common_point, threshold))
+		)
+	}
+
+	fn publish_retrieved_document_key_personal(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, participants: &[Address], decrypted_secret: Public, shadow: Bytes) -> Result<(), String> {
+		self.send_contract_transaction(origin, server_key_id, |_, _, _, _, _| true,
+			move |client, address, service|
+				DocumentKeyShadowRetrievalService::prepare_pubish_personal_tx_data(client, address, service, server_key_id, requester, participants, decrypted_secret, shadow)
+		)
+	}
+
+	fn publish_document_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address) -> Result<(), String> {
+		self.send_contract_transaction(origin, server_key_id, |client, contract_address, contract, server_key_id, key_server|
+			DocumentKeyShadowRetrievalService::is_response_required(client, contract_address, contract, server_key_id, requester, key_server),
+			|_, _, service|
+				Ok(DocumentKeyShadowRetrievalService::prepare_error_tx_data(service, server_key_id, requester))
+		)
+	}
}

-impl Iterator for PendingRequestsIterator {
+impl<F> Iterator for PendingRequestsIterator<F> where F: Fn(U256) -> Option<(bool, ServiceTask)> {
	type Item = (bool, ServiceTask);

	fn next(&mut self) -> Option<(bool, ServiceTask)> {
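Each `publish_*` method above differs only in how it checks whether the contract still expects a response and how it encodes the transaction, so both concerns are passed as closures to one sender. A minimal sketch of that factoring, with stand-in types:

```rust
// Stand-in for the deployed service contract handle.
struct Contract;

fn send_contract_transaction<C, P>(contract: &Contract, is_response_required: C, prepare_tx: P) -> Result<(), String>
	where C: FnOnce(&Contract) -> bool,
		P: FnOnce(&Contract) -> Result<Vec<u8>, String> {
	// skip silently when nobody is waiting: the request may already be
	// confirmed, or it may have come through the HTTP API instead
	if !is_response_required(contract) {
		return Ok(());
	}
	// encode the response; each request kind supplies its own encoder
	let _tx_data = prepare_tx(contract)?;
	// ... submit the transaction here ...
	Ok(())
}

fn main() {
	let contract = Contract;
	send_contract_transaction(&contract, |_| true, |_| Ok(vec![0u8])).unwrap();
}
```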
@@ -290,27 +437,29 @@ impl Iterator for PendingRequestsIterator {
		let index = self.index.clone();
		self.index = self.index + 1.into();

-		let self_address = public_to_address(self.self_key_pair.public());
-		let contract_address = self.contract_address;
-		let do_call = |data| self.client.call_contract(BlockId::Hash(self.block.clone()), contract_address, data);
-		self.contract.functions().get_server_key_id().call(index, &do_call)
-			.and_then(|server_key_id|
-				self.contract.functions().get_server_key_threshold().call(server_key_id, &do_call)
-					.map(|threshold| (server_key_id, threshold)))
-			.and_then(|(server_key_id, threshold)|
-				self.contract.functions().get_server_key_confirmation_status().call(server_key_id, self_address, &do_call)
-					.map(|is_confirmed| (server_key_id, threshold, is_confirmed)))
-			.map(|(server_key_id, threshold, is_confirmed)|
-				Some((is_confirmed, ServiceTask::GenerateServerKey(server_key_id, threshold.into()))))
-			.map_err(|error| {
-				warn!(target: "secretstore", "{}: reading service contract request failed: {}",
-					self.self_key_pair.public(), error);
-				()
-			})
-			.unwrap_or(None)
+		(self.read_request)(index)
	}
}

+/// Returns vector of logs topics to listen to.
+pub fn mask_topics(mask: &ApiMask) -> Vec<H256> {
+	let mut topics = Vec::new();
+	if mask.server_key_generation_requests {
+		topics.push(*SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME_HASH);
+	}
+	if mask.server_key_retrieval_requests {
+		topics.push(*SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME_HASH);
+	}
+	if mask.document_key_store_requests {
+		topics.push(*DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME_HASH);
+	}
+	if mask.document_key_shadow_retrieval_requests {
+		topics.push(*DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH);
+		topics.push(*DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH);
+	}
+	topics
+}

/// Get hash of the last block with at least n confirmations.
fn get_confirmed_block_hash(client: &Client, confirmations: u64) -> Option<H256> {
	client.block_number(BlockId::Latest)
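Sketch of how `mask_topics` feeds a log filter: Ethereum filters treat the inner vector at topic position 0 as an OR-list, so one subscription covers every enabled request kind. Types are simplified stand-ins for the real `ApiMask` and `Filter`:

```rust
type H256 = [u8; 32];

struct ApiMask {
	server_key_generation_requests: bool,
	server_key_retrieval_requests: bool,
}

fn mask_topics(mask: &ApiMask) -> Vec<H256> {
	let mut topics = Vec::new();
	if mask.server_key_generation_requests {
		topics.push([1u8; 32]); // generation event hash (placeholder value)
	}
	if mask.server_key_retrieval_requests {
		topics.push([2u8; 32]); // retrieval event hash (placeholder value)
	}
	topics
}

fn main() {
	let mask = ApiMask { server_key_generation_requests: true, server_key_retrieval_requests: false };
	// topic position 0 matches if log.topics[0] equals ANY entry of this vector
	let topics: Vec<Option<Vec<H256>>> = vec![Some(mask_topics(&mask))];
	assert_eq!(topics[0].as_ref().unwrap().len(), 1);
}
```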
@ -318,21 +467,365 @@ fn get_confirmed_block_hash(client: &Client, confirmations: u64) -> Option<H256>
|
|||||||
.and_then(|b| client.block_hash(BlockId::Number(b)))
|
.and_then(|b| client.block_hash(BlockId::Number(b)))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl ServerKeyGenerationService {
|
||||||
|
/// Parse request log entry.
|
||||||
|
pub fn parse_log(origin: &Address, contract: &service::Service, raw_log: RawLog) -> Result<ServiceTask, String> {
|
||||||
|
let event = contract.events().server_key_generation_requested();
|
||||||
|
match event.parse_log(raw_log) {
|
||||||
|
Ok(l) => Ok(ServiceTask::GenerateServerKey(origin.clone(), l.server_key_id, l.author, parse_threshold(l.threshold)?)),
|
||||||
|
Err(e) => Err(format!("{}", e)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if response from key server is required.
|
||||||
|
pub fn is_response_required(client: &Client, contract_address: &Address, contract: &service::Service, server_key_id: &ServerKeyId, key_server: &Address) -> bool {
|
||||||
|
// we're checking confirmation in Latest block, because we're interested in latest contract state here
|
||||||
|
let do_call = |data| client.call_contract(BlockId::Latest, *contract_address, data);
|
||||||
|
contract.functions()
|
||||||
|
.is_server_key_generation_response_required()
|
||||||
|
.call(*server_key_id, key_server.clone(), &do_call)
|
||||||
|
.unwrap_or(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Prepare publish key transaction data.
|
||||||
|
pub fn prepare_pubish_tx_data(contract: &service::Service, server_key_id: &ServerKeyId, server_key_public: &Public) -> Bytes {
|
||||||
|
contract.functions()
|
||||||
|
.server_key_generated()
|
||||||
|
.input(*server_key_id, server_key_public.to_vec())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Prepare error transaction data.
|
||||||
|
pub fn prepare_error_tx_data(contract: &service::Service, server_key_id: &ServerKeyId) -> Bytes {
|
||||||
|
contract.functions()
|
||||||
|
.server_key_generation_error()
|
||||||
|
.input(*server_key_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Read pending requests count.
|
||||||
|
fn read_pending_requests_count(client: &Client, contract_address: &Address, _contract: &service::Service, block: &BlockId) -> Result<U256, String> {
|
||||||
|
let do_call = |data| client.call_contract(block.clone(), contract_address.clone(), data);
|
||||||
|
let contract = service::Service::default();
|
||||||
|
contract.functions()
|
||||||
|
.server_key_generation_requests_count()
|
||||||
|
.call(&do_call)
|
||||||
|
.map_err(|error| format!("{}", error))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Read pending request.
|
||||||
|
fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, contract: &service::Service, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
|
||||||
|
let self_address = public_to_address(self_key_pair.public());
|
||||||
|
let do_call = |d| client.call_contract(block.clone(), contract_address.clone(), d);
|
||||||
|
contract.functions()
|
||||||
|
.get_server_key_generation_request()
|
||||||
|
.call(index, &do_call)
|
||||||
|
.map_err(|error| format!("{}", error))
|
||||||
|
.and_then(|(server_key_id, author, threshold)| parse_threshold(threshold)
|
||||||
|
.map(|threshold| (server_key_id, author, threshold)))
|
||||||
|
.and_then(|(server_key_id, author, threshold)| contract.functions()
|
||||||
|
.is_server_key_generation_response_required()
|
||||||
|
.call(server_key_id.clone(), self_address, &do_call)
|
||||||
|
.map(|not_confirmed| (
|
||||||
|
not_confirmed,
|
||||||
|
ServiceTask::GenerateServerKey(
|
||||||
|
contract_address.clone(),
|
||||||
|
server_key_id,
|
||||||
|
author,
|
||||||
|
threshold,
|
||||||
|
)))
|
||||||
|
.map_err(|error| format!("{}", error)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ServerKeyRetrievalService {
|
||||||
|
/// Parse request log entry.
|
||||||
|
pub fn parse_log(origin: &Address, contract: &service::Service, raw_log: RawLog) -> Result<ServiceTask, String> {
|
||||||
|
let event = contract.events().server_key_retrieval_requested();
|
||||||
|
match event.parse_log(raw_log) {
|
||||||
|
Ok(l) => Ok(ServiceTask::RetrieveServerKey(origin.clone(), l.server_key_id)),
|
||||||
|
Err(e) => Err(format!("{}", e)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if response from key server is required.
|
||||||
|
pub fn is_response_required(client: &Client, contract_address: &Address, contract: &service::Service, server_key_id: &ServerKeyId, key_server: &Address) -> bool {
|
||||||
|
// we're checking confirmation in Latest block, because we're interested in latest contract state here
|
||||||
|
let do_call = |data| client.call_contract(BlockId::Latest, *contract_address, data);
|
||||||
|
contract.functions()
|
||||||
|
.is_server_key_retrieval_response_required()
|
||||||
|
.call(*server_key_id, key_server.clone(), &do_call)
|
||||||
|
.unwrap_or(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Prepare publish key transaction data.
|
||||||
|
pub fn prepare_pubish_tx_data(contract: &service::Service, server_key_id: &ServerKeyId, server_key_public: Public, threshold: U256) -> Bytes {
|
||||||
|
contract.functions()
|
||||||
|
.server_key_retrieved()
|
||||||
|
.input(*server_key_id, server_key_public.to_vec(), threshold)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Prepare error transaction data.
|
||||||
|
pub fn prepare_error_tx_data(contract: &service::Service, server_key_id: &ServerKeyId) -> Bytes {
|
||||||
|
contract.functions()
|
||||||
|
.server_key_retrieval_error()
|
||||||
|
.input(*server_key_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Read pending requests count.
|
||||||
|
fn read_pending_requests_count(client: &Client, contract_address: &Address, _contract: &service::Service, block: &BlockId) -> Result<U256, String> {
|
||||||
|
let do_call = |data| client.call_contract(block.clone(), contract_address.clone(), data);
|
||||||
|
let contract = service::Service::default();
|
||||||
|
contract.functions()
|
||||||
|
.server_key_retrieval_requests_count()
|
||||||
|
.call(&do_call)
|
||||||
|
.map_err(|error| format!("{}", error))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Read pending request.
|
||||||
|
fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, contract: &service::Service, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
|
||||||
|
let self_address = public_to_address(self_key_pair.public());
|
||||||
|
let do_call = |d| client.call_contract(block.clone(), contract_address.clone(), d);
|
||||||
|
contract.functions()
|
||||||
|
.get_server_key_retrieval_request()
|
||||||
|
.call(index, &do_call)
|
||||||
|
.map_err(|error| format!("{}", error))
|
||||||
|
.and_then(|server_key_id| contract.functions()
|
||||||
|
.is_server_key_retrieval_response_required()
|
||||||
|
.call(server_key_id.clone(), self_address, &do_call)
|
||||||
|
.map(|not_confirmed| (
|
||||||
|
not_confirmed,
|
||||||
|
ServiceTask::RetrieveServerKey(
|
||||||
|
contract_address.clone(),
|
||||||
|
server_key_id,
|
||||||
|
)))
|
||||||
|
.map_err(|error| format!("{}", error)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DocumentKeyStoreService {
	/// Parse request log entry.
	pub fn parse_log(origin: &Address, contract: &service::Service, raw_log: RawLog) -> Result<ServiceTask, String> {
		let event = contract.events().document_key_store_requested();
		match event.parse_log(raw_log) {
			Ok(l) => Ok(ServiceTask::StoreDocumentKey(origin.clone(), l.server_key_id, l.author, (*l.common_point).into(), (*l.encrypted_point).into())),
			Err(e) => Err(format!("{}", e)),
		}
	}

	/// Check if response from key server is required.
	pub fn is_response_required(client: &Client, contract_address: &Address, contract: &service::Service, server_key_id: &ServerKeyId, key_server: &Address) -> bool {
		// we're checking confirmation in Latest block, because we're interested in latest contract state here
		let do_call = |data| client.call_contract(BlockId::Latest, *contract_address, data);
		contract.functions()
			.is_document_key_store_response_required()
			.call(*server_key_id, key_server.clone(), &do_call)
			.unwrap_or(true)
	}

	/// Prepare publish key transaction data.
	pub fn prepare_pubish_tx_data(contract: &service::Service, server_key_id: &ServerKeyId) -> Bytes {
		contract.functions()
			.document_key_stored()
			.input(*server_key_id)
	}

	/// Prepare error transaction data.
	pub fn prepare_error_tx_data(contract: &service::Service, server_key_id: &ServerKeyId) -> Bytes {
		contract.functions()
			.document_key_store_error()
			.input(*server_key_id)
	}

	/// Read pending requests count.
	fn read_pending_requests_count(client: &Client, contract_address: &Address, _contract: &service::Service, block: &BlockId) -> Result<U256, String> {
		let do_call = |data| client.call_contract(block.clone(), contract_address.clone(), data);
		let contract = service::Service::default();
		contract.functions()
			.document_key_store_requests_count()
			.call(&do_call)
			.map_err(|error| format!("{}", error))
	}

	/// Read pending request.
	fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, contract: &service::Service, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
		let self_address = public_to_address(self_key_pair.public());
		let do_call = |d| client.call_contract(block.clone(), contract_address.clone(), d);
		contract.functions()
			.get_document_key_store_request()
			.call(index, &do_call)
			.map_err(|error| format!("{}", error))
			.and_then(|(server_key_id, author, common_point, encrypted_point)| contract.functions()
				.is_document_key_store_response_required()
				.call(server_key_id.clone(), self_address, &do_call)
				.map(|not_confirmed| (
					not_confirmed,
					ServiceTask::StoreDocumentKey(
						contract_address.clone(),
						server_key_id,
						author,
						Public::from_slice(&common_point),
						Public::from_slice(&encrypted_point),
					)))
				.map_err(|error| format!("{}", error)))
	}
}

impl DocumentKeyShadowRetrievalService {
	/// Parse common request log entry.
	pub fn parse_common_request_log(origin: &Address, contract: &service::Service, raw_log: RawLog) -> Result<ServiceTask, String> {
		let event = contract.events().document_key_common_retrieval_requested();
		match event.parse_log(raw_log) {
			Ok(l) => Ok(ServiceTask::RetrieveShadowDocumentKeyCommon(origin.clone(), l.server_key_id, l.requester)),
			Err(e) => Err(format!("{}", e)),
		}
	}

	/// Parse personal request log entry.
	pub fn parse_personal_request_log(origin: &Address, contract: &service::Service, raw_log: RawLog) -> Result<ServiceTask, String> {
		let event = contract.events().document_key_personal_retrieval_requested();
		match event.parse_log(raw_log) {
			Ok(l) => Ok(ServiceTask::RetrieveShadowDocumentKeyPersonal(origin.clone(), l.server_key_id, (*l.requester_public).into())),
			Err(e) => Err(format!("{}", e)),
		}
	}

	/// Check if response from key server is required.
	pub fn is_response_required(client: &Client, contract_address: &Address, contract: &service::Service, server_key_id: &ServerKeyId, requester: &Address, key_server: &Address) -> bool {
		// we're checking confirmation in Latest block, because we're interested in latest contract state here
		let do_call = |data| client.call_contract(BlockId::Latest, *contract_address, data);
		contract.functions()
			.is_document_key_shadow_retrieval_response_required()
			.call(*server_key_id, *requester, key_server.clone(), &do_call)
			.unwrap_or(true)
	}

	/// Prepare publish common key transaction data.
	pub fn prepare_pubish_common_tx_data(contract: &service::Service, server_key_id: &ServerKeyId, requester: &Address, common_point: Public, threshold: U256) -> Bytes {
		contract.functions()
			.document_key_common_retrieved()
			.input(*server_key_id, *requester, common_point.to_vec(), threshold)
	}

	/// Prepare publish personal key transaction data.
	pub fn prepare_pubish_personal_tx_data(client: &Client, contract_address: &Address, contract: &service::Service, server_key_id: &ServerKeyId, requester: &Address, participants: &[Address], decrypted_secret: Public, shadow: Bytes) -> Result<Bytes, String> {
		let mut participants_mask = U256::default();
		for participant in participants {
			let participant_index = Self::map_key_server_address(client, contract_address, contract, participant.clone())
				.map_err(|e| format!("Error searching for {} participant: {}", participant, e))?;
			participants_mask = participants_mask | (U256::one() << participant_index.into());
		}
		Ok(contract.functions()
			.document_key_personal_retrieved()
			.input(*server_key_id, *requester, participants_mask, decrypted_secret.to_vec(), shadow))
	}

	/// Prepare error transaction data.
	pub fn prepare_error_tx_data(contract: &service::Service, server_key_id: &ServerKeyId, requester: &Address) -> Bytes {
		contract.functions()
			.document_key_shadow_retrieval_error()
			.input(*server_key_id, *requester)
	}

	/// Read pending requests count.
	fn read_pending_requests_count(client: &Client, contract_address: &Address, _contract: &service::Service, block: &BlockId) -> Result<U256, String> {
		let do_call = |data| client.call_contract(block.clone(), contract_address.clone(), data);
		let contract = service::Service::default();
		contract.functions()
			.document_key_shadow_retrieval_requests_count()
			.call(&do_call)
			.map_err(|error| format!("{}", error))
	}

	/// Read pending request.
	fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, contract: &service::Service, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
		let self_address = public_to_address(self_key_pair.public());
		let do_call = |d| client.call_contract(block.clone(), contract_address.clone(), d);
		contract.functions()
			.get_document_key_shadow_retrieval_request()
			.call(index, &do_call)
			.map_err(|error| format!("{}", error))
			.and_then(|(server_key_id, requester, is_common_retrieval_completed)| {
				let requester = Public::from_slice(&requester);
				contract.functions()
					.is_document_key_shadow_retrieval_response_required()
					.call(server_key_id.clone(), public_to_address(&requester), self_address, &do_call)
					.map(|not_confirmed| (
						not_confirmed,
						match is_common_retrieval_completed {
							true => ServiceTask::RetrieveShadowDocumentKeyCommon(
								contract_address.clone(),
								server_key_id,
								public_to_address(&requester),
							),
							false => ServiceTask::RetrieveShadowDocumentKeyPersonal(
								contract_address.clone(),
								server_key_id,
								requester,
							)
						},
					))
					.map_err(|error| format!("{}", error))
			})
	}

	/// Map from key server address to key server index.
	fn map_key_server_address(client: &Client, contract_address: &Address, contract: &service::Service, key_server: Address) -> Result<u8, String> {
		// we're checking in Latest block, because tx must be appended to the latest state
		let do_call = |data| client.call_contract(BlockId::Latest, *contract_address, data);
		contract.functions()
			.require_key_server()
			.call(key_server, &do_call)
			.map_err(|e| format!("{}", e))
			.and_then(|index| if index > ::std::u8::MAX.into() {
				Err(format!("key server index is too big: {}", index))
			} else {
				let index: u32 = index.into();
				Ok(index as u8)
			})
	}
}

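prepare_pubish_personal_tx_data encodes the set of participating key servers as a bitmask, one bit per key-server index as returned by map_key_server_address. A runnable sketch of the same mask construction, with u128 standing in for U256:

/// Build a participation bitmask: bit i is set iff key server i took part.
/// u128 stands in for U256 here; the real code also caps indices at u8::MAX.
fn participants_mask(indices: &[u8]) -> u128 {
	indices.iter().fold(0u128, |mask, &i| mask | (1u128 << i))
}

fn main() {
	// Servers 0, 2 and 5 participated => 0b100101 = 37.
	assert_eq!(participants_mask(&[0, 2, 5]), 0b10_0101);
}
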
/// Parse threshold (we only support 256 KS at max).
fn parse_threshold(threshold: U256) -> Result<usize, String> {
	let threshold_num = threshold.low_u64();
	if threshold != threshold_num.into() || threshold_num >= ::std::u8::MAX as u64 {
		return Err(format!("invalid threshold to use in service contract: {}", threshold));
	}

	Ok(threshold_num as usize)
}

/// Serialize threshold (we only support 256 KS at max).
fn serialize_threshold(threshold: usize) -> Result<U256, String> {
	if threshold > ::std::u8::MAX as usize {
		return Err(format!("invalid threshold to use in service contract: {}", threshold));
	}
	Ok(threshold.into())
}

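parse_threshold and serialize_threshold only bounds-check against the 256-key-server limit (indices must fit in a u8). A round-trip sketch, with u128 standing in for U256:

/// u128 stands in for U256 in this sketch; logic mirrors the functions above.
fn parse_threshold(threshold: u128) -> Result<usize, String> {
	if threshold >= ::std::u8::MAX as u128 {
		return Err(format!("invalid threshold to use in service contract: {}", threshold));
	}
	Ok(threshold as usize)
}

fn serialize_threshold(threshold: usize) -> Result<u128, String> {
	if threshold > ::std::u8::MAX as usize {
		return Err(format!("invalid threshold to use in service contract: {}", threshold));
	}
	Ok(threshold as u128)
}

fn main() {
	assert_eq!(parse_threshold(serialize_threshold(10).unwrap()).unwrap(), 10);
	assert!(parse_threshold(1000).is_err());
}
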
 #[cfg(test)]
 pub mod tests {
 	use parking_lot::Mutex;
+	use bytes::Bytes;
 	use ethkey::Public;
-	use ethereum_types::H256;
+	use ethereum_types::Address;
 	use listener::service_contract_listener::ServiceTask;
-	use ServerKeyId;
+	use {ServerKeyId};
 	use super::ServiceContract;

 	#[derive(Default)]
 	pub struct DummyServiceContract {
 		pub is_actual: bool,
-		pub logs: Vec<Vec<H256>>,
+		pub logs: Vec<ServiceTask>,
 		pub pending_requests: Vec<(bool, ServiceTask)>,
-		pub published_keys: Mutex<Vec<(ServerKeyId, Public)>>,
+		pub generated_server_keys: Mutex<Vec<(ServerKeyId, Public)>>,
+		pub server_keys_generation_failures: Mutex<Vec<ServerKeyId>>,
+		pub retrieved_server_keys: Mutex<Vec<(ServerKeyId, Public, usize)>>,
+		pub server_keys_retrieval_failures: Mutex<Vec<ServerKeyId>>,
+		pub stored_document_keys: Mutex<Vec<ServerKeyId>>,
+		pub document_keys_store_failures: Mutex<Vec<ServerKeyId>>,
+		pub common_shadow_retrieved_document_keys: Mutex<Vec<(ServerKeyId, Address, Public, usize)>>,
+		pub personal_shadow_retrieved_document_keys: Mutex<Vec<(ServerKeyId, Address, Vec<Address>, Public, Bytes)>>,
+		pub document_keys_shadow_retrieval_failures: Mutex<Vec<(ServerKeyId, Address)>>,
 	}

 	impl ServiceContract for DummyServiceContract {
@@ -340,7 +833,7 @@ pub mod tests {
 			true
 		}

-		fn read_logs(&self) -> Box<Iterator<Item=Vec<H256>>> {
+		fn read_logs(&self) -> Box<Iterator<Item=ServiceTask>> {
 			Box::new(self.logs.clone().into_iter())
 		}

@@ -348,8 +841,48 @@ pub mod tests {
 			Box::new(self.pending_requests.clone().into_iter())
 		}

-		fn publish_server_key(&self, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> {
-			self.published_keys.lock().push((server_key_id.clone(), server_key.clone()));
+		fn publish_generated_server_key(&self, _origin: &Address, server_key_id: &ServerKeyId, server_key: Public) -> Result<(), String> {
+			self.generated_server_keys.lock().push((server_key_id.clone(), server_key.clone()));
+			Ok(())
+		}
+
+		fn publish_server_key_generation_error(&self, _origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
+			self.server_keys_generation_failures.lock().push(server_key_id.clone());
+			Ok(())
+		}
+
+		fn publish_retrieved_server_key(&self, _origin: &Address, server_key_id: &ServerKeyId, server_key: Public, threshold: usize) -> Result<(), String> {
+			self.retrieved_server_keys.lock().push((server_key_id.clone(), server_key.clone(), threshold));
+			Ok(())
+		}
+
+		fn publish_server_key_retrieval_error(&self, _origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
+			self.server_keys_retrieval_failures.lock().push(server_key_id.clone());
+			Ok(())
+		}
+
+		fn publish_stored_document_key(&self, _origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
+			self.stored_document_keys.lock().push(server_key_id.clone());
+			Ok(())
+		}
+
+		fn publish_document_key_store_error(&self, _origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
+			self.document_keys_store_failures.lock().push(server_key_id.clone());
+			Ok(())
+		}
+
+		fn publish_retrieved_document_key_common(&self, _origin: &Address, server_key_id: &ServerKeyId, requester: &Address, common_point: Public, threshold: usize) -> Result<(), String> {
+			self.common_shadow_retrieved_document_keys.lock().push((server_key_id.clone(), requester.clone(), common_point.clone(), threshold));
+			Ok(())
+		}
+
+		fn publish_retrieved_document_key_personal(&self, _origin: &Address, server_key_id: &ServerKeyId, requester: &Address, participants: &[Address], decrypted_secret: Public, shadow: Bytes) -> Result<(), String> {
+			self.personal_shadow_retrieved_document_keys.lock().push((server_key_id.clone(), requester.clone(), participants.iter().cloned().collect(), decrypted_secret, shadow));
+			Ok(())
+		}
+
+		fn publish_document_key_retrieval_error(&self, _origin: &Address, server_key_id: &ServerKeyId, requester: &Address) -> Result<(), String> {
+			self.document_keys_shadow_retrieval_failures.lock().push((server_key_id.clone(), requester.clone()));
 			Ok(())
 		}
 	}
 }

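DummyServiceContract records every published result in a Mutex-guarded Vec so the listener tests below can assert on what was published. A condensed, self-contained sketch of that recording pattern (std Mutex here instead of parking_lot, so lock() returns a Result):

use std::sync::Mutex;

/// Condensed version of the DummyServiceContract recording pattern.
#[derive(Default)]
struct DummyContract {
	published: Mutex<Vec<(u64, String)>>,
}

impl DummyContract {
	fn publish(&self, key_id: u64, key: &str) -> Result<(), String> {
		self.published.lock().unwrap().push((key_id, key.to_owned()));
		Ok(())
	}
}

fn main() {
	let contract = DummyContract::default();
	contract.publish(1, "server_key").unwrap();
	// The test can now inspect exactly what the listener published.
	assert_eq!(contract.published.lock().unwrap().len(), 1);
}
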
secret_store/src/listener/service_contract_aggregate.rs (new file, 100 lines)
@@ -0,0 +1,100 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use bytes::Bytes;
use ethereum_types::Address;
use ethkey::Public;
use listener::service_contract::ServiceContract;
use listener::service_contract_listener::ServiceTask;
use {ServerKeyId};

/// Aggregated on-chain service contract.
pub struct OnChainServiceContractAggregate {
	/// All hosted service contracts.
	contracts: Vec<Arc<ServiceContract>>,
}

impl OnChainServiceContractAggregate {
	/// Create new aggregated service contract listener.
	pub fn new(contracts: Vec<Arc<ServiceContract>>) -> Self {
		debug_assert!(contracts.len() > 1);
		OnChainServiceContractAggregate {
			contracts: contracts,
		}
	}
}

impl ServiceContract for OnChainServiceContractAggregate {
	fn update(&self) -> bool {
		let mut result = false;
		for contract in &self.contracts {
			result = contract.update() || result;
		}
		result
	}

	fn read_logs(&self) -> Box<Iterator<Item=ServiceTask>> {
		self.contracts.iter()
			.fold(Box::new(::std::iter::empty()) as Box<Iterator<Item=ServiceTask>>, |i, c|
				Box::new(i.chain(c.read_logs())))
	}

	fn read_pending_requests(&self) -> Box<Iterator<Item=(bool, ServiceTask)>> {
		self.contracts.iter()
			.fold(Box::new(::std::iter::empty()) as Box<Iterator<Item=(bool, ServiceTask)>>, |i, c|
				Box::new(i.chain(c.read_pending_requests())))
	}

	// in the current implementation all publish methods are independent of the actual contract address
	// (tx is sent to origin) => we do not care which contract to use for publishing data in the methods below

	fn publish_generated_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public) -> Result<(), String> {
		self.contracts[0].publish_generated_server_key(origin, server_key_id, server_key)
	}

	fn publish_server_key_generation_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
		self.contracts[0].publish_server_key_generation_error(origin, server_key_id)
	}

	fn publish_retrieved_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public, threshold: usize) -> Result<(), String> {
		self.contracts[0].publish_retrieved_server_key(origin, server_key_id, server_key, threshold)
	}

	fn publish_server_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
		self.contracts[0].publish_server_key_retrieval_error(origin, server_key_id)
	}

	fn publish_stored_document_key(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
		self.contracts[0].publish_stored_document_key(origin, server_key_id)
	}

	fn publish_document_key_store_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
		self.contracts[0].publish_document_key_store_error(origin, server_key_id)
	}

	fn publish_retrieved_document_key_common(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, common_point: Public, threshold: usize) -> Result<(), String> {
		self.contracts[0].publish_retrieved_document_key_common(origin, server_key_id, requester, common_point, threshold)
	}

	fn publish_retrieved_document_key_personal(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, participants: &[Address], decrypted_secret: Public, shadow: Bytes) -> Result<(), String> {
		self.contracts[0].publish_retrieved_document_key_personal(origin, server_key_id, requester, participants, decrypted_secret, shadow)
	}

	fn publish_document_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address) -> Result<(), String> {
		self.contracts[0].publish_document_key_retrieval_error(origin, server_key_id, requester)
	}
}

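read_logs and read_pending_requests above flatten the per-contract iterators into one by folding chain over boxed iterators. A standalone equivalent of that fold, assuming nothing beyond std (written with the modern dyn syntax):

/// Chain an arbitrary number of sources into one boxed iterator,
/// the same fold-over-chain shape as OnChainServiceContractAggregate::read_logs.
fn chain_all<T: 'static>(sources: Vec<Vec<T>>) -> Box<dyn Iterator<Item = T>> {
	sources.into_iter()
		.fold(Box::new(::std::iter::empty()) as Box<dyn Iterator<Item = T>>,
			|acc, next| Box::new(acc.chain(next.into_iter())))
}

fn main() {
	let all: Vec<u32> = chain_all(vec![vec![1, 2], vec![], vec![3]]).collect();
	assert_eq!(all, vec![1, 2, 3]);
}
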
@@ -20,16 +20,20 @@ use std::sync::atomic::{AtomicUsize, Ordering};
 use std::thread;
 use parking_lot::Mutex;
 use ethcore::client::ChainNotify;
-use ethkey::{Random, Generator, Public, sign};
+use ethkey::{Public, public_to_address};
 use bytes::Bytes;
-use ethereum_types::{H256, U256};
+use ethereum_types::{H256, U256, Address};
 use key_server_set::KeyServerSet;
 use key_server_cluster::{ClusterClient, ClusterSessionsListener, ClusterSession};
+use key_server_cluster::math;
 use key_server_cluster::generation_session::SessionImpl as GenerationSession;
+use key_server_cluster::encryption_session::{check_encrypted_data, update_encrypted_data};
+use key_server_cluster::decryption_session::SessionImpl as DecryptionSession;
 use key_storage::KeyStorage;
+use acl_storage::AclStorage;
 use listener::service_contract::ServiceContract;
 use listener::tasks_queue::TasksQueue;
-use {ServerKeyId, NodeKeyPair, KeyServer};
+use {ServerKeyId, NodeKeyPair, Error};

 /// Retry interval (in blocks). Every RETRY_INTERVAL_BLOCKS blocks each KeyServer reads pending requests from
 /// service contract && tries to re-execute. The reason to have this mechanism is primarily because keys
@@ -56,12 +60,12 @@ pub struct ServiceContractListener {
 pub struct ServiceContractListenerParams {
 	/// Service contract.
 	pub contract: Arc<ServiceContract>,
-	/// Key server reference.
-	pub key_server: Arc<KeyServer>,
 	/// This node key pair.
 	pub self_key_pair: Arc<NodeKeyPair>,
 	/// Key servers set.
 	pub key_server_set: Arc<KeyServerSet>,
+	/// ACL storage reference.
+	pub acl_storage: Arc<AclStorage>,
 	/// Cluster reference.
 	pub cluster: Arc<ClusterClient>,
 	/// Key storage reference.
@@ -78,8 +82,10 @@ struct ServiceContractListenerData {
 	pub tasks_queue: Arc<TasksQueue<ServiceTask>>,
 	/// Service contract.
 	pub contract: Arc<ServiceContract>,
-	/// Key server reference.
-	pub key_server: Arc<KeyServer>,
+	/// ACL storage reference.
+	pub acl_storage: Arc<AclStorage>,
+	/// Cluster client reference.
+	pub cluster: Arc<ClusterClient>,
 	/// This node key pair.
 	pub self_key_pair: Arc<NodeKeyPair>,
 	/// Key servers set.
@@ -92,8 +98,10 @@ struct ServiceContractListenerData {
 /// Retry-related data.
 #[derive(Default)]
 struct ServiceContractRetryData {
-	/// Server keys, which we have generated (or tried to generate) since last retry moment.
-	pub generated_keys: HashSet<ServerKeyId>,
+	/// Server keys, which we have 'touched' since last retry.
+	pub affected_server_keys: HashSet<ServerKeyId>,
+	/// Document keys + requesters, which we have 'touched' since last retry.
+	pub affected_document_keys: HashSet<(ServerKeyId, Address)>,
 }

 /// Service task.
@@ -101,23 +109,30 @@ struct ServiceContractRetryData {
 pub enum ServiceTask {
 	/// Retry all 'stalled' tasks.
 	Retry,
-	/// Generate server key (server_key_id, threshold).
-	GenerateServerKey(H256, H256),
-	/// Confirm server key (server_key_id).
-	RestoreServerKey(H256),
+	/// Generate server key (origin, server_key_id, author, threshold).
+	GenerateServerKey(Address, ServerKeyId, Address, usize),
+	/// Retrieve server key (origin, server_key_id).
+	RetrieveServerKey(Address, ServerKeyId),
+	/// Store document key (origin, server_key_id, author, common_point, encrypted_point).
+	StoreDocumentKey(Address, ServerKeyId, Address, Public, Public),
+	/// Retrieve common data of document key (origin, server_key_id, requester).
+	RetrieveShadowDocumentKeyCommon(Address, ServerKeyId, Address),
+	/// Retrieve personal data of document key (origin, server_key_id, requester).
+	RetrieveShadowDocumentKeyPersonal(Address, ServerKeyId, Public),
 	/// Shutdown listener.
 	Shutdown,
 }

 impl ServiceContractListener {
 	/// Create new service contract listener.
-	pub fn new(params: ServiceContractListenerParams) -> Arc<ServiceContractListener> {
+	pub fn new(params: ServiceContractListenerParams) -> Result<Arc<ServiceContractListener>, Error> {
 		let data = Arc::new(ServiceContractListenerData {
 			last_retry: AtomicUsize::new(0),
 			retry_data: Default::default(),
 			tasks_queue: Arc::new(TasksQueue::new()),
 			contract: params.contract,
-			key_server: params.key_server,
+			acl_storage: params.acl_storage,
+			cluster: params.cluster,
 			self_key_pair: params.self_key_pair,
 			key_server_set: params.key_server_set,
 			key_storage: params.key_storage,
@@ -129,39 +144,55 @@ impl ServiceContractListener {
 			None
 		} else {
 			let service_thread_data = data.clone();
-			Some(thread::spawn(move || Self::run_service_thread(service_thread_data)))
+			Some(thread::Builder::new().name("ServiceContractListener".into()).spawn(move ||
+				Self::run_service_thread(service_thread_data)).map_err(|e| Error::Internal(format!("{}", e)))?)
 		};
 		let contract = Arc::new(ServiceContractListener {
 			data: data,
 			service_handle: service_handle,
 		});
-		params.cluster.add_generation_listener(contract.clone());
-		contract
+		contract.data.cluster.add_generation_listener(contract.clone());
+		contract.data.cluster.add_decryption_listener(contract.clone());
+		Ok(contract)
 	}

 	/// Process incoming events of service contract.
 	fn process_service_contract_events(&self) {
 		self.data.tasks_queue.push_many(self.data.contract.read_logs()
-			.filter_map(|topics| match topics.len() {
-				// when key is already generated && we have this key
-				3 if self.data.key_storage.get(&topics[1]).map(|k| k.is_some()).unwrap_or_default() => {
-					Some(ServiceTask::RestoreServerKey(
-						topics[1],
-					))
-				}
-				// when key is not yet generated && this node should be master of this key generation session
-				3 if is_processed_by_this_key_server(&*self.data.key_server_set, &*self.data.self_key_pair, &topics[1]) => {
-					Some(ServiceTask::GenerateServerKey(
-						topics[1],
-						topics[2],
-					))
-				},
-				3 => None,
-				l @ _ => {
-					warn!(target: "secretstore", "Ignoring ServerKeyRequested event with wrong number of params {}", l);
-					None
-				},
-			}));
+			.filter_map(|task| Self::filter_task(&self.data, task)));
+	}
+
+	/// Filter service task. Only returns Some if task must be executed by this server.
+	fn filter_task(data: &Arc<ServiceContractListenerData>, task: ServiceTask) -> Option<ServiceTask> {
+		match task {
+			// when this node should be master of this server key generation session
+			ServiceTask::GenerateServerKey(origin, server_key_id, author, threshold) if is_processed_by_this_key_server(
+				&*data.key_server_set, &*data.self_key_pair, &server_key_id) =>
+				Some(ServiceTask::GenerateServerKey(origin, server_key_id, author, threshold)),
+			// when server key generation must be initiated by another node
+			ServiceTask::GenerateServerKey(_, _, _, _) => None,
+
+			// when server key retrieval is requested
+			ServiceTask::RetrieveServerKey(origin, server_key_id) =>
+				Some(ServiceTask::RetrieveServerKey(origin, server_key_id)),
+
+			// when document key store is requested
+			ServiceTask::StoreDocumentKey(origin, server_key_id, author, common_point, encrypted_point) =>
+				Some(ServiceTask::StoreDocumentKey(origin, server_key_id, author, common_point, encrypted_point)),
+
+			// when common document key data retrieval is requested
+			ServiceTask::RetrieveShadowDocumentKeyCommon(origin, server_key_id, requester) =>
+				Some(ServiceTask::RetrieveShadowDocumentKeyCommon(origin, server_key_id, requester)),
+
+			// when this node should be master of this document key decryption session
+			ServiceTask::RetrieveShadowDocumentKeyPersonal(origin, server_key_id, requester) if is_processed_by_this_key_server(
+				&*data.key_server_set, &*data.self_key_pair, &server_key_id) =>
+				Some(ServiceTask::RetrieveShadowDocumentKeyPersonal(origin, server_key_id, requester)),
+			// when the decryption session must be initiated by another node
+			ServiceTask::RetrieveShadowDocumentKeyPersonal(_, _, _) => None,
+
+			ServiceTask::Retry | ServiceTask::Shutdown => unreachable!("must be filtered outside"),
+		}
+	}

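filter_task is applied both to freshly read logs and to re-read pending requests, so the decision of which node runs which session lives in exactly one place. A condensed sketch of that shared pipeline; the modulo rule here is only a stand-in for is_processed_by_this_key_server:

#[derive(Clone, Debug, PartialEq)]
enum Task {
	Generate(u64),
	Retrieve(u64),
}

/// Single filtering point: generation runs only on the 'master' server
/// for the key, retrieval runs everywhere.
fn filter_task(self_index: u64, servers: u64, task: Task) -> Option<Task> {
	match task {
		Task::Generate(key) if key % servers == self_index => Some(Task::Generate(key)),
		Task::Generate(_) => None,
		Task::Retrieve(key) => Some(Task::Retrieve(key)),
	}
}

fn main() {
	let logs = vec![Task::Generate(0), Task::Generate(1), Task::Retrieve(1)];
	// Server 0 of 2 keeps Generate(0) and every Retrieve task.
	let mine: Vec<Task> = logs.into_iter().filter_map(|t| filter_task(0, 2, t)).collect();
	assert_eq!(mine, vec![Task::Generate(0), Task::Retrieve(1)]);
}
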
 	/// Service thread procedure.
@@ -172,18 +203,45 @@ impl ServiceContractListener {
 			match task {
 				ServiceTask::Shutdown => break,
-				task @ _ => {
-					// the only possible reaction to an error is a trace && it is already happened
+				task => {
+					// the only possible reaction to an error is a tx+trace && it has already happened
 					let _ = Self::process_service_task(&data, task);
 				},
 			};
 		}
+
+		trace!(target: "secretstore_net", "{}: ServiceContractListener thread stopped", data.self_key_pair.public());
 	}

 	/// Process single service task.
 	fn process_service_task(data: &Arc<ServiceContractListenerData>, task: ServiceTask) -> Result<(), String> {
-		match task {
-			ServiceTask::Retry =>
+		match &task {
+			&ServiceTask::GenerateServerKey(origin, server_key_id, author, threshold) => {
+				data.retry_data.lock().affected_server_keys.insert(server_key_id.clone());
+				log_service_task_result(&task, data.self_key_pair.public(),
+					Self::generate_server_key(&data, origin, &server_key_id, author, threshold))
+			},
+			&ServiceTask::RetrieveServerKey(origin, server_key_id) => {
+				data.retry_data.lock().affected_server_keys.insert(server_key_id.clone());
+				log_service_task_result(&task, data.self_key_pair.public(),
+					Self::retrieve_server_key(&data, origin, &server_key_id))
+			},
+			&ServiceTask::StoreDocumentKey(origin, server_key_id, author, common_point, encrypted_point) => {
+				data.retry_data.lock().affected_document_keys.insert((server_key_id.clone(), author.clone()));
+				log_service_task_result(&task, data.self_key_pair.public(),
+					Self::store_document_key(&data, origin, &server_key_id, &author, &common_point, &encrypted_point))
+			},
+			&ServiceTask::RetrieveShadowDocumentKeyCommon(origin, server_key_id, requester) => {
+				data.retry_data.lock().affected_document_keys.insert((server_key_id.clone(), requester.clone()));
+				log_service_task_result(&task, data.self_key_pair.public(),
+					Self::retrieve_document_key_common(&data, origin, &server_key_id, &requester))
+			},
+			&ServiceTask::RetrieveShadowDocumentKeyPersonal(origin, server_key_id, requester) => {
+				data.retry_data.lock().affected_server_keys.insert(server_key_id.clone());
+				log_service_task_result(&task, data.self_key_pair.public(),
+					Self::retrieve_document_key_personal(&data, origin, &server_key_id, requester))
+			},
+			&ServiceTask::Retry => {
 				Self::retry_pending_requests(&data)
 					.map(|processed_requests| {
 						if processed_requests != 0 {
@@ -196,38 +254,9 @@ impl ServiceContractListener {
 						warn!(target: "secretstore", "{}: retrying pending requests has failed with: {}",
 							data.self_key_pair.public(), error);
 						error
-					}),
-			ServiceTask::RestoreServerKey(server_key_id) => {
-				data.retry_data.lock().generated_keys.insert(server_key_id.clone());
-				Self::restore_server_key(&data, &server_key_id)
-					.and_then(|server_key| Self::publish_server_key(&data, &server_key_id, &server_key))
-					.map(|_| {
-						trace!(target: "secretstore", "{}: processed RestoreServerKey({}) request",
-							data.self_key_pair.public(), server_key_id);
-						()
-					})
-					.map_err(|error| {
-						warn!(target: "secretstore", "{}: failed to process RestoreServerKey({}) request with: {}",
-							data.self_key_pair.public(), server_key_id, error);
-						error
-					})
-			},
-			ServiceTask::GenerateServerKey(server_key_id, threshold) => {
-				data.retry_data.lock().generated_keys.insert(server_key_id.clone());
-				Self::generate_server_key(&data, &server_key_id, &threshold)
-					.and_then(|server_key| Self::publish_server_key(&data, &server_key_id, &server_key))
-					.map(|_| {
-						trace!(target: "secretstore", "{}: processed GenerateServerKey({}, {}) request",
-							data.self_key_pair.public(), server_key_id, threshold);
-						()
-					})
-					.map_err(|error| {
-						warn!(target: "secretstore", "{}: failed to process GenerateServerKey({}, {}) request with: {}",
-							data.self_key_pair.public(), server_key_id, threshold, error);
-						error
-					})
-			},
-			ServiceTask::Shutdown => unreachable!("it must be checked outside"),
+					})
+			},
+			&ServiceTask::Shutdown => unreachable!("must be filtered outside"),
 		}
 	}

@@ -236,32 +265,28 @@ impl ServiceContractListener {
 		let mut failed_requests = 0;
 		let mut processed_requests = 0;
 		let retry_data = ::std::mem::replace(&mut *data.retry_data.lock(), Default::default());
-		for (is_confirmed, task) in data.contract.read_pending_requests() {
+		let pending_tasks = data.contract.read_pending_requests()
+			.filter_map(|(is_confirmed, task)| Self::filter_task(data, task)
+				.map(|t| (is_confirmed, t)));
+		for (is_confirmed, task) in pending_tasks {
 			// only process requests, which we haven't confirmed yet
 			if is_confirmed {
 				continue;
 			}

-			let request_result = match task {
-				ServiceTask::GenerateServerKey(server_key_id, threshold) => {
-					// only process request, which haven't been processed recently
-					// there could be a lag when we've just generated server key && retrying on the same block
-					// (or before our tx is mined) - state is not updated yet
-					if retry_data.generated_keys.contains(&server_key_id) {
-						continue;
-					}
-
-					// process request
-					let is_own_request = is_processed_by_this_key_server(&*data.key_server_set, &*data.self_key_pair, &server_key_id);
-					Self::process_service_task(data, match is_own_request {
-						true => ServiceTask::GenerateServerKey(server_key_id, threshold.into()),
-						false => ServiceTask::RestoreServerKey(server_key_id),
-					})
-				},
-				_ => Err("not supported".into()),
-			};
+			// skip requests that have been processed recently: there could be a lag
+			// when we've just processed a request && are retrying on the same block
+			// (or before our tx is mined) - state is not updated yet
+			match task {
+				ServiceTask::GenerateServerKey(_, ref key, _, _) | ServiceTask::RetrieveServerKey(_, ref key)
+					if retry_data.affected_server_keys.contains(key) => continue,
+				ServiceTask::StoreDocumentKey(_, ref key, ref author, _, _) |
+				ServiceTask::RetrieveShadowDocumentKeyCommon(_, ref key, ref author)
+					if retry_data.affected_document_keys.contains(&(key.clone(), author.clone())) => continue,
+				ServiceTask::RetrieveShadowDocumentKeyPersonal(_, ref key, ref requester)
+					if retry_data.affected_document_keys.contains(&(key.clone(), public_to_address(requester))) => continue,
+				_ => (),
+			}

 			// process request result
+			let request_result = Self::process_service_task(data, task);
 			match request_result {
 				Ok(_) => processed_requests += 1,
 				Err(_) => {
@@ -276,33 +301,119 @@ impl ServiceContractListener {
 		Ok(processed_requests)
 	}

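The retry pass atomically swaps out the accumulated 'touched' sets and skips any pending request already served since the last retry, so a confirmation tx that is slow to mine does not trigger a duplicate session. A sketch of that dedup logic:

use std::collections::HashSet;
use std::mem;

fn main() {
	// Keys touched since the previous retry round (normally behind a Mutex).
	let mut affected: HashSet<u64> = [1u64, 2].iter().cloned().collect();

	// Take and reset the set in one step, as retry_pending_requests does.
	let retry_data = mem::replace(&mut affected, Default::default());

	let pending = vec![(false, 1u64), (true, 2), (false, 3)];
	let to_process: Vec<u64> = pending.into_iter()
		.filter(|&(is_confirmed, _)| !is_confirmed)   // already confirmed on-chain
		.map(|(_, key)| key)
		.filter(|key| !retry_data.contains(key))      // served since last retry
		.collect();

	// Only key 3 is retried: 1 was just processed, 2 is confirmed.
	assert_eq!(to_process, vec![3]);
}
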
-	/// Generate server key.
-	fn generate_server_key(data: &Arc<ServiceContractListenerData>, server_key_id: &ServerKeyId, threshold: &H256) -> Result<Public, String> {
-		let threshold_num = threshold.low_u64();
-		if threshold != &threshold_num.into() || threshold_num >= ::std::usize::MAX as u64 {
-			return Err(format!("invalid threshold {:?}", threshold));
-		}
-
-		// key server expects signed server_key_id in server_key_generation procedure
-		// only signer could store document key for this server key later
-		// => this API (server key generation) is not suitable for usage in encryption via contract endpoint
-		let author_key = Random.generate().map_err(|e| format!("{}", e))?;
-		let server_key_id_signature = sign(author_key.secret(), server_key_id).map_err(|e| format!("{}", e))?;
-		data.key_server.generate_key(server_key_id, &server_key_id_signature, threshold_num as usize)
-			.map_err(Into::into)
+	/// Generate server key (start generation session).
+	fn generate_server_key(data: &Arc<ServiceContractListenerData>, origin: Address, server_key_id: &ServerKeyId, author: Address, threshold: usize) -> Result<(), String> {
+		Self::process_server_key_generation_result(data, origin, server_key_id, data.cluster.new_generation_session(
+			server_key_id.clone(), Some(origin), author, threshold).map(|_| None).map_err(Into::into))
+	}
+
+	/// Process server key generation result.
+	fn process_server_key_generation_result(data: &Arc<ServiceContractListenerData>, origin: Address, server_key_id: &ServerKeyId, result: Result<Option<Public>, Error>) -> Result<(), String> {
+		match result {
+			Ok(None) => Ok(()),
+			Ok(Some(server_key)) => {
+				data.contract.publish_generated_server_key(&origin, server_key_id, server_key)
+			},
+			Err(ref error) if is_internal_error(error) => Err(format!("{}", error)),
+			Err(ref error) => {
+				// ignore error as we're already processing an error
+				let _ = data.contract.publish_server_key_generation_error(&origin, server_key_id)
+					.map_err(|error| warn!(target: "secretstore", "{}: failed to publish GenerateServerKey({}) error: {}",
						data.self_key_pair.public(), server_key_id, error));
+				Err(format!("{}", error))
+			}
+		}
 	}

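Every handler in this file reports back through the same three-way split: success publishes the result, an internal error stays quiet so the retry mechanism re-runs the task later, and an external error is published to the contract so the request is rejected. A condensed sketch of that shape (the error variants are illustrative, not the crate's real Error type):

#[derive(Debug)]
enum Error {
	Internal(String),
	AccessDenied,
}

fn is_internal_error(error: &Error) -> bool {
	match *error {
		Error::Internal(_) => true,
		_ => false,
	}
}

/// Shared result-processing shape of process_server_key_generation_result & co.
fn process_result(result: Result<Option<u64>, Error>) -> Result<(), String> {
	match result {
		// Session still running; the session-removed listener publishes later.
		Ok(None) => Ok(()),
		// Completed synchronously: publish the value (stubbed as a print here).
		Ok(Some(value)) => { println!("publish {}", value); Ok(()) }
		// Internal error: keep quiet and let the retry mechanism re-run it.
		Err(ref error) if is_internal_error(error) => Err(format!("{:?}", error)),
		// External error: publish the failure so the request is rejected.
		Err(error) => { println!("publish error"); Err(format!("{:?}", error)) }
	}
}

fn main() {
	assert!(process_result(Ok(None)).is_ok());
	assert!(process_result(Err(Error::AccessDenied)).is_err());
}
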
-	/// Restore server key.
-	fn restore_server_key(data: &Arc<ServiceContractListenerData>, server_key_id: &ServerKeyId) -> Result<Public, String> {
-		data.key_storage.get(server_key_id)
-			.map_err(|e| format!("{}", e))
-			.and_then(|ks| ks.ok_or("missing key".to_owned()))
-			.map(|ks| ks.public)
+	/// Retrieve server key.
+	fn retrieve_server_key(data: &Arc<ServiceContractListenerData>, origin: Address, server_key_id: &ServerKeyId) -> Result<(), String> {
+		match data.key_storage.get(server_key_id) {
+			Ok(Some(server_key_share)) => {
+				data.contract.publish_retrieved_server_key(&origin, server_key_id, server_key_share.public, server_key_share.threshold)
+			},
+			Ok(None) => {
+				data.contract.publish_server_key_retrieval_error(&origin, server_key_id)
+			}
+			Err(ref error) if is_internal_error(error) => Err(format!("{}", error)),
+			Err(ref error) => {
+				// ignore error as we're already processing an error
+				let _ = data.contract.publish_server_key_retrieval_error(&origin, server_key_id)
+					.map_err(|error| warn!(target: "secretstore", "{}: failed to publish RetrieveServerKey({}) error: {}",
						data.self_key_pair.public(), server_key_id, error));
+				Err(format!("{}", error))
+			}
+		}
 	}

-	/// Publish server key.
-	fn publish_server_key(data: &Arc<ServiceContractListenerData>, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> {
-		data.contract.publish_server_key(server_key_id, server_key)
+	/// Store document key.
+	fn store_document_key(data: &Arc<ServiceContractListenerData>, origin: Address, server_key_id: &ServerKeyId, author: &Address, common_point: &Public, encrypted_point: &Public) -> Result<(), String> {
+		let store_result = data.key_storage.get(server_key_id)
+			.and_then(|key_share| key_share.ok_or(Error::DocumentNotFound))
+			.and_then(|key_share| check_encrypted_data(Some(&key_share)).map(|_| key_share).map_err(Into::into))
+			.and_then(|key_share| update_encrypted_data(&data.key_storage, server_key_id.clone(), key_share,
				author.clone(), common_point.clone(), encrypted_point.clone()).map_err(Into::into));
+		match store_result {
+			Ok(()) => {
+				data.contract.publish_stored_document_key(&origin, server_key_id)
+			},
+			Err(ref error) if is_internal_error(&error) => Err(format!("{}", error)),
+			Err(ref error) => {
+				// ignore error as we're already processing an error
+				let _ = data.contract.publish_document_key_store_error(&origin, server_key_id)
+					.map_err(|error| warn!(target: "secretstore", "{}: failed to publish StoreDocumentKey({}) error: {}",
						data.self_key_pair.public(), server_key_id, error));
+				Err(format!("{}", error))
+			},
+		}
+	}

+	/// Retrieve common part of document key.
+	fn retrieve_document_key_common(data: &Arc<ServiceContractListenerData>, origin: Address, server_key_id: &ServerKeyId, requester: &Address) -> Result<(), String> {
+		let retrieval_result = data.acl_storage.check(requester.clone(), server_key_id)
+			.and_then(|is_allowed| if !is_allowed { Err(Error::AccessDenied) } else { Ok(()) })
+			.and_then(|_| data.key_storage.get(server_key_id).and_then(|key_share| key_share.ok_or(Error::DocumentNotFound)))
+			.and_then(|key_share| key_share.common_point
+				.ok_or(Error::DocumentNotFound)
+				.and_then(|common_point| math::make_common_shadow_point(key_share.threshold, common_point)
+					.map_err(|e| Error::Internal(e.into())))
+				.map(|common_point| (common_point, key_share.threshold)));
+		match retrieval_result {
+			Ok((common_point, threshold)) => {
+				data.contract.publish_retrieved_document_key_common(&origin, server_key_id, requester, common_point, threshold)
+			},
+			Err(ref error) if is_internal_error(&error) => Err(format!("{}", error)),
+			Err(ref error) => {
+				// ignore error as we're already processing an error
+				let _ = data.contract.publish_document_key_retrieval_error(&origin, server_key_id, requester)
+					.map_err(|error| warn!(target: "secretstore", "{}: failed to publish RetrieveDocumentKey({}) error: {}",
						data.self_key_pair.public(), server_key_id, error));
+				Err(format!("{}", error))
+			},
+		}
+	}

/// Retrieve personal part of document key (start decryption session).
|
||||||
|
fn retrieve_document_key_personal(data: &Arc<ServiceContractListenerData>, origin: Address, server_key_id: &ServerKeyId, requester: Public) -> Result<(), String> {
|
||||||
|
Self::process_document_key_retrieval_result(data, origin, server_key_id, &public_to_address(&requester), data.cluster.new_decryption_session(
|
||||||
|
server_key_id.clone(), Some(origin), requester.clone().into(), None, true, true).map(|_| None).map_err(Into::into))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Process document key retrieval result.
|
||||||
|
fn process_document_key_retrieval_result(data: &Arc<ServiceContractListenerData>, origin: Address, server_key_id: &ServerKeyId, requester: &Address, result: Result<Option<(Vec<Address>, Public, Bytes)>, Error>) -> Result<(), String> {
|
||||||
|
match result {
|
||||||
|
Ok(None) => Ok(()),
|
||||||
|
Ok(Some((participants, decrypted_secret, shadow))) => {
|
||||||
|
data.contract.publish_retrieved_document_key_personal(&origin, server_key_id, &requester, &participants, decrypted_secret, shadow)
|
||||||
|
},
|
||||||
|
Err(ref error) if is_internal_error(error) => Err(format!("{}", error)),
|
||||||
|
Err(ref error) => {
|
||||||
|
// ignore error as we're already processing an error
|
||||||
|
let _ = data.contract.publish_document_key_retrieval_error(&origin, server_key_id, &requester)
|
||||||
|
.map_err(|error| warn!(target: "secretstore", "{}: failed to publish RetrieveDocumentKey({}) error: {}",
|
||||||
|
data.self_key_pair.public(), server_key_id, error));
|
||||||
|
Err(format!("{}", error))
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -340,25 +451,84 @@ impl ChainNotify for ServiceContractListener {

 impl ClusterSessionsListener<GenerationSession> for ServiceContractListener {
 	fn on_session_removed(&self, session: Arc<GenerationSession>) {
-		// only publish when the session is started by another node
-		// when it is started by this node, it is published from process_service_task
-		if !is_processed_by_this_key_server(&*self.data.key_server_set, &*self.data.self_key_pair, &session.id()) {
-			// by this time session must already be completed - either successfully, or not
-			assert!(session.is_finished());
+		// by this time session must already be completed - either successfully, or not
+		assert!(session.is_finished());

-			// ignore result - the only thing that we can do is to log the error
-			match session.wait(Some(Default::default()))
-				.map_err(|e| format!("{}", e))
-				.and_then(|server_key| Self::publish_server_key(&self.data, &session.id(), &server_key)) {
-				Ok(_) => trace!(target: "secretstore", "{}: completed foreign GenerateServerKey({}) request",
-					self.data.self_key_pair.public(), session.id()),
-				Err(error) => warn!(target: "secretstore", "{}: failed to process GenerateServerKey({}) request with: {}",
-					self.data.self_key_pair.public(), session.id(), error),
+		// ignore result - the only thing that we can do is to log the error
+		let server_key_id = session.id();
+		if let Some(origin) = session.origin() {
+			if let Some(generation_result) = session.wait(Some(Default::default())) {
+				let generation_result = generation_result.map(Some).map_err(Into::into);
+				let _ = Self::process_server_key_generation_result(&self.data, origin, &server_key_id, generation_result);
 			}
 		}
 	}
 }

+impl ClusterSessionsListener<DecryptionSession> for ServiceContractListener {
+	fn on_session_removed(&self, session: Arc<DecryptionSession>) {
+		// by this time session must already be completed - either successfully, or not
+		assert!(session.is_finished());
+
+		// ignore result - the only thing that we can do is to log the error
+		let session_id = session.id();
+		let server_key_id = session_id.id;
+		if let (Some(requester), Some(origin)) = (session.requester().and_then(|r| r.address(&server_key_id).ok()), session.origin()) {
+			if let Some(retrieval_result) = session.wait(Some(Default::default())) {
+				let retrieval_result = retrieval_result.map(|key_shadow|
+					session.broadcast_shadows()
+						.and_then(|broadcast_shadows|
+							broadcast_shadows.get(self.data.self_key_pair.public())
+								.map(|self_shadow| (
+									broadcast_shadows.keys().map(public_to_address).collect(),
+									key_shadow.decrypted_secret,
+									self_shadow.clone()
+								)))
+				).map_err(Into::into);
+				let _ = Self::process_document_key_retrieval_result(&self.data, origin, &server_key_id, &requester, retrieval_result);
+			}
+		}
+	}
+}

+impl ::std::fmt::Display for ServiceTask {
+	fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+		match *self {
+			ServiceTask::Retry => write!(f, "Retry"),
+			ServiceTask::GenerateServerKey(_, ref server_key_id, ref author, ref threshold) =>
+				write!(f, "GenerateServerKey({}, {}, {})", server_key_id, author, threshold),
+			ServiceTask::RetrieveServerKey(_, ref server_key_id) =>
+				write!(f, "RetrieveServerKey({})", server_key_id),
+			ServiceTask::StoreDocumentKey(_, ref server_key_id, ref author, _, _) =>
+				write!(f, "StoreDocumentKey({}, {})", server_key_id, author),
+			ServiceTask::RetrieveShadowDocumentKeyCommon(_, ref server_key_id, ref requester) =>
+				write!(f, "RetrieveShadowDocumentKeyCommon({}, {})", server_key_id, requester),
+			ServiceTask::RetrieveShadowDocumentKeyPersonal(_, ref server_key_id, ref requester) =>
+				write!(f, "RetrieveShadowDocumentKeyPersonal({}, {})", server_key_id, public_to_address(requester)),
+			ServiceTask::Shutdown => write!(f, "Shutdown"),
+		}
+	}
+}

|
||||||
|
/// External error is caused by SS misuse, like: trying to generate duplicated key, access denied, ...
|
||||||
|
/// When internal error occurs, we just ignore request for now and will retry later.
|
||||||
|
/// When external error occurs, we reject request.
|
||||||
|
fn is_internal_error(_error: &Error) -> bool {
|
||||||
|
// TODO [Reliability]: implement me after proper is passed through network
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Log service task result.
|
||||||
|
fn log_service_task_result(task: &ServiceTask, self_id: &Public, result: Result<(), String>) -> Result<(), String> {
|
||||||
|
match result {
|
||||||
|
Ok(_) => trace!(target: "secretstore", "{}: processed {} request", self_id, task),
|
||||||
|
Err(ref error) => warn!(target: "secretstore", "{}: failed to process {} request with: {}", self_id, task, error),
|
||||||
|
}
|
||||||
|
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
 /// Returns true when session, related to `server_key_id` must be started on this KeyServer.
 fn is_processed_by_this_key_server(key_server_set: &KeyServerSet, self_key_pair: &NodeKeyPair, server_key_id: &H256) -> bool {
 	let servers = key_server_set.snapshot().current_set;
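The body of is_processed_by_this_key_server is elided from this hunk; the property that matters is that every node evaluates the same deterministic key-to-server mapping locally, so exactly one node claims each session. A sketch of one such mapping (index by key modulo server count - an assumption for illustration, not the exact rule used here):

/// Hypothetical deterministic key -> server mapping with the two properties
/// the listener relies on: every server computes the same answer locally,
/// and exactly one server claims each key.
fn is_processed_by(server_index: usize, servers_count: usize, server_key_id: u64) -> bool {
	(server_key_id % servers_count as u64) as usize == server_index
}

fn main() {
	let servers_count = 3;
	// Exactly one of the three servers claims key 7.
	let claimants = (0..servers_count).filter(|&i| is_processed_by(i, servers_count, 7)).count();
	assert_eq!(claimants, 1);
}
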
@ -390,17 +560,31 @@ mod tests {
|
|||||||
use listener::service_contract::ServiceContract;
|
use listener::service_contract::ServiceContract;
|
||||||
use listener::service_contract::tests::DummyServiceContract;
|
use listener::service_contract::tests::DummyServiceContract;
|
||||||
use key_server_cluster::DummyClusterClient;
|
use key_server_cluster::DummyClusterClient;
|
||||||
use key_server::tests::DummyKeyServer;
|
use acl_storage::{AclStorage, DummyAclStorage};
|
||||||
use key_storage::{KeyStorage, DocumentKeyShare};
|
use key_storage::{KeyStorage, DocumentKeyShare};
|
||||||
use key_storage::tests::DummyKeyStorage;
|
use key_storage::tests::DummyKeyStorage;
|
||||||
use key_server_set::tests::MapKeyServerSet;
|
use key_server_set::tests::MapKeyServerSet;
|
||||||
use PlainNodeKeyPair;
|
use {PlainNodeKeyPair, ServerKeyId};
|
||||||
use super::{ServiceTask, ServiceContractListener, ServiceContractListenerParams, is_processed_by_this_key_server};
|
use super::{ServiceTask, ServiceContractListener, ServiceContractListenerParams, is_processed_by_this_key_server};
|
||||||
|
|
||||||
fn make_service_contract_listener(contract: Option<Arc<ServiceContract>>, key_server: Option<Arc<DummyKeyServer>>, key_storage: Option<Arc<KeyStorage>>) -> Arc<ServiceContractListener> {
|
fn create_non_empty_key_storage(has_doc_key: bool) -> Arc<DummyKeyStorage> {
|
||||||
|
let key_storage = Arc::new(DummyKeyStorage::default());
|
||||||
|
let mut key_share = DocumentKeyShare::default();
|
||||||
|
key_share.public = KeyPair::from_secret("0000000000000000000000000000000000000000000000000000000000000001"
|
||||||
|
.parse().unwrap()).unwrap().public().clone();
|
||||||
|
if has_doc_key {
|
||||||
|
key_share.common_point = Some(Default::default());
|
||||||
|
key_share.encrypted_point = Some(Default::default());
|
||||||
|
}
|
||||||
|
key_storage.insert(Default::default(), key_share.clone()).unwrap();
|
||||||
|
key_storage
|
||||||
|
}
|
||||||
|
|
||||||
|
fn make_service_contract_listener(contract: Option<Arc<ServiceContract>>, cluster: Option<Arc<DummyClusterClient>>, key_storage: Option<Arc<KeyStorage>>, acl_storage: Option<Arc<AclStorage>>) -> Arc<ServiceContractListener> {
|
||||||
let contract = contract.unwrap_or_else(|| Arc::new(DummyServiceContract::default()));
|
let contract = contract.unwrap_or_else(|| Arc::new(DummyServiceContract::default()));
|
||||||
let key_server = key_server.unwrap_or_else(|| Arc::new(DummyKeyServer::default()));
|
let cluster = cluster.unwrap_or_else(|| Arc::new(DummyClusterClient::default()));
|
||||||
let key_storage = key_storage.unwrap_or_else(|| Arc::new(DummyKeyStorage::default()));
|
let key_storage = key_storage.unwrap_or_else(|| Arc::new(DummyKeyStorage::default()));
|
||||||
|
let acl_storage = acl_storage.unwrap_or_else(|| Arc::new(DummyAclStorage::default()));
|
||||||
let servers_set = Arc::new(MapKeyServerSet::new(vec![
|
let servers_set = Arc::new(MapKeyServerSet::new(vec![
|
||||||
("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(),
|
("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(),
|
||||||
"127.0.0.1:8080".parse().unwrap()),
|
"127.0.0.1:8080".parse().unwrap()),
|
||||||
@@ -412,12 +596,12 @@ mod tests {
 		let self_key_pair = Arc::new(PlainNodeKeyPair::new(KeyPair::from_secret("0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap()));
 		ServiceContractListener::new(ServiceContractListenerParams {
 			contract: contract,
-			key_server: key_server,
 			self_key_pair: self_key_pair,
 			key_server_set: servers_set,
-			cluster: Arc::new(DummyClusterClient::default()),
+			acl_storage: acl_storage,
+			cluster: cluster,
 			key_storage: key_storage,
-		})
+		}).unwrap()
 	}
 
 	#[test]
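Throughout these tests the `DummyKeyServer` double gives way to `DummyClusterClient`, whose counter the assertions read via `generation_requests_count.load(Ordering::Relaxed)`. A hedged sketch of the shape such a double plausibly has; only the `generation_requests_count` field is taken from this diff, the recording method is an assumption:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Sketch of a cluster-client test double: no real sessions are started,
// the double only counts how many generation sessions were requested.
#[derive(Default)]
pub struct DummyClusterClient {
	/// Number of generation-session requests observed.
	pub generation_requests_count: AtomicUsize,
}

impl DummyClusterClient {
	// Hypothetical hook the listener would call instead of starting a session.
	fn record_generation_request(&self) {
		self.generation_requests_count.fetch_add(1, Ordering::Relaxed);
	}
}
```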
@@ -576,51 +760,32 @@ mod tests {
 
 	#[test]
 	fn no_tasks_scheduled_when_no_contract_events() {
-		let listener = make_service_contract_listener(None, None, None);
+		let listener = make_service_contract_listener(None, None, None, None);
 		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
 		listener.process_service_contract_events();
 		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
 	}
 
+	// server key generation tests
+
 	#[test]
-	fn server_key_generation_is_scheduled_when_requested_key_is_unknown() {
+	fn server_key_generation_is_scheduled_when_requested() {
 		let mut contract = DummyServiceContract::default();
-		contract.logs.push(vec![Default::default(), Default::default(), Default::default()]);
-		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None);
+		contract.logs.push(ServiceTask::GenerateServerKey(Default::default(), Default::default(), Default::default(), 0));
+		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None);
 		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
 		listener.process_service_contract_events();
 		assert_eq!(listener.data.tasks_queue.snapshot().len(), 2);
-		assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::GenerateServerKey(Default::default(), Default::default())));
+		assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::GenerateServerKey(
+			Default::default(), Default::default(), Default::default(), 0)));
 	}
 
 	#[test]
-	fn no_new_tasks_scheduled_when_requested_key_is_unknown_and_request_belongs_to_other_key_server() {
+	fn no_new_tasks_scheduled_when_server_key_generation_requested_and_request_belongs_to_other_key_server() {
 		let server_key_id = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap();
 		let mut contract = DummyServiceContract::default();
-		contract.logs.push(vec![Default::default(), server_key_id, Default::default()]);
-		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None);
-		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
-		listener.process_service_contract_events();
-		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
-	}
-
-	#[test]
-	fn server_key_restore_is_scheduled_when_requested_key_is_known() {
-		let mut contract = DummyServiceContract::default();
-		contract.logs.push(vec![Default::default(), Default::default(), Default::default()]);
-		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None);
-		listener.data.key_storage.insert(Default::default(), Default::default()).unwrap();
-		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
-		listener.process_service_contract_events();
-		assert_eq!(listener.data.tasks_queue.snapshot().len(), 2);
-		assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::RestoreServerKey(Default::default())));
-	}
-
-	#[test]
-	fn no_new_tasks_scheduled_when_wrong_number_of_topics_in_log() {
-		let mut contract = DummyServiceContract::default();
-		contract.logs.push(vec![Default::default(), Default::default()]);
-		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None);
+		contract.logs.push(ServiceTask::GenerateServerKey(Default::default(), server_key_id, Default::default(), 0));
+		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None);
 		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
 		listener.process_service_contract_events();
 		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
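Several of these tests hinge on `is_processed_by_this_key_server`: every node sees the same contract event, but a request for a given `ServerKeyId` must be picked up by exactly one node of the key server set. The diff does not show that function's body; a hypothetical sketch of such a deterministic assignment, with the modulo rule purely illustrative:

```rust
// Hypothetical assignment rule: derive one owner index from the key id so
// that every node independently agrees on who handles each request.
// Assumes `servers_count > 0` and that all nodes order the set identically.
fn is_processed_by_this_key_server_sketch(self_index: usize, servers_count: usize, server_key_id: &[u8; 32]) -> bool {
	// First byte of the key id as a stable pseudo-random value.
	(server_key_id[0] as usize % servers_count) == self_index
}
```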
@@ -628,32 +793,221 @@ mod tests {
 
 	#[test]
 	fn generation_session_is_created_when_processing_generate_server_key_task() {
-		let key_server = Arc::new(DummyKeyServer::default());
-		let listener = make_service_contract_listener(None, Some(key_server.clone()), None);
-		ServiceContractListener::process_service_task(&listener.data, ServiceTask::GenerateServerKey(Default::default(), Default::default())).unwrap_err();
-		assert_eq!(key_server.generation_requests_count.load(Ordering::Relaxed), 1);
+		let cluster = Arc::new(DummyClusterClient::default());
+		let listener = make_service_contract_listener(None, Some(cluster.clone()), None, None);
+		ServiceContractListener::process_service_task(&listener.data, ServiceTask::GenerateServerKey(
+			Default::default(), Default::default(), Default::default(), Default::default())).unwrap_err();
+		assert_eq!(cluster.generation_requests_count.load(Ordering::Relaxed), 1);
 	}
 
 	#[test]
-	fn key_is_read_and_published_when_processing_restore_server_key_task() {
-		let contract = Arc::new(DummyServiceContract::default());
-		let key_storage = Arc::new(DummyKeyStorage::default());
-		let mut key_share = DocumentKeyShare::default();
-		key_share.public = KeyPair::from_secret("0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap().public().clone();
-		key_storage.insert(Default::default(), key_share.clone()).unwrap();
-		let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage));
-		ServiceContractListener::process_service_task(&listener.data, ServiceTask::RestoreServerKey(Default::default())).unwrap();
-		assert_eq!(*contract.published_keys.lock(), vec![(Default::default(), key_share.public)]);
-	}
-
-	#[test]
-	fn generation_is_not_retried_if_tried_in_the_same_cycle() {
+	fn server_key_generation_is_not_retried_if_tried_in_the_same_cycle() {
 		let mut contract = DummyServiceContract::default();
-		contract.pending_requests.push((false, ServiceTask::GenerateServerKey(Default::default(), Default::default())));
-		let key_server = Arc::new(DummyKeyServer::default());
-		let listener = make_service_contract_listener(Some(Arc::new(contract)), Some(key_server.clone()), None);
-		listener.data.retry_data.lock().generated_keys.insert(Default::default());
+		contract.pending_requests.push((false, ServiceTask::GenerateServerKey(Default::default(),
+			Default::default(), Default::default(), Default::default())));
+		let cluster = Arc::new(DummyClusterClient::default());
+		let listener = make_service_contract_listener(Some(Arc::new(contract)), Some(cluster.clone()), None, None);
+		listener.data.retry_data.lock().affected_server_keys.insert(Default::default());
 		ServiceContractListener::retry_pending_requests(&listener.data).unwrap();
-		assert_eq!(key_server.generation_requests_count.load(Ordering::Relaxed), 0);
+		assert_eq!(cluster.generation_requests_count.load(Ordering::Relaxed), 0);
+	}
+
+	// server key retrieval tests
+
+	#[test]
+	fn server_key_retrieval_is_scheduled_when_requested() {
+		let mut contract = DummyServiceContract::default();
+		contract.logs.push(ServiceTask::RetrieveServerKey(Default::default(), Default::default()));
+		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None);
+		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
+		listener.process_service_contract_events();
+		assert_eq!(listener.data.tasks_queue.snapshot().len(), 2);
+		assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::RetrieveServerKey(
+			Default::default(), Default::default())));
+	}
+
+	#[test]
+	fn server_key_retrieval_is_scheduled_when_requested_and_request_belongs_to_other_key_server() {
+		let server_key_id: ServerKeyId = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap();
+		let mut contract = DummyServiceContract::default();
+		contract.logs.push(ServiceTask::RetrieveServerKey(Default::default(), server_key_id.clone()));
+		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None);
+		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
+		listener.process_service_contract_events();
+		assert_eq!(listener.data.tasks_queue.snapshot().len(), 2);
+		assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::RetrieveServerKey(
+			Default::default(), server_key_id)));
+	}
+
+	#[test]
+	fn server_key_is_retrieved_when_processing_retrieve_server_key_task() {
+		let contract = Arc::new(DummyServiceContract::default());
+		let key_storage = create_non_empty_key_storage(false);
+		let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage), None);
+		ServiceContractListener::process_service_task(&listener.data, ServiceTask::RetrieveServerKey(
+			Default::default(), Default::default())).unwrap();
+		assert_eq!(*contract.retrieved_server_keys.lock(), vec![(Default::default(),
+			KeyPair::from_secret("0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap().public().clone(), 0)]);
+	}
+
+	#[test]
+	fn server_key_retrieval_failure_is_reported_when_processing_retrieve_server_key_task_and_key_is_unknown() {
+		let contract = Arc::new(DummyServiceContract::default());
+		let listener = make_service_contract_listener(Some(contract.clone()), None, None, None);
+		ServiceContractListener::process_service_task(&listener.data, ServiceTask::RetrieveServerKey(
+			Default::default(), Default::default())).unwrap();
+		assert_eq!(*contract.server_keys_retrieval_failures.lock(), vec![Default::default()]);
+	}
+
+	#[test]
+	fn server_key_retrieval_is_not_retried_if_tried_in_the_same_cycle() {
+		let mut contract = DummyServiceContract::default();
+		contract.pending_requests.push((false, ServiceTask::RetrieveServerKey(Default::default(), Default::default())));
+		let cluster = Arc::new(DummyClusterClient::default());
+		let listener = make_service_contract_listener(Some(Arc::new(contract)), Some(cluster.clone()), None, None);
+		listener.data.retry_data.lock().affected_server_keys.insert(Default::default());
+		ServiceContractListener::retry_pending_requests(&listener.data).unwrap();
+		assert_eq!(cluster.generation_requests_count.load(Ordering::Relaxed), 0);
+	}
+
+	// document key store tests
+
+	#[test]
+	fn document_key_store_is_scheduled_when_requested() {
+		let mut contract = DummyServiceContract::default();
+		contract.logs.push(ServiceTask::StoreDocumentKey(Default::default(), Default::default(),
+			Default::default(), Default::default(), Default::default()));
+		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None);
+		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
+		listener.process_service_contract_events();
+		assert_eq!(listener.data.tasks_queue.snapshot().len(), 2);
+		assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::StoreDocumentKey(
+			Default::default(), Default::default(), Default::default(), Default::default(), Default::default())));
+	}
+
+	#[test]
+	fn document_key_store_is_scheduled_when_requested_and_request_belongs_to_other_key_server() {
+		let server_key_id: ServerKeyId = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap();
+		let mut contract = DummyServiceContract::default();
+		contract.logs.push(ServiceTask::StoreDocumentKey(Default::default(), server_key_id.clone(),
+			Default::default(), Default::default(), Default::default()));
+		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None);
+		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
+		listener.process_service_contract_events();
+		assert_eq!(listener.data.tasks_queue.snapshot().len(), 2);
+		assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::StoreDocumentKey(
+			Default::default(), server_key_id, Default::default(), Default::default(), Default::default())));
+	}
+
+	#[test]
+	fn document_key_is_stored_when_processing_store_document_key_task() {
+		let contract = Arc::new(DummyServiceContract::default());
+		let key_storage = create_non_empty_key_storage(false);
+		let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage.clone()), None);
+		ServiceContractListener::process_service_task(&listener.data, ServiceTask::StoreDocumentKey(
+			Default::default(), Default::default(), Default::default(), Default::default(), Default::default())).unwrap();
+		assert_eq!(*contract.stored_document_keys.lock(), vec![Default::default()]);
+
+		let key_share = key_storage.get(&Default::default()).unwrap().unwrap();
+		assert_eq!(key_share.common_point, Some(Default::default()));
+		assert_eq!(key_share.encrypted_point, Some(Default::default()));
+	}
+
+	#[test]
+	fn document_key_store_failure_reported_when_no_server_key() {
+		let contract = Arc::new(DummyServiceContract::default());
+		let listener = make_service_contract_listener(Some(contract.clone()), None, None, None);
+		ServiceContractListener::process_service_task(&listener.data, ServiceTask::StoreDocumentKey(
+			Default::default(), Default::default(), Default::default(), Default::default(), Default::default())).unwrap_err();
+		assert_eq!(*contract.document_keys_store_failures.lock(), vec![Default::default()]);
+	}
+
+	#[test]
+	fn document_key_store_failure_reported_when_document_key_already_set() {
+		let contract = Arc::new(DummyServiceContract::default());
+		let key_storage = create_non_empty_key_storage(true);
+		let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage), None);
+		ServiceContractListener::process_service_task(&listener.data, ServiceTask::StoreDocumentKey(
+			Default::default(), Default::default(), Default::default(), Default::default(), Default::default())).unwrap_err();
+		assert_eq!(*contract.document_keys_store_failures.lock(), vec![Default::default()]);
+	}
+
+	#[test]
+	fn document_key_store_failure_reported_when_author_differs() {
+		let contract = Arc::new(DummyServiceContract::default());
+		let key_storage = create_non_empty_key_storage(false);
+		let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage), None);
+		ServiceContractListener::process_service_task(&listener.data, ServiceTask::StoreDocumentKey(
+			Default::default(), Default::default(), 1.into(), Default::default(), Default::default())).unwrap_err();
+		assert_eq!(*contract.document_keys_store_failures.lock(), vec![Default::default()]);
+	}
+
+	// document key shadow common retrieval tests
+
+	#[test]
+	fn document_key_shadow_common_retrieval_is_scheduled_when_requested() {
+		let mut contract = DummyServiceContract::default();
+		contract.logs.push(ServiceTask::RetrieveShadowDocumentKeyCommon(Default::default(), Default::default(), Default::default()));
+		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None);
+		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
+		listener.process_service_contract_events();
+		assert_eq!(listener.data.tasks_queue.snapshot().len(), 2);
+		assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::RetrieveShadowDocumentKeyCommon(
+			Default::default(), Default::default(), Default::default())));
+	}
+
+	#[test]
+	fn document_key_shadow_common_retrieval_is_scheduled_when_requested_and_request_belongs_to_other_key_server() {
+		let server_key_id: ServerKeyId = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap();
+		let mut contract = DummyServiceContract::default();
+		contract.logs.push(ServiceTask::RetrieveShadowDocumentKeyCommon(Default::default(), server_key_id.clone(), Default::default()));
+		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None, None);
+		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
+		listener.process_service_contract_events();
+		assert_eq!(listener.data.tasks_queue.snapshot().len(), 2);
+		assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::RetrieveShadowDocumentKeyCommon(
+			Default::default(), server_key_id, Default::default())));
+	}
+
+	#[test]
+	fn document_key_shadow_common_is_retrieved_when_processing_document_key_shadow_common_retrieval_task() {
+		let contract = Arc::new(DummyServiceContract::default());
+		let key_storage = create_non_empty_key_storage(true);
+		let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage.clone()), None);
+		ServiceContractListener::process_service_task(&listener.data, ServiceTask::RetrieveShadowDocumentKeyCommon(
+			Default::default(), Default::default(), Default::default())).unwrap();
+		assert_eq!(*contract.common_shadow_retrieved_document_keys.lock(), vec![(Default::default(), Default::default(),
+			Default::default(), 0)]);
+	}
+
+	#[test]
+	fn document_key_shadow_common_retrieval_failure_reported_when_access_denied() {
+		let acl_storage = DummyAclStorage::default();
+		acl_storage.prohibit(Default::default(), Default::default());
+		let contract = Arc::new(DummyServiceContract::default());
+		let key_storage = create_non_empty_key_storage(true);
+		let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage.clone()), Some(Arc::new(acl_storage)));
+		ServiceContractListener::process_service_task(&listener.data, ServiceTask::RetrieveShadowDocumentKeyCommon(
+			Default::default(), Default::default(), Default::default())).unwrap_err();
+		assert_eq!(*contract.document_keys_shadow_retrieval_failures.lock(), vec![(Default::default(), Default::default())]);
+	}
+
+	#[test]
+	fn document_key_shadow_common_retrieval_failure_reported_when_no_server_key() {
+		let contract = Arc::new(DummyServiceContract::default());
+		let listener = make_service_contract_listener(Some(contract.clone()), None, None, None);
+		ServiceContractListener::process_service_task(&listener.data, ServiceTask::RetrieveShadowDocumentKeyCommon(
+			Default::default(), Default::default(), Default::default())).unwrap_err();
+		assert_eq!(*contract.document_keys_shadow_retrieval_failures.lock(), vec![(Default::default(), Default::default())]);
+	}
+
+	#[test]
+	fn document_key_shadow_common_retrieval_failure_reported_when_no_document_key() {
+		let contract = Arc::new(DummyServiceContract::default());
+		let key_storage = create_non_empty_key_storage(false);
+		let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage.clone()), None);
+		ServiceContractListener::process_service_task(&listener.data, ServiceTask::RetrieveShadowDocumentKeyCommon(
+			Default::default(), Default::default(), Default::default())).unwrap_err();
+		assert_eq!(*contract.document_keys_shadow_retrieval_failures.lock(), vec![(Default::default(), Default::default())]);
 	}
 }
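The tests push `ServiceTask` values straight into `contract.logs` and `contract.pending_requests`, so the variant shapes can be read off the constructors above. A sketch of the enum as those tests imply it; only the variant names and arities are verbatim, the field meanings and types are guesses:

```rust
// Stand-ins so the sketch is self-contained; the real code would use
// ethereum_types::Address, H256 (ServerKeyId) and ethkey::Public.
type Address = [u8; 20];
type ServerKeyId = [u8; 32];
type Public = [u8; 64];

// Variant arities read off the test constructors; field comments are guesses.
pub enum ServiceTaskSketch {
	GenerateServerKey(Address, ServerKeyId, Address, usize), // origin?, key id, author?, threshold?
	RetrieveServerKey(Address, ServerKeyId),
	StoreDocumentKey(Address, ServerKeyId, Address, Public, Public), // ..., common_point?, encrypted_point?
	RetrieveShadowDocumentKeyCommon(Address, ServerKeyId, Address), // ..., requester?
}
```

Note that the old `RestoreServerKey` task disappears along with the `server_key_restore_is_scheduled_when_requested_key_is_known` test: restoring a known key is now covered by the retrieval task.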
@@ -17,8 +17,8 @@
 use std::collections::BTreeSet;
 use ethkey::{KeyPair, Signature, Error as EthKeyError};
 use ethereum_types::{H256, Address};
-use types::all::{Error, Public, ServerKeyId, MessageHash, EncryptedMessageSignature, RequestSignature, EncryptedDocumentKey,
-	EncryptedDocumentKeyShadow, NodeId};
+use types::all::{Error, Public, ServerKeyId, MessageHash, EncryptedMessageSignature, RequestSignature, Requester,
+	EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId};
 
 /// Node key pair.
 pub trait NodeKeyPair: Send + Sync {
@@ -36,34 +36,34 @@ pub trait NodeKeyPair: Send + Sync {
 pub trait ServerKeyGenerator {
 	/// Generate new SK.
 	/// `key_id` is the caller-provided identifier of generated SK.
-	/// `signature` is `key_id`, signed with caller public key.
+	/// `author` is the author of key entry.
 	/// `threshold + 1` is the minimal number of nodes, required to restore private key.
 	/// Result is a public portion of SK.
-	fn generate_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<Public, Error>;
+	fn generate_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result<Public, Error>;
 }
 
 /// Document key (DK) server.
 pub trait DocumentKeyServer: ServerKeyGenerator {
 	/// Store externally generated DK.
 	/// `key_id` is identifier of previously generated SK.
-	/// `signature` is key_id, signed with caller public key. Caller must be the same as in the `generate_key` call.
+	/// `author` is the same author that has created the server key.
 	/// `common_point` is a result of `k * T` expression, where `T` is generation point and `k` is random scalar in EC field.
 	/// `encrypted_document_key` is a result of `M + k * y` expression, where `M` is unencrypted document key (point on EC),
 	/// `k` is the same scalar used in `common_point` calculation and `y` is previously generated public part of SK.
-	fn store_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, common_point: Public, encrypted_document_key: Public) -> Result<(), Error>;
+	fn store_document_key(&self, key_id: &ServerKeyId, author: &Requester, common_point: Public, encrypted_document_key: Public) -> Result<(), Error>;
 	/// Generate and store both SK and DK. This is a shortcut for consequent calls of `generate_key` and `store_document_key`.
 	/// The only difference is that DK is generated by DocumentKeyServer (which might be considered unsafe).
 	/// `key_id` is the caller-provided identifier of generated SK.
-	/// `signature` is `key_id`, signed with caller public key.
+	/// `author` is the author of server && document key entry.
 	/// `threshold + 1` is the minimal number of nodes, required to restore private key.
 	/// Result is a DK, encrypted with caller public key.
-	fn generate_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<EncryptedDocumentKey, Error>;
+	fn generate_document_key(&self, key_id: &ServerKeyId, author: &Requester, threshold: usize) -> Result<EncryptedDocumentKey, Error>;
 	/// Restore previously stored DK.
 	/// DK is decrypted on the key server (which might be considered unsafe), and then encrypted with caller public key.
 	/// `key_id` is identifier of previously generated SK.
-	/// `signature` is key_id, signed with caller public key. Caller must be on ACL for this function to succeed.
+	/// `requester` is the one who requests access to document key. Caller must be on ACL for this function to succeed.
 	/// Result is a DK, encrypted with caller public key.
-	fn restore_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKey, Error>;
+	fn restore_document_key(&self, key_id: &ServerKeyId, requester: &Requester) -> Result<EncryptedDocumentKey, Error>;
 	/// Restore previously stored DK.
 	/// To decrypt DK on client:
 	/// 1) use requestor secret key to decrypt secret coefficients from result.decrypt_shadows
@@ -71,24 +71,24 @@ pub trait DocumentKeyServer: ServerKeyGenerator {
 	/// 3) calculate decrypt_shadow_point: decrypt_shadows_sum * result.common_point
 	/// 4) calculate decrypted_secret: result.decrypted_secret + decrypt_shadow_point
 	/// Result is a DK shadow.
-	fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKeyShadow, Error>;
+	fn restore_document_key_shadow(&self, key_id: &ServerKeyId, requester: &Requester) -> Result<EncryptedDocumentKeyShadow, Error>;
 }
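The doc comments above fully determine the encryption algebra: with generation point `T`, server key secret `x` and public `y = x·T`, storing a document key `M` records the pair `(common_point, encrypted_point) = (k·T, M + k·y)`. One line of algebra, worth writing out, shows why holding `x` (or, in the shadow protocol, a blinded distributed version of it) suffices to recover `M`:

```latex
% Decryption identity behind the shadow steps 1)-4) above,
% given common_point = kT, C = M + ky and y = xT:
\[
C - x\cdot\mathrm{common\_point} \;=\; M + k(xT) - x(kT) \;=\; M
\]
```

`restore_document_key` applies this correction on a key server; the shadow variant never assembles `x` on any single node: each node returns a blinded share, and only the client, after summing the decrypted shadows and forming `decrypt_shadows_sum * common_point`, applies the same `x·common_point` term (up to the sign convention used when `decrypted_secret` is produced).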
 
 /// Message signer.
 pub trait MessageSigner: ServerKeyGenerator {
 	/// Generate Schnorr signature for message with previously generated SK.
 	/// `key_id` is the caller-provided identifier of generated SK.
-	/// `signature` is `key_id`, signed with caller public key.
+	/// `requester` is the one who requests access to server key private.
 	/// `message` is the message to be signed.
 	/// Result is a signed message, encrypted with caller public key.
-	fn sign_message_schnorr(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result<EncryptedMessageSignature, Error>;
+	fn sign_message_schnorr(&self, key_id: &ServerKeyId, requester: &Requester, message: MessageHash) -> Result<EncryptedMessageSignature, Error>;
 	/// Generate ECDSA signature for message with previously generated SK.
 	/// WARNING: only possible when SK was generated using t <= 2 * N.
 	/// `key_id` is the caller-provided identifier of generated SK.
 	/// `signature` is `key_id`, signed with caller public key.
 	/// `message` is the message to be signed.
 	/// Result is a signed message, encrypted with caller public key.
-	fn sign_message_ecdsa(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result<EncryptedMessageSignature, Error>;
+	fn sign_message_ecdsa(&self, key_id: &ServerKeyId, signature: &Requester, message: MessageHash) -> Result<EncryptedMessageSignature, Error>;
 }
 
 /// Administrative sessions server.
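The net effect of this file's hunks: every per-key entry point now takes a `Requester` instead of a raw `RequestSignature`. A hedged usage sketch, assuming a `&DocumentKeyServer` trait-object handle and a `signature` already in scope; the conversion relies on the existing `From<ethkey::Signature>` impl shown further below:

```rust
// Illustrative: the same call site can now pass any requester flavour.
fn fetch_document_key(key_server: &DocumentKeyServer, key_id: &ServerKeyId, signature: ethkey::Signature)
	-> Result<EncryptedDocumentKey, Error>
{
	// Signature-based callers keep working through the From impl...
	let requester: Requester = signature.into();
	// ...while Public- or Address-identified callers construct the other variants.
	key_server.restore_document_key(key_id, &requester)
}
```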
@@ -38,8 +38,8 @@ pub use ethkey::Public;
 /// Secret store error
 #[derive(Debug, PartialEq)]
 pub enum Error {
-	/// Bad signature is passed
-	BadSignature,
+	/// Insufficient requester data
+	InsufficientRequesterData(String),
 	/// Access to resource is denied
 	AccessDenied,
 	/// Requested document not found
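Where `BadSignature` carried no context, the new variant threads the failure reason through as a `String`. A minimal sketch of what callers can now surface; whether the real code maps it exactly this way is an assumption:

```rust
// Illustrative: the String reason produced by Requester::public (see the
// Requester hunk below) survives end-to-end into the error variant.
fn recover_requester_public(requester: &Requester, key_id: &ServerKeyId) -> Result<Public, Error> {
	requester.public(key_id).map_err(Error::InsufficientRequesterData)
}
```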
@@ -77,8 +77,16 @@ pub enum ContractAddress {
 pub struct ServiceConfiguration {
 	/// HTTP listener address. If None, HTTP API is disabled.
 	pub listener_address: Option<NodeAddress>,
-	/// Service contract address. If None, service contract API is disabled.
+	/// Service contract address.
 	pub service_contract_address: Option<ContractAddress>,
+	/// Server key generation service contract address.
+	pub service_contract_srv_gen_address: Option<ContractAddress>,
+	/// Server key retrieval service contract address.
+	pub service_contract_srv_retr_address: Option<ContractAddress>,
+	/// Document key store service contract address.
+	pub service_contract_doc_store_address: Option<ContractAddress>,
+	/// Document key shadow retrieval service contract address.
+	pub service_contract_doc_sretr_address: Option<ContractAddress>,
 	/// Is ACL check enabled. If false, everyone has access to all keys. Useful for tests only.
 	pub acl_check_enabled: bool,
 	/// Data directory path for secret store
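With one optional address per session type, each on-chain service can now be switched on independently, mirroring the new per-session CLI flags. A small sketch of reading those switches; the field names come from the hunk above, but whether the listener is wired exactly this way is an assumption:

```rust
// Each session type is enabled iff its contract address source is configured.
fn enabled_services(config: &ServiceConfiguration) -> Vec<&'static str> {
	let mut enabled = Vec::new();
	if config.service_contract_srv_gen_address.is_some() { enabled.push("server key generation"); }
	if config.service_contract_srv_retr_address.is_some() { enabled.push("server key retrieval"); }
	if config.service_contract_doc_store_address.is_some() { enabled.push("document key store"); }
	if config.service_contract_doc_sretr_address.is_some() { enabled.push("document key shadow retrieval"); }
	enabled
}
```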
@@ -131,7 +139,7 @@ pub enum Requester {
 impl fmt::Display for Error {
 	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
 		match *self {
-			Error::BadSignature => write!(f, "Bad signature"),
+			Error::InsufficientRequesterData(ref e) => write!(f, "Insufficient requester data: {}", e),
 			Error::AccessDenied => write!(f, "Access denied"),
 			Error::DocumentNotFound => write!(f, "Document not found"),
 			Error::Hyper(ref msg) => write!(f, "Hyper error: {}", msg),
@@ -163,6 +171,8 @@ impl From<kvdb::Error> for Error {
 impl From<key_server_cluster::Error> for Error {
 	fn from(err: key_server_cluster::Error) -> Self {
 		match err {
+			key_server_cluster::Error::InsufficientRequesterData(err)
+				=> Error::InsufficientRequesterData(err),
 			key_server_cluster::Error::ConsensusUnreachable
 				| key_server_cluster::Error::AccessDenied => Error::AccessDenied,
 			key_server_cluster::Error::MissingKeyShare => Error::DocumentNotFound,
@@ -184,16 +194,18 @@ impl Default for Requester {
 }
 
 impl Requester {
-	pub fn public(&self, server_key_id: &ServerKeyId) -> Option<Public> {
+	pub fn public(&self, server_key_id: &ServerKeyId) -> Result<Public, String> {
 		match *self {
-			Requester::Signature(ref signature) => ethkey::recover(signature, server_key_id).ok(),
-			Requester::Public(ref public) => Some(public.clone()),
-			Requester::Address(_) => None,
+			Requester::Signature(ref signature) => ethkey::recover(signature, server_key_id)
+				.map_err(|e| format!("bad signature: {}", e)),
+			Requester::Public(ref public) => Ok(public.clone()),
+			Requester::Address(_) => Err("cannot recover public from address".into()),
 		}
 	}
 
-	pub fn address(&self, server_key_id: &ServerKeyId) -> Option<ethkey::Address> {
-		self.public(server_key_id).map(|p| ethkey::public_to_address(&p))
+	pub fn address(&self, server_key_id: &ServerKeyId) -> Result<ethkey::Address, String> {
+		self.public(server_key_id)
+			.map(|p| ethkey::public_to_address(&p))
 	}
 }
 
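The `Option` → `Result<_, String>` change lets callers distinguish "bad signature" from "address-only requester". A short sketch, assuming a `signature: ethkey::Signature` and a `server_key_id: ServerKeyId` in scope:

```rust
// Illustrative: recovering the requester's address now reports *why* it failed.
let requester: Requester = signature.into(); // existing From<ethkey::Signature> impl
match requester.address(&server_key_id) {
	Ok(address) => println!("request authorised for {:?}", address),
	Err(error) => println!("insufficient requester data: {}", error),
}
```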
@@ -202,3 +214,15 @@ impl From<ethkey::Signature> for Requester {
 		Requester::Signature(signature)
 	}
 }
+
+impl From<ethereum_types::Public> for Requester {
+	fn from(public: ethereum_types::Public) -> Requester {
+		Requester::Public(public)
+	}
+}
+
+impl From<ethereum_types::Address> for Requester {
+	fn from(address: ethereum_types::Address) -> Requester {
+		Requester::Address(address)
+	}
+}