Merge pull request #7101 from paritytech/secretstore_kovan
SecretStore: Kovan integration initial version
This commit is contained in:
commit
f8bd6b9f63
1
Cargo.lock
generated
1
Cargo.lock
generated
@ -671,6 +671,7 @@ dependencies = [
|
||||
"ethcore-util 1.9.0",
|
||||
"ethcrypto 0.1.0",
|
||||
"ethkey 0.3.0",
|
||||
"ethsync 1.9.0",
|
||||
"futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
|
@ -26,6 +26,7 @@ const REGISTRY_ABI: &'static str = include_str!("res/registrar.json");
|
||||
const URLHINT_ABI: &'static str = include_str!("res/urlhint.json");
|
||||
const SERVICE_TRANSACTION_ABI: &'static str = include_str!("res/service_transaction.json");
|
||||
const SECRETSTORE_ACL_STORAGE_ABI: &'static str = include_str!("res/secretstore_acl_storage.json");
|
||||
const SECRETSTORE_SERVICE_ABI: &'static str = include_str!("res/secretstore_service.json");
|
||||
const VALIDATOR_SET_ABI: &'static str = include_str!("res/validator_set.json");
|
||||
const VALIDATOR_REPORT_ABI: &'static str = include_str!("res/validator_report.json");
|
||||
const PEER_SET_ABI: &'static str = include_str!("res/peer_set.json");
|
||||
@ -53,6 +54,7 @@ fn main() {
|
||||
build_file("Urlhint", URLHINT_ABI, "urlhint.rs");
|
||||
build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs");
|
||||
build_file("SecretStoreAclStorage", SECRETSTORE_ACL_STORAGE_ABI, "secretstore_acl_storage.rs");
|
||||
build_file("SecretStoreService", SECRETSTORE_SERVICE_ABI, "secretstore_service.rs");
|
||||
build_file("ValidatorSet", VALIDATOR_SET_ABI, "validator_set.rs");
|
||||
build_file("ValidatorReport", VALIDATOR_REPORT_ABI, "validator_report.rs");
|
||||
build_file("PeerSet", PEER_SET_ABI, "peer_set.rs");
|
||||
|
@ -46,7 +46,7 @@ pub fn generate_module(struct_name: &str, abi: &str) -> Result<String, Error> {
|
||||
Ok(format!(r##"
|
||||
use byteorder::{{BigEndian, ByteOrder}};
|
||||
use futures::{{future, Future, IntoFuture}};
|
||||
use ethabi::{{Contract, Token, Event}};
|
||||
use ethabi::{{Bytes, Contract, Token, Event}};
|
||||
use bigint;
|
||||
|
||||
type BoxFuture<A, B> = Box<Future<Item = A, Error = B> + Send>;
|
||||
@ -96,7 +96,7 @@ fn generate_functions(contract: &Contract) -> Result<String, Error> {
|
||||
let inputs: Vec<_> = function.inputs.iter().map(|i| i.kind.clone()).collect();
|
||||
let outputs: Vec<_> = function.outputs.iter().map(|i| i.kind.clone()).collect();
|
||||
|
||||
let (input_params, to_tokens) = input_params_codegen(&inputs)
|
||||
let (input_params, input_names, to_tokens) = input_params_codegen(&inputs)
|
||||
.map_err(|bad_type| Error::UnsupportedType(name.clone(), bad_type))?;
|
||||
|
||||
let (output_type, decode_outputs) = output_params_codegen(&outputs)
|
||||
@ -113,14 +113,14 @@ pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type},
|
||||
U: IntoFuture<Item=Vec<u8>, Error=String>,
|
||||
U::Future: Send + 'static
|
||||
{{
|
||||
let call_addr = self.address;
|
||||
let call_future = match self.encode_{snake_name}_input({params_names}) {{
|
||||
Ok(call_data) => (call)(call_addr, call_data),
|
||||
Err(e) => return Box::new(future::err(e)),
|
||||
}};
|
||||
|
||||
let function = self.contract.function(r#"{abi_name}"#)
|
||||
.expect("function existence checked at compile-time; qed").clone();
|
||||
let call_addr = self.address;
|
||||
|
||||
let call_future = match function.encode_input(&{to_tokens}) {{
|
||||
Ok(call_data) => (call)(call_addr, call_data),
|
||||
Err(e) => return Box::new(future::err(format!("Error encoding call: {{:?}}", e))),
|
||||
}};
|
||||
|
||||
Box::new(call_future
|
||||
.into_future()
|
||||
@ -128,12 +128,22 @@ pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type},
|
||||
.map(Vec::into_iter)
|
||||
.and_then(|mut outputs| {decode_outputs}))
|
||||
}}
|
||||
|
||||
/// Encode "{abi_name}" function arguments.
|
||||
/// Arguments: {abi_inputs:?}
|
||||
pub fn encode_{snake_name}_input(&self, {params}) -> Result<Vec<u8>, String> {{
|
||||
self.contract.function(r#"{abi_name}"#)
|
||||
.expect("function existence checked at compile-time; qed")
|
||||
.encode_input(&{to_tokens})
|
||||
.map_err(|e| format!("Error encoding call: {{:?}}", e))
|
||||
}}
|
||||
"##,
|
||||
abi_name = name,
|
||||
abi_inputs = inputs,
|
||||
abi_outputs = outputs,
|
||||
snake_name = snake_name,
|
||||
params = input_params,
|
||||
params_names = input_names,
|
||||
output_type = output_type,
|
||||
to_tokens = to_tokens,
|
||||
decode_outputs = decode_outputs,
|
||||
@ -145,15 +155,17 @@ pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type},
|
||||
|
||||
// generate code for params in function signature and turning them into tokens.
|
||||
//
|
||||
// two pieces of code are generated: the first gives input types for the function signature,
|
||||
// and the second gives code to tokenize those inputs.
|
||||
// three pieces of code are generated: the first gives input types for the function signature,
|
||||
// the second one gives input parameter names to pass to another method,
|
||||
// and the third gives code to tokenize those inputs.
|
||||
//
|
||||
// params of form `param_0: type_0, param_1: type_1, ...`
|
||||
// tokenizing code of form `{let mut tokens = Vec::new(); tokens.push({param_X}); tokens }`
|
||||
//
|
||||
// returns any unsupported param type encountered.
|
||||
fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String), ParamType> {
|
||||
fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String, String), ParamType> {
|
||||
let mut params = String::new();
|
||||
let mut params_names = String::new();
|
||||
let mut to_tokens = "{ let mut tokens = Vec::new();".to_string();
|
||||
|
||||
for (index, param_type) in inputs.iter().enumerate() {
|
||||
@ -164,11 +176,13 @@ fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String), ParamT
|
||||
params.push_str(&format!("{}{}: {}, ",
|
||||
if needs_mut { "mut " } else { "" }, param_name, rust_type));
|
||||
|
||||
params_names.push_str(&format!("{}, ", param_name));
|
||||
|
||||
to_tokens.push_str(&format!("tokens.push({{ {} }});", tokenize_code));
|
||||
}
|
||||
|
||||
to_tokens.push_str(" tokens }");
|
||||
Ok((params, to_tokens))
|
||||
Ok((params, params_names, to_tokens))
|
||||
}
|
||||
|
||||
// generate code for outputs of the function and detokenizing them.
|
||||
|
8
ethcore/native_contracts/res/secretstore_service.json
Normal file
8
ethcore/native_contracts/res/secretstore_service.json
Normal file
@ -0,0 +1,8 @@
|
||||
[
|
||||
{"constant":true,"inputs":[],"name":"serverKeyGenerationRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
|
||||
{"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getServerKeyId","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},
|
||||
{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"},{"name":"v","type":"uint8"},{"name":"r","type":"bytes32"},{"name":"s","type":"bytes32"}],"name":"serverKeyGenerated","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
|
||||
{"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"getServerKeyThreshold","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
|
||||
{"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"authority","type":"address"}],"name":"getServerKeyConfirmationStatus","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},
|
||||
{"anonymous":false,"inputs":[{"indexed":true,"name":"serverKeyId","type":"bytes32"},{"indexed":true,"name":"threshold","type":"uint256"}],"name":"ServerKeyRequested","type":"event"}
|
||||
]
|
@ -28,6 +28,7 @@ mod registry;
|
||||
mod urlhint;
|
||||
mod service_transaction;
|
||||
mod secretstore_acl_storage;
|
||||
mod secretstore_service;
|
||||
mod validator_set;
|
||||
mod validator_report;
|
||||
mod peer_set;
|
||||
@ -40,6 +41,7 @@ pub use self::registry::Registry;
|
||||
pub use self::urlhint::Urlhint;
|
||||
pub use self::service_transaction::ServiceTransactionChecker;
|
||||
pub use self::secretstore_acl_storage::SecretStoreAclStorage;
|
||||
pub use self::secretstore_service::SecretStoreService;
|
||||
pub use self::validator_set::ValidatorSet;
|
||||
pub use self::validator_report::ValidatorReport;
|
||||
pub use self::peer_set::PeerSet;
|
||||
|
21
ethcore/native_contracts/src/secretstore_service.rs
Normal file
21
ethcore/native_contracts/src/secretstore_service.rs
Normal file
@ -0,0 +1,21 @@
|
||||
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#![allow(unused_mut, unused_variables, unused_imports)]
|
||||
|
||||
//! Secret store service contract.
|
||||
|
||||
include!(concat!(env!("OUT_DIR"), "/secretstore_service.rs"));
|
@ -555,6 +555,10 @@ usage! {
|
||||
"--no-acl-check",
|
||||
"Disable ACL check (useful for test environments).",
|
||||
|
||||
ARG arg_secretstore_contract: (String) = "none", or |c: &Config| otry!(c.secretstore).service_contract.clone(),
|
||||
"--secretstore-contract=[SOURCE]",
|
||||
"Secret Store Service contract address source: none, registry (contract address is read from registry) or address.",
|
||||
|
||||
ARG arg_secretstore_nodes: (String) = "", or |c: &Config| otry!(c.secretstore).nodes.as_ref().map(|vec| vec.join(",")),
|
||||
"--secretstore-nodes=[NODES]",
|
||||
"Comma-separated list of other secret store cluster nodes in form NODE_PUBLIC_KEY_IN_HEX@NODE_IP_ADDR:NODE_PORT.",
|
||||
@ -1093,6 +1097,7 @@ struct SecretStore {
|
||||
disable: Option<bool>,
|
||||
disable_http: Option<bool>,
|
||||
disable_acl_check: Option<bool>,
|
||||
service_contract: Option<String>,
|
||||
self_secret: Option<String>,
|
||||
admin_public: Option<String>,
|
||||
nodes: Option<Vec<String>>,
|
||||
@ -1494,6 +1499,7 @@ mod tests {
|
||||
flag_no_secretstore: false,
|
||||
flag_no_secretstore_http: false,
|
||||
flag_no_secretstore_acl_check: false,
|
||||
arg_secretstore_contract: "none".into(),
|
||||
arg_secretstore_secret: None,
|
||||
arg_secretstore_admin_public: None,
|
||||
arg_secretstore_nodes: "".into(),
|
||||
@ -1737,6 +1743,7 @@ mod tests {
|
||||
disable: None,
|
||||
disable_http: None,
|
||||
disable_acl_check: None,
|
||||
service_contract: None,
|
||||
self_secret: None,
|
||||
admin_public: None,
|
||||
nodes: None,
|
||||
|
@ -80,6 +80,7 @@ pass = "test_pass"
|
||||
disable = false
|
||||
disable_http = false
|
||||
disable_acl_check = false
|
||||
service_contract = "none"
|
||||
nodes = []
|
||||
http_interface = "local"
|
||||
http_port = 8082
|
||||
|
@ -46,7 +46,7 @@ use ethcore_logger::Config as LogConfig;
|
||||
use dir::{self, Directories, default_hypervisor_path, default_local_path, default_data_path};
|
||||
use dapps::Configuration as DappsConfiguration;
|
||||
use ipfs::Configuration as IpfsConfiguration;
|
||||
use secretstore::{Configuration as SecretStoreConfiguration, NodeSecretKey};
|
||||
use secretstore::{NodeSecretKey, Configuration as SecretStoreConfiguration, ContractAddress as SecretStoreContractAddress};
|
||||
use updater::{UpdatePolicy, UpdateFilter, ReleaseTrack};
|
||||
use run::RunCmd;
|
||||
use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, KillBlockchain, ExportState, DataFormat};
|
||||
@ -608,6 +608,7 @@ impl Configuration {
|
||||
enabled: self.secretstore_enabled(),
|
||||
http_enabled: self.secretstore_http_enabled(),
|
||||
acl_check_enabled: self.secretstore_acl_check_enabled(),
|
||||
service_contract_address: self.secretstore_service_contract_address()?,
|
||||
self_secret: self.secretstore_self_secret()?,
|
||||
nodes: self.secretstore_nodes()?,
|
||||
interface: self.secretstore_interface(),
|
||||
@ -1085,6 +1086,14 @@ impl Configuration {
|
||||
!self.args.flag_no_secretstore_acl_check
|
||||
}
|
||||
|
||||
fn secretstore_service_contract_address(&self) -> Result<Option<SecretStoreContractAddress>, String> {
|
||||
Ok(match self.args.arg_secretstore_contract.as_ref() {
|
||||
"none" => None,
|
||||
"registry" => Some(SecretStoreContractAddress::Registry),
|
||||
a => Some(SecretStoreContractAddress::Address(a.parse().map_err(|e| format!("{}", e))?)),
|
||||
})
|
||||
}
|
||||
|
||||
fn ui_enabled(&self) -> bool {
|
||||
if self.args.flag_force_ui {
|
||||
return true;
|
||||
|
@ -785,6 +785,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
|
||||
// secret store key server
|
||||
let secretstore_deps = secretstore::Dependencies {
|
||||
client: client.clone(),
|
||||
sync: sync_provider.clone(),
|
||||
account_provider: account_provider,
|
||||
accounts_passwords: &passwords,
|
||||
};
|
||||
|
@ -20,11 +20,12 @@ use dir::default_data_path;
|
||||
use ethcore::account_provider::AccountProvider;
|
||||
use ethcore::client::Client;
|
||||
use ethkey::{Secret, Public};
|
||||
use ethsync::SyncProvider;
|
||||
use helpers::replace_home;
|
||||
use util::Address;
|
||||
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
/// This node secret key.
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
pub enum NodeSecretKey {
|
||||
/// Stored as plain text in configuration file.
|
||||
Plain(Secret),
|
||||
@ -32,6 +33,15 @@ pub enum NodeSecretKey {
|
||||
KeyStore(Address),
|
||||
}
|
||||
|
||||
/// Secret store service contract address.
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
pub enum ContractAddress {
|
||||
/// Contract address is read from registry.
|
||||
Registry,
|
||||
/// Contract address is specified.
|
||||
Address(Address),
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
/// Secret store configuration
|
||||
pub struct Configuration {
|
||||
@ -41,6 +51,8 @@ pub struct Configuration {
|
||||
pub http_enabled: bool,
|
||||
/// Is ACL check enabled.
|
||||
pub acl_check_enabled: bool,
|
||||
/// Service contract address.
|
||||
pub service_contract_address: Option<ContractAddress>,
|
||||
/// This node secret.
|
||||
pub self_secret: Option<NodeSecretKey>,
|
||||
/// Other nodes IDs + addresses.
|
||||
@ -63,6 +75,8 @@ pub struct Configuration {
|
||||
pub struct Dependencies<'a> {
|
||||
/// Blockchain client.
|
||||
pub client: Arc<Client>,
|
||||
/// Sync provider.
|
||||
pub sync: Arc<SyncProvider>,
|
||||
/// Account provider.
|
||||
pub account_provider: Arc<AccountProvider>,
|
||||
/// Passed accounts passwords.
|
||||
@ -90,7 +104,7 @@ mod server {
|
||||
use ethcore_secretstore;
|
||||
use ethkey::KeyPair;
|
||||
use ansi_term::Colour::Red;
|
||||
use super::{Configuration, Dependencies, NodeSecretKey};
|
||||
use super::{Configuration, Dependencies, NodeSecretKey, ContractAddress};
|
||||
|
||||
/// Key server
|
||||
pub struct KeyServer {
|
||||
@ -134,6 +148,10 @@ mod server {
|
||||
address: conf.http_interface.clone(),
|
||||
port: conf.http_port,
|
||||
}) } else { None },
|
||||
service_contract_address: conf.service_contract_address.map(|c| match c {
|
||||
ContractAddress::Registry => ethcore_secretstore::ContractAddress::Registry,
|
||||
ContractAddress::Address(address) => ethcore_secretstore::ContractAddress::Address(address),
|
||||
}),
|
||||
data_path: conf.data_path.clone(),
|
||||
acl_check_enabled: conf.acl_check_enabled,
|
||||
cluster_config: ethcore_secretstore::ClusterConfiguration {
|
||||
@ -153,7 +171,7 @@ mod server {
|
||||
|
||||
cconf.cluster_config.nodes.insert(self_secret.public().clone(), cconf.cluster_config.listener_address.clone());
|
||||
|
||||
let key_server = ethcore_secretstore::start(deps.client, self_secret, cconf)
|
||||
let key_server = ethcore_secretstore::start(deps.client, deps.sync, self_secret, cconf)
|
||||
.map_err(|e| format!("Error starting KeyServer {}: {}", key_server_name, e))?;
|
||||
|
||||
Ok(KeyServer {
|
||||
@ -172,6 +190,7 @@ impl Default for Configuration {
|
||||
enabled: true,
|
||||
http_enabled: true,
|
||||
acl_check_enabled: true,
|
||||
service_contract_address: None,
|
||||
self_secret: None,
|
||||
admin_public: None,
|
||||
nodes: BTreeMap::new(),
|
||||
|
@ -26,6 +26,7 @@ ethcore = { path = "../ethcore" }
|
||||
ethcore-bytes = { path = "../util/bytes" }
|
||||
ethcore-util = { path = "../util" }
|
||||
ethcore-bigint = { path = "../util/bigint" }
|
||||
ethsync = { path = "../sync" }
|
||||
kvdb = { path = "../util/kvdb" }
|
||||
kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }
|
||||
keccak-hash = { path = "../util/hash" }
|
||||
|
@ -14,16 +14,17 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
use std::sync::Arc;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use futures::{future, Future};
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use ethkey::public_to_address;
|
||||
use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify};
|
||||
use ethcore::client::{BlockChainClient, BlockId, ChainNotify};
|
||||
use native_contracts::SecretStoreAclStorage;
|
||||
use bigint::hash::H256;
|
||||
use util::Address;
|
||||
use bytes::Bytes;
|
||||
use trusted_client::TrustedClient;
|
||||
use types::all::{Error, ServerKeyId, Public};
|
||||
|
||||
const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker";
|
||||
@ -43,7 +44,7 @@ pub struct OnChainAclStorage {
|
||||
/// Cached on-chain ACL storage contract.
|
||||
struct CachedContract {
|
||||
/// Blockchain client.
|
||||
client: Weak<Client>,
|
||||
client: TrustedClient,
|
||||
/// Contract address.
|
||||
contract_addr: Option<Address>,
|
||||
/// Contract at given address.
|
||||
@ -57,12 +58,15 @@ pub struct DummyAclStorage {
|
||||
}
|
||||
|
||||
impl OnChainAclStorage {
|
||||
pub fn new(client: &Arc<Client>) -> Arc<Self> {
|
||||
pub fn new(trusted_client: TrustedClient) -> Result<Arc<Self>, Error> {
|
||||
let client = trusted_client.get_untrusted();
|
||||
let acl_storage = Arc::new(OnChainAclStorage {
|
||||
contract: Mutex::new(CachedContract::new(client)),
|
||||
contract: Mutex::new(CachedContract::new(trusted_client)),
|
||||
});
|
||||
client.add_notify(acl_storage.clone());
|
||||
acl_storage
|
||||
client
|
||||
.ok_or(Error::Internal("Constructing OnChainAclStorage without active Client".into()))?
|
||||
.add_notify(acl_storage.clone());
|
||||
Ok(acl_storage)
|
||||
}
|
||||
}
|
||||
|
||||
@ -81,16 +85,16 @@ impl ChainNotify for OnChainAclStorage {
|
||||
}
|
||||
|
||||
impl CachedContract {
|
||||
pub fn new(client: &Arc<Client>) -> Self {
|
||||
pub fn new(client: TrustedClient) -> Self {
|
||||
CachedContract {
|
||||
client: Arc::downgrade(client),
|
||||
client: client,
|
||||
contract_addr: None,
|
||||
contract: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn update(&mut self) {
|
||||
if let Some(client) = self.client.upgrade() {
|
||||
if let Some(client) = self.client.get() {
|
||||
let new_contract_addr = client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned());
|
||||
if self.contract_addr.as_ref() != new_contract_addr.as_ref() {
|
||||
self.contract = new_contract_addr.map(|contract_addr| {
|
||||
@ -105,19 +109,20 @@ impl CachedContract {
|
||||
}
|
||||
|
||||
pub fn check(&mut self, public: &Public, document: &ServerKeyId) -> Result<bool, Error> {
|
||||
match self.contract.as_ref() {
|
||||
Some(contract) => {
|
||||
let address = public_to_address(&public);
|
||||
let do_call = |a, d| future::done(
|
||||
self.client
|
||||
.upgrade()
|
||||
.ok_or_else(|| "Calling contract without client".into())
|
||||
.and_then(|c| c.call_contract(BlockId::Latest, a, d)));
|
||||
contract.check_permissions(do_call, address, document.clone())
|
||||
.map_err(|err| Error::Internal(err))
|
||||
.wait()
|
||||
},
|
||||
None => Err(Error::Internal("ACL checker contract is not configured".to_owned())),
|
||||
if let Some(client) = self.client.get() {
|
||||
// call contract to check accesss
|
||||
match self.contract.as_ref() {
|
||||
Some(contract) => {
|
||||
let address = public_to_address(&public);
|
||||
let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d));
|
||||
contract.check_permissions(do_call, address, document.clone())
|
||||
.map_err(|err| Error::Internal(err))
|
||||
.wait()
|
||||
},
|
||||
None => Err(Error::Internal("ACL checker contract is not configured".to_owned())),
|
||||
}
|
||||
} else {
|
||||
Err(Error::Internal("Calling ACL contract without trusted blockchain client".into()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -53,7 +53,6 @@ impl KeyServerImpl {
|
||||
}
|
||||
|
||||
/// Get cluster client reference.
|
||||
#[cfg(test)]
|
||||
pub fn cluster(&self) -> Arc<ClusterClient> {
|
||||
self.data.lock().cluster.clone()
|
||||
}
|
||||
@ -65,7 +64,9 @@ impl AdminSessionsServer for KeyServerImpl {
|
||||
fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet<NodeId>) -> Result<(), Error> {
|
||||
let servers_set_change_session = self.data.lock().cluster
|
||||
.new_servers_set_change_session(None, new_servers_set, old_set_signature, new_set_signature)?;
|
||||
servers_set_change_session.wait().map_err(Into::into)
|
||||
servers_set_change_session.as_servers_set_change()
|
||||
.expect("new_servers_set_change_session creates servers_set_change_session; qed")
|
||||
.wait().map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
@ -203,6 +204,7 @@ pub mod tests {
|
||||
use std::collections::BTreeSet;
|
||||
use std::time;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::net::SocketAddr;
|
||||
use std::collections::BTreeMap;
|
||||
use ethcrypto;
|
||||
@ -218,7 +220,10 @@ pub mod tests {
|
||||
use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer};
|
||||
use super::KeyServerImpl;
|
||||
|
||||
pub struct DummyKeyServer;
|
||||
#[derive(Default)]
|
||||
pub struct DummyKeyServer {
|
||||
pub generation_requests_count: AtomicUsize,
|
||||
}
|
||||
|
||||
impl KeyServer for DummyKeyServer {}
|
||||
|
||||
@ -230,7 +235,8 @@ pub mod tests {
|
||||
|
||||
impl ServerKeyGenerator for DummyKeyServer {
|
||||
fn generate_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _threshold: usize) -> Result<Public, Error> {
|
||||
unimplemented!()
|
||||
self.generation_requests_count.fetch_add(1, Ordering::Relaxed);
|
||||
Err(Error::Internal("test error".into()))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -31,16 +31,6 @@ use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
||||
/// Number of versions sent in single message.
|
||||
const VERSIONS_PER_MESSAGE: usize = 32;
|
||||
|
||||
/// Key version negotiation session API.
|
||||
pub trait Session: Send + Sync + 'static {
|
||||
/// Set continue action.
|
||||
fn set_continue_action(&self, action: ContinueAction);
|
||||
/// Get continue action.
|
||||
fn continue_action(&self) -> Option<ContinueAction>;
|
||||
/// Wait until session is completed.
|
||||
fn wait(&self) -> Result<(H256, NodeId), Error>;
|
||||
}
|
||||
|
||||
/// Key version negotiation transport.
|
||||
pub trait SessionTransport {
|
||||
/// Send message to given node.
|
||||
@ -196,6 +186,21 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
||||
.clone())
|
||||
}
|
||||
|
||||
/// Set continue action.
|
||||
pub fn set_continue_action(&self, action: ContinueAction) {
|
||||
self.data.lock().continue_with = Some(action);
|
||||
}
|
||||
|
||||
/// Get continue action.
|
||||
pub fn continue_action(&self) -> Option<ContinueAction> {
|
||||
self.data.lock().continue_with.clone()
|
||||
}
|
||||
|
||||
/// Wait for session completion.
|
||||
pub fn wait(&self) -> Result<(H256, NodeId), Error> {
|
||||
Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone())
|
||||
}
|
||||
|
||||
/// Initialize session.
|
||||
pub fn initialize(&self, connected_nodes: BTreeSet<NodeId>) -> Result<(), Error> {
|
||||
// check state
|
||||
@ -355,27 +360,6 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Session for SessionImpl<T> where T: SessionTransport + Send + Sync + 'static {
|
||||
fn set_continue_action(&self, action: ContinueAction) {
|
||||
self.data.lock().continue_with = Some(action);
|
||||
}
|
||||
|
||||
fn continue_action(&self) -> Option<ContinueAction> {
|
||||
self.data.lock().continue_with.clone()
|
||||
}
|
||||
|
||||
fn wait(&self) -> Result<(H256, NodeId), Error> {
|
||||
let mut data = self.data.lock();
|
||||
if !data.result.is_some() {
|
||||
self.core.completed.wait(&mut data);
|
||||
}
|
||||
|
||||
data.result.as_ref()
|
||||
.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
|
||||
.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> ClusterSession for SessionImpl<T> where T: SessionTransport {
|
||||
type Id = SessionIdWithSubSession;
|
||||
|
||||
@ -709,6 +693,7 @@ mod tests {
|
||||
nodes.values().nth(0).unwrap().insert(Default::default(), DocumentKeyShare {
|
||||
author: Default::default(),
|
||||
threshold: 1,
|
||||
public: Default::default(),
|
||||
common_point: None,
|
||||
encrypted_point: None,
|
||||
versions: vec![DocumentKeyShareVersion {
|
||||
|
@ -33,7 +33,7 @@ use key_server_cluster::share_change_session::{ShareChangeSession, ShareChangeSe
|
||||
prepare_share_change_session_plan};
|
||||
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl,
|
||||
SessionParams as KeyVersionNegotiationSessionParams, LargestSupportResultComputer,
|
||||
SessionTransport as KeyVersionNegotiationTransport, Session as KeyVersionNegotiationSession};
|
||||
SessionTransport as KeyVersionNegotiationTransport};
|
||||
use key_server_cluster::jobs::job_session::JobTransport;
|
||||
use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest};
|
||||
use key_server_cluster::jobs::unknown_sessions_job::{UnknownSessionsJob};
|
||||
@ -44,12 +44,6 @@ use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
||||
/// Maximal number of active share change sessions.
|
||||
const MAX_ACTIVE_KEY_SESSIONS: usize = 64;
|
||||
|
||||
/// Servers set change session API.
|
||||
pub trait Session: Send + Sync + 'static {
|
||||
/// Wait until session is completed.
|
||||
fn wait(&self) -> Result<(), Error>;
|
||||
}
|
||||
|
||||
/// Servers set change session.
|
||||
/// Brief overview:
|
||||
/// 1) consensus establishing
|
||||
@ -211,6 +205,11 @@ impl SessionImpl {
|
||||
&self.core.meta.id
|
||||
}
|
||||
|
||||
/// Wait for session completion.
|
||||
pub fn wait(&self) -> Result<(), Error> {
|
||||
Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone())
|
||||
}
|
||||
|
||||
/// Initialize servers set change session on master node.
|
||||
pub fn initialize(&self, new_nodes_set: BTreeSet<NodeId>, all_set_signature: Signature, new_set_signature: Signature) -> Result<(), Error> {
|
||||
check_nodes_set(&self.core.all_nodes_set, &new_nodes_set)?;
|
||||
@ -877,18 +876,6 @@ impl SessionImpl {
|
||||
}
|
||||
}
|
||||
|
||||
impl Session for SessionImpl {
|
||||
fn wait(&self) -> Result<(), Error> {
|
||||
let mut data = self.data.lock();
|
||||
if !data.result.is_some() {
|
||||
self.core.completed.wait(&mut data);
|
||||
}
|
||||
|
||||
data.result.clone()
|
||||
.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
|
||||
}
|
||||
}
|
||||
|
||||
impl ClusterSession for SessionImpl {
|
||||
type Id = SessionId;
|
||||
|
||||
|
@ -32,12 +32,6 @@ use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAc
|
||||
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
|
||||
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
||||
|
||||
/// Share addition session API.
|
||||
pub trait Session: Send + Sync + 'static {
|
||||
/// Wait until session is completed.
|
||||
fn wait(&self) -> Result<(), Error>;
|
||||
}
|
||||
|
||||
/// Share addition session transport.
|
||||
pub trait SessionTransport: Clone + JobTransport<PartialJobRequest=ServersSetChangeAccessRequest, PartialJobResponse=bool> {
|
||||
/// Get all connected nodes. Since ShareAdd session requires all cluster nodes to be connected, this set equals to all known cluster nodes set.
|
||||
@ -92,14 +86,8 @@ struct SessionData<T: SessionTransport> {
|
||||
pub version: Option<H256>,
|
||||
/// Consensus session.
|
||||
pub consensus_session: Option<ShareAddChangeConsensusSession<T>>,
|
||||
/// NewKeyShare: threshold.
|
||||
pub key_share_threshold: Option<usize>,
|
||||
/// NewKeyShare: author.
|
||||
pub key_share_author: Option<Public>,
|
||||
/// NewKeyShare: Common (shared) encryption point.
|
||||
pub key_share_common_point: Option<Public>,
|
||||
/// NewKeyShare: Encrypted point.
|
||||
pub key_share_encrypted_point: Option<Public>,
|
||||
/// NewKeyShare (for nodes being added).
|
||||
pub new_key_share: Option<NewKeyShare>,
|
||||
/// Nodes id numbers.
|
||||
pub id_numbers: Option<BTreeMap<NodeId, Option<Secret>>>,
|
||||
/// Secret subshares received from nodes.
|
||||
@ -108,6 +96,20 @@ struct SessionData<T: SessionTransport> {
|
||||
pub result: Option<Result<(), Error>>,
|
||||
}
|
||||
|
||||
/// New key share.
|
||||
struct NewKeyShare {
|
||||
/// NewKeyShare: threshold.
|
||||
pub threshold: usize,
|
||||
/// NewKeyShare: author.
|
||||
pub author: Public,
|
||||
/// NewKeyShare: joint public.
|
||||
pub joint_public: Public,
|
||||
/// NewKeyShare: Common (shared) encryption point.
|
||||
pub common_point: Option<Public>,
|
||||
/// NewKeyShare: Encrypted point.
|
||||
pub encrypted_point: Option<Public>,
|
||||
}
|
||||
|
||||
/// Session state.
|
||||
#[derive(Debug, PartialEq)]
|
||||
enum SessionState {
|
||||
@ -171,10 +173,7 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
||||
state: SessionState::ConsensusEstablishing,
|
||||
version: None,
|
||||
consensus_session: None,
|
||||
key_share_threshold: None,
|
||||
key_share_author: None,
|
||||
key_share_common_point: None,
|
||||
key_share_encrypted_point: None,
|
||||
new_key_share: None,
|
||||
id_numbers: None,
|
||||
secret_subshares: None,
|
||||
result: None,
|
||||
@ -430,7 +429,7 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
||||
}
|
||||
|
||||
// we only expect this message once
|
||||
if data.key_share_threshold.is_some() || data.key_share_author.is_some() || data.key_share_common_point.is_some() || data.key_share_encrypted_point.is_some() {
|
||||
if data.new_key_share.is_some() {
|
||||
return Err(Error::InvalidStateForRequest);
|
||||
}
|
||||
|
||||
@ -445,10 +444,13 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
||||
|
||||
// update data
|
||||
data.state = SessionState::WaitingForKeysDissemination;
|
||||
data.key_share_threshold = Some(message.threshold);
|
||||
data.key_share_author = Some(message.author.clone().into());
|
||||
data.key_share_common_point = message.common_point.clone().map(Into::into);
|
||||
data.key_share_encrypted_point = message.encrypted_point.clone().map(Into::into);
|
||||
data.new_key_share = Some(NewKeyShare {
|
||||
threshold: message.threshold,
|
||||
author: message.author.clone().into(),
|
||||
joint_public: message.joint_public.clone().into(),
|
||||
common_point: message.common_point.clone().map(Into::into),
|
||||
encrypted_point: message.encrypted_point.clone().map(Into::into),
|
||||
});
|
||||
|
||||
let id_numbers = data.id_numbers.as_mut()
|
||||
.expect("common key share data is expected after initialization; id_numers are filled during initialization; qed");
|
||||
@ -619,6 +621,7 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
||||
session_nonce: core.nonce,
|
||||
threshold: old_key_share.threshold,
|
||||
author: old_key_share.author.clone().into(),
|
||||
joint_public: old_key_share.public.clone().into(),
|
||||
common_point: old_key_share.common_point.clone().map(Into::into),
|
||||
encrypted_point: old_key_share.encrypted_point.clone().map(Into::into),
|
||||
id_numbers: old_key_version.id_numbers.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(),
|
||||
@ -666,8 +669,9 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
||||
let id_numbers = data.id_numbers.as_ref().expect(explanation);
|
||||
let secret_subshares = data.secret_subshares.as_ref().expect(explanation);
|
||||
let threshold = core.key_share.as_ref().map(|ks| ks.threshold)
|
||||
.unwrap_or_else(|| *data.key_share_threshold.as_ref()
|
||||
.expect("computation occurs after receiving key share threshold if not having one already; qed"));
|
||||
.unwrap_or_else(|| data.new_key_share.as_ref()
|
||||
.expect("computation occurs after receiving key share threshold if not having one already; qed")
|
||||
.threshold);
|
||||
|
||||
let explanation = "id_numbers are checked to have Some value for every consensus group node when consensus is establishe; qed";
|
||||
let sender_id_number = id_numbers[sender].as_ref().expect(explanation);
|
||||
@ -693,14 +697,17 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
||||
let refreshed_key_version = DocumentKeyShareVersion::new(id_numbers.clone().into_iter().map(|(k, v)| (k.clone(),
|
||||
v.expect("id_numbers are checked to have Some value for every consensus group node when consensus is establishe; qed"))).collect(),
|
||||
secret_share);
|
||||
let mut refreshed_key_share = core.key_share.as_ref().cloned().unwrap_or_else(|| DocumentKeyShare {
|
||||
author: data.key_share_author.clone()
|
||||
.expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed"),
|
||||
threshold: data.key_share_threshold.clone()
|
||||
.expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed"),
|
||||
common_point: data.key_share_common_point.clone(),
|
||||
encrypted_point: data.key_share_encrypted_point.clone(),
|
||||
versions: Vec::new(),
|
||||
let mut refreshed_key_share = core.key_share.as_ref().cloned().unwrap_or_else(|| {
|
||||
let new_key_share = data.new_key_share.as_ref()
|
||||
.expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed");
|
||||
DocumentKeyShare {
|
||||
author: new_key_share.author.clone(),
|
||||
threshold: new_key_share.threshold,
|
||||
public: new_key_share.joint_public.clone(),
|
||||
common_point: new_key_share.common_point.clone(),
|
||||
encrypted_point: new_key_share.encrypted_point.clone(),
|
||||
versions: Vec::new(),
|
||||
}
|
||||
});
|
||||
refreshed_key_share.versions.push(refreshed_key_version);
|
||||
|
||||
@ -721,18 +728,6 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Session for SessionImpl<T> where T: SessionTransport + Send + Sync + 'static {
|
||||
fn wait(&self) -> Result<(), Error> {
|
||||
let mut data = self.data.lock();
|
||||
if !data.result.is_some() {
|
||||
self.core.completed.wait(&mut data);
|
||||
}
|
||||
|
||||
data.result.clone()
|
||||
.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> ClusterSession for SessionImpl<T> where T: SessionTransport {
|
||||
type Id = SessionId;
|
||||
|
||||
|
@ -30,12 +30,6 @@ use key_server_cluster::jobs::key_access_job::KeyAccessJob;
|
||||
use key_server_cluster::jobs::decryption_job::{PartialDecryptionRequest, PartialDecryptionResponse, DecryptionJob};
|
||||
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
|
||||
|
||||
/// Decryption session API.
|
||||
pub trait Session: Send + Sync + 'static {
|
||||
/// Wait until session is completed. Returns distributely restored secret key.
|
||||
fn wait(&self) -> Result<EncryptedDocumentKeyShadow, Error>;
|
||||
}
|
||||
|
||||
/// Distributed decryption session.
|
||||
/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper:
|
||||
/// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf
|
||||
@ -206,6 +200,11 @@ impl SessionImpl {
|
||||
self.data.lock().result.clone()
|
||||
}
|
||||
|
||||
/// Wait for session completion.
|
||||
pub fn wait(&self) -> Result<EncryptedDocumentKeyShadow, Error> {
|
||||
Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone())
|
||||
}
|
||||
|
||||
/// Delegate session to other node.
|
||||
pub fn delegate(&self, master: NodeId, version: H256, is_shadow_decryption: bool) -> Result<(), Error> {
|
||||
if self.core.meta.master_node_id != self.core.meta.self_node_id {
|
||||
@ -555,19 +554,6 @@ impl ClusterSession for SessionImpl {
|
||||
}
|
||||
}
|
||||
|
||||
impl Session for SessionImpl {
|
||||
fn wait(&self) -> Result<EncryptedDocumentKeyShadow, Error> {
|
||||
let mut data = self.data.lock();
|
||||
if !data.result.is_some() {
|
||||
self.core.completed.wait(&mut data);
|
||||
}
|
||||
|
||||
data.result.as_ref()
|
||||
.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
|
||||
.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl SessionCore {
|
||||
pub fn decryption_transport(&self) -> DecryptionJobTransport {
|
||||
DecryptionJobTransport {
|
||||
@ -692,6 +678,7 @@ mod tests {
|
||||
let encrypted_datas: Vec<_> = (0..5).map(|i| DocumentKeyShare {
|
||||
author: Public::default(),
|
||||
threshold: 3,
|
||||
public: Default::default(),
|
||||
common_point: Some(common_point.clone()),
|
||||
encrypted_point: Some(encrypted_point.clone()),
|
||||
versions: vec![DocumentKeyShareVersion {
|
||||
@ -763,6 +750,7 @@ mod tests {
|
||||
key_share: Some(DocumentKeyShare {
|
||||
author: Public::default(),
|
||||
threshold: 0,
|
||||
public: Default::default(),
|
||||
common_point: Some(Random.generate().unwrap().public().clone()),
|
||||
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
||||
versions: vec![DocumentKeyShareVersion {
|
||||
@ -816,6 +804,7 @@ mod tests {
|
||||
key_share: Some(DocumentKeyShare {
|
||||
author: Public::default(),
|
||||
threshold: 2,
|
||||
public: Default::default(),
|
||||
common_point: Some(Random.generate().unwrap().public().clone()),
|
||||
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
||||
versions: vec![DocumentKeyShareVersion {
|
||||
|
@ -26,14 +26,6 @@ use key_server_cluster::cluster_sessions::ClusterSession;
|
||||
use key_server_cluster::message::{Message, EncryptionMessage, InitializeEncryptionSession,
|
||||
ConfirmEncryptionInitialization, EncryptionSessionError};
|
||||
|
||||
/// Encryption session API.
|
||||
pub trait Session: Send + Sync + 'static {
|
||||
/// Get encryption session state.
|
||||
fn state(&self) -> SessionState;
|
||||
/// Wait until session is completed. Returns distributely generated secret key.
|
||||
fn wait(&self, timeout: Option<time::Duration>) -> Result<(), Error>;
|
||||
}
|
||||
|
||||
/// Encryption (distributed key generation) session.
|
||||
/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper:
|
||||
/// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf
|
||||
@ -138,6 +130,12 @@ impl SessionImpl {
|
||||
&self.self_node_id
|
||||
}
|
||||
|
||||
/// Wait for session completion.
|
||||
pub fn wait(&self, timeout: Option<time::Duration>) -> Result<(), Error> {
|
||||
Self::wait_session(&self.completed, &self.data, timeout, |data| data.result.clone())
|
||||
}
|
||||
|
||||
|
||||
/// Start new session initialization. This must be called on master node.
|
||||
pub fn initialize(&self, requestor_signature: Signature, common_point: Public, encrypted_point: Public) -> Result<(), Error> {
|
||||
let mut data = self.data.lock();
|
||||
@ -328,26 +326,6 @@ impl ClusterSession for SessionImpl {
|
||||
}
|
||||
}
|
||||
|
||||
impl Session for SessionImpl {
|
||||
fn state(&self) -> SessionState {
|
||||
self.data.lock().state.clone()
|
||||
}
|
||||
|
||||
fn wait(&self, timeout: Option<time::Duration>) -> Result<(), Error> {
|
||||
let mut data = self.data.lock();
|
||||
if !data.result.is_some() {
|
||||
match timeout {
|
||||
None => self.completed.wait(&mut data),
|
||||
Some(timeout) => { self.completed.wait_for(&mut data, timeout); },
|
||||
}
|
||||
}
|
||||
|
||||
data.result.as_ref()
|
||||
.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
|
||||
.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for SessionImpl {
|
||||
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
|
||||
write!(f, "Encryption session {} on {}", self.id, self.self_node_id)
|
||||
|
@ -27,16 +27,6 @@ use key_server_cluster::cluster_sessions::ClusterSession;
|
||||
use key_server_cluster::message::{Message, GenerationMessage, InitializeSession, ConfirmInitialization, CompleteInitialization,
|
||||
KeysDissemination, PublicKeyShare, SessionError, SessionCompleted};
|
||||
|
||||
/// Key generation session API.
|
||||
pub trait Session: Send + Sync + 'static {
|
||||
/// Get generation session state.
|
||||
fn state(&self) -> SessionState;
|
||||
/// Wait until session is completed. Returns public portion of generated server key.
|
||||
fn wait(&self, timeout: Option<time::Duration>) -> Result<Public, Error>;
|
||||
/// Get joint public key (if it is known).
|
||||
fn joint_public_and_secret(&self) -> Option<Result<(Public, Secret), Error>>;
|
||||
}
|
||||
|
||||
/// Distributed key generation session.
|
||||
/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper:
|
||||
/// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf
|
||||
@ -226,6 +216,22 @@ impl SessionImpl {
|
||||
self.data.lock().simulate_faulty_behaviour = true;
|
||||
}
|
||||
|
||||
/// Get session state.
|
||||
pub fn state(&self) -> SessionState {
|
||||
self.data.lock().state.clone()
|
||||
}
|
||||
|
||||
/// Wait for session completion.
|
||||
pub fn wait(&self, timeout: Option<time::Duration>) -> Result<Public, Error> {
|
||||
Self::wait_session(&self.completed, &self.data, timeout, |data| data.joint_public_and_secret.clone()
|
||||
.map(|r| r.map(|r| r.0.clone())))
|
||||
}
|
||||
|
||||
/// Get generated public and secret (if any).
|
||||
pub fn joint_public_and_secret(&self) -> Option<Result<(Public, Secret), Error>> {
|
||||
self.data.lock().joint_public_and_secret.clone()
|
||||
}
|
||||
|
||||
/// Start new session initialization. This must be called on master node.
|
||||
pub fn initialize(&self, author: Public, threshold: usize, nodes: BTreeSet<NodeId>) -> Result<(), Error> {
|
||||
check_cluster_nodes(self.node(), &nodes)?;
|
||||
@ -502,10 +508,17 @@ impl SessionImpl {
|
||||
return Err(Error::InvalidMessage);
|
||||
}
|
||||
|
||||
// calculate joint public key
|
||||
let joint_public = {
|
||||
let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed"));
|
||||
math::compute_joint_public(public_shares)?
|
||||
};
|
||||
|
||||
// save encrypted data to key storage
|
||||
let encrypted_data = DocumentKeyShare {
|
||||
author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(),
|
||||
threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"),
|
||||
public: joint_public,
|
||||
common_point: None,
|
||||
encrypted_point: None,
|
||||
versions: vec![DocumentKeyShareVersion::new(
|
||||
@ -662,7 +675,7 @@ impl SessionImpl {
|
||||
fn complete_generation(&self) -> Result<(), Error> {
|
||||
let mut data = self.data.lock();
|
||||
|
||||
// else - calculate joint public key
|
||||
// calculate joint public key
|
||||
let joint_public = {
|
||||
let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed"));
|
||||
math::compute_joint_public(public_shares)?
|
||||
@ -672,6 +685,7 @@ impl SessionImpl {
|
||||
let encrypted_data = DocumentKeyShare {
|
||||
author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(),
|
||||
threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"),
|
||||
public: joint_public.clone(),
|
||||
common_point: None,
|
||||
encrypted_point: None,
|
||||
versions: vec![DocumentKeyShareVersion::new(
|
||||
@ -782,30 +796,6 @@ impl ClusterSession for SessionImpl {
|
||||
}
|
||||
}
|
||||
|
||||
impl Session for SessionImpl {
|
||||
fn state(&self) -> SessionState {
|
||||
self.data.lock().state.clone()
|
||||
}
|
||||
|
||||
fn wait(&self, timeout: Option<time::Duration>) -> Result<Public, Error> {
|
||||
let mut data = self.data.lock();
|
||||
if !data.joint_public_and_secret.is_some() {
|
||||
match timeout {
|
||||
None => self.completed.wait(&mut data),
|
||||
Some(timeout) => { self.completed.wait_for(&mut data, timeout); },
|
||||
}
|
||||
}
|
||||
|
||||
data.joint_public_and_secret.clone()
|
||||
.expect("checked above or waited for completed; completed is only signaled when joint_public.is_some(); qed")
|
||||
.map(|p| p.0)
|
||||
}
|
||||
|
||||
fn joint_public_and_secret(&self) -> Option<Result<(Public, Secret), Error>> {
|
||||
self.data.lock().joint_public_and_secret.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl EveryOtherNodeVisitor {
|
||||
pub fn new<I>(self_id: &NodeId, nodes: I) -> Self where I: Iterator<Item=NodeId> {
|
||||
EveryOtherNodeVisitor {
|
||||
@ -883,7 +873,7 @@ pub mod tests {
|
||||
use key_server_cluster::message::{self, Message, GenerationMessage};
|
||||
use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until, all_connections_established};
|
||||
use key_server_cluster::cluster_sessions::ClusterSession;
|
||||
use key_server_cluster::generation_session::{Session, SessionImpl, SessionState, SessionParams};
|
||||
use key_server_cluster::generation_session::{SessionImpl, SessionState, SessionParams};
|
||||
use key_server_cluster::math;
|
||||
use key_server_cluster::math::tests::do_encryption_and_decryption;
|
||||
|
||||
|
@ -23,7 +23,7 @@ use key_server_cluster::{Error, NodeId, SessionId, SessionMeta, AclStorage, Docu
|
||||
use key_server_cluster::cluster::{Cluster};
|
||||
use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession};
|
||||
use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionParams as GenerationSessionParams,
|
||||
Session as GenerationSessionApi, SessionState as GenerationSessionState};
|
||||
SessionState as GenerationSessionState};
|
||||
use key_server_cluster::message::{Message, SigningMessage, SigningConsensusMessage, SigningGenerationMessage,
|
||||
RequestPartialSignature, PartialSignature, SigningSessionCompleted, GenerationMessage, ConsensusMessage, SigningSessionError,
|
||||
InitializeConsensusSession, ConfirmConsensusInitialization, SigningSessionDelegation, SigningSessionDelegationCompleted};
|
||||
@ -32,12 +32,6 @@ use key_server_cluster::jobs::key_access_job::KeyAccessJob;
|
||||
use key_server_cluster::jobs::signing_job::{PartialSigningRequest, PartialSigningResponse, SigningJob};
|
||||
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
|
||||
|
||||
/// Signing session API.
|
||||
pub trait Session: Send + Sync + 'static {
|
||||
/// Wait until session is completed. Returns signed message.
|
||||
fn wait(&self) -> Result<(Secret, Secret), Error>;
|
||||
}
|
||||
|
||||
/// Distributed signing session.
|
||||
/// Based on "Efficient Multi-Party Digital Signature using Adaptive Secret Sharing for Low-Power Devices in Wireless Network" paper.
|
||||
/// Brief overview:
|
||||
@ -211,6 +205,11 @@ impl SessionImpl {
|
||||
self.data.lock().state
|
||||
}
|
||||
|
||||
/// Wait for session completion.
|
||||
pub fn wait(&self) -> Result<(Secret, Secret), Error> {
|
||||
Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone())
|
||||
}
|
||||
|
||||
/// Delegate session to other node.
|
||||
pub fn delegate(&self, master: NodeId, version: H256, message_hash: H256) -> Result<(), Error> {
|
||||
if self.core.meta.master_node_id != self.core.meta.self_node_id {
|
||||
@ -680,19 +679,6 @@ impl ClusterSession for SessionImpl {
|
||||
}
|
||||
}
|
||||
|
||||
impl Session for SessionImpl {
|
||||
fn wait(&self) -> Result<(Secret, Secret), Error> {
|
||||
let mut data = self.data.lock();
|
||||
if !data.result.is_some() {
|
||||
self.core.completed.wait(&mut data);
|
||||
}
|
||||
|
||||
data.result.as_ref()
|
||||
.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
|
||||
.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl SessionKeyGenerationTransport {
|
||||
fn map_message(&self, message: Message) -> Result<Message, Error> {
|
||||
match message {
|
||||
@ -819,12 +805,11 @@ mod tests {
|
||||
use key_server_cluster::{NodeId, DummyKeyStorage, DocumentKeyShare, DocumentKeyShareVersion, SessionId, SessionMeta, Error, KeyStorage};
|
||||
use key_server_cluster::cluster_sessions::ClusterSession;
|
||||
use key_server_cluster::cluster::tests::DummyCluster;
|
||||
use key_server_cluster::generation_session::{Session as GenerationSession};
|
||||
use key_server_cluster::generation_session::tests::MessageLoop as KeyGenerationMessageLoop;
|
||||
use key_server_cluster::math;
|
||||
use key_server_cluster::message::{Message, SigningMessage, SigningConsensusMessage, ConsensusMessage, ConfirmConsensusInitialization,
|
||||
SigningGenerationMessage, GenerationMessage, ConfirmInitialization, InitializeSession, RequestPartialSignature};
|
||||
use key_server_cluster::signing_session::{Session, SessionImpl, SessionState, SessionParams};
|
||||
use key_server_cluster::signing_session::{SessionImpl, SessionState, SessionParams};
|
||||
|
||||
struct Node {
|
||||
pub node_id: NodeId,
|
||||
@ -986,6 +971,7 @@ mod tests {
|
||||
key_share: Some(DocumentKeyShare {
|
||||
author: Public::default(),
|
||||
threshold: 0,
|
||||
public: Default::default(),
|
||||
common_point: Some(Random.generate().unwrap().public().clone()),
|
||||
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
||||
versions: vec![DocumentKeyShareVersion {
|
||||
@ -1039,6 +1025,7 @@ mod tests {
|
||||
key_share: Some(DocumentKeyShare {
|
||||
author: Public::default(),
|
||||
threshold: 2,
|
||||
public: Default::default(),
|
||||
common_point: Some(Random.generate().unwrap().public().clone()),
|
||||
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
||||
versions: vec![DocumentKeyShareVersion {
|
||||
|
@ -29,18 +29,15 @@ use tokio_core::net::{TcpListener, TcpStream};
|
||||
use ethkey::{Public, KeyPair, Signature, Random, Generator};
|
||||
use bigint::hash::H256;
|
||||
use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair};
|
||||
use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, GenerationSessionWrapper, EncryptionSessionWrapper,
|
||||
DecryptionSessionWrapper, SigningSessionWrapper, AdminSessionWrapper, KeyNegotiationSessionWrapper, SessionIdWithSubSession,
|
||||
ClusterSessionsContainer, SERVERS_SET_CHANGE_SESSION_ID, create_cluster_view, AdminSessionCreationData};
|
||||
use key_server_cluster::cluster_sessions::{ClusterSession, AdminSession, ClusterSessions, SessionIdWithSubSession,
|
||||
ClusterSessionsContainer, SERVERS_SET_CHANGE_SESSION_ID, create_cluster_view, AdminSessionCreationData, ClusterSessionsListener};
|
||||
use key_server_cluster::cluster_sessions_creator::{ClusterSessionCreator, IntoSessionId};
|
||||
use key_server_cluster::message::{self, Message, ClusterMessage};
|
||||
use key_server_cluster::generation_session::{Session as GenerationSession};
|
||||
#[cfg(test)]
|
||||
use key_server_cluster::generation_session::SessionImpl as GenerationSessionImpl;
|
||||
use key_server_cluster::decryption_session::{Session as DecryptionSession};
|
||||
use key_server_cluster::encryption_session::{Session as EncryptionSession};
|
||||
use key_server_cluster::signing_session::{Session as SigningSession};
|
||||
use key_server_cluster::key_version_negotiation_session::{Session as KeyVersionNegotiationSession, SessionImpl as KeyVersionNegotiationSessionImpl,
|
||||
use key_server_cluster::generation_session::{SessionImpl as GenerationSession};
|
||||
use key_server_cluster::decryption_session::{SessionImpl as DecryptionSession};
|
||||
use key_server_cluster::encryption_session::{SessionImpl as EncryptionSession};
|
||||
use key_server_cluster::signing_session::{SessionImpl as SigningSession};
|
||||
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession,
|
||||
IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction};
|
||||
use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream, read_encrypted_message, WriteMessage, write_encrypted_message};
|
||||
use key_server_cluster::net::{accept_connection as net_accept_connection, connect as net_connect, Connection as NetConnection};
|
||||
@ -74,16 +71,19 @@ pub trait ClusterClient: Send + Sync {
|
||||
/// Start new signing session.
|
||||
fn new_signing_session(&self, session_id: SessionId, requestor_signature: Signature, version: Option<H256>, message_hash: H256) -> Result<Arc<SigningSession>, Error>;
|
||||
/// Start new key version negotiation session.
|
||||
fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result<Arc<KeyVersionNegotiationSession>, Error>;
|
||||
fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>, Error>;
|
||||
/// Start new servers set change session.
|
||||
fn new_servers_set_change_session(&self, session_id: Option<SessionId>, new_nodes_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSessionWrapper>, Error>;
|
||||
fn new_servers_set_change_session(&self, session_id: Option<SessionId>, new_nodes_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSession>, Error>;
|
||||
|
||||
/// Listen for new generation sessions.
|
||||
fn add_generation_listener(&self, listener: Arc<ClusterSessionsListener<GenerationSession>>);
|
||||
|
||||
/// Ask node to make 'faulty' generation sessions.
|
||||
#[cfg(test)]
|
||||
fn make_faulty_generation_sessions(&self);
|
||||
/// Get active generation session with given id.
|
||||
#[cfg(test)]
|
||||
fn generation_session(&self, session_id: &SessionId) -> Option<Arc<GenerationSessionImpl>>;
|
||||
fn generation_session(&self, session_id: &SessionId) -> Option<Arc<GenerationSession>>;
|
||||
/// Try connect to disconnected nodes.
|
||||
#[cfg(test)]
|
||||
fn connect(&self);
|
||||
@ -446,7 +446,7 @@ impl ClusterCore {
|
||||
}
|
||||
|
||||
/// Try to contnue session.
|
||||
fn try_continue_session(data: &Arc<ClusterData>, session: Option<Arc<KeyVersionNegotiationSessionImpl<KeyVersionNegotiationSessionTransport>>>) {
|
||||
fn try_continue_session(data: &Arc<ClusterData>, session: Option<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>) {
|
||||
if let Some(session) = session {
|
||||
let meta = session.meta();
|
||||
let is_master_node = meta.self_node_id == meta.master_node_id;
|
||||
@ -741,11 +741,6 @@ impl ClusterData {
|
||||
self.connections.get(node)
|
||||
}
|
||||
|
||||
/// Get sessions reference.
|
||||
pub fn sessions(&self) -> &ClusterSessions {
|
||||
&self.sessions
|
||||
}
|
||||
|
||||
/// Spawns a future using thread pool and schedules execution of it with event loop handle.
|
||||
pub fn spawn<F>(&self, f: F) where F: Future + Send + 'static, F::Item: Send + 'static, F::Error: Send + 'static {
|
||||
let pool_work = self.pool.spawn(f);
|
||||
@ -842,7 +837,7 @@ impl ClusterClientImpl {
|
||||
}
|
||||
}
|
||||
|
||||
fn create_key_version_negotiation_session(&self, session_id: SessionId) -> Result<Arc<KeyVersionNegotiationSessionImpl<KeyVersionNegotiationSessionTransport>>, Error> {
|
||||
fn create_key_version_negotiation_session(&self, session_id: SessionId) -> Result<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>, Error> {
|
||||
let mut connected_nodes = self.data.connections.connected_nodes();
|
||||
connected_nodes.insert(self.data.self_key_pair.public().clone());
|
||||
|
||||
@ -872,7 +867,7 @@ impl ClusterClient for ClusterClientImpl {
|
||||
let cluster = create_cluster_view(&self.data, true)?;
|
||||
let session = self.data.sessions.generation_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?;
|
||||
match session.initialize(author, threshold, connected_nodes) {
|
||||
Ok(()) => Ok(GenerationSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)),
|
||||
Ok(()) => Ok(session),
|
||||
Err(error) => {
|
||||
self.data.sessions.generation_sessions.remove(&session.id());
|
||||
Err(error)
|
||||
@ -887,7 +882,7 @@ impl ClusterClient for ClusterClientImpl {
|
||||
let cluster = create_cluster_view(&self.data, true)?;
|
||||
let session = self.data.sessions.encryption_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?;
|
||||
match session.initialize(requestor_signature, common_point, encrypted_point) {
|
||||
Ok(()) => Ok(EncryptionSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)),
|
||||
Ok(()) => Ok(session),
|
||||
Err(error) => {
|
||||
self.data.sessions.encryption_sessions.remove(&session.id());
|
||||
Err(error)
|
||||
@ -916,7 +911,7 @@ impl ClusterClient for ClusterClientImpl {
|
||||
};
|
||||
|
||||
match initialization_result {
|
||||
Ok(()) => Ok(DecryptionSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)),
|
||||
Ok(()) => Ok(session),
|
||||
Err(error) => {
|
||||
self.data.sessions.decryption_sessions.remove(&session.id());
|
||||
Err(error)
|
||||
@ -945,7 +940,7 @@ impl ClusterClient for ClusterClientImpl {
|
||||
};
|
||||
|
||||
match initialization_result {
|
||||
Ok(()) => Ok(SigningSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)),
|
||||
Ok(()) => Ok(session),
|
||||
Err(error) => {
|
||||
self.data.sessions.signing_sessions.remove(&session.id());
|
||||
Err(error)
|
||||
@ -953,12 +948,12 @@ impl ClusterClient for ClusterClientImpl {
|
||||
}
|
||||
}
|
||||
|
||||
fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result<Arc<KeyVersionNegotiationSession>, Error> {
|
||||
fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>, Error> {
|
||||
let session = self.create_key_version_negotiation_session(session_id)?;
|
||||
Ok(KeyNegotiationSessionWrapper::new(Arc::downgrade(&self.data), session.id(), session))
|
||||
Ok(session)
|
||||
}
|
||||
|
||||
fn new_servers_set_change_session(&self, session_id: Option<SessionId>, new_nodes_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSessionWrapper>, Error> {
|
||||
fn new_servers_set_change_session(&self, session_id: Option<SessionId>, new_nodes_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSession>, Error> {
|
||||
let mut connected_nodes = self.data.connections.connected_nodes();
|
||||
connected_nodes.insert(self.data.self_key_pair.public().clone());
|
||||
|
||||
@ -974,7 +969,7 @@ impl ClusterClient for ClusterClientImpl {
|
||||
.initialize(new_nodes_set, old_set_signature, new_set_signature);
|
||||
|
||||
match initialization_result {
|
||||
Ok(()) => Ok(AdminSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)),
|
||||
Ok(()) => Ok(session),
|
||||
Err(error) => {
|
||||
self.data.sessions.admin_sessions.remove(&session.id());
|
||||
Err(error)
|
||||
@ -982,6 +977,10 @@ impl ClusterClient for ClusterClientImpl {
|
||||
}
|
||||
}
|
||||
|
||||
fn add_generation_listener(&self, listener: Arc<ClusterSessionsListener<GenerationSession>>) {
|
||||
self.data.sessions.generation_sessions.add_listener(listener);
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn connect(&self) {
|
||||
ClusterCore::connect_disconnected_nodes(self.data.clone());
|
||||
@ -993,7 +992,7 @@ impl ClusterClient for ClusterClientImpl {
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn generation_session(&self, session_id: &SessionId) -> Option<Arc<GenerationSessionImpl>> {
|
||||
fn generation_session(&self, session_id: &SessionId) -> Option<Arc<GenerationSession>> {
|
||||
self.data.sessions.generation_sessions.get(session_id, false)
|
||||
}
|
||||
|
||||
@ -1015,12 +1014,21 @@ pub mod tests {
|
||||
use std::collections::{BTreeSet, VecDeque};
|
||||
use parking_lot::Mutex;
|
||||
use tokio_core::reactor::Core;
|
||||
use ethkey::{Random, Generator, Public, sign};
|
||||
use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet, PlainNodeKeyPair};
|
||||
use bigint::hash::H256;
|
||||
use ethkey::{Random, Generator, Public, Signature, sign};
|
||||
use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet, PlainNodeKeyPair, KeyStorage};
|
||||
use key_server_cluster::message::Message;
|
||||
use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration};
|
||||
use key_server_cluster::cluster_sessions::ClusterSession;
|
||||
use key_server_cluster::generation_session::{Session as GenerationSession, SessionState as GenerationSessionState};
|
||||
use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration, ClusterClient, ClusterState};
|
||||
use key_server_cluster::cluster_sessions::{ClusterSession, AdminSession, ClusterSessionsListener};
|
||||
use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionState as GenerationSessionState};
|
||||
use key_server_cluster::decryption_session::{SessionImpl as DecryptionSession};
|
||||
use key_server_cluster::encryption_session::{SessionImpl as EncryptionSession};
|
||||
use key_server_cluster::signing_session::{SessionImpl as SigningSession};
|
||||
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession,
|
||||
IsolatedSessionTransport as KeyVersionNegotiationSessionTransport};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct DummyClusterClient;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct DummyCluster {
|
||||
@ -1034,6 +1042,23 @@ pub mod tests {
|
||||
messages: VecDeque<(NodeId, Message)>,
|
||||
}
|
||||
|
||||
impl ClusterClient for DummyClusterClient {
|
||||
fn cluster_state(&self) -> ClusterState { unimplemented!() }
|
||||
fn new_generation_session(&self, _session_id: SessionId, _author: Public, _threshold: usize) -> Result<Arc<GenerationSession>, Error> { unimplemented!() }
|
||||
fn new_encryption_session(&self, _session_id: SessionId, _requestor_signature: Signature, _common_point: Public, _encrypted_point: Public) -> Result<Arc<EncryptionSession>, Error> { unimplemented!() }
|
||||
fn new_decryption_session(&self, _session_id: SessionId, _requestor_signature: Signature, _version: Option<H256>, _is_shadow_decryption: bool) -> Result<Arc<DecryptionSession>, Error> { unimplemented!() }
|
||||
fn new_signing_session(&self, _session_id: SessionId, _requestor_signature: Signature, _version: Option<H256>, _message_hash: H256) -> Result<Arc<SigningSession>, Error> { unimplemented!() }
|
||||
fn new_key_version_negotiation_session(&self, _session_id: SessionId) -> Result<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>, Error> { unimplemented!() }
|
||||
fn new_servers_set_change_session(&self, _session_id: Option<SessionId>, _new_nodes_set: BTreeSet<NodeId>, _old_set_signature: Signature, _new_set_signature: Signature) -> Result<Arc<AdminSession>, Error> { unimplemented!() }
|
||||
|
||||
fn add_generation_listener(&self, _listener: Arc<ClusterSessionsListener<GenerationSession>>) {}
|
||||
|
||||
fn make_faulty_generation_sessions(&self) { unimplemented!() }
|
||||
fn generation_session(&self, _session_id: &SessionId) -> Option<Arc<GenerationSession>> { unimplemented!() }
|
||||
fn connect(&self) { unimplemented!() }
|
||||
fn key_storage(&self) -> Arc<KeyStorage> { unimplemented!() }
|
||||
}
|
||||
|
||||
impl DummyCluster {
|
||||
pub fn new(id: NodeId) -> Self {
|
||||
DummyCluster {
|
||||
|
@ -18,23 +18,20 @@ use std::time;
|
||||
use std::sync::{Arc, Weak};
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::collections::{VecDeque, BTreeMap};
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use parking_lot::{Mutex, RwLock, Condvar};
|
||||
use bigint::hash::H256;
|
||||
use ethkey::{Public, Secret, Signature};
|
||||
use key_server_cluster::{Error, NodeId, SessionId, EncryptedDocumentKeyShadow};
|
||||
use ethkey::{Secret, Signature};
|
||||
use key_server_cluster::{Error, NodeId, SessionId};
|
||||
use key_server_cluster::cluster::{Cluster, ClusterData, ClusterConfiguration, ClusterView};
|
||||
use key_server_cluster::message::{self, Message};
|
||||
use key_server_cluster::generation_session::{Session as GenerationSession, SessionImpl as GenerationSessionImpl,
|
||||
SessionState as GenerationSessionState};
|
||||
use key_server_cluster::decryption_session::{Session as DecryptionSession, SessionImpl as DecryptionSessionImpl};
|
||||
use key_server_cluster::encryption_session::{Session as EncryptionSession, SessionImpl as EncryptionSessionImpl,
|
||||
SessionState as EncryptionSessionState};
|
||||
use key_server_cluster::signing_session::{Session as SigningSession, SessionImpl as SigningSessionImpl};
|
||||
use key_server_cluster::share_add_session::{Session as ShareAddSession, SessionImpl as ShareAddSessionImpl,
|
||||
IsolatedSessionTransport as ShareAddTransport};
|
||||
use key_server_cluster::servers_set_change_session::{Session as ServersSetChangeSession, SessionImpl as ServersSetChangeSessionImpl};
|
||||
use key_server_cluster::key_version_negotiation_session::{Session as KeyVersionNegotiationSession, SessionImpl as KeyVersionNegotiationSessionImpl,
|
||||
IsolatedSessionTransport as VersionNegotiationTransport, ContinueAction};
|
||||
use key_server_cluster::generation_session::{SessionImpl as GenerationSessionImpl};
|
||||
use key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl};
|
||||
use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl};
|
||||
use key_server_cluster::signing_session::{SessionImpl as SigningSessionImpl};
|
||||
use key_server_cluster::share_add_session::{SessionImpl as ShareAddSessionImpl, IsolatedSessionTransport as ShareAddTransport};
|
||||
use key_server_cluster::servers_set_change_session::{SessionImpl as ServersSetChangeSessionImpl};
|
||||
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl,
|
||||
IsolatedSessionTransport as VersionNegotiationTransport};
|
||||
|
||||
use key_server_cluster::cluster_sessions_creator::{GenerationSessionCreator, EncryptionSessionCreator, DecryptionSessionCreator, SigningSessionCreator,
|
||||
KeyVersionNegotiationSessionCreator, AdminSessionCreator, SessionCreatorCore, ClusterSessionCreator};
|
||||
@ -82,6 +79,25 @@ pub trait ClusterSession {
|
||||
fn on_session_error(&self, sender: &NodeId, error: Error);
|
||||
/// Process session message.
|
||||
fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error>;
|
||||
|
||||
/// 'Wait for session completion' helper.
|
||||
fn wait_session<T, U, F: Fn(&U) -> Option<Result<T, Error>>>(completion_event: &Condvar, session_data: &Mutex<U>, timeout: Option<time::Duration>, result_reader: F) -> Result<T, Error> {
|
||||
let mut locked_data = session_data.lock();
|
||||
match result_reader(&locked_data) {
|
||||
Some(result) => result,
|
||||
None => {
|
||||
match timeout {
|
||||
None => completion_event.wait(&mut locked_data),
|
||||
Some(timeout) => {
|
||||
completion_event.wait_for(&mut locked_data, timeout);
|
||||
},
|
||||
}
|
||||
|
||||
result_reader(&locked_data)
|
||||
.expect("waited for completion; completion is only signaled when result.is_some(); qed")
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Administrative session.
|
||||
@ -120,12 +136,22 @@ pub struct ClusterSessions {
|
||||
creator_core: Arc<SessionCreatorCore>,
|
||||
}
|
||||
|
||||
/// Active sessions container listener.
|
||||
pub trait ClusterSessionsListener<S: ClusterSession>: Send + Sync {
|
||||
/// When new session is inserted to the container.
|
||||
fn on_session_inserted(&self, _session: Arc<S>) {}
|
||||
/// When session is removed from the container.
|
||||
fn on_session_removed(&self, _session: Arc<S>) {}
|
||||
}
|
||||
|
||||
/// Active sessions container.
|
||||
pub struct ClusterSessionsContainer<S: ClusterSession, SC: ClusterSessionCreator<S, D>, D> {
|
||||
/// Sessions creator.
|
||||
pub creator: SC,
|
||||
/// Active sessions.
|
||||
sessions: RwLock<BTreeMap<S::Id, QueuedSession<S>>>,
|
||||
/// Listeners. Lock order: sessions -> listeners.
|
||||
listeners: Mutex<Vec<Weak<ClusterSessionsListener<S>>>>,
|
||||
/// Sessions container state.
|
||||
container_state: Arc<Mutex<ClusterSessionsContainerState>>,
|
||||
/// Phantom data.
|
||||
@ -159,66 +185,6 @@ pub enum ClusterSessionsContainerState {
|
||||
Exclusive,
|
||||
}
|
||||
|
||||
/// Generation session implementation, which removes session from cluster on drop.
|
||||
pub struct GenerationSessionWrapper {
|
||||
/// Wrapped session.
|
||||
session: Arc<GenerationSession>,
|
||||
/// Session Id.
|
||||
session_id: SessionId,
|
||||
/// Cluster data reference.
|
||||
cluster: Weak<ClusterData>,
|
||||
}
|
||||
|
||||
/// Encryption session implementation, which removes session from cluster on drop.
|
||||
pub struct EncryptionSessionWrapper {
|
||||
/// Wrapped session.
|
||||
session: Arc<EncryptionSession>,
|
||||
/// Session Id.
|
||||
session_id: SessionId,
|
||||
/// Cluster data reference.
|
||||
cluster: Weak<ClusterData>,
|
||||
}
|
||||
|
||||
/// Decryption session implementation, which removes session from cluster on drop.
|
||||
pub struct DecryptionSessionWrapper {
|
||||
/// Wrapped session.
|
||||
session: Arc<DecryptionSession>,
|
||||
/// Session Id.
|
||||
session_id: SessionIdWithSubSession,
|
||||
/// Cluster data reference.
|
||||
cluster: Weak<ClusterData>,
|
||||
}
|
||||
|
||||
/// Signing session implementation, which removes session from cluster on drop.
|
||||
pub struct SigningSessionWrapper {
|
||||
/// Wrapped session.
|
||||
session: Arc<SigningSession>,
|
||||
/// Session Id.
|
||||
session_id: SessionIdWithSubSession,
|
||||
/// Cluster data reference.
|
||||
cluster: Weak<ClusterData>,
|
||||
}
|
||||
|
||||
/// Admin session implementation, which removes session from cluster on drop.
|
||||
pub struct AdminSessionWrapper {
|
||||
/// Wrapped session.
|
||||
session: Arc<AdminSession>,
|
||||
/// Session Id.
|
||||
session_id: SessionId,
|
||||
/// Cluster data reference.
|
||||
cluster: Weak<ClusterData>,
|
||||
}
|
||||
|
||||
/// Key server version negotiation session implementation, which removes session from cluster on drop.
|
||||
pub struct KeyNegotiationSessionWrapper {
|
||||
/// Wrapped session.
|
||||
session: Arc<KeyVersionNegotiationSession>,
|
||||
/// Session Id.
|
||||
session_id: SessionIdWithSubSession,
|
||||
/// Cluster data reference.
|
||||
cluster: Weak<ClusterData>,
|
||||
}
|
||||
|
||||
impl ClusterSessions {
|
||||
/// Create new cluster sessions container.
|
||||
pub fn new(config: &ClusterConfiguration) -> Self {
|
||||
@ -294,11 +260,16 @@ impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: C
|
||||
ClusterSessionsContainer {
|
||||
creator: creator,
|
||||
sessions: RwLock::new(BTreeMap::new()),
|
||||
listeners: Mutex::new(Vec::new()),
|
||||
container_state: container_state,
|
||||
_pd: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_listener(&self, listener: Arc<ClusterSessionsListener<S>>) {
|
||||
self.listeners.lock().push(Arc::downgrade(&listener));
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.sessions.read().is_empty()
|
||||
}
|
||||
@ -342,12 +313,15 @@ impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: C
|
||||
queue: VecDeque::new(),
|
||||
};
|
||||
sessions.insert(session_id, queued_session);
|
||||
self.notify_listeners(|l| l.on_session_inserted(session.clone()));
|
||||
|
||||
Ok(session)
|
||||
}
|
||||
|
||||
pub fn remove(&self, session_id: &S::Id) {
|
||||
if self.sessions.write().remove(session_id).is_some() {
|
||||
if let Some(session) = self.sessions.write().remove(session_id) {
|
||||
self.container_state.lock().on_session_completed();
|
||||
self.notify_listeners(|l| l.on_session_removed(session.session.clone()));
|
||||
}
|
||||
}
|
||||
|
||||
@ -394,6 +368,22 @@ impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: C
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn notify_listeners<F: Fn(&ClusterSessionsListener<S>) -> ()>(&self, callback: F) {
|
||||
let mut listeners = self.listeners.lock();
|
||||
let mut listener_index = 0;
|
||||
while listener_index < listeners.len() {
|
||||
match listeners[listener_index].upgrade() {
|
||||
Some(listener) => {
|
||||
callback(&*listener);
|
||||
listener_index += 1;
|
||||
},
|
||||
None => {
|
||||
listeners.swap_remove(listener_index);
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: ClusterSessionCreator<S, D>, SessionId: From<S::Id> {
|
||||
@ -545,158 +535,6 @@ impl ClusterSession for AdminSession {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl GenerationSessionWrapper {
|
||||
pub fn new(cluster: Weak<ClusterData>, session_id: SessionId, session: Arc<GenerationSession>) -> Arc<Self> {
|
||||
Arc::new(GenerationSessionWrapper {
|
||||
session: session,
|
||||
session_id: session_id,
|
||||
cluster: cluster,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl GenerationSession for GenerationSessionWrapper {
|
||||
fn state(&self) -> GenerationSessionState {
|
||||
self.session.state()
|
||||
}
|
||||
|
||||
fn wait(&self, timeout: Option<time::Duration>) -> Result<Public, Error> {
|
||||
self.session.wait(timeout)
|
||||
}
|
||||
|
||||
fn joint_public_and_secret(&self) -> Option<Result<(Public, Secret), Error>> {
|
||||
self.session.joint_public_and_secret()
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for GenerationSessionWrapper {
|
||||
fn drop(&mut self) {
|
||||
if let Some(cluster) = self.cluster.upgrade() {
|
||||
cluster.sessions().generation_sessions.remove(&self.session_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl EncryptionSessionWrapper {
|
||||
pub fn new(cluster: Weak<ClusterData>, session_id: SessionId, session: Arc<EncryptionSession>) -> Arc<Self> {
|
||||
Arc::new(EncryptionSessionWrapper {
|
||||
session: session,
|
||||
session_id: session_id,
|
||||
cluster: cluster,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl EncryptionSession for EncryptionSessionWrapper {
|
||||
fn state(&self) -> EncryptionSessionState {
|
||||
self.session.state()
|
||||
}
|
||||
|
||||
fn wait(&self, timeout: Option<time::Duration>) -> Result<(), Error> {
|
||||
self.session.wait(timeout)
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for EncryptionSessionWrapper {
|
||||
fn drop(&mut self) {
|
||||
if let Some(cluster) = self.cluster.upgrade() {
|
||||
cluster.sessions().encryption_sessions.remove(&self.session_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DecryptionSessionWrapper {
|
||||
pub fn new(cluster: Weak<ClusterData>, session_id: SessionIdWithSubSession, session: Arc<DecryptionSession>) -> Arc<Self> {
|
||||
Arc::new(DecryptionSessionWrapper {
|
||||
session: session,
|
||||
session_id: session_id,
|
||||
cluster: cluster,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl DecryptionSession for DecryptionSessionWrapper {
|
||||
fn wait(&self) -> Result<EncryptedDocumentKeyShadow, Error> {
|
||||
self.session.wait()
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for DecryptionSessionWrapper {
|
||||
fn drop(&mut self) {
|
||||
if let Some(cluster) = self.cluster.upgrade() {
|
||||
cluster.sessions().decryption_sessions.remove(&self.session_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SigningSessionWrapper {
|
||||
pub fn new(cluster: Weak<ClusterData>, session_id: SessionIdWithSubSession, session: Arc<SigningSession>) -> Arc<Self> {
|
||||
Arc::new(SigningSessionWrapper {
|
||||
session: session,
|
||||
session_id: session_id,
|
||||
cluster: cluster,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl SigningSession for SigningSessionWrapper {
|
||||
fn wait(&self) -> Result<(Secret, Secret), Error> {
|
||||
self.session.wait()
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for SigningSessionWrapper {
|
||||
fn drop(&mut self) {
|
||||
if let Some(cluster) = self.cluster.upgrade() {
|
||||
cluster.sessions().signing_sessions.remove(&self.session_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AdminSessionWrapper {
|
||||
pub fn new(cluster: Weak<ClusterData>, session_id: SessionId, session: Arc<AdminSession>) -> Arc<Self> {
|
||||
Arc::new(AdminSessionWrapper {
|
||||
session: session,
|
||||
session_id: session_id,
|
||||
cluster: cluster,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn wait(&self) -> Result<(), Error> {
|
||||
match *self.session {
|
||||
AdminSession::ShareAdd(ref session) => session.wait(),
|
||||
AdminSession::ServersSetChange(ref session) => session.wait(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ShareAddSession for AdminSessionWrapper {
|
||||
fn wait(&self) -> Result<(), Error> {
|
||||
match *self.session {
|
||||
AdminSession::ShareAdd(ref session) => session.wait(),
|
||||
_ => Err(Error::InvalidMessage),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ServersSetChangeSession for AdminSessionWrapper {
|
||||
fn wait(&self) -> Result<(), Error> {
|
||||
match *self.session {
|
||||
AdminSession::ServersSetChange(ref session) => session.wait(),
|
||||
_ => Err(Error::InvalidMessage),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for AdminSessionWrapper {
|
||||
fn drop(&mut self) {
|
||||
if let Some(cluster) = self.cluster.upgrade() {
|
||||
cluster.sessions().admin_sessions.remove(&self.session_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_cluster_view(data: &Arc<ClusterData>, requires_all_connections: bool) -> Result<Arc<Cluster>, Error> {
|
||||
if requires_all_connections {
|
||||
if !data.connections.disconnected_nodes().is_empty() {
|
||||
@ -710,39 +548,6 @@ pub fn create_cluster_view(data: &Arc<ClusterData>, requires_all_connections: bo
|
||||
Ok(Arc::new(ClusterView::new(data.clone(), connected_nodes)))
|
||||
}
|
||||
|
||||
impl KeyNegotiationSessionWrapper {
|
||||
pub fn new(cluster: Weak<ClusterData>, session_id: SessionIdWithSubSession, session: Arc<KeyVersionNegotiationSession>) -> Arc<Self> {
|
||||
Arc::new(KeyNegotiationSessionWrapper {
|
||||
session: session,
|
||||
session_id: session_id,
|
||||
cluster: cluster,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl KeyVersionNegotiationSession for KeyNegotiationSessionWrapper {
|
||||
fn set_continue_action(&self, action: ContinueAction) {
|
||||
self.session.set_continue_action(action)
|
||||
}
|
||||
|
||||
fn continue_action(&self) -> Option<ContinueAction> {
|
||||
self.session.continue_action()
|
||||
}
|
||||
|
||||
fn wait(&self) -> Result<(H256, NodeId), Error> {
|
||||
self.session.wait()
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for KeyNegotiationSessionWrapper {
|
||||
fn drop(&mut self) {
|
||||
if let Some(cluster) = self.cluster.upgrade() {
|
||||
cluster.sessions().negotiation_sessions.remove(&self.session_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
@ -773,6 +773,8 @@ pub struct KeyShareCommon {
|
||||
pub threshold: usize,
|
||||
/// Author of key share entry.
|
||||
pub author: SerializablePublic,
|
||||
/// Joint public.
|
||||
pub joint_public: SerializablePublic,
|
||||
/// Common (shared) encryption point.
|
||||
pub common_point: Option<SerializablePublic>,
|
||||
/// Encrypted point.
|
||||
|
@ -27,9 +27,9 @@ pub use super::key_storage::{KeyStorage, DocumentKeyShare, DocumentKeyShareVersi
|
||||
pub use super::key_server_set::KeyServerSet;
|
||||
pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic, SerializableMessageHash};
|
||||
pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient};
|
||||
pub use self::generation_session::Session as GenerationSession;
|
||||
pub use self::encryption_session::Session as EncryptionSession;
|
||||
pub use self::decryption_session::Session as DecryptionSession;
|
||||
pub use self::cluster_sessions::{ClusterSession, ClusterSessionsListener};
|
||||
#[cfg(test)]
|
||||
pub use self::cluster::tests::DummyClusterClient;
|
||||
|
||||
#[cfg(test)]
|
||||
pub use super::node_key_pair::PlainNodeKeyPair;
|
||||
|
@ -14,7 +14,7 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
use std::sync::Arc;
|
||||
use std::net::SocketAddr;
|
||||
use std::collections::BTreeMap;
|
||||
use futures::{future, Future};
|
||||
@ -27,6 +27,7 @@ use bigint::hash::H256;
|
||||
use util::Address;
|
||||
use bytes::Bytes;
|
||||
use types::all::{Error, Public, NodeAddress};
|
||||
use trusted_client::TrustedClient;
|
||||
|
||||
const KEY_SERVER_SET_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_server_set";
|
||||
|
||||
@ -55,7 +56,7 @@ pub struct OnChainKeyServerSet {
|
||||
/// Cached on-chain Key Server set contract.
|
||||
struct CachedContract {
|
||||
/// Blockchain client.
|
||||
client: Weak<Client>,
|
||||
client: TrustedClient,
|
||||
/// Contract address.
|
||||
contract_addr: Option<Address>,
|
||||
/// Active set of key servers.
|
||||
@ -63,19 +64,14 @@ struct CachedContract {
|
||||
}
|
||||
|
||||
impl OnChainKeyServerSet {
|
||||
pub fn new(client: &Arc<Client>, key_servers: BTreeMap<Public, NodeAddress>) -> Result<Arc<Self>, Error> {
|
||||
let mut cached_contract = CachedContract::new(client, key_servers)?;
|
||||
let key_server_contract_address = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned());
|
||||
// only initialize from contract if it is installed. otherwise - use default nodes
|
||||
// once the contract is installed, all default nodes are lost (if not in the contract' set)
|
||||
if key_server_contract_address.is_some() {
|
||||
cached_contract.read_from_registry(&*client, key_server_contract_address);
|
||||
}
|
||||
|
||||
pub fn new(trusted_client: TrustedClient, key_servers: BTreeMap<Public, NodeAddress>) -> Result<Arc<Self>, Error> {
|
||||
let client = trusted_client.get_untrusted();
|
||||
let key_server_set = Arc::new(OnChainKeyServerSet {
|
||||
contract: Mutex::new(cached_contract),
|
||||
contract: Mutex::new(CachedContract::new(trusted_client, key_servers)?),
|
||||
});
|
||||
client.add_notify(key_server_set.clone());
|
||||
client
|
||||
.ok_or(Error::Internal("Constructing OnChainKeyServerSet without active Client".into()))?
|
||||
.add_notify(key_server_set.clone());
|
||||
Ok(key_server_set)
|
||||
}
|
||||
}
|
||||
@ -95,9 +91,9 @@ impl ChainNotify for OnChainKeyServerSet {
|
||||
}
|
||||
|
||||
impl CachedContract {
|
||||
pub fn new(client: &Arc<Client>, key_servers: BTreeMap<Public, NodeAddress>) -> Result<Self, Error> {
|
||||
Ok(CachedContract {
|
||||
client: Arc::downgrade(client),
|
||||
pub fn new(client: TrustedClient, key_servers: BTreeMap<Public, NodeAddress>) -> Result<Self, Error> {
|
||||
let mut cached_contract = CachedContract {
|
||||
client: client,
|
||||
contract_addr: None,
|
||||
key_servers: key_servers.into_iter()
|
||||
.map(|(p, addr)| {
|
||||
@ -106,11 +102,22 @@ impl CachedContract {
|
||||
Ok((p, addr))
|
||||
})
|
||||
.collect::<Result<BTreeMap<_, _>, Error>>()?,
|
||||
})
|
||||
};
|
||||
|
||||
if let Some(client) = cached_contract.client.get() {
|
||||
let key_server_contract_address = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned());
|
||||
// only initialize from contract if it is installed. otherwise - use default nodes
|
||||
// once the contract is installed, all default nodes are lost (if not in the contract' set)
|
||||
if key_server_contract_address.is_some() {
|
||||
cached_contract.read_from_registry(&*client, key_server_contract_address);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(cached_contract)
|
||||
}
|
||||
|
||||
pub fn update(&mut self, enacted: Vec<H256>, retracted: Vec<H256>) {
|
||||
if let Some(client) = self.client.upgrade() {
|
||||
if let Some(client) = self.client.get() {
|
||||
let new_contract_addr = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned());
|
||||
|
||||
// new contract installed => read nodes set from the contract
|
||||
|
@ -35,11 +35,14 @@ type CurrentSerializableDocumentKeyVersion = SerializableDocumentKeyShareVersion
|
||||
|
||||
/// Encrypted key share, stored by key storage on the single key server.
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
#[cfg_attr(test, derive(Default))]
|
||||
pub struct DocumentKeyShare {
|
||||
/// Author of the entry.
|
||||
pub author: Public,
|
||||
/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
|
||||
pub threshold: usize,
|
||||
/// Server public key.
|
||||
pub public: Public,
|
||||
/// Common (shared) encryption point.
|
||||
pub common_point: Option<Public>,
|
||||
/// Encrypted point.
|
||||
@ -122,10 +125,12 @@ struct SerializableDocumentKeyShareV1 {
|
||||
/// V2 of encrypted key share, as it is stored by key storage on the single key server.
|
||||
#[derive(Serialize, Deserialize)]
|
||||
struct SerializableDocumentKeyShareV2 {
|
||||
/// Authore of the entry.
|
||||
/// Author of the entry.
|
||||
pub author: SerializablePublic,
|
||||
/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
|
||||
pub threshold: usize,
|
||||
/// Server public.
|
||||
pub public: SerializablePublic,
|
||||
/// Common (shared) encryption point.
|
||||
pub common_point: Option<SerializablePublic>,
|
||||
/// Encrypted point.
|
||||
@ -174,6 +179,7 @@ fn upgrade_db(db: Database) -> Result<Database, Error> {
|
||||
// in v0 there have been only simultaneous GenEnc sessions.
|
||||
author: Public::default().into(), // added in v1
|
||||
threshold: v0_key.threshold,
|
||||
public: Public::default().into(), // addded in v2
|
||||
common_point: Some(v0_key.common_point),
|
||||
encrypted_point: Some(v0_key.encrypted_point),
|
||||
versions: vec![CurrentSerializableDocumentKeyVersion {
|
||||
@ -196,6 +202,7 @@ fn upgrade_db(db: Database) -> Result<Database, Error> {
|
||||
let current_key = CurrentSerializableDocumentKeyShare {
|
||||
author: v1_key.author, // added in v1
|
||||
threshold: v1_key.threshold,
|
||||
public: Public::default().into(), // addded in v2
|
||||
common_point: v1_key.common_point,
|
||||
encrypted_point: v1_key.encrypted_point,
|
||||
versions: vec![CurrentSerializableDocumentKeyVersion {
|
||||
@ -329,6 +336,7 @@ impl From<DocumentKeyShare> for SerializableDocumentKeyShareV2 {
|
||||
SerializableDocumentKeyShareV2 {
|
||||
author: key.author.into(),
|
||||
threshold: key.threshold,
|
||||
public: key.public.into(),
|
||||
common_point: key.common_point.map(Into::into),
|
||||
encrypted_point: key.encrypted_point.map(Into::into),
|
||||
versions: key.versions.into_iter().map(Into::into).collect(),
|
||||
@ -351,6 +359,7 @@ impl From<SerializableDocumentKeyShareV2> for DocumentKeyShare {
|
||||
DocumentKeyShare {
|
||||
author: key.author.into(),
|
||||
threshold: key.threshold,
|
||||
public: key.public.into(),
|
||||
common_point: key.common_point.map(Into::into),
|
||||
encrypted_point: key.encrypted_point.map(Into::into),
|
||||
versions: key.versions.into_iter()
|
||||
@ -424,6 +433,7 @@ pub mod tests {
|
||||
let tempdir = TempDir::new("").unwrap();
|
||||
let config = ServiceConfiguration {
|
||||
listener_address: None,
|
||||
service_contract_address: None,
|
||||
acl_check_enabled: true,
|
||||
data_path: tempdir.path().display().to_string(),
|
||||
cluster_config: ClusterConfiguration {
|
||||
@ -442,6 +452,7 @@ pub mod tests {
|
||||
let value1 = DocumentKeyShare {
|
||||
author: Public::default(),
|
||||
threshold: 100,
|
||||
public: Public::default(),
|
||||
common_point: Some(Random.generate().unwrap().public().clone()),
|
||||
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
||||
versions: vec![DocumentKeyShareVersion {
|
||||
@ -456,6 +467,7 @@ pub mod tests {
|
||||
let value2 = DocumentKeyShare {
|
||||
author: Public::default(),
|
||||
threshold: 200,
|
||||
public: Public::default(),
|
||||
common_point: Some(Random.generate().unwrap().public().clone()),
|
||||
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
||||
versions: vec![DocumentKeyShareVersion {
|
||||
|
@ -43,6 +43,7 @@ extern crate ethcore_bigint as bigint;
|
||||
extern crate ethcore_logger as logger;
|
||||
extern crate ethcrypto;
|
||||
extern crate ethkey;
|
||||
extern crate ethsync;
|
||||
extern crate native_contracts;
|
||||
extern crate keccak_hash as hash;
|
||||
extern crate kvdb;
|
||||
@ -53,33 +54,53 @@ mod types;
|
||||
|
||||
mod traits;
|
||||
mod acl_storage;
|
||||
mod http_listener;
|
||||
mod key_server;
|
||||
mod key_storage;
|
||||
mod serialization;
|
||||
mod key_server_set;
|
||||
mod node_key_pair;
|
||||
mod listener;
|
||||
mod trusted_client;
|
||||
|
||||
use std::sync::Arc;
|
||||
use ethcore::client::Client;
|
||||
use ethsync::SyncProvider;
|
||||
|
||||
pub use types::all::{ServerKeyId, EncryptedDocumentKey, RequestSignature, Public,
|
||||
Error, NodeAddress, ServiceConfiguration, ClusterConfiguration};
|
||||
Error, NodeAddress, ContractAddress, ServiceConfiguration, ClusterConfiguration};
|
||||
pub use traits::{NodeKeyPair, KeyServer};
|
||||
pub use self::node_key_pair::{PlainNodeKeyPair, KeyStoreNodeKeyPair};
|
||||
|
||||
/// Start new key server instance
|
||||
pub fn start(client: Arc<Client>, self_key_pair: Arc<NodeKeyPair>, config: ServiceConfiguration) -> Result<Box<KeyServer>, Error> {
|
||||
use std::sync::Arc;
|
||||
|
||||
pub fn start(client: Arc<Client>, sync: Arc<SyncProvider>, self_key_pair: Arc<NodeKeyPair>, config: ServiceConfiguration) -> Result<Box<KeyServer>, Error> {
|
||||
let trusted_client = trusted_client::TrustedClient::new(client.clone(), sync);
|
||||
let acl_storage: Arc<acl_storage::AclStorage> = if config.acl_check_enabled {
|
||||
acl_storage::OnChainAclStorage::new(&client)
|
||||
acl_storage::OnChainAclStorage::new(trusted_client.clone())?
|
||||
} else {
|
||||
Arc::new(acl_storage::DummyAclStorage::default())
|
||||
};
|
||||
let key_server_set = key_server_set::OnChainKeyServerSet::new(&client, config.cluster_config.nodes.clone())?;
|
||||
let key_server_set = key_server_set::OnChainKeyServerSet::new(trusted_client.clone(), config.cluster_config.nodes.clone())?;
|
||||
let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?);
|
||||
let key_server = key_server::KeyServerImpl::new(&config.cluster_config, key_server_set, self_key_pair, acl_storage, key_storage)?;
|
||||
let listener = http_listener::KeyServerHttpListener::start(config.listener_address, key_server)?;
|
||||
Ok(Box::new(listener))
|
||||
let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set.clone(), self_key_pair.clone(), acl_storage, key_storage.clone())?);
|
||||
let cluster = key_server.cluster();
|
||||
|
||||
// prepare listeners
|
||||
let http_listener = match config.listener_address {
|
||||
Some(listener_address) => Some(listener::http_listener::KeyServerHttpListener::start(listener_address, key_server.clone())?),
|
||||
None => None,
|
||||
};
|
||||
let contract_listener = config.service_contract_address.map(|service_contract_address| {
|
||||
let service_contract = Arc::new(listener::service_contract::OnChainServiceContract::new(trusted_client, service_contract_address, self_key_pair.clone()));
|
||||
let contract_listener = listener::service_contract_listener::ServiceContractListener::new(listener::service_contract_listener::ServiceContractListenerParams {
|
||||
contract: service_contract,
|
||||
key_server: key_server.clone(),
|
||||
self_key_pair: self_key_pair,
|
||||
key_server_set: key_server_set,
|
||||
cluster: cluster,
|
||||
key_storage: key_storage,
|
||||
});
|
||||
client.add_notify(contract_listener.clone());
|
||||
contract_listener
|
||||
});
|
||||
Ok(Box::new(listener::Listener::new(key_server, http_listener, contract_listener)))
|
||||
}
|
||||
|
@ -27,9 +27,9 @@ use serde::Serialize;
|
||||
use serde_json;
|
||||
use url::percent_encoding::percent_decode;
|
||||
|
||||
use traits::{ServerKeyGenerator, AdminSessionsServer, DocumentKeyServer, MessageSigner, KeyServer};
|
||||
use traits::KeyServer;
|
||||
use serialization::{SerializableEncryptedDocumentKeyShadow, SerializableBytes, SerializablePublic};
|
||||
use types::all::{Error, Public, MessageHash, EncryptedMessageSignature, NodeAddress, RequestSignature, ServerKeyId,
|
||||
use types::all::{Error, Public, MessageHash, NodeAddress, RequestSignature, ServerKeyId,
|
||||
EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId};
|
||||
|
||||
/// Key server http-requests listener. Available requests:
|
||||
@ -41,9 +41,9 @@ use types::all::{Error, Public, MessageHash, EncryptedMessageSignature, NodeAddr
|
||||
/// To sign message with server key: GET /{server_key_id}/{signature}/{message_hash}
|
||||
/// To change servers set: POST /admin/servers_set_change/{old_signature}/{new_signature} + BODY: json array of hex-encoded nodes ids
|
||||
|
||||
pub struct KeyServerHttpListener<T: KeyServer + 'static> {
|
||||
http_server: Option<HttpListening>,
|
||||
handler: Arc<KeyServerSharedHttpHandler<T>>,
|
||||
pub struct KeyServerHttpListener {
|
||||
http_server: HttpListening,
|
||||
_handler: Arc<KeyServerSharedHttpHandler>,
|
||||
}
|
||||
|
||||
/// Parsed http request
|
||||
@ -68,83 +68,44 @@ enum Request {
|
||||
}
|
||||
|
||||
/// Cloneable http handler
|
||||
struct KeyServerHttpHandler<T: KeyServer + 'static> {
|
||||
handler: Arc<KeyServerSharedHttpHandler<T>>,
|
||||
struct KeyServerHttpHandler {
|
||||
handler: Arc<KeyServerSharedHttpHandler>,
|
||||
}
|
||||
|
||||
/// Shared http handler
|
||||
struct KeyServerSharedHttpHandler<T: KeyServer + 'static> {
|
||||
key_server: T,
|
||||
struct KeyServerSharedHttpHandler {
|
||||
key_server: Arc<KeyServer>,
|
||||
}
|
||||
|
||||
impl<T> KeyServerHttpListener<T> where T: KeyServer + 'static {
|
||||
impl KeyServerHttpListener {
|
||||
/// Start KeyServer http listener
|
||||
pub fn start(listener_address: Option<NodeAddress>, key_server: T) -> Result<Self, Error> {
|
||||
pub fn start(listener_address: NodeAddress, key_server: Arc<KeyServer>) -> Result<Self, Error> {
|
||||
let shared_handler = Arc::new(KeyServerSharedHttpHandler {
|
||||
key_server: key_server,
|
||||
});
|
||||
|
||||
let http_server = listener_address
|
||||
.map(|listener_address| format!("{}:{}", listener_address.address, listener_address.port))
|
||||
.map(|listener_address| HttpServer::http(&listener_address).expect("cannot start HttpServer"))
|
||||
.map(|http_server| http_server.handle(KeyServerHttpHandler {
|
||||
let listener_address = format!("{}:{}", listener_address.address, listener_address.port);
|
||||
let http_server = HttpServer::http(&listener_address)
|
||||
.and_then(|http_server| http_server.handle(KeyServerHttpHandler {
|
||||
handler: shared_handler.clone(),
|
||||
}).expect("cannot start HttpServer"));
|
||||
})).map_err(|err| Error::Hyper(format!("{}", err)))?;
|
||||
|
||||
let listener = KeyServerHttpListener {
|
||||
http_server: http_server,
|
||||
handler: shared_handler,
|
||||
_handler: shared_handler,
|
||||
};
|
||||
Ok(listener)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> KeyServer for KeyServerHttpListener<T> where T: KeyServer + 'static {}
|
||||
|
||||
impl<T> AdminSessionsServer for KeyServerHttpListener<T> where T: KeyServer + 'static {
|
||||
fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet<NodeId>) -> Result<(), Error> {
|
||||
self.handler.key_server.change_servers_set(old_set_signature, new_set_signature, new_servers_set)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> ServerKeyGenerator for KeyServerHttpListener<T> where T: KeyServer + 'static {
|
||||
fn generate_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<Public, Error> {
|
||||
self.handler.key_server.generate_key(key_id, signature, threshold)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> DocumentKeyServer for KeyServerHttpListener<T> where T: KeyServer + 'static {
|
||||
fn store_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> {
|
||||
self.handler.key_server.store_document_key(key_id, signature, common_point, encrypted_document_key)
|
||||
}
|
||||
|
||||
fn generate_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<EncryptedDocumentKey, Error> {
|
||||
self.handler.key_server.generate_document_key(key_id, signature, threshold)
|
||||
}
|
||||
|
||||
fn restore_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKey, Error> {
|
||||
self.handler.key_server.restore_document_key(key_id, signature)
|
||||
}
|
||||
|
||||
fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKeyShadow, Error> {
|
||||
self.handler.key_server.restore_document_key_shadow(key_id, signature)
|
||||
}
|
||||
}
|
||||
|
||||
impl <T> MessageSigner for KeyServerHttpListener<T> where T: KeyServer + 'static {
|
||||
fn sign_message(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
|
||||
self.handler.key_server.sign_message(key_id, signature, message)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for KeyServerHttpListener<T> where T: KeyServer + 'static {
|
||||
impl Drop for KeyServerHttpListener {
|
||||
fn drop(&mut self) {
|
||||
// ignore error as we are dropping anyway
|
||||
self.http_server.take().map(|mut s| { let _ = s.close(); });
|
||||
let _ = self.http_server.close();
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> HttpHandler for KeyServerHttpHandler<T> where T: KeyServer + 'static {
|
||||
impl HttpHandler for KeyServerHttpHandler {
|
||||
fn handle(&self, mut req: HttpRequest, mut res: HttpResponse) {
|
||||
if req.headers.has::<header::Origin>() {
|
||||
warn!(target: "secretstore", "Ignoring {}-request {} with Origin header", req.method, req.uri);
|
||||
@ -273,6 +234,7 @@ fn return_error(mut res: HttpResponse, err: Error) {
|
||||
Error::BadSignature => *res.status_mut() = HttpStatusCode::BadRequest,
|
||||
Error::AccessDenied => *res.status_mut() = HttpStatusCode::Forbidden,
|
||||
Error::DocumentNotFound => *res.status_mut() = HttpStatusCode::NotFound,
|
||||
Error::Hyper(_) => *res.status_mut() = HttpStatusCode::BadRequest,
|
||||
Error::Serde(_) => *res.status_mut() = HttpStatusCode::BadRequest,
|
||||
Error::Database(_) => *res.status_mut() = HttpStatusCode::InternalServerError,
|
||||
Error::Internal(_) => *res.status_mut() = HttpStatusCode::InternalServerError,
|
||||
@ -364,6 +326,7 @@ fn parse_admin_request(method: &HttpMethod, path: Vec<String>, body: &str) -> Re
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
use hyper::method::Method as HttpMethod;
|
||||
use ethkey::Public;
|
||||
use key_server::tests::DummyKeyServer;
|
||||
@ -372,12 +335,12 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn http_listener_successfully_drops() {
|
||||
let key_server = DummyKeyServer;
|
||||
let key_server = Arc::new(DummyKeyServer::default());
|
||||
let address = NodeAddress { address: "127.0.0.1".into(), port: 9000 };
|
||||
let listener = KeyServerHttpListener::start(Some(address), key_server).unwrap();
|
||||
let listener = KeyServerHttpListener::start(address, key_server).unwrap();
|
||||
drop(listener);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn parse_request_successful() {
|
||||
// POST /shadow/{server_key_id}/{signature}/{threshold} => generate server key
|
80
secret_store/src/listener/mod.rs
Normal file
80
secret_store/src/listener/mod.rs
Normal file
@ -0,0 +1,80 @@
|
||||
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
pub mod http_listener;
|
||||
pub mod service_contract;
|
||||
pub mod service_contract_listener;
|
||||
mod tasks_queue;
|
||||
|
||||
use std::collections::BTreeSet;
|
||||
use std::sync::Arc;
|
||||
use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, AdminSessionsServer, KeyServer};
|
||||
use types::all::{Error, Public, MessageHash, EncryptedMessageSignature, RequestSignature, ServerKeyId,
|
||||
EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId};
|
||||
|
||||
pub struct Listener {
|
||||
key_server: Arc<KeyServer>,
|
||||
_http: Option<http_listener::KeyServerHttpListener>,
|
||||
_contract: Option<Arc<service_contract_listener::ServiceContractListener>>,
|
||||
}
|
||||
|
||||
impl Listener {
|
||||
pub fn new(key_server: Arc<KeyServer>, http: Option<http_listener::KeyServerHttpListener>, contract: Option<Arc<service_contract_listener::ServiceContractListener>>) -> Self {
|
||||
Self {
|
||||
key_server: key_server,
|
||||
_http: http,
|
||||
_contract: contract,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl KeyServer for Listener {}
|
||||
|
||||
impl ServerKeyGenerator for Listener {
|
||||
fn generate_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<Public, Error> {
|
||||
self.key_server.generate_key(key_id, signature, threshold)
|
||||
}
|
||||
}
|
||||
|
||||
impl DocumentKeyServer for Listener {
|
||||
fn store_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> {
|
||||
self.key_server.store_document_key(key_id, signature, common_point, encrypted_document_key)
|
||||
}
|
||||
|
||||
fn generate_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<EncryptedDocumentKey, Error> {
|
||||
self.key_server.generate_document_key(key_id, signature, threshold)
|
||||
}
|
||||
|
||||
fn restore_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKey, Error> {
|
||||
self.key_server.restore_document_key(key_id, signature)
|
||||
}
|
||||
|
||||
fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKeyShadow, Error> {
|
||||
self.key_server.restore_document_key_shadow(key_id, signature)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageSigner for Listener {
|
||||
fn sign_message(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
|
||||
self.key_server.sign_message(key_id, signature, message)
|
||||
}
|
||||
}
|
||||
|
||||
impl AdminSessionsServer for Listener {
|
||||
fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet<NodeId>) -> Result<(), Error> {
|
||||
self.key_server.change_servers_set(old_set_signature, new_set_signature, new_servers_set)
|
||||
}
|
||||
}
|
343
secret_store/src/listener/service_contract.rs
Normal file
343
secret_store/src/listener/service_contract.rs
Normal file
@ -0,0 +1,343 @@
|
||||
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::sync::Arc;
|
||||
use futures::{future, Future};
|
||||
use parking_lot::RwLock;
|
||||
use ethcore::filter::Filter;
|
||||
use ethcore::client::{Client, BlockChainClient, BlockId};
|
||||
use ethkey::{Public, Signature, public_to_address};
|
||||
use native_contracts::SecretStoreService;
|
||||
use hash::keccak;
|
||||
use bigint::hash::H256;
|
||||
use bigint::prelude::U256;
|
||||
use listener::service_contract_listener::ServiceTask;
|
||||
use trusted_client::TrustedClient;
|
||||
use {ServerKeyId, NodeKeyPair, ContractAddress};
|
||||
|
||||
/// Name of the SecretStore contract in the registry.
|
||||
const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service";
|
||||
|
||||
/// Key server has been added to the set.
|
||||
const SERVER_KEY_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyRequested(bytes32,uint256)";
|
||||
|
||||
/// Number of confirmations required before request can be processed.
|
||||
const REQUEST_CONFIRMATIONS_REQUIRED: u64 = 3;
|
||||
|
||||
lazy_static! {
|
||||
static ref SERVER_KEY_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_REQUESTED_EVENT_NAME);
|
||||
}
|
||||
|
||||
/// Service contract trait.
|
||||
pub trait ServiceContract: Send + Sync {
|
||||
/// Update contract when new blocks are enacted. Returns true if contract is installed && up-to-date (i.e. chain is synced).
|
||||
fn update(&self) -> bool;
|
||||
/// Read recent contract logs. Returns topics of every entry.
|
||||
fn read_logs(&self) -> Box<Iterator<Item=Vec<H256>>>;
|
||||
/// Publish generated key.
|
||||
fn read_pending_requests(&self) -> Box<Iterator<Item=(bool, ServiceTask)>>;
|
||||
/// Publish server key.
|
||||
fn publish_server_key(&self, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String>;
|
||||
}
|
||||
|
||||
/// On-chain service contract.
|
||||
pub struct OnChainServiceContract {
|
||||
/// Blockchain client.
|
||||
client: TrustedClient,
|
||||
/// This node key pair.
|
||||
self_key_pair: Arc<NodeKeyPair>,
|
||||
/// Contract addresss.
|
||||
address: ContractAddress,
|
||||
/// Contract.
|
||||
data: RwLock<SecretStoreServiceData>,
|
||||
}
|
||||
|
||||
/// On-chain service contract data.
|
||||
struct SecretStoreServiceData {
|
||||
/// Contract.
|
||||
pub contract: Arc<SecretStoreService>,
|
||||
/// Last block we have read logs from.
|
||||
pub last_log_block: Option<H256>,
|
||||
}
|
||||
|
||||
/// Pending requests iterator.
|
||||
struct PendingRequestsIterator {
|
||||
/// Blockchain client.
|
||||
client: Arc<Client>,
|
||||
/// Contract.
|
||||
contract: Arc<SecretStoreService>,
|
||||
/// This node key pair.
|
||||
self_key_pair: Arc<NodeKeyPair>,
|
||||
/// Block, this iterator is created for.
|
||||
block: H256,
|
||||
/// Current request index.
|
||||
index: U256,
|
||||
/// Requests length.
|
||||
length: U256,
|
||||
}
|
||||
|
||||
impl OnChainServiceContract {
|
||||
/// Create new on-chain service contract.
|
||||
pub fn new(client: TrustedClient, address: ContractAddress, self_key_pair: Arc<NodeKeyPair>) -> Self {
|
||||
let contract_addr = match address {
|
||||
ContractAddress::Registry => client.get().and_then(|c| c.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned())
|
||||
.map(|address| {
|
||||
trace!(target: "secretstore", "{}: installing service contract from address {}",
|
||||
self_key_pair.public(), address);
|
||||
address
|
||||
}))
|
||||
.unwrap_or_default(),
|
||||
ContractAddress::Address(ref address) => {
|
||||
trace!(target: "secretstore", "{}: installing service contract from address {}",
|
||||
self_key_pair.public(), address);
|
||||
address.clone()
|
||||
},
|
||||
};
|
||||
|
||||
OnChainServiceContract {
|
||||
client: client,
|
||||
self_key_pair: self_key_pair,
|
||||
address: address,
|
||||
data: RwLock::new(SecretStoreServiceData {
|
||||
contract: Arc::new(SecretStoreService::new(contract_addr)),
|
||||
last_log_block: None,
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ServiceContract for OnChainServiceContract {
|
||||
fn update(&self) -> bool {
|
||||
// TODO [Sec]: registry_address currently reads from BlockId::Latest, instead of
|
||||
// from block with REQUEST_CONFIRMATIONS_REQUIRED confirmations
|
||||
if let &ContractAddress::Registry = &self.address {
|
||||
if let Some(client) = self.client.get() {
|
||||
// update contract address from registry
|
||||
let service_contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()).unwrap_or_default();
|
||||
if self.data.read().contract.address != service_contract_addr {
|
||||
trace!(target: "secretstore", "{}: installing service contract from address {}",
|
||||
self.self_key_pair.public(), service_contract_addr);
|
||||
self.data.write().contract = Arc::new(SecretStoreService::new(service_contract_addr));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.data.read().contract.address != Default::default()
|
||||
&& self.client.get().is_some()
|
||||
}
|
||||
|
||||
fn read_logs(&self) -> Box<Iterator<Item=Vec<H256>>> {
|
||||
let client = match self.client.get() {
|
||||
Some(client) => client,
|
||||
None => {
|
||||
warn!(target: "secretstore", "{}: client is offline during read_logs call",
|
||||
self.self_key_pair.public());
|
||||
return Box::new(::std::iter::empty());
|
||||
},
|
||||
};
|
||||
|
||||
// prepare range of blocks to read logs from
|
||||
let (address, first_block, last_block) = {
|
||||
let mut data = self.data.write();
|
||||
let address = data.contract.address.clone();
|
||||
let confirmed_block = match get_confirmed_block_hash(&*client, REQUEST_CONFIRMATIONS_REQUIRED) {
|
||||
Some(confirmed_block) => confirmed_block,
|
||||
None => return Box::new(::std::iter::empty()), // no block with enough confirmations
|
||||
};
|
||||
let first_block = match data.last_log_block.take().and_then(|b| client.tree_route(&b, &confirmed_block)) {
|
||||
// if we have a route from last_log_block to confirmed_block => search for logs on this route
|
||||
//
|
||||
// potentially this could lead us to reading same logs twice when reorganizing to the fork, which
|
||||
// already has been canonical previosuly
|
||||
// the worst thing that can happen in this case is spending some time reading unneeded data from SS db
|
||||
Some(ref route) if route.index < route.blocks.len() => route.blocks[route.index],
|
||||
// else we care only about confirmed block
|
||||
_ => confirmed_block.clone(),
|
||||
};
|
||||
|
||||
data.last_log_block = Some(confirmed_block.clone());
|
||||
(address, first_block, confirmed_block)
|
||||
};
|
||||
|
||||
// read server key generation requests
|
||||
let request_logs = client.logs(Filter {
|
||||
from_block: BlockId::Hash(first_block),
|
||||
to_block: BlockId::Hash(last_block),
|
||||
address: Some(vec![address]),
|
||||
topics: vec![
|
||||
Some(vec![*SERVER_KEY_REQUESTED_EVENT_NAME_HASH]),
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
],
|
||||
limit: None,
|
||||
});
|
||||
|
||||
Box::new(request_logs.into_iter().map(|log| log.entry.topics))
|
||||
}
|
||||
|
||||
fn read_pending_requests(&self) -> Box<Iterator<Item=(bool, ServiceTask)>> {
|
||||
let client = match self.client.get() {
|
||||
Some(client) => client,
|
||||
None => return Box::new(::std::iter::empty()),
|
||||
};
|
||||
|
||||
// we only need requests that are here for more than REQUEST_CONFIRMATIONS_REQUIRED blocks
|
||||
// => we're reading from Latest - (REQUEST_CONFIRMATIONS_REQUIRED + 1) block
|
||||
let data = self.data.read();
|
||||
match data.contract.address == Default::default() {
|
||||
true => Box::new(::std::iter::empty()),
|
||||
false => get_confirmed_block_hash(&*client, REQUEST_CONFIRMATIONS_REQUIRED + 1)
|
||||
.and_then(|b| {
|
||||
let do_call = |a, d| future::done(client.call_contract(BlockId::Hash(b.clone()), a, d));
|
||||
data.contract.server_key_generation_requests_count(&do_call).wait()
|
||||
.map_err(|error| {
|
||||
warn!(target: "secretstore", "{}: call to server_key_generation_requests_count failed: {}",
|
||||
self.self_key_pair.public(), error);
|
||||
error
|
||||
})
|
||||
.map(|l| (b, l))
|
||||
.ok()
|
||||
})
|
||||
.map(|(b, l)| Box::new(PendingRequestsIterator {
|
||||
client: client,
|
||||
contract: data.contract.clone(),
|
||||
self_key_pair: self.self_key_pair.clone(),
|
||||
block: b,
|
||||
index: 0.into(),
|
||||
length: l,
|
||||
}) as Box<Iterator<Item=(bool, ServiceTask)>>)
|
||||
.unwrap_or_else(|| Box::new(::std::iter::empty()))
|
||||
}
|
||||
}
|
||||
|
||||
fn publish_server_key(&self, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> {
|
||||
// only publish if contract address is set && client is online
|
||||
let data = self.data.read();
|
||||
if data.contract.address == Default::default() {
|
||||
// it is not an error, because key could be generated even without contract
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let client = match self.client.get() {
|
||||
Some(client) => client,
|
||||
None => return Err("trusted client is required to publish key".into()),
|
||||
};
|
||||
|
||||
// only publish key if contract waits for publication
|
||||
// failing is ok here - it could be that enough confirmations have been recevied
|
||||
// or key has been requested using HTTP API
|
||||
let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d));
|
||||
let self_address = public_to_address(self.self_key_pair.public());
|
||||
if data.contract.get_server_key_confirmation_status(&do_call, server_key_id.clone(), self_address).wait().unwrap_or(false) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// prepare transaction data
|
||||
let server_key_hash = keccak(server_key);
|
||||
let signed_server_key = self.self_key_pair.sign(&server_key_hash).map_err(|e| format!("{}", e))?;
|
||||
let signed_server_key: Signature = signed_server_key.into_electrum().into();
|
||||
let transaction_data = data.contract.encode_server_key_generated_input(server_key_id.clone(),
|
||||
server_key.to_vec(),
|
||||
signed_server_key.v(),
|
||||
signed_server_key.r().into(),
|
||||
signed_server_key.s().into()
|
||||
)?;
|
||||
|
||||
// send transaction
|
||||
client.transact_contract(
|
||||
data.contract.address.clone(),
|
||||
transaction_data
|
||||
).map_err(|e| format!("{}", e))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Iterator for PendingRequestsIterator {
|
||||
type Item = (bool, ServiceTask);
|
||||
|
||||
fn next(&mut self) -> Option<(bool, ServiceTask)> {
|
||||
if self.index >= self.length {
|
||||
return None;
|
||||
}
|
||||
|
||||
let index = self.index.clone();
|
||||
self.index = self.index + 1.into();
|
||||
|
||||
let self_address = public_to_address(self.self_key_pair.public());
|
||||
let do_call = |a, d| future::done(self.client.call_contract(BlockId::Hash(self.block.clone()), a, d));
|
||||
self.contract.get_server_key_id(&do_call, index).wait()
|
||||
.and_then(|server_key_id|
|
||||
self.contract.get_server_key_threshold(&do_call, server_key_id.clone()).wait()
|
||||
.map(|threshold| (server_key_id, threshold)))
|
||||
.and_then(|(server_key_id, threshold)|
|
||||
self.contract.get_server_key_confirmation_status(&do_call, server_key_id.clone(), self_address).wait()
|
||||
.map(|is_confirmed| (server_key_id, threshold, is_confirmed)))
|
||||
.map(|(server_key_id, threshold, is_confirmed)|
|
||||
Some((is_confirmed, ServiceTask::GenerateServerKey(server_key_id, threshold.into()))))
|
||||
.map_err(|error| {
|
||||
warn!(target: "secretstore", "{}: reading service contract request failed: {}",
|
||||
self.self_key_pair.public(), error);
|
||||
()
|
||||
})
|
||||
.unwrap_or(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// Get hash of the last block with at least n confirmations.
|
||||
fn get_confirmed_block_hash(client: &Client, confirmations: u64) -> Option<H256> {
|
||||
client.block_number(BlockId::Latest)
|
||||
.map(|b| b.saturating_sub(confirmations))
|
||||
.and_then(|b| client.block_hash(BlockId::Number(b)))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use parking_lot::Mutex;
|
||||
use ethkey::Public;
|
||||
use bigint::hash::H256;
|
||||
use listener::service_contract_listener::ServiceTask;
|
||||
use ServerKeyId;
|
||||
use super::ServiceContract;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct DummyServiceContract {
|
||||
pub is_actual: bool,
|
||||
pub logs: Vec<Vec<H256>>,
|
||||
pub pending_requests: Vec<(bool, ServiceTask)>,
|
||||
pub published_keys: Mutex<Vec<(ServerKeyId, Public)>>,
|
||||
}
|
||||
|
||||
impl ServiceContract for DummyServiceContract {
|
||||
fn update(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn read_logs(&self) -> Box<Iterator<Item=Vec<H256>>> {
|
||||
Box::new(self.logs.clone().into_iter())
|
||||
}
|
||||
|
||||
fn read_pending_requests(&self) -> Box<Iterator<Item=(bool, ServiceTask)>> {
|
||||
Box::new(self.pending_requests.clone().into_iter())
|
||||
}
|
||||
|
||||
fn publish_server_key(&self, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> {
|
||||
self.published_keys.lock().push((server_key_id.clone(), server_key.clone()));
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
660
secret_store/src/listener/service_contract_listener.rs
Normal file
660
secret_store/src/listener/service_contract_listener.rs
Normal file
@ -0,0 +1,660 @@
|
||||
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::thread;
|
||||
use parking_lot::Mutex;
|
||||
use ethcore::client::ChainNotify;
|
||||
use ethkey::{Random, Generator, Public, sign};
|
||||
use bytes::Bytes;
|
||||
use bigint::hash::H256;
|
||||
use bigint::prelude::U256;
|
||||
use key_server_set::KeyServerSet;
|
||||
use key_server_cluster::{ClusterClient, ClusterSessionsListener, ClusterSession};
|
||||
use key_server_cluster::generation_session::SessionImpl as GenerationSession;
|
||||
use key_storage::KeyStorage;
|
||||
use listener::service_contract::ServiceContract;
|
||||
use listener::tasks_queue::TasksQueue;
|
||||
use {ServerKeyId, NodeKeyPair, KeyServer};
|
||||
|
||||
/// Retry interval (in blocks). Every RETRY_INTERVAL_BLOCKS blocks each KeyServer reads pending requests from
|
||||
/// service contract && tries to re-execute. The reason to have this mechanism is primarily because keys
|
||||
/// servers set change takes a lot of time + there could be some races, when blocks are coming to different
|
||||
/// KS at different times. This isn't intended to fix && respond to general session errors!
|
||||
const RETRY_INTERVAL_BLOCKS: usize = 30;
|
||||
|
||||
/// Max failed retry requests (in single retry interval). The reason behind this constant is that if several
|
||||
/// pending requests have failed, then most probably other will fail too.
|
||||
const MAX_FAILED_RETRY_REQUESTS: usize = 1;
|
||||
|
||||
/// SecretStore <-> Authority connector responsible for:
|
||||
/// 1. listening for new requests on SecretStore contract
|
||||
/// 2. redirecting requests to key server
|
||||
/// 3. publishing response on SecretStore contract
|
||||
pub struct ServiceContractListener {
|
||||
/// Service contract listener data.
|
||||
data: Arc<ServiceContractListenerData>,
|
||||
/// Service thread handle.
|
||||
service_handle: Option<thread::JoinHandle<()>>,
|
||||
}
|
||||
|
||||
/// Service contract listener parameters.
|
||||
pub struct ServiceContractListenerParams {
|
||||
/// Service contract.
|
||||
pub contract: Arc<ServiceContract>,
|
||||
/// Key server reference.
|
||||
pub key_server: Arc<KeyServer>,
|
||||
/// This node key pair.
|
||||
pub self_key_pair: Arc<NodeKeyPair>,
|
||||
/// Key servers set.
|
||||
pub key_server_set: Arc<KeyServerSet>,
|
||||
/// Cluster reference.
|
||||
pub cluster: Arc<ClusterClient>,
|
||||
/// Key storage reference.
|
||||
pub key_storage: Arc<KeyStorage>,
|
||||
}
|
||||
|
||||
/// Service contract listener data.
|
||||
struct ServiceContractListenerData {
|
||||
/// Blocks since last retry.
|
||||
pub last_retry: AtomicUsize,
|
||||
/// Retry-related data.
|
||||
pub retry_data: Mutex<ServiceContractRetryData>,
|
||||
/// Service tasks queue.
|
||||
pub tasks_queue: Arc<TasksQueue<ServiceTask>>,
|
||||
/// Service contract.
|
||||
pub contract: Arc<ServiceContract>,
|
||||
/// Key server reference.
|
||||
pub key_server: Arc<KeyServer>,
|
||||
/// This node key pair.
|
||||
pub self_key_pair: Arc<NodeKeyPair>,
|
||||
/// Key servers set.
|
||||
pub key_server_set: Arc<KeyServerSet>,
|
||||
/// Key storage reference.
|
||||
pub key_storage: Arc<KeyStorage>,
|
||||
|
||||
}
|
||||
|
||||
/// Retry-related data.
|
||||
#[derive(Default)]
|
||||
struct ServiceContractRetryData {
|
||||
/// Server keys, which we have generated (or tried to generate) since last retry moment.
|
||||
pub generated_keys: HashSet<ServerKeyId>,
|
||||
}
|
||||
|
||||
/// Service task.
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum ServiceTask {
|
||||
/// Retry all 'stalled' tasks.
|
||||
Retry,
|
||||
/// Generate server key (server_key_id, threshold).
|
||||
GenerateServerKey(H256, H256),
|
||||
/// Confirm server key (server_key_id).
|
||||
RestoreServerKey(H256),
|
||||
/// Shutdown listener.
|
||||
Shutdown,
|
||||
}
|
||||
|
||||
impl ServiceContractListener {
|
||||
/// Create new service contract listener.
|
||||
pub fn new(params: ServiceContractListenerParams) -> Arc<ServiceContractListener> {
|
||||
let data = Arc::new(ServiceContractListenerData {
|
||||
last_retry: AtomicUsize::new(0),
|
||||
retry_data: Default::default(),
|
||||
tasks_queue: Arc::new(TasksQueue::new()),
|
||||
contract: params.contract,
|
||||
key_server: params.key_server,
|
||||
self_key_pair: params.self_key_pair,
|
||||
key_server_set: params.key_server_set,
|
||||
key_storage: params.key_storage,
|
||||
});
|
||||
data.tasks_queue.push(ServiceTask::Retry);
|
||||
|
||||
// we are not starting thread when in test mode
|
||||
let service_handle = if cfg!(test) {
|
||||
None
|
||||
} else {
|
||||
let service_thread_data = data.clone();
|
||||
Some(thread::spawn(move || Self::run_service_thread(service_thread_data)))
|
||||
};
|
||||
let contract = Arc::new(ServiceContractListener {
|
||||
data: data,
|
||||
service_handle: service_handle,
|
||||
});
|
||||
params.cluster.add_generation_listener(contract.clone());
|
||||
contract
|
||||
}
|
||||
|
||||
/// Process incoming events of service contract.
|
||||
fn process_service_contract_events(&self) {
|
||||
self.data.tasks_queue.push_many(self.data.contract.read_logs()
|
||||
.filter_map(|topics| match topics.len() {
|
||||
// when key is already generated && we have this key
|
||||
3 if self.data.key_storage.get(&topics[1]).map(|k| k.is_some()).unwrap_or_default() => {
|
||||
Some(ServiceTask::RestoreServerKey(
|
||||
topics[1],
|
||||
))
|
||||
}
|
||||
// when key is not yet generated && this node should be master of this key generation session
|
||||
3 if is_processed_by_this_key_server(&*self.data.key_server_set, &*self.data.self_key_pair, &topics[1]) => {
|
||||
Some(ServiceTask::GenerateServerKey(
|
||||
topics[1],
|
||||
topics[2],
|
||||
))
|
||||
},
|
||||
3 => None,
|
||||
l @ _ => {
|
||||
warn!(target: "secretstore", "Ignoring ServerKeyRequested event with wrong number of params {}", l);
|
||||
None
|
||||
},
|
||||
}));
|
||||
}
|
||||
|
||||
/// Service thread procedure.
|
||||
fn run_service_thread(data: Arc<ServiceContractListenerData>) {
|
||||
loop {
|
||||
let task = data.tasks_queue.wait();
|
||||
trace!(target: "secretstore", "{}: processing {:?} task", data.self_key_pair.public(), task);
|
||||
|
||||
match task {
|
||||
ServiceTask::Shutdown => break,
|
||||
task @ _ => {
|
||||
// the only possible reaction to an error is a trace && it is already happened
|
||||
let _ = Self::process_service_task(&data, task);
|
||||
},
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
	/// Process single service task.
	///
	/// Returns `Ok(())` when the task has completed and its result has been
	/// published to the service contract, `Err` with a human-readable reason
	/// otherwise. Every outcome is logged here, so callers may safely ignore
	/// the returned error.
	fn process_service_task(data: &Arc<ServiceContractListenerData>, task: ServiceTask) -> Result<(), String> {
		match task {
			ServiceTask::Retry =>
				Self::retry_pending_requests(&data)
					.map(|processed_requests| {
						if processed_requests != 0 {
							trace!(target: "secretstore", "{}: successfully retried {} pending requests",
								data.self_key_pair.public(), processed_requests);
						}
						()
					})
					.map_err(|error| {
						warn!(target: "secretstore", "{}: retrying pending requests has failed with: {}",
							data.self_key_pair.public(), error);
						error
					}),
			ServiceTask::RestoreServerKey(server_key_id) => {
				// remember that this key was handled in the current retry cycle,
				// so that the next Retry pass does not process it again
				data.retry_data.lock().generated_keys.insert(server_key_id.clone());
				Self::restore_server_key(&data, &server_key_id)
					.and_then(|server_key| Self::publish_server_key(&data, &server_key_id, &server_key))
					.map(|_| {
						trace!(target: "secretstore", "{}: processed RestoreServerKey({}) request",
							data.self_key_pair.public(), server_key_id);
						()
					})
					.map_err(|error| {
						warn!(target: "secretstore", "{}: failed to process RestoreServerKey({}) request with: {}",
							data.self_key_pair.public(), server_key_id, error);
						error
					})
			},
			ServiceTask::GenerateServerKey(server_key_id, threshold) => {
				// mark as processed before the (possibly long) generation session starts
				data.retry_data.lock().generated_keys.insert(server_key_id.clone());
				Self::generate_server_key(&data, &server_key_id, &threshold)
					.and_then(|server_key| Self::publish_server_key(&data, &server_key_id, &server_key))
					.map(|_| {
						trace!(target: "secretstore", "{}: processed GenerateServerKey({}, {}) request",
							data.self_key_pair.public(), server_key_id, threshold);
						()
					})
					.map_err(|error| {
						warn!(target: "secretstore", "{}: failed to process GenerateServerKey({}, {}) request with: {}",
							data.self_key_pair.public(), server_key_id, threshold, error);
						error
					})
			},
			// Shutdown is consumed by the service thread loop and never reaches here
			ServiceTask::Shutdown => unreachable!("it must be checked outside"),
		}
	}
|
||||
|
||||
	/// Retry processing pending requests.
	///
	/// Walks all pending requests recorded in the service contract and processes
	/// those that 1) this node has not confirmed yet and 2) have not already been
	/// handled in the current retry cycle. Returns the number of successfully
	/// processed requests, or an error once more than MAX_FAILED_RETRY_REQUESTS
	/// requests have failed.
	fn retry_pending_requests(data: &Arc<ServiceContractListenerData>) -> Result<usize, String> {
		let mut failed_requests = 0;
		let mut processed_requests = 0;
		// take (and reset) the per-cycle retry data accumulated since the last pass
		let retry_data = ::std::mem::replace(&mut *data.retry_data.lock(), Default::default());
		for (is_confirmed, task) in data.contract.read_pending_requests() {
			// only process requests, which we haven't confirmed yet
			if is_confirmed {
				continue;
			}

			let request_result = match task {
				ServiceTask::GenerateServerKey(server_key_id, threshold) => {
					// only process request, which haven't been processed recently
					// there could be a lag when we've just generated server key && retrying on the same block
					// (or before our tx is mined) - state is not updated yet
					if retry_data.generated_keys.contains(&server_key_id) {
						continue;
					}

					// process request: generate when this node is the master for
					// the key, restore + publish otherwise
					let is_own_request = is_processed_by_this_key_server(&*data.key_server_set, &*data.self_key_pair, &server_key_id);
					Self::process_service_task(data, match is_own_request {
						true => ServiceTask::GenerateServerKey(server_key_id, threshold.into()),
						false => ServiceTask::RestoreServerKey(server_key_id),
					})
				},
				// no other task kinds are stored as pending requests
				_ => Err("not supported".into()),
			};

			// process request result; give up early when too many requests fail -
			// the cause is likely common to all of them and will be retried later
			match request_result {
				Ok(_) => processed_requests += 1,
				Err(_) => {
					failed_requests += 1;
					if failed_requests > MAX_FAILED_RETRY_REQUESTS {
						return Err("too many failed requests".into());
					}
				},
			}
		}

		Ok(processed_requests)
	}
|
||||
|
||||
/// Generate server key.
|
||||
fn generate_server_key(data: &Arc<ServiceContractListenerData>, server_key_id: &ServerKeyId, threshold: &H256) -> Result<Public, String> {
|
||||
let threshold_num = threshold.low_u64();
|
||||
if threshold != &threshold_num.into() || threshold_num >= ::std::usize::MAX as u64 {
|
||||
return Err(format!("invalid threshold {:?}", threshold));
|
||||
}
|
||||
|
||||
// key server expects signed server_key_id in server_key_generation procedure
|
||||
// only signer could store document key for this server key later
|
||||
// => this API (server key generation) is not suitable for usage in encryption via contract endpoint
|
||||
let author_key = Random.generate().map_err(|e| format!("{}", e))?;
|
||||
let server_key_id_signature = sign(author_key.secret(), server_key_id).map_err(|e| format!("{}", e))?;
|
||||
data.key_server.generate_key(server_key_id, &server_key_id_signature, threshold_num as usize)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
/// Restore server key.
|
||||
fn restore_server_key(data: &Arc<ServiceContractListenerData>, server_key_id: &ServerKeyId) -> Result<Public, String> {
|
||||
data.key_storage.get(server_key_id)
|
||||
.map_err(|e| format!("{}", e))
|
||||
.and_then(|ks| ks.ok_or("missing key".to_owned()))
|
||||
.map(|ks| ks.public)
|
||||
}
|
||||
|
||||
	/// Publish server key.
	///
	/// Forwards the generated/restored server key public to the service
	/// contract, which submits the publication transaction to the chain.
	fn publish_server_key(data: &Arc<ServiceContractListenerData>, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> {
		data.contract.publish_server_key(server_key_id, server_key)
	}
|
||||
}
|
||||
|
||||
impl Drop for ServiceContractListener {
	/// Stop the service thread (when it was started): ask it to shut down via a
	/// front-of-queue Shutdown task, then wait for the thread to exit.
	fn drop(&mut self) {
		if let Some(service_handle) = self.service_handle.take() {
			self.data.tasks_queue.push_front(ServiceTask::Shutdown);
			// ignore error as we are already closing
			let _ = service_handle.join();
		}
	}
}
|
||||
|
||||
impl ChainNotify for ServiceContractListener {
|
||||
fn new_blocks(&self, _imported: Vec<H256>, _invalid: Vec<H256>, enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: u64) {
|
||||
let enacted_len = enacted.len();
|
||||
if enacted_len == 0 {
|
||||
return;
|
||||
}
|
||||
|
||||
if !self.data.contract.update() {
|
||||
return;
|
||||
}
|
||||
|
||||
self.process_service_contract_events();
|
||||
|
||||
// schedule retry if received enough blocks since last retry
|
||||
// it maybe inaccurate when switching syncing/synced states, but that's ok
|
||||
if self.data.last_retry.fetch_add(enacted_len, Ordering::Relaxed) >= RETRY_INTERVAL_BLOCKS {
|
||||
self.data.tasks_queue.push(ServiceTask::Retry);
|
||||
self.data.last_retry.store(0, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ClusterSessionsListener<GenerationSession> for ServiceContractListener {
	/// Called when a generation session is removed from the sessions container;
	/// publishes the resulting server key for sessions started by other nodes.
	fn on_session_removed(&self, session: Arc<GenerationSession>) {
		// only publish when the session is started by another node
		// when it is started by this node, it is published from process_service_task
		if !is_processed_by_this_key_server(&*self.data.key_server_set, &*self.data.self_key_pair, &session.id()) {
			// by this time session must already be completed - either successfully, or not
			assert!(session.is_finished());

			// ignore result - the only thing that we can do is to log the error
			match session.wait(Some(Default::default()))
				.map_err(|e| format!("{}", e))
				.and_then(|server_key| Self::publish_server_key(&self.data, &session.id(), &server_key)) {
				Ok(_) => trace!(target: "secretstore", "{}: completed foreign GenerateServerKey({}) request",
					self.data.self_key_pair.public(), session.id()),
				Err(error) => warn!(target: "secretstore", "{}: failed to process GenerateServerKey({}) request with: {}",
					self.data.self_key_pair.public(), session.id(), error),
			}
		}
	}
}
|
||||
|
||||
/// Returns true when session, related to `server_key_id` must be started on this KeyServer.
|
||||
fn is_processed_by_this_key_server(key_server_set: &KeyServerSet, self_key_pair: &NodeKeyPair, server_key_id: &H256) -> bool {
|
||||
let servers = key_server_set.get();
|
||||
let total_servers_count = servers.len();
|
||||
match total_servers_count {
|
||||
0 => return false,
|
||||
1 => return true,
|
||||
_ => (),
|
||||
}
|
||||
|
||||
let this_server_index = match servers.keys().enumerate().find(|&(_, s)| s == self_key_pair.public()) {
|
||||
Some((index, _)) => index,
|
||||
None => return false,
|
||||
};
|
||||
|
||||
let server_key_id_value: U256 = server_key_id.into();
|
||||
let range_interval = U256::max_value() / total_servers_count.into();
|
||||
let range_begin = (range_interval + 1.into()) * this_server_index.into();
|
||||
let range_end = range_begin.saturating_add(range_interval);
|
||||
|
||||
server_key_id_value >= range_begin && server_key_id_value <= range_end
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::Ordering;
|
||||
use ethkey::{Random, Generator, KeyPair};
|
||||
use listener::service_contract::ServiceContract;
|
||||
use listener::service_contract::tests::DummyServiceContract;
|
||||
use key_server_cluster::DummyClusterClient;
|
||||
use key_server::tests::DummyKeyServer;
|
||||
use key_storage::{KeyStorage, DocumentKeyShare};
|
||||
use key_storage::tests::DummyKeyStorage;
|
||||
use key_server_set::tests::MapKeyServerSet;
|
||||
use PlainNodeKeyPair;
|
||||
use super::{ServiceTask, ServiceContractListener, ServiceContractListenerParams, is_processed_by_this_key_server};
|
||||
|
||||
	/// Build a ServiceContractListener from optional dummy components.
	///
	/// Components that are not supplied default to the corresponding Dummy*
	/// implementations; the key server set always contains three well-known
	/// nodes (secp256k1 secrets 1, 2 and 3), and the listener itself acts as
	/// the node with secret 1.
	fn make_service_contract_listener(contract: Option<Arc<ServiceContract>>, key_server: Option<Arc<DummyKeyServer>>, key_storage: Option<Arc<KeyStorage>>) -> Arc<ServiceContractListener> {
		let contract = contract.unwrap_or_else(|| Arc::new(DummyServiceContract::default()));
		let key_server = key_server.unwrap_or_else(|| Arc::new(DummyKeyServer::default()));
		let key_storage = key_storage.unwrap_or_else(|| Arc::new(DummyKeyStorage::default()));
		let servers_set = Arc::new(MapKeyServerSet::new(vec![
			// public of secret 1
			("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
			// public of secret 2
			("c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee51ae168fea63dc339a3c58419466ceaeef7f632653266d0e1236431a950cfe52a".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
			// public of secret 3
			("f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9388f7b0f632de8140fe337e62a37f3566500a99934c2231b6cb9fd7584b8e672".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
		].into_iter().collect()));
		let self_key_pair = Arc::new(PlainNodeKeyPair::new(KeyPair::from_secret("0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap()));
		ServiceContractListener::new(ServiceContractListenerParams {
			contract: contract,
			key_server: key_server,
			self_key_pair: self_key_pair,
			key_server_set: servers_set,
			cluster: Arc::new(DummyClusterClient::default()),
			key_storage: key_storage,
		})
	}
|
||||
|
||||
	#[test]
	fn is_not_processed_by_this_key_server_with_zero_servers() {
		// an empty key server set owns no keys at all
		assert_eq!(is_processed_by_this_key_server(
			&MapKeyServerSet::default(),
			&PlainNodeKeyPair::new(Random.generate().unwrap()),
			&Default::default()), false);
	}
|
||||
|
||||
	#[test]
	fn is_processed_by_this_key_server_with_single_server() {
		// a single-node set owns the whole key id space
		let self_key_pair = Random.generate().unwrap();
		assert_eq!(is_processed_by_this_key_server(
			&MapKeyServerSet::new(vec![
				(self_key_pair.public().clone(), "127.0.0.1:8080".parse().unwrap())
			].into_iter().collect()),
			&PlainNodeKeyPair::new(self_key_pair),
			&Default::default()), true);
	}
|
||||
|
||||
	#[test]
	fn is_not_processed_by_this_key_server_when_not_a_part_of_servers_set() {
		// NOTE(review): despite the "is_not_processed" name, the asserted result
		// here is `true`: with a single-server set is_processed_by_this_key_server
		// short-circuits to `true` BEFORE the membership check - confirm that this
		// behavior (and the test name) is intended
		assert!(is_processed_by_this_key_server(
			&MapKeyServerSet::new(vec![
				(Random.generate().unwrap().public().clone(), "127.0.0.1:8080".parse().unwrap())
			].into_iter().collect()),
			&PlainNodeKeyPair::new(Random.generate().unwrap()),
			&Default::default()));
	}
|
||||
|
||||
	#[test]
	fn is_processed_by_this_key_server_in_set_of_3() {
		// servers set is ordered && server range depends on index of this server
		let servers_set = MapKeyServerSet::new(vec![
			// secret: 0000000000000000000000000000000000000000000000000000000000000001
			("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
			// secret: 0000000000000000000000000000000000000000000000000000000000000002
			("c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee51ae168fea63dc339a3c58419466ceaeef7f632653266d0e1236431a950cfe52a".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
			// secret: 0000000000000000000000000000000000000000000000000000000000000003
			("f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9388f7b0f632de8140fe337e62a37f3566500a99934c2231b6cb9fd7584b8e672".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
		].into_iter().collect());

		// 1st server: process hashes [0x0; 0x555...555]
		let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret(
			"0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap());
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"0000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"3000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"5555555555555555555555555555555555555555555555555555555555555555".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"5555555555555555555555555555555555555555555555555555555555555556".parse().unwrap()), false);

		// 2nd server: process hashes from 0x555...556 to 0xaaa...aab
		let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret(
			"0000000000000000000000000000000000000000000000000000000000000002".parse().unwrap()).unwrap());
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"5555555555555555555555555555555555555555555555555555555555555555".parse().unwrap()), false);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"5555555555555555555555555555555555555555555555555555555555555556".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"7555555555555555555555555555555555555555555555555555555555555555".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac".parse().unwrap()), false);

		// 3rd server: process hashes from 0xaaa...aac to 0xfff...fff
		let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret(
			"0000000000000000000000000000000000000000000000000000000000000003".parse().unwrap()).unwrap());
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab".parse().unwrap()), false);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"daaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true);
	}
|
||||
|
||||
	#[test]
	fn is_processed_by_this_key_server_in_set_of_4() {
		// servers set is ordered && server range depends on index of this server
		// (note: the map is ordered by public key, hence secret 4 sorting before secret 3)
		let servers_set = MapKeyServerSet::new(vec![
			// secret: 0000000000000000000000000000000000000000000000000000000000000001
			("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
			// secret: 0000000000000000000000000000000000000000000000000000000000000002
			("c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee51ae168fea63dc339a3c58419466ceaeef7f632653266d0e1236431a950cfe52a".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
			// secret: 0000000000000000000000000000000000000000000000000000000000000004
			("e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd1351ed993ea0d455b75642e2098ea51448d967ae33bfbdfe40cfe97bdc47739922".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
			// secret: 0000000000000000000000000000000000000000000000000000000000000003
			("f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9388f7b0f632de8140fe337e62a37f3566500a99934c2231b6cb9fd7584b8e672".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
		].into_iter().collect());

		// 1st server: process hashes [0x0; 0x3ff...ff]
		let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret(
			"0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap());
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"0000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"2000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"4000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), false);

		// 2nd server: process hashes from 0x400...000 to 0x7ff...ff
		let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret(
			"0000000000000000000000000000000000000000000000000000000000000002".parse().unwrap()).unwrap());
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), false);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"4000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"6000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"8000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), false);

		// 3rd server: process hashes from 0x800...000 to 0xbff...ff
		let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret(
			"0000000000000000000000000000000000000000000000000000000000000004".parse().unwrap()).unwrap());
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), false);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"8000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"a000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"bfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"c000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), false);

		// 4th server: process hashes from 0xc00...000 to 0xfff...ff
		let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret(
			"0000000000000000000000000000000000000000000000000000000000000003".parse().unwrap()).unwrap());
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"bfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), false);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"c000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"e000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true);
	}
|
||||
|
||||
	#[test]
	fn no_tasks_scheduled_when_no_contract_events() {
		// the queue initially contains the single Retry task pushed by ::new();
		// with no contract logs, processing events must not add anything
		let listener = make_service_contract_listener(None, None, None);
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
		listener.process_service_contract_events();
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
	}
|
||||
|
||||
#[test]
|
||||
fn server_key_generation_is_scheduled_when_requested_key_is_unknnown() {
|
||||
let mut contract = DummyServiceContract::default();
|
||||
contract.logs.push(vec![Default::default(), Default::default(), Default::default()]);
|
||||
let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None);
|
||||
assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
|
||||
listener.process_service_contract_events();
|
||||
assert_eq!(listener.data.tasks_queue.snapshot().len(), 2);
|
||||
assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::GenerateServerKey(Default::default(), Default::default())));
|
||||
}
|
||||
|
||||
	#[test]
	fn no_new_tasks_scheduled_when_requested_key_is_unknown_and_request_belongs_to_other_key_server() {
		// key id 0xff..ff falls into the range of the last server of the set,
		// which is not the listener's own node => the event must be ignored
		let server_key_id = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap();
		let mut contract = DummyServiceContract::default();
		contract.logs.push(vec![Default::default(), server_key_id, Default::default()]);
		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None);
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
		listener.process_service_contract_events();
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
	}
|
||||
|
||||
#[test]
|
||||
fn server_key_restore_is_scheduled_when_requested_key_is_knnown() {
|
||||
let mut contract = DummyServiceContract::default();
|
||||
contract.logs.push(vec![Default::default(), Default::default(), Default::default()]);
|
||||
let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None);
|
||||
listener.data.key_storage.insert(Default::default(), Default::default()).unwrap();
|
||||
assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
|
||||
listener.process_service_contract_events();
|
||||
assert_eq!(listener.data.tasks_queue.snapshot().len(), 2);
|
||||
assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::RestoreServerKey(Default::default())));
|
||||
}
|
||||
|
||||
	#[test]
	fn no_new_tasks_scheduled_when_wrong_number_of_topics_in_log() {
		// a ServerKeyRequested log must carry exactly 3 topics; malformed logs are ignored
		let mut contract = DummyServiceContract::default();
		contract.logs.push(vec![Default::default(), Default::default()]);
		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None);
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
		listener.process_service_contract_events();
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
	}
|
||||
|
||||
	#[test]
	fn generation_session_is_created_when_processing_generate_server_key_task() {
		// the dummy key server fails the request (hence unwrap_err), but it must
		// still have been asked to start exactly one generation session
		let key_server = Arc::new(DummyKeyServer::default());
		let listener = make_service_contract_listener(None, Some(key_server.clone()), None);
		ServiceContractListener::process_service_task(&listener.data, ServiceTask::GenerateServerKey(Default::default(), Default::default())).unwrap_err();
		assert_eq!(key_server.generation_requests_count.load(Ordering::Relaxed), 1);
	}
|
||||
|
||||
	#[test]
	fn key_is_read_and_published_when_processing_restore_server_key_task() {
		// pre-insert a key share; RestoreServerKey must read it from the storage
		// and publish its public portion to the service contract
		let contract = Arc::new(DummyServiceContract::default());
		let key_storage = Arc::new(DummyKeyStorage::default());
		let mut key_share = DocumentKeyShare::default();
		key_share.public = KeyPair::from_secret("0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap().public().clone();
		key_storage.insert(Default::default(), key_share.clone()).unwrap();
		let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage));
		ServiceContractListener::process_service_task(&listener.data, ServiceTask::RestoreServerKey(Default::default())).unwrap();
		assert_eq!(*contract.published_keys.lock(), vec![(Default::default(), key_share.public)]);
	}
|
||||
|
||||
	#[test]
	fn generation_is_not_retried_if_tried_in_the_same_cycle() {
		// the key id is recorded in retry_data.generated_keys => the retry pass
		// must skip the pending request without touching the key server
		let mut contract = DummyServiceContract::default();
		contract.pending_requests.push((false, ServiceTask::GenerateServerKey(Default::default(), Default::default())));
		let key_server = Arc::new(DummyKeyServer::default());
		let listener = make_service_contract_listener(Some(Arc::new(contract)), Some(key_server.clone()), None);
		listener.data.retry_data.lock().generated_keys.insert(Default::default());
		ServiceContractListener::retry_pending_requests(&listener.data).unwrap();
		assert_eq!(key_server.generation_requests_count.load(Ordering::Relaxed), 0);
	}
|
||||
}
|
78
secret_store/src/listener/tasks_queue.rs
Normal file
78
secret_store/src/listener/tasks_queue.rs
Normal file
@ -0,0 +1,78 @@
|
||||
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::collections::VecDeque;
|
||||
use parking_lot::{Mutex, Condvar};
|
||||
|
||||
#[derive(Default)]
/// General deque-based tasks queue.
///
/// Producers push tasks to either end of the queue; consumers block in `wait`
/// until a task is available. Synchronization is a mutex-protected deque
/// paired with a condition variable.
pub struct TasksQueue<Task: Clone> {
	/// Service event: signalled whenever new tasks are pushed.
	service_event: Condvar,
	/// Service tasks queue.
	service_tasks: Mutex<VecDeque<Task>>,
}
|
||||
|
||||
impl<Task> TasksQueue<Task> where Task: Clone {
|
||||
/// Create new tasks queue.
|
||||
pub fn new() -> Self {
|
||||
TasksQueue {
|
||||
service_event: Condvar::new(),
|
||||
service_tasks: Mutex::new(VecDeque::new()),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
/// Get current tasks snapshot.
|
||||
pub fn snapshot(&self) -> VecDeque<Task> {
|
||||
self.service_tasks.lock().clone()
|
||||
}
|
||||
|
||||
/// Push task to the front of queue.
|
||||
pub fn push_front(&self, task: Task) {
|
||||
let mut service_tasks = self.service_tasks.lock();
|
||||
service_tasks.push_front(task);
|
||||
self.service_event.notify_all();
|
||||
}
|
||||
|
||||
/// Push task to the back of queue.
|
||||
pub fn push(&self, task: Task) {
|
||||
let mut service_tasks = self.service_tasks.lock();
|
||||
service_tasks.push_back(task);
|
||||
self.service_event.notify_all();
|
||||
}
|
||||
|
||||
/// Push task to the back of queue.
|
||||
pub fn push_many<I: Iterator<Item=Task>>(&self, tasks: I) {
|
||||
let mut service_tasks = self.service_tasks.lock();
|
||||
let previous_len = service_tasks.len();
|
||||
service_tasks.extend(tasks);
|
||||
if service_tasks.len() != previous_len {
|
||||
self.service_event.notify_all();
|
||||
}
|
||||
}
|
||||
|
||||
/// Wait for new task (task is removed from the front of queue).
|
||||
pub fn wait(&self) -> Task {
|
||||
let mut service_tasks = self.service_tasks.lock();
|
||||
if service_tasks.is_empty() {
|
||||
self.service_event.wait(&mut service_tasks);
|
||||
}
|
||||
|
||||
service_tasks.pop_front()
|
||||
.expect("service_event is only fired when there are new tasks; qed")
|
||||
}
|
||||
}
|
57
secret_store/src/trusted_client.rs
Normal file
57
secret_store/src/trusted_client.rs
Normal file
@ -0,0 +1,57 @@
|
||||
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
use ethcore::client::{Client, BlockChainClient};
|
||||
use ethsync::SyncProvider;
|
||||
|
||||
#[derive(Clone)]
/// 'Trusted' client weak reference.
///
/// Holds weak references only, so keeping a TrustedClient alive does not
/// prevent the client/sync services from being dropped on shutdown.
pub struct TrustedClient {
	/// Blockchain client.
	client: Weak<Client>,
	/// Sync provider.
	sync: Weak<SyncProvider>,
}
|
||||
|
||||
impl TrustedClient {
|
||||
/// Create new trusted client.
|
||||
pub fn new(client: Arc<Client>, sync: Arc<SyncProvider>) -> Self {
|
||||
TrustedClient {
|
||||
client: Arc::downgrade(&client),
|
||||
sync: Arc::downgrade(&sync),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get 'trusted' `Client` reference only if it is synchronized && trusted.
|
||||
pub fn get(&self) -> Option<Arc<Client>> {
|
||||
self.client.upgrade()
|
||||
.and_then(|client| self.sync.upgrade().map(|sync| (client, sync)))
|
||||
.and_then(|(client, sync)| {
|
||||
let is_synced = !sync.status().is_syncing(client.queue_info());
|
||||
let is_trusted = client.chain_info().security_level().is_full();
|
||||
match is_synced && is_trusted {
|
||||
true => Some(client),
|
||||
false => None,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Get untrusted `Client` reference.
|
||||
pub fn get_untrusted(&self) -> Option<Arc<Client>> {
|
||||
self.client.upgrade()
|
||||
}
|
||||
}
|
@ -44,6 +44,8 @@ pub enum Error {
|
||||
AccessDenied,
|
||||
/// Requested document not found
|
||||
DocumentNotFound,
|
||||
/// Hyper error
|
||||
Hyper(String),
|
||||
/// Serialization/deserialization error
|
||||
Serde(String),
|
||||
/// Database-related error
|
||||
@ -61,11 +63,22 @@ pub struct NodeAddress {
|
||||
pub port: u16,
|
||||
}
|
||||
|
||||
/// Where the service contract address comes from: resolved via the on-chain
/// registry, or fixed explicitly in configuration.
#[derive(Debug, Clone)]
pub enum ContractAddress {
	/// Address is read from registry.
	Registry,
	/// Address is specified explicitly.
	Address(ethkey::Address),
}
|
||||
|
||||
/// Secret store configuration
|
||||
#[derive(Debug)]
|
||||
pub struct ServiceConfiguration {
|
||||
/// HTTP listener address. If None, HTTP API is disabled.
|
||||
pub listener_address: Option<NodeAddress>,
|
||||
/// Service contract address. If None, service contract API is disabled.
|
||||
pub service_contract_address: Option<ContractAddress>,
|
||||
/// Is ACL check enabled. If false, everyone has access to all keys. Useful for tests only.
|
||||
pub acl_check_enabled: bool,
|
||||
/// Data directory path for secret store
|
||||
@ -107,6 +120,7 @@ impl fmt::Display for Error {
|
||||
Error::BadSignature => write!(f, "Bad signature"),
|
||||
Error::AccessDenied => write!(f, "Access dened"),
|
||||
Error::DocumentNotFound => write!(f, "Document not found"),
|
||||
Error::Hyper(ref msg) => write!(f, "Hyper error: {}", msg),
|
||||
Error::Serde(ref msg) => write!(f, "Serialization error: {}", msg),
|
||||
Error::Database(ref msg) => write!(f, "Database error: {}", msg),
|
||||
Error::Internal(ref msg) => write!(f, "Internal error: {}", msg),
|
||||
|
Loading…
Reference in New Issue
Block a user