Merge branch 'master' into dircrate2
This commit is contained in:
commit
7b40f1cfe9
11
Cargo.lock
generated
11
Cargo.lock
generated
@ -680,6 +680,7 @@ dependencies = [
|
|||||||
"ethcore-util 1.9.0",
|
"ethcore-util 1.9.0",
|
||||||
"ethcrypto 0.1.0",
|
"ethcrypto 0.1.0",
|
||||||
"ethkey 0.3.0",
|
"ethkey 0.3.0",
|
||||||
|
"ethsync 1.9.0",
|
||||||
"futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
|
"futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
"futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)",
|
"hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
@ -1933,7 +1934,7 @@ dependencies = [
|
|||||||
"parity-rpc 1.9.0",
|
"parity-rpc 1.9.0",
|
||||||
"parity-rpc-client 1.4.0",
|
"parity-rpc-client 1.4.0",
|
||||||
"parity-updater 1.9.0",
|
"parity-updater 1.9.0",
|
||||||
"parity-version 0.1.0",
|
"parity-version 1.9.0",
|
||||||
"parity-whisper 0.1.0",
|
"parity-whisper 0.1.0",
|
||||||
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"path 0.1.0",
|
"path 0.1.0",
|
||||||
@ -1979,7 +1980,7 @@ dependencies = [
|
|||||||
"parity-hash-fetch 1.9.0",
|
"parity-hash-fetch 1.9.0",
|
||||||
"parity-reactor 0.1.0",
|
"parity-reactor 0.1.0",
|
||||||
"parity-ui 1.9.0",
|
"parity-ui 1.9.0",
|
||||||
"parity-version 0.1.0",
|
"parity-version 1.9.0",
|
||||||
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
|
"rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
"rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
@ -2127,7 +2128,7 @@ dependencies = [
|
|||||||
"order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
"order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"parity-reactor 0.1.0",
|
"parity-reactor 0.1.0",
|
||||||
"parity-updater 1.9.0",
|
"parity-updater 1.9.0",
|
||||||
"parity-version 0.1.0",
|
"parity-version 1.9.0",
|
||||||
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
"pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
|
"rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
@ -2235,7 +2236,7 @@ dependencies = [
|
|||||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"parity-hash-fetch 1.9.0",
|
"parity-hash-fetch 1.9.0",
|
||||||
"parity-reactor 0.1.0",
|
"parity-reactor 0.1.0",
|
||||||
"parity-version 0.1.0",
|
"parity-version 1.9.0",
|
||||||
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"path 0.1.0",
|
"path 0.1.0",
|
||||||
"semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
"semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
@ -2244,7 +2245,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "parity-version"
|
name = "parity-version"
|
||||||
version = "0.1.0"
|
version = "1.9.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"ethcore-bytes 0.1.0",
|
"ethcore-bytes 0.1.0",
|
||||||
"rlp 0.2.1",
|
"rlp 0.2.1",
|
||||||
|
@ -26,6 +26,7 @@ const REGISTRY_ABI: &'static str = include_str!("res/registrar.json");
|
|||||||
const URLHINT_ABI: &'static str = include_str!("res/urlhint.json");
|
const URLHINT_ABI: &'static str = include_str!("res/urlhint.json");
|
||||||
const SERVICE_TRANSACTION_ABI: &'static str = include_str!("res/service_transaction.json");
|
const SERVICE_TRANSACTION_ABI: &'static str = include_str!("res/service_transaction.json");
|
||||||
const SECRETSTORE_ACL_STORAGE_ABI: &'static str = include_str!("res/secretstore_acl_storage.json");
|
const SECRETSTORE_ACL_STORAGE_ABI: &'static str = include_str!("res/secretstore_acl_storage.json");
|
||||||
|
const SECRETSTORE_SERVICE_ABI: &'static str = include_str!("res/secretstore_service.json");
|
||||||
const VALIDATOR_SET_ABI: &'static str = include_str!("res/validator_set.json");
|
const VALIDATOR_SET_ABI: &'static str = include_str!("res/validator_set.json");
|
||||||
const VALIDATOR_REPORT_ABI: &'static str = include_str!("res/validator_report.json");
|
const VALIDATOR_REPORT_ABI: &'static str = include_str!("res/validator_report.json");
|
||||||
const PEER_SET_ABI: &'static str = include_str!("res/peer_set.json");
|
const PEER_SET_ABI: &'static str = include_str!("res/peer_set.json");
|
||||||
@ -53,6 +54,7 @@ fn main() {
|
|||||||
build_file("Urlhint", URLHINT_ABI, "urlhint.rs");
|
build_file("Urlhint", URLHINT_ABI, "urlhint.rs");
|
||||||
build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs");
|
build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs");
|
||||||
build_file("SecretStoreAclStorage", SECRETSTORE_ACL_STORAGE_ABI, "secretstore_acl_storage.rs");
|
build_file("SecretStoreAclStorage", SECRETSTORE_ACL_STORAGE_ABI, "secretstore_acl_storage.rs");
|
||||||
|
build_file("SecretStoreService", SECRETSTORE_SERVICE_ABI, "secretstore_service.rs");
|
||||||
build_file("ValidatorSet", VALIDATOR_SET_ABI, "validator_set.rs");
|
build_file("ValidatorSet", VALIDATOR_SET_ABI, "validator_set.rs");
|
||||||
build_file("ValidatorReport", VALIDATOR_REPORT_ABI, "validator_report.rs");
|
build_file("ValidatorReport", VALIDATOR_REPORT_ABI, "validator_report.rs");
|
||||||
build_file("PeerSet", PEER_SET_ABI, "peer_set.rs");
|
build_file("PeerSet", PEER_SET_ABI, "peer_set.rs");
|
||||||
|
@ -46,7 +46,7 @@ pub fn generate_module(struct_name: &str, abi: &str) -> Result<String, Error> {
|
|||||||
Ok(format!(r##"
|
Ok(format!(r##"
|
||||||
use byteorder::{{BigEndian, ByteOrder}};
|
use byteorder::{{BigEndian, ByteOrder}};
|
||||||
use futures::{{future, Future, IntoFuture}};
|
use futures::{{future, Future, IntoFuture}};
|
||||||
use ethabi::{{Contract, Token, Event}};
|
use ethabi::{{Bytes, Contract, Token, Event}};
|
||||||
use bigint;
|
use bigint;
|
||||||
|
|
||||||
type BoxFuture<A, B> = Box<Future<Item = A, Error = B> + Send>;
|
type BoxFuture<A, B> = Box<Future<Item = A, Error = B> + Send>;
|
||||||
@ -96,7 +96,7 @@ fn generate_functions(contract: &Contract) -> Result<String, Error> {
|
|||||||
let inputs: Vec<_> = function.inputs.iter().map(|i| i.kind.clone()).collect();
|
let inputs: Vec<_> = function.inputs.iter().map(|i| i.kind.clone()).collect();
|
||||||
let outputs: Vec<_> = function.outputs.iter().map(|i| i.kind.clone()).collect();
|
let outputs: Vec<_> = function.outputs.iter().map(|i| i.kind.clone()).collect();
|
||||||
|
|
||||||
let (input_params, to_tokens) = input_params_codegen(&inputs)
|
let (input_params, input_names, to_tokens) = input_params_codegen(&inputs)
|
||||||
.map_err(|bad_type| Error::UnsupportedType(name.clone(), bad_type))?;
|
.map_err(|bad_type| Error::UnsupportedType(name.clone(), bad_type))?;
|
||||||
|
|
||||||
let (output_type, decode_outputs) = output_params_codegen(&outputs)
|
let (output_type, decode_outputs) = output_params_codegen(&outputs)
|
||||||
@ -113,14 +113,14 @@ pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type},
|
|||||||
U: IntoFuture<Item=Vec<u8>, Error=String>,
|
U: IntoFuture<Item=Vec<u8>, Error=String>,
|
||||||
U::Future: Send + 'static
|
U::Future: Send + 'static
|
||||||
{{
|
{{
|
||||||
|
let call_addr = self.address;
|
||||||
|
let call_future = match self.encode_{snake_name}_input({params_names}) {{
|
||||||
|
Ok(call_data) => (call)(call_addr, call_data),
|
||||||
|
Err(e) => return Box::new(future::err(e)),
|
||||||
|
}};
|
||||||
|
|
||||||
let function = self.contract.function(r#"{abi_name}"#)
|
let function = self.contract.function(r#"{abi_name}"#)
|
||||||
.expect("function existence checked at compile-time; qed").clone();
|
.expect("function existence checked at compile-time; qed").clone();
|
||||||
let call_addr = self.address;
|
|
||||||
|
|
||||||
let call_future = match function.encode_input(&{to_tokens}) {{
|
|
||||||
Ok(call_data) => (call)(call_addr, call_data),
|
|
||||||
Err(e) => return Box::new(future::err(format!("Error encoding call: {{:?}}", e))),
|
|
||||||
}};
|
|
||||||
|
|
||||||
Box::new(call_future
|
Box::new(call_future
|
||||||
.into_future()
|
.into_future()
|
||||||
@ -128,12 +128,22 @@ pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type},
|
|||||||
.map(Vec::into_iter)
|
.map(Vec::into_iter)
|
||||||
.and_then(|mut outputs| {decode_outputs}))
|
.and_then(|mut outputs| {decode_outputs}))
|
||||||
}}
|
}}
|
||||||
|
|
||||||
|
/// Encode "{abi_name}" function arguments.
|
||||||
|
/// Arguments: {abi_inputs:?}
|
||||||
|
pub fn encode_{snake_name}_input(&self, {params}) -> Result<Vec<u8>, String> {{
|
||||||
|
self.contract.function(r#"{abi_name}"#)
|
||||||
|
.expect("function existence checked at compile-time; qed")
|
||||||
|
.encode_input(&{to_tokens})
|
||||||
|
.map_err(|e| format!("Error encoding call: {{:?}}", e))
|
||||||
|
}}
|
||||||
"##,
|
"##,
|
||||||
abi_name = name,
|
abi_name = name,
|
||||||
abi_inputs = inputs,
|
abi_inputs = inputs,
|
||||||
abi_outputs = outputs,
|
abi_outputs = outputs,
|
||||||
snake_name = snake_name,
|
snake_name = snake_name,
|
||||||
params = input_params,
|
params = input_params,
|
||||||
|
params_names = input_names,
|
||||||
output_type = output_type,
|
output_type = output_type,
|
||||||
to_tokens = to_tokens,
|
to_tokens = to_tokens,
|
||||||
decode_outputs = decode_outputs,
|
decode_outputs = decode_outputs,
|
||||||
@ -145,15 +155,17 @@ pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type},
|
|||||||
|
|
||||||
// generate code for params in function signature and turning them into tokens.
|
// generate code for params in function signature and turning them into tokens.
|
||||||
//
|
//
|
||||||
// two pieces of code are generated: the first gives input types for the function signature,
|
// three pieces of code are generated: the first gives input types for the function signature,
|
||||||
// and the second gives code to tokenize those inputs.
|
// the second one gives input parameter names to pass to another method,
|
||||||
|
// and the third gives code to tokenize those inputs.
|
||||||
//
|
//
|
||||||
// params of form `param_0: type_0, param_1: type_1, ...`
|
// params of form `param_0: type_0, param_1: type_1, ...`
|
||||||
// tokenizing code of form `{let mut tokens = Vec::new(); tokens.push({param_X}); tokens }`
|
// tokenizing code of form `{let mut tokens = Vec::new(); tokens.push({param_X}); tokens }`
|
||||||
//
|
//
|
||||||
// returns any unsupported param type encountered.
|
// returns any unsupported param type encountered.
|
||||||
fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String), ParamType> {
|
fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String, String), ParamType> {
|
||||||
let mut params = String::new();
|
let mut params = String::new();
|
||||||
|
let mut params_names = String::new();
|
||||||
let mut to_tokens = "{ let mut tokens = Vec::new();".to_string();
|
let mut to_tokens = "{ let mut tokens = Vec::new();".to_string();
|
||||||
|
|
||||||
for (index, param_type) in inputs.iter().enumerate() {
|
for (index, param_type) in inputs.iter().enumerate() {
|
||||||
@ -164,11 +176,13 @@ fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String), ParamT
|
|||||||
params.push_str(&format!("{}{}: {}, ",
|
params.push_str(&format!("{}{}: {}, ",
|
||||||
if needs_mut { "mut " } else { "" }, param_name, rust_type));
|
if needs_mut { "mut " } else { "" }, param_name, rust_type));
|
||||||
|
|
||||||
|
params_names.push_str(&format!("{}, ", param_name));
|
||||||
|
|
||||||
to_tokens.push_str(&format!("tokens.push({{ {} }});", tokenize_code));
|
to_tokens.push_str(&format!("tokens.push({{ {} }});", tokenize_code));
|
||||||
}
|
}
|
||||||
|
|
||||||
to_tokens.push_str(" tokens }");
|
to_tokens.push_str(" tokens }");
|
||||||
Ok((params, to_tokens))
|
Ok((params, params_names, to_tokens))
|
||||||
}
|
}
|
||||||
|
|
||||||
// generate code for outputs of the function and detokenizing them.
|
// generate code for outputs of the function and detokenizing them.
|
||||||
|
8
ethcore/native_contracts/res/secretstore_service.json
Normal file
8
ethcore/native_contracts/res/secretstore_service.json
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
[
|
||||||
|
{"constant":true,"inputs":[],"name":"serverKeyGenerationRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
|
||||||
|
{"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getServerKeyId","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},
|
||||||
|
{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"},{"name":"v","type":"uint8"},{"name":"r","type":"bytes32"},{"name":"s","type":"bytes32"}],"name":"serverKeyGenerated","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
|
||||||
|
{"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"getServerKeyThreshold","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
|
||||||
|
{"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"authority","type":"address"}],"name":"getServerKeyConfirmationStatus","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},
|
||||||
|
{"anonymous":false,"inputs":[{"indexed":true,"name":"serverKeyId","type":"bytes32"},{"indexed":true,"name":"threshold","type":"uint256"}],"name":"ServerKeyRequested","type":"event"}
|
||||||
|
]
|
@ -28,6 +28,7 @@ mod registry;
|
|||||||
mod urlhint;
|
mod urlhint;
|
||||||
mod service_transaction;
|
mod service_transaction;
|
||||||
mod secretstore_acl_storage;
|
mod secretstore_acl_storage;
|
||||||
|
mod secretstore_service;
|
||||||
mod validator_set;
|
mod validator_set;
|
||||||
mod validator_report;
|
mod validator_report;
|
||||||
mod peer_set;
|
mod peer_set;
|
||||||
@ -40,6 +41,7 @@ pub use self::registry::Registry;
|
|||||||
pub use self::urlhint::Urlhint;
|
pub use self::urlhint::Urlhint;
|
||||||
pub use self::service_transaction::ServiceTransactionChecker;
|
pub use self::service_transaction::ServiceTransactionChecker;
|
||||||
pub use self::secretstore_acl_storage::SecretStoreAclStorage;
|
pub use self::secretstore_acl_storage::SecretStoreAclStorage;
|
||||||
|
pub use self::secretstore_service::SecretStoreService;
|
||||||
pub use self::validator_set::ValidatorSet;
|
pub use self::validator_set::ValidatorSet;
|
||||||
pub use self::validator_report::ValidatorReport;
|
pub use self::validator_report::ValidatorReport;
|
||||||
pub use self::peer_set::PeerSet;
|
pub use self::peer_set::PeerSet;
|
||||||
|
21
ethcore/native_contracts/src/secretstore_service.rs
Normal file
21
ethcore/native_contracts/src/secretstore_service.rs
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
#![allow(unused_mut, unused_variables, unused_imports)]
|
||||||
|
|
||||||
|
//! Secret store service contract.
|
||||||
|
|
||||||
|
include!(concat!(env!("OUT_DIR"), "/secretstore_service.rs"));
|
@ -173,23 +173,36 @@
|
|||||||
"stateRoot": "0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544"
|
"stateRoot": "0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544"
|
||||||
},
|
},
|
||||||
"nodes": [
|
"nodes": [
|
||||||
|
"enode://6a868ced2dec399c53f730261173638a93a40214cf299ccf4d42a76e3fa54701db410669e8006347a4b3a74fa090bb35af0320e4bc8d04cf5b7f582b1db285f5@163.172.131.191:30303",
|
||||||
|
"enode://66a483383882a518fcc59db6c017f9cd13c71261f13c8d7e67ed43adbbc82a932d88d2291f59be577e9425181fc08828dc916fdd053af935a9491edf9d6006ba@212.47.247.103:30303",
|
||||||
|
"enode://cd6611461840543d5b9c56fbf088736154c699c43973b3a1a32390cf27106f87e58a818a606ccb05f3866de95a4fe860786fea71bf891ea95f234480d3022aa3@163.172.157.114:30303",
|
||||||
"enode://5a62f19d35c0da8b576c9414568c728d4744e6e9d436c0f9db27456400011414f515871f13a6b8e0468534b5116cfe765d7630f680f1707a38467940a9f62511@45.55.33.62:30303",
|
"enode://5a62f19d35c0da8b576c9414568c728d4744e6e9d436c0f9db27456400011414f515871f13a6b8e0468534b5116cfe765d7630f680f1707a38467940a9f62511@45.55.33.62:30303",
|
||||||
"enode://605e04a43b1156966b3a3b66b980c87b7f18522f7f712035f84576016be909a2798a438b2b17b1a8c58db314d88539a77419ca4be36148c086900fba487c9d39@188.166.255.12:30303",
|
"enode://605e04a43b1156966b3a3b66b980c87b7f18522f7f712035f84576016be909a2798a438b2b17b1a8c58db314d88539a77419ca4be36148c086900fba487c9d39@188.166.255.12:30303",
|
||||||
"enode://dc72806c3aa8fda207c8c018aba8d6cf143728b3628b6ded8d5e8cdeb8aa05cbd53f710ecd014c9a8f0d1e98f2874bff8afb15a229202f510a9c0258d1f6d109@159.203.210.80:30303",
|
"enode://dc72806c3aa8fda207c8c018aba8d6cf143728b3628b6ded8d5e8cdeb8aa05cbd53f710ecd014c9a8f0d1e98f2874bff8afb15a229202f510a9c0258d1f6d109@159.203.210.80:30303",
|
||||||
|
"enode://aafde2e81e035f417019a80f5342d1cd0e5bce97f83230fc57e1abbb3a9a5d6fb751446040c67261ed422324ffb69214567e181bb4ac0cc6e817451be0eaad1e@52.178.74.216:30303",
|
||||||
|
"enode://460e54d7e9a361d326a9e503b3879c6a1075e1bfb7ea919b512ea1fe841e65f82c5f87af028f14a7825be1c1260825d5326b93b43a5bc72e3214a99e0c4c7bd4@52.230.6.166:30303",
|
||||||
|
"enode://28faaf6b2e86694d8978b8e6986e7813951d7bd25201116fa77de893aabedd2a4a8d5832776905b4c3e616320506516d08239d82aeef4355f6878c3a701a6059@40.71.19.172:30303",
|
||||||
|
"enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303",
|
||||||
|
"enode://3f1d12044546b76342d59d4a05532c14b85aa669704bfe1f864fe079415aa2c02d743e03218e57a33fb94523adb54032871a6c51b2cc5514cb7c7e35b3ed0a99@13.93.211.84:30303",
|
||||||
|
"enode://78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d@191.235.84.50:30303",
|
||||||
|
"enode://158f8aab45f6d19c6cbf4a089c2670541a8da11978a2f90dbf6a502a4a3bab80d288afdbeb7ec0ef6d92de563767f3b1ea9e8e334ca711e9f8e2df5a0385e8e6@13.75.154.138:30303",
|
||||||
|
"enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303",
|
||||||
|
"enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303",
|
||||||
|
"enode://0cc5f5ffb5d9098c8b8c62325f3797f56509bff942704687b6530992ac706e2cb946b90a34f1f19548cd3c7baccbcaea354531e5983c7d1bc0dee16ce4b6440b@40.118.3.223:30305",
|
||||||
|
"enode://1c7a64d76c0334b0418c004af2f67c50e36a3be60b5e4790bdac0439d21603469a85fad36f2473c9a80eb043ae60936df905fa28f1ff614c3e5dc34f15dcd2dc@40.118.3.223:30308",
|
||||||
|
"enode://85c85d7143ae8bb96924f2b54f1b3e70d8c4d367af305325d30a61385a432f247d2c75c45c6b4a60335060d072d7f5b35dd1d4c45f76941f62a4f83b6e75daaf@40.118.3.223:30309",
|
||||||
|
"enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303",
|
||||||
|
"enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303",
|
||||||
|
"enode://4cd540b2c3292e17cff39922e864094bf8b0741fcc8c5dcea14957e389d7944c70278d872902e3d0345927f621547efa659013c400865485ab4bfa0c6596936f@138.201.144.135:30303",
|
||||||
"enode://01f76fa0561eca2b9a7e224378dd854278735f1449793c46ad0c4e79e8775d080c21dcc455be391e90a98153c3b05dcc8935c8440de7b56fe6d67251e33f4e3c@51.15.42.252:30303",
|
"enode://01f76fa0561eca2b9a7e224378dd854278735f1449793c46ad0c4e79e8775d080c21dcc455be391e90a98153c3b05dcc8935c8440de7b56fe6d67251e33f4e3c@51.15.42.252:30303",
|
||||||
"enode://2c9059f05c352b29d559192fe6bca272d965c9f2290632a2cfda7f83da7d2634f3ec45ae3a72c54dd4204926fb8082dcf9686e0d7504257541c86fc8569bcf4b@163.172.171.38:30303",
|
"enode://2c9059f05c352b29d559192fe6bca272d965c9f2290632a2cfda7f83da7d2634f3ec45ae3a72c54dd4204926fb8082dcf9686e0d7504257541c86fc8569bcf4b@163.172.171.38:30303",
|
||||||
"enode://efe4f2493f4aff2d641b1db8366b96ddacfe13e7a6e9c8f8f8cf49f9cdba0fdf3258d8c8f8d0c5db529f8123c8f1d95f36d54d590ca1bb366a5818b9a4ba521c@163.172.187.252:30303",
|
"enode://efe4f2493f4aff2d641b1db8366b96ddacfe13e7a6e9c8f8f8cf49f9cdba0fdf3258d8c8f8d0c5db529f8123c8f1d95f36d54d590ca1bb366a5818b9a4ba521c@163.172.187.252:30303",
|
||||||
"enode://cd6611461840543d5b9c56fbf088736154c699c43973b3a1a32390cf27106f87e58a818a606ccb05f3866de95a4fe860786fea71bf891ea95f234480d3022aa3@163.172.157.114:30303",
|
|
||||||
"enode://bcc7240543fe2cf86f5e9093d05753dd83343f8fda7bf0e833f65985c73afccf8f981301e13ef49c4804491eab043647374df1c4adf85766af88a624ecc3330e@136.243.154.244:30303",
|
"enode://bcc7240543fe2cf86f5e9093d05753dd83343f8fda7bf0e833f65985c73afccf8f981301e13ef49c4804491eab043647374df1c4adf85766af88a624ecc3330e@136.243.154.244:30303",
|
||||||
"enode://ed4227681ca8c70beb2277b9e870353a9693f12e7c548c35df6bca6a956934d6f659999c2decb31f75ce217822eefca149ace914f1cbe461ed5a2ebaf9501455@88.212.206.70:30303",
|
"enode://ed4227681ca8c70beb2277b9e870353a9693f12e7c548c35df6bca6a956934d6f659999c2decb31f75ce217822eefca149ace914f1cbe461ed5a2ebaf9501455@88.212.206.70:30303",
|
||||||
"enode://cadc6e573b6bc2a9128f2f635ac0db3353e360b56deef239e9be7e7fce039502e0ec670b595f6288c0d2116812516ad6b6ff8d5728ff45eba176989e40dead1e@37.128.191.230:30303",
|
"enode://cadc6e573b6bc2a9128f2f635ac0db3353e360b56deef239e9be7e7fce039502e0ec670b595f6288c0d2116812516ad6b6ff8d5728ff45eba176989e40dead1e@37.128.191.230:30303",
|
||||||
"enode://595a9a06f8b9bc9835c8723b6a82105aea5d55c66b029b6d44f229d6d135ac3ecdd3e9309360a961ea39d7bee7bac5d03564077a4e08823acc723370aace65ec@46.20.235.22:30303",
|
"enode://595a9a06f8b9bc9835c8723b6a82105aea5d55c66b029b6d44f229d6d135ac3ecdd3e9309360a961ea39d7bee7bac5d03564077a4e08823acc723370aace65ec@46.20.235.22:30303",
|
||||||
"enode://029178d6d6f9f8026fc0bc17d5d1401aac76ec9d86633bba2320b5eed7b312980c0a210b74b20c4f9a8b0b2bf884b111fa9ea5c5f916bb9bbc0e0c8640a0f56c@216.158.85.185:30303",
|
"enode://029178d6d6f9f8026fc0bc17d5d1401aac76ec9d86633bba2320b5eed7b312980c0a210b74b20c4f9a8b0b2bf884b111fa9ea5c5f916bb9bbc0e0c8640a0f56c@216.158.85.185:30303",
|
||||||
"enode://fdd1b9bb613cfbc200bba17ce199a9490edc752a833f88d4134bf52bb0d858aa5524cb3ec9366c7a4ef4637754b8b15b5dc913e4ed9fdb6022f7512d7b63f181@212.47.247.103:30303",
|
"enode://fdd1b9bb613cfbc200bba17ce199a9490edc752a833f88d4134bf52bb0d858aa5524cb3ec9366c7a4ef4637754b8b15b5dc913e4ed9fdb6022f7512d7b63f181@212.47.247.103:30303"
|
||||||
"enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303",
|
|
||||||
"enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303",
|
|
||||||
"enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303",
|
|
||||||
"enode://4cd540b2c3292e17cff39922e864094bf8b0741fcc8c5dcea14957e389d7944c70278d872902e3d0345927f621547efa659013c400865485ab4bfa0c6596936f@138.201.144.135:30303"
|
|
||||||
],
|
],
|
||||||
"accounts": {
|
"accounts": {
|
||||||
"0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
|
"0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
|
||||||
|
@ -466,9 +466,9 @@ usage! {
|
|||||||
"--jsonrpc-threads=[THREADS]",
|
"--jsonrpc-threads=[THREADS]",
|
||||||
"Turn on additional processing threads in all RPC servers. Setting this to non-zero value allows parallel cpu-heavy queries execution.",
|
"Turn on additional processing threads in all RPC servers. Setting this to non-zero value allows parallel cpu-heavy queries execution.",
|
||||||
|
|
||||||
ARG arg_jsonrpc_cors: (Option<String>) = None, or |c: &Config| otry!(c.rpc).cors.clone(),
|
ARG arg_jsonrpc_cors: (String) = "none", or |c: &Config| otry!(c.rpc).cors.as_ref().map(|vec| vec.join(",")),
|
||||||
"--jsonrpc-cors=[URL]",
|
"--jsonrpc-cors=[URL]",
|
||||||
"Specify CORS header for JSON-RPC API responses.",
|
"Specify CORS header for JSON-RPC API responses. Special options: \"all\", \"none\".",
|
||||||
|
|
||||||
ARG arg_jsonrpc_server_threads: (Option<usize>) = None, or |c: &Config| otry!(c.rpc).server_threads,
|
ARG arg_jsonrpc_server_threads: (Option<usize>) = None, or |c: &Config| otry!(c.rpc).server_threads,
|
||||||
"--jsonrpc-server-threads=[NUM]",
|
"--jsonrpc-server-threads=[NUM]",
|
||||||
@ -538,9 +538,9 @@ usage! {
|
|||||||
"--ipfs-api-hosts=[HOSTS]",
|
"--ipfs-api-hosts=[HOSTS]",
|
||||||
"List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: \"all\", \"none\".",
|
"List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: \"all\", \"none\".",
|
||||||
|
|
||||||
ARG arg_ipfs_api_cors: (Option<String>) = None, or |c: &Config| otry!(c.ipfs).cors.clone(),
|
ARG arg_ipfs_api_cors: (String) = "none", or |c: &Config| otry!(c.ipfs).cors.as_ref().map(|vec| vec.join(",")),
|
||||||
"--ipfs-api-cors=[URL]",
|
"--ipfs-api-cors=[URL]",
|
||||||
"Specify CORS header for IPFS API responses.",
|
"Specify CORS header for IPFS API responses. Special options: \"all\", \"none\".",
|
||||||
|
|
||||||
["Secret store options"]
|
["Secret store options"]
|
||||||
FLAG flag_no_secretstore: (bool) = false, or |c: &Config| otry!(c.secretstore).disable.clone(),
|
FLAG flag_no_secretstore: (bool) = false, or |c: &Config| otry!(c.secretstore).disable.clone(),
|
||||||
@ -555,6 +555,10 @@ usage! {
|
|||||||
"--no-acl-check",
|
"--no-acl-check",
|
||||||
"Disable ACL check (useful for test environments).",
|
"Disable ACL check (useful for test environments).",
|
||||||
|
|
||||||
|
ARG arg_secretstore_contract: (String) = "none", or |c: &Config| otry!(c.secretstore).service_contract.clone(),
|
||||||
|
"--secretstore-contract=[SOURCE]",
|
||||||
|
"Secret Store Service contract address source: none, registry (contract address is read from registry) or address.",
|
||||||
|
|
||||||
ARG arg_secretstore_nodes: (String) = "", or |c: &Config| otry!(c.secretstore).nodes.as_ref().map(|vec| vec.join(",")),
|
ARG arg_secretstore_nodes: (String) = "", or |c: &Config| otry!(c.secretstore).nodes.as_ref().map(|vec| vec.join(",")),
|
||||||
"--secretstore-nodes=[NODES]",
|
"--secretstore-nodes=[NODES]",
|
||||||
"Comma-separated list of other secret store cluster nodes in form NODE_PUBLIC_KEY_IN_HEX@NODE_IP_ADDR:NODE_PORT.",
|
"Comma-separated list of other secret store cluster nodes in form NODE_PUBLIC_KEY_IN_HEX@NODE_IP_ADDR:NODE_PORT.",
|
||||||
@ -1052,7 +1056,7 @@ struct Rpc {
|
|||||||
disable: Option<bool>,
|
disable: Option<bool>,
|
||||||
port: Option<u16>,
|
port: Option<u16>,
|
||||||
interface: Option<String>,
|
interface: Option<String>,
|
||||||
cors: Option<String>,
|
cors: Option<Vec<String>>,
|
||||||
apis: Option<Vec<String>>,
|
apis: Option<Vec<String>>,
|
||||||
hosts: Option<Vec<String>>,
|
hosts: Option<Vec<String>>,
|
||||||
server_threads: Option<usize>,
|
server_threads: Option<usize>,
|
||||||
@ -1093,6 +1097,7 @@ struct SecretStore {
|
|||||||
disable: Option<bool>,
|
disable: Option<bool>,
|
||||||
disable_http: Option<bool>,
|
disable_http: Option<bool>,
|
||||||
disable_acl_check: Option<bool>,
|
disable_acl_check: Option<bool>,
|
||||||
|
service_contract: Option<String>,
|
||||||
self_secret: Option<String>,
|
self_secret: Option<String>,
|
||||||
admin_public: Option<String>,
|
admin_public: Option<String>,
|
||||||
nodes: Option<Vec<String>>,
|
nodes: Option<Vec<String>>,
|
||||||
@ -1108,7 +1113,7 @@ struct Ipfs {
|
|||||||
enable: Option<bool>,
|
enable: Option<bool>,
|
||||||
port: Option<u16>,
|
port: Option<u16>,
|
||||||
interface: Option<String>,
|
interface: Option<String>,
|
||||||
cors: Option<String>,
|
cors: Option<Vec<String>>,
|
||||||
hosts: Option<Vec<String>>,
|
hosts: Option<Vec<String>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1468,7 +1473,7 @@ mod tests {
|
|||||||
flag_no_jsonrpc: false,
|
flag_no_jsonrpc: false,
|
||||||
arg_jsonrpc_port: 8545u16,
|
arg_jsonrpc_port: 8545u16,
|
||||||
arg_jsonrpc_interface: "local".into(),
|
arg_jsonrpc_interface: "local".into(),
|
||||||
arg_jsonrpc_cors: Some("null".into()),
|
arg_jsonrpc_cors: "null".into(),
|
||||||
arg_jsonrpc_apis: "web3,eth,net,parity,traces,rpc,secretstore".into(),
|
arg_jsonrpc_apis: "web3,eth,net,parity,traces,rpc,secretstore".into(),
|
||||||
arg_jsonrpc_hosts: "none".into(),
|
arg_jsonrpc_hosts: "none".into(),
|
||||||
arg_jsonrpc_server_threads: None,
|
arg_jsonrpc_server_threads: None,
|
||||||
@ -1494,6 +1499,7 @@ mod tests {
|
|||||||
flag_no_secretstore: false,
|
flag_no_secretstore: false,
|
||||||
flag_no_secretstore_http: false,
|
flag_no_secretstore_http: false,
|
||||||
flag_no_secretstore_acl_check: false,
|
flag_no_secretstore_acl_check: false,
|
||||||
|
arg_secretstore_contract: "none".into(),
|
||||||
arg_secretstore_secret: None,
|
arg_secretstore_secret: None,
|
||||||
arg_secretstore_admin_public: None,
|
arg_secretstore_admin_public: None,
|
||||||
arg_secretstore_nodes: "".into(),
|
arg_secretstore_nodes: "".into(),
|
||||||
@ -1507,7 +1513,7 @@ mod tests {
|
|||||||
flag_ipfs_api: false,
|
flag_ipfs_api: false,
|
||||||
arg_ipfs_api_port: 5001u16,
|
arg_ipfs_api_port: 5001u16,
|
||||||
arg_ipfs_api_interface: "local".into(),
|
arg_ipfs_api_interface: "local".into(),
|
||||||
arg_ipfs_api_cors: Some("null".into()),
|
arg_ipfs_api_cors: "null".into(),
|
||||||
arg_ipfs_api_hosts: "none".into(),
|
arg_ipfs_api_hosts: "none".into(),
|
||||||
|
|
||||||
// -- Sealing/Mining Options
|
// -- Sealing/Mining Options
|
||||||
@ -1737,6 +1743,7 @@ mod tests {
|
|||||||
disable: None,
|
disable: None,
|
||||||
disable_http: None,
|
disable_http: None,
|
||||||
disable_acl_check: None,
|
disable_acl_check: None,
|
||||||
|
service_contract: None,
|
||||||
self_secret: None,
|
self_secret: None,
|
||||||
admin_public: None,
|
admin_public: None,
|
||||||
nodes: None,
|
nodes: None,
|
||||||
|
@ -49,7 +49,7 @@ reserved_peers = "./path_to_file"
|
|||||||
disable = false
|
disable = false
|
||||||
port = 8545
|
port = 8545
|
||||||
interface = "local"
|
interface = "local"
|
||||||
cors = "null"
|
cors = ["null"]
|
||||||
apis = ["web3", "eth", "net", "parity", "traces", "rpc", "secretstore"]
|
apis = ["web3", "eth", "net", "parity", "traces", "rpc", "secretstore"]
|
||||||
hosts = ["none"]
|
hosts = ["none"]
|
||||||
|
|
||||||
@ -80,6 +80,7 @@ pass = "test_pass"
|
|||||||
disable = false
|
disable = false
|
||||||
disable_http = false
|
disable_http = false
|
||||||
disable_acl_check = false
|
disable_acl_check = false
|
||||||
|
service_contract = "none"
|
||||||
nodes = []
|
nodes = []
|
||||||
http_interface = "local"
|
http_interface = "local"
|
||||||
http_port = 8082
|
http_port = 8082
|
||||||
@ -91,7 +92,7 @@ path = "$HOME/.parity/secretstore"
|
|||||||
enable = false
|
enable = false
|
||||||
port = 5001
|
port = 5001
|
||||||
interface = "local"
|
interface = "local"
|
||||||
cors = "null"
|
cors = ["null"]
|
||||||
hosts = ["none"]
|
hosts = ["none"]
|
||||||
|
|
||||||
[mining]
|
[mining]
|
||||||
|
@ -47,7 +47,7 @@ use ethcore_logger::Config as LogConfig;
|
|||||||
use dir::{self, Directories, default_hypervisor_path, default_local_path, default_data_path};
|
use dir::{self, Directories, default_hypervisor_path, default_local_path, default_data_path};
|
||||||
use dapps::Configuration as DappsConfiguration;
|
use dapps::Configuration as DappsConfiguration;
|
||||||
use ipfs::Configuration as IpfsConfiguration;
|
use ipfs::Configuration as IpfsConfiguration;
|
||||||
use secretstore::{Configuration as SecretStoreConfiguration, NodeSecretKey};
|
use secretstore::{NodeSecretKey, Configuration as SecretStoreConfiguration, ContractAddress as SecretStoreContractAddress};
|
||||||
use updater::{UpdatePolicy, UpdateFilter, ReleaseTrack};
|
use updater::{UpdatePolicy, UpdateFilter, ReleaseTrack};
|
||||||
use run::RunCmd;
|
use run::RunCmd;
|
||||||
use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, KillBlockchain, ExportState, DataFormat};
|
use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, KillBlockchain, ExportState, DataFormat};
|
||||||
@ -609,6 +609,7 @@ impl Configuration {
|
|||||||
enabled: self.secretstore_enabled(),
|
enabled: self.secretstore_enabled(),
|
||||||
http_enabled: self.secretstore_http_enabled(),
|
http_enabled: self.secretstore_http_enabled(),
|
||||||
acl_check_enabled: self.secretstore_acl_check_enabled(),
|
acl_check_enabled: self.secretstore_acl_check_enabled(),
|
||||||
|
service_contract_address: self.secretstore_service_contract_address()?,
|
||||||
self_secret: self.secretstore_self_secret()?,
|
self_secret: self.secretstore_self_secret()?,
|
||||||
nodes: self.secretstore_nodes()?,
|
nodes: self.secretstore_nodes()?,
|
||||||
interface: self.secretstore_interface(),
|
interface: self.secretstore_interface(),
|
||||||
@ -776,13 +777,19 @@ impl Configuration {
|
|||||||
apis.join(",")
|
apis.join(",")
|
||||||
}
|
}
|
||||||
|
|
||||||
fn cors(cors: Option<&String>) -> Option<Vec<String>> {
|
fn cors(cors: &str) -> Option<Vec<String>> {
|
||||||
cors.map(|ref c| c.split(',').map(Into::into).collect())
|
match cors {
|
||||||
|
"none" => return Some(Vec::new()),
|
||||||
|
"*" | "all" | "any" => return None,
|
||||||
|
_ => {},
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(cors.split(',').map(Into::into).collect())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn rpc_cors(&self) -> Option<Vec<String>> {
|
fn rpc_cors(&self) -> Option<Vec<String>> {
|
||||||
let cors = self.args.arg_jsonrpc_cors.as_ref().or(self.args.arg_rpccorsdomain.as_ref());
|
let cors = self.args.arg_rpccorsdomain.clone().unwrap_or_else(|| self.args.arg_jsonrpc_cors.to_owned());
|
||||||
Self::cors(cors)
|
Self::cors(&cors)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn ipfs_cors(&self) -> Option<Vec<String>> {
|
fn ipfs_cors(&self) -> Option<Vec<String>> {
|
||||||
@ -1080,6 +1087,14 @@ impl Configuration {
|
|||||||
!self.args.flag_no_secretstore_acl_check
|
!self.args.flag_no_secretstore_acl_check
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn secretstore_service_contract_address(&self) -> Result<Option<SecretStoreContractAddress>, String> {
|
||||||
|
Ok(match self.args.arg_secretstore_contract.as_ref() {
|
||||||
|
"none" => None,
|
||||||
|
"registry" => Some(SecretStoreContractAddress::Registry),
|
||||||
|
a => Some(SecretStoreContractAddress::Address(a.parse().map_err(|e| format!("{}", e))?)),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
fn ui_enabled(&self) -> bool {
|
fn ui_enabled(&self) -> bool {
|
||||||
if self.args.flag_force_ui {
|
if self.args.flag_force_ui {
|
||||||
return true;
|
return true;
|
||||||
@ -1459,7 +1474,7 @@ mod tests {
|
|||||||
assert_eq!(net.rpc_enabled, true);
|
assert_eq!(net.rpc_enabled, true);
|
||||||
assert_eq!(net.rpc_interface, "0.0.0.0".to_owned());
|
assert_eq!(net.rpc_interface, "0.0.0.0".to_owned());
|
||||||
assert_eq!(net.rpc_port, 8000);
|
assert_eq!(net.rpc_port, 8000);
|
||||||
assert_eq!(conf.rpc_cors(), Some(vec!["*".to_owned()]));
|
assert_eq!(conf.rpc_cors(), None);
|
||||||
assert_eq!(conf.rpc_apis(), "web3,eth".to_owned());
|
assert_eq!(conf.rpc_apis(), "web3,eth".to_owned());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1526,8 +1541,8 @@ mod tests {
|
|||||||
let conf2 = parse(&["parity", "--ipfs-api-cors", "http://parity.io,http://something.io"]);
|
let conf2 = parse(&["parity", "--ipfs-api-cors", "http://parity.io,http://something.io"]);
|
||||||
|
|
||||||
// then
|
// then
|
||||||
assert_eq!(conf0.ipfs_cors(), None);
|
assert_eq!(conf0.ipfs_cors(), Some(vec![]));
|
||||||
assert_eq!(conf1.ipfs_cors(), Some(vec!["*".into()]));
|
assert_eq!(conf1.ipfs_cors(), None);
|
||||||
assert_eq!(conf2.ipfs_cors(), Some(vec!["http://parity.io".into(),"http://something.io".into()]));
|
assert_eq!(conf2.ipfs_cors(), Some(vec!["http://parity.io".into(),"http://something.io".into()]));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -34,8 +34,8 @@ impl Default for Configuration {
|
|||||||
enabled: false,
|
enabled: false,
|
||||||
port: 5001,
|
port: 5001,
|
||||||
interface: "127.0.0.1".into(),
|
interface: "127.0.0.1".into(),
|
||||||
cors: None,
|
cors: Some(vec![]),
|
||||||
hosts: Some(Vec::new()),
|
hosts: Some(vec![]),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -60,8 +60,8 @@ impl Default for HttpConfiguration {
|
|||||||
interface: "127.0.0.1".into(),
|
interface: "127.0.0.1".into(),
|
||||||
port: 8545,
|
port: 8545,
|
||||||
apis: ApiSet::UnsafeContext,
|
apis: ApiSet::UnsafeContext,
|
||||||
cors: None,
|
cors: Some(vec![]),
|
||||||
hosts: Some(Vec::new()),
|
hosts: Some(vec![]),
|
||||||
server_threads: 1,
|
server_threads: 1,
|
||||||
processing_threads: 4,
|
processing_threads: 4,
|
||||||
}
|
}
|
||||||
@ -99,7 +99,7 @@ impl From<UiConfiguration> for HttpConfiguration {
|
|||||||
interface: conf.interface,
|
interface: conf.interface,
|
||||||
port: conf.port,
|
port: conf.port,
|
||||||
apis: rpc_apis::ApiSet::UnsafeContext,
|
apis: rpc_apis::ApiSet::UnsafeContext,
|
||||||
cors: None,
|
cors: Some(vec![]),
|
||||||
hosts: conf.hosts,
|
hosts: conf.hosts,
|
||||||
server_threads: 1,
|
server_threads: 1,
|
||||||
processing_threads: 0,
|
processing_threads: 0,
|
||||||
|
@ -785,6 +785,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
|
|||||||
// secret store key server
|
// secret store key server
|
||||||
let secretstore_deps = secretstore::Dependencies {
|
let secretstore_deps = secretstore::Dependencies {
|
||||||
client: client.clone(),
|
client: client.clone(),
|
||||||
|
sync: sync_provider.clone(),
|
||||||
account_provider: account_provider,
|
account_provider: account_provider,
|
||||||
accounts_passwords: &passwords,
|
accounts_passwords: &passwords,
|
||||||
};
|
};
|
||||||
|
@ -21,10 +21,11 @@ use dir::helpers::replace_home;
|
|||||||
use ethcore::account_provider::AccountProvider;
|
use ethcore::account_provider::AccountProvider;
|
||||||
use ethcore::client::Client;
|
use ethcore::client::Client;
|
||||||
use ethkey::{Secret, Public};
|
use ethkey::{Secret, Public};
|
||||||
|
use ethsync::SyncProvider;
|
||||||
use util::Address;
|
use util::Address;
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
/// This node secret key.
|
/// This node secret key.
|
||||||
|
#[derive(Debug, PartialEq, Clone)]
|
||||||
pub enum NodeSecretKey {
|
pub enum NodeSecretKey {
|
||||||
/// Stored as plain text in configuration file.
|
/// Stored as plain text in configuration file.
|
||||||
Plain(Secret),
|
Plain(Secret),
|
||||||
@ -32,6 +33,15 @@ pub enum NodeSecretKey {
|
|||||||
KeyStore(Address),
|
KeyStore(Address),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Secret store service contract address.
|
||||||
|
#[derive(Debug, PartialEq, Clone)]
|
||||||
|
pub enum ContractAddress {
|
||||||
|
/// Contract address is read from registry.
|
||||||
|
Registry,
|
||||||
|
/// Contract address is specified.
|
||||||
|
Address(Address),
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
#[derive(Debug, PartialEq, Clone)]
|
||||||
/// Secret store configuration
|
/// Secret store configuration
|
||||||
pub struct Configuration {
|
pub struct Configuration {
|
||||||
@ -41,6 +51,8 @@ pub struct Configuration {
|
|||||||
pub http_enabled: bool,
|
pub http_enabled: bool,
|
||||||
/// Is ACL check enabled.
|
/// Is ACL check enabled.
|
||||||
pub acl_check_enabled: bool,
|
pub acl_check_enabled: bool,
|
||||||
|
/// Service contract address.
|
||||||
|
pub service_contract_address: Option<ContractAddress>,
|
||||||
/// This node secret.
|
/// This node secret.
|
||||||
pub self_secret: Option<NodeSecretKey>,
|
pub self_secret: Option<NodeSecretKey>,
|
||||||
/// Other nodes IDs + addresses.
|
/// Other nodes IDs + addresses.
|
||||||
@ -63,6 +75,8 @@ pub struct Configuration {
|
|||||||
pub struct Dependencies<'a> {
|
pub struct Dependencies<'a> {
|
||||||
/// Blockchain client.
|
/// Blockchain client.
|
||||||
pub client: Arc<Client>,
|
pub client: Arc<Client>,
|
||||||
|
/// Sync provider.
|
||||||
|
pub sync: Arc<SyncProvider>,
|
||||||
/// Account provider.
|
/// Account provider.
|
||||||
pub account_provider: Arc<AccountProvider>,
|
pub account_provider: Arc<AccountProvider>,
|
||||||
/// Passed accounts passwords.
|
/// Passed accounts passwords.
|
||||||
@ -90,7 +104,7 @@ mod server {
|
|||||||
use ethcore_secretstore;
|
use ethcore_secretstore;
|
||||||
use ethkey::KeyPair;
|
use ethkey::KeyPair;
|
||||||
use ansi_term::Colour::Red;
|
use ansi_term::Colour::Red;
|
||||||
use super::{Configuration, Dependencies, NodeSecretKey};
|
use super::{Configuration, Dependencies, NodeSecretKey, ContractAddress};
|
||||||
|
|
||||||
/// Key server
|
/// Key server
|
||||||
pub struct KeyServer {
|
pub struct KeyServer {
|
||||||
@ -134,6 +148,10 @@ mod server {
|
|||||||
address: conf.http_interface.clone(),
|
address: conf.http_interface.clone(),
|
||||||
port: conf.http_port,
|
port: conf.http_port,
|
||||||
}) } else { None },
|
}) } else { None },
|
||||||
|
service_contract_address: conf.service_contract_address.map(|c| match c {
|
||||||
|
ContractAddress::Registry => ethcore_secretstore::ContractAddress::Registry,
|
||||||
|
ContractAddress::Address(address) => ethcore_secretstore::ContractAddress::Address(address),
|
||||||
|
}),
|
||||||
data_path: conf.data_path.clone(),
|
data_path: conf.data_path.clone(),
|
||||||
acl_check_enabled: conf.acl_check_enabled,
|
acl_check_enabled: conf.acl_check_enabled,
|
||||||
cluster_config: ethcore_secretstore::ClusterConfiguration {
|
cluster_config: ethcore_secretstore::ClusterConfiguration {
|
||||||
@ -153,7 +171,7 @@ mod server {
|
|||||||
|
|
||||||
cconf.cluster_config.nodes.insert(self_secret.public().clone(), cconf.cluster_config.listener_address.clone());
|
cconf.cluster_config.nodes.insert(self_secret.public().clone(), cconf.cluster_config.listener_address.clone());
|
||||||
|
|
||||||
let key_server = ethcore_secretstore::start(deps.client, self_secret, cconf)
|
let key_server = ethcore_secretstore::start(deps.client, deps.sync, self_secret, cconf)
|
||||||
.map_err(|e| format!("Error starting KeyServer {}: {}", key_server_name, e))?;
|
.map_err(|e| format!("Error starting KeyServer {}: {}", key_server_name, e))?;
|
||||||
|
|
||||||
Ok(KeyServer {
|
Ok(KeyServer {
|
||||||
@ -172,6 +190,7 @@ impl Default for Configuration {
|
|||||||
enabled: true,
|
enabled: true,
|
||||||
http_enabled: true,
|
http_enabled: true,
|
||||||
acl_check_enabled: true,
|
acl_check_enabled: true,
|
||||||
|
service_contract_address: None,
|
||||||
self_secret: None,
|
self_secret: None,
|
||||||
admin_public: None,
|
admin_public: None,
|
||||||
nodes: BTreeMap::new(),
|
nodes: BTreeMap::new(),
|
||||||
|
@ -26,6 +26,7 @@ ethcore = { path = "../ethcore" }
|
|||||||
ethcore-bytes = { path = "../util/bytes" }
|
ethcore-bytes = { path = "../util/bytes" }
|
||||||
ethcore-util = { path = "../util" }
|
ethcore-util = { path = "../util" }
|
||||||
ethcore-bigint = { path = "../util/bigint" }
|
ethcore-bigint = { path = "../util/bigint" }
|
||||||
|
ethsync = { path = "../sync" }
|
||||||
kvdb = { path = "../util/kvdb" }
|
kvdb = { path = "../util/kvdb" }
|
||||||
kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }
|
kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }
|
||||||
keccak-hash = { path = "../util/hash" }
|
keccak-hash = { path = "../util/hash" }
|
||||||
|
@ -14,16 +14,17 @@
|
|||||||
// You should have received a copy of the GNU General Public License
|
// You should have received a copy of the GNU General Public License
|
||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
use std::sync::{Arc, Weak};
|
use std::sync::Arc;
|
||||||
use std::collections::{HashMap, HashSet};
|
use std::collections::{HashMap, HashSet};
|
||||||
use futures::{future, Future};
|
use futures::{future, Future};
|
||||||
use parking_lot::{Mutex, RwLock};
|
use parking_lot::{Mutex, RwLock};
|
||||||
use ethkey::public_to_address;
|
use ethkey::public_to_address;
|
||||||
use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify};
|
use ethcore::client::{BlockChainClient, BlockId, ChainNotify};
|
||||||
use native_contracts::SecretStoreAclStorage;
|
use native_contracts::SecretStoreAclStorage;
|
||||||
use bigint::hash::H256;
|
use bigint::hash::H256;
|
||||||
use util::Address;
|
use util::Address;
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
|
use trusted_client::TrustedClient;
|
||||||
use types::all::{Error, ServerKeyId, Public};
|
use types::all::{Error, ServerKeyId, Public};
|
||||||
|
|
||||||
const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker";
|
const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker";
|
||||||
@ -43,7 +44,7 @@ pub struct OnChainAclStorage {
|
|||||||
/// Cached on-chain ACL storage contract.
|
/// Cached on-chain ACL storage contract.
|
||||||
struct CachedContract {
|
struct CachedContract {
|
||||||
/// Blockchain client.
|
/// Blockchain client.
|
||||||
client: Weak<Client>,
|
client: TrustedClient,
|
||||||
/// Contract address.
|
/// Contract address.
|
||||||
contract_addr: Option<Address>,
|
contract_addr: Option<Address>,
|
||||||
/// Contract at given address.
|
/// Contract at given address.
|
||||||
@ -57,12 +58,15 @@ pub struct DummyAclStorage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl OnChainAclStorage {
|
impl OnChainAclStorage {
|
||||||
pub fn new(client: &Arc<Client>) -> Arc<Self> {
|
pub fn new(trusted_client: TrustedClient) -> Result<Arc<Self>, Error> {
|
||||||
|
let client = trusted_client.get_untrusted();
|
||||||
let acl_storage = Arc::new(OnChainAclStorage {
|
let acl_storage = Arc::new(OnChainAclStorage {
|
||||||
contract: Mutex::new(CachedContract::new(client)),
|
contract: Mutex::new(CachedContract::new(trusted_client)),
|
||||||
});
|
});
|
||||||
client.add_notify(acl_storage.clone());
|
client
|
||||||
acl_storage
|
.ok_or(Error::Internal("Constructing OnChainAclStorage without active Client".into()))?
|
||||||
|
.add_notify(acl_storage.clone());
|
||||||
|
Ok(acl_storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -81,16 +85,16 @@ impl ChainNotify for OnChainAclStorage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl CachedContract {
|
impl CachedContract {
|
||||||
pub fn new(client: &Arc<Client>) -> Self {
|
pub fn new(client: TrustedClient) -> Self {
|
||||||
CachedContract {
|
CachedContract {
|
||||||
client: Arc::downgrade(client),
|
client: client,
|
||||||
contract_addr: None,
|
contract_addr: None,
|
||||||
contract: None,
|
contract: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn update(&mut self) {
|
pub fn update(&mut self) {
|
||||||
if let Some(client) = self.client.upgrade() {
|
if let Some(client) = self.client.get() {
|
||||||
let new_contract_addr = client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned());
|
let new_contract_addr = client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned());
|
||||||
if self.contract_addr.as_ref() != new_contract_addr.as_ref() {
|
if self.contract_addr.as_ref() != new_contract_addr.as_ref() {
|
||||||
self.contract = new_contract_addr.map(|contract_addr| {
|
self.contract = new_contract_addr.map(|contract_addr| {
|
||||||
@ -105,20 +109,21 @@ impl CachedContract {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn check(&mut self, public: &Public, document: &ServerKeyId) -> Result<bool, Error> {
|
pub fn check(&mut self, public: &Public, document: &ServerKeyId) -> Result<bool, Error> {
|
||||||
|
if let Some(client) = self.client.get() {
|
||||||
|
// call contract to check accesss
|
||||||
match self.contract.as_ref() {
|
match self.contract.as_ref() {
|
||||||
Some(contract) => {
|
Some(contract) => {
|
||||||
let address = public_to_address(&public);
|
let address = public_to_address(&public);
|
||||||
let do_call = |a, d| future::done(
|
let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d));
|
||||||
self.client
|
|
||||||
.upgrade()
|
|
||||||
.ok_or_else(|| "Calling contract without client".into())
|
|
||||||
.and_then(|c| c.call_contract(BlockId::Latest, a, d)));
|
|
||||||
contract.check_permissions(do_call, address, document.clone())
|
contract.check_permissions(do_call, address, document.clone())
|
||||||
.map_err(|err| Error::Internal(err))
|
.map_err(|err| Error::Internal(err))
|
||||||
.wait()
|
.wait()
|
||||||
},
|
},
|
||||||
None => Err(Error::Internal("ACL checker contract is not configured".to_owned())),
|
None => Err(Error::Internal("ACL checker contract is not configured".to_owned())),
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
Err(Error::Internal("Calling ACL contract without trusted blockchain client".into()))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -53,7 +53,6 @@ impl KeyServerImpl {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Get cluster client reference.
|
/// Get cluster client reference.
|
||||||
#[cfg(test)]
|
|
||||||
pub fn cluster(&self) -> Arc<ClusterClient> {
|
pub fn cluster(&self) -> Arc<ClusterClient> {
|
||||||
self.data.lock().cluster.clone()
|
self.data.lock().cluster.clone()
|
||||||
}
|
}
|
||||||
@ -65,7 +64,9 @@ impl AdminSessionsServer for KeyServerImpl {
|
|||||||
fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet<NodeId>) -> Result<(), Error> {
|
fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet<NodeId>) -> Result<(), Error> {
|
||||||
let servers_set_change_session = self.data.lock().cluster
|
let servers_set_change_session = self.data.lock().cluster
|
||||||
.new_servers_set_change_session(None, new_servers_set, old_set_signature, new_set_signature)?;
|
.new_servers_set_change_session(None, new_servers_set, old_set_signature, new_set_signature)?;
|
||||||
servers_set_change_session.wait().map_err(Into::into)
|
servers_set_change_session.as_servers_set_change()
|
||||||
|
.expect("new_servers_set_change_session creates servers_set_change_session; qed")
|
||||||
|
.wait().map_err(Into::into)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -203,6 +204,7 @@ pub mod tests {
|
|||||||
use std::collections::BTreeSet;
|
use std::collections::BTreeSet;
|
||||||
use std::time;
|
use std::time;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
use ethcrypto;
|
use ethcrypto;
|
||||||
@ -218,7 +220,10 @@ pub mod tests {
|
|||||||
use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer};
|
use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer};
|
||||||
use super::KeyServerImpl;
|
use super::KeyServerImpl;
|
||||||
|
|
||||||
pub struct DummyKeyServer;
|
#[derive(Default)]
|
||||||
|
pub struct DummyKeyServer {
|
||||||
|
pub generation_requests_count: AtomicUsize,
|
||||||
|
}
|
||||||
|
|
||||||
impl KeyServer for DummyKeyServer {}
|
impl KeyServer for DummyKeyServer {}
|
||||||
|
|
||||||
@ -230,7 +235,8 @@ pub mod tests {
|
|||||||
|
|
||||||
impl ServerKeyGenerator for DummyKeyServer {
|
impl ServerKeyGenerator for DummyKeyServer {
|
||||||
fn generate_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _threshold: usize) -> Result<Public, Error> {
|
fn generate_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _threshold: usize) -> Result<Public, Error> {
|
||||||
unimplemented!()
|
self.generation_requests_count.fetch_add(1, Ordering::Relaxed);
|
||||||
|
Err(Error::Internal("test error".into()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -31,16 +31,6 @@ use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
|||||||
/// Number of versions sent in single message.
|
/// Number of versions sent in single message.
|
||||||
const VERSIONS_PER_MESSAGE: usize = 32;
|
const VERSIONS_PER_MESSAGE: usize = 32;
|
||||||
|
|
||||||
/// Key version negotiation session API.
|
|
||||||
pub trait Session: Send + Sync + 'static {
|
|
||||||
/// Set continue action.
|
|
||||||
fn set_continue_action(&self, action: ContinueAction);
|
|
||||||
/// Get continue action.
|
|
||||||
fn continue_action(&self) -> Option<ContinueAction>;
|
|
||||||
/// Wait until session is completed.
|
|
||||||
fn wait(&self) -> Result<(H256, NodeId), Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Key version negotiation transport.
|
/// Key version negotiation transport.
|
||||||
pub trait SessionTransport {
|
pub trait SessionTransport {
|
||||||
/// Send message to given node.
|
/// Send message to given node.
|
||||||
@ -196,6 +186,21 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
|||||||
.clone())
|
.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Set continue action.
|
||||||
|
pub fn set_continue_action(&self, action: ContinueAction) {
|
||||||
|
self.data.lock().continue_with = Some(action);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get continue action.
|
||||||
|
pub fn continue_action(&self) -> Option<ContinueAction> {
|
||||||
|
self.data.lock().continue_with.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Wait for session completion.
|
||||||
|
pub fn wait(&self) -> Result<(H256, NodeId), Error> {
|
||||||
|
Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone())
|
||||||
|
}
|
||||||
|
|
||||||
/// Initialize session.
|
/// Initialize session.
|
||||||
pub fn initialize(&self, connected_nodes: BTreeSet<NodeId>) -> Result<(), Error> {
|
pub fn initialize(&self, connected_nodes: BTreeSet<NodeId>) -> Result<(), Error> {
|
||||||
// check state
|
// check state
|
||||||
@ -227,7 +232,7 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
|||||||
// try to complete session
|
// try to complete session
|
||||||
Self::try_complete(&self.core, &mut *data);
|
Self::try_complete(&self.core, &mut *data);
|
||||||
if no_confirmations_required && data.state != SessionState::Finished {
|
if no_confirmations_required && data.state != SessionState::Finished {
|
||||||
return Err(Error::ConsensusUnreachable);
|
return Err(Error::MissingKeyShare);
|
||||||
} else if data.state == SessionState::Finished {
|
} else if data.state == SessionState::Finished {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
@ -355,27 +360,6 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T> Session for SessionImpl<T> where T: SessionTransport + Send + Sync + 'static {
|
|
||||||
fn set_continue_action(&self, action: ContinueAction) {
|
|
||||||
self.data.lock().continue_with = Some(action);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn continue_action(&self) -> Option<ContinueAction> {
|
|
||||||
self.data.lock().continue_with.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn wait(&self) -> Result<(H256, NodeId), Error> {
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
if !data.result.is_some() {
|
|
||||||
self.core.completed.wait(&mut data);
|
|
||||||
}
|
|
||||||
|
|
||||||
data.result.as_ref()
|
|
||||||
.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
|
|
||||||
.clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> ClusterSession for SessionImpl<T> where T: SessionTransport {
|
impl<T> ClusterSession for SessionImpl<T> where T: SessionTransport {
|
||||||
type Id = SessionIdWithSubSession;
|
type Id = SessionIdWithSubSession;
|
||||||
|
|
||||||
@ -454,6 +438,8 @@ impl FastestResultComputer {
|
|||||||
impl SessionResultComputer for FastestResultComputer {
|
impl SessionResultComputer for FastestResultComputer {
|
||||||
fn compute_result(&self, threshold: Option<usize>, confirmations: &BTreeSet<NodeId>, versions: &BTreeMap<H256, BTreeSet<NodeId>>) -> Option<Result<(H256, NodeId), Error>> {
|
fn compute_result(&self, threshold: Option<usize>, confirmations: &BTreeSet<NodeId>, versions: &BTreeMap<H256, BTreeSet<NodeId>>) -> Option<Result<(H256, NodeId), Error>> {
|
||||||
match self.threshold.or(threshold) {
|
match self.threshold.or(threshold) {
|
||||||
|
// if there's no versions at all && we're not waiting for confirmations anymore
|
||||||
|
_ if confirmations.is_empty() && versions.is_empty() => Some(Err(Error::MissingKeyShare)),
|
||||||
// if we have key share on this node
|
// if we have key share on this node
|
||||||
Some(threshold) => {
|
Some(threshold) => {
|
||||||
// select version this node have, with enough participants
|
// select version this node have, with enough participants
|
||||||
@ -489,6 +475,9 @@ impl SessionResultComputer for LargestSupportResultComputer {
|
|||||||
if !confirmations.is_empty() {
|
if !confirmations.is_empty() {
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
|
if versions.is_empty() {
|
||||||
|
return Some(Err(Error::MissingKeyShare));
|
||||||
|
}
|
||||||
|
|
||||||
versions.iter()
|
versions.iter()
|
||||||
.max_by_key(|&(_, ref n)| n.len())
|
.max_by_key(|&(_, ref n)| n.len())
|
||||||
@ -507,7 +496,8 @@ mod tests {
|
|||||||
use key_server_cluster::cluster::tests::DummyCluster;
|
use key_server_cluster::cluster::tests::DummyCluster;
|
||||||
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
||||||
use key_server_cluster::message::{Message, KeyVersionNegotiationMessage, RequestKeyVersions, KeyVersions};
|
use key_server_cluster::message::{Message, KeyVersionNegotiationMessage, RequestKeyVersions, KeyVersions};
|
||||||
use super::{SessionImpl, SessionTransport, SessionParams, FastestResultComputer, SessionState};
|
use super::{SessionImpl, SessionTransport, SessionParams, FastestResultComputer, LargestSupportResultComputer,
|
||||||
|
SessionResultComputer, SessionState};
|
||||||
|
|
||||||
struct DummyTransport {
|
struct DummyTransport {
|
||||||
cluster: Arc<DummyCluster>,
|
cluster: Arc<DummyCluster>,
|
||||||
@ -709,6 +699,7 @@ mod tests {
|
|||||||
nodes.values().nth(0).unwrap().insert(Default::default(), DocumentKeyShare {
|
nodes.values().nth(0).unwrap().insert(Default::default(), DocumentKeyShare {
|
||||||
author: Default::default(),
|
author: Default::default(),
|
||||||
threshold: 1,
|
threshold: 1,
|
||||||
|
public: Default::default(),
|
||||||
common_point: None,
|
common_point: None,
|
||||||
encrypted_point: None,
|
encrypted_point: None,
|
||||||
versions: vec![DocumentKeyShareVersion {
|
versions: vec![DocumentKeyShareVersion {
|
||||||
@ -722,4 +713,19 @@ mod tests {
|
|||||||
// we can't be sure that node has given key version because previous ShareAdd session could fail
|
// we can't be sure that node has given key version because previous ShareAdd session could fail
|
||||||
assert!(ml.session(0).data.lock().state != SessionState::Finished);
|
assert!(ml.session(0).data.lock().state != SessionState::Finished);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn fastest_computer_returns_missing_share_if_no_versions_returned() {
|
||||||
|
let computer = FastestResultComputer {
|
||||||
|
self_node_id: Default::default(),
|
||||||
|
threshold: None,
|
||||||
|
};
|
||||||
|
assert_eq!(computer.compute_result(Some(10), &Default::default(), &Default::default()), Some(Err(Error::MissingKeyShare)));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn largest_computer_returns_missing_share_if_no_versions_returned() {
|
||||||
|
let computer = LargestSupportResultComputer;
|
||||||
|
assert_eq!(computer.compute_result(Some(10), &Default::default(), &Default::default()), Some(Err(Error::MissingKeyShare)));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -33,7 +33,7 @@ use key_server_cluster::share_change_session::{ShareChangeSession, ShareChangeSe
|
|||||||
prepare_share_change_session_plan};
|
prepare_share_change_session_plan};
|
||||||
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl,
|
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl,
|
||||||
SessionParams as KeyVersionNegotiationSessionParams, LargestSupportResultComputer,
|
SessionParams as KeyVersionNegotiationSessionParams, LargestSupportResultComputer,
|
||||||
SessionTransport as KeyVersionNegotiationTransport, Session as KeyVersionNegotiationSession};
|
SessionTransport as KeyVersionNegotiationTransport};
|
||||||
use key_server_cluster::jobs::job_session::JobTransport;
|
use key_server_cluster::jobs::job_session::JobTransport;
|
||||||
use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest};
|
use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest};
|
||||||
use key_server_cluster::jobs::unknown_sessions_job::{UnknownSessionsJob};
|
use key_server_cluster::jobs::unknown_sessions_job::{UnknownSessionsJob};
|
||||||
@ -44,12 +44,6 @@ use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
|||||||
/// Maximal number of active share change sessions.
|
/// Maximal number of active share change sessions.
|
||||||
const MAX_ACTIVE_KEY_SESSIONS: usize = 64;
|
const MAX_ACTIVE_KEY_SESSIONS: usize = 64;
|
||||||
|
|
||||||
/// Servers set change session API.
|
|
||||||
pub trait Session: Send + Sync + 'static {
|
|
||||||
/// Wait until session is completed.
|
|
||||||
fn wait(&self) -> Result<(), Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Servers set change session.
|
/// Servers set change session.
|
||||||
/// Brief overview:
|
/// Brief overview:
|
||||||
/// 1) consensus establishing
|
/// 1) consensus establishing
|
||||||
@ -211,6 +205,11 @@ impl SessionImpl {
|
|||||||
&self.core.meta.id
|
&self.core.meta.id
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Wait for session completion.
|
||||||
|
pub fn wait(&self) -> Result<(), Error> {
|
||||||
|
Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone())
|
||||||
|
}
|
||||||
|
|
||||||
/// Initialize servers set change session on master node.
|
/// Initialize servers set change session on master node.
|
||||||
pub fn initialize(&self, new_nodes_set: BTreeSet<NodeId>, all_set_signature: Signature, new_set_signature: Signature) -> Result<(), Error> {
|
pub fn initialize(&self, new_nodes_set: BTreeSet<NodeId>, all_set_signature: Signature, new_set_signature: Signature) -> Result<(), Error> {
|
||||||
check_nodes_set(&self.core.all_nodes_set, &new_nodes_set)?;
|
check_nodes_set(&self.core.all_nodes_set, &new_nodes_set)?;
|
||||||
@ -877,18 +876,6 @@ impl SessionImpl {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Session for SessionImpl {
|
|
||||||
fn wait(&self) -> Result<(), Error> {
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
if !data.result.is_some() {
|
|
||||||
self.core.completed.wait(&mut data);
|
|
||||||
}
|
|
||||||
|
|
||||||
data.result.clone()
|
|
||||||
.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ClusterSession for SessionImpl {
|
impl ClusterSession for SessionImpl {
|
||||||
type Id = SessionId;
|
type Id = SessionId;
|
||||||
|
|
||||||
|
@ -32,12 +32,6 @@ use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAc
|
|||||||
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
|
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
|
||||||
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
|
||||||
|
|
||||||
/// Share addition session API.
|
|
||||||
pub trait Session: Send + Sync + 'static {
|
|
||||||
/// Wait until session is completed.
|
|
||||||
fn wait(&self) -> Result<(), Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Share addition session transport.
|
/// Share addition session transport.
|
||||||
pub trait SessionTransport: Clone + JobTransport<PartialJobRequest=ServersSetChangeAccessRequest, PartialJobResponse=bool> {
|
pub trait SessionTransport: Clone + JobTransport<PartialJobRequest=ServersSetChangeAccessRequest, PartialJobResponse=bool> {
|
||||||
/// Get all connected nodes. Since ShareAdd session requires all cluster nodes to be connected, this set equals to all known cluster nodes set.
|
/// Get all connected nodes. Since ShareAdd session requires all cluster nodes to be connected, this set equals to all known cluster nodes set.
|
||||||
@ -92,14 +86,8 @@ struct SessionData<T: SessionTransport> {
|
|||||||
pub version: Option<H256>,
|
pub version: Option<H256>,
|
||||||
/// Consensus session.
|
/// Consensus session.
|
||||||
pub consensus_session: Option<ShareAddChangeConsensusSession<T>>,
|
pub consensus_session: Option<ShareAddChangeConsensusSession<T>>,
|
||||||
/// NewKeyShare: threshold.
|
/// NewKeyShare (for nodes being added).
|
||||||
pub key_share_threshold: Option<usize>,
|
pub new_key_share: Option<NewKeyShare>,
|
||||||
/// NewKeyShare: author.
|
|
||||||
pub key_share_author: Option<Public>,
|
|
||||||
/// NewKeyShare: Common (shared) encryption point.
|
|
||||||
pub key_share_common_point: Option<Public>,
|
|
||||||
/// NewKeyShare: Encrypted point.
|
|
||||||
pub key_share_encrypted_point: Option<Public>,
|
|
||||||
/// Nodes id numbers.
|
/// Nodes id numbers.
|
||||||
pub id_numbers: Option<BTreeMap<NodeId, Option<Secret>>>,
|
pub id_numbers: Option<BTreeMap<NodeId, Option<Secret>>>,
|
||||||
/// Secret subshares received from nodes.
|
/// Secret subshares received from nodes.
|
||||||
@ -108,6 +96,20 @@ struct SessionData<T: SessionTransport> {
|
|||||||
pub result: Option<Result<(), Error>>,
|
pub result: Option<Result<(), Error>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// New key share.
|
||||||
|
struct NewKeyShare {
|
||||||
|
/// NewKeyShare: threshold.
|
||||||
|
pub threshold: usize,
|
||||||
|
/// NewKeyShare: author.
|
||||||
|
pub author: Public,
|
||||||
|
/// NewKeyShare: joint public.
|
||||||
|
pub joint_public: Public,
|
||||||
|
/// NewKeyShare: Common (shared) encryption point.
|
||||||
|
pub common_point: Option<Public>,
|
||||||
|
/// NewKeyShare: Encrypted point.
|
||||||
|
pub encrypted_point: Option<Public>,
|
||||||
|
}
|
||||||
|
|
||||||
/// Session state.
|
/// Session state.
|
||||||
#[derive(Debug, PartialEq)]
|
#[derive(Debug, PartialEq)]
|
||||||
enum SessionState {
|
enum SessionState {
|
||||||
@ -171,10 +173,7 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
|||||||
state: SessionState::ConsensusEstablishing,
|
state: SessionState::ConsensusEstablishing,
|
||||||
version: None,
|
version: None,
|
||||||
consensus_session: None,
|
consensus_session: None,
|
||||||
key_share_threshold: None,
|
new_key_share: None,
|
||||||
key_share_author: None,
|
|
||||||
key_share_common_point: None,
|
|
||||||
key_share_encrypted_point: None,
|
|
||||||
id_numbers: None,
|
id_numbers: None,
|
||||||
secret_subshares: None,
|
secret_subshares: None,
|
||||||
result: None,
|
result: None,
|
||||||
@ -430,7 +429,7 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// we only expect this message once
|
// we only expect this message once
|
||||||
if data.key_share_threshold.is_some() || data.key_share_author.is_some() || data.key_share_common_point.is_some() || data.key_share_encrypted_point.is_some() {
|
if data.new_key_share.is_some() {
|
||||||
return Err(Error::InvalidStateForRequest);
|
return Err(Error::InvalidStateForRequest);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -445,10 +444,13 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
|||||||
|
|
||||||
// update data
|
// update data
|
||||||
data.state = SessionState::WaitingForKeysDissemination;
|
data.state = SessionState::WaitingForKeysDissemination;
|
||||||
data.key_share_threshold = Some(message.threshold);
|
data.new_key_share = Some(NewKeyShare {
|
||||||
data.key_share_author = Some(message.author.clone().into());
|
threshold: message.threshold,
|
||||||
data.key_share_common_point = message.common_point.clone().map(Into::into);
|
author: message.author.clone().into(),
|
||||||
data.key_share_encrypted_point = message.encrypted_point.clone().map(Into::into);
|
joint_public: message.joint_public.clone().into(),
|
||||||
|
common_point: message.common_point.clone().map(Into::into),
|
||||||
|
encrypted_point: message.encrypted_point.clone().map(Into::into),
|
||||||
|
});
|
||||||
|
|
||||||
let id_numbers = data.id_numbers.as_mut()
|
let id_numbers = data.id_numbers.as_mut()
|
||||||
.expect("common key share data is expected after initialization; id_numers are filled during initialization; qed");
|
.expect("common key share data is expected after initialization; id_numers are filled during initialization; qed");
|
||||||
@ -619,6 +621,7 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
|||||||
session_nonce: core.nonce,
|
session_nonce: core.nonce,
|
||||||
threshold: old_key_share.threshold,
|
threshold: old_key_share.threshold,
|
||||||
author: old_key_share.author.clone().into(),
|
author: old_key_share.author.clone().into(),
|
||||||
|
joint_public: old_key_share.public.clone().into(),
|
||||||
common_point: old_key_share.common_point.clone().map(Into::into),
|
common_point: old_key_share.common_point.clone().map(Into::into),
|
||||||
encrypted_point: old_key_share.encrypted_point.clone().map(Into::into),
|
encrypted_point: old_key_share.encrypted_point.clone().map(Into::into),
|
||||||
id_numbers: old_key_version.id_numbers.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(),
|
id_numbers: old_key_version.id_numbers.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(),
|
||||||
@ -666,8 +669,9 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
|||||||
let id_numbers = data.id_numbers.as_ref().expect(explanation);
|
let id_numbers = data.id_numbers.as_ref().expect(explanation);
|
||||||
let secret_subshares = data.secret_subshares.as_ref().expect(explanation);
|
let secret_subshares = data.secret_subshares.as_ref().expect(explanation);
|
||||||
let threshold = core.key_share.as_ref().map(|ks| ks.threshold)
|
let threshold = core.key_share.as_ref().map(|ks| ks.threshold)
|
||||||
.unwrap_or_else(|| *data.key_share_threshold.as_ref()
|
.unwrap_or_else(|| data.new_key_share.as_ref()
|
||||||
.expect("computation occurs after receiving key share threshold if not having one already; qed"));
|
.expect("computation occurs after receiving key share threshold if not having one already; qed")
|
||||||
|
.threshold);
|
||||||
|
|
||||||
let explanation = "id_numbers are checked to have Some value for every consensus group node when consensus is establishe; qed";
|
let explanation = "id_numbers are checked to have Some value for every consensus group node when consensus is establishe; qed";
|
||||||
let sender_id_number = id_numbers[sender].as_ref().expect(explanation);
|
let sender_id_number = id_numbers[sender].as_ref().expect(explanation);
|
||||||
@ -693,14 +697,17 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
|||||||
let refreshed_key_version = DocumentKeyShareVersion::new(id_numbers.clone().into_iter().map(|(k, v)| (k.clone(),
|
let refreshed_key_version = DocumentKeyShareVersion::new(id_numbers.clone().into_iter().map(|(k, v)| (k.clone(),
|
||||||
v.expect("id_numbers are checked to have Some value for every consensus group node when consensus is establishe; qed"))).collect(),
|
v.expect("id_numbers are checked to have Some value for every consensus group node when consensus is establishe; qed"))).collect(),
|
||||||
secret_share);
|
secret_share);
|
||||||
let mut refreshed_key_share = core.key_share.as_ref().cloned().unwrap_or_else(|| DocumentKeyShare {
|
let mut refreshed_key_share = core.key_share.as_ref().cloned().unwrap_or_else(|| {
|
||||||
author: data.key_share_author.clone()
|
let new_key_share = data.new_key_share.as_ref()
|
||||||
.expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed"),
|
.expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed");
|
||||||
threshold: data.key_share_threshold.clone()
|
DocumentKeyShare {
|
||||||
.expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed"),
|
author: new_key_share.author.clone(),
|
||||||
common_point: data.key_share_common_point.clone(),
|
threshold: new_key_share.threshold,
|
||||||
encrypted_point: data.key_share_encrypted_point.clone(),
|
public: new_key_share.joint_public.clone(),
|
||||||
|
common_point: new_key_share.common_point.clone(),
|
||||||
|
encrypted_point: new_key_share.encrypted_point.clone(),
|
||||||
versions: Vec::new(),
|
versions: Vec::new(),
|
||||||
|
}
|
||||||
});
|
});
|
||||||
refreshed_key_share.versions.push(refreshed_key_version);
|
refreshed_key_share.versions.push(refreshed_key_version);
|
||||||
|
|
||||||
@ -721,18 +728,6 @@ impl<T> SessionImpl<T> where T: SessionTransport {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T> Session for SessionImpl<T> where T: SessionTransport + Send + Sync + 'static {
|
|
||||||
fn wait(&self) -> Result<(), Error> {
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
if !data.result.is_some() {
|
|
||||||
self.core.completed.wait(&mut data);
|
|
||||||
}
|
|
||||||
|
|
||||||
data.result.clone()
|
|
||||||
.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> ClusterSession for SessionImpl<T> where T: SessionTransport {
|
impl<T> ClusterSession for SessionImpl<T> where T: SessionTransport {
|
||||||
type Id = SessionId;
|
type Id = SessionId;
|
||||||
|
|
||||||
|
@ -30,12 +30,6 @@ use key_server_cluster::jobs::key_access_job::KeyAccessJob;
|
|||||||
use key_server_cluster::jobs::decryption_job::{PartialDecryptionRequest, PartialDecryptionResponse, DecryptionJob};
|
use key_server_cluster::jobs::decryption_job::{PartialDecryptionRequest, PartialDecryptionResponse, DecryptionJob};
|
||||||
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
|
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
|
||||||
|
|
||||||
/// Decryption session API.
|
|
||||||
pub trait Session: Send + Sync + 'static {
|
|
||||||
/// Wait until session is completed. Returns distributely restored secret key.
|
|
||||||
fn wait(&self) -> Result<EncryptedDocumentKeyShadow, Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Distributed decryption session.
|
/// Distributed decryption session.
|
||||||
/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper:
|
/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper:
|
||||||
/// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf
|
/// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf
|
||||||
@ -206,6 +200,11 @@ impl SessionImpl {
|
|||||||
self.data.lock().result.clone()
|
self.data.lock().result.clone()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Wait for session completion.
|
||||||
|
pub fn wait(&self) -> Result<EncryptedDocumentKeyShadow, Error> {
|
||||||
|
Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone())
|
||||||
|
}
|
||||||
|
|
||||||
/// Delegate session to other node.
|
/// Delegate session to other node.
|
||||||
pub fn delegate(&self, master: NodeId, version: H256, is_shadow_decryption: bool) -> Result<(), Error> {
|
pub fn delegate(&self, master: NodeId, version: H256, is_shadow_decryption: bool) -> Result<(), Error> {
|
||||||
if self.core.meta.master_node_id != self.core.meta.self_node_id {
|
if self.core.meta.master_node_id != self.core.meta.self_node_id {
|
||||||
@ -555,19 +554,6 @@ impl ClusterSession for SessionImpl {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Session for SessionImpl {
|
|
||||||
fn wait(&self) -> Result<EncryptedDocumentKeyShadow, Error> {
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
if !data.result.is_some() {
|
|
||||||
self.core.completed.wait(&mut data);
|
|
||||||
}
|
|
||||||
|
|
||||||
data.result.as_ref()
|
|
||||||
.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
|
|
||||||
.clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SessionCore {
|
impl SessionCore {
|
||||||
pub fn decryption_transport(&self) -> DecryptionJobTransport {
|
pub fn decryption_transport(&self) -> DecryptionJobTransport {
|
||||||
DecryptionJobTransport {
|
DecryptionJobTransport {
|
||||||
@ -692,6 +678,7 @@ mod tests {
|
|||||||
let encrypted_datas: Vec<_> = (0..5).map(|i| DocumentKeyShare {
|
let encrypted_datas: Vec<_> = (0..5).map(|i| DocumentKeyShare {
|
||||||
author: Public::default(),
|
author: Public::default(),
|
||||||
threshold: 3,
|
threshold: 3,
|
||||||
|
public: Default::default(),
|
||||||
common_point: Some(common_point.clone()),
|
common_point: Some(common_point.clone()),
|
||||||
encrypted_point: Some(encrypted_point.clone()),
|
encrypted_point: Some(encrypted_point.clone()),
|
||||||
versions: vec![DocumentKeyShareVersion {
|
versions: vec![DocumentKeyShareVersion {
|
||||||
@ -763,6 +750,7 @@ mod tests {
|
|||||||
key_share: Some(DocumentKeyShare {
|
key_share: Some(DocumentKeyShare {
|
||||||
author: Public::default(),
|
author: Public::default(),
|
||||||
threshold: 0,
|
threshold: 0,
|
||||||
|
public: Default::default(),
|
||||||
common_point: Some(Random.generate().unwrap().public().clone()),
|
common_point: Some(Random.generate().unwrap().public().clone()),
|
||||||
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
||||||
versions: vec![DocumentKeyShareVersion {
|
versions: vec![DocumentKeyShareVersion {
|
||||||
@ -816,6 +804,7 @@ mod tests {
|
|||||||
key_share: Some(DocumentKeyShare {
|
key_share: Some(DocumentKeyShare {
|
||||||
author: Public::default(),
|
author: Public::default(),
|
||||||
threshold: 2,
|
threshold: 2,
|
||||||
|
public: Default::default(),
|
||||||
common_point: Some(Random.generate().unwrap().public().clone()),
|
common_point: Some(Random.generate().unwrap().public().clone()),
|
||||||
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
||||||
versions: vec![DocumentKeyShareVersion {
|
versions: vec![DocumentKeyShareVersion {
|
||||||
|
@ -26,14 +26,6 @@ use key_server_cluster::cluster_sessions::ClusterSession;
|
|||||||
use key_server_cluster::message::{Message, EncryptionMessage, InitializeEncryptionSession,
|
use key_server_cluster::message::{Message, EncryptionMessage, InitializeEncryptionSession,
|
||||||
ConfirmEncryptionInitialization, EncryptionSessionError};
|
ConfirmEncryptionInitialization, EncryptionSessionError};
|
||||||
|
|
||||||
/// Encryption session API.
|
|
||||||
pub trait Session: Send + Sync + 'static {
|
|
||||||
/// Get encryption session state.
|
|
||||||
fn state(&self) -> SessionState;
|
|
||||||
/// Wait until session is completed. Returns distributely generated secret key.
|
|
||||||
fn wait(&self, timeout: Option<time::Duration>) -> Result<(), Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Encryption (distributed key generation) session.
|
/// Encryption (distributed key generation) session.
|
||||||
/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper:
|
/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper:
|
||||||
/// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf
|
/// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf
|
||||||
@ -138,6 +130,12 @@ impl SessionImpl {
|
|||||||
&self.self_node_id
|
&self.self_node_id
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Wait for session completion.
|
||||||
|
pub fn wait(&self, timeout: Option<time::Duration>) -> Result<(), Error> {
|
||||||
|
Self::wait_session(&self.completed, &self.data, timeout, |data| data.result.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
/// Start new session initialization. This must be called on master node.
|
/// Start new session initialization. This must be called on master node.
|
||||||
pub fn initialize(&self, requestor_signature: Signature, common_point: Public, encrypted_point: Public) -> Result<(), Error> {
|
pub fn initialize(&self, requestor_signature: Signature, common_point: Public, encrypted_point: Public) -> Result<(), Error> {
|
||||||
let mut data = self.data.lock();
|
let mut data = self.data.lock();
|
||||||
@ -328,26 +326,6 @@ impl ClusterSession for SessionImpl {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Session for SessionImpl {
|
|
||||||
fn state(&self) -> SessionState {
|
|
||||||
self.data.lock().state.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn wait(&self, timeout: Option<time::Duration>) -> Result<(), Error> {
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
if !data.result.is_some() {
|
|
||||||
match timeout {
|
|
||||||
None => self.completed.wait(&mut data),
|
|
||||||
Some(timeout) => { self.completed.wait_for(&mut data, timeout); },
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
data.result.as_ref()
|
|
||||||
.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
|
|
||||||
.clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Debug for SessionImpl {
|
impl Debug for SessionImpl {
|
||||||
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
|
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
|
||||||
write!(f, "Encryption session {} on {}", self.id, self.self_node_id)
|
write!(f, "Encryption session {} on {}", self.id, self.self_node_id)
|
||||||
|
@ -27,16 +27,6 @@ use key_server_cluster::cluster_sessions::ClusterSession;
|
|||||||
use key_server_cluster::message::{Message, GenerationMessage, InitializeSession, ConfirmInitialization, CompleteInitialization,
|
use key_server_cluster::message::{Message, GenerationMessage, InitializeSession, ConfirmInitialization, CompleteInitialization,
|
||||||
KeysDissemination, PublicKeyShare, SessionError, SessionCompleted};
|
KeysDissemination, PublicKeyShare, SessionError, SessionCompleted};
|
||||||
|
|
||||||
/// Key generation session API.
|
|
||||||
pub trait Session: Send + Sync + 'static {
|
|
||||||
/// Get generation session state.
|
|
||||||
fn state(&self) -> SessionState;
|
|
||||||
/// Wait until session is completed. Returns public portion of generated server key.
|
|
||||||
fn wait(&self, timeout: Option<time::Duration>) -> Result<Public, Error>;
|
|
||||||
/// Get joint public key (if it is known).
|
|
||||||
fn joint_public_and_secret(&self) -> Option<Result<(Public, Secret), Error>>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Distributed key generation session.
|
/// Distributed key generation session.
|
||||||
/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper:
|
/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper:
|
||||||
/// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf
|
/// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf
|
||||||
@ -226,6 +216,22 @@ impl SessionImpl {
|
|||||||
self.data.lock().simulate_faulty_behaviour = true;
|
self.data.lock().simulate_faulty_behaviour = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Get session state.
|
||||||
|
pub fn state(&self) -> SessionState {
|
||||||
|
self.data.lock().state.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Wait for session completion.
|
||||||
|
pub fn wait(&self, timeout: Option<time::Duration>) -> Result<Public, Error> {
|
||||||
|
Self::wait_session(&self.completed, &self.data, timeout, |data| data.joint_public_and_secret.clone()
|
||||||
|
.map(|r| r.map(|r| r.0.clone())))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get generated public and secret (if any).
|
||||||
|
pub fn joint_public_and_secret(&self) -> Option<Result<(Public, Secret), Error>> {
|
||||||
|
self.data.lock().joint_public_and_secret.clone()
|
||||||
|
}
|
||||||
|
|
||||||
/// Start new session initialization. This must be called on master node.
|
/// Start new session initialization. This must be called on master node.
|
||||||
pub fn initialize(&self, author: Public, threshold: usize, nodes: BTreeSet<NodeId>) -> Result<(), Error> {
|
pub fn initialize(&self, author: Public, threshold: usize, nodes: BTreeSet<NodeId>) -> Result<(), Error> {
|
||||||
check_cluster_nodes(self.node(), &nodes)?;
|
check_cluster_nodes(self.node(), &nodes)?;
|
||||||
@ -502,10 +508,17 @@ impl SessionImpl {
|
|||||||
return Err(Error::InvalidMessage);
|
return Err(Error::InvalidMessage);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// calculate joint public key
|
||||||
|
let joint_public = {
|
||||||
|
let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed"));
|
||||||
|
math::compute_joint_public(public_shares)?
|
||||||
|
};
|
||||||
|
|
||||||
// save encrypted data to key storage
|
// save encrypted data to key storage
|
||||||
let encrypted_data = DocumentKeyShare {
|
let encrypted_data = DocumentKeyShare {
|
||||||
author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(),
|
author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(),
|
||||||
threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"),
|
threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"),
|
||||||
|
public: joint_public,
|
||||||
common_point: None,
|
common_point: None,
|
||||||
encrypted_point: None,
|
encrypted_point: None,
|
||||||
versions: vec![DocumentKeyShareVersion::new(
|
versions: vec![DocumentKeyShareVersion::new(
|
||||||
@ -662,7 +675,7 @@ impl SessionImpl {
|
|||||||
fn complete_generation(&self) -> Result<(), Error> {
|
fn complete_generation(&self) -> Result<(), Error> {
|
||||||
let mut data = self.data.lock();
|
let mut data = self.data.lock();
|
||||||
|
|
||||||
// else - calculate joint public key
|
// calculate joint public key
|
||||||
let joint_public = {
|
let joint_public = {
|
||||||
let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed"));
|
let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed"));
|
||||||
math::compute_joint_public(public_shares)?
|
math::compute_joint_public(public_shares)?
|
||||||
@ -672,6 +685,7 @@ impl SessionImpl {
|
|||||||
let encrypted_data = DocumentKeyShare {
|
let encrypted_data = DocumentKeyShare {
|
||||||
author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(),
|
author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(),
|
||||||
threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"),
|
threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"),
|
||||||
|
public: joint_public.clone(),
|
||||||
common_point: None,
|
common_point: None,
|
||||||
encrypted_point: None,
|
encrypted_point: None,
|
||||||
versions: vec![DocumentKeyShareVersion::new(
|
versions: vec![DocumentKeyShareVersion::new(
|
||||||
@ -782,30 +796,6 @@ impl ClusterSession for SessionImpl {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Session for SessionImpl {
|
|
||||||
fn state(&self) -> SessionState {
|
|
||||||
self.data.lock().state.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn wait(&self, timeout: Option<time::Duration>) -> Result<Public, Error> {
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
if !data.joint_public_and_secret.is_some() {
|
|
||||||
match timeout {
|
|
||||||
None => self.completed.wait(&mut data),
|
|
||||||
Some(timeout) => { self.completed.wait_for(&mut data, timeout); },
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
data.joint_public_and_secret.clone()
|
|
||||||
.expect("checked above or waited for completed; completed is only signaled when joint_public.is_some(); qed")
|
|
||||||
.map(|p| p.0)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn joint_public_and_secret(&self) -> Option<Result<(Public, Secret), Error>> {
|
|
||||||
self.data.lock().joint_public_and_secret.clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl EveryOtherNodeVisitor {
|
impl EveryOtherNodeVisitor {
|
||||||
pub fn new<I>(self_id: &NodeId, nodes: I) -> Self where I: Iterator<Item=NodeId> {
|
pub fn new<I>(self_id: &NodeId, nodes: I) -> Self where I: Iterator<Item=NodeId> {
|
||||||
EveryOtherNodeVisitor {
|
EveryOtherNodeVisitor {
|
||||||
@ -883,7 +873,7 @@ pub mod tests {
|
|||||||
use key_server_cluster::message::{self, Message, GenerationMessage};
|
use key_server_cluster::message::{self, Message, GenerationMessage};
|
||||||
use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until, all_connections_established};
|
use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until, all_connections_established};
|
||||||
use key_server_cluster::cluster_sessions::ClusterSession;
|
use key_server_cluster::cluster_sessions::ClusterSession;
|
||||||
use key_server_cluster::generation_session::{Session, SessionImpl, SessionState, SessionParams};
|
use key_server_cluster::generation_session::{SessionImpl, SessionState, SessionParams};
|
||||||
use key_server_cluster::math;
|
use key_server_cluster::math;
|
||||||
use key_server_cluster::math::tests::do_encryption_and_decryption;
|
use key_server_cluster::math::tests::do_encryption_and_decryption;
|
||||||
|
|
||||||
|
@ -23,7 +23,7 @@ use key_server_cluster::{Error, NodeId, SessionId, SessionMeta, AclStorage, Docu
|
|||||||
use key_server_cluster::cluster::{Cluster};
|
use key_server_cluster::cluster::{Cluster};
|
||||||
use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession};
|
use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession};
|
||||||
use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionParams as GenerationSessionParams,
|
use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionParams as GenerationSessionParams,
|
||||||
Session as GenerationSessionApi, SessionState as GenerationSessionState};
|
SessionState as GenerationSessionState};
|
||||||
use key_server_cluster::message::{Message, SigningMessage, SigningConsensusMessage, SigningGenerationMessage,
|
use key_server_cluster::message::{Message, SigningMessage, SigningConsensusMessage, SigningGenerationMessage,
|
||||||
RequestPartialSignature, PartialSignature, SigningSessionCompleted, GenerationMessage, ConsensusMessage, SigningSessionError,
|
RequestPartialSignature, PartialSignature, SigningSessionCompleted, GenerationMessage, ConsensusMessage, SigningSessionError,
|
||||||
InitializeConsensusSession, ConfirmConsensusInitialization, SigningSessionDelegation, SigningSessionDelegationCompleted};
|
InitializeConsensusSession, ConfirmConsensusInitialization, SigningSessionDelegation, SigningSessionDelegationCompleted};
|
||||||
@ -32,12 +32,6 @@ use key_server_cluster::jobs::key_access_job::KeyAccessJob;
|
|||||||
use key_server_cluster::jobs::signing_job::{PartialSigningRequest, PartialSigningResponse, SigningJob};
|
use key_server_cluster::jobs::signing_job::{PartialSigningRequest, PartialSigningResponse, SigningJob};
|
||||||
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
|
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
|
||||||
|
|
||||||
/// Signing session API.
|
|
||||||
pub trait Session: Send + Sync + 'static {
|
|
||||||
/// Wait until session is completed. Returns signed message.
|
|
||||||
fn wait(&self) -> Result<(Secret, Secret), Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Distributed signing session.
|
/// Distributed signing session.
|
||||||
/// Based on "Efficient Multi-Party Digital Signature using Adaptive Secret Sharing for Low-Power Devices in Wireless Network" paper.
|
/// Based on "Efficient Multi-Party Digital Signature using Adaptive Secret Sharing for Low-Power Devices in Wireless Network" paper.
|
||||||
/// Brief overview:
|
/// Brief overview:
|
||||||
@ -211,6 +205,11 @@ impl SessionImpl {
|
|||||||
self.data.lock().state
|
self.data.lock().state
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Wait for session completion.
|
||||||
|
pub fn wait(&self) -> Result<(Secret, Secret), Error> {
|
||||||
|
Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone())
|
||||||
|
}
|
||||||
|
|
||||||
/// Delegate session to other node.
|
/// Delegate session to other node.
|
||||||
pub fn delegate(&self, master: NodeId, version: H256, message_hash: H256) -> Result<(), Error> {
|
pub fn delegate(&self, master: NodeId, version: H256, message_hash: H256) -> Result<(), Error> {
|
||||||
if self.core.meta.master_node_id != self.core.meta.self_node_id {
|
if self.core.meta.master_node_id != self.core.meta.self_node_id {
|
||||||
@ -680,19 +679,6 @@ impl ClusterSession for SessionImpl {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Session for SessionImpl {
|
|
||||||
fn wait(&self) -> Result<(Secret, Secret), Error> {
|
|
||||||
let mut data = self.data.lock();
|
|
||||||
if !data.result.is_some() {
|
|
||||||
self.core.completed.wait(&mut data);
|
|
||||||
}
|
|
||||||
|
|
||||||
data.result.as_ref()
|
|
||||||
.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
|
|
||||||
.clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SessionKeyGenerationTransport {
|
impl SessionKeyGenerationTransport {
|
||||||
fn map_message(&self, message: Message) -> Result<Message, Error> {
|
fn map_message(&self, message: Message) -> Result<Message, Error> {
|
||||||
match message {
|
match message {
|
||||||
@ -819,12 +805,11 @@ mod tests {
|
|||||||
use key_server_cluster::{NodeId, DummyKeyStorage, DocumentKeyShare, DocumentKeyShareVersion, SessionId, SessionMeta, Error, KeyStorage};
|
use key_server_cluster::{NodeId, DummyKeyStorage, DocumentKeyShare, DocumentKeyShareVersion, SessionId, SessionMeta, Error, KeyStorage};
|
||||||
use key_server_cluster::cluster_sessions::ClusterSession;
|
use key_server_cluster::cluster_sessions::ClusterSession;
|
||||||
use key_server_cluster::cluster::tests::DummyCluster;
|
use key_server_cluster::cluster::tests::DummyCluster;
|
||||||
use key_server_cluster::generation_session::{Session as GenerationSession};
|
|
||||||
use key_server_cluster::generation_session::tests::MessageLoop as KeyGenerationMessageLoop;
|
use key_server_cluster::generation_session::tests::MessageLoop as KeyGenerationMessageLoop;
|
||||||
use key_server_cluster::math;
|
use key_server_cluster::math;
|
||||||
use key_server_cluster::message::{Message, SigningMessage, SigningConsensusMessage, ConsensusMessage, ConfirmConsensusInitialization,
|
use key_server_cluster::message::{Message, SigningMessage, SigningConsensusMessage, ConsensusMessage, ConfirmConsensusInitialization,
|
||||||
SigningGenerationMessage, GenerationMessage, ConfirmInitialization, InitializeSession, RequestPartialSignature};
|
SigningGenerationMessage, GenerationMessage, ConfirmInitialization, InitializeSession, RequestPartialSignature};
|
||||||
use key_server_cluster::signing_session::{Session, SessionImpl, SessionState, SessionParams};
|
use key_server_cluster::signing_session::{SessionImpl, SessionState, SessionParams};
|
||||||
|
|
||||||
struct Node {
|
struct Node {
|
||||||
pub node_id: NodeId,
|
pub node_id: NodeId,
|
||||||
@ -986,6 +971,7 @@ mod tests {
|
|||||||
key_share: Some(DocumentKeyShare {
|
key_share: Some(DocumentKeyShare {
|
||||||
author: Public::default(),
|
author: Public::default(),
|
||||||
threshold: 0,
|
threshold: 0,
|
||||||
|
public: Default::default(),
|
||||||
common_point: Some(Random.generate().unwrap().public().clone()),
|
common_point: Some(Random.generate().unwrap().public().clone()),
|
||||||
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
||||||
versions: vec![DocumentKeyShareVersion {
|
versions: vec![DocumentKeyShareVersion {
|
||||||
@ -1039,6 +1025,7 @@ mod tests {
|
|||||||
key_share: Some(DocumentKeyShare {
|
key_share: Some(DocumentKeyShare {
|
||||||
author: Public::default(),
|
author: Public::default(),
|
||||||
threshold: 2,
|
threshold: 2,
|
||||||
|
public: Default::default(),
|
||||||
common_point: Some(Random.generate().unwrap().public().clone()),
|
common_point: Some(Random.generate().unwrap().public().clone()),
|
||||||
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
||||||
versions: vec![DocumentKeyShareVersion {
|
versions: vec![DocumentKeyShareVersion {
|
||||||
|
@ -29,18 +29,15 @@ use tokio_core::net::{TcpListener, TcpStream};
|
|||||||
use ethkey::{Public, KeyPair, Signature, Random, Generator};
|
use ethkey::{Public, KeyPair, Signature, Random, Generator};
|
||||||
use bigint::hash::H256;
|
use bigint::hash::H256;
|
||||||
use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair};
|
use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair};
|
||||||
use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, GenerationSessionWrapper, EncryptionSessionWrapper,
|
use key_server_cluster::cluster_sessions::{ClusterSession, AdminSession, ClusterSessions, SessionIdWithSubSession,
|
||||||
DecryptionSessionWrapper, SigningSessionWrapper, AdminSessionWrapper, KeyNegotiationSessionWrapper, SessionIdWithSubSession,
|
ClusterSessionsContainer, SERVERS_SET_CHANGE_SESSION_ID, create_cluster_view, AdminSessionCreationData, ClusterSessionsListener};
|
||||||
ClusterSessionsContainer, SERVERS_SET_CHANGE_SESSION_ID, create_cluster_view, AdminSessionCreationData};
|
|
||||||
use key_server_cluster::cluster_sessions_creator::{ClusterSessionCreator, IntoSessionId};
|
use key_server_cluster::cluster_sessions_creator::{ClusterSessionCreator, IntoSessionId};
|
||||||
use key_server_cluster::message::{self, Message, ClusterMessage};
|
use key_server_cluster::message::{self, Message, ClusterMessage};
|
||||||
use key_server_cluster::generation_session::{Session as GenerationSession};
|
use key_server_cluster::generation_session::{SessionImpl as GenerationSession};
|
||||||
#[cfg(test)]
|
use key_server_cluster::decryption_session::{SessionImpl as DecryptionSession};
|
||||||
use key_server_cluster::generation_session::SessionImpl as GenerationSessionImpl;
|
use key_server_cluster::encryption_session::{SessionImpl as EncryptionSession};
|
||||||
use key_server_cluster::decryption_session::{Session as DecryptionSession};
|
use key_server_cluster::signing_session::{SessionImpl as SigningSession};
|
||||||
use key_server_cluster::encryption_session::{Session as EncryptionSession};
|
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession,
|
||||||
use key_server_cluster::signing_session::{Session as SigningSession};
|
|
||||||
use key_server_cluster::key_version_negotiation_session::{Session as KeyVersionNegotiationSession, SessionImpl as KeyVersionNegotiationSessionImpl,
|
|
||||||
IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction};
|
IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction};
|
||||||
use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream, read_encrypted_message, WriteMessage, write_encrypted_message};
|
use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream, read_encrypted_message, WriteMessage, write_encrypted_message};
|
||||||
use key_server_cluster::net::{accept_connection as net_accept_connection, connect as net_connect, Connection as NetConnection};
|
use key_server_cluster::net::{accept_connection as net_accept_connection, connect as net_connect, Connection as NetConnection};
|
||||||
@ -74,16 +71,19 @@ pub trait ClusterClient: Send + Sync {
|
|||||||
/// Start new signing session.
|
/// Start new signing session.
|
||||||
fn new_signing_session(&self, session_id: SessionId, requestor_signature: Signature, version: Option<H256>, message_hash: H256) -> Result<Arc<SigningSession>, Error>;
|
fn new_signing_session(&self, session_id: SessionId, requestor_signature: Signature, version: Option<H256>, message_hash: H256) -> Result<Arc<SigningSession>, Error>;
|
||||||
/// Start new key version negotiation session.
|
/// Start new key version negotiation session.
|
||||||
fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result<Arc<KeyVersionNegotiationSession>, Error>;
|
fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>, Error>;
|
||||||
/// Start new servers set change session.
|
/// Start new servers set change session.
|
||||||
fn new_servers_set_change_session(&self, session_id: Option<SessionId>, new_nodes_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSessionWrapper>, Error>;
|
fn new_servers_set_change_session(&self, session_id: Option<SessionId>, new_nodes_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSession>, Error>;
|
||||||
|
|
||||||
|
/// Listen for new generation sessions.
|
||||||
|
fn add_generation_listener(&self, listener: Arc<ClusterSessionsListener<GenerationSession>>);
|
||||||
|
|
||||||
/// Ask node to make 'faulty' generation sessions.
|
/// Ask node to make 'faulty' generation sessions.
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
fn make_faulty_generation_sessions(&self);
|
fn make_faulty_generation_sessions(&self);
|
||||||
/// Get active generation session with given id.
|
/// Get active generation session with given id.
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
fn generation_session(&self, session_id: &SessionId) -> Option<Arc<GenerationSessionImpl>>;
|
fn generation_session(&self, session_id: &SessionId) -> Option<Arc<GenerationSession>>;
|
||||||
/// Try connect to disconnected nodes.
|
/// Try connect to disconnected nodes.
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
fn connect(&self);
|
fn connect(&self);
|
||||||
@ -446,14 +446,14 @@ impl ClusterCore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Try to contnue session.
|
/// Try to contnue session.
|
||||||
fn try_continue_session(data: &Arc<ClusterData>, session: Option<Arc<KeyVersionNegotiationSessionImpl<KeyVersionNegotiationSessionTransport>>>) {
|
fn try_continue_session(data: &Arc<ClusterData>, session: Option<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>) {
|
||||||
if let Some(session) = session {
|
if let Some(session) = session {
|
||||||
let meta = session.meta();
|
let meta = session.meta();
|
||||||
let is_master_node = meta.self_node_id == meta.master_node_id;
|
let is_master_node = meta.self_node_id == meta.master_node_id;
|
||||||
if is_master_node && session.is_finished() {
|
if is_master_node && session.is_finished() {
|
||||||
data.sessions.negotiation_sessions.remove(&session.id());
|
data.sessions.negotiation_sessions.remove(&session.id());
|
||||||
if let Ok((version, master)) = session.wait() {
|
match session.wait() {
|
||||||
match session.continue_action() {
|
Ok((version, master)) => match session.continue_action() {
|
||||||
Some(ContinueAction::Decrypt(session, is_shadow_decryption)) => {
|
Some(ContinueAction::Decrypt(session, is_shadow_decryption)) => {
|
||||||
let initialization_error = if data.self_key_pair.public() == &master {
|
let initialization_error = if data.self_key_pair.public() == &master {
|
||||||
session.initialize(version, is_shadow_decryption)
|
session.initialize(version, is_shadow_decryption)
|
||||||
@ -479,19 +479,18 @@ impl ClusterCore {
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
None => (),
|
None => (),
|
||||||
}
|
},
|
||||||
} else {
|
Err(error) => match session.continue_action() {
|
||||||
match session.continue_action() {
|
|
||||||
Some(ContinueAction::Decrypt(session, _)) => {
|
Some(ContinueAction::Decrypt(session, _)) => {
|
||||||
data.sessions.decryption_sessions.remove(&session.id());
|
data.sessions.decryption_sessions.remove(&session.id());
|
||||||
session.on_session_error(&meta.self_node_id, Error::ConsensusUnreachable);
|
session.on_session_error(&meta.self_node_id, error);
|
||||||
},
|
},
|
||||||
Some(ContinueAction::Sign(session, _)) => {
|
Some(ContinueAction::Sign(session, _)) => {
|
||||||
data.sessions.signing_sessions.remove(&session.id());
|
data.sessions.signing_sessions.remove(&session.id());
|
||||||
session.on_session_error(&meta.self_node_id, Error::ConsensusUnreachable);
|
session.on_session_error(&meta.self_node_id, error);
|
||||||
},
|
},
|
||||||
None => (),
|
None => (),
|
||||||
}
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -741,11 +740,6 @@ impl ClusterData {
|
|||||||
self.connections.get(node)
|
self.connections.get(node)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get sessions reference.
|
|
||||||
pub fn sessions(&self) -> &ClusterSessions {
|
|
||||||
&self.sessions
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Spawns a future using thread pool and schedules execution of it with event loop handle.
|
/// Spawns a future using thread pool and schedules execution of it with event loop handle.
|
||||||
pub fn spawn<F>(&self, f: F) where F: Future + Send + 'static, F::Item: Send + 'static, F::Error: Send + 'static {
|
pub fn spawn<F>(&self, f: F) where F: Future + Send + 'static, F::Item: Send + 'static, F::Error: Send + 'static {
|
||||||
let pool_work = self.pool.spawn(f);
|
let pool_work = self.pool.spawn(f);
|
||||||
@ -842,7 +836,7 @@ impl ClusterClientImpl {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn create_key_version_negotiation_session(&self, session_id: SessionId) -> Result<Arc<KeyVersionNegotiationSessionImpl<KeyVersionNegotiationSessionTransport>>, Error> {
|
fn create_key_version_negotiation_session(&self, session_id: SessionId) -> Result<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>, Error> {
|
||||||
let mut connected_nodes = self.data.connections.connected_nodes();
|
let mut connected_nodes = self.data.connections.connected_nodes();
|
||||||
connected_nodes.insert(self.data.self_key_pair.public().clone());
|
connected_nodes.insert(self.data.self_key_pair.public().clone());
|
||||||
|
|
||||||
@ -872,7 +866,7 @@ impl ClusterClient for ClusterClientImpl {
|
|||||||
let cluster = create_cluster_view(&self.data, true)?;
|
let cluster = create_cluster_view(&self.data, true)?;
|
||||||
let session = self.data.sessions.generation_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?;
|
let session = self.data.sessions.generation_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?;
|
||||||
match session.initialize(author, threshold, connected_nodes) {
|
match session.initialize(author, threshold, connected_nodes) {
|
||||||
Ok(()) => Ok(GenerationSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)),
|
Ok(()) => Ok(session),
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
self.data.sessions.generation_sessions.remove(&session.id());
|
self.data.sessions.generation_sessions.remove(&session.id());
|
||||||
Err(error)
|
Err(error)
|
||||||
@ -887,7 +881,7 @@ impl ClusterClient for ClusterClientImpl {
|
|||||||
let cluster = create_cluster_view(&self.data, true)?;
|
let cluster = create_cluster_view(&self.data, true)?;
|
||||||
let session = self.data.sessions.encryption_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?;
|
let session = self.data.sessions.encryption_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?;
|
||||||
match session.initialize(requestor_signature, common_point, encrypted_point) {
|
match session.initialize(requestor_signature, common_point, encrypted_point) {
|
||||||
Ok(()) => Ok(EncryptionSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)),
|
Ok(()) => Ok(session),
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
self.data.sessions.encryption_sessions.remove(&session.id());
|
self.data.sessions.encryption_sessions.remove(&session.id());
|
||||||
Err(error)
|
Err(error)
|
||||||
@ -916,7 +910,7 @@ impl ClusterClient for ClusterClientImpl {
|
|||||||
};
|
};
|
||||||
|
|
||||||
match initialization_result {
|
match initialization_result {
|
||||||
Ok(()) => Ok(DecryptionSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)),
|
Ok(()) => Ok(session),
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
self.data.sessions.decryption_sessions.remove(&session.id());
|
self.data.sessions.decryption_sessions.remove(&session.id());
|
||||||
Err(error)
|
Err(error)
|
||||||
@ -945,7 +939,7 @@ impl ClusterClient for ClusterClientImpl {
|
|||||||
};
|
};
|
||||||
|
|
||||||
match initialization_result {
|
match initialization_result {
|
||||||
Ok(()) => Ok(SigningSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)),
|
Ok(()) => Ok(session),
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
self.data.sessions.signing_sessions.remove(&session.id());
|
self.data.sessions.signing_sessions.remove(&session.id());
|
||||||
Err(error)
|
Err(error)
|
||||||
@ -953,12 +947,12 @@ impl ClusterClient for ClusterClientImpl {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result<Arc<KeyVersionNegotiationSession>, Error> {
|
fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>, Error> {
|
||||||
let session = self.create_key_version_negotiation_session(session_id)?;
|
let session = self.create_key_version_negotiation_session(session_id)?;
|
||||||
Ok(KeyNegotiationSessionWrapper::new(Arc::downgrade(&self.data), session.id(), session))
|
Ok(session)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new_servers_set_change_session(&self, session_id: Option<SessionId>, new_nodes_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSessionWrapper>, Error> {
|
fn new_servers_set_change_session(&self, session_id: Option<SessionId>, new_nodes_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Result<Arc<AdminSession>, Error> {
|
||||||
let mut connected_nodes = self.data.connections.connected_nodes();
|
let mut connected_nodes = self.data.connections.connected_nodes();
|
||||||
connected_nodes.insert(self.data.self_key_pair.public().clone());
|
connected_nodes.insert(self.data.self_key_pair.public().clone());
|
||||||
|
|
||||||
@ -974,7 +968,7 @@ impl ClusterClient for ClusterClientImpl {
|
|||||||
.initialize(new_nodes_set, old_set_signature, new_set_signature);
|
.initialize(new_nodes_set, old_set_signature, new_set_signature);
|
||||||
|
|
||||||
match initialization_result {
|
match initialization_result {
|
||||||
Ok(()) => Ok(AdminSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)),
|
Ok(()) => Ok(session),
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
self.data.sessions.admin_sessions.remove(&session.id());
|
self.data.sessions.admin_sessions.remove(&session.id());
|
||||||
Err(error)
|
Err(error)
|
||||||
@ -982,6 +976,10 @@ impl ClusterClient for ClusterClientImpl {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn add_generation_listener(&self, listener: Arc<ClusterSessionsListener<GenerationSession>>) {
|
||||||
|
self.data.sessions.generation_sessions.add_listener(listener);
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
fn connect(&self) {
|
fn connect(&self) {
|
||||||
ClusterCore::connect_disconnected_nodes(self.data.clone());
|
ClusterCore::connect_disconnected_nodes(self.data.clone());
|
||||||
@ -993,7 +991,7 @@ impl ClusterClient for ClusterClientImpl {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
fn generation_session(&self, session_id: &SessionId) -> Option<Arc<GenerationSessionImpl>> {
|
fn generation_session(&self, session_id: &SessionId) -> Option<Arc<GenerationSession>> {
|
||||||
self.data.sessions.generation_sessions.get(session_id, false)
|
self.data.sessions.generation_sessions.get(session_id, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1015,12 +1013,21 @@ pub mod tests {
|
|||||||
use std::collections::{BTreeSet, VecDeque};
|
use std::collections::{BTreeSet, VecDeque};
|
||||||
use parking_lot::Mutex;
|
use parking_lot::Mutex;
|
||||||
use tokio_core::reactor::Core;
|
use tokio_core::reactor::Core;
|
||||||
use ethkey::{Random, Generator, Public, sign};
|
use bigint::hash::H256;
|
||||||
use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet, PlainNodeKeyPair};
|
use ethkey::{Random, Generator, Public, Signature, sign};
|
||||||
|
use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet, PlainNodeKeyPair, KeyStorage};
|
||||||
use key_server_cluster::message::Message;
|
use key_server_cluster::message::Message;
|
||||||
use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration};
|
use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration, ClusterClient, ClusterState};
|
||||||
use key_server_cluster::cluster_sessions::ClusterSession;
|
use key_server_cluster::cluster_sessions::{ClusterSession, AdminSession, ClusterSessionsListener};
|
||||||
use key_server_cluster::generation_session::{Session as GenerationSession, SessionState as GenerationSessionState};
|
use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionState as GenerationSessionState};
|
||||||
|
use key_server_cluster::decryption_session::{SessionImpl as DecryptionSession};
|
||||||
|
use key_server_cluster::encryption_session::{SessionImpl as EncryptionSession};
|
||||||
|
use key_server_cluster::signing_session::{SessionImpl as SigningSession};
|
||||||
|
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession,
|
||||||
|
IsolatedSessionTransport as KeyVersionNegotiationSessionTransport};
|
||||||
|
|
||||||
|
#[derive(Default)]
|
||||||
|
pub struct DummyClusterClient;
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct DummyCluster {
|
pub struct DummyCluster {
|
||||||
@ -1034,6 +1041,23 @@ pub mod tests {
|
|||||||
messages: VecDeque<(NodeId, Message)>,
|
messages: VecDeque<(NodeId, Message)>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl ClusterClient for DummyClusterClient {
|
||||||
|
fn cluster_state(&self) -> ClusterState { unimplemented!() }
|
||||||
|
fn new_generation_session(&self, _session_id: SessionId, _author: Public, _threshold: usize) -> Result<Arc<GenerationSession>, Error> { unimplemented!() }
|
||||||
|
fn new_encryption_session(&self, _session_id: SessionId, _requestor_signature: Signature, _common_point: Public, _encrypted_point: Public) -> Result<Arc<EncryptionSession>, Error> { unimplemented!() }
|
||||||
|
fn new_decryption_session(&self, _session_id: SessionId, _requestor_signature: Signature, _version: Option<H256>, _is_shadow_decryption: bool) -> Result<Arc<DecryptionSession>, Error> { unimplemented!() }
|
||||||
|
fn new_signing_session(&self, _session_id: SessionId, _requestor_signature: Signature, _version: Option<H256>, _message_hash: H256) -> Result<Arc<SigningSession>, Error> { unimplemented!() }
|
||||||
|
fn new_key_version_negotiation_session(&self, _session_id: SessionId) -> Result<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>, Error> { unimplemented!() }
|
||||||
|
fn new_servers_set_change_session(&self, _session_id: Option<SessionId>, _new_nodes_set: BTreeSet<NodeId>, _old_set_signature: Signature, _new_set_signature: Signature) -> Result<Arc<AdminSession>, Error> { unimplemented!() }
|
||||||
|
|
||||||
|
fn add_generation_listener(&self, _listener: Arc<ClusterSessionsListener<GenerationSession>>) {}
|
||||||
|
|
||||||
|
fn make_faulty_generation_sessions(&self) { unimplemented!() }
|
||||||
|
fn generation_session(&self, _session_id: &SessionId) -> Option<Arc<GenerationSession>> { unimplemented!() }
|
||||||
|
fn connect(&self) { unimplemented!() }
|
||||||
|
fn key_storage(&self) -> Arc<KeyStorage> { unimplemented!() }
|
||||||
|
}
|
||||||
|
|
||||||
impl DummyCluster {
|
impl DummyCluster {
|
||||||
pub fn new(id: NodeId) -> Self {
|
pub fn new(id: NodeId) -> Self {
|
||||||
DummyCluster {
|
DummyCluster {
|
||||||
|
@ -18,23 +18,20 @@ use std::time;
|
|||||||
use std::sync::{Arc, Weak};
|
use std::sync::{Arc, Weak};
|
||||||
use std::sync::atomic::AtomicBool;
|
use std::sync::atomic::AtomicBool;
|
||||||
use std::collections::{VecDeque, BTreeMap};
|
use std::collections::{VecDeque, BTreeMap};
|
||||||
use parking_lot::{Mutex, RwLock};
|
use parking_lot::{Mutex, RwLock, Condvar};
|
||||||
use bigint::hash::H256;
|
use bigint::hash::H256;
|
||||||
use ethkey::{Public, Secret, Signature};
|
use ethkey::{Secret, Signature};
|
||||||
use key_server_cluster::{Error, NodeId, SessionId, EncryptedDocumentKeyShadow};
|
use key_server_cluster::{Error, NodeId, SessionId};
|
||||||
use key_server_cluster::cluster::{Cluster, ClusterData, ClusterConfiguration, ClusterView};
|
use key_server_cluster::cluster::{Cluster, ClusterData, ClusterConfiguration, ClusterView};
|
||||||
use key_server_cluster::message::{self, Message};
|
use key_server_cluster::message::{self, Message};
|
||||||
use key_server_cluster::generation_session::{Session as GenerationSession, SessionImpl as GenerationSessionImpl,
|
use key_server_cluster::generation_session::{SessionImpl as GenerationSessionImpl};
|
||||||
SessionState as GenerationSessionState};
|
use key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl};
|
||||||
use key_server_cluster::decryption_session::{Session as DecryptionSession, SessionImpl as DecryptionSessionImpl};
|
use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl};
|
||||||
use key_server_cluster::encryption_session::{Session as EncryptionSession, SessionImpl as EncryptionSessionImpl,
|
use key_server_cluster::signing_session::{SessionImpl as SigningSessionImpl};
|
||||||
SessionState as EncryptionSessionState};
|
use key_server_cluster::share_add_session::{SessionImpl as ShareAddSessionImpl, IsolatedSessionTransport as ShareAddTransport};
|
||||||
use key_server_cluster::signing_session::{Session as SigningSession, SessionImpl as SigningSessionImpl};
|
use key_server_cluster::servers_set_change_session::{SessionImpl as ServersSetChangeSessionImpl};
|
||||||
use key_server_cluster::share_add_session::{Session as ShareAddSession, SessionImpl as ShareAddSessionImpl,
|
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl,
|
||||||
IsolatedSessionTransport as ShareAddTransport};
|
IsolatedSessionTransport as VersionNegotiationTransport};
|
||||||
use key_server_cluster::servers_set_change_session::{Session as ServersSetChangeSession, SessionImpl as ServersSetChangeSessionImpl};
|
|
||||||
use key_server_cluster::key_version_negotiation_session::{Session as KeyVersionNegotiationSession, SessionImpl as KeyVersionNegotiationSessionImpl,
|
|
||||||
IsolatedSessionTransport as VersionNegotiationTransport, ContinueAction};
|
|
||||||
|
|
||||||
use key_server_cluster::cluster_sessions_creator::{GenerationSessionCreator, EncryptionSessionCreator, DecryptionSessionCreator, SigningSessionCreator,
|
use key_server_cluster::cluster_sessions_creator::{GenerationSessionCreator, EncryptionSessionCreator, DecryptionSessionCreator, SigningSessionCreator,
|
||||||
KeyVersionNegotiationSessionCreator, AdminSessionCreator, SessionCreatorCore, ClusterSessionCreator};
|
KeyVersionNegotiationSessionCreator, AdminSessionCreator, SessionCreatorCore, ClusterSessionCreator};
|
||||||
@ -82,6 +79,25 @@ pub trait ClusterSession {
|
|||||||
fn on_session_error(&self, sender: &NodeId, error: Error);
|
fn on_session_error(&self, sender: &NodeId, error: Error);
|
||||||
/// Process session message.
|
/// Process session message.
|
||||||
fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error>;
|
fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error>;
|
||||||
|
|
||||||
|
/// 'Wait for session completion' helper.
|
||||||
|
fn wait_session<T, U, F: Fn(&U) -> Option<Result<T, Error>>>(completion_event: &Condvar, session_data: &Mutex<U>, timeout: Option<time::Duration>, result_reader: F) -> Result<T, Error> {
|
||||||
|
let mut locked_data = session_data.lock();
|
||||||
|
match result_reader(&locked_data) {
|
||||||
|
Some(result) => result,
|
||||||
|
None => {
|
||||||
|
match timeout {
|
||||||
|
None => completion_event.wait(&mut locked_data),
|
||||||
|
Some(timeout) => {
|
||||||
|
completion_event.wait_for(&mut locked_data, timeout);
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result_reader(&locked_data)
|
||||||
|
.expect("waited for completion; completion is only signaled when result.is_some(); qed")
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Administrative session.
|
/// Administrative session.
|
||||||
@ -120,12 +136,22 @@ pub struct ClusterSessions {
|
|||||||
creator_core: Arc<SessionCreatorCore>,
|
creator_core: Arc<SessionCreatorCore>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Active sessions container listener.
|
||||||
|
pub trait ClusterSessionsListener<S: ClusterSession>: Send + Sync {
|
||||||
|
/// When new session is inserted to the container.
|
||||||
|
fn on_session_inserted(&self, _session: Arc<S>) {}
|
||||||
|
/// When session is removed from the container.
|
||||||
|
fn on_session_removed(&self, _session: Arc<S>) {}
|
||||||
|
}
|
||||||
|
|
||||||
/// Active sessions container.
|
/// Active sessions container.
|
||||||
pub struct ClusterSessionsContainer<S: ClusterSession, SC: ClusterSessionCreator<S, D>, D> {
|
pub struct ClusterSessionsContainer<S: ClusterSession, SC: ClusterSessionCreator<S, D>, D> {
|
||||||
/// Sessions creator.
|
/// Sessions creator.
|
||||||
pub creator: SC,
|
pub creator: SC,
|
||||||
/// Active sessions.
|
/// Active sessions.
|
||||||
sessions: RwLock<BTreeMap<S::Id, QueuedSession<S>>>,
|
sessions: RwLock<BTreeMap<S::Id, QueuedSession<S>>>,
|
||||||
|
/// Listeners. Lock order: sessions -> listeners.
|
||||||
|
listeners: Mutex<Vec<Weak<ClusterSessionsListener<S>>>>,
|
||||||
/// Sessions container state.
|
/// Sessions container state.
|
||||||
container_state: Arc<Mutex<ClusterSessionsContainerState>>,
|
container_state: Arc<Mutex<ClusterSessionsContainerState>>,
|
||||||
/// Phantom data.
|
/// Phantom data.
|
||||||
@ -159,66 +185,6 @@ pub enum ClusterSessionsContainerState {
|
|||||||
Exclusive,
|
Exclusive,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Generation session implementation, which removes session from cluster on drop.
|
|
||||||
pub struct GenerationSessionWrapper {
|
|
||||||
/// Wrapped session.
|
|
||||||
session: Arc<GenerationSession>,
|
|
||||||
/// Session Id.
|
|
||||||
session_id: SessionId,
|
|
||||||
/// Cluster data reference.
|
|
||||||
cluster: Weak<ClusterData>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Encryption session implementation, which removes session from cluster on drop.
|
|
||||||
pub struct EncryptionSessionWrapper {
|
|
||||||
/// Wrapped session.
|
|
||||||
session: Arc<EncryptionSession>,
|
|
||||||
/// Session Id.
|
|
||||||
session_id: SessionId,
|
|
||||||
/// Cluster data reference.
|
|
||||||
cluster: Weak<ClusterData>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Decryption session implementation, which removes session from cluster on drop.
|
|
||||||
pub struct DecryptionSessionWrapper {
|
|
||||||
/// Wrapped session.
|
|
||||||
session: Arc<DecryptionSession>,
|
|
||||||
/// Session Id.
|
|
||||||
session_id: SessionIdWithSubSession,
|
|
||||||
/// Cluster data reference.
|
|
||||||
cluster: Weak<ClusterData>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Signing session implementation, which removes session from cluster on drop.
|
|
||||||
pub struct SigningSessionWrapper {
|
|
||||||
/// Wrapped session.
|
|
||||||
session: Arc<SigningSession>,
|
|
||||||
/// Session Id.
|
|
||||||
session_id: SessionIdWithSubSession,
|
|
||||||
/// Cluster data reference.
|
|
||||||
cluster: Weak<ClusterData>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Admin session implementation, which removes session from cluster on drop.
|
|
||||||
pub struct AdminSessionWrapper {
|
|
||||||
/// Wrapped session.
|
|
||||||
session: Arc<AdminSession>,
|
|
||||||
/// Session Id.
|
|
||||||
session_id: SessionId,
|
|
||||||
/// Cluster data reference.
|
|
||||||
cluster: Weak<ClusterData>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Key server version negotiation session implementation, which removes session from cluster on drop.
|
|
||||||
pub struct KeyNegotiationSessionWrapper {
|
|
||||||
/// Wrapped session.
|
|
||||||
session: Arc<KeyVersionNegotiationSession>,
|
|
||||||
/// Session Id.
|
|
||||||
session_id: SessionIdWithSubSession,
|
|
||||||
/// Cluster data reference.
|
|
||||||
cluster: Weak<ClusterData>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ClusterSessions {
|
impl ClusterSessions {
|
||||||
/// Create new cluster sessions container.
|
/// Create new cluster sessions container.
|
||||||
pub fn new(config: &ClusterConfiguration) -> Self {
|
pub fn new(config: &ClusterConfiguration) -> Self {
|
||||||
@ -294,11 +260,16 @@ impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: C
|
|||||||
ClusterSessionsContainer {
|
ClusterSessionsContainer {
|
||||||
creator: creator,
|
creator: creator,
|
||||||
sessions: RwLock::new(BTreeMap::new()),
|
sessions: RwLock::new(BTreeMap::new()),
|
||||||
|
listeners: Mutex::new(Vec::new()),
|
||||||
container_state: container_state,
|
container_state: container_state,
|
||||||
_pd: Default::default(),
|
_pd: Default::default(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn add_listener(&self, listener: Arc<ClusterSessionsListener<S>>) {
|
||||||
|
self.listeners.lock().push(Arc::downgrade(&listener));
|
||||||
|
}
|
||||||
|
|
||||||
pub fn is_empty(&self) -> bool {
|
pub fn is_empty(&self) -> bool {
|
||||||
self.sessions.read().is_empty()
|
self.sessions.read().is_empty()
|
||||||
}
|
}
|
||||||
@ -342,12 +313,15 @@ impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: C
|
|||||||
queue: VecDeque::new(),
|
queue: VecDeque::new(),
|
||||||
};
|
};
|
||||||
sessions.insert(session_id, queued_session);
|
sessions.insert(session_id, queued_session);
|
||||||
|
self.notify_listeners(|l| l.on_session_inserted(session.clone()));
|
||||||
|
|
||||||
Ok(session)
|
Ok(session)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn remove(&self, session_id: &S::Id) {
|
pub fn remove(&self, session_id: &S::Id) {
|
||||||
if self.sessions.write().remove(session_id).is_some() {
|
if let Some(session) = self.sessions.write().remove(session_id) {
|
||||||
self.container_state.lock().on_session_completed();
|
self.container_state.lock().on_session_completed();
|
||||||
|
self.notify_listeners(|l| l.on_session_removed(session.session.clone()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -394,6 +368,22 @@ impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: C
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn notify_listeners<F: Fn(&ClusterSessionsListener<S>) -> ()>(&self, callback: F) {
|
||||||
|
let mut listeners = self.listeners.lock();
|
||||||
|
let mut listener_index = 0;
|
||||||
|
while listener_index < listeners.len() {
|
||||||
|
match listeners[listener_index].upgrade() {
|
||||||
|
Some(listener) => {
|
||||||
|
callback(&*listener);
|
||||||
|
listener_index += 1;
|
||||||
|
},
|
||||||
|
None => {
|
||||||
|
listeners.swap_remove(listener_index);
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: ClusterSessionCreator<S, D>, SessionId: From<S::Id> {
|
impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: ClusterSessionCreator<S, D>, SessionId: From<S::Id> {
|
||||||
@ -545,158 +535,6 @@ impl ClusterSession for AdminSession {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GenerationSessionWrapper {
|
|
||||||
pub fn new(cluster: Weak<ClusterData>, session_id: SessionId, session: Arc<GenerationSession>) -> Arc<Self> {
|
|
||||||
Arc::new(GenerationSessionWrapper {
|
|
||||||
session: session,
|
|
||||||
session_id: session_id,
|
|
||||||
cluster: cluster,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl GenerationSession for GenerationSessionWrapper {
|
|
||||||
fn state(&self) -> GenerationSessionState {
|
|
||||||
self.session.state()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn wait(&self, timeout: Option<time::Duration>) -> Result<Public, Error> {
|
|
||||||
self.session.wait(timeout)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn joint_public_and_secret(&self) -> Option<Result<(Public, Secret), Error>> {
|
|
||||||
self.session.joint_public_and_secret()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for GenerationSessionWrapper {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
if let Some(cluster) = self.cluster.upgrade() {
|
|
||||||
cluster.sessions().generation_sessions.remove(&self.session_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl EncryptionSessionWrapper {
|
|
||||||
pub fn new(cluster: Weak<ClusterData>, session_id: SessionId, session: Arc<EncryptionSession>) -> Arc<Self> {
|
|
||||||
Arc::new(EncryptionSessionWrapper {
|
|
||||||
session: session,
|
|
||||||
session_id: session_id,
|
|
||||||
cluster: cluster,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl EncryptionSession for EncryptionSessionWrapper {
|
|
||||||
fn state(&self) -> EncryptionSessionState {
|
|
||||||
self.session.state()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn wait(&self, timeout: Option<time::Duration>) -> Result<(), Error> {
|
|
||||||
self.session.wait(timeout)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for EncryptionSessionWrapper {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
if let Some(cluster) = self.cluster.upgrade() {
|
|
||||||
cluster.sessions().encryption_sessions.remove(&self.session_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DecryptionSessionWrapper {
|
|
||||||
pub fn new(cluster: Weak<ClusterData>, session_id: SessionIdWithSubSession, session: Arc<DecryptionSession>) -> Arc<Self> {
|
|
||||||
Arc::new(DecryptionSessionWrapper {
|
|
||||||
session: session,
|
|
||||||
session_id: session_id,
|
|
||||||
cluster: cluster,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DecryptionSession for DecryptionSessionWrapper {
|
|
||||||
fn wait(&self) -> Result<EncryptedDocumentKeyShadow, Error> {
|
|
||||||
self.session.wait()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for DecryptionSessionWrapper {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
if let Some(cluster) = self.cluster.upgrade() {
|
|
||||||
cluster.sessions().decryption_sessions.remove(&self.session_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SigningSessionWrapper {
|
|
||||||
pub fn new(cluster: Weak<ClusterData>, session_id: SessionIdWithSubSession, session: Arc<SigningSession>) -> Arc<Self> {
|
|
||||||
Arc::new(SigningSessionWrapper {
|
|
||||||
session: session,
|
|
||||||
session_id: session_id,
|
|
||||||
cluster: cluster,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SigningSession for SigningSessionWrapper {
|
|
||||||
fn wait(&self) -> Result<(Secret, Secret), Error> {
|
|
||||||
self.session.wait()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for SigningSessionWrapper {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
if let Some(cluster) = self.cluster.upgrade() {
|
|
||||||
cluster.sessions().signing_sessions.remove(&self.session_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AdminSessionWrapper {
|
|
||||||
pub fn new(cluster: Weak<ClusterData>, session_id: SessionId, session: Arc<AdminSession>) -> Arc<Self> {
|
|
||||||
Arc::new(AdminSessionWrapper {
|
|
||||||
session: session,
|
|
||||||
session_id: session_id,
|
|
||||||
cluster: cluster,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn wait(&self) -> Result<(), Error> {
|
|
||||||
match *self.session {
|
|
||||||
AdminSession::ShareAdd(ref session) => session.wait(),
|
|
||||||
AdminSession::ServersSetChange(ref session) => session.wait(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ShareAddSession for AdminSessionWrapper {
|
|
||||||
fn wait(&self) -> Result<(), Error> {
|
|
||||||
match *self.session {
|
|
||||||
AdminSession::ShareAdd(ref session) => session.wait(),
|
|
||||||
_ => Err(Error::InvalidMessage),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ServersSetChangeSession for AdminSessionWrapper {
|
|
||||||
fn wait(&self) -> Result<(), Error> {
|
|
||||||
match *self.session {
|
|
||||||
AdminSession::ServersSetChange(ref session) => session.wait(),
|
|
||||||
_ => Err(Error::InvalidMessage),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for AdminSessionWrapper {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
if let Some(cluster) = self.cluster.upgrade() {
|
|
||||||
cluster.sessions().admin_sessions.remove(&self.session_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn create_cluster_view(data: &Arc<ClusterData>, requires_all_connections: bool) -> Result<Arc<Cluster>, Error> {
|
pub fn create_cluster_view(data: &Arc<ClusterData>, requires_all_connections: bool) -> Result<Arc<Cluster>, Error> {
|
||||||
if requires_all_connections {
|
if requires_all_connections {
|
||||||
if !data.connections.disconnected_nodes().is_empty() {
|
if !data.connections.disconnected_nodes().is_empty() {
|
||||||
@ -710,39 +548,6 @@ pub fn create_cluster_view(data: &Arc<ClusterData>, requires_all_connections: bo
|
|||||||
Ok(Arc::new(ClusterView::new(data.clone(), connected_nodes)))
|
Ok(Arc::new(ClusterView::new(data.clone(), connected_nodes)))
|
||||||
}
|
}
|
||||||
|
|
||||||
impl KeyNegotiationSessionWrapper {
|
|
||||||
pub fn new(cluster: Weak<ClusterData>, session_id: SessionIdWithSubSession, session: Arc<KeyVersionNegotiationSession>) -> Arc<Self> {
|
|
||||||
Arc::new(KeyNegotiationSessionWrapper {
|
|
||||||
session: session,
|
|
||||||
session_id: session_id,
|
|
||||||
cluster: cluster,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl KeyVersionNegotiationSession for KeyNegotiationSessionWrapper {
|
|
||||||
fn set_continue_action(&self, action: ContinueAction) {
|
|
||||||
self.session.set_continue_action(action)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn continue_action(&self) -> Option<ContinueAction> {
|
|
||||||
self.session.continue_action()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn wait(&self) -> Result<(H256, NodeId), Error> {
|
|
||||||
self.session.wait()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for KeyNegotiationSessionWrapper {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
if let Some(cluster) = self.cluster.upgrade() {
|
|
||||||
cluster.sessions().negotiation_sessions.remove(&self.session_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
@ -773,6 +773,8 @@ pub struct KeyShareCommon {
|
|||||||
pub threshold: usize,
|
pub threshold: usize,
|
||||||
/// Author of key share entry.
|
/// Author of key share entry.
|
||||||
pub author: SerializablePublic,
|
pub author: SerializablePublic,
|
||||||
|
/// Joint public.
|
||||||
|
pub joint_public: SerializablePublic,
|
||||||
/// Common (shared) encryption point.
|
/// Common (shared) encryption point.
|
||||||
pub common_point: Option<SerializablePublic>,
|
pub common_point: Option<SerializablePublic>,
|
||||||
/// Encrypted point.
|
/// Encrypted point.
|
||||||
|
@ -27,9 +27,9 @@ pub use super::key_storage::{KeyStorage, DocumentKeyShare, DocumentKeyShareVersi
|
|||||||
pub use super::key_server_set::KeyServerSet;
|
pub use super::key_server_set::KeyServerSet;
|
||||||
pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic, SerializableMessageHash};
|
pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic, SerializableMessageHash};
|
||||||
pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient};
|
pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient};
|
||||||
pub use self::generation_session::Session as GenerationSession;
|
pub use self::cluster_sessions::{ClusterSession, ClusterSessionsListener};
|
||||||
pub use self::encryption_session::Session as EncryptionSession;
|
#[cfg(test)]
|
||||||
pub use self::decryption_session::Session as DecryptionSession;
|
pub use self::cluster::tests::DummyClusterClient;
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
pub use super::node_key_pair::PlainNodeKeyPair;
|
pub use super::node_key_pair::PlainNodeKeyPair;
|
||||||
|
@ -14,7 +14,7 @@
|
|||||||
// You should have received a copy of the GNU General Public License
|
// You should have received a copy of the GNU General Public License
|
||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
use std::sync::{Arc, Weak};
|
use std::sync::Arc;
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
use futures::{future, Future};
|
use futures::{future, Future};
|
||||||
@ -27,6 +27,7 @@ use bigint::hash::H256;
|
|||||||
use util::Address;
|
use util::Address;
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
use types::all::{Error, Public, NodeAddress};
|
use types::all::{Error, Public, NodeAddress};
|
||||||
|
use trusted_client::TrustedClient;
|
||||||
|
|
||||||
const KEY_SERVER_SET_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_server_set";
|
const KEY_SERVER_SET_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_server_set";
|
||||||
|
|
||||||
@ -55,7 +56,7 @@ pub struct OnChainKeyServerSet {
|
|||||||
/// Cached on-chain Key Server set contract.
|
/// Cached on-chain Key Server set contract.
|
||||||
struct CachedContract {
|
struct CachedContract {
|
||||||
/// Blockchain client.
|
/// Blockchain client.
|
||||||
client: Weak<Client>,
|
client: TrustedClient,
|
||||||
/// Contract address.
|
/// Contract address.
|
||||||
contract_addr: Option<Address>,
|
contract_addr: Option<Address>,
|
||||||
/// Active set of key servers.
|
/// Active set of key servers.
|
||||||
@ -63,19 +64,14 @@ struct CachedContract {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl OnChainKeyServerSet {
|
impl OnChainKeyServerSet {
|
||||||
pub fn new(client: &Arc<Client>, key_servers: BTreeMap<Public, NodeAddress>) -> Result<Arc<Self>, Error> {
|
pub fn new(trusted_client: TrustedClient, key_servers: BTreeMap<Public, NodeAddress>) -> Result<Arc<Self>, Error> {
|
||||||
let mut cached_contract = CachedContract::new(client, key_servers)?;
|
let client = trusted_client.get_untrusted();
|
||||||
let key_server_contract_address = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned());
|
|
||||||
// only initialize from contract if it is installed. otherwise - use default nodes
|
|
||||||
// once the contract is installed, all default nodes are lost (if not in the contract' set)
|
|
||||||
if key_server_contract_address.is_some() {
|
|
||||||
cached_contract.read_from_registry(&*client, key_server_contract_address);
|
|
||||||
}
|
|
||||||
|
|
||||||
let key_server_set = Arc::new(OnChainKeyServerSet {
|
let key_server_set = Arc::new(OnChainKeyServerSet {
|
||||||
contract: Mutex::new(cached_contract),
|
contract: Mutex::new(CachedContract::new(trusted_client, key_servers)?),
|
||||||
});
|
});
|
||||||
client.add_notify(key_server_set.clone());
|
client
|
||||||
|
.ok_or(Error::Internal("Constructing OnChainKeyServerSet without active Client".into()))?
|
||||||
|
.add_notify(key_server_set.clone());
|
||||||
Ok(key_server_set)
|
Ok(key_server_set)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -95,9 +91,9 @@ impl ChainNotify for OnChainKeyServerSet {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl CachedContract {
|
impl CachedContract {
|
||||||
pub fn new(client: &Arc<Client>, key_servers: BTreeMap<Public, NodeAddress>) -> Result<Self, Error> {
|
pub fn new(client: TrustedClient, key_servers: BTreeMap<Public, NodeAddress>) -> Result<Self, Error> {
|
||||||
Ok(CachedContract {
|
let mut cached_contract = CachedContract {
|
||||||
client: Arc::downgrade(client),
|
client: client,
|
||||||
contract_addr: None,
|
contract_addr: None,
|
||||||
key_servers: key_servers.into_iter()
|
key_servers: key_servers.into_iter()
|
||||||
.map(|(p, addr)| {
|
.map(|(p, addr)| {
|
||||||
@ -106,11 +102,22 @@ impl CachedContract {
|
|||||||
Ok((p, addr))
|
Ok((p, addr))
|
||||||
})
|
})
|
||||||
.collect::<Result<BTreeMap<_, _>, Error>>()?,
|
.collect::<Result<BTreeMap<_, _>, Error>>()?,
|
||||||
})
|
};
|
||||||
|
|
||||||
|
if let Some(client) = cached_contract.client.get() {
|
||||||
|
let key_server_contract_address = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned());
|
||||||
|
// only initialize from contract if it is installed. otherwise - use default nodes
|
||||||
|
// once the contract is installed, all default nodes are lost (if not in the contract' set)
|
||||||
|
if key_server_contract_address.is_some() {
|
||||||
|
cached_contract.read_from_registry(&*client, key_server_contract_address);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(cached_contract)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn update(&mut self, enacted: Vec<H256>, retracted: Vec<H256>) {
|
pub fn update(&mut self, enacted: Vec<H256>, retracted: Vec<H256>) {
|
||||||
if let Some(client) = self.client.upgrade() {
|
if let Some(client) = self.client.get() {
|
||||||
let new_contract_addr = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned());
|
let new_contract_addr = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned());
|
||||||
|
|
||||||
// new contract installed => read nodes set from the contract
|
// new contract installed => read nodes set from the contract
|
||||||
|
@ -35,11 +35,14 @@ type CurrentSerializableDocumentKeyVersion = SerializableDocumentKeyShareVersion
|
|||||||
|
|
||||||
/// Encrypted key share, stored by key storage on the single key server.
|
/// Encrypted key share, stored by key storage on the single key server.
|
||||||
#[derive(Debug, Clone, PartialEq)]
|
#[derive(Debug, Clone, PartialEq)]
|
||||||
|
#[cfg_attr(test, derive(Default))]
|
||||||
pub struct DocumentKeyShare {
|
pub struct DocumentKeyShare {
|
||||||
/// Author of the entry.
|
/// Author of the entry.
|
||||||
pub author: Public,
|
pub author: Public,
|
||||||
/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
|
/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
|
||||||
pub threshold: usize,
|
pub threshold: usize,
|
||||||
|
/// Server public key.
|
||||||
|
pub public: Public,
|
||||||
/// Common (shared) encryption point.
|
/// Common (shared) encryption point.
|
||||||
pub common_point: Option<Public>,
|
pub common_point: Option<Public>,
|
||||||
/// Encrypted point.
|
/// Encrypted point.
|
||||||
@ -122,10 +125,12 @@ struct SerializableDocumentKeyShareV1 {
|
|||||||
/// V2 of encrypted key share, as it is stored by key storage on the single key server.
|
/// V2 of encrypted key share, as it is stored by key storage on the single key server.
|
||||||
#[derive(Serialize, Deserialize)]
|
#[derive(Serialize, Deserialize)]
|
||||||
struct SerializableDocumentKeyShareV2 {
|
struct SerializableDocumentKeyShareV2 {
|
||||||
/// Authore of the entry.
|
/// Author of the entry.
|
||||||
pub author: SerializablePublic,
|
pub author: SerializablePublic,
|
||||||
/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
|
/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
|
||||||
pub threshold: usize,
|
pub threshold: usize,
|
||||||
|
/// Server public.
|
||||||
|
pub public: SerializablePublic,
|
||||||
/// Common (shared) encryption point.
|
/// Common (shared) encryption point.
|
||||||
pub common_point: Option<SerializablePublic>,
|
pub common_point: Option<SerializablePublic>,
|
||||||
/// Encrypted point.
|
/// Encrypted point.
|
||||||
@ -174,6 +179,7 @@ fn upgrade_db(db: Database) -> Result<Database, Error> {
|
|||||||
// in v0 there have been only simultaneous GenEnc sessions.
|
// in v0 there have been only simultaneous GenEnc sessions.
|
||||||
author: Public::default().into(), // added in v1
|
author: Public::default().into(), // added in v1
|
||||||
threshold: v0_key.threshold,
|
threshold: v0_key.threshold,
|
||||||
|
public: Public::default().into(), // addded in v2
|
||||||
common_point: Some(v0_key.common_point),
|
common_point: Some(v0_key.common_point),
|
||||||
encrypted_point: Some(v0_key.encrypted_point),
|
encrypted_point: Some(v0_key.encrypted_point),
|
||||||
versions: vec![CurrentSerializableDocumentKeyVersion {
|
versions: vec![CurrentSerializableDocumentKeyVersion {
|
||||||
@ -196,6 +202,7 @@ fn upgrade_db(db: Database) -> Result<Database, Error> {
|
|||||||
let current_key = CurrentSerializableDocumentKeyShare {
|
let current_key = CurrentSerializableDocumentKeyShare {
|
||||||
author: v1_key.author, // added in v1
|
author: v1_key.author, // added in v1
|
||||||
threshold: v1_key.threshold,
|
threshold: v1_key.threshold,
|
||||||
|
public: Public::default().into(), // addded in v2
|
||||||
common_point: v1_key.common_point,
|
common_point: v1_key.common_point,
|
||||||
encrypted_point: v1_key.encrypted_point,
|
encrypted_point: v1_key.encrypted_point,
|
||||||
versions: vec![CurrentSerializableDocumentKeyVersion {
|
versions: vec![CurrentSerializableDocumentKeyVersion {
|
||||||
@ -329,6 +336,7 @@ impl From<DocumentKeyShare> for SerializableDocumentKeyShareV2 {
|
|||||||
SerializableDocumentKeyShareV2 {
|
SerializableDocumentKeyShareV2 {
|
||||||
author: key.author.into(),
|
author: key.author.into(),
|
||||||
threshold: key.threshold,
|
threshold: key.threshold,
|
||||||
|
public: key.public.into(),
|
||||||
common_point: key.common_point.map(Into::into),
|
common_point: key.common_point.map(Into::into),
|
||||||
encrypted_point: key.encrypted_point.map(Into::into),
|
encrypted_point: key.encrypted_point.map(Into::into),
|
||||||
versions: key.versions.into_iter().map(Into::into).collect(),
|
versions: key.versions.into_iter().map(Into::into).collect(),
|
||||||
@ -351,6 +359,7 @@ impl From<SerializableDocumentKeyShareV2> for DocumentKeyShare {
|
|||||||
DocumentKeyShare {
|
DocumentKeyShare {
|
||||||
author: key.author.into(),
|
author: key.author.into(),
|
||||||
threshold: key.threshold,
|
threshold: key.threshold,
|
||||||
|
public: key.public.into(),
|
||||||
common_point: key.common_point.map(Into::into),
|
common_point: key.common_point.map(Into::into),
|
||||||
encrypted_point: key.encrypted_point.map(Into::into),
|
encrypted_point: key.encrypted_point.map(Into::into),
|
||||||
versions: key.versions.into_iter()
|
versions: key.versions.into_iter()
|
||||||
@ -424,6 +433,7 @@ pub mod tests {
|
|||||||
let tempdir = TempDir::new("").unwrap();
|
let tempdir = TempDir::new("").unwrap();
|
||||||
let config = ServiceConfiguration {
|
let config = ServiceConfiguration {
|
||||||
listener_address: None,
|
listener_address: None,
|
||||||
|
service_contract_address: None,
|
||||||
acl_check_enabled: true,
|
acl_check_enabled: true,
|
||||||
data_path: tempdir.path().display().to_string(),
|
data_path: tempdir.path().display().to_string(),
|
||||||
cluster_config: ClusterConfiguration {
|
cluster_config: ClusterConfiguration {
|
||||||
@ -442,6 +452,7 @@ pub mod tests {
|
|||||||
let value1 = DocumentKeyShare {
|
let value1 = DocumentKeyShare {
|
||||||
author: Public::default(),
|
author: Public::default(),
|
||||||
threshold: 100,
|
threshold: 100,
|
||||||
|
public: Public::default(),
|
||||||
common_point: Some(Random.generate().unwrap().public().clone()),
|
common_point: Some(Random.generate().unwrap().public().clone()),
|
||||||
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
||||||
versions: vec![DocumentKeyShareVersion {
|
versions: vec![DocumentKeyShareVersion {
|
||||||
@ -456,6 +467,7 @@ pub mod tests {
|
|||||||
let value2 = DocumentKeyShare {
|
let value2 = DocumentKeyShare {
|
||||||
author: Public::default(),
|
author: Public::default(),
|
||||||
threshold: 200,
|
threshold: 200,
|
||||||
|
public: Public::default(),
|
||||||
common_point: Some(Random.generate().unwrap().public().clone()),
|
common_point: Some(Random.generate().unwrap().public().clone()),
|
||||||
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
encrypted_point: Some(Random.generate().unwrap().public().clone()),
|
||||||
versions: vec![DocumentKeyShareVersion {
|
versions: vec![DocumentKeyShareVersion {
|
||||||
|
@ -43,6 +43,7 @@ extern crate ethcore_bigint as bigint;
|
|||||||
extern crate ethcore_logger as logger;
|
extern crate ethcore_logger as logger;
|
||||||
extern crate ethcrypto;
|
extern crate ethcrypto;
|
||||||
extern crate ethkey;
|
extern crate ethkey;
|
||||||
|
extern crate ethsync;
|
||||||
extern crate native_contracts;
|
extern crate native_contracts;
|
||||||
extern crate keccak_hash as hash;
|
extern crate keccak_hash as hash;
|
||||||
extern crate kvdb;
|
extern crate kvdb;
|
||||||
@ -53,33 +54,53 @@ mod types;
|
|||||||
|
|
||||||
mod traits;
|
mod traits;
|
||||||
mod acl_storage;
|
mod acl_storage;
|
||||||
mod http_listener;
|
|
||||||
mod key_server;
|
mod key_server;
|
||||||
mod key_storage;
|
mod key_storage;
|
||||||
mod serialization;
|
mod serialization;
|
||||||
mod key_server_set;
|
mod key_server_set;
|
||||||
mod node_key_pair;
|
mod node_key_pair;
|
||||||
|
mod listener;
|
||||||
|
mod trusted_client;
|
||||||
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use ethcore::client::Client;
|
use ethcore::client::Client;
|
||||||
|
use ethsync::SyncProvider;
|
||||||
|
|
||||||
pub use types::all::{ServerKeyId, EncryptedDocumentKey, RequestSignature, Public,
|
pub use types::all::{ServerKeyId, EncryptedDocumentKey, RequestSignature, Public,
|
||||||
Error, NodeAddress, ServiceConfiguration, ClusterConfiguration};
|
Error, NodeAddress, ContractAddress, ServiceConfiguration, ClusterConfiguration};
|
||||||
pub use traits::{NodeKeyPair, KeyServer};
|
pub use traits::{NodeKeyPair, KeyServer};
|
||||||
pub use self::node_key_pair::{PlainNodeKeyPair, KeyStoreNodeKeyPair};
|
pub use self::node_key_pair::{PlainNodeKeyPair, KeyStoreNodeKeyPair};
|
||||||
|
|
||||||
/// Start new key server instance
|
/// Start new key server instance
|
||||||
pub fn start(client: Arc<Client>, self_key_pair: Arc<NodeKeyPair>, config: ServiceConfiguration) -> Result<Box<KeyServer>, Error> {
|
pub fn start(client: Arc<Client>, sync: Arc<SyncProvider>, self_key_pair: Arc<NodeKeyPair>, config: ServiceConfiguration) -> Result<Box<KeyServer>, Error> {
|
||||||
use std::sync::Arc;
|
let trusted_client = trusted_client::TrustedClient::new(client.clone(), sync);
|
||||||
|
|
||||||
let acl_storage: Arc<acl_storage::AclStorage> = if config.acl_check_enabled {
|
let acl_storage: Arc<acl_storage::AclStorage> = if config.acl_check_enabled {
|
||||||
acl_storage::OnChainAclStorage::new(&client)
|
acl_storage::OnChainAclStorage::new(trusted_client.clone())?
|
||||||
} else {
|
} else {
|
||||||
Arc::new(acl_storage::DummyAclStorage::default())
|
Arc::new(acl_storage::DummyAclStorage::default())
|
||||||
};
|
};
|
||||||
let key_server_set = key_server_set::OnChainKeyServerSet::new(&client, config.cluster_config.nodes.clone())?;
|
let key_server_set = key_server_set::OnChainKeyServerSet::new(trusted_client.clone(), config.cluster_config.nodes.clone())?;
|
||||||
let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?);
|
let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?);
|
||||||
let key_server = key_server::KeyServerImpl::new(&config.cluster_config, key_server_set, self_key_pair, acl_storage, key_storage)?;
|
let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set.clone(), self_key_pair.clone(), acl_storage, key_storage.clone())?);
|
||||||
let listener = http_listener::KeyServerHttpListener::start(config.listener_address, key_server)?;
|
let cluster = key_server.cluster();
|
||||||
Ok(Box::new(listener))
|
|
||||||
|
// prepare listeners
|
||||||
|
let http_listener = match config.listener_address {
|
||||||
|
Some(listener_address) => Some(listener::http_listener::KeyServerHttpListener::start(listener_address, key_server.clone())?),
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
|
let contract_listener = config.service_contract_address.map(|service_contract_address| {
|
||||||
|
let service_contract = Arc::new(listener::service_contract::OnChainServiceContract::new(trusted_client, service_contract_address, self_key_pair.clone()));
|
||||||
|
let contract_listener = listener::service_contract_listener::ServiceContractListener::new(listener::service_contract_listener::ServiceContractListenerParams {
|
||||||
|
contract: service_contract,
|
||||||
|
key_server: key_server.clone(),
|
||||||
|
self_key_pair: self_key_pair,
|
||||||
|
key_server_set: key_server_set,
|
||||||
|
cluster: cluster,
|
||||||
|
key_storage: key_storage,
|
||||||
|
});
|
||||||
|
client.add_notify(contract_listener.clone());
|
||||||
|
contract_listener
|
||||||
|
});
|
||||||
|
Ok(Box::new(listener::Listener::new(key_server, http_listener, contract_listener)))
|
||||||
}
|
}
|
||||||
|
@ -27,9 +27,9 @@ use serde::Serialize;
|
|||||||
use serde_json;
|
use serde_json;
|
||||||
use url::percent_encoding::percent_decode;
|
use url::percent_encoding::percent_decode;
|
||||||
|
|
||||||
use traits::{ServerKeyGenerator, AdminSessionsServer, DocumentKeyServer, MessageSigner, KeyServer};
|
use traits::KeyServer;
|
||||||
use serialization::{SerializableEncryptedDocumentKeyShadow, SerializableBytes, SerializablePublic};
|
use serialization::{SerializableEncryptedDocumentKeyShadow, SerializableBytes, SerializablePublic};
|
||||||
use types::all::{Error, Public, MessageHash, EncryptedMessageSignature, NodeAddress, RequestSignature, ServerKeyId,
|
use types::all::{Error, Public, MessageHash, NodeAddress, RequestSignature, ServerKeyId,
|
||||||
EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId};
|
EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId};
|
||||||
|
|
||||||
/// Key server http-requests listener. Available requests:
|
/// Key server http-requests listener. Available requests:
|
||||||
@ -41,9 +41,9 @@ use types::all::{Error, Public, MessageHash, EncryptedMessageSignature, NodeAddr
|
|||||||
/// To sign message with server key: GET /{server_key_id}/{signature}/{message_hash}
|
/// To sign message with server key: GET /{server_key_id}/{signature}/{message_hash}
|
||||||
/// To change servers set: POST /admin/servers_set_change/{old_signature}/{new_signature} + BODY: json array of hex-encoded nodes ids
|
/// To change servers set: POST /admin/servers_set_change/{old_signature}/{new_signature} + BODY: json array of hex-encoded nodes ids
|
||||||
|
|
||||||
pub struct KeyServerHttpListener<T: KeyServer + 'static> {
|
pub struct KeyServerHttpListener {
|
||||||
http_server: Option<HttpListening>,
|
http_server: HttpListening,
|
||||||
handler: Arc<KeyServerSharedHttpHandler<T>>,
|
_handler: Arc<KeyServerSharedHttpHandler>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Parsed http request
|
/// Parsed http request
|
||||||
@ -68,83 +68,44 @@ enum Request {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Cloneable http handler
|
/// Cloneable http handler
|
||||||
struct KeyServerHttpHandler<T: KeyServer + 'static> {
|
struct KeyServerHttpHandler {
|
||||||
handler: Arc<KeyServerSharedHttpHandler<T>>,
|
handler: Arc<KeyServerSharedHttpHandler>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Shared http handler
|
/// Shared http handler
|
||||||
struct KeyServerSharedHttpHandler<T: KeyServer + 'static> {
|
struct KeyServerSharedHttpHandler {
|
||||||
key_server: T,
|
key_server: Arc<KeyServer>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T> KeyServerHttpListener<T> where T: KeyServer + 'static {
|
impl KeyServerHttpListener {
|
||||||
/// Start KeyServer http listener
|
/// Start KeyServer http listener
|
||||||
pub fn start(listener_address: Option<NodeAddress>, key_server: T) -> Result<Self, Error> {
|
pub fn start(listener_address: NodeAddress, key_server: Arc<KeyServer>) -> Result<Self, Error> {
|
||||||
let shared_handler = Arc::new(KeyServerSharedHttpHandler {
|
let shared_handler = Arc::new(KeyServerSharedHttpHandler {
|
||||||
key_server: key_server,
|
key_server: key_server,
|
||||||
});
|
});
|
||||||
|
|
||||||
let http_server = listener_address
|
let listener_address = format!("{}:{}", listener_address.address, listener_address.port);
|
||||||
.map(|listener_address| format!("{}:{}", listener_address.address, listener_address.port))
|
let http_server = HttpServer::http(&listener_address)
|
||||||
.map(|listener_address| HttpServer::http(&listener_address).expect("cannot start HttpServer"))
|
.and_then(|http_server| http_server.handle(KeyServerHttpHandler {
|
||||||
.map(|http_server| http_server.handle(KeyServerHttpHandler {
|
|
||||||
handler: shared_handler.clone(),
|
handler: shared_handler.clone(),
|
||||||
}).expect("cannot start HttpServer"));
|
})).map_err(|err| Error::Hyper(format!("{}", err)))?;
|
||||||
|
|
||||||
let listener = KeyServerHttpListener {
|
let listener = KeyServerHttpListener {
|
||||||
http_server: http_server,
|
http_server: http_server,
|
||||||
handler: shared_handler,
|
_handler: shared_handler,
|
||||||
};
|
};
|
||||||
Ok(listener)
|
Ok(listener)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T> KeyServer for KeyServerHttpListener<T> where T: KeyServer + 'static {}
|
impl Drop for KeyServerHttpListener {
|
||||||
|
|
||||||
impl<T> AdminSessionsServer for KeyServerHttpListener<T> where T: KeyServer + 'static {
|
|
||||||
fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet<NodeId>) -> Result<(), Error> {
|
|
||||||
self.handler.key_server.change_servers_set(old_set_signature, new_set_signature, new_servers_set)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> ServerKeyGenerator for KeyServerHttpListener<T> where T: KeyServer + 'static {
|
|
||||||
fn generate_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<Public, Error> {
|
|
||||||
self.handler.key_server.generate_key(key_id, signature, threshold)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> DocumentKeyServer for KeyServerHttpListener<T> where T: KeyServer + 'static {
|
|
||||||
fn store_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> {
|
|
||||||
self.handler.key_server.store_document_key(key_id, signature, common_point, encrypted_document_key)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn generate_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<EncryptedDocumentKey, Error> {
|
|
||||||
self.handler.key_server.generate_document_key(key_id, signature, threshold)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn restore_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKey, Error> {
|
|
||||||
self.handler.key_server.restore_document_key(key_id, signature)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKeyShadow, Error> {
|
|
||||||
self.handler.key_server.restore_document_key_shadow(key_id, signature)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl <T> MessageSigner for KeyServerHttpListener<T> where T: KeyServer + 'static {
|
|
||||||
fn sign_message(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
|
|
||||||
self.handler.key_server.sign_message(key_id, signature, message)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Drop for KeyServerHttpListener<T> where T: KeyServer + 'static {
|
|
||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
// ignore error as we are dropping anyway
|
// ignore error as we are dropping anyway
|
||||||
self.http_server.take().map(|mut s| { let _ = s.close(); });
|
let _ = self.http_server.close();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T> HttpHandler for KeyServerHttpHandler<T> where T: KeyServer + 'static {
|
impl HttpHandler for KeyServerHttpHandler {
|
||||||
fn handle(&self, mut req: HttpRequest, mut res: HttpResponse) {
|
fn handle(&self, mut req: HttpRequest, mut res: HttpResponse) {
|
||||||
if req.headers.has::<header::Origin>() {
|
if req.headers.has::<header::Origin>() {
|
||||||
warn!(target: "secretstore", "Ignoring {}-request {} with Origin header", req.method, req.uri);
|
warn!(target: "secretstore", "Ignoring {}-request {} with Origin header", req.method, req.uri);
|
||||||
@ -273,6 +234,7 @@ fn return_error(mut res: HttpResponse, err: Error) {
|
|||||||
Error::BadSignature => *res.status_mut() = HttpStatusCode::BadRequest,
|
Error::BadSignature => *res.status_mut() = HttpStatusCode::BadRequest,
|
||||||
Error::AccessDenied => *res.status_mut() = HttpStatusCode::Forbidden,
|
Error::AccessDenied => *res.status_mut() = HttpStatusCode::Forbidden,
|
||||||
Error::DocumentNotFound => *res.status_mut() = HttpStatusCode::NotFound,
|
Error::DocumentNotFound => *res.status_mut() = HttpStatusCode::NotFound,
|
||||||
|
Error::Hyper(_) => *res.status_mut() = HttpStatusCode::BadRequest,
|
||||||
Error::Serde(_) => *res.status_mut() = HttpStatusCode::BadRequest,
|
Error::Serde(_) => *res.status_mut() = HttpStatusCode::BadRequest,
|
||||||
Error::Database(_) => *res.status_mut() = HttpStatusCode::InternalServerError,
|
Error::Database(_) => *res.status_mut() = HttpStatusCode::InternalServerError,
|
||||||
Error::Internal(_) => *res.status_mut() = HttpStatusCode::InternalServerError,
|
Error::Internal(_) => *res.status_mut() = HttpStatusCode::InternalServerError,
|
||||||
@ -364,6 +326,7 @@ fn parse_admin_request(method: &HttpMethod, path: Vec<String>, body: &str) -> Re
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
|
use std::sync::Arc;
|
||||||
use hyper::method::Method as HttpMethod;
|
use hyper::method::Method as HttpMethod;
|
||||||
use ethkey::Public;
|
use ethkey::Public;
|
||||||
use key_server::tests::DummyKeyServer;
|
use key_server::tests::DummyKeyServer;
|
||||||
@ -372,9 +335,9 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn http_listener_successfully_drops() {
|
fn http_listener_successfully_drops() {
|
||||||
let key_server = DummyKeyServer;
|
let key_server = Arc::new(DummyKeyServer::default());
|
||||||
let address = NodeAddress { address: "127.0.0.1".into(), port: 9000 };
|
let address = NodeAddress { address: "127.0.0.1".into(), port: 9000 };
|
||||||
let listener = KeyServerHttpListener::start(Some(address), key_server).unwrap();
|
let listener = KeyServerHttpListener::start(address, key_server).unwrap();
|
||||||
drop(listener);
|
drop(listener);
|
||||||
}
|
}
|
||||||
|
|
80
secret_store/src/listener/mod.rs
Normal file
80
secret_store/src/listener/mod.rs
Normal file
@ -0,0 +1,80 @@
|
|||||||
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
pub mod http_listener;
|
||||||
|
pub mod service_contract;
|
||||||
|
pub mod service_contract_listener;
|
||||||
|
mod tasks_queue;
|
||||||
|
|
||||||
|
use std::collections::BTreeSet;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, AdminSessionsServer, KeyServer};
|
||||||
|
use types::all::{Error, Public, MessageHash, EncryptedMessageSignature, RequestSignature, ServerKeyId,
|
||||||
|
EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId};
|
||||||
|
|
||||||
|
pub struct Listener {
|
||||||
|
key_server: Arc<KeyServer>,
|
||||||
|
_http: Option<http_listener::KeyServerHttpListener>,
|
||||||
|
_contract: Option<Arc<service_contract_listener::ServiceContractListener>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Listener {
|
||||||
|
pub fn new(key_server: Arc<KeyServer>, http: Option<http_listener::KeyServerHttpListener>, contract: Option<Arc<service_contract_listener::ServiceContractListener>>) -> Self {
|
||||||
|
Self {
|
||||||
|
key_server: key_server,
|
||||||
|
_http: http,
|
||||||
|
_contract: contract,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl KeyServer for Listener {}
|
||||||
|
|
||||||
|
impl ServerKeyGenerator for Listener {
|
||||||
|
fn generate_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<Public, Error> {
|
||||||
|
self.key_server.generate_key(key_id, signature, threshold)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DocumentKeyServer for Listener {
|
||||||
|
fn store_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> {
|
||||||
|
self.key_server.store_document_key(key_id, signature, common_point, encrypted_document_key)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn generate_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result<EncryptedDocumentKey, Error> {
|
||||||
|
self.key_server.generate_document_key(key_id, signature, threshold)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn restore_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKey, Error> {
|
||||||
|
self.key_server.restore_document_key(key_id, signature)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKeyShadow, Error> {
|
||||||
|
self.key_server.restore_document_key_shadow(key_id, signature)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl MessageSigner for Listener {
|
||||||
|
fn sign_message(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result<EncryptedMessageSignature, Error> {
|
||||||
|
self.key_server.sign_message(key_id, signature, message)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AdminSessionsServer for Listener {
|
||||||
|
fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet<NodeId>) -> Result<(), Error> {
|
||||||
|
self.key_server.change_servers_set(old_set_signature, new_set_signature, new_servers_set)
|
||||||
|
}
|
||||||
|
}
|
343
secret_store/src/listener/service_contract.rs
Normal file
343
secret_store/src/listener/service_contract.rs
Normal file
@ -0,0 +1,343 @@
|
|||||||
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
use futures::{future, Future};
|
||||||
|
use parking_lot::RwLock;
|
||||||
|
use ethcore::filter::Filter;
|
||||||
|
use ethcore::client::{Client, BlockChainClient, BlockId};
|
||||||
|
use ethkey::{Public, Signature, public_to_address};
|
||||||
|
use native_contracts::SecretStoreService;
|
||||||
|
use hash::keccak;
|
||||||
|
use bigint::hash::H256;
|
||||||
|
use bigint::prelude::U256;
|
||||||
|
use listener::service_contract_listener::ServiceTask;
|
||||||
|
use trusted_client::TrustedClient;
|
||||||
|
use {ServerKeyId, NodeKeyPair, ContractAddress};
|
||||||
|
|
||||||
|
/// Name of the SecretStore contract in the registry.
|
||||||
|
const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service";
|
||||||
|
|
||||||
|
/// Key server has been added to the set.
|
||||||
|
const SERVER_KEY_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyRequested(bytes32,uint256)";
|
||||||
|
|
||||||
|
/// Number of confirmations required before request can be processed.
|
||||||
|
const REQUEST_CONFIRMATIONS_REQUIRED: u64 = 3;
|
||||||
|
|
||||||
|
lazy_static! {
|
||||||
|
static ref SERVER_KEY_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_REQUESTED_EVENT_NAME);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Service contract trait.
|
||||||
|
pub trait ServiceContract: Send + Sync {
|
||||||
|
/// Update contract when new blocks are enacted. Returns true if contract is installed && up-to-date (i.e. chain is synced).
|
||||||
|
fn update(&self) -> bool;
|
||||||
|
/// Read recent contract logs. Returns topics of every entry.
|
||||||
|
fn read_logs(&self) -> Box<Iterator<Item=Vec<H256>>>;
|
||||||
|
/// Publish generated key.
|
||||||
|
fn read_pending_requests(&self) -> Box<Iterator<Item=(bool, ServiceTask)>>;
|
||||||
|
/// Publish server key.
|
||||||
|
fn publish_server_key(&self, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// On-chain service contract.
|
||||||
|
pub struct OnChainServiceContract {
|
||||||
|
/// Blockchain client.
|
||||||
|
client: TrustedClient,
|
||||||
|
/// This node key pair.
|
||||||
|
self_key_pair: Arc<NodeKeyPair>,
|
||||||
|
/// Contract addresss.
|
||||||
|
address: ContractAddress,
|
||||||
|
/// Contract.
|
||||||
|
data: RwLock<SecretStoreServiceData>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// On-chain service contract data.
|
||||||
|
struct SecretStoreServiceData {
|
||||||
|
/// Contract.
|
||||||
|
pub contract: Arc<SecretStoreService>,
|
||||||
|
/// Last block we have read logs from.
|
||||||
|
pub last_log_block: Option<H256>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Pending requests iterator.
|
||||||
|
struct PendingRequestsIterator {
|
||||||
|
/// Blockchain client.
|
||||||
|
client: Arc<Client>,
|
||||||
|
/// Contract.
|
||||||
|
contract: Arc<SecretStoreService>,
|
||||||
|
/// This node key pair.
|
||||||
|
self_key_pair: Arc<NodeKeyPair>,
|
||||||
|
/// Block, this iterator is created for.
|
||||||
|
block: H256,
|
||||||
|
/// Current request index.
|
||||||
|
index: U256,
|
||||||
|
/// Requests length.
|
||||||
|
length: U256,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl OnChainServiceContract {
|
||||||
|
/// Create new on-chain service contract.
|
||||||
|
pub fn new(client: TrustedClient, address: ContractAddress, self_key_pair: Arc<NodeKeyPair>) -> Self {
|
||||||
|
let contract_addr = match address {
|
||||||
|
ContractAddress::Registry => client.get().and_then(|c| c.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned())
|
||||||
|
.map(|address| {
|
||||||
|
trace!(target: "secretstore", "{}: installing service contract from address {}",
|
||||||
|
self_key_pair.public(), address);
|
||||||
|
address
|
||||||
|
}))
|
||||||
|
.unwrap_or_default(),
|
||||||
|
ContractAddress::Address(ref address) => {
|
||||||
|
trace!(target: "secretstore", "{}: installing service contract from address {}",
|
||||||
|
self_key_pair.public(), address);
|
||||||
|
address.clone()
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
OnChainServiceContract {
|
||||||
|
client: client,
|
||||||
|
self_key_pair: self_key_pair,
|
||||||
|
address: address,
|
||||||
|
data: RwLock::new(SecretStoreServiceData {
|
||||||
|
contract: Arc::new(SecretStoreService::new(contract_addr)),
|
||||||
|
last_log_block: None,
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ServiceContract for OnChainServiceContract {
|
||||||
|
fn update(&self) -> bool {
|
||||||
|
// TODO [Sec]: registry_address currently reads from BlockId::Latest, instead of
|
||||||
|
// from block with REQUEST_CONFIRMATIONS_REQUIRED confirmations
|
||||||
|
if let &ContractAddress::Registry = &self.address {
|
||||||
|
if let Some(client) = self.client.get() {
|
||||||
|
// update contract address from registry
|
||||||
|
let service_contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()).unwrap_or_default();
|
||||||
|
if self.data.read().contract.address != service_contract_addr {
|
||||||
|
trace!(target: "secretstore", "{}: installing service contract from address {}",
|
||||||
|
self.self_key_pair.public(), service_contract_addr);
|
||||||
|
self.data.write().contract = Arc::new(SecretStoreService::new(service_contract_addr));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
self.data.read().contract.address != Default::default()
|
||||||
|
&& self.client.get().is_some()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read_logs(&self) -> Box<Iterator<Item=Vec<H256>>> {
|
||||||
|
let client = match self.client.get() {
|
||||||
|
Some(client) => client,
|
||||||
|
None => {
|
||||||
|
warn!(target: "secretstore", "{}: client is offline during read_logs call",
|
||||||
|
self.self_key_pair.public());
|
||||||
|
return Box::new(::std::iter::empty());
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
// prepare range of blocks to read logs from
|
||||||
|
let (address, first_block, last_block) = {
|
||||||
|
let mut data = self.data.write();
|
||||||
|
let address = data.contract.address.clone();
|
||||||
|
let confirmed_block = match get_confirmed_block_hash(&*client, REQUEST_CONFIRMATIONS_REQUIRED) {
|
||||||
|
Some(confirmed_block) => confirmed_block,
|
||||||
|
None => return Box::new(::std::iter::empty()), // no block with enough confirmations
|
||||||
|
};
|
||||||
|
let first_block = match data.last_log_block.take().and_then(|b| client.tree_route(&b, &confirmed_block)) {
|
||||||
|
// if we have a route from last_log_block to confirmed_block => search for logs on this route
|
||||||
|
//
|
||||||
|
// potentially this could lead us to reading same logs twice when reorganizing to the fork, which
|
||||||
|
// already has been canonical previosuly
|
||||||
|
// the worst thing that can happen in this case is spending some time reading unneeded data from SS db
|
||||||
|
Some(ref route) if route.index < route.blocks.len() => route.blocks[route.index],
|
||||||
|
// else we care only about confirmed block
|
||||||
|
_ => confirmed_block.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
data.last_log_block = Some(confirmed_block.clone());
|
||||||
|
(address, first_block, confirmed_block)
|
||||||
|
};
|
||||||
|
|
||||||
|
// read server key generation requests
|
||||||
|
let request_logs = client.logs(Filter {
|
||||||
|
from_block: BlockId::Hash(first_block),
|
||||||
|
to_block: BlockId::Hash(last_block),
|
||||||
|
address: Some(vec![address]),
|
||||||
|
topics: vec![
|
||||||
|
Some(vec![*SERVER_KEY_REQUESTED_EVENT_NAME_HASH]),
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
],
|
||||||
|
limit: None,
|
||||||
|
});
|
||||||
|
|
||||||
|
Box::new(request_logs.into_iter().map(|log| log.entry.topics))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read_pending_requests(&self) -> Box<Iterator<Item=(bool, ServiceTask)>> {
|
||||||
|
let client = match self.client.get() {
|
||||||
|
Some(client) => client,
|
||||||
|
None => return Box::new(::std::iter::empty()),
|
||||||
|
};
|
||||||
|
|
||||||
|
// we only need requests that are here for more than REQUEST_CONFIRMATIONS_REQUIRED blocks
|
||||||
|
// => we're reading from Latest - (REQUEST_CONFIRMATIONS_REQUIRED + 1) block
|
||||||
|
let data = self.data.read();
|
||||||
|
match data.contract.address == Default::default() {
|
||||||
|
true => Box::new(::std::iter::empty()),
|
||||||
|
false => get_confirmed_block_hash(&*client, REQUEST_CONFIRMATIONS_REQUIRED + 1)
|
||||||
|
.and_then(|b| {
|
||||||
|
let do_call = |a, d| future::done(client.call_contract(BlockId::Hash(b.clone()), a, d));
|
||||||
|
data.contract.server_key_generation_requests_count(&do_call).wait()
|
||||||
|
.map_err(|error| {
|
||||||
|
warn!(target: "secretstore", "{}: call to server_key_generation_requests_count failed: {}",
|
||||||
|
self.self_key_pair.public(), error);
|
||||||
|
error
|
||||||
|
})
|
||||||
|
.map(|l| (b, l))
|
||||||
|
.ok()
|
||||||
|
})
|
||||||
|
.map(|(b, l)| Box::new(PendingRequestsIterator {
|
||||||
|
client: client,
|
||||||
|
contract: data.contract.clone(),
|
||||||
|
self_key_pair: self.self_key_pair.clone(),
|
||||||
|
block: b,
|
||||||
|
index: 0.into(),
|
||||||
|
length: l,
|
||||||
|
}) as Box<Iterator<Item=(bool, ServiceTask)>>)
|
||||||
|
.unwrap_or_else(|| Box::new(::std::iter::empty()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn publish_server_key(&self, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> {
|
||||||
|
// only publish if contract address is set && client is online
|
||||||
|
let data = self.data.read();
|
||||||
|
if data.contract.address == Default::default() {
|
||||||
|
// it is not an error, because key could be generated even without contract
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
let client = match self.client.get() {
|
||||||
|
Some(client) => client,
|
||||||
|
None => return Err("trusted client is required to publish key".into()),
|
||||||
|
};
|
||||||
|
|
||||||
|
// only publish key if contract waits for publication
|
||||||
|
// failing is ok here - it could be that enough confirmations have been recevied
|
||||||
|
// or key has been requested using HTTP API
|
||||||
|
let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d));
|
||||||
|
let self_address = public_to_address(self.self_key_pair.public());
|
||||||
|
if data.contract.get_server_key_confirmation_status(&do_call, server_key_id.clone(), self_address).wait().unwrap_or(false) {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// prepare transaction data
|
||||||
|
let server_key_hash = keccak(server_key);
|
||||||
|
let signed_server_key = self.self_key_pair.sign(&server_key_hash).map_err(|e| format!("{}", e))?;
|
||||||
|
let signed_server_key: Signature = signed_server_key.into_electrum().into();
|
||||||
|
let transaction_data = data.contract.encode_server_key_generated_input(server_key_id.clone(),
|
||||||
|
server_key.to_vec(),
|
||||||
|
signed_server_key.v(),
|
||||||
|
signed_server_key.r().into(),
|
||||||
|
signed_server_key.s().into()
|
||||||
|
)?;
|
||||||
|
|
||||||
|
// send transaction
|
||||||
|
client.transact_contract(
|
||||||
|
data.contract.address.clone(),
|
||||||
|
transaction_data
|
||||||
|
).map_err(|e| format!("{}", e))?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Iterator for PendingRequestsIterator {
|
||||||
|
type Item = (bool, ServiceTask);
|
||||||
|
|
||||||
|
fn next(&mut self) -> Option<(bool, ServiceTask)> {
|
||||||
|
if self.index >= self.length {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let index = self.index.clone();
|
||||||
|
self.index = self.index + 1.into();
|
||||||
|
|
||||||
|
let self_address = public_to_address(self.self_key_pair.public());
|
||||||
|
let do_call = |a, d| future::done(self.client.call_contract(BlockId::Hash(self.block.clone()), a, d));
|
||||||
|
self.contract.get_server_key_id(&do_call, index).wait()
|
||||||
|
.and_then(|server_key_id|
|
||||||
|
self.contract.get_server_key_threshold(&do_call, server_key_id.clone()).wait()
|
||||||
|
.map(|threshold| (server_key_id, threshold)))
|
||||||
|
.and_then(|(server_key_id, threshold)|
|
||||||
|
self.contract.get_server_key_confirmation_status(&do_call, server_key_id.clone(), self_address).wait()
|
||||||
|
.map(|is_confirmed| (server_key_id, threshold, is_confirmed)))
|
||||||
|
.map(|(server_key_id, threshold, is_confirmed)|
|
||||||
|
Some((is_confirmed, ServiceTask::GenerateServerKey(server_key_id, threshold.into()))))
|
||||||
|
.map_err(|error| {
|
||||||
|
warn!(target: "secretstore", "{}: reading service contract request failed: {}",
|
||||||
|
self.self_key_pair.public(), error);
|
||||||
|
()
|
||||||
|
})
|
||||||
|
.unwrap_or(None)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get hash of the last block with at least n confirmations.
|
||||||
|
fn get_confirmed_block_hash(client: &Client, confirmations: u64) -> Option<H256> {
|
||||||
|
client.block_number(BlockId::Latest)
|
||||||
|
.map(|b| b.saturating_sub(confirmations))
|
||||||
|
.and_then(|b| client.block_hash(BlockId::Number(b)))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub mod tests {
|
||||||
|
use parking_lot::Mutex;
|
||||||
|
use ethkey::Public;
|
||||||
|
use bigint::hash::H256;
|
||||||
|
use listener::service_contract_listener::ServiceTask;
|
||||||
|
use ServerKeyId;
|
||||||
|
use super::ServiceContract;
|
||||||
|
|
||||||
|
#[derive(Default)]
|
||||||
|
pub struct DummyServiceContract {
|
||||||
|
pub is_actual: bool,
|
||||||
|
pub logs: Vec<Vec<H256>>,
|
||||||
|
pub pending_requests: Vec<(bool, ServiceTask)>,
|
||||||
|
pub published_keys: Mutex<Vec<(ServerKeyId, Public)>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ServiceContract for DummyServiceContract {
|
||||||
|
fn update(&self) -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read_logs(&self) -> Box<Iterator<Item=Vec<H256>>> {
|
||||||
|
Box::new(self.logs.clone().into_iter())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read_pending_requests(&self) -> Box<Iterator<Item=(bool, ServiceTask)>> {
|
||||||
|
Box::new(self.pending_requests.clone().into_iter())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn publish_server_key(&self, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> {
|
||||||
|
self.published_keys.lock().push((server_key_id.clone(), server_key.clone()));
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
660
secret_store/src/listener/service_contract_listener.rs
Normal file
660
secret_store/src/listener/service_contract_listener.rs
Normal file
@ -0,0 +1,660 @@
|
|||||||
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::collections::HashSet;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||||
|
use std::thread;
|
||||||
|
use parking_lot::Mutex;
|
||||||
|
use ethcore::client::ChainNotify;
|
||||||
|
use ethkey::{Random, Generator, Public, sign};
|
||||||
|
use bytes::Bytes;
|
||||||
|
use bigint::hash::H256;
|
||||||
|
use bigint::prelude::U256;
|
||||||
|
use key_server_set::KeyServerSet;
|
||||||
|
use key_server_cluster::{ClusterClient, ClusterSessionsListener, ClusterSession};
|
||||||
|
use key_server_cluster::generation_session::SessionImpl as GenerationSession;
|
||||||
|
use key_storage::KeyStorage;
|
||||||
|
use listener::service_contract::ServiceContract;
|
||||||
|
use listener::tasks_queue::TasksQueue;
|
||||||
|
use {ServerKeyId, NodeKeyPair, KeyServer};
|
||||||
|
|
||||||
|
/// Retry interval (in blocks). Every RETRY_INTERVAL_BLOCKS blocks each KeyServer reads pending requests from
|
||||||
|
/// service contract && tries to re-execute. The reason to have this mechanism is primarily because keys
|
||||||
|
/// servers set change takes a lot of time + there could be some races, when blocks are coming to different
|
||||||
|
/// KS at different times. This isn't intended to fix && respond to general session errors!
|
||||||
|
const RETRY_INTERVAL_BLOCKS: usize = 30;
|
||||||
|
|
||||||
|
/// Max failed retry requests (in single retry interval). The reason behind this constant is that if several
|
||||||
|
/// pending requests have failed, then most probably other will fail too.
|
||||||
|
const MAX_FAILED_RETRY_REQUESTS: usize = 1;
|
||||||
|
|
||||||
|
/// SecretStore <-> Authority connector responsible for:
|
||||||
|
/// 1. listening for new requests on SecretStore contract
|
||||||
|
/// 2. redirecting requests to key server
|
||||||
|
/// 3. publishing response on SecretStore contract
|
||||||
|
pub struct ServiceContractListener {
|
||||||
|
/// Service contract listener data.
|
||||||
|
data: Arc<ServiceContractListenerData>,
|
||||||
|
/// Service thread handle.
|
||||||
|
service_handle: Option<thread::JoinHandle<()>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Service contract listener parameters, consumed by `ServiceContractListener::new`.
pub struct ServiceContractListenerParams {
	/// Service contract.
	pub contract: Arc<ServiceContract>,
	/// Key server reference (used to execute generation requests).
	pub key_server: Arc<KeyServer>,
	/// This node key pair (used for logging and for deciding request ownership).
	pub self_key_pair: Arc<NodeKeyPair>,
	/// Key servers set.
	pub key_server_set: Arc<KeyServerSet>,
	/// Cluster reference (the listener registers itself as a generation-session listener on it).
	pub cluster: Arc<ClusterClient>,
	/// Key storage reference.
	pub key_storage: Arc<KeyStorage>,
}
|
||||||
|
|
||||||
|
/// Service contract listener data, shared between the listener facade and the service thread.
struct ServiceContractListenerData {
	/// Blocks since last retry (reset to 0 when a retry is scheduled).
	pub last_retry: AtomicUsize,
	/// Retry-related data (keys already attempted in the current retry cycle).
	pub retry_data: Mutex<ServiceContractRetryData>,
	/// Service tasks queue, consumed by the service thread.
	pub tasks_queue: Arc<TasksQueue<ServiceTask>>,
	/// Service contract.
	pub contract: Arc<ServiceContract>,
	/// Key server reference.
	pub key_server: Arc<KeyServer>,
	/// This node key pair.
	pub self_key_pair: Arc<NodeKeyPair>,
	/// Key servers set.
	pub key_server_set: Arc<KeyServerSet>,
	/// Key storage reference.
	pub key_storage: Arc<KeyStorage>,

}
|
||||||
|
|
||||||
|
/// Retry-related data. Taken (replaced with a fresh default) at the start of each retry cycle.
#[derive(Default)]
struct ServiceContractRetryData {
	/// Server keys, which we have generated (or tried to generate) since the last retry moment;
	/// used to avoid re-processing a request whose on-chain state has not been updated yet.
	pub generated_keys: HashSet<ServerKeyId>,
}
|
||||||
|
|
||||||
|
/// Service task, processed by the service thread.
#[derive(Debug, Clone, PartialEq)]
pub enum ServiceTask {
	/// Retry all 'stalled' tasks (re-read pending requests from the contract).
	Retry,
	/// Generate server key (server_key_id, threshold). The threshold is carried as an H256
	/// topic value and validated/converted when the task is executed.
	GenerateServerKey(H256, H256),
	/// Confirm server key (server_key_id): the key already exists locally and only needs publishing.
	RestoreServerKey(H256),
	/// Shutdown listener (sentinel pushed from `Drop` to stop the service thread).
	Shutdown,
}
|
||||||
|
|
||||||
|
impl ServiceContractListener {
	/// Create new service contract listener.
	///
	/// Seeds the task queue with an initial `Retry` task, spawns the background service
	/// thread (except in test builds) and registers the listener on the cluster so that
	/// foreign generation sessions can be observed and published.
	pub fn new(params: ServiceContractListenerParams) -> Arc<ServiceContractListener> {
		let data = Arc::new(ServiceContractListenerData {
			last_retry: AtomicUsize::new(0),
			retry_data: Default::default(),
			tasks_queue: Arc::new(TasksQueue::new()),
			contract: params.contract,
			key_server: params.key_server,
			self_key_pair: params.self_key_pair,
			key_server_set: params.key_server_set,
			key_storage: params.key_storage,
		});
		// schedule an initial retry so pending on-chain requests are picked up on start
		data.tasks_queue.push(ServiceTask::Retry);

		// we are not starting thread when in test mode
		let service_handle = if cfg!(test) {
			None
		} else {
			let service_thread_data = data.clone();
			Some(thread::spawn(move || Self::run_service_thread(service_thread_data)))
		};
		let contract = Arc::new(ServiceContractListener {
			data: data,
			service_handle: service_handle,
		});
		// subscribe to generation-session completions (see ClusterSessionsListener impl)
		params.cluster.add_generation_listener(contract.clone());
		contract
	}

	/// Process incoming events of service contract: read the contract logs and turn each
	/// well-formed `ServerKeyRequested` event into a queued task.
	fn process_service_contract_events(&self) {
		self.data.tasks_queue.push_many(self.data.contract.read_logs()
			.filter_map(|topics| match topics.len() {
				// when key is already generated && we have this key
				3 if self.data.key_storage.get(&topics[1]).map(|k| k.is_some()).unwrap_or_default() => {
					Some(ServiceTask::RestoreServerKey(
						topics[1],
					))
				}
				// when key is not yet generated && this node should be master of this key generation session
				3 if is_processed_by_this_key_server(&*self.data.key_server_set, &*self.data.self_key_pair, &topics[1]) => {
					Some(ServiceTask::GenerateServerKey(
						topics[1],
						topics[2],
					))
				},
				// well-formed event, but another key server is responsible for it
				3 => None,
				l @ _ => {
					warn!(target: "secretstore", "Ignoring ServerKeyRequested event with wrong number of params {}", l);
					None
				},
			}));
	}

	/// Service thread procedure: block on the task queue and process tasks until a
	/// `Shutdown` sentinel is received.
	fn run_service_thread(data: Arc<ServiceContractListenerData>) {
		loop {
			let task = data.tasks_queue.wait();
			trace!(target: "secretstore", "{}: processing {:?} task", data.self_key_pair.public(), task);

			match task {
				ServiceTask::Shutdown => break,
				task @ _ => {
					// the only possible reaction to an error is a trace && it is already happened
					let _ = Self::process_service_task(&data, task);
				},
			};
		}
	}

	/// Process single service task.
	///
	/// Returns `Err` with a human-readable reason; errors are already logged here, callers
	/// only use the result for retry accounting. `Shutdown` must be filtered out by the caller.
	fn process_service_task(data: &Arc<ServiceContractListenerData>, task: ServiceTask) -> Result<(), String> {
		match task {
			ServiceTask::Retry =>
				Self::retry_pending_requests(&data)
					.map(|processed_requests| {
						if processed_requests != 0 {
							trace!(target: "secretstore", "{}: successfully retried {} pending requests",
								data.self_key_pair.public(), processed_requests);
						}
						()
					})
					.map_err(|error| {
						warn!(target: "secretstore", "{}: retrying pending requests has failed with: {}",
							data.self_key_pair.public(), error);
						error
					}),
			ServiceTask::RestoreServerKey(server_key_id) => {
				// remember the key before processing, so the next retry cycle skips it even on failure
				data.retry_data.lock().generated_keys.insert(server_key_id.clone());
				Self::restore_server_key(&data, &server_key_id)
					.and_then(|server_key| Self::publish_server_key(&data, &server_key_id, &server_key))
					.map(|_| {
						trace!(target: "secretstore", "{}: processed RestoreServerKey({}) request",
							data.self_key_pair.public(), server_key_id);
						()
					})
					.map_err(|error| {
						warn!(target: "secretstore", "{}: failed to process RestoreServerKey({}) request with: {}",
							data.self_key_pair.public(), server_key_id, error);
						error
					})
			},
			ServiceTask::GenerateServerKey(server_key_id, threshold) => {
				// remember the key before processing, so the next retry cycle skips it even on failure
				data.retry_data.lock().generated_keys.insert(server_key_id.clone());
				Self::generate_server_key(&data, &server_key_id, &threshold)
					.and_then(|server_key| Self::publish_server_key(&data, &server_key_id, &server_key))
					.map(|_| {
						trace!(target: "secretstore", "{}: processed GenerateServerKey({}, {}) request",
							data.self_key_pair.public(), server_key_id, threshold);
						()
					})
					.map_err(|error| {
						warn!(target: "secretstore", "{}: failed to process GenerateServerKey({}, {}) request with: {}",
							data.self_key_pair.public(), server_key_id, threshold, error);
						error
					})
			},
			ServiceTask::Shutdown => unreachable!("it must be checked outside"),
		}
	}

	/// Retry processing pending requests read from the service contract.
	///
	/// Returns the number of successfully processed requests, or an error once more than
	/// MAX_FAILED_RETRY_REQUESTS requests have failed in this cycle.
	fn retry_pending_requests(data: &Arc<ServiceContractListenerData>) -> Result<usize, String> {
		let mut failed_requests = 0;
		let mut processed_requests = 0;
		// take the per-cycle retry data, leaving a fresh empty set for the next cycle
		let retry_data = ::std::mem::replace(&mut *data.retry_data.lock(), Default::default());
		for (is_confirmed, task) in data.contract.read_pending_requests() {
			// only process requests, which we haven't confirmed yet
			if is_confirmed {
				continue;
			}

			let request_result = match task {
				ServiceTask::GenerateServerKey(server_key_id, threshold) => {
					// only process request, which haven't been processed recently
					// there could be a lag when we've just generated server key && retrying on the same block
					// (or before our tx is mined) - state is not updated yet
					if retry_data.generated_keys.contains(&server_key_id) {
						continue;
					}

					// process request: generate if this node owns the key range, otherwise
					// just (re)publish the locally stored key
					let is_own_request = is_processed_by_this_key_server(&*data.key_server_set, &*data.self_key_pair, &server_key_id);
					Self::process_service_task(data, match is_own_request {
						true => ServiceTask::GenerateServerKey(server_key_id, threshold.into()),
						false => ServiceTask::RestoreServerKey(server_key_id),
					})
				},
				_ => Err("not supported".into()),
			};

			// process request result
			match request_result {
				Ok(_) => processed_requests += 1,
				Err(_) => {
					failed_requests += 1;
					if failed_requests > MAX_FAILED_RETRY_REQUESTS {
						return Err("too many failed requests".into());
					}
				},
			}
		}

		Ok(processed_requests)
	}

	/// Generate server key with the given id and threshold (threshold is an H256 topic value
	/// and must fit into usize).
	fn generate_server_key(data: &Arc<ServiceContractListenerData>, server_key_id: &ServerKeyId, threshold: &H256) -> Result<Public, String> {
		let threshold_num = threshold.low_u64();
		// reject thresholds that don't round-trip through u64 or don't fit into usize
		if threshold != &threshold_num.into() || threshold_num >= ::std::usize::MAX as u64 {
			return Err(format!("invalid threshold {:?}", threshold));
		}

		// key server expects signed server_key_id in server_key_generation procedure
		// only signer could store document key for this server key later
		// => this API (server key generation) is not suitable for usage in encryption via contract endpoint
		let author_key = Random.generate().map_err(|e| format!("{}", e))?;
		let server_key_id_signature = sign(author_key.secret(), server_key_id).map_err(|e| format!("{}", e))?;
		data.key_server.generate_key(server_key_id, &server_key_id_signature, threshold_num as usize)
			.map_err(Into::into)
	}

	/// Restore server key: read the public part of an already-stored key share.
	fn restore_server_key(data: &Arc<ServiceContractListenerData>, server_key_id: &ServerKeyId) -> Result<Public, String> {
		data.key_storage.get(server_key_id)
			.map_err(|e| format!("{}", e))
			.and_then(|ks| ks.ok_or("missing key".to_owned()))
			.map(|ks| ks.public)
	}

	/// Publish server key on the service contract.
	fn publish_server_key(data: &Arc<ServiceContractListenerData>, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> {
		data.contract.publish_server_key(server_key_id, server_key)
	}
}
|
||||||
|
|
||||||
|
impl Drop for ServiceContractListener {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
if let Some(service_handle) = self.service_handle.take() {
|
||||||
|
self.data.tasks_queue.push_front(ServiceTask::Shutdown);
|
||||||
|
// ignore error as we are already closing
|
||||||
|
let _ = service_handle.join();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ChainNotify for ServiceContractListener {
|
||||||
|
fn new_blocks(&self, _imported: Vec<H256>, _invalid: Vec<H256>, enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: u64) {
|
||||||
|
let enacted_len = enacted.len();
|
||||||
|
if enacted_len == 0 {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if !self.data.contract.update() {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
self.process_service_contract_events();
|
||||||
|
|
||||||
|
// schedule retry if received enough blocks since last retry
|
||||||
|
// it maybe inaccurate when switching syncing/synced states, but that's ok
|
||||||
|
if self.data.last_retry.fetch_add(enacted_len, Ordering::Relaxed) >= RETRY_INTERVAL_BLOCKS {
|
||||||
|
self.data.tasks_queue.push(ServiceTask::Retry);
|
||||||
|
self.data.last_retry.store(0, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ClusterSessionsListener<GenerationSession> for ServiceContractListener {
	/// Called when a generation session is removed from the cluster; publishes the result
	/// of sessions that were started by OTHER nodes.
	fn on_session_removed(&self, session: Arc<GenerationSession>) {
		// only publish when the session is started by another node
		// when it is started by this node, it is published from process_service_task
		if !is_processed_by_this_key_server(&*self.data.key_server_set, &*self.data.self_key_pair, &session.id()) {
			// by this time session must already be completed - either successfully, or not
			assert!(session.is_finished());

			// ignore result - the only thing that we can do is to log the error
			match session.wait(Some(Default::default()))
				.map_err(|e| format!("{}", e))
				.and_then(|server_key| Self::publish_server_key(&self.data, &session.id(), &server_key)) {
				Ok(_) => trace!(target: "secretstore", "{}: completed foreign GenerateServerKey({}) request",
					self.data.self_key_pair.public(), session.id()),
				Err(error) => warn!(target: "secretstore", "{}: failed to process GenerateServerKey({}) request with: {}",
					self.data.self_key_pair.public(), session.id(), error),
			}
		}
	}
}
|
||||||
|
|
||||||
|
/// Returns true when session, related to `server_key_id` must be started on this KeyServer.
|
||||||
|
fn is_processed_by_this_key_server(key_server_set: &KeyServerSet, self_key_pair: &NodeKeyPair, server_key_id: &H256) -> bool {
|
||||||
|
let servers = key_server_set.get();
|
||||||
|
let total_servers_count = servers.len();
|
||||||
|
match total_servers_count {
|
||||||
|
0 => return false,
|
||||||
|
1 => return true,
|
||||||
|
_ => (),
|
||||||
|
}
|
||||||
|
|
||||||
|
let this_server_index = match servers.keys().enumerate().find(|&(_, s)| s == self_key_pair.public()) {
|
||||||
|
Some((index, _)) => index,
|
||||||
|
None => return false,
|
||||||
|
};
|
||||||
|
|
||||||
|
let server_key_id_value: U256 = server_key_id.into();
|
||||||
|
let range_interval = U256::max_value() / total_servers_count.into();
|
||||||
|
let range_begin = (range_interval + 1.into()) * this_server_index.into();
|
||||||
|
let range_end = range_begin.saturating_add(range_interval);
|
||||||
|
|
||||||
|
server_key_id_value >= range_begin && server_key_id_value <= range_end
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
	use std::sync::Arc;
	use std::sync::atomic::Ordering;
	use ethkey::{Random, Generator, KeyPair};
	use listener::service_contract::ServiceContract;
	use listener::service_contract::tests::DummyServiceContract;
	use key_server_cluster::DummyClusterClient;
	use key_server::tests::DummyKeyServer;
	use key_storage::{KeyStorage, DocumentKeyShare};
	use key_storage::tests::DummyKeyStorage;
	use key_server_set::tests::MapKeyServerSet;
	use PlainNodeKeyPair;
	use super::{ServiceTask, ServiceContractListener, ServiceContractListenerParams, is_processed_by_this_key_server};

	// Build a listener over dummy components; the node key is secret `...01`, so it is
	// part of the 3-node servers set below. In test mode no service thread is spawned.
	fn make_service_contract_listener(contract: Option<Arc<ServiceContract>>, key_server: Option<Arc<DummyKeyServer>>, key_storage: Option<Arc<KeyStorage>>) -> Arc<ServiceContractListener> {
		let contract = contract.unwrap_or_else(|| Arc::new(DummyServiceContract::default()));
		let key_server = key_server.unwrap_or_else(|| Arc::new(DummyKeyServer::default()));
		let key_storage = key_storage.unwrap_or_else(|| Arc::new(DummyKeyStorage::default()));
		let servers_set = Arc::new(MapKeyServerSet::new(vec![
			("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
			("c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee51ae168fea63dc339a3c58419466ceaeef7f632653266d0e1236431a950cfe52a".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
			("f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9388f7b0f632de8140fe337e62a37f3566500a99934c2231b6cb9fd7584b8e672".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
		].into_iter().collect()));
		let self_key_pair = Arc::new(PlainNodeKeyPair::new(KeyPair::from_secret("0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap()));
		ServiceContractListener::new(ServiceContractListenerParams {
			contract: contract,
			key_server: key_server,
			self_key_pair: self_key_pair,
			key_server_set: servers_set,
			cluster: Arc::new(DummyClusterClient::default()),
			key_storage: key_storage,
		})
	}

	#[test]
	fn is_not_processed_by_this_key_server_with_zero_servers() {
		assert_eq!(is_processed_by_this_key_server(
			&MapKeyServerSet::default(),
			&PlainNodeKeyPair::new(Random.generate().unwrap()),
			&Default::default()), false);
	}

	#[test]
	fn is_processed_by_this_key_server_with_single_server() {
		let self_key_pair = Random.generate().unwrap();
		assert_eq!(is_processed_by_this_key_server(
			&MapKeyServerSet::new(vec![
				(self_key_pair.public().clone(), "127.0.0.1:8080".parse().unwrap())
			].into_iter().collect()),
			&PlainNodeKeyPair::new(self_key_pair),
			&Default::default()), true);
	}

	#[test]
	fn is_not_processed_by_this_key_server_when_not_a_part_of_servers_set() {
		// NOTE(review): the set has exactly one (foreign) server, so the function
		// short-circuits on the single-server case and returns `true` even though this
		// node is not in the set - the assert passes, but the test name suggests `false`
		// was the intent; verify which behavior is desired.
		assert!(is_processed_by_this_key_server(
			&MapKeyServerSet::new(vec![
				(Random.generate().unwrap().public().clone(), "127.0.0.1:8080".parse().unwrap())
			].into_iter().collect()),
			&PlainNodeKeyPair::new(Random.generate().unwrap()),
			&Default::default()));
	}

	#[test]
	fn is_processed_by_this_key_server_in_set_of_3() {
		// servers set is ordered && server range depends on index of this server
		let servers_set = MapKeyServerSet::new(vec![
			// secret: 0000000000000000000000000000000000000000000000000000000000000001
			("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
			// secret: 0000000000000000000000000000000000000000000000000000000000000002
			("c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee51ae168fea63dc339a3c58419466ceaeef7f632653266d0e1236431a950cfe52a".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
			// secret: 0000000000000000000000000000000000000000000000000000000000000003
			("f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9388f7b0f632de8140fe337e62a37f3566500a99934c2231b6cb9fd7584b8e672".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
		].into_iter().collect());

		// 1st server: process hashes [0x0; 0x555...555]
		let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret(
			"0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap());
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"0000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"3000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"5555555555555555555555555555555555555555555555555555555555555555".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"5555555555555555555555555555555555555555555555555555555555555556".parse().unwrap()), false);

		// 2nd server: process hashes from 0x555...556 to 0xaaa...aab
		let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret(
			"0000000000000000000000000000000000000000000000000000000000000002".parse().unwrap()).unwrap());
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"5555555555555555555555555555555555555555555555555555555555555555".parse().unwrap()), false);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"5555555555555555555555555555555555555555555555555555555555555556".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"7555555555555555555555555555555555555555555555555555555555555555".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac".parse().unwrap()), false);

		// 3rd server: process hashes from 0xaaa...aac to 0xfff...fff
		let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret(
			"0000000000000000000000000000000000000000000000000000000000000003".parse().unwrap()).unwrap());
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab".parse().unwrap()), false);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"daaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true);
	}

	#[test]
	fn is_processed_by_this_key_server_in_set_of_4() {
		// servers set is ordered && server range depends on index of this server
		let servers_set = MapKeyServerSet::new(vec![
			// secret: 0000000000000000000000000000000000000000000000000000000000000001
			("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
			// secret: 0000000000000000000000000000000000000000000000000000000000000002
			("c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee51ae168fea63dc339a3c58419466ceaeef7f632653266d0e1236431a950cfe52a".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
			// secret: 0000000000000000000000000000000000000000000000000000000000000004
			("e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd1351ed993ea0d455b75642e2098ea51448d967ae33bfbdfe40cfe97bdc47739922".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
			// secret: 0000000000000000000000000000000000000000000000000000000000000003
			("f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9388f7b0f632de8140fe337e62a37f3566500a99934c2231b6cb9fd7584b8e672".parse().unwrap(),
				"127.0.0.1:8080".parse().unwrap()),
		].into_iter().collect());

		// 1st server: process hashes [0x0; 0x3ff...ff]
		let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret(
			"0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap());
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"0000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"2000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"4000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), false);

		// 2nd server: process hashes from 0x400...000 to 0x7ff...ff
		let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret(
			"0000000000000000000000000000000000000000000000000000000000000002".parse().unwrap()).unwrap());
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), false);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"4000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"6000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"8000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), false);

		// 3rd server: process hashes from 0x800...000 to 0xbff...ff
		let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret(
			"0000000000000000000000000000000000000000000000000000000000000004".parse().unwrap()).unwrap());
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), false);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"8000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"a000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"bfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"c000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), false);

		// 4th server: process hashes from 0xc00...000 to 0xfff...ff
		let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret(
			"0000000000000000000000000000000000000000000000000000000000000003".parse().unwrap()).unwrap());
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"bfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), false);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"c000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"e000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true);
		assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair,
			&"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true);
	}

	#[test]
	fn no_tasks_scheduled_when_no_contract_events() {
		// the queue always starts with the initial Retry task (len == 1)
		let listener = make_service_contract_listener(None, None, None);
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
		listener.process_service_contract_events();
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
	}

	#[test]
	fn server_key_generation_is_scheduled_when_requested_key_is_unknnown() {
		let mut contract = DummyServiceContract::default();
		contract.logs.push(vec![Default::default(), Default::default(), Default::default()]);
		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None);
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
		listener.process_service_contract_events();
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 2);
		assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::GenerateServerKey(Default::default(), Default::default())));
	}

	#[test]
	fn no_new_tasks_scheduled_when_requested_key_is_unknown_and_request_belongs_to_other_key_server() {
		// key id 0xfff...fff falls into the last server's range, not ours (we are server 1)
		let server_key_id = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap();
		let mut contract = DummyServiceContract::default();
		contract.logs.push(vec![Default::default(), server_key_id, Default::default()]);
		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None);
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
		listener.process_service_contract_events();
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
	}

	#[test]
	fn server_key_restore_is_scheduled_when_requested_key_is_knnown() {
		let mut contract = DummyServiceContract::default();
		contract.logs.push(vec![Default::default(), Default::default(), Default::default()]);
		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None);
		// key share for the requested id already exists locally => restore, not generate
		listener.data.key_storage.insert(Default::default(), Default::default()).unwrap();
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
		listener.process_service_contract_events();
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 2);
		assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::RestoreServerKey(Default::default())));
	}

	#[test]
	fn no_new_tasks_scheduled_when_wrong_number_of_topics_in_log() {
		let mut contract = DummyServiceContract::default();
		contract.logs.push(vec![Default::default(), Default::default()]);
		let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None);
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
		listener.process_service_contract_events();
		assert_eq!(listener.data.tasks_queue.snapshot().len(), 1);
	}

	#[test]
	fn generation_session_is_created_when_processing_generate_server_key_task() {
		let key_server = Arc::new(DummyKeyServer::default());
		let listener = make_service_contract_listener(None, Some(key_server.clone()), None);
		// the dummy key server fails the request, but must have been called once
		ServiceContractListener::process_service_task(&listener.data, ServiceTask::GenerateServerKey(Default::default(), Default::default())).unwrap_err();
		assert_eq!(key_server.generation_requests_count.load(Ordering::Relaxed), 1);
	}

	#[test]
	fn key_is_read_and_published_when_processing_restore_server_key_task() {
		let contract = Arc::new(DummyServiceContract::default());
		let key_storage = Arc::new(DummyKeyStorage::default());
		let mut key_share = DocumentKeyShare::default();
		key_share.public = KeyPair::from_secret("0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap().public().clone();
		key_storage.insert(Default::default(), key_share.clone()).unwrap();
		let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage));
		ServiceContractListener::process_service_task(&listener.data, ServiceTask::RestoreServerKey(Default::default())).unwrap();
		assert_eq!(*contract.published_keys.lock(), vec![(Default::default(), key_share.public)]);
	}

	#[test]
	fn generation_is_not_retried_if_tried_in_the_same_cycle() {
		let mut contract = DummyServiceContract::default();
		contract.pending_requests.push((false, ServiceTask::GenerateServerKey(Default::default(), Default::default())));
		let key_server = Arc::new(DummyKeyServer::default());
		let listener = make_service_contract_listener(Some(Arc::new(contract)), Some(key_server.clone()), None);
		// mark the key as already attempted in this cycle => retry must skip it
		listener.data.retry_data.lock().generated_keys.insert(Default::default());
		ServiceContractListener::retry_pending_requests(&listener.data).unwrap();
		assert_eq!(key_server.generation_requests_count.load(Ordering::Relaxed), 0);
	}
}
|
78
secret_store/src/listener/tasks_queue.rs
Normal file
78
secret_store/src/listener/tasks_queue.rs
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::collections::VecDeque;
|
||||||
|
use parking_lot::{Mutex, Condvar};
|
||||||
|
|
||||||
|
#[derive(Default)]
|
||||||
|
/// General deque-based tasks queue.
|
||||||
|
pub struct TasksQueue<Task: Clone> {
|
||||||
|
/// Service event.
|
||||||
|
service_event: Condvar,
|
||||||
|
/// Service tasks queue.
|
||||||
|
service_tasks: Mutex<VecDeque<Task>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Task> TasksQueue<Task> where Task: Clone {
|
||||||
|
/// Create new tasks queue.
|
||||||
|
pub fn new() -> Self {
|
||||||
|
TasksQueue {
|
||||||
|
service_event: Condvar::new(),
|
||||||
|
service_tasks: Mutex::new(VecDeque::new()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
/// Get current tasks snapshot.
|
||||||
|
pub fn snapshot(&self) -> VecDeque<Task> {
|
||||||
|
self.service_tasks.lock().clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Push task to the front of queue.
|
||||||
|
pub fn push_front(&self, task: Task) {
|
||||||
|
let mut service_tasks = self.service_tasks.lock();
|
||||||
|
service_tasks.push_front(task);
|
||||||
|
self.service_event.notify_all();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Push task to the back of queue.
|
||||||
|
pub fn push(&self, task: Task) {
|
||||||
|
let mut service_tasks = self.service_tasks.lock();
|
||||||
|
service_tasks.push_back(task);
|
||||||
|
self.service_event.notify_all();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Push task to the back of queue.
|
||||||
|
pub fn push_many<I: Iterator<Item=Task>>(&self, tasks: I) {
|
||||||
|
let mut service_tasks = self.service_tasks.lock();
|
||||||
|
let previous_len = service_tasks.len();
|
||||||
|
service_tasks.extend(tasks);
|
||||||
|
if service_tasks.len() != previous_len {
|
||||||
|
self.service_event.notify_all();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Wait for new task (task is removed from the front of queue).
|
||||||
|
pub fn wait(&self) -> Task {
|
||||||
|
let mut service_tasks = self.service_tasks.lock();
|
||||||
|
if service_tasks.is_empty() {
|
||||||
|
self.service_event.wait(&mut service_tasks);
|
||||||
|
}
|
||||||
|
|
||||||
|
service_tasks.pop_front()
|
||||||
|
.expect("service_event is only fired when there are new tasks; qed")
|
||||||
|
}
|
||||||
|
}
|
57
secret_store/src/trusted_client.rs
Normal file
57
secret_store/src/trusted_client.rs
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
use std::sync::{Arc, Weak};
|
||||||
|
use ethcore::client::{Client, BlockChainClient};
|
||||||
|
use ethsync::SyncProvider;
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
/// 'Trusted' client weak reference.
|
||||||
|
pub struct TrustedClient {
|
||||||
|
/// Blockchain client.
|
||||||
|
client: Weak<Client>,
|
||||||
|
/// Sync provider.
|
||||||
|
sync: Weak<SyncProvider>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TrustedClient {
|
||||||
|
/// Create new trusted client.
|
||||||
|
pub fn new(client: Arc<Client>, sync: Arc<SyncProvider>) -> Self {
|
||||||
|
TrustedClient {
|
||||||
|
client: Arc::downgrade(&client),
|
||||||
|
sync: Arc::downgrade(&sync),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get 'trusted' `Client` reference only if it is synchronized && trusted.
|
||||||
|
pub fn get(&self) -> Option<Arc<Client>> {
|
||||||
|
self.client.upgrade()
|
||||||
|
.and_then(|client| self.sync.upgrade().map(|sync| (client, sync)))
|
||||||
|
.and_then(|(client, sync)| {
|
||||||
|
let is_synced = !sync.status().is_syncing(client.queue_info());
|
||||||
|
let is_trusted = client.chain_info().security_level().is_full();
|
||||||
|
match is_synced && is_trusted {
|
||||||
|
true => Some(client),
|
||||||
|
false => None,
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get untrusted `Client` reference.
|
||||||
|
pub fn get_untrusted(&self) -> Option<Arc<Client>> {
|
||||||
|
self.client.upgrade()
|
||||||
|
}
|
||||||
|
}
|
@ -44,6 +44,8 @@ pub enum Error {
|
|||||||
AccessDenied,
|
AccessDenied,
|
||||||
/// Requested document not found
|
/// Requested document not found
|
||||||
DocumentNotFound,
|
DocumentNotFound,
|
||||||
|
/// Hyper error
|
||||||
|
Hyper(String),
|
||||||
/// Serialization/deserialization error
|
/// Serialization/deserialization error
|
||||||
Serde(String),
|
Serde(String),
|
||||||
/// Database-related error
|
/// Database-related error
|
||||||
@ -61,11 +63,22 @@ pub struct NodeAddress {
|
|||||||
pub port: u16,
|
pub port: u16,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Contract address.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub enum ContractAddress {
|
||||||
|
/// Address is read from registry.
|
||||||
|
Registry,
|
||||||
|
/// Address is specified.
|
||||||
|
Address(ethkey::Address),
|
||||||
|
}
|
||||||
|
|
||||||
/// Secret store configuration
|
/// Secret store configuration
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct ServiceConfiguration {
|
pub struct ServiceConfiguration {
|
||||||
/// HTTP listener address. If None, HTTP API is disabled.
|
/// HTTP listener address. If None, HTTP API is disabled.
|
||||||
pub listener_address: Option<NodeAddress>,
|
pub listener_address: Option<NodeAddress>,
|
||||||
|
/// Service contract address. If None, service contract API is disabled.
|
||||||
|
pub service_contract_address: Option<ContractAddress>,
|
||||||
/// Is ACL check enabled. If false, everyone has access to all keys. Useful for tests only.
|
/// Is ACL check enabled. If false, everyone has access to all keys. Useful for tests only.
|
||||||
pub acl_check_enabled: bool,
|
pub acl_check_enabled: bool,
|
||||||
/// Data directory path for secret store
|
/// Data directory path for secret store
|
||||||
@ -107,6 +120,7 @@ impl fmt::Display for Error {
|
|||||||
Error::BadSignature => write!(f, "Bad signature"),
|
Error::BadSignature => write!(f, "Bad signature"),
|
||||||
Error::AccessDenied => write!(f, "Access dened"),
|
Error::AccessDenied => write!(f, "Access dened"),
|
||||||
Error::DocumentNotFound => write!(f, "Document not found"),
|
Error::DocumentNotFound => write!(f, "Document not found"),
|
||||||
|
Error::Hyper(ref msg) => write!(f, "Hyper error: {}", msg),
|
||||||
Error::Serde(ref msg) => write!(f, "Serialization error: {}", msg),
|
Error::Serde(ref msg) => write!(f, "Serialization error: {}", msg),
|
||||||
Error::Database(ref msg) => write!(f, "Database error: {}", msg),
|
Error::Database(ref msg) => write!(f, "Database error: {}", msg),
|
||||||
Error::Internal(ref msg) => write!(f, "Internal error: {}", msg),
|
Error::Internal(ref msg) => write!(f, "Internal error: {}", msg),
|
||||||
@ -136,6 +150,7 @@ impl From<key_server_cluster::Error> for Error {
|
|||||||
fn from(err: key_server_cluster::Error) -> Self {
|
fn from(err: key_server_cluster::Error) -> Self {
|
||||||
match err {
|
match err {
|
||||||
key_server_cluster::Error::AccessDenied => Error::AccessDenied,
|
key_server_cluster::Error::AccessDenied => Error::AccessDenied,
|
||||||
|
key_server_cluster::Error::MissingKeyShare => Error::DocumentNotFound,
|
||||||
_ => Error::Internal(err.into()),
|
_ => Error::Internal(err.into()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "parity-version"
|
name = "parity-version"
|
||||||
version = "0.1.0"
|
# NOTE: this value is used for Parity version string.
|
||||||
|
version = "1.9.0"
|
||||||
authors = ["Parity Technologies <admin@parity.io>"]
|
authors = ["Parity Technologies <admin@parity.io>"]
|
||||||
build = "build.rs"
|
build = "build.rs"
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user