From abfb9fccd3475ffe06b191ef8ba83d814bae2f30 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 14 Nov 2017 14:26:31 +0300 Subject: [PATCH 01/42] SecretStore: Kovan integration initial commit --- ethcore/native_contracts/build.rs | 2 + ethcore/native_contracts/generator/src/lib.rs | 38 +++-- .../res/secretstore_service.json | 3 + ethcore/native_contracts/src/lib.rs | 2 + .../src/secretstore_service.rs | 21 +++ secret_store/src/lib.rs | 13 +- .../src/{ => listener}/http_listener.rs | 74 +++------- secret_store/src/listener/mod.rs | 55 +++++++ .../src/listener/service_contract_listener.rs | 138 ++++++++++++++++++ 9 files changed, 276 insertions(+), 70 deletions(-) create mode 100644 ethcore/native_contracts/res/secretstore_service.json create mode 100644 ethcore/native_contracts/src/secretstore_service.rs rename secret_store/src/{ => listener}/http_listener.rs (85%) create mode 100644 secret_store/src/listener/mod.rs create mode 100644 secret_store/src/listener/service_contract_listener.rs diff --git a/ethcore/native_contracts/build.rs b/ethcore/native_contracts/build.rs index 979535057..72250dba5 100644 --- a/ethcore/native_contracts/build.rs +++ b/ethcore/native_contracts/build.rs @@ -26,6 +26,7 @@ const REGISTRY_ABI: &'static str = include_str!("res/registrar.json"); const URLHINT_ABI: &'static str = include_str!("res/urlhint.json"); const SERVICE_TRANSACTION_ABI: &'static str = include_str!("res/service_transaction.json"); const SECRETSTORE_ACL_STORAGE_ABI: &'static str = include_str!("res/secretstore_acl_storage.json"); +const SECRETSTORE_SERVICE_ABI: &'static str = include_str!("res/secretstore_service.json"); const VALIDATOR_SET_ABI: &'static str = include_str!("res/validator_set.json"); const VALIDATOR_REPORT_ABI: &'static str = include_str!("res/validator_report.json"); const PEER_SET_ABI: &'static str = include_str!("res/peer_set.json"); @@ -53,6 +54,7 @@ fn main() { build_file("Urlhint", URLHINT_ABI, "urlhint.rs"); 
build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs"); build_file("SecretStoreAclStorage", SECRETSTORE_ACL_STORAGE_ABI, "secretstore_acl_storage.rs"); + build_file("SecretStoreService", SECRETSTORE_SERVICE_ABI, "secretstore_service.rs"); build_file("ValidatorSet", VALIDATOR_SET_ABI, "validator_set.rs"); build_file("ValidatorReport", VALIDATOR_REPORT_ABI, "validator_report.rs"); build_file("PeerSet", PEER_SET_ABI, "peer_set.rs"); diff --git a/ethcore/native_contracts/generator/src/lib.rs b/ethcore/native_contracts/generator/src/lib.rs index a8848503e..0c545abaf 100644 --- a/ethcore/native_contracts/generator/src/lib.rs +++ b/ethcore/native_contracts/generator/src/lib.rs @@ -46,7 +46,7 @@ pub fn generate_module(struct_name: &str, abi: &str) -> Result { Ok(format!(r##" use byteorder::{{BigEndian, ByteOrder}}; use futures::{{future, Future, IntoFuture}}; -use ethabi::{{Contract, Token, Event}}; +use ethabi::{{Bytes, Contract, Token, Event}}; use bigint; type BoxFuture = Box + Send>; @@ -96,7 +96,7 @@ fn generate_functions(contract: &Contract) -> Result { let inputs: Vec<_> = function.inputs.iter().map(|i| i.kind.clone()).collect(); let outputs: Vec<_> = function.outputs.iter().map(|i| i.kind.clone()).collect(); - let (input_params, to_tokens) = input_params_codegen(&inputs) + let (input_params, input_names, to_tokens) = input_params_codegen(&inputs) .map_err(|bad_type| Error::UnsupportedType(name.clone(), bad_type))?; let (output_type, decode_outputs) = output_params_codegen(&outputs) @@ -113,14 +113,14 @@ pub fn {snake_name}(&self, call: F, {params}) -> BoxFuture<{output_type}, U: IntoFuture, Error=String>, U::Future: Send + 'static {{ + let call_addr = self.address; + let call_future = match self.encode_{snake_name}_input({params_names}) {{ + Ok(call_data) => (call)(call_addr, call_data), + Err(e) => return Box::new(future::err(e)), + }}; + let function = self.contract.function(r#"{abi_name}"#) .expect("function existence checked 
at compile-time; qed").clone(); - let call_addr = self.address; - - let call_future = match function.encode_input(&{to_tokens}) {{ - Ok(call_data) => (call)(call_addr, call_data), - Err(e) => return Box::new(future::err(format!("Error encoding call: {{:?}}", e))), - }}; Box::new(call_future .into_future() @@ -128,12 +128,22 @@ pub fn {snake_name}(&self, call: F, {params}) -> BoxFuture<{output_type}, .map(Vec::into_iter) .and_then(|mut outputs| {decode_outputs})) }} + +/// Encode "{abi_name}" function arguments. +/// Arguments: {abi_inputs:?} +pub fn encode_{snake_name}_input(&self, {params}) -> Result {{ + self.contract.function(r#"{abi_name}"#) + .expect("function existence checked at compile-time; qed") + .encode_input(&{to_tokens}) + .map_err(|e| format!("Error encoding call: {{:?}}", e)) +}} "##, abi_name = name, abi_inputs = inputs, abi_outputs = outputs, snake_name = snake_name, params = input_params, + params_names = input_names, output_type = output_type, to_tokens = to_tokens, decode_outputs = decode_outputs, @@ -145,15 +155,17 @@ pub fn {snake_name}(&self, call: F, {params}) -> BoxFuture<{output_type}, // generate code for params in function signature and turning them into tokens. // -// two pieces of code are generated: the first gives input types for the function signature, -// and the second gives code to tokenize those inputs. +// three pieces of code are generated: the first gives input types for the function signature, +// the second one gives input parameter names to pass to another method, +// and the third gives code to tokenize those inputs. // // params of form `param_0: type_0, param_1: type_1, ...` // tokenizing code of form `{let mut tokens = Vec::new(); tokens.push({param_X}); tokens }` // // returns any unsupported param type encountered. 
-fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String), ParamType> { +fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String, String), ParamType> { let mut params = String::new(); + let mut params_names = String::new(); let mut to_tokens = "{ let mut tokens = Vec::new();".to_string(); for (index, param_type) in inputs.iter().enumerate() { @@ -164,11 +176,13 @@ fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String), ParamT params.push_str(&format!("{}{}: {}, ", if needs_mut { "mut " } else { "" }, param_name, rust_type)); + params_names.push_str(&format!("{}, ", param_name)); + to_tokens.push_str(&format!("tokens.push({{ {} }});", tokenize_code)); } to_tokens.push_str(" tokens }"); - Ok((params, to_tokens)) + Ok((params, params_names, to_tokens)) } // generate code for outputs of the function and detokenizing them. diff --git a/ethcore/native_contracts/res/secretstore_service.json b/ethcore/native_contracts/res/secretstore_service.json new file mode 100644 index 000000000..3c9510bb5 --- /dev/null +++ b/ethcore/native_contracts/res/secretstore_service.json @@ -0,0 +1,3 @@ +[ + {"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"},{"name":"v","type":"uint8"},{"name":"r","type":"bytes32"},{"name":"s","type":"bytes32"}],"name":"serverKeyGenerated","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"} +] \ No newline at end of file diff --git a/ethcore/native_contracts/src/lib.rs b/ethcore/native_contracts/src/lib.rs index c37a13504..2138c1976 100644 --- a/ethcore/native_contracts/src/lib.rs +++ b/ethcore/native_contracts/src/lib.rs @@ -28,6 +28,7 @@ mod registry; mod urlhint; mod service_transaction; mod secretstore_acl_storage; +mod secretstore_service; mod validator_set; mod validator_report; mod peer_set; @@ -40,6 +41,7 @@ pub use self::registry::Registry; pub use self::urlhint::Urlhint; pub use 
self::service_transaction::ServiceTransactionChecker; pub use self::secretstore_acl_storage::SecretStoreAclStorage; +pub use self::secretstore_service::SecretStoreService; pub use self::validator_set::ValidatorSet; pub use self::validator_report::ValidatorReport; pub use self::peer_set::PeerSet; diff --git a/ethcore/native_contracts/src/secretstore_service.rs b/ethcore/native_contracts/src/secretstore_service.rs new file mode 100644 index 000000000..508cfa13b --- /dev/null +++ b/ethcore/native_contracts/src/secretstore_service.rs @@ -0,0 +1,21 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +#![allow(unused_mut, unused_variables, unused_imports)] + +//! Secret store service contract. 
+ +include!(concat!(env!("OUT_DIR"), "/secretstore_service.rs")); diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 5951be508..31a93f012 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -54,12 +54,12 @@ mod types; mod traits; mod acl_storage; -mod http_listener; mod key_server; mod key_storage; mod serialization; mod key_server_set; mod node_key_pair; +mod listener; use std::sync::Arc; use ethcore::client::Client; @@ -71,8 +71,6 @@ pub use self::node_key_pair::{PlainNodeKeyPair, KeyStoreNodeKeyPair}; /// Start new key server instance pub fn start(client: Arc, self_key_pair: Arc, config: ServiceConfiguration) -> Result, Error> { - use std::sync::Arc; - let acl_storage: Arc = if config.acl_check_enabled { acl_storage::OnChainAclStorage::new(&client) } else { @@ -80,7 +78,12 @@ pub fn start(client: Arc, self_key_pair: Arc, config: Servi }; let key_server_set = key_server_set::OnChainKeyServerSet::new(&client, config.cluster_config.nodes.clone())?; let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?); - let key_server = key_server::KeyServerImpl::new(&config.cluster_config, key_server_set, self_key_pair, acl_storage, key_storage)?; - let listener = http_listener::KeyServerHttpListener::start(config.listener_address, key_server)?; + let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set, self_key_pair.clone(), acl_storage, key_storage)?); + let http_listener = match config.listener_address { + Some(listener_address) => Some(listener::http_listener::KeyServerHttpListener::start(listener_address, key_server.clone())?), + None => None, + }; + let contract_listener = listener::service_contract_listener::ServiceContractListener::new(&client, key_server.clone(), self_key_pair); + let listener = listener::Listener::new(key_server, Some(http_listener), Some(contract_listener)); Ok(Box::new(listener)) } diff --git a/secret_store/src/http_listener.rs 
b/secret_store/src/listener/http_listener.rs similarity index 85% rename from secret_store/src/http_listener.rs rename to secret_store/src/listener/http_listener.rs index 883389365..c9f2acf16 100644 --- a/secret_store/src/http_listener.rs +++ b/secret_store/src/listener/http_listener.rs @@ -25,9 +25,9 @@ use serde::Serialize; use serde_json; use url::percent_encoding::percent_decode; -use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer}; +use traits::KeyServer; use serialization::{SerializableEncryptedDocumentKeyShadow, SerializableBytes, SerializablePublic}; -use types::all::{Error, Public, MessageHash, EncryptedMessageSignature, NodeAddress, RequestSignature, ServerKeyId, +use types::all::{Error, Public, MessageHash, NodeAddress, RequestSignature, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow}; /// Key server http-requests listener. Available requests: @@ -38,9 +38,9 @@ use types::all::{Error, Public, MessageHash, EncryptedMessageSignature, NodeAddr /// To get document key shadow: GET /shadow/{server_key_id}/{signature} /// To sign message with server key: GET /{server_key_id}/{signature}/{message_hash} -pub struct KeyServerHttpListener { - http_server: Option, - handler: Arc>, +pub struct KeyServerHttpListener { + http_server: HttpListening, + _handler: Arc, } /// Parsed http request @@ -63,77 +63,44 @@ enum Request { } /// Cloneable http handler -struct KeyServerHttpHandler { - handler: Arc>, +struct KeyServerHttpHandler { + handler: Arc, } /// Shared http handler -struct KeyServerSharedHttpHandler { - key_server: T, +struct KeyServerSharedHttpHandler { + key_server: Arc, } -impl KeyServerHttpListener where T: KeyServer + 'static { +impl KeyServerHttpListener { /// Start KeyServer http listener - pub fn start(listener_address: Option, key_server: T) -> Result { + pub fn start(listener_address: NodeAddress, key_server: Arc) -> Result { let shared_handler = Arc::new(KeyServerSharedHttpHandler { key_server: key_server, }); 
- let http_server = listener_address - .map(|listener_address| format!("{}:{}", listener_address.address, listener_address.port)) - .map(|listener_address| HttpServer::http(&listener_address).expect("cannot start HttpServer")) - .map(|http_server| http_server.handle(KeyServerHttpHandler { + let listener_address = format!("{}:{}", listener_address.address, listener_address.port); + let http_server = HttpServer::http(&listener_address).expect("cannot start HttpServer"); + let http_server = http_server.handle(KeyServerHttpHandler { handler: shared_handler.clone(), - }).expect("cannot start HttpServer")); + }).expect("cannot start HttpServer"); let listener = KeyServerHttpListener { http_server: http_server, - handler: shared_handler, + _handler: shared_handler, }; Ok(listener) } } -impl KeyServer for KeyServerHttpListener where T: KeyServer + 'static {} - -impl ServerKeyGenerator for KeyServerHttpListener where T: KeyServer + 'static { - fn generate_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result { - self.handler.key_server.generate_key(key_id, signature, threshold) - } -} - -impl DocumentKeyServer for KeyServerHttpListener where T: KeyServer + 'static { - fn store_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> { - self.handler.key_server.store_document_key(key_id, signature, common_point, encrypted_document_key) - } - - fn generate_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result { - self.handler.key_server.generate_document_key(key_id, signature, threshold) - } - - fn restore_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result { - self.handler.key_server.restore_document_key(key_id, signature) - } - - fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result { - 
self.handler.key_server.restore_document_key_shadow(key_id, signature) - } -} - -impl MessageSigner for KeyServerHttpListener where T: KeyServer + 'static { - fn sign_message(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result { - self.handler.key_server.sign_message(key_id, signature, message) - } -} - -impl Drop for KeyServerHttpListener where T: KeyServer + 'static { +impl Drop for KeyServerHttpListener { fn drop(&mut self) { // ignore error as we are dropping anyway self.http_server.take().map(|mut s| { let _ = s.close(); }); } } -impl HttpHandler for KeyServerHttpHandler where T: KeyServer + 'static { +impl HttpHandler for KeyServerHttpHandler { fn handle(&self, req: HttpRequest, mut res: HttpResponse) { if req.headers.has::() { warn!(target: "secretstore", "Ignoring {}-request {} with Origin header", req.method, req.uri); @@ -310,6 +277,7 @@ fn parse_request(method: &HttpMethod, uri_path: &str) -> Request { #[cfg(test)] mod tests { + use std::sync::Arc; use hyper::method::Method as HttpMethod; use key_server::tests::DummyKeyServer; use types::all::NodeAddress; @@ -317,12 +285,12 @@ mod tests { #[test] fn http_listener_successfully_drops() { - let key_server = DummyKeyServer; + let key_server = Arc::new(DummyKeyServer); let address = NodeAddress { address: "127.0.0.1".into(), port: 9000 }; let listener = KeyServerHttpListener::start(Some(address), key_server).unwrap(); drop(listener); } - + #[test] fn parse_request_successful() { // POST /shadow/{server_key_id}/{signature}/{threshold} => generate server key diff --git a/secret_store/src/listener/mod.rs b/secret_store/src/listener/mod.rs new file mode 100644 index 000000000..858f01ee0 --- /dev/null +++ b/secret_store/src/listener/mod.rs @@ -0,0 +1,55 @@ +pub mod http_listener; +pub mod service_contract_listener; + +use std::sync::Arc; +use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer}; +use types::all::{Error, Public, MessageHash, 
EncryptedMessageSignature, RequestSignature, ServerKeyId, + EncryptedDocumentKey, EncryptedDocumentKeyShadow}; + +pub struct Listener { + key_server: Arc, + _http: Option, + _contract: Option>, +} + +impl Listener { + pub fn new(key_server: Arc, http: Option, contract: Option>) -> Self { + Self { + key_server: key_server, + _http: http, + _contract: contract, + } + } +} + +impl KeyServer for Listener {} + +impl ServerKeyGenerator for Listener { + fn generate_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result { + self.key_server.generate_key(key_id, signature, threshold) + } +} + +impl DocumentKeyServer for Listener { + fn store_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, common_point: Public, encrypted_document_key: Public) -> Result<(), Error> { + self.key_server.store_document_key(key_id, signature, common_point, encrypted_document_key) + } + + fn generate_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature, threshold: usize) -> Result { + self.key_server.generate_document_key(key_id, signature, threshold) + } + + fn restore_document_key(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result { + self.key_server.restore_document_key(key_id, signature) + } + + fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result { + self.key_server.restore_document_key_shadow(key_id, signature) + } +} + +impl MessageSigner for Listener { + fn sign_message(&self, key_id: &ServerKeyId, signature: &RequestSignature, message: MessageHash) -> Result { + self.key_server.sign_message(key_id, signature, message) + } +} diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs new file mode 100644 index 000000000..d30e0100c --- /dev/null +++ b/secret_store/src/listener/service_contract_listener.rs @@ -0,0 +1,138 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. 
+// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::{Arc, Weak}; +use parking_lot::Mutex; +use ethcore::filter::Filter; +use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; +use native_contracts::SecretStoreService; +use ethkey::{Random, Generator, sign}; +use bytes::Bytes; +use hash::keccak; +use bigint::hash::H256; +use util::Address; +use {NodeKeyPair, KeyServer}; + +/// Name of the SecretStore contract in the registry. +const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service"; + +/// Key server has been added to the set. +const SERVER_KEY_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyRequested(bytes32)"; + +lazy_static! { + static ref SERVER_KEY_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_REQUESTED_EVENT_NAME); +} + +/// SecretStore <-> Authority connector. Duties: +/// 1. Listen for new requests on SecretStore contract +/// 2. Redirects requests for key server +/// 3. Publishes response on SecretStore contract +pub struct ServiceContractListener { + /// Cached on-chain contract. + contract: Mutex, +} + +/// Cached on-chain Key Server set contract. +struct CachedContract { + /// Blockchain client. + client: Weak, + /// Contract. + contract: SecretStoreService, + /// Contract address. + contract_addr: Option
, + /// Key server reference. + key_server: Arc, + /// This node key pair. + self_key_pair: Arc, +} + +impl ServiceContractListener { + pub fn new(client: &Arc, key_server: Arc, self_key_pair: Arc) -> Arc { + let contract = Arc::new(ServiceContractListener { + contract: Mutex::new(CachedContract::new(client, key_server, self_key_pair)), + }); + client.add_notify(contract.clone()); + contract + } +} + +impl ChainNotify for ServiceContractListener { + fn new_blocks(&self, _imported: Vec, _invalid: Vec, enacted: Vec, _retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { + if !enacted.is_empty() { + self.contract.lock().update(enacted) + } + } +} + +impl CachedContract { + pub fn new(client: &Arc, key_server: Arc, self_key_pair: Arc) -> Self { + CachedContract { + client: Arc::downgrade(client), + contract: SecretStoreService::new(Default::default()), // we aren't going to call contract => could use default address + contract_addr: client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()), + key_server: key_server, + self_key_pair: self_key_pair, + } + } + + pub fn update(&mut self, enacted: Vec) { + if let Some(client) = self.client.upgrade() { + // update contract address + self.contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()); + + // check for new key requests. + // NOTE: If contract is changed, or unregistered && there are several enacted blocks + // in single update call, some requests in old contract can be abandoned (we get contract_address from latest block) + // && check for requests in this contract for every enacted block. + // The opposite is also true (we can process requests of contract, before it actually becames a SS contract). + if let Some(contract_addr) = self.contract_addr.as_ref() { + // TODO: in case of reorgs we might process requests for free (maybe wait for several confirmations???) 
&& publish keys without request + // TODO: in case of reorgs we might publish keys to forked branch (re-submit transaction???) + for block in enacted { + let request_logs = client.logs(Filter { + from_block: BlockId::Hash(block.clone()), + to_block: BlockId::Hash(block), + address: Some(vec![contract_addr.clone()]), + topics: vec![ + Some(vec![*SERVER_KEY_REQUESTED_EVENT_NAME_HASH]), + None, + None, + None, + ], + limit: None, + }); + + // TODO: it actually should queue tasks to separate thread + // + separate thread at the beginning should read all requests from contract + // and then start processing logs + for request in request_logs { + // TODO: check if we are selected to process this request + let key_id = request.entry.topics[1]; + let key = Random.generate().unwrap(); + let signature = sign(key.secret(), &key_id).unwrap(); + let server_key = self.key_server.generate_key(&key_id, &signature, 0).unwrap(); +println!("=== generated key: {:?}", server_key); + // publish generated key + let server_key_hash = keccak(server_key); + let signed_key = self.self_key_pair.sign(&server_key_hash).unwrap(); + let transaction_data = self.contract.encode_server_key_generated_input(key_id, server_key.to_vec(), signed_key.v(), signed_key.r().into(), signed_key.s().into()).unwrap(); + client.transact_contract(contract_addr.clone(), transaction_data).unwrap(); + } + } + } + } + } +} From 56875a83b3d512c0ac8cc3861367ccec96117392 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 15 Nov 2017 10:42:13 +0300 Subject: [PATCH 02/42] SecretStore: Kovan flush2 --- ethcore/native_contracts/generator/src/lib.rs | 4 ++-- secret_store/src/lib.rs | 2 +- secret_store/src/listener/http_listener.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ethcore/native_contracts/generator/src/lib.rs b/ethcore/native_contracts/generator/src/lib.rs index 0c545abaf..be71fc395 100644 --- a/ethcore/native_contracts/generator/src/lib.rs +++ 
b/ethcore/native_contracts/generator/src/lib.rs @@ -131,10 +131,10 @@ pub fn {snake_name}(&self, call: F, {params}) -> BoxFuture<{output_type}, /// Encode "{abi_name}" function arguments. /// Arguments: {abi_inputs:?} -pub fn encode_{snake_name}_input(&self, {params}) -> Result {{ +pub fn encode_{snake_name}_input(&self, {params}) -> Result, String> {{ self.contract.function(r#"{abi_name}"#) .expect("function existence checked at compile-time; qed") - .encode_input(&{to_tokens}) + .encode_call({to_tokens}) .map_err(|e| format!("Error encoding call: {{:?}}", e)) }} "##, diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 31a93f012..c93c3c5dd 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -84,6 +84,6 @@ pub fn start(client: Arc, self_key_pair: Arc, config: Servi None => None, }; let contract_listener = listener::service_contract_listener::ServiceContractListener::new(&client, key_server.clone(), self_key_pair); - let listener = listener::Listener::new(key_server, Some(http_listener), Some(contract_listener)); + let listener = listener::Listener::new(key_server, http_listener, Some(contract_listener)); Ok(Box::new(listener)) } diff --git a/secret_store/src/listener/http_listener.rs b/secret_store/src/listener/http_listener.rs index c9f2acf16..cb6530f40 100644 --- a/secret_store/src/listener/http_listener.rs +++ b/secret_store/src/listener/http_listener.rs @@ -96,7 +96,7 @@ impl KeyServerHttpListener { impl Drop for KeyServerHttpListener { fn drop(&mut self) { // ignore error as we are dropping anyway - self.http_server.take().map(|mut s| { let _ = s.close(); }); + let _ = self.http_server.close(); } } From 6618827d1ab5ebc549df5c1353ef2f7a27989fef Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 17 Nov 2017 13:37:01 +0300 Subject: [PATCH 03/42] SecretStore: cleaning up service contract listener --- .../res/secretstore_service.json | 2 +- secret_store/src/listener/http_listener.rs | 2 +- 
.../src/listener/service_contract_listener.rs | 275 +++++++++++++----- 3 files changed, 200 insertions(+), 79 deletions(-) diff --git a/ethcore/native_contracts/res/secretstore_service.json b/ethcore/native_contracts/res/secretstore_service.json index 3c9510bb5..0189691a1 100644 --- a/ethcore/native_contracts/res/secretstore_service.json +++ b/ethcore/native_contracts/res/secretstore_service.json @@ -1,3 +1,3 @@ [ - {"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"},{"name":"v","type":"uint8"},{"name":"r","type":"bytes32"},{"name":"s","type":"bytes32"}],"name":"serverKeyGenerated","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"} + {"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"threshold","type":"uint256"}],"name":"generateServerKey","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"constant":true,"inputs":[],"name":"serverKeyGenerationFee","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"},{"name":"v","type":"uint8"},{"name":"r","type":"bytes32"},{"name":"s","type":"bytes32"}],"name":"serverKeyGenerated","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"serverKeyId","type":"bytes32"}],"name":"ServerKeyRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"serverKeyPublic","type":"bytes"}],"name":"ServerKeyGenerated","type":"event"} ] \ No newline at end of file diff --git a/secret_store/src/listener/http_listener.rs b/secret_store/src/listener/http_listener.rs index cb6530f40..c1f5fc3fd 100644 --- a/secret_store/src/listener/http_listener.rs +++ b/secret_store/src/listener/http_listener.rs @@ -287,7 
+287,7 @@ mod tests { fn http_listener_successfully_drops() { let key_server = Arc::new(DummyKeyServer); let address = NodeAddress { address: "127.0.0.1".into(), port: 9000 }; - let listener = KeyServerHttpListener::start(Some(address), key_server).unwrap(); + let listener = KeyServerHttpListener::start(address, key_server).unwrap(); drop(listener); } diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index d30e0100c..ae3ec0736 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -14,17 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::collections::VecDeque; use std::sync::{Arc, Weak}; -use parking_lot::Mutex; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::thread; +use parking_lot::{RwLock, Mutex, Condvar}; use ethcore::filter::Filter; use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; use native_contracts::SecretStoreService; -use ethkey::{Random, Generator, sign}; +use ethkey::{Random, Generator, Public, Signature, sign}; use bytes::Bytes; use hash::keccak; use bigint::hash::H256; use util::Address; -use {NodeKeyPair, KeyServer}; +use {ServerKeyId, NodeKeyPair, KeyServer}; /// Name of the SecretStore contract in the registry. const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service"; @@ -36,103 +39,221 @@ lazy_static! { static ref SERVER_KEY_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_REQUESTED_EVENT_NAME); } -/// SecretStore <-> Authority connector. Duties: -/// 1. Listen for new requests on SecretStore contract -/// 2. Redirects requests for key server -/// 3. Publishes response on SecretStore contract +/// SecretStore <-> Authority connector responsible for: +/// 1. listening for new requests on SecretStore contract +/// 2. redirecting requests to key server +/// 3. 
publishing response on SecretStore contract pub struct ServiceContractListener { - /// Cached on-chain contract. - contract: Mutex, + /// Service contract listener data. + data: Arc, + /// Service thread handle. + service_handle: Option>, } -/// Cached on-chain Key Server set contract. -struct CachedContract { +/// Service contract listener data. +struct ServiceContractListenerData { + /// Contract (currently used for parameters encoding only). + pub contract: RwLock, /// Blockchain client. - client: Weak, - /// Contract. - contract: SecretStoreService, - /// Contract address. - contract_addr: Option
, + pub client: Weak, /// Key server reference. - key_server: Arc, + pub key_server: Arc, /// This node key pair. - self_key_pair: Arc, + pub self_key_pair: Arc, + /// Service tasks queue. + pub tasks_queue: Arc, +} + +/// Service tasks queue. +struct TasksQueue { + /// Are we closing currently. + is_shutdown: AtomicBool, + /// Service event. + service_event: Condvar, + /// Service tasks queue. + service_tasks: Mutex>, +} + +/// Service task. +enum ServiceTask { + /// Generate server key (server_key_id, threshold). + GenerateServerKey(H256, H256), + /// Shutdown listener. + Shutdown, } impl ServiceContractListener { pub fn new(client: &Arc, key_server: Arc, self_key_pair: Arc) -> Arc { + let contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()).unwrap_or_default(); + let data = Arc::new(ServiceContractListenerData { + contract: RwLock::new(SecretStoreService::new(contract_addr)), + client: Arc::downgrade(client), + key_server: key_server, + self_key_pair: self_key_pair, + tasks_queue: Arc::new(TasksQueue::new()), + }); + + let service_thread_data = data.clone(); + let service_handle = thread::spawn(move || Self::run_service_thread(service_thread_data)); let contract = Arc::new(ServiceContractListener { - contract: Mutex::new(CachedContract::new(client, key_server, self_key_pair)), + data: data, + service_handle: Some(service_handle), }); client.add_notify(contract.clone()); contract } + + fn process_service_contract_events(&self, client: &Client, service_contract: Address, blocks: Vec) { + debug_assert!(!blocks.is_empty()); + + // TODO: is blocks guaranteed to be ordered here? + // TODO: logs() is called from notify() thread - is it ok? 
+ let request_logs = client.logs(Filter { + from_block: BlockId::Hash(blocks.first().expect("!block.is_empty(); qed").clone()), + to_block: BlockId::Hash(blocks.last().expect("!block.is_empty(); qed").clone()), + address: Some(vec![service_contract]), + topics: vec![ + Some(vec![*SERVER_KEY_REQUESTED_EVENT_NAME_HASH]), + None, + None, + None, + ], + limit: None, + }); + + self.data.tasks_queue.push(request_logs.into_iter() + .filter_map(|r| match r.entry.topics.len() { + 3 => Some(ServiceTask::GenerateServerKey( + r.entry.topics[1], + r.entry.topics[2], + )), + l @ _ => { + warn!(target: "secretstore", "Ignoring ServerKeyRequested event with wrong number of params {}", l); + None + }, + })); + } + + fn run_service_thread(data: Arc) { + loop { + let task = data.tasks_queue.wait(); + + match task { + ServiceTask::GenerateServerKey(server_key_id, threshold) => { + match Self::generate_server_key(&data, &server_key_id, &threshold) + .and_then(|server_key| Self::publish_server_key(&data, &server_key_id, &server_key)) { + Ok(_) => trace!(target: "secretstore", "GenerateServerKey({}, {}) request has completed", + server_key_id, threshold), + Err(error) => warn!(target: "secretstore", "GenerateServerKey({}, {}) request has failed with: {}", + server_key_id, threshold, error), + } + }, + ServiceTask::Shutdown => break, + } + } + } + + fn generate_server_key(data: &Arc, server_key_id: &ServerKeyId, threshold: &H256) -> Result { + let threshold_num = threshold.low_u64(); + if threshold != &threshold_num.into() || threshold_num >= ::std::usize::MAX as u64 { + return Err(format!("invalid threshold {:?}", threshold)); + } + + // TODO: if this server key is going to be used for document key generation later, author must + // be specified from outside + let author_key = Random.generate().map_err(|e| format!("{}", e))?; + let server_key_id_signature = sign(author_key.secret(), server_key_id).map_err(|e| format!("{}", e))?; + data.key_server.generate_key(server_key_id, 
&server_key_id_signature, threshold_num as usize) + .map_err(Into::into) + + } + + fn publish_server_key(data: &Arc, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> { + let server_key_hash = keccak(server_key); + let signed_server_key = data.self_key_pair.sign(&server_key_hash).map_err(|e| format!("{}", e))?; + let signed_server_key: Signature = signed_server_key.into_electrum().into(); + let transaction_data = data.contract.read().encode_server_key_generated_input(server_key_id.clone(), + server_key.to_vec(), + signed_server_key.v(), + signed_server_key.r().into(), + signed_server_key.s().into() + )?; + + let contract = data.contract.read(); + if contract.address != Default::default() { + if let Some(client) = data.client.upgrade() { + client.transact_contract( + contract.address.clone(), + transaction_data + ).map_err(|e| format!("{}", e))?; + } // else we will read this in the next refresh cycle + } + + Ok(()) + } +} + +impl Drop for ServiceContractListener { + fn drop(&mut self) { + if let Some(service_handle) = self.service_handle.take() { + self.data.tasks_queue.shutdown(); + // ignore error as we are already closing + let _ = service_handle.join(); + } + } } impl ChainNotify for ServiceContractListener { fn new_blocks(&self, _imported: Vec, _invalid: Vec, enacted: Vec, _retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { if !enacted.is_empty() { - self.contract.lock().update(enacted) - } - } -} - -impl CachedContract { - pub fn new(client: &Arc, key_server: Arc, self_key_pair: Arc) -> Self { - CachedContract { - client: Arc::downgrade(client), - contract: SecretStoreService::new(Default::default()), // we aren't going to call contract => could use default address - contract_addr: client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()), - key_server: key_server, - self_key_pair: self_key_pair, - } - } - - pub fn update(&mut self, enacted: Vec) { - if let Some(client) = self.client.upgrade() { - // update contract 
address - self.contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()); - - // check for new key requests. - // NOTE: If contract is changed, or unregistered && there are several enacted blocks - // in single update call, some requests in old contract can be abandoned (we get contract_address from latest block) - // && check for requests in this contract for every enacted block. - // The opposite is also true (we can process requests of contract, before it actually becames a SS contract). - if let Some(contract_addr) = self.contract_addr.as_ref() { - // TODO: in case of reorgs we might process requests for free (maybe wait for several confirmations???) && publish keys without request - // TODO: in case of reorgs we might publish keys to forked branch (re-submit transaction???) - for block in enacted { - let request_logs = client.logs(Filter { - from_block: BlockId::Hash(block.clone()), - to_block: BlockId::Hash(block), - address: Some(vec![contract_addr.clone()]), - topics: vec![ - Some(vec![*SERVER_KEY_REQUESTED_EVENT_NAME_HASH]), - None, - None, - None, - ], - limit: None, - }); - - // TODO: it actually should queue tasks to separate thread - // + separate thread at the beginning should read all requests from contract - // and then start processing logs - for request in request_logs { - // TODO: check if we are selected to process this request - let key_id = request.entry.topics[1]; - let key = Random.generate().unwrap(); - let signature = sign(key.secret(), &key_id).unwrap(); - let server_key = self.key_server.generate_key(&key_id, &signature, 0).unwrap(); -println!("=== generated key: {:?}", server_key); - // publish generated key - let server_key_hash = keccak(server_key); - let signed_key = self.self_key_pair.sign(&server_key_hash).unwrap(); - let transaction_data = self.contract.encode_server_key_generated_input(key_id, server_key.to_vec(), signed_key.v(), signed_key.r().into(), signed_key.s().into()).unwrap(); - 
client.transact_contract(contract_addr.clone(), transaction_data).unwrap(); + if let Some(client) = self.data.client.upgrade() { + if let Some(service_contract_addr) = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) { + if self.data.contract.read().address != service_contract_addr { + *self.data.contract.write() = SecretStoreService::new(service_contract_addr.clone()); } + self.process_service_contract_events(&*client, service_contract_addr, enacted); } } + + //self.contract.lock().update(enacted) } } } + +impl TasksQueue { + pub fn new() -> Self { + TasksQueue { + is_shutdown: AtomicBool::new(false), + service_event: Condvar::new(), + service_tasks: Mutex::new(VecDeque::new()), + } + } + + pub fn shutdown(&self) { + self.is_shutdown.store(true, Ordering::Release); + self.service_event.notify_all(); + } + + pub fn push(&self, tasks: I) where I: Iterator { + let mut service_tasks = self.service_tasks.lock(); + service_tasks.extend(tasks); + self.service_event.notify_all(); + } + + pub fn wait(&self) -> ServiceTask { + if self.is_shutdown.load(Ordering::Release) { + return ServiceTask::Shutdown; + } + + let mut service_tasks = self.service_tasks.lock(); + if service_tasks.is_empty() { + self.service_event.wait(&mut service_tasks); + if self.is_shutdown.load(Ordering::Release) { + return ServiceTask::Shutdown; + } + } + + service_tasks.pop_front() + .expect("service_event is only fired when there are new tasks or is_shutdown == true; is_shutdown == false; qed") + } +} From 3945a29ee62b6354d25fa4fc4eb4c67ac8032600 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 20 Nov 2017 15:18:31 +0300 Subject: [PATCH 04/42] SecretStore: mapping requests to KeyServer + requests retry --- Cargo.lock | 1 + .../res/secretstore_service.json | 4 +- parity/run.rs | 1 + parity/secretstore.rs | 5 +- secret_store/Cargo.toml | 1 + secret_store/src/lib.rs | 12 +- .../src/listener/service_contract_listener.rs | 391 ++++++++++++++++-- 7 files changed, 370 
insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c3922dc48..fd4cc2e5d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -678,6 +678,7 @@ dependencies = [ "ethcore-util 1.9.0", "ethcrypto 0.1.0", "ethkey 0.2.0", + "ethsync 1.8.0", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "hash 0.1.0", diff --git a/ethcore/native_contracts/res/secretstore_service.json b/ethcore/native_contracts/res/secretstore_service.json index 0189691a1..48d9adaa7 100644 --- a/ethcore/native_contracts/res/secretstore_service.json +++ b/ethcore/native_contracts/res/secretstore_service.json @@ -1,3 +1 @@ -[ - {"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"threshold","type":"uint256"}],"name":"generateServerKey","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"constant":true,"inputs":[],"name":"serverKeyGenerationFee","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"},{"name":"v","type":"uint8"},{"name":"r","type":"bytes32"},{"name":"s","type":"bytes32"}],"name":"serverKeyGenerated","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"serverKeyId","type":"bytes32"}],"name":"ServerKeyRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"serverKeyPublic","type":"bytes"}],"name":"ServerKeyGenerated","type":"event"} -] \ No newline at end of file 
+[{"constant":true,"inputs":[],"name":"serverKeyGenerationRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"authority","type":"address"},{"name":"index","type":"uint256"}],"name":"getServerKeyGenerationRequest","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"uint256"},{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"threshold","type":"uint256"}],"name":"generateServerKey","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"constant":true,"inputs":[],"name":"serverKeyGenerationFee","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[],"name":"drain","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"},{"name":"v","type":"uint8"},{"name":"r","type":"bytes32"},{"name":"s","type":"bytes32"}],"name":"serverKeyGenerated","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"serverKeyId","type":"bytes32"},{"indexed":true,"name":"threshold","type":"uint256"}],"name":"ServerKeyRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"serverKeyPublic","type":"bytes"}],"name":"ServerKeyGenerated","type":"event"}] \ No newline at end of file diff --git a/parity/run.rs b/parity/run.rs index 13c3575d9..fcad975c8 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -785,6 +785,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // secret store key server let secretstore_deps = secretstore::Dependencies { client: client.clone(), + sync: 
sync_provider.clone(), account_provider: account_provider, accounts_passwords: &passwords, }; diff --git a/parity/secretstore.rs b/parity/secretstore.rs index 416c9d547..7e36ef5e0 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -20,6 +20,7 @@ use dir::default_data_path; use ethcore::account_provider::AccountProvider; use ethcore::client::Client; use ethkey::{Secret, Public}; +use ethsync::SyncProvider; use helpers::replace_home; use util::Address; @@ -63,6 +64,8 @@ pub struct Configuration { pub struct Dependencies<'a> { /// Blockchain client. pub client: Arc, + /// Sync provider. + pub sync: Arc, /// Account provider. pub account_provider: Arc, /// Passed accounts passwords. @@ -153,7 +156,7 @@ mod server { cconf.cluster_config.nodes.insert(self_secret.public().clone(), cconf.cluster_config.listener_address.clone()); - let key_server = ethcore_secretstore::start(deps.client, self_secret, cconf) + let key_server = ethcore_secretstore::start(deps.client, deps.sync, self_secret, cconf) .map_err(|e| format!("Error starting KeyServer {}: {}", key_server_name, e))?; Ok(KeyServer { diff --git a/secret_store/Cargo.toml b/secret_store/Cargo.toml index 8357b530b..7f8d0bf8c 100644 --- a/secret_store/Cargo.toml +++ b/secret_store/Cargo.toml @@ -27,6 +27,7 @@ ethcore-bytes = { path = "../util/bytes" } ethcore-devtools = { path = "../devtools" } ethcore-util = { path = "../util" } ethcore-bigint = { path = "../util/bigint" } +ethsync = { path = "../sync" } kvdb = { path = "../util/kvdb" } kvdb-rocksdb = { path = "../util/kvdb-rocksdb" } hash = { path = "../util/hash" } diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index c93c3c5dd..e71cf2adc 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -44,6 +44,7 @@ extern crate ethcore_bigint as bigint; extern crate ethcore_logger as logger; extern crate ethcrypto; extern crate ethkey; +extern crate ethsync; extern crate native_contracts; extern crate hash; extern crate kvdb; @@ -63,6 
+64,7 @@ mod listener; use std::sync::Arc; use ethcore::client::Client; +use ethsync::SyncProvider; pub use types::all::{ServerKeyId, EncryptedDocumentKey, RequestSignature, Public, Error, NodeAddress, ServiceConfiguration, ClusterConfiguration}; @@ -70,20 +72,20 @@ pub use traits::{NodeKeyPair, KeyServer}; pub use self::node_key_pair::{PlainNodeKeyPair, KeyStoreNodeKeyPair}; /// Start new key server instance -pub fn start(client: Arc, self_key_pair: Arc, config: ServiceConfiguration) -> Result, Error> { +pub fn start(client: Arc, sync: Arc, self_key_pair: Arc, config: ServiceConfiguration) -> Result, Error> { let acl_storage: Arc = if config.acl_check_enabled { - acl_storage::OnChainAclStorage::new(&client) + acl_storage::OnChainAclStorage::new(&client/*, &sync*/) // TODO: return false until fully synced } else { Arc::new(acl_storage::DummyAclStorage::default()) }; - let key_server_set = key_server_set::OnChainKeyServerSet::new(&client, config.cluster_config.nodes.clone())?; + let key_server_set = key_server_set::OnChainKeyServerSet::new(&client, /*&sync, */config.cluster_config.nodes.clone())?; // TODO: return empty set until fully synced let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?); - let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set, self_key_pair.clone(), acl_storage, key_storage)?); + let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set.clone(), self_key_pair.clone(), acl_storage, key_storage)?); let http_listener = match config.listener_address { Some(listener_address) => Some(listener::http_listener::KeyServerHttpListener::start(listener_address, key_server.clone())?), None => None, }; - let contract_listener = listener::service_contract_listener::ServiceContractListener::new(&client, key_server.clone(), self_key_pair); + let contract_listener = listener::service_contract_listener::ServiceContractListener::new(&client, &sync, 
key_server.clone(), self_key_pair, key_server_set); let listener = listener::Listener::new(key_server, http_listener, Some(contract_listener)); Ok(Box::new(listener)) } diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index ae3ec0736..884c6d260 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -14,26 +14,40 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::collections::VecDeque; +use std::collections::{VecDeque, HashSet}; use std::sync::{Arc, Weak}; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread; +use futures::{future, Future}; use parking_lot::{RwLock, Mutex, Condvar}; use ethcore::filter::Filter; use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; +use ethkey::{Random, Generator, Public, Signature, sign, public_to_address}; +use ethsync::SyncProvider; use native_contracts::SecretStoreService; -use ethkey::{Random, Generator, Public, Signature, sign}; use bytes::Bytes; use hash::keccak; use bigint::hash::H256; +use bigint::prelude::U256; use util::Address; +use key_server_set::KeyServerSet; use {ServerKeyId, NodeKeyPair, KeyServer}; /// Name of the SecretStore contract in the registry. const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service"; /// Key server has been added to the set. -const SERVER_KEY_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyRequested(bytes32)"; +const SERVER_KEY_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyRequested(bytes32,uint256)"; + +/// Retry interval (in blocks). Every RETRY_INTEVAL_BLOCKS blocks each KeyServer reads pending requests from +/// service contract && tries to re-execute. 
The reason to have this mechanism is primarily because key +/// servers set change takes a lot of time + there could be some races, when blocks are coming to different +/// KS at different times. This isn't intended to fix && respond to general session errors! +const RETRY_INTEVAL_BLOCKS: usize = 30; + +/// Max failed retry requests (in single retry interval). The reason behind this constant is that if several +/// pending requests have failed, then most probably others will fail too. +const MAX_FAILED_RETRY_REQUESTS: usize = 1; lazy_static! { static ref SERVER_KEY_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_REQUESTED_EVENT_NAME); @@ -52,22 +66,35 @@ pub struct ServiceContractListener { /// Service contract listener data. struct ServiceContractListenerData { - /// Contract (currently used for parameters encoding only). + /// Blocks since last retry. + pub last_retry: AtomicUsize, + /// Retry-related data. + pub retry_data: Mutex, + /// Contract. pub contract: RwLock, /// Blockchain client. pub client: Weak, + /// Sync provider. + pub sync: Weak, /// Key server reference. pub key_server: Arc, /// This node key pair. pub self_key_pair: Arc, + /// Key servers set. + pub key_servers_set: Arc, /// Service tasks queue. pub tasks_queue: Arc, } +/// Retry-related data. +#[derive(Default)] +struct ServiceContractRetryData { + /// Server keys, which we have generated (or tried to generate) since last retry moment. + pub generated_keys: HashSet, +} + /// Service tasks queue. struct TasksQueue { - /// Are we closing currently. - is_shutdown: AtomicBool, /// Service event. service_event: Condvar, /// Service tasks queue. @@ -75,7 +102,10 @@ struct TasksQueue { } /// Service task. +#[derive(Debug)] enum ServiceTask { + /// Retry all 'stalled' tasks. + Retry, /// Generate server key (server_key_id, threshold). GenerateServerKey(H256, H256), /// Shutdown listener.
@@ -83,16 +113,32 @@ enum ServiceTask { } impl ServiceContractListener { - pub fn new(client: &Arc, key_server: Arc, self_key_pair: Arc) -> Arc { - let contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()).unwrap_or_default(); + pub fn new(client: &Arc, sync: &Arc, key_server: Arc, self_key_pair: Arc, key_servers_set: Arc) -> Arc { + let contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) + .map(|a| { + trace!(target: "secretstore", "Installing service contract from address {}", a); + a + }) + .unwrap_or_default(); + + let is_syncing = sync.status().is_syncing(client.queue_info()); let data = Arc::new(ServiceContractListenerData { + last_retry: AtomicUsize::new(0), + retry_data: Default::default(), contract: RwLock::new(SecretStoreService::new(contract_addr)), client: Arc::downgrade(client), + sync: Arc::downgrade(sync), key_server: key_server, self_key_pair: self_key_pair, + key_servers_set: key_servers_set, tasks_queue: Arc::new(TasksQueue::new()), }); + // retry on restart + if !is_syncing { + data.tasks_queue.push(::std::iter::once(ServiceTask::Retry)); + } + let service_thread_data = data.clone(); let service_handle = thread::spawn(move || Self::run_service_thread(service_thread_data)); let contract = Arc::new(ServiceContractListener { @@ -107,7 +153,8 @@ impl ServiceContractListener { debug_assert!(!blocks.is_empty()); // TODO: is blocks guaranteed to be ordered here? - // TODO: logs() is called from notify() thread - is it ok? + // TODO: logs() is called from notify() thread - is it ok (doesn't 'logs')? 
+ // read server key generation requests let request_logs = client.logs(Filter { from_block: BlockId::Hash(blocks.first().expect("!block.is_empty(); qed").clone()), to_block: BlockId::Hash(blocks.last().expect("!block.is_empty(); qed").clone()), @@ -121,12 +168,16 @@ impl ServiceContractListener { limit: None, }); + // schedule correct requests if they're intended to be processed by this KeyServer self.data.tasks_queue.push(request_logs.into_iter() .filter_map(|r| match r.entry.topics.len() { - 3 => Some(ServiceTask::GenerateServerKey( - r.entry.topics[1], - r.entry.topics[2], - )), + 3 if is_processed_by_this_key_server(&*self.data.key_servers_set, &*self.data.self_key_pair, &r.entry.topics[1]) => { + Some(ServiceTask::GenerateServerKey( + r.entry.topics[1], + r.entry.topics[2], + )) + }, + 3 => None, l @ _ => { warn!(target: "secretstore", "Ignoring ServerKeyRequested event with wrong number of params {}", l); None @@ -137,20 +188,106 @@ impl ServiceContractListener { fn run_service_thread(data: Arc) { loop { let task = data.tasks_queue.wait(); + trace!(target: "secretstore", "Processing {:?} task", task); match task { - ServiceTask::GenerateServerKey(server_key_id, threshold) => { - match Self::generate_server_key(&data, &server_key_id, &threshold) - .and_then(|server_key| Self::publish_server_key(&data, &server_key_id, &server_key)) { - Ok(_) => trace!(target: "secretstore", "GenerateServerKey({}, {}) request has completed", - server_key_id, threshold), - Err(error) => warn!(target: "secretstore", "GenerateServerKey({}, {}) request has failed with: {}", - server_key_id, threshold, error), + ServiceTask::Shutdown => break, + task @ _ => { + // the only possible reaction to an error is a trace && it is already happened + let _ = Self::process_service_task(&data, task); + }, + }; + } + } + + fn process_service_task(data: &Arc, task: ServiceTask) -> Result<(), String> { + match task { + ServiceTask::Retry => + Self::retry_pending_requests(&data) + 
.map(|processed_requests| { + if processed_requests != 0 { + trace!(target: "secretstore", "Successfully retried {} pending requests", + processed_requests); + } + () + }) + .map_err(|error| { + warn!(target: "secretstore", "Retrying pending requests has failed with: {}", + error); + error + }), + ServiceTask::GenerateServerKey(server_key_id, threshold) => { + data.retry_data.lock().generated_keys.insert(server_key_id.clone()); + Self::generate_server_key(&data, &server_key_id, &threshold) + .and_then(|server_key| Self::publish_server_key(&data, &server_key_id, &server_key)) + .map(|_| { + trace!(target: "secretstore", "GenerateServerKey({}, {}) request has completed", + server_key_id, threshold); + () + }) + .map_err(|error| { + warn!(target: "secretstore", "GenerateServerKey({}, {}) request has failed with: {}", + server_key_id, threshold, error); + error + }) + }, + ServiceTask::Shutdown => unreachable!("it must be checked outside"), + } + } + + fn retry_pending_requests(data: &Arc) -> Result { + let client = data.client.upgrade().ok_or("client is required".to_owned())?; + let retry_data = ::std::mem::replace(&mut *data.retry_data.lock(), Default::default()); + let contract = data.contract.read(); + + // it is only possible when contract address is set + if contract.address == Default::default() { + return Ok(0); + } + + let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d)); + let generate_server_key_requests_count = contract.server_key_generation_requests_count(&do_call).wait()?; + let mut generate_server_key_request_index = 0.into(); + let mut failed_requests = 0; + let mut processed_requests = 0; + loop { + if generate_server_key_request_index >= generate_server_key_requests_count { + break; + } + + // read request from the contract + let (server_key_id, threshold, is_confirmed) = contract.get_server_key_generation_request(&do_call, + public_to_address(data.self_key_pair.public()), + generate_server_key_request_index).wait()?; + 
generate_server_key_request_index = generate_server_key_request_index + 1.into(); + + // only process requests, which we haven't confirmed yet + if is_confirmed { + continue; + } + // only process request, which haven't been processed recently + // there could be a lag when we've just generated server key && retrying on the same block + // (or before our tx is mined) - state is not updated yet + if retry_data.generated_keys.contains(&server_key_id){ + continue; + } + // only process requests that are intended to be processed by this server + if !is_processed_by_this_key_server(&*data.key_servers_set, &*data.self_key_pair, &server_key_id) { + continue; + } + + // process request + match Self::process_service_task(data, ServiceTask::GenerateServerKey(server_key_id, threshold.into())) { + Ok(_) => processed_requests += 1, + Err(_) => { + failed_requests += 1; + if failed_requests > MAX_FAILED_RETRY_REQUESTS { + return Err("too many failed requests".into()); } }, - ServiceTask::Shutdown => break, } } + Ok(processed_requests) } fn generate_server_key(data: &Arc, server_key_id: &ServerKeyId, threshold: &H256) -> Result { @@ -159,6 +296,7 @@ impl ServiceContractListener { return Err(format!("invalid threshold {:?}", threshold)); } + // TODO: check if key is already generated // TODO: if this server key is going to be used for document key generation later, author must // be specified from outside let author_key = Random.generate().map_err(|e| format!("{}", e))?; @@ -205,17 +343,32 @@ impl Drop for ServiceContractListener { impl ChainNotify for ServiceContractListener { fn new_blocks(&self, _imported: Vec, _invalid: Vec, enacted: Vec, _retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { - if !enacted.is_empty() { - if let Some(client) = self.data.client.upgrade() { + let enacted_len = enacted.len(); + if enacted_len != 0 { + if let (Some(client), Some(sync)) = (self.data.client.upgrade(), self.data.sync.upgrade()) { + // do nothing until synced + if 
sync.status().is_syncing(client.queue_info()) { + return; + } + + // update contract address from registry if let Some(service_contract_addr) = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) { if self.data.contract.read().address != service_contract_addr { + trace!(target: "secretstore", "Installing service contract from address {}", service_contract_addr); *self.data.contract.write() = SecretStoreService::new(service_contract_addr.clone()); } + + // and process contract events self.process_service_contract_events(&*client, service_contract_addr, enacted); } - } - //self.contract.lock().update(enacted) + // schedule retry if received enough blocks since last retry + // it maybe inaccurate when switching syncing/synced states, but that's ok + if self.data.last_retry.fetch_add(enacted_len, Ordering::AcqRel) >= RETRY_INTEVAL_BLOCKS { + self.data.tasks_queue.push(::std::iter::once(ServiceTask::Retry)); + self.data.last_retry.store(0, Ordering::AcqRel); + } + } } } } @@ -223,37 +376,203 @@ impl ChainNotify for ServiceContractListener { impl TasksQueue { pub fn new() -> Self { TasksQueue { - is_shutdown: AtomicBool::new(false), service_event: Condvar::new(), service_tasks: Mutex::new(VecDeque::new()), } } pub fn shutdown(&self) { - self.is_shutdown.store(true, Ordering::Release); + let mut service_tasks = self.service_tasks.lock(); + service_tasks.push_front(ServiceTask::Shutdown); self.service_event.notify_all(); } pub fn push(&self, tasks: I) where I: Iterator { let mut service_tasks = self.service_tasks.lock(); service_tasks.extend(tasks); - self.service_event.notify_all(); + if !service_tasks.is_empty() { + self.service_event.notify_all(); + } } pub fn wait(&self) -> ServiceTask { - if self.is_shutdown.load(Ordering::Release) { - return ServiceTask::Shutdown; - } - let mut service_tasks = self.service_tasks.lock(); if service_tasks.is_empty() { self.service_event.wait(&mut service_tasks); - if self.is_shutdown.load(Ordering::Release) { - return 
ServiceTask::Shutdown; - } } service_tasks.pop_front() .expect("service_event is only fired when there are new tasks or is_shutdown == true; is_shutdown == false; qed") } } + +/// Returns true when session, related to `server_key_id` must be started on this KeyServer. +fn is_processed_by_this_key_server(key_servers_set: &KeyServerSet, self_key_pair: &NodeKeyPair, server_key_id: &H256) -> bool { + let servers = key_servers_set.get(); + let total_servers_count = servers.len(); + if total_servers_count == 0 { + return false; + } + let this_server_index = match servers.keys().enumerate().find(|&(_, s)| s == self_key_pair.public()) { + Some((index, _)) => index, + None => return false, + }; + + let server_key_id_value: U256 = server_key_id.into(); + let range_interval = U256::max_value() / total_servers_count.into(); + let range_begin = (range_interval + 1.into()) * this_server_index.into(); + let range_end = range_begin.saturating_add(range_interval); + + server_key_id_value >= range_begin && server_key_id_value <= range_end +} + +#[cfg(test)] +mod tests { + use ethkey::{Random, Generator, KeyPair}; + use key_server_set::tests::MapKeyServerSet; + use PlainNodeKeyPair; + use super::is_processed_by_this_key_server; + + #[test] + fn is_not_processed_by_this_key_server_with_zero_servers() { + assert_eq!(is_processed_by_this_key_server( + &MapKeyServerSet::default(), + &PlainNodeKeyPair::new(Random.generate().unwrap()), + &Default::default()), false); + } + + #[test] + fn is_not_processed_by_this_key_server_when_not_a_part_of_servers_set() { + assert_eq!(is_processed_by_this_key_server( + &MapKeyServerSet::new(vec![ + (Random.generate().unwrap().public().clone(), "127.0.0.1:8080".parse().unwrap()) + ].into_iter().collect()), + &PlainNodeKeyPair::new(Random.generate().unwrap()), + &Default::default()), false); + } + + #[test] + fn is_processed_by_this_key_server_in_set_of_3() { + // servers set is ordered && server range depends on index of this server + let servers_set = 
MapKeyServerSet::new(vec![ + // secret: 0000000000000000000000000000000000000000000000000000000000000001 + ("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(), + "127.0.0.1:8080".parse().unwrap()), + // secret: 0000000000000000000000000000000000000000000000000000000000000002 + ("c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee51ae168fea63dc339a3c58419466ceaeef7f632653266d0e1236431a950cfe52a".parse().unwrap(), + "127.0.0.1:8080".parse().unwrap()), + // secret: 0000000000000000000000000000000000000000000000000000000000000003 + ("f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9388f7b0f632de8140fe337e62a37f3566500a99934c2231b6cb9fd7584b8e672".parse().unwrap(), + "127.0.0.1:8080".parse().unwrap()), + ].into_iter().collect()); + + // 1st server: process hashes [0x0; 0x555...555] + let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap()); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"0000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"3000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"5555555555555555555555555555555555555555555555555555555555555555".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"5555555555555555555555555555555555555555555555555555555555555556".parse().unwrap()), false); + + // 2nd server: process hashes from 0x555...556 to 0xaaa...aab + let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000002".parse().unwrap()).unwrap()); + 
assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"5555555555555555555555555555555555555555555555555555555555555555".parse().unwrap()), false); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"5555555555555555555555555555555555555555555555555555555555555556".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"7555555555555555555555555555555555555555555555555555555555555555".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac".parse().unwrap()), false); + + // 3rd server: process hashes from 0x800...000 to 0xbff...ff + let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000003".parse().unwrap()).unwrap()); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab".parse().unwrap()), false); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"daaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true); + } + + #[test] + fn is_processed_by_this_key_server_in_set_of_4() { + // servers set is ordered && server range depends on index of this server + let servers_set = MapKeyServerSet::new(vec![ + // secret: 0000000000000000000000000000000000000000000000000000000000000001 + 
("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(), + "127.0.0.1:8080".parse().unwrap()), + // secret: 0000000000000000000000000000000000000000000000000000000000000002 + ("c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee51ae168fea63dc339a3c58419466ceaeef7f632653266d0e1236431a950cfe52a".parse().unwrap(), + "127.0.0.1:8080".parse().unwrap()), + // secret: 0000000000000000000000000000000000000000000000000000000000000004 + ("e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd1351ed993ea0d455b75642e2098ea51448d967ae33bfbdfe40cfe97bdc47739922".parse().unwrap(), + "127.0.0.1:8080".parse().unwrap()), + // secret: 0000000000000000000000000000000000000000000000000000000000000003 + ("f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9388f7b0f632de8140fe337e62a37f3566500a99934c2231b6cb9fd7584b8e672".parse().unwrap(), + "127.0.0.1:8080".parse().unwrap()), + ].into_iter().collect()); + + // 1st server: process hashes [0x0; 0x3ff...ff] + let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap()); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"0000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"2000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"4000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), false); + + // 2nd server: process hashes from 0x400...000 to 0x7ff...ff + let key_pair = 
PlainNodeKeyPair::new(KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000002".parse().unwrap()).unwrap()); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), false); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"4000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"6000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"8000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), false); + + // 3rd server: process hashes from 0x800...000 to 0xbff...ff + let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000004".parse().unwrap()).unwrap()); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), false); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"8000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"a000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"bfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + 
&"c000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), false); + + // 4th server: process hashes from 0xc00...000 to 0xfff...ff + let key_pair = PlainNodeKeyPair::new(KeyPair::from_secret( + "0000000000000000000000000000000000000000000000000000000000000003".parse().unwrap()).unwrap()); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"bfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), false); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"c000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"e000000000000000000000000000000000000000000000000000000000000000".parse().unwrap()), true); + assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, + &"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true); + } +} From 01d6532875d2e0109ded0c582002daa8d057127b Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 20 Nov 2017 15:59:23 +0300 Subject: [PATCH 05/42] SecretStore: fixed Ordering --- secret_store/src/listener/service_contract_listener.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index 884c6d260..acb453872 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -364,9 +364,9 @@ impl ChainNotify for ServiceContractListener { // schedule retry if received enough blocks since last retry // it maybe inaccurate when switching syncing/synced states, but that's ok - if self.data.last_retry.fetch_add(enacted_len, Ordering::AcqRel) >= RETRY_INTEVAL_BLOCKS { + if self.data.last_retry.fetch_add(enacted_len, Ordering::Relaxed) >= RETRY_INTEVAL_BLOCKS { 
self.data.tasks_queue.push(::std::iter::once(ServiceTask::Retry)); - self.data.last_retry.store(0, Ordering::AcqRel); + self.data.last_retry.store(0, Ordering::Relaxed); } } } From 32edb33608f5a58f7f1835cf20a635ccb1368a87 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 20 Nov 2017 18:53:08 +0300 Subject: [PATCH 06/42] removed some TODOs --- secret_store/src/listener/service_contract_listener.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index acb453872..74e76b1c1 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -152,8 +152,6 @@ impl ServiceContractListener { fn process_service_contract_events(&self, client: &Client, service_contract: Address, blocks: Vec) { debug_assert!(!blocks.is_empty()); - // TODO: is blocks guaranteed to be ordered here? - // TODO: logs() is called from notify() thread - is it ok (doesn't 'logs')? 
// read server key generation requests let request_logs = client.logs(Filter { from_block: BlockId::Hash(blocks.first().expect("!block.is_empty(); qed").clone()), @@ -296,14 +294,13 @@ impl ServiceContractListener { return Err(format!("invalid threshold {:?}", threshold)); } - // TODO: check if key is already generated - // TODO: if this server key is going to be used for document key generation later, author must - // be specified from outside + // key server expects signed server_key_id in server_key_generation procedure + // only signer could store document key for this server key later + // => this API (server key generation) is not suitable for usage in encryption via contract endpoint let author_key = Random.generate().map_err(|e| format!("{}", e))?; let server_key_id_signature = sign(author_key.secret(), server_key_id).map_err(|e| format!("{}", e))?; data.key_server.generate_key(server_key_id, &server_key_id_signature, threshold_num as usize) .map_err(Into::into) - } fn publish_server_key(data: &Arc, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> { From af409eba071381c2e45997824d3426fd0036fb79 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 20 Nov 2017 19:41:53 +0300 Subject: [PATCH 07/42] SecretStore: fix after merge from secretstore_kovan_1_8 --- Cargo.lock | 2 +- ethcore/native_contracts/generator/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fd4cc2e5d..600970524 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -678,7 +678,7 @@ dependencies = [ "ethcore-util 1.9.0", "ethcrypto 0.1.0", "ethkey 0.2.0", - "ethsync 1.8.0", + "ethsync 1.9.0", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "hash 0.1.0", diff --git a/ethcore/native_contracts/generator/src/lib.rs b/ethcore/native_contracts/generator/src/lib.rs index be71fc395..5986bded4 100644 --- 
a/ethcore/native_contracts/generator/src/lib.rs +++ b/ethcore/native_contracts/generator/src/lib.rs @@ -134,7 +134,7 @@ pub fn {snake_name}(&self, call: F, {params}) -> BoxFuture<{output_type}, pub fn encode_{snake_name}_input(&self, {params}) -> Result, String> {{ self.contract.function(r#"{abi_name}"#) .expect("function existence checked at compile-time; qed") - .encode_call({to_tokens}) + .encode_input(&{to_tokens}) .map_err(|e| format!("Error encoding call: {{:?}}", e)) }} "##, From 76e693240d50cd99017b5a48ea083d25855fb122 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 20 Nov 2017 20:02:03 +0300 Subject: [PATCH 08/42] fix after merge --- secret_store/src/listener/http_listener.rs | 6 ------ secret_store/src/listener/mod.rs | 11 +++++++++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/secret_store/src/listener/http_listener.rs b/secret_store/src/listener/http_listener.rs index b8d661f07..3350b51d4 100644 --- a/secret_store/src/listener/http_listener.rs +++ b/secret_store/src/listener/http_listener.rs @@ -31,12 +31,6 @@ use traits::KeyServer; use serialization::{SerializableEncryptedDocumentKeyShadow, SerializableBytes, SerializablePublic}; use types::all::{Error, Public, MessageHash, NodeAddress, RequestSignature, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId}; -/*======= -use traits::{ServerKeyGenerator, AdminSessionsServer, DocumentKeyServer, MessageSigner, KeyServer}; -use serialization::{SerializableEncryptedDocumentKeyShadow, SerializableBytes, SerializablePublic}; -use types::all::{Error, Public, MessageHash, EncryptedMessageSignature, NodeAddress, RequestSignature, ServerKeyId, - EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId}; ->>>>>>> master:secret_store/src/http_listener.rs*/ /// Key server http-requests listener. 
Available requests: /// To generate server key: POST /shadow/{server_key_id}/{signature}/{threshold} diff --git a/secret_store/src/listener/mod.rs b/secret_store/src/listener/mod.rs index 858f01ee0..1ebe4aa47 100644 --- a/secret_store/src/listener/mod.rs +++ b/secret_store/src/listener/mod.rs @@ -1,10 +1,11 @@ pub mod http_listener; pub mod service_contract_listener; +use std::collections::BTreeSet; use std::sync::Arc; -use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer}; +use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, AdminSessionsServer, KeyServer}; use types::all::{Error, Public, MessageHash, EncryptedMessageSignature, RequestSignature, ServerKeyId, - EncryptedDocumentKey, EncryptedDocumentKeyShadow}; + EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId}; pub struct Listener { key_server: Arc, @@ -53,3 +54,9 @@ impl MessageSigner for Listener { self.key_server.sign_message(key_id, signature, message) } } + +impl AdminSessionsServer for Listener { + fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet) -> Result<(), Error> { + self.key_server.change_servers_set(old_set_signature, new_set_signature, new_servers_set) + } +} \ No newline at end of file From 5a7e065e41cde696045366e39eb4b0f644452b37 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 22 Nov 2017 10:05:14 +0300 Subject: [PATCH 09/42] SecretStore: Kovan flush3 --- secret_store/src/key_server.rs | 5 +- .../src/key_server_cluster/cluster.rs | 46 +++++++------ .../key_server_cluster/cluster_sessions.rs | 56 ++++++++++++++- secret_store/src/key_server_cluster/mod.rs | 1 + secret_store/src/lib.rs | 3 +- .../src/listener/service_contract_listener.rs | 69 ++++++++++++++----- 6 files changed, 140 insertions(+), 40 deletions(-) diff --git a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index 0d23b99c8..e9cd382d9 100644 --- a/secret_store/src/key_server.rs +++ 
b/secret_store/src/key_server.rs @@ -31,6 +31,10 @@ use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, Message use types::all::{Error, Public, RequestSignature, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow, ClusterConfiguration, MessageHash, EncryptedMessageSignature, NodeId}; use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration}; +use key_server_cluster::generation_session::Session as GenerationSession; +use key_server_cluster::encryption_session::Session as EncryptionSession; +use key_server_cluster::decryption_session::Session as DecryptionSession; +use key_server_cluster::signing_session::Session as SigningSession; /// Secret store key server implementation pub struct KeyServerImpl { @@ -53,7 +57,6 @@ impl KeyServerImpl { } /// Get cluster client reference. - #[cfg(test)] pub fn cluster(&self) -> Arc { self.data.lock().cluster.clone() } diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 0a975c275..3d3724e13 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -31,17 +31,15 @@ use bigint::hash::H256; use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair}; use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, GenerationSessionWrapper, EncryptionSessionWrapper, DecryptionSessionWrapper, SigningSessionWrapper, AdminSessionWrapper, KeyNegotiationSessionWrapper, SessionIdWithSubSession, - ClusterSessionsContainer, SERVERS_SET_CHANGE_SESSION_ID, create_cluster_view, AdminSessionCreationData}; + ClusterSessionsContainer, SERVERS_SET_CHANGE_SESSION_ID, create_cluster_view, AdminSessionCreationData, ClusterSessionsListener}; use key_server_cluster::cluster_sessions_creator::{ClusterSessionCreator, IntoSessionId}; use key_server_cluster::message::{self, Message, ClusterMessage}; -use 
key_server_cluster::generation_session::{Session as GenerationSession}; -#[cfg(test)] -use key_server_cluster::generation_session::SessionImpl as GenerationSessionImpl; -use key_server_cluster::decryption_session::{Session as DecryptionSession}; -use key_server_cluster::encryption_session::{Session as EncryptionSession}; -use key_server_cluster::signing_session::{Session as SigningSession}; -use key_server_cluster::key_version_negotiation_session::{Session as KeyVersionNegotiationSession, SessionImpl as KeyVersionNegotiationSessionImpl, - IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction}; +use key_server_cluster::generation_session::{SessionImpl as GenerationSession, Session as GenerationSessionTrait}; +use key_server_cluster::decryption_session::{SessionImpl as DecryptionSession, Session as DecryptionSessionTrait}; +use key_server_cluster::encryption_session::{SessionImpl as EncryptionSession, Session as EncryptionSessionTrait}; +use key_server_cluster::signing_session::{SessionImpl as SigningSession, Session as SigningSessionTrait}; +use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession, + IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction, Session as KeyVersionNegotiationSessionTrait}; use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream, read_encrypted_message, WriteMessage, write_encrypted_message}; use key_server_cluster::net::{accept_connection as net_accept_connection, connect as net_connect, Connection as NetConnection}; @@ -74,16 +72,19 @@ pub trait ClusterClient: Send + Sync { /// Start new signing session. fn new_signing_session(&self, session_id: SessionId, requestor_signature: Signature, version: Option, message_hash: H256) -> Result, Error>; /// Start new key version negotiation session. 
- fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result, Error>; + fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result>, Error>; /// Start new servers set change session. fn new_servers_set_change_session(&self, session_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error>; + /// Listen for new generation sessions. + fn add_generation_listener(&self, listener: Arc>); + /// Ask node to make 'faulty' generation sessions. #[cfg(test)] fn make_faulty_generation_sessions(&self); /// Get active generation session with given id. #[cfg(test)] - fn generation_session(&self, session_id: &SessionId) -> Option>; + fn generation_session(&self, session_id: &SessionId) -> Option>; /// Try connect to disconnected nodes. #[cfg(test)] fn connect(&self); @@ -446,7 +447,7 @@ impl ClusterCore { } /// Try to contnue session. - fn try_continue_session(data: &Arc, session: Option>>) { + fn try_continue_session(data: &Arc, session: Option>>) { if let Some(session) = session { let meta = session.meta(); let is_master_node = meta.self_node_id == meta.master_node_id; @@ -842,7 +843,7 @@ impl ClusterClientImpl { } } - fn create_key_version_negotiation_session(&self, session_id: SessionId) -> Result>, Error> { + fn create_key_version_negotiation_session(&self, session_id: SessionId) -> Result>, Error> { let mut connected_nodes = self.data.connections.connected_nodes(); connected_nodes.insert(self.data.self_key_pair.public().clone()); @@ -872,7 +873,7 @@ impl ClusterClient for ClusterClientImpl { let cluster = create_cluster_view(&self.data, true)?; let session = self.data.sessions.generation_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?; match session.initialize(author, threshold, connected_nodes) { - Ok(()) => Ok(GenerationSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)), + Ok(()) => Ok(session), Err(error) => 
{ self.data.sessions.generation_sessions.remove(&session.id()); Err(error) @@ -887,7 +888,7 @@ impl ClusterClient for ClusterClientImpl { let cluster = create_cluster_view(&self.data, true)?; let session = self.data.sessions.encryption_sessions.insert(cluster, self.data.self_key_pair.public().clone(), session_id, None, false, None)?; match session.initialize(requestor_signature, common_point, encrypted_point) { - Ok(()) => Ok(EncryptionSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)), + Ok(()) => Ok(session), Err(error) => { self.data.sessions.encryption_sessions.remove(&session.id()); Err(error) @@ -916,7 +917,7 @@ impl ClusterClient for ClusterClientImpl { }; match initialization_result { - Ok(()) => Ok(DecryptionSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)), + Ok(()) => Ok(session), Err(error) => { self.data.sessions.decryption_sessions.remove(&session.id()); Err(error) @@ -945,7 +946,7 @@ impl ClusterClient for ClusterClientImpl { }; match initialization_result { - Ok(()) => Ok(SigningSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)), + Ok(()) => Ok(session), Err(error) => { self.data.sessions.signing_sessions.remove(&session.id()); Err(error) @@ -953,9 +954,9 @@ impl ClusterClient for ClusterClientImpl { } } - fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result, Error> { + fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result>, Error> { let session = self.create_key_version_negotiation_session(session_id)?; - Ok(KeyNegotiationSessionWrapper::new(Arc::downgrade(&self.data), session.id(), session)) + Ok(session) } fn new_servers_set_change_session(&self, session_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error> { @@ -982,6 +983,10 @@ impl ClusterClient for ClusterClientImpl { } } + fn add_generation_listener(&self, listener: Arc>) { + 
self.data.sessions.generation_sessions.add_listener(listener); + } + #[cfg(test)] fn connect(&self) { ClusterCore::connect_disconnected_nodes(self.data.clone()); @@ -993,7 +998,7 @@ impl ClusterClient for ClusterClientImpl { } #[cfg(test)] - fn generation_session(&self, session_id: &SessionId) -> Option> { + fn generation_session(&self, session_id: &SessionId) -> Option> { self.data.sessions.generation_sessions.get(session_id, false) } @@ -1021,6 +1026,7 @@ pub mod tests { use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration}; use key_server_cluster::cluster_sessions::ClusterSession; use key_server_cluster::generation_session::{Session as GenerationSession, SessionState as GenerationSessionState}; + use key_server_cluster::signing_session::Session as SigningSession; #[derive(Debug)] pub struct DummyCluster { diff --git a/secret_store/src/key_server_cluster/cluster_sessions.rs b/secret_store/src/key_server_cluster/cluster_sessions.rs index 254e3ecc6..ceec154da 100644 --- a/secret_store/src/key_server_cluster/cluster_sessions.rs +++ b/secret_store/src/key_server_cluster/cluster_sessions.rs @@ -120,12 +120,22 @@ pub struct ClusterSessions { creator_core: Arc, } +/// Active sessions container listener. +pub trait ClusterSessionsListener: Send + Sync { + /// When new session is inserted to the container. + fn on_session_inserted(&self, session: Arc); + /// When session is removed from the container. + fn on_session_removed(&self, session: Arc); +} + /// Active sessions container. pub struct ClusterSessionsContainer, D> { /// Sessions creator. pub creator: SC, /// Active sessions. sessions: RwLock>>, + /// Listeners. Lock order: sessions -> listeners. + listeners: Mutex>>>, /// Sessions container state. container_state: Arc>, /// Phantom data. 
@@ -294,11 +304,16 @@ impl ClusterSessionsContainer where S: ClusterSession, SC: C ClusterSessionsContainer { creator: creator, sessions: RwLock::new(BTreeMap::new()), + listeners: Mutex::new(Vec::new()), container_state: container_state, _pd: Default::default(), } } + pub fn add_listener(&self, listener: Arc>) { + self.listeners.lock().push(Arc::downgrade(&listener)); + } + pub fn is_empty(&self) -> bool { self.sessions.read().is_empty() } @@ -342,12 +357,51 @@ impl ClusterSessionsContainer where S: ClusterSession, SC: C queue: VecDeque::new(), }; sessions.insert(session_id, queued_session); + + // notify listeners + let mut listeners = self.listeners.lock(); + let mut listener_index = 0; + loop { + if listener_index >= listeners.len() { + break; + } + + match listeners[listener_index].upgrade() { + Some(listener) => { + listener.on_session_inserted(session.clone()); + listener_index += 1; + }, + None => { + listeners.swap_remove(listener_index); + }, + } + } + Ok(session) } pub fn remove(&self, session_id: &S::Id) { - if self.sessions.write().remove(session_id).is_some() { + if let Some(session) = self.sessions.write().remove(session_id) { self.container_state.lock().on_session_completed(); + + // notify listeners + let mut listeners = self.listeners.lock(); + let mut listener_index = 0; + loop { + if listener_index >= listeners.len() { + break; + } + + match listeners[listener_index].upgrade() { + Some(listener) => { + listener.on_session_removed(session.session.clone()); + listener_index += 1; + }, + None => { + listeners.swap_remove(listener_index); + }, + } + } } } diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index 81f9be647..8a2f777c0 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -27,6 +27,7 @@ pub use super::key_storage::{KeyStorage, DocumentKeyShare, DocumentKeyShareVersi pub use super::key_server_set::KeyServerSet; pub use 
super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic, SerializableMessageHash}; pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient}; +pub use self::cluster_sessions::{ClusterSession, ClusterSessionsListener}; pub use self::generation_session::Session as GenerationSession; pub use self::encryption_session::Session as EncryptionSession; pub use self::decryption_session::Session as DecryptionSession; diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 34675b9c5..09d4ce774 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -80,11 +80,12 @@ pub fn start(client: Arc, sync: Arc, self_key_pair: Arc Some(listener::http_listener::KeyServerHttpListener::start(listener_address, key_server.clone())?), None => None, }; - let contract_listener = listener::service_contract_listener::ServiceContractListener::new(&client, &sync, key_server.clone(), self_key_pair, key_server_set); + let contract_listener = listener::service_contract_listener::ServiceContractListener::new(&client, &sync, key_server.clone(), cluster, self_key_pair, key_server_set); let listener = listener::Listener::new(key_server, http_listener, Some(contract_listener)); Ok(Box::new(listener)) } diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index 74e76b1c1..e3fc83674 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -31,6 +31,8 @@ use bigint::hash::H256; use bigint::prelude::U256; use util::Address; use key_server_set::KeyServerSet; +use key_server_cluster::{ClusterClient, ClusterSessionsListener, ClusterSession}; +use key_server_cluster::generation_session::{SessionImpl as GenerationSession, Session as GenerationSessionTrait}; use {ServerKeyId, NodeKeyPair, KeyServer}; /// Name of the SecretStore contract in the registry. 
@@ -108,15 +110,17 @@ enum ServiceTask { Retry, /// Generate server key (server_key_id, threshold). GenerateServerKey(H256, H256), + /// Confirm server key (server_key_id). + ConfirmServerKey(H256), /// Shutdown listener. Shutdown, } impl ServiceContractListener { - pub fn new(client: &Arc, sync: &Arc, key_server: Arc, self_key_pair: Arc, key_servers_set: Arc) -> Arc { + pub fn new(client: &Arc, sync: &Arc, key_server: Arc, cluster: Arc, self_key_pair: Arc, key_servers_set: Arc) -> Arc { let contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) .map(|a| { - trace!(target: "secretstore", "Installing service contract from address {}", a); + trace!(target: "secretstore", "{}: installing service contract from address {}", self_key_pair.public(), a); a }) .unwrap_or_default(); @@ -146,6 +150,7 @@ impl ServiceContractListener { service_handle: Some(service_handle), }); client.add_notify(contract.clone()); + cluster.add_generation_listener(contract.clone()); contract } @@ -186,7 +191,7 @@ impl ServiceContractListener { fn run_service_thread(data: Arc) { loop { let task = data.tasks_queue.wait(); - trace!(target: "secretstore", "Processing {:?} task", task); + trace!(target: "secretstore", "{}: processing {:?} task",data.self_key_pair.public(), task); match task { ServiceTask::Shutdown => break, @@ -204,28 +209,29 @@ impl ServiceContractListener { Self::retry_pending_requests(&data) .map(|processed_requests| { if processed_requests != 0 { - trace!(target: "secretstore", "Successfully retried {} pending requests", - processed_requests); + trace!(target: "secretstore", "{}: successfully retried {} pending requests", + data.self_key_pair.public(), processed_requests); } () }) .map_err(|error| { - warn!(target: "secretstore", "Retrying pending requests has failed with: {}", - error); + warn!(target: "secretstore", "{}: retrying pending requests has failed with: {}", + data.self_key_pair.public(), error); error }), + ServiceTask::ConfirmServerKey(_) 
=> Err("not implemented".to_owned()), // TODO ServiceTask::GenerateServerKey(server_key_id, threshold) => { data.retry_data.lock().generated_keys.insert(server_key_id.clone()); Self::generate_server_key(&data, &server_key_id, &threshold) .and_then(|server_key| Self::publish_server_key(&data, &server_key_id, &server_key)) .map(|_| { - trace!(target: "secretstore", "GenerateServerKey({}, {}) request has completed", - server_key_id, threshold); + trace!(target: "secretstore", "{}: started processing GenerateServerKey({}, {}) request", + data.self_key_pair.public(), server_key_id, threshold); () }) .map_err(|error| { - warn!(target: "secretstore", "GenerateServerKey({}, {}) request has failed with: {}", - server_key_id, threshold, error); + warn!(target: "secretstore", "{}: failed to start processing GenerateServerKey({}, {}) request with: {}", + data.self_key_pair.public(), server_key_id, threshold, error); error }) }, @@ -263,19 +269,23 @@ impl ServiceContractListener { if is_confirmed { continue; } + // only process request, which haven't been processed recently // there could be a lag when we've just generated server key && retrying on the same block // (or before our tx is mined) - state is not updated yet if retry_data.generated_keys.contains(&server_key_id){ continue; } - // only process requests that are intended to be processed by this server - if !is_processed_by_this_key_server(&*data.key_servers_set, &*data.self_key_pair, &server_key_id) { - continue; - } // process request - match Self::process_service_task(data, ServiceTask::GenerateServerKey(server_key_id, threshold.into())) { + let is_own_request = is_processed_by_this_key_server(&*data.key_servers_set, &*data.self_key_pair, &server_key_id); + let request_result = Self::process_service_task(data, match is_own_request { + true => ServiceTask::GenerateServerKey(server_key_id, threshold.into()), + false => ServiceTask::ConfirmServerKey(server_key_id), + }); + + // process request result + match 
request_result { Ok(_) => processed_requests += 1, Err(_) => { failed_requests += 1; @@ -351,7 +361,7 @@ impl ChainNotify for ServiceContractListener { // update contract address from registry if let Some(service_contract_addr) = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) { if self.data.contract.read().address != service_contract_addr { - trace!(target: "secretstore", "Installing service contract from address {}", service_contract_addr); + trace!(target: "secretstore", "{}: installing service contract from address {}", self.data.self_key_pair.public(), service_contract_addr); *self.data.contract.write() = SecretStoreService::new(service_contract_addr.clone()); } @@ -370,6 +380,31 @@ impl ChainNotify for ServiceContractListener { } } +impl ClusterSessionsListener for ServiceContractListener { + fn on_session_inserted(&self, _session: Arc) { + } + + fn on_session_removed(&self, session: Arc) { + // TODO: only start if session started via the contract + // only publish when the session is started by another node + if !is_processed_by_this_key_server(&*self.data.key_servers_set, &*self.data.self_key_pair, &session.id()) { + session.wait(Some(Default::default())) + .map_err(|e| format!("{}", e)) + .and_then(|server_key| Self::publish_server_key(&self.data, &session.id(), &server_key)) + .map(|_| { + trace!(target: "secretstore", "{}: completed foreign GenerateServerKey({}) request", + self.data.self_key_pair.public(), session.id()); + () + }) + .map_err(|error| { + warn!(target: "secretstore", "{}: failed to process GenerateServerKey({}) request with: {}", + self.data.self_key_pair.public(), session.id(), error); + error + }); + } + } +} + impl TasksQueue { pub fn new() -> Self { TasksQueue { From fc7f3433b78ac0a41f08005ba31a53149d26cb13 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 22 Nov 2017 10:21:14 +0300 Subject: [PATCH 10/42] SecretStore: removed obsolete traits --- secret_store/src/key_server.rs | 8 +- 
.../key_version_negotiation_session.rs | 53 ++-- .../servers_set_change_session.rs | 31 +-- .../admin_sessions/share_add_session.rs | 29 +- .../client_sessions/decryption_session.rs | 31 +-- .../client_sessions/encryption_session.rs | 49 ++-- .../client_sessions/generation_session.rs | 61 ++-- .../client_sessions/signing_session.rs | 36 +-- .../src/key_server_cluster/cluster.rs | 22 +- .../key_server_cluster/cluster_sessions.rs | 261 +----------------- secret_store/src/key_server_cluster/mod.rs | 3 - .../src/listener/service_contract_listener.rs | 2 +- 12 files changed, 139 insertions(+), 447 deletions(-) diff --git a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index e9cd382d9..6a68abe46 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -31,10 +31,6 @@ use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, Message use types::all::{Error, Public, RequestSignature, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow, ClusterConfiguration, MessageHash, EncryptedMessageSignature, NodeId}; use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration}; -use key_server_cluster::generation_session::Session as GenerationSession; -use key_server_cluster::encryption_session::Session as EncryptionSession; -use key_server_cluster::decryption_session::Session as DecryptionSession; -use key_server_cluster::signing_session::Session as SigningSession; /// Secret store key server implementation pub struct KeyServerImpl { @@ -68,7 +64,9 @@ impl AdminSessionsServer for KeyServerImpl { fn change_servers_set(&self, old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet) -> Result<(), Error> { let servers_set_change_session = self.data.lock().cluster .new_servers_set_change_session(None, new_servers_set, old_set_signature, new_set_signature)?; - servers_set_change_session.wait().map_err(Into::into) + 
servers_set_change_session.as_servers_set_change() + .expect("new_servers_set_change_session creates servers_set_change_session; qed") + .wait().map_err(Into::into) } } diff --git a/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs b/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs index b12269fe7..f755ae089 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs +++ b/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs @@ -31,16 +31,6 @@ use key_server_cluster::admin_sessions::ShareChangeSessionMeta; /// Number of versions sent in single message. const VERSIONS_PER_MESSAGE: usize = 32; -/// Key version negotiation session API. -pub trait Session: Send + Sync + 'static { - /// Set continue action. - fn set_continue_action(&self, action: ContinueAction); - /// Get continue action. - fn continue_action(&self) -> Option; - /// Wait until session is completed. - fn wait(&self) -> Result<(H256, NodeId), Error>; -} - /// Key version negotiation transport. pub trait SessionTransport { /// Send message to given node. @@ -196,6 +186,28 @@ impl SessionImpl where T: SessionTransport { .clone()) } + /// Set continue action. + pub fn set_continue_action(&self, action: ContinueAction) { + self.data.lock().continue_with = Some(action); + } + + /// Get continue action. + pub fn continue_action(&self) -> Option { + self.data.lock().continue_with.clone() + } + + /// Wait for session completion. + pub fn wait(&self) -> Result<(H256, NodeId), Error> { + let mut data = self.data.lock(); + if !data.result.is_some() { + self.core.completed.wait(&mut data); + } + + data.result.as_ref() + .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") + .clone() + } + /// Initialize session. 
pub fn initialize(&self, connected_nodes: BTreeSet) -> Result<(), Error> { // check state @@ -355,27 +367,6 @@ impl SessionImpl where T: SessionTransport { } } -impl Session for SessionImpl where T: SessionTransport + Send + Sync + 'static { - fn set_continue_action(&self, action: ContinueAction) { - self.data.lock().continue_with = Some(action); - } - - fn continue_action(&self) -> Option { - self.data.lock().continue_with.clone() - } - - fn wait(&self) -> Result<(H256, NodeId), Error> { - let mut data = self.data.lock(); - if !data.result.is_some() { - self.core.completed.wait(&mut data); - } - - data.result.as_ref() - .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") - .clone() - } -} - impl ClusterSession for SessionImpl where T: SessionTransport { type Id = SessionIdWithSubSession; diff --git a/secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs b/secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs index 223caef21..17a1468a2 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs +++ b/secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs @@ -33,7 +33,7 @@ use key_server_cluster::share_change_session::{ShareChangeSession, ShareChangeSe prepare_share_change_session_plan}; use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl, SessionParams as KeyVersionNegotiationSessionParams, LargestSupportResultComputer, - SessionTransport as KeyVersionNegotiationTransport, Session as KeyVersionNegotiationSession}; + SessionTransport as KeyVersionNegotiationTransport}; use key_server_cluster::jobs::job_session::JobTransport; use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest}; use key_server_cluster::jobs::unknown_sessions_job::{UnknownSessionsJob}; @@ -44,12 +44,6 @@ use 
key_server_cluster::admin_sessions::ShareChangeSessionMeta; /// Maximal number of active share change sessions. const MAX_ACTIVE_KEY_SESSIONS: usize = 64; -/// Servers set change session API. -pub trait Session: Send + Sync + 'static { - /// Wait until session is completed. - fn wait(&self) -> Result<(), Error>; -} - /// Servers set change session. /// Brief overview: /// 1) consensus establishing @@ -211,6 +205,17 @@ impl SessionImpl { &self.core.meta.id } + /// Wait for session completion. + pub fn wait(&self) -> Result<(), Error> { + let mut data = self.data.lock(); + if !data.result.is_some() { + self.core.completed.wait(&mut data); + } + + data.result.clone() + .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") + } + /// Initialize servers set change session on master node. pub fn initialize(&self, new_nodes_set: BTreeSet, all_set_signature: Signature, new_set_signature: Signature) -> Result<(), Error> { check_nodes_set(&self.core.all_nodes_set, &new_nodes_set)?; @@ -877,18 +882,6 @@ impl SessionImpl { } } -impl Session for SessionImpl { - fn wait(&self) -> Result<(), Error> { - let mut data = self.data.lock(); - if !data.result.is_some() { - self.core.completed.wait(&mut data); - } - - data.result.clone() - .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") - } -} - impl ClusterSession for SessionImpl { type Id = SessionId; diff --git a/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs b/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs index 040360d7d..a31df24c6 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs +++ b/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs @@ -32,12 +32,6 @@ use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAc use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, 
ConsensusSessionState, ConsensusSession}; use key_server_cluster::admin_sessions::ShareChangeSessionMeta; -/// Share addition session API. -pub trait Session: Send + Sync + 'static { - /// Wait until session is completed. - fn wait(&self) -> Result<(), Error>; -} - /// Share addition session transport. pub trait SessionTransport: Clone + JobTransport { /// Get all connected nodes. Since ShareAdd session requires all cluster nodes to be connected, this set equals to all known cluster nodes set. @@ -182,6 +176,17 @@ impl SessionImpl where T: SessionTransport { }) } + /// Wait for session completion. + pub fn wait(&self) -> Result<(), Error> { + let mut data = self.data.lock(); + if !data.result.is_some() { + self.core.completed.wait(&mut data); + } + + data.result.clone() + .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") + } + /// Set pre-established consensus data. pub fn set_consensus_output(&self, version: &H256, consensus_group: BTreeSet, mut new_nodes_map: BTreeMap>) -> Result<(), Error> { let mut data = self.data.lock(); @@ -721,18 +726,6 @@ impl SessionImpl where T: SessionTransport { } } -impl Session for SessionImpl where T: SessionTransport + Send + Sync + 'static { - fn wait(&self) -> Result<(), Error> { - let mut data = self.data.lock(); - if !data.result.is_some() { - self.core.completed.wait(&mut data); - } - - data.result.clone() - .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") - } -} - impl ClusterSession for SessionImpl where T: SessionTransport { type Id = SessionId; diff --git a/secret_store/src/key_server_cluster/client_sessions/decryption_session.rs b/secret_store/src/key_server_cluster/client_sessions/decryption_session.rs index 087fc9245..61055612a 100644 --- a/secret_store/src/key_server_cluster/client_sessions/decryption_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/decryption_session.rs @@ -30,12 +30,6 @@ 
use key_server_cluster::jobs::key_access_job::KeyAccessJob; use key_server_cluster::jobs::decryption_job::{PartialDecryptionRequest, PartialDecryptionResponse, DecryptionJob}; use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession}; -/// Decryption session API. -pub trait Session: Send + Sync + 'static { - /// Wait until session is completed. Returns distributely restored secret key. - fn wait(&self) -> Result; -} - /// Distributed decryption session. /// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: /// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf @@ -206,6 +200,18 @@ impl SessionImpl { self.data.lock().result.clone() } + /// Wait for session completion. + pub fn wait(&self) -> Result { + let mut data = self.data.lock(); + if !data.result.is_some() { + self.core.completed.wait(&mut data); + } + + data.result.as_ref() + .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") + .clone() + } + /// Delegate session to other node. 
pub fn delegate(&self, master: NodeId, version: H256, is_shadow_decryption: bool) -> Result<(), Error> { if self.core.meta.master_node_id != self.core.meta.self_node_id { @@ -555,19 +561,6 @@ impl ClusterSession for SessionImpl { } } -impl Session for SessionImpl { - fn wait(&self) -> Result { - let mut data = self.data.lock(); - if !data.result.is_some() { - self.core.completed.wait(&mut data); - } - - data.result.as_ref() - .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") - .clone() - } -} - impl SessionCore { pub fn decryption_transport(&self) -> DecryptionJobTransport { DecryptionJobTransport { diff --git a/secret_store/src/key_server_cluster/client_sessions/encryption_session.rs b/secret_store/src/key_server_cluster/client_sessions/encryption_session.rs index 46fea1d99..83abbde86 100644 --- a/secret_store/src/key_server_cluster/client_sessions/encryption_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/encryption_session.rs @@ -26,14 +26,6 @@ use key_server_cluster::cluster_sessions::ClusterSession; use key_server_cluster::message::{Message, EncryptionMessage, InitializeEncryptionSession, ConfirmEncryptionInitialization, EncryptionSessionError}; -/// Encryption session API. -pub trait Session: Send + Sync + 'static { - /// Get encryption session state. - fn state(&self) -> SessionState; - /// Wait until session is completed. Returns distributely generated secret key. - fn wait(&self, timeout: Option) -> Result<(), Error>; -} - /// Encryption (distributed key generation) session. /// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: /// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf @@ -138,6 +130,27 @@ impl SessionImpl { &self.self_node_id } + /// Get session state. + pub fn state(&self) -> SessionState { + self.data.lock().state.clone() + } + + /// Wait for session completion. 
+ pub fn wait(&self, timeout: Option) -> Result<(), Error> { + let mut data = self.data.lock(); + if !data.result.is_some() { + match timeout { + None => self.completed.wait(&mut data), + Some(timeout) => { self.completed.wait_for(&mut data, timeout); }, + } + } + + data.result.as_ref() + .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") + .clone() + } + + /// Start new session initialization. This must be called on master node. pub fn initialize(&self, requestor_signature: Signature, common_point: Public, encrypted_point: Public) -> Result<(), Error> { let mut data = self.data.lock(); @@ -328,26 +341,6 @@ impl ClusterSession for SessionImpl { } } -impl Session for SessionImpl { - fn state(&self) -> SessionState { - self.data.lock().state.clone() - } - - fn wait(&self, timeout: Option) -> Result<(), Error> { - let mut data = self.data.lock(); - if !data.result.is_some() { - match timeout { - None => self.completed.wait(&mut data), - Some(timeout) => { self.completed.wait_for(&mut data, timeout); }, - } - } - - data.result.as_ref() - .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") - .clone() - } -} - impl Debug for SessionImpl { fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { write!(f, "Encryption session {} on {}", self.id, self.self_node_id) diff --git a/secret_store/src/key_server_cluster/client_sessions/generation_session.rs b/secret_store/src/key_server_cluster/client_sessions/generation_session.rs index 5c21d5786..01f82815c 100644 --- a/secret_store/src/key_server_cluster/client_sessions/generation_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/generation_session.rs @@ -27,16 +27,6 @@ use key_server_cluster::cluster_sessions::ClusterSession; use key_server_cluster::message::{Message, GenerationMessage, InitializeSession, ConfirmInitialization, CompleteInitialization, KeysDissemination, PublicKeyShare, SessionError, 
SessionCompleted}; -/// Key generation session API. -pub trait Session: Send + Sync + 'static { - /// Get generation session state. - fn state(&self) -> SessionState; - /// Wait until session is completed. Returns public portion of generated server key. - fn wait(&self, timeout: Option) -> Result; - /// Get joint public key (if it is known). - fn joint_public_and_secret(&self) -> Option>; -} - /// Distributed key generation session. /// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: /// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf @@ -226,6 +216,31 @@ impl SessionImpl { self.data.lock().simulate_faulty_behaviour = true; } + /// Get session state. + pub fn state(&self) -> SessionState { + self.data.lock().state.clone() + } + + /// Wait for session completion. + pub fn wait(&self, timeout: Option) -> Result { + let mut data = self.data.lock(); + if !data.joint_public_and_secret.is_some() { + match timeout { + None => self.completed.wait(&mut data), + Some(timeout) => { self.completed.wait_for(&mut data, timeout); }, + } + } + + data.joint_public_and_secret.clone() + .expect("checked above or waited for completed; completed is only signaled when joint_public.is_some(); qed") + .map(|p| p.0) + } + + /// Get generated public and secret (if any). + pub fn joint_public_and_secret(&self) -> Option> { + self.data.lock().joint_public_and_secret.clone() + } + /// Start new session initialization. This must be called on master node. 
pub fn initialize(&self, author: Public, threshold: usize, nodes: BTreeSet) -> Result<(), Error> { check_cluster_nodes(self.node(), &nodes)?; @@ -782,30 +797,6 @@ impl ClusterSession for SessionImpl { } } -impl Session for SessionImpl { - fn state(&self) -> SessionState { - self.data.lock().state.clone() - } - - fn wait(&self, timeout: Option) -> Result { - let mut data = self.data.lock(); - if !data.joint_public_and_secret.is_some() { - match timeout { - None => self.completed.wait(&mut data), - Some(timeout) => { self.completed.wait_for(&mut data, timeout); }, - } - } - - data.joint_public_and_secret.clone() - .expect("checked above or waited for completed; completed is only signaled when joint_public.is_some(); qed") - .map(|p| p.0) - } - - fn joint_public_and_secret(&self) -> Option> { - self.data.lock().joint_public_and_secret.clone() - } -} - impl EveryOtherNodeVisitor { pub fn new(self_id: &NodeId, nodes: I) -> Self where I: Iterator { EveryOtherNodeVisitor { @@ -883,7 +874,7 @@ pub mod tests { use key_server_cluster::message::{self, Message, GenerationMessage}; use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until, all_connections_established}; use key_server_cluster::cluster_sessions::ClusterSession; - use key_server_cluster::generation_session::{Session, SessionImpl, SessionState, SessionParams}; + use key_server_cluster::generation_session::{SessionImpl, SessionState, SessionParams}; use key_server_cluster::math; use key_server_cluster::math::tests::do_encryption_and_decryption; diff --git a/secret_store/src/key_server_cluster/client_sessions/signing_session.rs b/secret_store/src/key_server_cluster/client_sessions/signing_session.rs index 2f33160fa..542b3454c 100644 --- a/secret_store/src/key_server_cluster/client_sessions/signing_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/signing_session.rs @@ -23,7 +23,7 @@ use key_server_cluster::{Error, NodeId, SessionId, SessionMeta, AclStorage, Docu 
use key_server_cluster::cluster::{Cluster}; use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession}; use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionParams as GenerationSessionParams, - Session as GenerationSessionApi, SessionState as GenerationSessionState}; + SessionState as GenerationSessionState}; use key_server_cluster::message::{Message, SigningMessage, SigningConsensusMessage, SigningGenerationMessage, RequestPartialSignature, PartialSignature, SigningSessionCompleted, GenerationMessage, ConsensusMessage, SigningSessionError, InitializeConsensusSession, ConfirmConsensusInitialization, SigningSessionDelegation, SigningSessionDelegationCompleted}; @@ -32,12 +32,6 @@ use key_server_cluster::jobs::key_access_job::KeyAccessJob; use key_server_cluster::jobs::signing_job::{PartialSigningRequest, PartialSigningResponse, SigningJob}; use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession}; -/// Signing session API. -pub trait Session: Send + Sync + 'static { - /// Wait until session is completed. Returns signed message. - fn wait(&self) -> Result<(Secret, Secret), Error>; -} - /// Distributed signing session. /// Based on "Efficient Multi-Party Digital Signature using Adaptive Secret Sharing for Low-Power Devices in Wireless Network" paper. /// Brief overview: @@ -211,6 +205,18 @@ impl SessionImpl { self.data.lock().state } + /// Wait for session completion. + pub fn wait(&self) -> Result<(Secret, Secret), Error> { + let mut data = self.data.lock(); + if !data.result.is_some() { + self.core.completed.wait(&mut data); + } + + data.result.as_ref() + .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") + .clone() + } + /// Delegate session to other node. 
pub fn delegate(&self, master: NodeId, version: H256, message_hash: H256) -> Result<(), Error> { if self.core.meta.master_node_id != self.core.meta.self_node_id { @@ -680,19 +686,6 @@ impl ClusterSession for SessionImpl { } } -impl Session for SessionImpl { - fn wait(&self) -> Result<(Secret, Secret), Error> { - let mut data = self.data.lock(); - if !data.result.is_some() { - self.core.completed.wait(&mut data); - } - - data.result.as_ref() - .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") - .clone() - } -} - impl SessionKeyGenerationTransport { fn map_message(&self, message: Message) -> Result { match message { @@ -819,12 +812,11 @@ mod tests { use key_server_cluster::{NodeId, DummyKeyStorage, DocumentKeyShare, DocumentKeyShareVersion, SessionId, SessionMeta, Error, KeyStorage}; use key_server_cluster::cluster_sessions::ClusterSession; use key_server_cluster::cluster::tests::DummyCluster; - use key_server_cluster::generation_session::{Session as GenerationSession}; use key_server_cluster::generation_session::tests::MessageLoop as KeyGenerationMessageLoop; use key_server_cluster::math; use key_server_cluster::message::{Message, SigningMessage, SigningConsensusMessage, ConsensusMessage, ConfirmConsensusInitialization, SigningGenerationMessage, GenerationMessage, ConfirmInitialization, InitializeSession, RequestPartialSignature}; - use key_server_cluster::signing_session::{Session, SessionImpl, SessionState, SessionParams}; + use key_server_cluster::signing_session::{SessionImpl, SessionState, SessionParams}; struct Node { pub node_id: NodeId, diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 3d3724e13..5fbf47acc 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -29,17 +29,16 @@ use tokio_core::net::{TcpListener, TcpStream}; use ethkey::{Public, KeyPair, Signature, Random, 
Generator}; use bigint::hash::H256; use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair}; -use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, GenerationSessionWrapper, EncryptionSessionWrapper, - DecryptionSessionWrapper, SigningSessionWrapper, AdminSessionWrapper, KeyNegotiationSessionWrapper, SessionIdWithSubSession, +use key_server_cluster::cluster_sessions::{ClusterSession, AdminSession, ClusterSessions, SessionIdWithSubSession, ClusterSessionsContainer, SERVERS_SET_CHANGE_SESSION_ID, create_cluster_view, AdminSessionCreationData, ClusterSessionsListener}; use key_server_cluster::cluster_sessions_creator::{ClusterSessionCreator, IntoSessionId}; use key_server_cluster::message::{self, Message, ClusterMessage}; -use key_server_cluster::generation_session::{SessionImpl as GenerationSession, Session as GenerationSessionTrait}; -use key_server_cluster::decryption_session::{SessionImpl as DecryptionSession, Session as DecryptionSessionTrait}; -use key_server_cluster::encryption_session::{SessionImpl as EncryptionSession, Session as EncryptionSessionTrait}; -use key_server_cluster::signing_session::{SessionImpl as SigningSession, Session as SigningSessionTrait}; +use key_server_cluster::generation_session::{SessionImpl as GenerationSession}; +use key_server_cluster::decryption_session::{SessionImpl as DecryptionSession}; +use key_server_cluster::encryption_session::{SessionImpl as EncryptionSession}; +use key_server_cluster::signing_session::{SessionImpl as SigningSession}; use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession, - IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction, Session as KeyVersionNegotiationSessionTrait}; + IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction}; use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream, read_encrypted_message, 
WriteMessage, write_encrypted_message}; use key_server_cluster::net::{accept_connection as net_accept_connection, connect as net_connect, Connection as NetConnection}; @@ -74,7 +73,7 @@ pub trait ClusterClient: Send + Sync { /// Start new key version negotiation session. fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result>, Error>; /// Start new servers set change session. - fn new_servers_set_change_session(&self, session_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error>; + fn new_servers_set_change_session(&self, session_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error>; /// Listen for new generation sessions. fn add_generation_listener(&self, listener: Arc>); @@ -959,7 +958,7 @@ impl ClusterClient for ClusterClientImpl { Ok(session) } - fn new_servers_set_change_session(&self, session_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error> { + fn new_servers_set_change_session(&self, session_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error> { let mut connected_nodes = self.data.connections.connected_nodes(); connected_nodes.insert(self.data.self_key_pair.public().clone()); @@ -975,7 +974,7 @@ impl ClusterClient for ClusterClientImpl { .initialize(new_nodes_set, old_set_signature, new_set_signature); match initialization_result { - Ok(()) => Ok(AdminSessionWrapper::new(Arc::downgrade(&self.data), session_id, session)), + Ok(()) => Ok(session), Err(error) => { self.data.sessions.admin_sessions.remove(&session.id()); Err(error) @@ -1025,8 +1024,7 @@ pub mod tests { use key_server_cluster::message::Message; use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration}; use key_server_cluster::cluster_sessions::ClusterSession; - use 
key_server_cluster::generation_session::{Session as GenerationSession, SessionState as GenerationSessionState}; - use key_server_cluster::signing_session::Session as SigningSession; + use key_server_cluster::generation_session::SessionState as GenerationSessionState; #[derive(Debug)] pub struct DummyCluster { diff --git a/secret_store/src/key_server_cluster/cluster_sessions.rs b/secret_store/src/key_server_cluster/cluster_sessions.rs index ceec154da..7b1a9cfe6 100644 --- a/secret_store/src/key_server_cluster/cluster_sessions.rs +++ b/secret_store/src/key_server_cluster/cluster_sessions.rs @@ -24,16 +24,13 @@ use ethkey::{Public, Secret, Signature}; use key_server_cluster::{Error, NodeId, SessionId, EncryptedDocumentKeyShadow}; use key_server_cluster::cluster::{Cluster, ClusterData, ClusterConfiguration, ClusterView}; use key_server_cluster::message::{self, Message}; -use key_server_cluster::generation_session::{Session as GenerationSession, SessionImpl as GenerationSessionImpl, - SessionState as GenerationSessionState}; -use key_server_cluster::decryption_session::{Session as DecryptionSession, SessionImpl as DecryptionSessionImpl}; -use key_server_cluster::encryption_session::{Session as EncryptionSession, SessionImpl as EncryptionSessionImpl, - SessionState as EncryptionSessionState}; -use key_server_cluster::signing_session::{Session as SigningSession, SessionImpl as SigningSessionImpl}; -use key_server_cluster::share_add_session::{Session as ShareAddSession, SessionImpl as ShareAddSessionImpl, - IsolatedSessionTransport as ShareAddTransport}; -use key_server_cluster::servers_set_change_session::{Session as ServersSetChangeSession, SessionImpl as ServersSetChangeSessionImpl}; -use key_server_cluster::key_version_negotiation_session::{Session as KeyVersionNegotiationSession, SessionImpl as KeyVersionNegotiationSessionImpl, +use key_server_cluster::generation_session::{SessionImpl as GenerationSessionImpl, SessionState as GenerationSessionState}; +use 
key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl}; +use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl, SessionState as EncryptionSessionState}; +use key_server_cluster::signing_session::{SessionImpl as SigningSessionImpl}; +use key_server_cluster::share_add_session::{SessionImpl as ShareAddSessionImpl, IsolatedSessionTransport as ShareAddTransport}; +use key_server_cluster::servers_set_change_session::{SessionImpl as ServersSetChangeSessionImpl}; +use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl, IsolatedSessionTransport as VersionNegotiationTransport, ContinueAction}; use key_server_cluster::cluster_sessions_creator::{GenerationSessionCreator, EncryptionSessionCreator, DecryptionSessionCreator, SigningSessionCreator, @@ -169,66 +166,6 @@ pub enum ClusterSessionsContainerState { Exclusive, } -/// Generation session implementation, which removes session from cluster on drop. -pub struct GenerationSessionWrapper { - /// Wrapped session. - session: Arc, - /// Session Id. - session_id: SessionId, - /// Cluster data reference. - cluster: Weak, -} - -/// Encryption session implementation, which removes session from cluster on drop. -pub struct EncryptionSessionWrapper { - /// Wrapped session. - session: Arc, - /// Session Id. - session_id: SessionId, - /// Cluster data reference. - cluster: Weak, -} - -/// Decryption session implementation, which removes session from cluster on drop. -pub struct DecryptionSessionWrapper { - /// Wrapped session. - session: Arc, - /// Session Id. - session_id: SessionIdWithSubSession, - /// Cluster data reference. - cluster: Weak, -} - -/// Signing session implementation, which removes session from cluster on drop. -pub struct SigningSessionWrapper { - /// Wrapped session. - session: Arc, - /// Session Id. - session_id: SessionIdWithSubSession, - /// Cluster data reference. 
- cluster: Weak, -} - -/// Admin session implementation, which removes session from cluster on drop. -pub struct AdminSessionWrapper { - /// Wrapped session. - session: Arc, - /// Session Id. - session_id: SessionId, - /// Cluster data reference. - cluster: Weak, -} - -/// Key server version negotiation session implementation, which removes session from cluster on drop. -pub struct KeyNegotiationSessionWrapper { - /// Wrapped session. - session: Arc, - /// Session Id. - session_id: SessionIdWithSubSession, - /// Cluster data reference. - cluster: Weak, -} - impl ClusterSessions { /// Create new cluster sessions container. pub fn new(config: &ClusterConfiguration) -> Self { @@ -599,158 +536,6 @@ impl ClusterSession for AdminSession { } } } - -impl GenerationSessionWrapper { - pub fn new(cluster: Weak, session_id: SessionId, session: Arc) -> Arc { - Arc::new(GenerationSessionWrapper { - session: session, - session_id: session_id, - cluster: cluster, - }) - } -} - -impl GenerationSession for GenerationSessionWrapper { - fn state(&self) -> GenerationSessionState { - self.session.state() - } - - fn wait(&self, timeout: Option) -> Result { - self.session.wait(timeout) - } - - fn joint_public_and_secret(&self) -> Option> { - self.session.joint_public_and_secret() - } -} - -impl Drop for GenerationSessionWrapper { - fn drop(&mut self) { - if let Some(cluster) = self.cluster.upgrade() { - cluster.sessions().generation_sessions.remove(&self.session_id); - } - } -} - -impl EncryptionSessionWrapper { - pub fn new(cluster: Weak, session_id: SessionId, session: Arc) -> Arc { - Arc::new(EncryptionSessionWrapper { - session: session, - session_id: session_id, - cluster: cluster, - }) - } -} - -impl EncryptionSession for EncryptionSessionWrapper { - fn state(&self) -> EncryptionSessionState { - self.session.state() - } - - fn wait(&self, timeout: Option) -> Result<(), Error> { - self.session.wait(timeout) - } -} - -impl Drop for EncryptionSessionWrapper { - fn drop(&mut self) { - 
if let Some(cluster) = self.cluster.upgrade() { - cluster.sessions().encryption_sessions.remove(&self.session_id); - } - } -} - -impl DecryptionSessionWrapper { - pub fn new(cluster: Weak, session_id: SessionIdWithSubSession, session: Arc) -> Arc { - Arc::new(DecryptionSessionWrapper { - session: session, - session_id: session_id, - cluster: cluster, - }) - } -} - -impl DecryptionSession for DecryptionSessionWrapper { - fn wait(&self) -> Result { - self.session.wait() - } -} - -impl Drop for DecryptionSessionWrapper { - fn drop(&mut self) { - if let Some(cluster) = self.cluster.upgrade() { - cluster.sessions().decryption_sessions.remove(&self.session_id); - } - } -} - -impl SigningSessionWrapper { - pub fn new(cluster: Weak, session_id: SessionIdWithSubSession, session: Arc) -> Arc { - Arc::new(SigningSessionWrapper { - session: session, - session_id: session_id, - cluster: cluster, - }) - } -} - -impl SigningSession for SigningSessionWrapper { - fn wait(&self) -> Result<(Secret, Secret), Error> { - self.session.wait() - } -} - -impl Drop for SigningSessionWrapper { - fn drop(&mut self) { - if let Some(cluster) = self.cluster.upgrade() { - cluster.sessions().signing_sessions.remove(&self.session_id); - } - } -} - -impl AdminSessionWrapper { - pub fn new(cluster: Weak, session_id: SessionId, session: Arc) -> Arc { - Arc::new(AdminSessionWrapper { - session: session, - session_id: session_id, - cluster: cluster, - }) - } - - pub fn wait(&self) -> Result<(), Error> { - match *self.session { - AdminSession::ShareAdd(ref session) => session.wait(), - AdminSession::ServersSetChange(ref session) => session.wait(), - } - } -} - -impl ShareAddSession for AdminSessionWrapper { - fn wait(&self) -> Result<(), Error> { - match *self.session { - AdminSession::ShareAdd(ref session) => session.wait(), - _ => Err(Error::InvalidMessage), - } - } -} - -impl ServersSetChangeSession for AdminSessionWrapper { - fn wait(&self) -> Result<(), Error> { - match *self.session { - 
AdminSession::ServersSetChange(ref session) => session.wait(), - _ => Err(Error::InvalidMessage), - } - } -} - -impl Drop for AdminSessionWrapper { - fn drop(&mut self) { - if let Some(cluster) = self.cluster.upgrade() { - cluster.sessions().admin_sessions.remove(&self.session_id); - } - } -} - pub fn create_cluster_view(data: &Arc, requires_all_connections: bool) -> Result, Error> { if requires_all_connections { if !data.connections.disconnected_nodes().is_empty() { @@ -764,38 +549,6 @@ pub fn create_cluster_view(data: &Arc, requires_all_connections: bo Ok(Arc::new(ClusterView::new(data.clone(), connected_nodes))) } -impl KeyNegotiationSessionWrapper { - pub fn new(cluster: Weak, session_id: SessionIdWithSubSession, session: Arc) -> Arc { - Arc::new(KeyNegotiationSessionWrapper { - session: session, - session_id: session_id, - cluster: cluster, - }) - } -} - -impl KeyVersionNegotiationSession for KeyNegotiationSessionWrapper { - fn set_continue_action(&self, action: ContinueAction) { - self.session.set_continue_action(action) - } - - fn continue_action(&self) -> Option { - self.session.continue_action() - } - - fn wait(&self) -> Result<(H256, NodeId), Error> { - self.session.wait() - } -} - -impl Drop for KeyNegotiationSessionWrapper { - fn drop(&mut self) { - if let Some(cluster) = self.cluster.upgrade() { - cluster.sessions().negotiation_sessions.remove(&self.session_id); - } - } -} - #[cfg(test)] mod tests { diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index 8a2f777c0..bea212460 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -28,9 +28,6 @@ pub use super::key_server_set::KeyServerSet; pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic, SerializableMessageHash}; pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient}; pub use self::cluster_sessions::{ClusterSession, 
ClusterSessionsListener}; -pub use self::generation_session::Session as GenerationSession; -pub use self::encryption_session::Session as EncryptionSession; -pub use self::decryption_session::Session as DecryptionSession; #[cfg(test)] pub use super::node_key_pair::PlainNodeKeyPair; diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index e3fc83674..dcf615744 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -32,7 +32,7 @@ use bigint::prelude::U256; use util::Address; use key_server_set::KeyServerSet; use key_server_cluster::{ClusterClient, ClusterSessionsListener, ClusterSession}; -use key_server_cluster::generation_session::{SessionImpl as GenerationSession, Session as GenerationSessionTrait}; +use key_server_cluster::generation_session::SessionImpl as GenerationSession; use {ServerKeyId, NodeKeyPair, KeyServer}; /// Name of the SecretStore contract in the registry. From df3a8a92348d18b5e28d472d6330cc35ef50501d Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 22 Nov 2017 10:43:16 +0300 Subject: [PATCH 11/42] SecretStore: default ClusterSessionsListener impl --- .../src/key_server_cluster/cluster_sessions.rs | 4 ++-- .../src/listener/service_contract_listener.rs | 18 +++++++++++++++--- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/secret_store/src/key_server_cluster/cluster_sessions.rs b/secret_store/src/key_server_cluster/cluster_sessions.rs index 7b1a9cfe6..2c1081c3b 100644 --- a/secret_store/src/key_server_cluster/cluster_sessions.rs +++ b/secret_store/src/key_server_cluster/cluster_sessions.rs @@ -120,9 +120,9 @@ pub struct ClusterSessions { /// Active sessions container listener. pub trait ClusterSessionsListener: Send + Sync { /// When new session is inserted to the container. 
- fn on_session_inserted(&self, session: Arc); + fn on_session_inserted(&self, _session: Arc) {} /// When session is removed from the container. - fn on_session_removed(&self, session: Arc); + fn on_session_removed(&self, _session: Arc) {} } /// Active sessions container. diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index dcf615744..d53be7daa 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -117,6 +117,7 @@ enum ServiceTask { } impl ServiceContractListener { + /// Create new service contract listener. pub fn new(client: &Arc, sync: &Arc, key_server: Arc, cluster: Arc, self_key_pair: Arc, key_servers_set: Arc) -> Arc { let contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) .map(|a| { @@ -154,6 +155,7 @@ impl ServiceContractListener { contract } + /// Process incoming events of service contract. fn process_service_contract_events(&self, client: &Client, service_contract: Address, blocks: Vec) { debug_assert!(!blocks.is_empty()); @@ -188,6 +190,7 @@ impl ServiceContractListener { })); } + /// Service thread procedure. fn run_service_thread(data: Arc) { loop { let task = data.tasks_queue.wait(); @@ -203,6 +206,7 @@ impl ServiceContractListener { } } + /// Process single service task. fn process_service_task(data: &Arc, task: ServiceTask) -> Result<(), String> { match task { ServiceTask::Retry => @@ -239,6 +243,7 @@ impl ServiceContractListener { } } + /// Retry processing pending requests. fn retry_pending_requests(data: &Arc) -> Result { let client = data.client.upgrade().ok_or("client is required".to_owned())?; let retry_data = ::std::mem::replace(&mut *data.retry_data.lock(), Default::default()); @@ -298,6 +303,7 @@ impl ServiceContractListener { Ok(processed_requests) } + /// Generate server key. 
fn generate_server_key(data: &Arc, server_key_id: &ServerKeyId, threshold: &H256) -> Result { let threshold_num = threshold.low_u64(); if threshold != &threshold_num.into() || threshold_num >= ::std::usize::MAX as u64 { @@ -313,6 +319,7 @@ impl ServiceContractListener { .map_err(Into::into) } + /// Publish server key. fn publish_server_key(data: &Arc, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> { let server_key_hash = keccak(server_key); let signed_server_key = data.self_key_pair.sign(&server_key_hash).map_err(|e| format!("{}", e))?; @@ -381,13 +388,14 @@ impl ChainNotify for ServiceContractListener { } impl ClusterSessionsListener for ServiceContractListener { - fn on_session_inserted(&self, _session: Arc) { - } - fn on_session_removed(&self, session: Arc) { // TODO: only start if session started via the contract + // only publish when the session is started by another node + // when it is started by this node, it is published from process_service_task if !is_processed_by_this_key_server(&*self.data.key_servers_set, &*self.data.self_key_pair, &session.id()) { + // by this time sesion must already be completed - either successfully, or not + debug_assert!(session.is_finished()); session.wait(Some(Default::default())) .map_err(|e| format!("{}", e)) .and_then(|server_key| Self::publish_server_key(&self.data, &session.id(), &server_key)) @@ -406,6 +414,7 @@ impl ClusterSessionsListener for ServiceContractListener { } impl TasksQueue { + /// Create new tasks queue. pub fn new() -> Self { TasksQueue { service_event: Condvar::new(), @@ -413,12 +422,14 @@ impl TasksQueue { } } + /// Shutdown tasks queue. pub fn shutdown(&self) { let mut service_tasks = self.service_tasks.lock(); service_tasks.push_front(ServiceTask::Shutdown); self.service_event.notify_all(); } + //// Push new tasks to the queue. 
pub fn push(&self, tasks: I) where I: Iterator { let mut service_tasks = self.service_tasks.lock(); service_tasks.extend(tasks); @@ -427,6 +438,7 @@ impl TasksQueue { } } + /// Wait for new task. pub fn wait(&self) -> ServiceTask { let mut service_tasks = self.service_tasks.lock(); if service_tasks.is_empty() { From 7da66c24f0118e3d569d665d32db0432d9064cbf Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 22 Nov 2017 11:09:48 +0300 Subject: [PATCH 12/42] SecretStore: store joint_public in key_storage --- .../key_version_negotiation_session.rs | 1 + .../admin_sessions/share_add_session.rs | 11 ++++++++++- .../client_sessions/decryption_session.rs | 3 +++ .../client_sessions/generation_session.rs | 10 +++++++++- .../client_sessions/signing_session.rs | 2 ++ secret_store/src/key_server_cluster/message.rs | 2 ++ secret_store/src/key_storage.rs | 12 +++++++++++- 7 files changed, 38 insertions(+), 3 deletions(-) diff --git a/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs b/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs index f755ae089..f86275f3a 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs +++ b/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs @@ -700,6 +700,7 @@ mod tests { nodes.values().nth(0).unwrap().insert(Default::default(), DocumentKeyShare { author: Default::default(), threshold: 1, + public: Default::default(), common_point: None, encrypted_point: None, versions: vec![DocumentKeyShareVersion { diff --git a/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs b/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs index a31df24c6..5fcc18a28 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs +++ b/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs @@ -90,6 +90,8 @@ struct SessionData { pub 
key_share_threshold: Option, /// NewKeyShare: author. pub key_share_author: Option, + /// NewKeyShare: joint public. + pub key_share_joint_public: Option, /// NewKeyShare: Common (shared) encryption point. pub key_share_common_point: Option, /// NewKeyShare: Encrypted point. @@ -167,6 +169,7 @@ impl SessionImpl where T: SessionTransport { consensus_session: None, key_share_threshold: None, key_share_author: None, + key_share_joint_public: None, key_share_common_point: None, key_share_encrypted_point: None, id_numbers: None, @@ -435,7 +438,9 @@ impl SessionImpl where T: SessionTransport { } // we only expect this message once - if data.key_share_threshold.is_some() || data.key_share_author.is_some() || data.key_share_common_point.is_some() || data.key_share_encrypted_point.is_some() { + if data.key_share_threshold.is_some() || data.key_share_author.is_some() || + data.key_share_common_point.is_some() || data.key_share_encrypted_point.is_some() || + data.key_share_joint_public.is_some() { return Err(Error::InvalidStateForRequest); } @@ -452,6 +457,7 @@ impl SessionImpl where T: SessionTransport { data.state = SessionState::WaitingForKeysDissemination; data.key_share_threshold = Some(message.threshold); data.key_share_author = Some(message.author.clone().into()); + data.key_share_joint_public = Some(message.joint_public.clone().into()); data.key_share_common_point = message.common_point.clone().map(Into::into); data.key_share_encrypted_point = message.encrypted_point.clone().map(Into::into); @@ -624,6 +630,7 @@ impl SessionImpl where T: SessionTransport { session_nonce: core.nonce, threshold: old_key_share.threshold, author: old_key_share.author.clone().into(), + joint_public: old_key_share.public.clone().into(), common_point: old_key_share.common_point.clone().map(Into::into), encrypted_point: old_key_share.encrypted_point.clone().map(Into::into), id_numbers: old_key_version.id_numbers.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(), @@ -703,6 
+710,8 @@ impl SessionImpl where T: SessionTransport { .expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed"), threshold: data.key_share_threshold.clone() .expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed"), + public: data.key_share_joint_public.clone() + .expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed"), common_point: data.key_share_common_point.clone(), encrypted_point: data.key_share_encrypted_point.clone(), versions: Vec::new(), diff --git a/secret_store/src/key_server_cluster/client_sessions/decryption_session.rs b/secret_store/src/key_server_cluster/client_sessions/decryption_session.rs index 61055612a..58ae20661 100644 --- a/secret_store/src/key_server_cluster/client_sessions/decryption_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/decryption_session.rs @@ -685,6 +685,7 @@ mod tests { let encrypted_datas: Vec<_> = (0..5).map(|i| DocumentKeyShare { author: Public::default(), threshold: 3, + public: Default::default(), common_point: Some(common_point.clone()), encrypted_point: Some(encrypted_point.clone()), versions: vec![DocumentKeyShareVersion { @@ -756,6 +757,7 @@ mod tests { key_share: Some(DocumentKeyShare { author: Public::default(), threshold: 0, + public: Default::default(), common_point: Some(Random.generate().unwrap().public().clone()), encrypted_point: Some(Random.generate().unwrap().public().clone()), versions: vec![DocumentKeyShareVersion { @@ -809,6 +811,7 @@ mod tests { key_share: Some(DocumentKeyShare { author: Public::default(), threshold: 2, + public: Default::default(), common_point: Some(Random.generate().unwrap().public().clone()), encrypted_point: Some(Random.generate().unwrap().public().clone()), versions: vec![DocumentKeyShareVersion { diff --git a/secret_store/src/key_server_cluster/client_sessions/generation_session.rs 
b/secret_store/src/key_server_cluster/client_sessions/generation_session.rs index 01f82815c..84c1b43e5 100644 --- a/secret_store/src/key_server_cluster/client_sessions/generation_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/generation_session.rs @@ -517,10 +517,17 @@ impl SessionImpl { return Err(Error::InvalidMessage); } + // calculate joint public key + let joint_public = { + let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed")); + math::compute_joint_public(public_shares)? + }; + // save encrypted data to key storage let encrypted_data = DocumentKeyShare { author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(), threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), + public: joint_public, common_point: None, encrypted_point: None, versions: vec![DocumentKeyShareVersion::new( @@ -677,7 +684,7 @@ impl SessionImpl { fn complete_generation(&self) -> Result<(), Error> { let mut data = self.data.lock(); - // else - calculate joint public key + // calculate joint public key let joint_public = { let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed")); math::compute_joint_public(public_shares)? 
@@ -687,6 +694,7 @@ impl SessionImpl { let encrypted_data = DocumentKeyShare { author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(), threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), + public: joint_public.clone(), common_point: None, encrypted_point: None, versions: vec![DocumentKeyShareVersion::new( diff --git a/secret_store/src/key_server_cluster/client_sessions/signing_session.rs b/secret_store/src/key_server_cluster/client_sessions/signing_session.rs index 542b3454c..a0895ceb0 100644 --- a/secret_store/src/key_server_cluster/client_sessions/signing_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/signing_session.rs @@ -978,6 +978,7 @@ mod tests { key_share: Some(DocumentKeyShare { author: Public::default(), threshold: 0, + public: Default::default(), common_point: Some(Random.generate().unwrap().public().clone()), encrypted_point: Some(Random.generate().unwrap().public().clone()), versions: vec![DocumentKeyShareVersion { @@ -1031,6 +1032,7 @@ mod tests { key_share: Some(DocumentKeyShare { author: Public::default(), threshold: 2, + public: Default::default(), common_point: Some(Random.generate().unwrap().public().clone()), encrypted_point: Some(Random.generate().unwrap().public().clone()), versions: vec![DocumentKeyShareVersion { diff --git a/secret_store/src/key_server_cluster/message.rs b/secret_store/src/key_server_cluster/message.rs index 357786725..b8cd98643 100644 --- a/secret_store/src/key_server_cluster/message.rs +++ b/secret_store/src/key_server_cluster/message.rs @@ -773,6 +773,8 @@ pub struct KeyShareCommon { pub threshold: usize, /// Author of key share entry. pub author: SerializablePublic, + /// Joint public. + pub joint_public: SerializablePublic, /// Common (shared) encryption point. pub common_point: Option, /// Encrypted point. 
diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index fec58de2b..3b1e0e6f2 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -40,6 +40,8 @@ pub struct DocumentKeyShare { pub author: Public, /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). pub threshold: usize, + /// Server public key. + pub public: Public, /// Common (shared) encryption point. pub common_point: Option, /// Encrypted point. @@ -122,10 +124,12 @@ struct SerializableDocumentKeyShareV1 { /// V2 of encrypted key share, as it is stored by key storage on the single key server. #[derive(Serialize, Deserialize)] struct SerializableDocumentKeyShareV2 { - /// Authore of the entry. + /// Author of the entry. pub author: SerializablePublic, /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). pub threshold: usize, + /// Server public. + pub public: SerializablePublic, /// Common (shared) encryption point. pub common_point: Option, /// Encrypted point. @@ -174,6 +178,7 @@ fn upgrade_db(db: Database) -> Result { // in v0 there have been only simultaneous GenEnc sessions. 
author: Public::default().into(), // added in v1 threshold: v0_key.threshold, + public: Public::default().into(), // addded in v2 common_point: Some(v0_key.common_point), encrypted_point: Some(v0_key.encrypted_point), versions: vec![CurrentSerializableDocumentKeyVersion { @@ -196,6 +201,7 @@ fn upgrade_db(db: Database) -> Result { let current_key = CurrentSerializableDocumentKeyShare { author: v1_key.author, // added in v1 threshold: v1_key.threshold, + public: Public::default().into(), // addded in v2 common_point: v1_key.common_point, encrypted_point: v1_key.encrypted_point, versions: vec![CurrentSerializableDocumentKeyVersion { @@ -329,6 +335,7 @@ impl From for SerializableDocumentKeyShareV2 { SerializableDocumentKeyShareV2 { author: key.author.into(), threshold: key.threshold, + public: key.public.into(), common_point: key.common_point.map(Into::into), encrypted_point: key.encrypted_point.map(Into::into), versions: key.versions.into_iter().map(Into::into).collect(), @@ -351,6 +358,7 @@ impl From for DocumentKeyShare { DocumentKeyShare { author: key.author.into(), threshold: key.threshold, + public: key.public.into(), common_point: key.common_point.map(Into::into), encrypted_point: key.encrypted_point.map(Into::into), versions: key.versions.into_iter() @@ -442,6 +450,7 @@ pub mod tests { let value1 = DocumentKeyShare { author: Public::default(), threshold: 100, + public: Public::default(), common_point: Some(Random.generate().unwrap().public().clone()), encrypted_point: Some(Random.generate().unwrap().public().clone()), versions: vec![DocumentKeyShareVersion { @@ -456,6 +465,7 @@ pub mod tests { let value2 = DocumentKeyShare { author: Public::default(), threshold: 200, + public: Public::default(), common_point: Some(Random.generate().unwrap().public().clone()), encrypted_point: Some(Random.generate().unwrap().public().clone()), versions: vec![DocumentKeyShareVersion { From 85e150dbb51ff1a0d27755aaef9ce9c95bcd00b9 Mon Sep 17 00:00:00 2001 From: Svyatoslav 
Nikolsky Date: Wed, 22 Nov 2017 11:43:01 +0300 Subject: [PATCH 13/42] SecretStore: restore server key --- secret_store/src/lib.rs | 12 +- .../src/listener/service_contract_listener.rs | 117 ++++++++++++------ 2 files changed, 87 insertions(+), 42 deletions(-) diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 09d4ce774..a0af4f072 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -79,13 +79,21 @@ pub fn start(client: Arc, sync: Arc, self_key_pair: Arc Some(listener::http_listener::KeyServerHttpListener::start(listener_address, key_server.clone())?), None => None, }; - let contract_listener = listener::service_contract_listener::ServiceContractListener::new(&client, &sync, key_server.clone(), cluster, self_key_pair, key_server_set); + let contract_listener = listener::service_contract_listener::ServiceContractListener::new(listener::service_contract_listener::ServiceContractListenerParams { + client: Arc::downgrade(&client), + sync: Arc::downgrade(&sync), + key_server: key_server.clone(), + self_key_pair: self_key_pair, + key_servers_set: key_server_set, + cluster: cluster, + key_storage: key_storage, + }); let listener = listener::Listener::new(key_server, http_listener, Some(contract_listener)); Ok(Box::new(listener)) } diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index d53be7daa..33fde4eed 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -33,6 +33,7 @@ use util::Address; use key_server_set::KeyServerSet; use key_server_cluster::{ClusterClient, ClusterSessionsListener, ClusterSession}; use key_server_cluster::generation_session::SessionImpl as GenerationSession; +use key_storage::KeyStorage; use {ServerKeyId, NodeKeyPair, KeyServer}; /// Name of the SecretStore contract in the registry. 
@@ -66,14 +67,8 @@ pub struct ServiceContractListener { service_handle: Option>, } -/// Service contract listener data. -struct ServiceContractListenerData { - /// Blocks since last retry. - pub last_retry: AtomicUsize, - /// Retry-related data. - pub retry_data: Mutex, - /// Contract. - pub contract: RwLock, +/// Service contract listener parameters. +pub struct ServiceContractListenerParams { /// Blockchain client. pub client: Weak, /// Sync provider. @@ -84,8 +79,24 @@ struct ServiceContractListenerData { pub self_key_pair: Arc, /// Key servers set. pub key_servers_set: Arc, + /// Cluster reference. + pub cluster: Arc, + /// Key storage reference. + pub key_storage: Arc, +} + +/// Service contract listener data. +struct ServiceContractListenerData { + /// Blocks since last retry. + pub last_retry: AtomicUsize, + /// Retry-related data. + pub retry_data: Mutex, + /// Contract. + pub contract: RwLock, /// Service tasks queue. pub tasks_queue: Arc, + /// Cluster params. + pub params: ServiceContractListenerParams, } /// Retry-related data. @@ -111,18 +122,21 @@ enum ServiceTask { /// Generate server key (server_key_id, threshold). GenerateServerKey(H256, H256), /// Confirm server key (server_key_id). - ConfirmServerKey(H256), + RestoreServerKey(H256), /// Shutdown listener. Shutdown, } impl ServiceContractListener { /// Create new service contract listener. 
- pub fn new(client: &Arc, sync: &Arc, key_server: Arc, cluster: Arc, self_key_pair: Arc, key_servers_set: Arc) -> Arc { + pub fn new(params: ServiceContractListenerParams) -> Arc { + let client = params.client.upgrade().expect("client is active in constructor; qed"); + let sync = params.sync.upgrade().expect("sync is active in constructor; qed"); let contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) - .map(|a| { - trace!(target: "secretstore", "{}: installing service contract from address {}", self_key_pair.public(), a); - a + .map(|address| { + trace!(target: "secretstore", "{}: installing service contract from address {}", + params.self_key_pair.public(), address); + address }) .unwrap_or_default(); @@ -131,12 +145,8 @@ impl ServiceContractListener { last_retry: AtomicUsize::new(0), retry_data: Default::default(), contract: RwLock::new(SecretStoreService::new(contract_addr)), - client: Arc::downgrade(client), - sync: Arc::downgrade(sync), - key_server: key_server, - self_key_pair: self_key_pair, - key_servers_set: key_servers_set, tasks_queue: Arc::new(TasksQueue::new()), + params: params, }); // retry on restart @@ -151,7 +161,7 @@ impl ServiceContractListener { service_handle: Some(service_handle), }); client.add_notify(contract.clone()); - cluster.add_generation_listener(contract.clone()); + contract.data.params.cluster.add_generation_listener(contract.clone()); contract } @@ -176,7 +186,14 @@ impl ServiceContractListener { // schedule correct requests if they're intended to be processed by this KeyServer self.data.tasks_queue.push(request_logs.into_iter() .filter_map(|r| match r.entry.topics.len() { - 3 if is_processed_by_this_key_server(&*self.data.key_servers_set, &*self.data.self_key_pair, &r.entry.topics[1]) => { + // when key is already generated && we have this key + 3 if self.data.params.key_storage.get(&r.entry.topics[1]).map(|k| k.is_some()).unwrap_or_default() => { + Some(ServiceTask::RestoreServerKey( + 
r.entry.topics[1], + )) + } + // when key is not yet generated && this node should be master of this key generation session + 3 if is_processed_by_this_key_server(&*self.data.params.key_servers_set, &*self.data.params.self_key_pair, &r.entry.topics[1]) => { Some(ServiceTask::GenerateServerKey( r.entry.topics[1], r.entry.topics[2], @@ -194,7 +211,7 @@ impl ServiceContractListener { fn run_service_thread(data: Arc) { loop { let task = data.tasks_queue.wait(); - trace!(target: "secretstore", "{}: processing {:?} task",data.self_key_pair.public(), task); + trace!(target: "secretstore", "{}: processing {:?} task", data.params.self_key_pair.public(), task); match task { ServiceTask::Shutdown => break, @@ -214,28 +231,42 @@ impl ServiceContractListener { .map(|processed_requests| { if processed_requests != 0 { trace!(target: "secretstore", "{}: successfully retried {} pending requests", - data.self_key_pair.public(), processed_requests); + data.params.self_key_pair.public(), processed_requests); } () }) .map_err(|error| { warn!(target: "secretstore", "{}: retrying pending requests has failed with: {}", - data.self_key_pair.public(), error); + data.params.self_key_pair.public(), error); error }), - ServiceTask::ConfirmServerKey(_) => Err("not implemented".to_owned()), // TODO + ServiceTask::RestoreServerKey(server_key_id) => { + data.retry_data.lock().generated_keys.insert(server_key_id.clone()); + Self::restore_server_key(&data, &server_key_id) + .and_then(|server_key| Self::publish_server_key(&data, &server_key_id, &server_key)) + .map(|_| { + trace!(target: "secretstore", "{}: processed RestoreServerKey({}) request", + data.params.self_key_pair.public(), server_key_id); + () + }) + .map_err(|error| { + warn!(target: "secretstore", "{}: failed to process RestoreServerKey({}) request with: {}", + data.params.self_key_pair.public(), server_key_id, error); + error + }) + } ServiceTask::GenerateServerKey(server_key_id, threshold) => { 
data.retry_data.lock().generated_keys.insert(server_key_id.clone()); Self::generate_server_key(&data, &server_key_id, &threshold) .and_then(|server_key| Self::publish_server_key(&data, &server_key_id, &server_key)) .map(|_| { - trace!(target: "secretstore", "{}: started processing GenerateServerKey({}, {}) request", - data.self_key_pair.public(), server_key_id, threshold); + trace!(target: "secretstore", "{}: processed GenerateServerKey({}, {}) request", + data.params.self_key_pair.public(), server_key_id, threshold); () }) .map_err(|error| { - warn!(target: "secretstore", "{}: failed to start processing GenerateServerKey({}, {}) request with: {}", - data.self_key_pair.public(), server_key_id, threshold, error); + warn!(target: "secretstore", "{}: failed to process GenerateServerKey({}, {}) request with: {}", + data.params.self_key_pair.public(), server_key_id, threshold, error); error }) }, @@ -245,7 +276,7 @@ impl ServiceContractListener { /// Retry processing pending requests. fn retry_pending_requests(data: &Arc) -> Result { - let client = data.client.upgrade().ok_or("client is required".to_owned())?; + let client = data.params.client.upgrade().ok_or("client is required".to_owned())?; let retry_data = ::std::mem::replace(&mut *data.retry_data.lock(), Default::default()); let contract = data.contract.read(); @@ -266,7 +297,7 @@ impl ServiceContractListener { // read request from the contract let (server_key_id, threshold, is_confirmed) = contract.get_server_key_generation_request(&do_call, - public_to_address(data.self_key_pair.public()), + public_to_address(data.params.self_key_pair.public()), generate_server_key_request_index).wait()?; generate_server_key_request_index = generate_server_key_request_index + 1.into(); @@ -283,10 +314,10 @@ impl ServiceContractListener { } // process request - let is_own_request = is_processed_by_this_key_server(&*data.key_servers_set, &*data.self_key_pair, &server_key_id); + let is_own_request = 
is_processed_by_this_key_server(&*data.params.key_servers_set, &*data.params.self_key_pair, &server_key_id); let request_result = Self::process_service_task(data, match is_own_request { true => ServiceTask::GenerateServerKey(server_key_id, threshold.into()), - false => ServiceTask::ConfirmServerKey(server_key_id), + false => ServiceTask::RestoreServerKey(server_key_id), }); // process request result @@ -315,14 +346,19 @@ impl ServiceContractListener { // => this API (server key generation) is not suitable for usage in encryption via contract endpoint let author_key = Random.generate().map_err(|e| format!("{}", e))?; let server_key_id_signature = sign(author_key.secret(), server_key_id).map_err(|e| format!("{}", e))?; - data.key_server.generate_key(server_key_id, &server_key_id_signature, threshold_num as usize) + data.params.key_server.generate_key(server_key_id, &server_key_id_signature, threshold_num as usize) .map_err(Into::into) } + /// Restore server key. + fn restore_server_key(data: &Arc, server_key_id: &ServerKeyId) -> Result { + unimplemented!() + } + /// Publish server key. 
fn publish_server_key(data: &Arc, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> { let server_key_hash = keccak(server_key); - let signed_server_key = data.self_key_pair.sign(&server_key_hash).map_err(|e| format!("{}", e))?; + let signed_server_key = data.params.self_key_pair.sign(&server_key_hash).map_err(|e| format!("{}", e))?; let signed_server_key: Signature = signed_server_key.into_electrum().into(); let transaction_data = data.contract.read().encode_server_key_generated_input(server_key_id.clone(), server_key.to_vec(), @@ -333,7 +369,7 @@ impl ServiceContractListener { let contract = data.contract.read(); if contract.address != Default::default() { - if let Some(client) = data.client.upgrade() { + if let Some(client) = data.params.client.upgrade() { client.transact_contract( contract.address.clone(), transaction_data @@ -359,7 +395,7 @@ impl ChainNotify for ServiceContractListener { fn new_blocks(&self, _imported: Vec, _invalid: Vec, enacted: Vec, _retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { let enacted_len = enacted.len(); if enacted_len != 0 { - if let (Some(client), Some(sync)) = (self.data.client.upgrade(), self.data.sync.upgrade()) { + if let (Some(client), Some(sync)) = (self.data.params.client.upgrade(), self.data.params.sync.upgrade()) { // do nothing until synced if sync.status().is_syncing(client.queue_info()) { return; @@ -368,7 +404,8 @@ impl ChainNotify for ServiceContractListener { // update contract address from registry if let Some(service_contract_addr) = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) { if self.data.contract.read().address != service_contract_addr { - trace!(target: "secretstore", "{}: installing service contract from address {}", self.data.self_key_pair.public(), service_contract_addr); + trace!(target: "secretstore", "{}: installing service contract from address {}", + self.data.params.self_key_pair.public(), service_contract_addr); *self.data.contract.write() 
= SecretStoreService::new(service_contract_addr.clone()); } @@ -393,7 +430,7 @@ impl ClusterSessionsListener for ServiceContractListener { // only publish when the session is started by another node // when it is started by this node, it is published from process_service_task - if !is_processed_by_this_key_server(&*self.data.key_servers_set, &*self.data.self_key_pair, &session.id()) { + if !is_processed_by_this_key_server(&*self.data.params.key_servers_set, &*self.data.params.self_key_pair, &session.id()) { // by this time sesion must already be completed - either successfully, or not debug_assert!(session.is_finished()); session.wait(Some(Default::default())) @@ -401,12 +438,12 @@ impl ClusterSessionsListener for ServiceContractListener { .and_then(|server_key| Self::publish_server_key(&self.data, &session.id(), &server_key)) .map(|_| { trace!(target: "secretstore", "{}: completed foreign GenerateServerKey({}) request", - self.data.self_key_pair.public(), session.id()); + self.data.params.self_key_pair.public(), session.id()); () }) .map_err(|error| { warn!(target: "secretstore", "{}: failed to process GenerateServerKey({}) request with: {}", - self.data.self_key_pair.public(), session.id(), error); + self.data.params.self_key_pair.public(), session.id(), error); error }); } From a2c12ab13efd621f917c2bfb7098cf2fad1bd4bc Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 22 Nov 2017 11:47:15 +0300 Subject: [PATCH 14/42] SecretStore: fixed warnings --- .../admin_sessions/share_add_session.rs | 11 ----------- .../client_sessions/encryption_session.rs | 5 ----- secret_store/src/key_server_cluster/cluster.rs | 5 ----- .../src/key_server_cluster/cluster_sessions.rs | 10 +++++----- .../src/listener/service_contract_listener.rs | 8 +++++--- 5 files changed, 10 insertions(+), 29 deletions(-) diff --git a/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs b/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs index 
5fcc18a28..3156d6f87 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs +++ b/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs @@ -179,17 +179,6 @@ impl SessionImpl where T: SessionTransport { }) } - /// Wait for session completion. - pub fn wait(&self) -> Result<(), Error> { - let mut data = self.data.lock(); - if !data.result.is_some() { - self.core.completed.wait(&mut data); - } - - data.result.clone() - .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") - } - /// Set pre-established consensus data. pub fn set_consensus_output(&self, version: &H256, consensus_group: BTreeSet, mut new_nodes_map: BTreeMap>) -> Result<(), Error> { let mut data = self.data.lock(); diff --git a/secret_store/src/key_server_cluster/client_sessions/encryption_session.rs b/secret_store/src/key_server_cluster/client_sessions/encryption_session.rs index 83abbde86..1cc6ad9f3 100644 --- a/secret_store/src/key_server_cluster/client_sessions/encryption_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/encryption_session.rs @@ -130,11 +130,6 @@ impl SessionImpl { &self.self_node_id } - /// Get session state. - pub fn state(&self) -> SessionState { - self.data.lock().state.clone() - } - /// Wait for session completion. pub fn wait(&self, timeout: Option) -> Result<(), Error> { let mut data = self.data.lock(); diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 5fbf47acc..c8f24034a 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -741,11 +741,6 @@ impl ClusterData { self.connections.get(node) } - /// Get sessions reference. - pub fn sessions(&self) -> &ClusterSessions { - &self.sessions - } - /// Spawns a future using thread pool and schedules execution of it with event loop handle. 
pub fn spawn(&self, f: F) where F: Future + Send + 'static, F::Item: Send + 'static, F::Error: Send + 'static { let pool_work = self.pool.spawn(f); diff --git a/secret_store/src/key_server_cluster/cluster_sessions.rs b/secret_store/src/key_server_cluster/cluster_sessions.rs index 2c1081c3b..2e83c407d 100644 --- a/secret_store/src/key_server_cluster/cluster_sessions.rs +++ b/secret_store/src/key_server_cluster/cluster_sessions.rs @@ -20,18 +20,18 @@ use std::sync::atomic::AtomicBool; use std::collections::{VecDeque, BTreeMap}; use parking_lot::{Mutex, RwLock}; use bigint::hash::H256; -use ethkey::{Public, Secret, Signature}; -use key_server_cluster::{Error, NodeId, SessionId, EncryptedDocumentKeyShadow}; +use ethkey::{Secret, Signature}; +use key_server_cluster::{Error, NodeId, SessionId}; use key_server_cluster::cluster::{Cluster, ClusterData, ClusterConfiguration, ClusterView}; use key_server_cluster::message::{self, Message}; -use key_server_cluster::generation_session::{SessionImpl as GenerationSessionImpl, SessionState as GenerationSessionState}; +use key_server_cluster::generation_session::{SessionImpl as GenerationSessionImpl}; use key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl}; -use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl, SessionState as EncryptionSessionState}; +use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl}; use key_server_cluster::signing_session::{SessionImpl as SigningSessionImpl}; use key_server_cluster::share_add_session::{SessionImpl as ShareAddSessionImpl, IsolatedSessionTransport as ShareAddTransport}; use key_server_cluster::servers_set_change_session::{SessionImpl as ServersSetChangeSessionImpl}; use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl, - IsolatedSessionTransport as VersionNegotiationTransport, ContinueAction}; + IsolatedSessionTransport as VersionNegotiationTransport}; use 
key_server_cluster::cluster_sessions_creator::{GenerationSessionCreator, EncryptionSessionCreator, DecryptionSessionCreator, SigningSessionCreator, KeyVersionNegotiationSessionCreator, AdminSessionCreator, SessionCreatorCore, ClusterSessionCreator}; diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index 33fde4eed..d77cc8135 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -351,8 +351,8 @@ impl ServiceContractListener { } /// Restore server key. - fn restore_server_key(data: &Arc, server_key_id: &ServerKeyId) -> Result { - unimplemented!() + fn restore_server_key(_data: &Arc, _server_key_id: &ServerKeyId) -> Result { + unimplemented!() // TODO } /// Publish server key. @@ -433,7 +433,9 @@ impl ClusterSessionsListener for ServiceContractListener { if !is_processed_by_this_key_server(&*self.data.params.key_servers_set, &*self.data.params.self_key_pair, &session.id()) { // by this time sesion must already be completed - either successfully, or not debug_assert!(session.is_finished()); - session.wait(Some(Default::default())) + + // ignore result - the only thing that we can do is to log the error + let _ = session.wait(Some(Default::default())) .map_err(|e| format!("{}", e)) .and_then(|server_key| Self::publish_server_key(&self.data, &session.id(), &server_key)) .map(|_| { From 5d6abfe2f59d95a8183ba7b42bfb7f6cdabe121d Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 22 Nov 2017 11:51:51 +0300 Subject: [PATCH 15/42] SecretStore: implemented restore_server_key --- secret_store/src/listener/service_contract_listener.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index d77cc8135..62a2a387a 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ 
b/secret_store/src/listener/service_contract_listener.rs @@ -351,8 +351,11 @@ impl ServiceContractListener { } /// Restore server key. - fn restore_server_key(_data: &Arc, _server_key_id: &ServerKeyId) -> Result { - unimplemented!() // TODO + fn restore_server_key(data: &Arc, server_key_id: &ServerKeyId) -> Result { + data.params.key_storage.get(server_key_id) + .map_err(|e| format!("{}", e)) + .and_then(|ks| ks.ok_or("missing key".to_owned())) + .map(|ks| ks.public) } /// Publish server key. From ea9c8a174c6f4fb313213eaefbe6bebb80d71204 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 22 Nov 2017 17:31:34 +0300 Subject: [PATCH 16/42] SecretStore: started adding tests for ServiceContractListener --- .../res/secretstore_service.json | 7 +- secret_store/src/key_server.rs | 9 +- .../src/key_server_cluster/cluster.rs | 36 +- secret_store/src/key_server_cluster/mod.rs | 2 + secret_store/src/key_storage.rs | 1 + secret_store/src/lib.rs | 7 +- secret_store/src/listener/http_listener.rs | 2 +- secret_store/src/listener/mod.rs | 17 + secret_store/src/listener/service_contract.rs | 306 +++++++++++++++ .../src/listener/service_contract_listener.rs | 363 +++++++++++------- 10 files changed, 609 insertions(+), 141 deletions(-) create mode 100644 secret_store/src/listener/service_contract.rs diff --git a/ethcore/native_contracts/res/secretstore_service.json b/ethcore/native_contracts/res/secretstore_service.json index 48d9adaa7..fecf5ca14 100644 --- a/ethcore/native_contracts/res/secretstore_service.json +++ b/ethcore/native_contracts/res/secretstore_service.json @@ -1 +1,6 @@ 
-[{"constant":true,"inputs":[],"name":"serverKeyGenerationRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"authority","type":"address"},{"name":"index","type":"uint256"}],"name":"getServerKeyGenerationRequest","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"uint256"},{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"threshold","type":"uint256"}],"name":"generateServerKey","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"constant":true,"inputs":[],"name":"serverKeyGenerationFee","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[],"name":"drain","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"},{"name":"v","type":"uint8"},{"name":"r","type":"bytes32"},{"name":"s","type":"bytes32"}],"name":"serverKeyGenerated","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"serverKeyId","type":"bytes32"},{"indexed":true,"name":"threshold","type":"uint256"}],"name":"ServerKeyRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"serverKeyPublic","type":"bytes"}],"name":"ServerKeyGenerated","type":"event"}] \ No newline at end of file +[ + {"constant":true,"inputs":[],"name":"serverKeyGenerationRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"}, + 
{"constant":true,"inputs":[{"name":"authority","type":"address"},{"name":"index","type":"uint256"}],"name":"getServerKeyGenerationRequest","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"uint256"},{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"}, + {"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"},{"name":"v","type":"uint8"},{"name":"r","type":"bytes32"},{"name":"s","type":"bytes32"}],"name":"serverKeyGenerated","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, + {"anonymous":false,"inputs":[{"indexed":true,"name":"serverKeyId","type":"bytes32"},{"indexed":true,"name":"threshold","type":"uint256"}],"name":"ServerKeyRequested","type":"event"} +] \ No newline at end of file diff --git a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index 6a68abe46..2908afd64 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -204,6 +204,7 @@ pub mod tests { use std::collections::BTreeSet; use std::time; use std::sync::Arc; + use std::sync::atomic::{AtomicUsize, Ordering}; use std::net::SocketAddr; use std::collections::BTreeMap; use ethcrypto; @@ -219,7 +220,10 @@ pub mod tests { use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer}; use super::KeyServerImpl; - pub struct DummyKeyServer; + #[derive(Default)] + pub struct DummyKeyServer { + pub generation_requests_count: AtomicUsize, + } impl KeyServer for DummyKeyServer {} @@ -231,7 +235,8 @@ pub mod tests { impl ServerKeyGenerator for DummyKeyServer { fn generate_key(&self, _key_id: &ServerKeyId, _signature: &RequestSignature, _threshold: usize) -> Result { - unimplemented!() + self.generation_requests_count.fetch_add(1, Ordering::Relaxed); + Err(Error::Internal("test error".into())) } } diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 
c8f24034a..ae352a9b4 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -1014,12 +1014,21 @@ pub mod tests { use std::collections::{BTreeSet, VecDeque}; use parking_lot::Mutex; use tokio_core::reactor::Core; - use ethkey::{Random, Generator, Public, sign}; - use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet, PlainNodeKeyPair}; + use bigint::hash::H256; + use ethkey::{Random, Generator, Public, Signature, sign}; + use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet, PlainNodeKeyPair, KeyStorage}; use key_server_cluster::message::Message; - use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration}; - use key_server_cluster::cluster_sessions::ClusterSession; - use key_server_cluster::generation_session::SessionState as GenerationSessionState; + use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration, ClusterClient, ClusterState}; + use key_server_cluster::cluster_sessions::{ClusterSession, AdminSession, ClusterSessionsListener}; + use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionState as GenerationSessionState}; + use key_server_cluster::decryption_session::{SessionImpl as DecryptionSession}; + use key_server_cluster::encryption_session::{SessionImpl as EncryptionSession}; + use key_server_cluster::signing_session::{SessionImpl as SigningSession}; + use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession, + IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction}; + + #[derive(Default)] + pub struct DummyClusterClient; #[derive(Debug)] pub struct DummyCluster { @@ -1033,6 +1042,23 @@ pub mod tests { messages: VecDeque<(NodeId, Message)>, } + impl ClusterClient for DummyClusterClient { + fn cluster_state(&self) -> ClusterState { unimplemented!() } + fn 
new_generation_session(&self, session_id: SessionId, author: Public, threshold: usize) -> Result, Error> { unimplemented!() } + fn new_encryption_session(&self, session_id: SessionId, requestor_signature: Signature, common_point: Public, encrypted_point: Public) -> Result, Error> { unimplemented!() } + fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature, version: Option, is_shadow_decryption: bool) -> Result, Error> { unimplemented!() } + fn new_signing_session(&self, session_id: SessionId, requestor_signature: Signature, version: Option, message_hash: H256) -> Result, Error> { unimplemented!() } + fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result>, Error> { unimplemented!() } + fn new_servers_set_change_session(&self, session_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error> { unimplemented!() } + + fn add_generation_listener(&self, listener: Arc>) {} + + fn make_faulty_generation_sessions(&self) { unimplemented!() } + fn generation_session(&self, session_id: &SessionId) -> Option> { unimplemented!() } + fn connect(&self) { unimplemented!() } + fn key_storage(&self) -> Arc { unimplemented!() } + } + impl DummyCluster { pub fn new(id: NodeId) -> Self { DummyCluster { diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index bea212460..0692a39ad 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -28,6 +28,8 @@ pub use super::key_server_set::KeyServerSet; pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic, SerializableMessageHash}; pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient}; pub use self::cluster_sessions::{ClusterSession, ClusterSessionsListener}; +#[cfg(test)] +pub use self::cluster::tests::DummyClusterClient; #[cfg(test)] pub use 
super::node_key_pair::PlainNodeKeyPair; diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index 3b1e0e6f2..da698e5e0 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -35,6 +35,7 @@ type CurrentSerializableDocumentKeyVersion = SerializableDocumentKeyShareVersion /// Encrypted key share, stored by key storage on the single key server. #[derive(Debug, Clone, PartialEq)] +#[cfg_attr(test, derive(Default))] pub struct DocumentKeyShare { /// Author of the entry. pub author: Public, diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index a0af4f072..3bf6dde06 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -85,15 +85,16 @@ pub fn start(client: Arc, sync: Arc, self_key_pair: Arc Some(listener::http_listener::KeyServerHttpListener::start(listener_address, key_server.clone())?), None => None, }; + let service_contract = Arc::new(listener::service_contract::OnChainServiceContract::new(&client, &sync, self_key_pair.clone())); let contract_listener = listener::service_contract_listener::ServiceContractListener::new(listener::service_contract_listener::ServiceContractListenerParams { - client: Arc::downgrade(&client), - sync: Arc::downgrade(&sync), + contract: service_contract, key_server: key_server.clone(), self_key_pair: self_key_pair, - key_servers_set: key_server_set, + key_server_set: key_server_set, cluster: cluster, key_storage: key_storage, }); + client.add_notify(contract_listener.clone()); let listener = listener::Listener::new(key_server, http_listener, Some(contract_listener)); Ok(Box::new(listener)) } diff --git a/secret_store/src/listener/http_listener.rs b/secret_store/src/listener/http_listener.rs index 3350b51d4..8b1779843 100644 --- a/secret_store/src/listener/http_listener.rs +++ b/secret_store/src/listener/http_listener.rs @@ -334,7 +334,7 @@ mod tests { #[test] fn http_listener_successfully_drops() { - let key_server = Arc::new(DummyKeyServer); + let 
key_server = Arc::new(DummyKeyServer::default()); let address = NodeAddress { address: "127.0.0.1".into(), port: 9000 }; let listener = KeyServerHttpListener::start(address, key_server).unwrap(); drop(listener); diff --git a/secret_store/src/listener/mod.rs b/secret_store/src/listener/mod.rs index 1ebe4aa47..403eaf549 100644 --- a/secret_store/src/listener/mod.rs +++ b/secret_store/src/listener/mod.rs @@ -1,4 +1,21 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + pub mod http_listener; +pub mod service_contract; pub mod service_contract_listener; use std::collections::BTreeSet; diff --git a/secret_store/src/listener/service_contract.rs b/secret_store/src/listener/service_contract.rs new file mode 100644 index 000000000..30ad8937d --- /dev/null +++ b/secret_store/src/listener/service_contract.rs @@ -0,0 +1,306 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::collections::{VecDeque, HashSet}; +use std::sync::{Arc, Weak}; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::thread; +use futures::{future, Future}; +use parking_lot::{RwLock, Mutex, Condvar}; +use ethcore::filter::Filter; +use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; +use ethkey::{Random, Generator, Public, Signature, sign, public_to_address}; +use ethsync::SyncProvider; +use native_contracts::SecretStoreService; +use bytes::Bytes; +use hash::keccak; +use bigint::hash::H256; +use bigint::prelude::U256; +use util::Address; +use key_server_set::KeyServerSet; +use key_server_cluster::{ClusterClient, ClusterSessionsListener, ClusterSession}; +use key_server_cluster::generation_session::SessionImpl as GenerationSession; +use key_storage::KeyStorage; +use listener::service_contract_listener::{ServiceTask, ServiceContractListenerParams}; +use {ServerKeyId, NodeKeyPair, KeyServer}; + +/// Name of the SecretStore contract in the registry. +const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service"; + +/// Key server has been added to the set. +const SERVER_KEY_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyRequested(bytes32,uint256)"; + +lazy_static! { + static ref SERVER_KEY_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_REQUESTED_EVENT_NAME); +} + +/// Service contract trait. +pub trait ServiceContract: Send + Sync { + /// Update contract. + fn update(&self); + /// Is contract installed && up-to-date (i.e. chain is synced)? + fn is_actual(&self) -> bool; + /// Read contract logs from given blocks. Returns topics of every entry. + fn read_logs(&self, first_block: H256, last_block: H256) -> Box>>; + /// Publish generated key. + fn read_pending_requests(&self) -> Box>; + /// Publish server key. 
+ fn publish_server_key(&self, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String>; +} + +/// On-chain service contract. +pub struct OnChainServiceContract { + /// Blockchain client. + client: Weak, + /// Sync provider. + sync: Weak, + /// This node key pair. + self_key_pair: Arc, + /// Contract. + contract: RwLock>, +} + +/// Pending requests iterator. +struct PendingRequestsIterator { + /// Blockchain client. + client: Arc, + /// Contract. + contract: Arc, + /// This node key pair. + self_key_pair: Arc, + /// Current request index. + index: U256, + /// Requests length. + length: U256, +} + +impl OnChainServiceContract { + /// Create new on-chain service contract. + pub fn new(client: &Arc, sync: &Arc, self_key_pair: Arc) -> Self { + let contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) + .map(|address| { + trace!(target: "secretstore", "{}: installing service contract from address {}", + self_key_pair.public(), address); + address + }) + .unwrap_or_default(); + + OnChainServiceContract { + client: Arc::downgrade(client), + sync: Arc::downgrade(sync), + self_key_pair: self_key_pair, + contract: RwLock::new(Arc::new(SecretStoreService::new(contract_addr))), + } + } +} + +impl ServiceContract for OnChainServiceContract { + fn update(&self) { + if let (Some(client), Some(sync)) = (self.client.upgrade(), self.sync.upgrade()) { + // do nothing until synced + if sync.status().is_syncing(client.queue_info()) { + return; + } + + // update contract address from registry + let service_contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()).unwrap_or_default(); + if self.contract.read().address != service_contract_addr { + trace!(target: "secretstore", "{}: installing service contract from address {}", + self.self_key_pair.public(), service_contract_addr); + *self.contract.write() = Arc::new(SecretStoreService::new(service_contract_addr)); + } + } + } + + fn is_actual(&self) -> bool { + 
self.contract.read().address != Default::default() + && match (self.client.upgrade(), self.sync.upgrade()) { + (Some(client), Some(sync)) => !sync.status().is_syncing(client.queue_info()), + _ => false, + } + } + + fn read_logs(&self, first_block: H256, last_block: H256) -> Box>> { + let client = match self.client.upgrade() { + Some(client) => client, + None => { + warn!(target: "secretstore", "{}: client is offline during read_pending_requests call", + self.self_key_pair.public()); + return Box::new(::std::iter::empty()); + }, + }; + + // read server key generation requests + let contract_address = self.contract.read().address.clone(); + let request_logs = client.logs(Filter { + from_block: BlockId::Hash(first_block), + to_block: BlockId::Hash(last_block), + address: Some(vec![contract_address]), + topics: vec![ + Some(vec![*SERVER_KEY_REQUESTED_EVENT_NAME_HASH]), + None, + None, + None, + ], + limit: None, + }); + + Box::new(request_logs.into_iter().map(|log| log.entry.topics)) + } + + fn read_pending_requests(&self) -> Box> { + let client = match self.client.upgrade() { + Some(client) => client, + None => { + warn!(target: "secretstore", "{}: client is offline during read_pending_requests call", + self.self_key_pair.public()); + return Box::new(::std::iter::empty()); + }, + }; + + let contract = self.contract.read(); + let length = match contract.address == Default::default() { + true => 0.into(), + false => { + let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d)); + contract.server_key_generation_requests_count(&do_call).wait() + .map_err(|error| { + warn!(target: "secretstore", "{}: call to server_key_generation_requests_count failed: {}", + self.self_key_pair.public(), error); + error + }) + .unwrap_or_default() + }, + }; + + Box::new(PendingRequestsIterator { + client: client, + contract: contract.clone(), + self_key_pair: self.self_key_pair.clone(), + index: 0.into(), + length: length, + }) + } + + fn publish_server_key(&self, 
server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> { + let server_key_hash = keccak(server_key); + let signed_server_key = self.self_key_pair.sign(&server_key_hash).map_err(|e| format!("{}", e))?; + let signed_server_key: Signature = signed_server_key.into_electrum().into(); + let contract = self.contract.read(); + let transaction_data = contract.encode_server_key_generated_input(server_key_id.clone(), + server_key.to_vec(), + signed_server_key.v(), + signed_server_key.r().into(), + signed_server_key.s().into() + )?; + + if contract.address != Default::default() { + if let Some(client) = self.client.upgrade() { + client.transact_contract( + contract.address.clone(), + transaction_data + ).map_err(|e| format!("{}", e))?; + } // else we will read this in the next refresh cycle + } + + Ok(()) + } +} + +impl Iterator for PendingRequestsIterator { + type Item = (bool, ServiceTask); + + fn next(&mut self) -> Option<(bool, ServiceTask)> { + if self.index >= self.length { + return None; + } + self.index = self.index + 1.into(); + + let do_call = |a, d| future::done(self.client.call_contract(BlockId::Latest, a, d)); + let key_generation_request = self.contract.get_server_key_generation_request(&do_call, + public_to_address(self.self_key_pair.public()), + (self.index - 1.into()).clone().into()).wait(); + let (server_key_id, threshold, is_confirmed) = match key_generation_request { + Ok((server_key_id, threshold, is_confirmed)) => { + (server_key_id, threshold, is_confirmed) + }, + Err(error) => { + warn!(target: "secretstore", "{}: call to get_server_key_generation_request failed: {}", + self.self_key_pair.public(), error); + return None; + }, + }; + + Some((is_confirmed, ServiceTask::GenerateServerKey(server_key_id, threshold.into()))) + } +} + +#[cfg(test)] +pub mod tests { + use std::collections::{VecDeque, HashSet}; + use std::sync::{Arc, Weak}; + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::thread; + use futures::{future, Future}; + 
use parking_lot::{RwLock, Mutex, Condvar}; + use ethcore::filter::Filter; + use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; + use ethkey::{Random, Generator, Public, Signature, sign, public_to_address}; + use ethsync::SyncProvider; + use native_contracts::SecretStoreService; + use bytes::Bytes; + use hash::keccak; + use bigint::hash::H256; + use bigint::prelude::U256; + use util::Address; + use key_server_set::KeyServerSet; + use key_server_cluster::{ClusterClient, ClusterSessionsListener, ClusterSession}; + use key_server_cluster::generation_session::SessionImpl as GenerationSession; + use key_storage::KeyStorage; + use listener::service_contract_listener::{ServiceTask, ServiceContractListenerParams}; + use {ServerKeyId, NodeKeyPair, KeyServer}; + use super::ServiceContract; + + #[derive(Default)] + pub struct DummyServiceContract { + pub is_actual: bool, + pub logs: Vec>, + pub pending_requests: Vec<(bool, ServiceTask)>, + pub published_keys: Mutex>, + } + + impl ServiceContract for DummyServiceContract { + fn update(&self) { + } + + fn is_actual(&self) -> bool { + self.is_actual + } + + fn read_logs(&self, first_block: H256, last_block: H256) -> Box>> { + Box::new(self.logs.clone().into_iter()) + } + + fn read_pending_requests(&self) -> Box> { + Box::new(self.pending_requests.clone().into_iter()) + } + + fn publish_server_key(&self, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> { + self.published_keys.lock().push((server_key_id.clone(), server_key.clone())); + Ok(()) + } + } +} diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index 62a2a387a..48884a73c 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -34,14 +34,9 @@ use key_server_set::KeyServerSet; use key_server_cluster::{ClusterClient, ClusterSessionsListener, ClusterSession}; use 
key_server_cluster::generation_session::SessionImpl as GenerationSession; use key_storage::KeyStorage; +use listener::service_contract::ServiceContract; use {ServerKeyId, NodeKeyPair, KeyServer}; -/// Name of the SecretStore contract in the registry. -const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service"; - -/// Key server has been added to the set. -const SERVER_KEY_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyRequested(bytes32,uint256)"; - /// Retry interval (in blocks). Every RETRY_INTEVAL_BLOCKS blocks each KeyServer reads pending requests from /// service contract && tries to re-execute. The reason to have this mechanism is primarily because keys /// servers set change takes a lot of time + there could be some races, when blocks are coming to different @@ -52,10 +47,6 @@ const RETRY_INTEVAL_BLOCKS: usize = 30; /// pending requests have failed, then most probably other will fail too. const MAX_FAILED_RETRY_REQUESTS: usize = 1; -lazy_static! { - static ref SERVER_KEY_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_REQUESTED_EVENT_NAME); -} - /// SecretStore <-> Authority connector responsible for: /// 1. listening for new requests on SecretStore contract /// 2. redirecting requests to key server @@ -69,16 +60,14 @@ pub struct ServiceContractListener { /// Service contract listener parameters. pub struct ServiceContractListenerParams { - /// Blockchain client. - pub client: Weak, - /// Sync provider. - pub sync: Weak, + /// Service contract. + pub contract: Arc, /// Key server reference. pub key_server: Arc, /// This node key pair. pub self_key_pair: Arc, /// Key servers set. - pub key_servers_set: Arc, + pub key_server_set: Arc, /// Cluster reference. pub cluster: Arc, /// Key storage reference. @@ -91,12 +80,19 @@ struct ServiceContractListenerData { pub last_retry: AtomicUsize, /// Retry-related data. pub retry_data: Mutex, - /// Contract. - pub contract: RwLock, /// Service tasks queue. 
pub tasks_queue: Arc, - /// Cluster params. - pub params: ServiceContractListenerParams, + /// Service contract. + pub contract: Arc, + /// Key server reference. + pub key_server: Arc, + /// This node key pair. + pub self_key_pair: Arc, + /// Key servers set. + pub key_server_set: Arc, + /// Key storage reference. + pub key_storage: Arc, + } /// Retry-related data. @@ -115,8 +111,8 @@ struct TasksQueue { } /// Service task. -#[derive(Debug)] -enum ServiceTask { +#[derive(Debug, Clone, PartialEq)] +pub enum ServiceTask { /// Retry all 'stalled' tasks. Retry, /// Generate server key (server_key_id, threshold). @@ -130,44 +126,58 @@ enum ServiceTask { impl ServiceContractListener { /// Create new service contract listener. pub fn new(params: ServiceContractListenerParams) -> Arc { - let client = params.client.upgrade().expect("client is active in constructor; qed"); - let sync = params.sync.upgrade().expect("sync is active in constructor; qed"); - let contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) - .map(|address| { - trace!(target: "secretstore", "{}: installing service contract from address {}", - params.self_key_pair.public(), address); - address - }) - .unwrap_or_default(); - - let is_syncing = sync.status().is_syncing(client.queue_info()); let data = Arc::new(ServiceContractListenerData { last_retry: AtomicUsize::new(0), retry_data: Default::default(), - contract: RwLock::new(SecretStoreService::new(contract_addr)), tasks_queue: Arc::new(TasksQueue::new()), - params: params, + contract: params.contract, + key_server: params.key_server, + self_key_pair: params.self_key_pair, + key_server_set: params.key_server_set, + key_storage: params.key_storage, }); + data.tasks_queue.push(::std::iter::once(ServiceTask::Retry)); - // retry on restart - if !is_syncing { - data.tasks_queue.push(::std::iter::once(ServiceTask::Retry)); - } - - let service_thread_data = data.clone(); - let service_handle = thread::spawn(move || 
Self::run_service_thread(service_thread_data)); + // we are not starting thread when in test mode + let service_handle = if cfg!(test) { + None + } else { + let service_thread_data = data.clone(); + Some(thread::spawn(move || Self::run_service_thread(service_thread_data))) + }; let contract = Arc::new(ServiceContractListener { data: data, - service_handle: Some(service_handle), + service_handle: service_handle, }); - client.add_notify(contract.clone()); - contract.data.params.cluster.add_generation_listener(contract.clone()); + params.cluster.add_generation_listener(contract.clone()); contract } /// Process incoming events of service contract. - fn process_service_contract_events(&self, client: &Client, service_contract: Address, blocks: Vec) { - debug_assert!(!blocks.is_empty()); + fn process_service_contract_events(&self, first: H256, last: H256) { + self.data.tasks_queue.push(self.data.contract.read_logs(first, last) + .filter_map(|topics| match topics.len() { + // when key is already generated && we have this key + 3 if self.data.key_storage.get(&topics[1]).map(|k| k.is_some()).unwrap_or_default() => { + Some(ServiceTask::RestoreServerKey( + topics[1], + )) + } + // when key is not yet generated && this node should be master of this key generation session + 3 if is_processed_by_this_key_server(&*self.data.key_server_set, &*self.data.self_key_pair, &topics[1]) => { + Some(ServiceTask::GenerateServerKey( + topics[1], + topics[2], + )) + }, + 3 => None, + l @ _ => { + warn!(target: "secretstore", "Ignoring ServerKeyRequested event with wrong number of params {}", l); + None + }, + })); + +/* debug_assert!(!blocks.is_empty()); // read server key generation requests let request_logs = client.logs(Filter { @@ -187,13 +197,13 @@ impl ServiceContractListener { self.data.tasks_queue.push(request_logs.into_iter() .filter_map(|r| match r.entry.topics.len() { // when key is already generated && we have this key - 3 if 
self.data.params.key_storage.get(&r.entry.topics[1]).map(|k| k.is_some()).unwrap_or_default() => { + 3 if self.data.key_storage.get(&r.entry.topics[1]).map(|k| k.is_some()).unwrap_or_default() => { Some(ServiceTask::RestoreServerKey( r.entry.topics[1], )) } // when key is not yet generated && this node should be master of this key generation session - 3 if is_processed_by_this_key_server(&*self.data.params.key_servers_set, &*self.data.params.self_key_pair, &r.entry.topics[1]) => { + 3 if is_processed_by_this_key_server(&*self.data.key_server_set, &*self.data.self_key_pair, &r.entry.topics[1]) => { Some(ServiceTask::GenerateServerKey( r.entry.topics[1], r.entry.topics[2], @@ -204,14 +214,14 @@ impl ServiceContractListener { warn!(target: "secretstore", "Ignoring ServerKeyRequested event with wrong number of params {}", l); None }, - })); + }));*/ } /// Service thread procedure. fn run_service_thread(data: Arc) { loop { let task = data.tasks_queue.wait(); - trace!(target: "secretstore", "{}: processing {:?} task", data.params.self_key_pair.public(), task); + trace!(target: "secretstore", "{}: processing {:?} task", data.self_key_pair.public(), task); match task { ServiceTask::Shutdown => break, @@ -231,13 +241,13 @@ impl ServiceContractListener { .map(|processed_requests| { if processed_requests != 0 { trace!(target: "secretstore", "{}: successfully retried {} pending requests", - data.params.self_key_pair.public(), processed_requests); + data.self_key_pair.public(), processed_requests); } () }) .map_err(|error| { warn!(target: "secretstore", "{}: retrying pending requests has failed with: {}", - data.params.self_key_pair.public(), error); + data.self_key_pair.public(), error); error }), ServiceTask::RestoreServerKey(server_key_id) => { @@ -246,27 +256,27 @@ impl ServiceContractListener { .and_then(|server_key| Self::publish_server_key(&data, &server_key_id, &server_key)) .map(|_| { trace!(target: "secretstore", "{}: processed RestoreServerKey({}) request", - 
data.params.self_key_pair.public(), server_key_id); + data.self_key_pair.public(), server_key_id); () }) .map_err(|error| { warn!(target: "secretstore", "{}: failed to process RestoreServerKey({}) request with: {}", - data.params.self_key_pair.public(), server_key_id, error); + data.self_key_pair.public(), server_key_id, error); error }) - } + }, ServiceTask::GenerateServerKey(server_key_id, threshold) => { data.retry_data.lock().generated_keys.insert(server_key_id.clone()); Self::generate_server_key(&data, &server_key_id, &threshold) .and_then(|server_key| Self::publish_server_key(&data, &server_key_id, &server_key)) .map(|_| { trace!(target: "secretstore", "{}: processed GenerateServerKey({}, {}) request", - data.params.self_key_pair.public(), server_key_id, threshold); + data.self_key_pair.public(), server_key_id, threshold); () }) .map_err(|error| { warn!(target: "secretstore", "{}: failed to process GenerateServerKey({}, {}) request with: {}", - data.params.self_key_pair.public(), server_key_id, threshold, error); + data.self_key_pair.public(), server_key_id, threshold, error); error }) }, @@ -276,49 +286,33 @@ impl ServiceContractListener { /// Retry processing pending requests. 
fn retry_pending_requests(data: &Arc) -> Result { - let client = data.params.client.upgrade().ok_or("client is required".to_owned())?; - let retry_data = ::std::mem::replace(&mut *data.retry_data.lock(), Default::default()); - let contract = data.contract.read(); - - // it is only possible when contract address is set - if contract.address == Default::default() { - return Ok(0); - } - - let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d)); - let generate_server_key_requests_count = contract.server_key_generation_requests_count(&do_call).wait()?; - let mut generate_server_key_request_index = 0.into(); let mut failed_requests = 0; let mut processed_requests = 0; - loop { - if generate_server_key_request_index >= generate_server_key_requests_count { - break; - } - - // read request from the contract - let (server_key_id, threshold, is_confirmed) = contract.get_server_key_generation_request(&do_call, - public_to_address(data.params.self_key_pair.public()), - generate_server_key_request_index).wait()?; - generate_server_key_request_index = generate_server_key_request_index + 1.into(); - + let retry_data = ::std::mem::replace(&mut *data.retry_data.lock(), Default::default()); + for (is_confirmed, task) in data.contract.read_pending_requests() { // only process requests, which we haven't confirmed yet if is_confirmed { continue; } - // only process request, which haven't been processed recently - // there could be a lag when we've just generated server key && retrying on the same block - // (or before our tx is mined) - state is not updated yet - if retry_data.generated_keys.contains(&server_key_id){ - continue; - } + let request_result = match task { + ServiceTask::GenerateServerKey(server_key_id, threshold) => { + // only process request, which haven't been processed recently + // there could be a lag when we've just generated server key && retrying on the same block + // (or before our tx is mined) - state is not updated yet + if 
retry_data.generated_keys.contains(&server_key_id){ + continue; + } - // process request - let is_own_request = is_processed_by_this_key_server(&*data.params.key_servers_set, &*data.params.self_key_pair, &server_key_id); - let request_result = Self::process_service_task(data, match is_own_request { - true => ServiceTask::GenerateServerKey(server_key_id, threshold.into()), - false => ServiceTask::RestoreServerKey(server_key_id), - }); + // process request + let is_own_request = is_processed_by_this_key_server(&*data.key_server_set, &*data.self_key_pair, &server_key_id); + Self::process_service_task(data, match is_own_request { + true => ServiceTask::GenerateServerKey(server_key_id, threshold.into()), + false => ServiceTask::RestoreServerKey(server_key_id), + }) + }, + _ => Err("not supported".into()), + }; // process request result match request_result { @@ -331,6 +325,7 @@ impl ServiceContractListener { }, } } + Ok(processed_requests) } @@ -346,13 +341,13 @@ impl ServiceContractListener { // => this API (server key generation) is not suitable for usage in encryption via contract endpoint let author_key = Random.generate().map_err(|e| format!("{}", e))?; let server_key_id_signature = sign(author_key.secret(), server_key_id).map_err(|e| format!("{}", e))?; - data.params.key_server.generate_key(server_key_id, &server_key_id_signature, threshold_num as usize) + data.key_server.generate_key(server_key_id, &server_key_id_signature, threshold_num as usize) .map_err(Into::into) } /// Restore server key. fn restore_server_key(data: &Arc, server_key_id: &ServerKeyId) -> Result { - data.params.key_storage.get(server_key_id) + data.key_storage.get(server_key_id) .map_err(|e| format!("{}", e)) .and_then(|ks| ks.ok_or("missing key".to_owned())) .map(|ks| ks.public) @@ -360,8 +355,9 @@ impl ServiceContractListener { /// Publish server key. 
fn publish_server_key(data: &Arc, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> { - let server_key_hash = keccak(server_key); - let signed_server_key = data.params.self_key_pair.sign(&server_key_hash).map_err(|e| format!("{}", e))?; + data.contract.publish_server_key(server_key_id, server_key) + /*let server_key_hash = keccak(server_key); + let signed_server_key = data.self_key_pair.sign(&server_key_hash).map_err(|e| format!("{}", e))?; let signed_server_key: Signature = signed_server_key.into_electrum().into(); let transaction_data = data.contract.read().encode_server_key_generated_input(server_key_id.clone(), server_key.to_vec(), @@ -372,7 +368,7 @@ impl ServiceContractListener { let contract = data.contract.read(); if contract.address != Default::default() { - if let Some(client) = data.params.client.upgrade() { + if let Some(client) = data.client.upgrade() { client.transact_contract( contract.address.clone(), transaction_data @@ -381,6 +377,7 @@ impl ServiceContractListener { } Ok(()) + unimplemented!()*/ } } @@ -397,33 +394,52 @@ impl Drop for ServiceContractListener { impl ChainNotify for ServiceContractListener { fn new_blocks(&self, _imported: Vec, _invalid: Vec, enacted: Vec, _retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { let enacted_len = enacted.len(); - if enacted_len != 0 { - if let (Some(client), Some(sync)) = (self.data.params.client.upgrade(), self.data.params.sync.upgrade()) { - // do nothing until synced - if sync.status().is_syncing(client.queue_info()) { - return; - } - - // update contract address from registry - if let Some(service_contract_addr) = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) { - if self.data.contract.read().address != service_contract_addr { - trace!(target: "secretstore", "{}: installing service contract from address {}", - self.data.params.self_key_pair.public(), service_contract_addr); - *self.data.contract.write() = 
SecretStoreService::new(service_contract_addr.clone()); - } - - // and process contract events - self.process_service_contract_events(&*client, service_contract_addr, enacted); - } - - // schedule retry if received enough blocks since last retry - // it maybe inaccurate when switching syncing/synced states, but that's ok - if self.data.last_retry.fetch_add(enacted_len, Ordering::Relaxed) >= RETRY_INTEVAL_BLOCKS { - self.data.tasks_queue.push(::std::iter::once(ServiceTask::Retry)); - self.data.last_retry.store(0, Ordering::Relaxed); - } - } + if enacted_len == 0 { + return; } + + if !self.data.contract.is_actual() { + return; + } + + self.data.contract.update(); + self.process_service_contract_events( + enacted.first().expect("TODO").clone(), + enacted.last().expect("TODO").clone()); + + // schedule retry if received enough blocks since last retry + // it maybe inaccurate when switching syncing/synced states, but that's ok + if self.data.last_retry.fetch_add(enacted_len, Ordering::Relaxed) >= RETRY_INTEVAL_BLOCKS { + self.data.tasks_queue.push(::std::iter::once(ServiceTask::Retry)); + self.data.last_retry.store(0, Ordering::Relaxed); + } + + +/* if let (Some(client), Some(sync)) = (self.data.client.upgrade(), self.data.sync.upgrade()) { + // do nothing until synced + if sync.status().is_syncing(client.queue_info()) { + return; + } + + // update contract address from registry + if let Some(service_contract_addr) = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) { + if self.data.contract.read().address != service_contract_addr { + trace!(target: "secretstore", "{}: installing service contract from address {}", + self.data.self_key_pair.public(), service_contract_addr); + *self.data.contract.write() = SecretStoreService::new(service_contract_addr.clone()); + } + + // and process contract events + self.process_service_contract_events(&*client, service_contract_addr, enacted); + } + + // schedule retry if received enough blocks since last retry + // it 
maybe inaccurate when switching syncing/synced states, but that's ok + if self.data.last_retry.fetch_add(enacted_len, Ordering::Relaxed) >= RETRY_INTEVAL_BLOCKS { + self.data.tasks_queue.push(::std::iter::once(ServiceTask::Retry)); + self.data.last_retry.store(0, Ordering::Relaxed); + } + }*/ } } @@ -433,7 +449,7 @@ impl ClusterSessionsListener for ServiceContractListener { // only publish when the session is started by another node // when it is started by this node, it is published from process_service_task - if !is_processed_by_this_key_server(&*self.data.params.key_servers_set, &*self.data.params.self_key_pair, &session.id()) { + if !is_processed_by_this_key_server(&*self.data.key_server_set, &*self.data.self_key_pair, &session.id()) { // by this time sesion must already be completed - either successfully, or not debug_assert!(session.is_finished()); @@ -443,12 +459,12 @@ impl ClusterSessionsListener for ServiceContractListener { .and_then(|server_key| Self::publish_server_key(&self.data, &session.id(), &server_key)) .map(|_| { trace!(target: "secretstore", "{}: completed foreign GenerateServerKey({}) request", - self.data.params.self_key_pair.public(), session.id()); + self.data.self_key_pair.public(), session.id()); () }) .map_err(|error| { warn!(target: "secretstore", "{}: failed to process GenerateServerKey({}) request with: {}", - self.data.params.self_key_pair.public(), session.id(), error); + self.data.self_key_pair.public(), session.id(), error); error }); } @@ -493,8 +509,8 @@ impl TasksQueue { } /// Returns true when session, related to `server_key_id` must be started on this KeyServer. 
-fn is_processed_by_this_key_server(key_servers_set: &KeyServerSet, self_key_pair: &NodeKeyPair, server_key_id: &H256) -> bool { - let servers = key_servers_set.get(); +fn is_processed_by_this_key_server(key_server_set: &KeyServerSet, self_key_pair: &NodeKeyPair, server_key_id: &H256) -> bool { + let servers = key_server_set.get(); let total_servers_count = servers.len(); if total_servers_count == 0 { return false; @@ -514,10 +530,39 @@ fn is_processed_by_this_key_server(key_servers_set: &KeyServerSet, self_key_pair #[cfg(test)] mod tests { + use std::sync::Arc; + use std::sync::atomic::Ordering; use ethkey::{Random, Generator, KeyPair}; + use listener::service_contract::ServiceContract; + use listener::service_contract::tests::DummyServiceContract; + use key_server_cluster::DummyClusterClient; + use key_server::tests::DummyKeyServer; + use key_storage::tests::DummyKeyStorage; use key_server_set::tests::MapKeyServerSet; use PlainNodeKeyPair; - use super::is_processed_by_this_key_server; + use super::{ServiceTask, ServiceContractListener, ServiceContractListenerParams, is_processed_by_this_key_server}; + + fn make_service_contract_listener(contract: Option>, key_server: Option>) -> Arc { + let contract = contract.unwrap_or_else(|| Arc::new(DummyServiceContract::default())); + let key_server = key_server.unwrap_or_else(|| Arc::new(DummyKeyServer::default())); + let servers_set = Arc::new(MapKeyServerSet::new(vec![ + ("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(), + "127.0.0.1:8080".parse().unwrap()), + ("c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee51ae168fea63dc339a3c58419466ceaeef7f632653266d0e1236431a950cfe52a".parse().unwrap(), + "127.0.0.1:8080".parse().unwrap()), + ("f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9388f7b0f632de8140fe337e62a37f3566500a99934c2231b6cb9fd7584b8e672".parse().unwrap(), + 
"127.0.0.1:8080".parse().unwrap()), + ].into_iter().collect())); + let self_key_pair = Arc::new(PlainNodeKeyPair::new(KeyPair::from_secret("0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap())); + ServiceContractListener::new(ServiceContractListenerParams { + contract: contract, + key_server: key_server, + self_key_pair: self_key_pair, + key_server_set: servers_set, + cluster: Arc::new(DummyClusterClient::default()), + key_storage: Arc::new(DummyKeyStorage::default()), + }) + } #[test] fn is_not_processed_by_this_key_server_with_zero_servers() { @@ -661,4 +706,64 @@ mod tests { assert_eq!(is_processed_by_this_key_server(&servers_set, &key_pair, &"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true); } + + #[test] + fn no_tasks_scheduled_when_no_contract_events() { + let listener = make_service_contract_listener(None, None); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + listener.process_service_contract_events(Default::default(), Default::default()); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + } + + #[test] + fn server_key_generation_is_scheduled_when_requested_key_is_unknnown() { + let mut contract = DummyServiceContract::default(); + contract.logs.push(vec![Default::default(), Default::default(), Default::default()]); + let listener = make_service_contract_listener(Some(Arc::new(contract)), None); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + listener.process_service_contract_events(Default::default(), Default::default()); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 2); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().pop_back(), Some(ServiceTask::GenerateServerKey(Default::default(), Default::default()))); + } + + #[test] + fn no_new_tasks_scheduled_when_requested_key_is_unknown_and_request_belongs_to_other_key_server() { + let server_key_id = 
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap(); + let mut contract = DummyServiceContract::default(); + contract.logs.push(vec![Default::default(), server_key_id, Default::default()]); + let listener = make_service_contract_listener(Some(Arc::new(contract)), None); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + listener.process_service_contract_events(Default::default(), Default::default()); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + } + + #[test] + fn server_key_restore_is_scheduled_when_requested_key_is_knnown() { + let mut contract = DummyServiceContract::default(); + contract.logs.push(vec![Default::default(), Default::default(), Default::default()]); + let listener = make_service_contract_listener(Some(Arc::new(contract)), None); + listener.data.key_storage.insert(Default::default(), Default::default()); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + listener.process_service_contract_events(Default::default(), Default::default()); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 2); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().pop_back(), Some(ServiceTask::RestoreServerKey(Default::default()))); + } + + #[test] + fn no_new_tasks_scheduled_when_wrong_number_of_topics_in_log() { + let mut contract = DummyServiceContract::default(); + contract.logs.push(vec![Default::default(), Default::default()]); + let listener = make_service_contract_listener(Some(Arc::new(contract)), None); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + listener.process_service_contract_events(Default::default(), Default::default()); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + } + + #[test] + fn generation_session_is_created_when_processing_generate_server_key_task() { + let key_server = Arc::new(DummyKeyServer::default()); + let listener = make_service_contract_listener(None, 
Some(key_server.clone())); + ServiceContractListener::process_service_task(&listener.data, ServiceTask::GenerateServerKey(Default::default(), Default::default())).unwrap_err(); + assert_eq!(key_server.generation_requests_count.load(Ordering::Relaxed), 1); + } } From b036624bacd21cb53c14b2a2a7109826c03d48d5 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 23 Nov 2017 07:26:45 +0300 Subject: [PATCH 17/42] SecretStore: key_is_read_and_published_when_processing_restore_server_key_task --- .../src/listener/service_contract_listener.rs | 30 ++++++++++++++----- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index 48884a73c..44026107b 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -537,14 +537,16 @@ mod tests { use listener::service_contract::tests::DummyServiceContract; use key_server_cluster::DummyClusterClient; use key_server::tests::DummyKeyServer; + use key_storage::{KeyStorage, DocumentKeyShare}; use key_storage::tests::DummyKeyStorage; use key_server_set::tests::MapKeyServerSet; use PlainNodeKeyPair; use super::{ServiceTask, ServiceContractListener, ServiceContractListenerParams, is_processed_by_this_key_server}; - fn make_service_contract_listener(contract: Option>, key_server: Option>) -> Arc { + fn make_service_contract_listener(contract: Option>, key_server: Option>, key_storage: Option>) -> Arc { let contract = contract.unwrap_or_else(|| Arc::new(DummyServiceContract::default())); let key_server = key_server.unwrap_or_else(|| Arc::new(DummyKeyServer::default())); + let key_storage = key_storage.unwrap_or_else(|| Arc::new(DummyKeyStorage::default())); let servers_set = Arc::new(MapKeyServerSet::new(vec![ 
("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(), "127.0.0.1:8080".parse().unwrap()), @@ -560,7 +562,7 @@ mod tests { self_key_pair: self_key_pair, key_server_set: servers_set, cluster: Arc::new(DummyClusterClient::default()), - key_storage: Arc::new(DummyKeyStorage::default()), + key_storage: key_storage, }) } @@ -709,7 +711,7 @@ mod tests { #[test] fn no_tasks_scheduled_when_no_contract_events() { - let listener = make_service_contract_listener(None, None); + let listener = make_service_contract_listener(None, None, None); assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); listener.process_service_contract_events(Default::default(), Default::default()); assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); @@ -719,7 +721,7 @@ mod tests { fn server_key_generation_is_scheduled_when_requested_key_is_unknnown() { let mut contract = DummyServiceContract::default(); contract.logs.push(vec![Default::default(), Default::default(), Default::default()]); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None); + let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); listener.process_service_contract_events(Default::default(), Default::default()); assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 2); @@ -731,7 +733,7 @@ mod tests { let server_key_id = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap(); let mut contract = DummyServiceContract::default(); contract.logs.push(vec![Default::default(), server_key_id, Default::default()]); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None); + let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); 
listener.process_service_contract_events(Default::default(), Default::default()); assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); @@ -741,7 +743,7 @@ mod tests { fn server_key_restore_is_scheduled_when_requested_key_is_knnown() { let mut contract = DummyServiceContract::default(); contract.logs.push(vec![Default::default(), Default::default(), Default::default()]); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None); + let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); listener.data.key_storage.insert(Default::default(), Default::default()); assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); listener.process_service_contract_events(Default::default(), Default::default()); @@ -753,7 +755,7 @@ mod tests { fn no_new_tasks_scheduled_when_wrong_number_of_topics_in_log() { let mut contract = DummyServiceContract::default(); contract.logs.push(vec![Default::default(), Default::default()]); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None); + let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); listener.process_service_contract_events(Default::default(), Default::default()); assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); @@ -762,8 +764,20 @@ mod tests { #[test] fn generation_session_is_created_when_processing_generate_server_key_task() { let key_server = Arc::new(DummyKeyServer::default()); - let listener = make_service_contract_listener(None, Some(key_server.clone())); + let listener = make_service_contract_listener(None, Some(key_server.clone()), None); ServiceContractListener::process_service_task(&listener.data, ServiceTask::GenerateServerKey(Default::default(), Default::default())).unwrap_err(); assert_eq!(key_server.generation_requests_count.load(Ordering::Relaxed), 1); } + + #[test] + fn 
key_is_read_and_published_when_processing_restore_server_key_task() { + let contract = Arc::new(DummyServiceContract::default()); + let key_storage = Arc::new(DummyKeyStorage::default()); + let mut key_share = DocumentKeyShare::default(); + key_share.public = KeyPair::from_secret("0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap().public().clone(); + key_storage.insert(Default::default(), key_share.clone()); + let listener = make_service_contract_listener(Some(contract.clone()), None, Some(key_storage)); + ServiceContractListener::process_service_task(&listener.data, ServiceTask::RestoreServerKey(Default::default())).unwrap(); + assert_eq!(*contract.published_keys.lock(), vec![(Default::default(), key_share.public)]); + } } From 6a7f523cdb02d112610cf74fd2df34f424520b0d Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 23 Nov 2017 08:07:46 +0300 Subject: [PATCH 18/42] SecretStore: generation_is_not_retried_if_tried_in_the_same_cycle --- .../src/listener/service_contract_listener.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index 44026107b..3a85efd6a 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -780,4 +780,15 @@ mod tests { ServiceContractListener::process_service_task(&listener.data, ServiceTask::RestoreServerKey(Default::default())).unwrap(); assert_eq!(*contract.published_keys.lock(), vec![(Default::default(), key_share.public)]); } + + #[test] + fn generation_is_not_retried_if_tried_in_the_same_cycle() { + let mut contract = DummyServiceContract::default(); + contract.pending_requests.push((false, ServiceTask::GenerateServerKey(Default::default(), Default::default()))); + let key_server = Arc::new(DummyKeyServer::default()); + let listener = 
make_service_contract_listener(Some(Arc::new(contract)), Some(key_server.clone()), None); + listener.data.retry_data.lock().generated_keys.insert(Default::default()); + ServiceContractListener::retry_pending_requests(&listener.data); + assert_eq!(key_server.generation_requests_count.load(Ordering::Relaxed), 0); + } } From 498a708df72e03e88d64b536c52fc2693559762b Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 23 Nov 2017 08:22:51 +0300 Subject: [PATCH 19/42] fixed warnings --- .../src/key_server_cluster/cluster.rs | 18 ++++---- secret_store/src/listener/service_contract.rs | 46 ++++--------------- .../src/listener/service_contract_listener.rs | 20 +++----- 3 files changed, 26 insertions(+), 58 deletions(-) diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index ae352a9b4..59f1bfac5 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -1025,7 +1025,7 @@ pub mod tests { use key_server_cluster::encryption_session::{SessionImpl as EncryptionSession}; use key_server_cluster::signing_session::{SessionImpl as SigningSession}; use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession, - IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction}; + IsolatedSessionTransport as KeyVersionNegotiationSessionTransport}; #[derive(Default)] pub struct DummyClusterClient; @@ -1044,17 +1044,17 @@ pub mod tests { impl ClusterClient for DummyClusterClient { fn cluster_state(&self) -> ClusterState { unimplemented!() } - fn new_generation_session(&self, session_id: SessionId, author: Public, threshold: usize) -> Result, Error> { unimplemented!() } - fn new_encryption_session(&self, session_id: SessionId, requestor_signature: Signature, common_point: Public, encrypted_point: Public) -> Result, Error> { unimplemented!() } - fn new_decryption_session(&self, session_id: SessionId, 
requestor_signature: Signature, version: Option, is_shadow_decryption: bool) -> Result, Error> { unimplemented!() } - fn new_signing_session(&self, session_id: SessionId, requestor_signature: Signature, version: Option, message_hash: H256) -> Result, Error> { unimplemented!() } - fn new_key_version_negotiation_session(&self, session_id: SessionId) -> Result>, Error> { unimplemented!() } - fn new_servers_set_change_session(&self, session_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error> { unimplemented!() } + fn new_generation_session(&self, _session_id: SessionId, _author: Public, _threshold: usize) -> Result, Error> { unimplemented!() } + fn new_encryption_session(&self, _session_id: SessionId, _requestor_signature: Signature, _common_point: Public, _encrypted_point: Public) -> Result, Error> { unimplemented!() } + fn new_decryption_session(&self, _session_id: SessionId, _requestor_signature: Signature, _version: Option, _is_shadow_decryption: bool) -> Result, Error> { unimplemented!() } + fn new_signing_session(&self, _session_id: SessionId, _requestor_signature: Signature, _version: Option, _message_hash: H256) -> Result, Error> { unimplemented!() } + fn new_key_version_negotiation_session(&self, _session_id: SessionId) -> Result>, Error> { unimplemented!() } + fn new_servers_set_change_session(&self, _session_id: Option, _new_nodes_set: BTreeSet, _old_set_signature: Signature, _new_set_signature: Signature) -> Result, Error> { unimplemented!() } - fn add_generation_listener(&self, listener: Arc>) {} + fn add_generation_listener(&self, _listener: Arc>) {} fn make_faulty_generation_sessions(&self) { unimplemented!() } - fn generation_session(&self, session_id: &SessionId) -> Option> { unimplemented!() } + fn generation_session(&self, _session_id: &SessionId) -> Option> { unimplemented!() } fn connect(&self) { unimplemented!() } fn key_storage(&self) -> Arc { unimplemented!() } } diff --git 
a/secret_store/src/listener/service_contract.rs b/secret_store/src/listener/service_contract.rs index 30ad8937d..3d5b25b72 100644 --- a/secret_store/src/listener/service_contract.rs +++ b/secret_store/src/listener/service_contract.rs @@ -14,28 +14,19 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::collections::{VecDeque, HashSet}; use std::sync::{Arc, Weak}; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::thread; use futures::{future, Future}; -use parking_lot::{RwLock, Mutex, Condvar}; +use parking_lot::RwLock; use ethcore::filter::Filter; -use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; -use ethkey::{Random, Generator, Public, Signature, sign, public_to_address}; +use ethcore::client::{Client, BlockChainClient, BlockId}; +use ethkey::{Public, Signature, public_to_address}; use ethsync::SyncProvider; use native_contracts::SecretStoreService; -use bytes::Bytes; use hash::keccak; use bigint::hash::H256; use bigint::prelude::U256; -use util::Address; -use key_server_set::KeyServerSet; -use key_server_cluster::{ClusterClient, ClusterSessionsListener, ClusterSession}; -use key_server_cluster::generation_session::SessionImpl as GenerationSession; -use key_storage::KeyStorage; -use listener::service_contract_listener::{ServiceTask, ServiceContractListenerParams}; -use {ServerKeyId, NodeKeyPair, KeyServer}; +use listener::service_contract_listener::ServiceTask; +use {ServerKeyId, NodeKeyPair}; /// Name of the SecretStore contract in the registry. 
const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service"; @@ -250,28 +241,11 @@ impl Iterator for PendingRequestsIterator { #[cfg(test)] pub mod tests { - use std::collections::{VecDeque, HashSet}; - use std::sync::{Arc, Weak}; - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::thread; - use futures::{future, Future}; - use parking_lot::{RwLock, Mutex, Condvar}; - use ethcore::filter::Filter; - use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; - use ethkey::{Random, Generator, Public, Signature, sign, public_to_address}; - use ethsync::SyncProvider; - use native_contracts::SecretStoreService; - use bytes::Bytes; - use hash::keccak; + use parking_lot::Mutex; + use ethkey::Public; use bigint::hash::H256; - use bigint::prelude::U256; - use util::Address; - use key_server_set::KeyServerSet; - use key_server_cluster::{ClusterClient, ClusterSessionsListener, ClusterSession}; - use key_server_cluster::generation_session::SessionImpl as GenerationSession; - use key_storage::KeyStorage; - use listener::service_contract_listener::{ServiceTask, ServiceContractListenerParams}; - use {ServerKeyId, NodeKeyPair, KeyServer}; + use listener::service_contract_listener::ServiceTask; + use ServerKeyId; use super::ServiceContract; #[derive(Default)] @@ -290,7 +264,7 @@ pub mod tests { self.is_actual } - fn read_logs(&self, first_block: H256, last_block: H256) -> Box>> { + fn read_logs(&self, _first_block: H256, _last_block: H256) -> Box>> { Box::new(self.logs.clone().into_iter()) } diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index 3a85efd6a..8ffc75224 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -15,21 +15,15 @@ // along with Parity. If not, see . 
use std::collections::{VecDeque, HashSet}; -use std::sync::{Arc, Weak}; +use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread; -use futures::{future, Future}; -use parking_lot::{RwLock, Mutex, Condvar}; -use ethcore::filter::Filter; -use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; -use ethkey::{Random, Generator, Public, Signature, sign, public_to_address}; -use ethsync::SyncProvider; -use native_contracts::SecretStoreService; +use parking_lot::{Mutex, Condvar}; +use ethcore::client::ChainNotify; +use ethkey::{Random, Generator, Public, sign}; use bytes::Bytes; -use hash::keccak; use bigint::hash::H256; use bigint::prelude::U256; -use util::Address; use key_server_set::KeyServerSet; use key_server_cluster::{ClusterClient, ClusterSessionsListener, ClusterSession}; use key_server_cluster::generation_session::SessionImpl as GenerationSession; @@ -744,7 +738,7 @@ mod tests { let mut contract = DummyServiceContract::default(); contract.logs.push(vec![Default::default(), Default::default(), Default::default()]); let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); - listener.data.key_storage.insert(Default::default(), Default::default()); + listener.data.key_storage.insert(Default::default(), Default::default()).unwrap(); assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); listener.process_service_contract_events(Default::default(), Default::default()); assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 2); @@ -775,7 +769,7 @@ mod tests { let key_storage = Arc::new(DummyKeyStorage::default()); let mut key_share = DocumentKeyShare::default(); key_share.public = KeyPair::from_secret("0000000000000000000000000000000000000000000000000000000000000001".parse().unwrap()).unwrap().public().clone(); - key_storage.insert(Default::default(), key_share.clone()); + key_storage.insert(Default::default(), key_share.clone()).unwrap(); let listener = 
make_service_contract_listener(Some(contract.clone()), None, Some(key_storage)); ServiceContractListener::process_service_task(&listener.data, ServiceTask::RestoreServerKey(Default::default())).unwrap(); assert_eq!(*contract.published_keys.lock(), vec![(Default::default(), key_share.public)]); @@ -788,7 +782,7 @@ mod tests { let key_server = Arc::new(DummyKeyServer::default()); let listener = make_service_contract_listener(Some(Arc::new(contract)), Some(key_server.clone()), None); listener.data.retry_data.lock().generated_keys.insert(Default::default()); - ServiceContractListener::retry_pending_requests(&listener.data); + ServiceContractListener::retry_pending_requests(&listener.data).unwrap(); assert_eq!(key_server.generation_requests_count.load(Ordering::Relaxed), 0); } } From c296b8ac5d79c809929241feb09c7df6c29c5803 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 23 Nov 2017 09:33:27 +0300 Subject: [PATCH 20/42] SecretStore: fixed service contract update --- secret_store/src/listener/service_contract_listener.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index 8ffc75224..5790a37bd 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -392,11 +392,11 @@ impl ChainNotify for ServiceContractListener { return; } + self.data.contract.update(); if !self.data.contract.is_actual() { return; } - self.data.contract.update(); self.process_service_contract_events( enacted.first().expect("TODO").clone(), enacted.last().expect("TODO").clone()); From 989f2a0542b17f67daf7ccf529eed958405fced3 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 23 Nov 2017 10:35:59 +0300 Subject: [PATCH 21/42] SecretStore: removed code comments --- .../src/listener/service_contract_listener.rs | 88 ------------------- 1 file changed, 88 deletions(-) diff --git 
a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index 5790a37bd..e6b197f22 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -170,45 +170,6 @@ impl ServiceContractListener { None }, })); - -/* debug_assert!(!blocks.is_empty()); - - // read server key generation requests - let request_logs = client.logs(Filter { - from_block: BlockId::Hash(blocks.first().expect("!block.is_empty(); qed").clone()), - to_block: BlockId::Hash(blocks.last().expect("!block.is_empty(); qed").clone()), - address: Some(vec![service_contract]), - topics: vec![ - Some(vec![*SERVER_KEY_REQUESTED_EVENT_NAME_HASH]), - None, - None, - None, - ], - limit: None, - }); - - // schedule correct requests if they're intended to be processed by this KeyServer - self.data.tasks_queue.push(request_logs.into_iter() - .filter_map(|r| match r.entry.topics.len() { - // when key is already generated && we have this key - 3 if self.data.key_storage.get(&r.entry.topics[1]).map(|k| k.is_some()).unwrap_or_default() => { - Some(ServiceTask::RestoreServerKey( - r.entry.topics[1], - )) - } - // when key is not yet generated && this node should be master of this key generation session - 3 if is_processed_by_this_key_server(&*self.data.key_server_set, &*self.data.self_key_pair, &r.entry.topics[1]) => { - Some(ServiceTask::GenerateServerKey( - r.entry.topics[1], - r.entry.topics[2], - )) - }, - 3 => None, - l @ _ => { - warn!(target: "secretstore", "Ignoring ServerKeyRequested event with wrong number of params {}", l); - None - }, - }));*/ } /// Service thread procedure. @@ -350,28 +311,6 @@ impl ServiceContractListener { /// Publish server key. 
fn publish_server_key(data: &Arc, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> { data.contract.publish_server_key(server_key_id, server_key) - /*let server_key_hash = keccak(server_key); - let signed_server_key = data.self_key_pair.sign(&server_key_hash).map_err(|e| format!("{}", e))?; - let signed_server_key: Signature = signed_server_key.into_electrum().into(); - let transaction_data = data.contract.read().encode_server_key_generated_input(server_key_id.clone(), - server_key.to_vec(), - signed_server_key.v(), - signed_server_key.r().into(), - signed_server_key.s().into() - )?; - - let contract = data.contract.read(); - if contract.address != Default::default() { - if let Some(client) = data.client.upgrade() { - client.transact_contract( - contract.address.clone(), - transaction_data - ).map_err(|e| format!("{}", e))?; - } // else we will read this in the next refresh cycle - } - - Ok(()) - unimplemented!()*/ } } @@ -407,33 +346,6 @@ impl ChainNotify for ServiceContractListener { self.data.tasks_queue.push(::std::iter::once(ServiceTask::Retry)); self.data.last_retry.store(0, Ordering::Relaxed); } - - -/* if let (Some(client), Some(sync)) = (self.data.client.upgrade(), self.data.sync.upgrade()) { - // do nothing until synced - if sync.status().is_syncing(client.queue_info()) { - return; - } - - // update contract address from registry - if let Some(service_contract_addr) = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) { - if self.data.contract.read().address != service_contract_addr { - trace!(target: "secretstore", "{}: installing service contract from address {}", - self.data.self_key_pair.public(), service_contract_addr); - *self.data.contract.write() = SecretStoreService::new(service_contract_addr.clone()); - } - - // and process contract events - self.process_service_contract_events(&*client, service_contract_addr, enacted); - } - - // schedule retry if received enough blocks since last retry - // it maybe 
inaccurate when switching syncing/synced states, but that's ok - if self.data.last_retry.fetch_add(enacted_len, Ordering::Relaxed) >= RETRY_INTEVAL_BLOCKS { - self.data.tasks_queue.push(::std::iter::once(ServiceTask::Retry)); - self.data.last_retry.store(0, Ordering::Relaxed); - } - }*/ } } From 333e0e1637f76221f1941c7d00f436286ae55c68 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 23 Nov 2017 11:34:51 +0300 Subject: [PATCH 22/42] SecretStore: publish key only if requested through contract --- .../res/secretstore_service.json | 4 +- secret_store/src/listener/service_contract.rs | 64 +++++++++++++------ .../src/listener/service_contract_listener.rs | 7 +- 3 files changed, 49 insertions(+), 26 deletions(-) diff --git a/ethcore/native_contracts/res/secretstore_service.json b/ethcore/native_contracts/res/secretstore_service.json index fecf5ca14..37d45350b 100644 --- a/ethcore/native_contracts/res/secretstore_service.json +++ b/ethcore/native_contracts/res/secretstore_service.json @@ -1,6 +1,8 @@ [ {"constant":true,"inputs":[],"name":"serverKeyGenerationRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[{"name":"authority","type":"address"},{"name":"index","type":"uint256"}],"name":"getServerKeyGenerationRequest","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"uint256"},{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"}, + {"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getServerKeyId","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"}, {"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"},{"name":"v","type":"uint8"},{"name":"r","type":"bytes32"},{"name":"s","type":"bytes32"}],"name":"serverKeyGenerated","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, + 
{"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"getServerKeyThreshold","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"}, + {"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"authority","type":"address"}],"name":"getServerKeyConfirmationStatus","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"}, {"anonymous":false,"inputs":[{"indexed":true,"name":"serverKeyId","type":"bytes32"},{"indexed":true,"name":"threshold","type":"uint256"}],"name":"ServerKeyRequested","type":"event"} ] \ No newline at end of file diff --git a/secret_store/src/listener/service_contract.rs b/secret_store/src/listener/service_contract.rs index 3d5b25b72..cf716846d 100644 --- a/secret_store/src/listener/service_contract.rs +++ b/secret_store/src/listener/service_contract.rs @@ -187,10 +187,30 @@ impl ServiceContract for OnChainServiceContract { } fn publish_server_key(&self, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> { + // only publish if contract address is set && client is online + let contract = self.contract.read(); + if contract.address == Default::default() { + // it is not an error, because key could be generated even without contract + return Ok(()); + } + let client = match self.client.upgrade() { + Some(client) => client, + None => return Err("client is required to publish key".into()), + }; + + // only publish key if contract waits for publication + // failing is ok here - it could be that enough confirmations have been recevied + // or key has been requested using HTTP API + let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d)); + let self_address = public_to_address(self.self_key_pair.public()); + if contract.get_server_key_confirmation_status(&do_call, server_key_id.clone(), self_address).wait().unwrap_or(false) { + return Ok(()); + } + + // prepare transaction data let 
server_key_hash = keccak(server_key); let signed_server_key = self.self_key_pair.sign(&server_key_hash).map_err(|e| format!("{}", e))?; let signed_server_key: Signature = signed_server_key.into_electrum().into(); - let contract = self.contract.read(); let transaction_data = contract.encode_server_key_generated_input(server_key_id.clone(), server_key.to_vec(), signed_server_key.v(), @@ -198,13 +218,12 @@ impl ServiceContract for OnChainServiceContract { signed_server_key.s().into() )?; + // send transaction if contract.address != Default::default() { - if let Some(client) = self.client.upgrade() { - client.transact_contract( - contract.address.clone(), - transaction_data - ).map_err(|e| format!("{}", e))?; - } // else we will read this in the next refresh cycle + client.transact_contract( + contract.address.clone(), + transaction_data + ).map_err(|e| format!("{}", e))?; } Ok(()) @@ -218,24 +237,27 @@ impl Iterator for PendingRequestsIterator { if self.index >= self.length { return None; } + + let index = self.index.clone(); self.index = self.index + 1.into(); + let self_address = public_to_address(self.self_key_pair.public()); let do_call = |a, d| future::done(self.client.call_contract(BlockId::Latest, a, d)); - let key_generation_request = self.contract.get_server_key_generation_request(&do_call, - public_to_address(self.self_key_pair.public()), - (self.index - 1.into()).clone().into()).wait(); - let (server_key_id, threshold, is_confirmed) = match key_generation_request { - Ok((server_key_id, threshold, is_confirmed)) => { - (server_key_id, threshold, is_confirmed) - }, - Err(error) => { - warn!(target: "secretstore", "{}: call to get_server_key_generation_request failed: {}", + self.contract.get_server_key_id(&do_call, index).wait() + .and_then(|server_key_id| + self.contract.get_server_key_threshold(&do_call, server_key_id.clone()).wait() + .map(|threshold| (server_key_id, threshold))) + .and_then(|(server_key_id, threshold)| + 
self.contract.get_server_key_confirmation_status(&do_call, server_key_id.clone(), self_address).wait() + .map(|is_confirmed| (server_key_id, threshold, is_confirmed))) + .map(|(server_key_id, threshold, is_confirmed)| + Some((is_confirmed, ServiceTask::GenerateServerKey(server_key_id, threshold.into())))) + .map_err(|error| { + warn!(target: "secretstore", "{}: reading service contract request failed: {}", self.self_key_pair.public(), error); - return None; - }, - }; - - Some((is_confirmed, ServiceTask::GenerateServerKey(server_key_id, threshold.into()))) + () + }) + .unwrap_or(None) } } diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index e6b197f22..f11c5cce8 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -336,9 +336,10 @@ impl ChainNotify for ServiceContractListener { return; } + let reason = "enacted.len() != 0; qed"; self.process_service_contract_events( - enacted.first().expect("TODO").clone(), - enacted.last().expect("TODO").clone()); + enacted.first().expect(reason).clone(), + enacted.last().expect(reason).clone()); // schedule retry if received enough blocks since last retry // it maybe inaccurate when switching syncing/synced states, but that's ok @@ -351,8 +352,6 @@ impl ChainNotify for ServiceContractListener { impl ClusterSessionsListener for ServiceContractListener { fn on_session_removed(&self, session: Arc) { - // TODO: only start if session started via the contract - // only publish when the session is started by another node // when it is started by this node, it is published from process_service_task if !is_processed_by_this_key_server(&*self.data.key_server_set, &*self.data.self_key_pair, &session.id()) { From 14686f2652a5d5f6b665b9bef072353cf43f4d9e Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 24 Nov 2017 12:33:33 +0300 Subject: [PATCH 23/42] SecretStore: cli option to 
configure service contract --- parity/cli/mod.rs | 7 +++ parity/cli/tests/config.full.toml | 1 + parity/configuration.rs | 11 ++++- parity/secretstore.rs | 20 +++++++- secret_store/src/key_storage.rs | 1 + secret_store/src/lib.rs | 28 ++++++----- secret_store/src/listener/service_contract.rs | 48 ++++++++++++------- secret_store/src/types/all.rs | 11 +++++ 8 files changed, 94 insertions(+), 33 deletions(-) diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index c46965ef2..5e49890e0 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -551,6 +551,10 @@ usage! { "--no-acl-check", "Disable ACL check (useful for test environments).", + ARG arg_secretstore_contract: (String) = "none", or |c: &Config| otry!(c.secretstore).service_contract.clone(), + "--secretstore-contract=[SOURCE]", + "Secret Store Service contract source: none, registry (contract address is read from registry) or address.", + ARG arg_secretstore_nodes: (String) = "", or |c: &Config| otry!(c.secretstore).nodes.as_ref().map(|vec| vec.join(",")), "--secretstore-nodes=[NODES]", "Comma-separated list of other secret store cluster nodes in form NODE_PUBLIC_KEY_IN_HEX@NODE_IP_ADDR:NODE_PORT.", @@ -1088,6 +1092,7 @@ struct SecretStore { disable: Option, disable_http: Option, disable_acl_check: Option, + service_contract: Option, self_secret: Option, admin_public: Option, nodes: Option>, @@ -1488,6 +1493,7 @@ mod tests { flag_no_secretstore: false, flag_no_secretstore_http: false, flag_no_secretstore_acl_check: false, + arg_secretstore_contract: "none".into(), arg_secretstore_secret: None, arg_secretstore_admin_public: None, arg_secretstore_nodes: "".into(), @@ -1730,6 +1736,7 @@ mod tests { disable: None, disable_http: None, disable_acl_check: None, + service_contract: None, self_secret: None, admin_public: None, nodes: None, diff --git a/parity/cli/tests/config.full.toml b/parity/cli/tests/config.full.toml index a49717085..9b780c757 100644 --- a/parity/cli/tests/config.full.toml +++ 
b/parity/cli/tests/config.full.toml @@ -80,6 +80,7 @@ pass = "test_pass" disable = false disable_http = false disable_acl_check = false +service_contract = "none" nodes = [] http_interface = "local" http_port = 8082 diff --git a/parity/configuration.rs b/parity/configuration.rs index 7cb3a84bd..e78cc290f 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -45,7 +45,7 @@ use ethcore_logger::Config as LogConfig; use dir::{self, Directories, default_hypervisor_path, default_local_path, default_data_path}; use dapps::Configuration as DappsConfiguration; use ipfs::Configuration as IpfsConfiguration; -use secretstore::{Configuration as SecretStoreConfiguration, NodeSecretKey}; +use secretstore::{NodeSecretKey, Configuration as SecretStoreConfiguration, ContractAddress as SecretStoreContractAddress}; use updater::{UpdatePolicy, UpdateFilter, ReleaseTrack}; use run::RunCmd; use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, KillBlockchain, ExportState, DataFormat}; @@ -606,6 +606,7 @@ impl Configuration { enabled: self.secretstore_enabled(), http_enabled: self.secretstore_http_enabled(), acl_check_enabled: self.secretstore_acl_check_enabled(), + service_contract_address: self.secretstore_service_contract_address()?, self_secret: self.secretstore_self_secret()?, nodes: self.secretstore_nodes()?, interface: self.secretstore_interface(), @@ -1076,6 +1077,14 @@ impl Configuration { !self.args.flag_no_secretstore_acl_check } + fn secretstore_service_contract_address(&self) -> Result, String> { + Ok(match self.args.arg_secretstore_contract.as_ref() { + "none" => None, + "registry" => Some(SecretStoreContractAddress::Registry), + a @ _ => Some(SecretStoreContractAddress::Address(a.parse().map_err(|e| format!("{}", e))?)), + }) + } + fn ui_enabled(&self) -> bool { if self.args.flag_force_ui { return true; diff --git a/parity/secretstore.rs b/parity/secretstore.rs index 7e36ef5e0..06772e74a 100644 --- a/parity/secretstore.rs +++ 
b/parity/secretstore.rs @@ -24,8 +24,8 @@ use ethsync::SyncProvider; use helpers::replace_home; use util::Address; -#[derive(Debug, PartialEq, Clone)] /// This node secret key. +#[derive(Debug, PartialEq, Clone)] pub enum NodeSecretKey { /// Stored as plain text in configuration file. Plain(Secret), @@ -33,6 +33,15 @@ pub enum NodeSecretKey { KeyStore(Address), } +/// Secret store service contract address. +#[derive(Debug, PartialEq, Clone)] +pub enum ContractAddress { + /// Contract address is read from registry. + Registry, + /// Contract address is specified. + Address(Address), +} + #[derive(Debug, PartialEq, Clone)] /// Secret store configuration pub struct Configuration { @@ -42,6 +51,8 @@ pub struct Configuration { pub http_enabled: bool, /// Is ACL check enabled. pub acl_check_enabled: bool, + /// Service contract address. + pub service_contract_address: Option, /// This node secret. pub self_secret: Option, /// Other nodes IDs + addresses. @@ -93,7 +104,7 @@ mod server { use ethcore_secretstore; use ethkey::KeyPair; use ansi_term::Colour::Red; - use super::{Configuration, Dependencies, NodeSecretKey}; + use super::{Configuration, Dependencies, NodeSecretKey, ContractAddress}; /// Key server pub struct KeyServer { @@ -137,6 +148,10 @@ mod server { address: conf.http_interface.clone(), port: conf.http_port, }) } else { None }, + service_contract_address: conf.service_contract_address.map(|c| match c { + ContractAddress::Registry => ethcore_secretstore::ContractAddress::Registry, + ContractAddress::Address(address) => ethcore_secretstore::ContractAddress::Address(address), + }), data_path: conf.data_path.clone(), acl_check_enabled: conf.acl_check_enabled, cluster_config: ethcore_secretstore::ClusterConfiguration { @@ -175,6 +190,7 @@ impl Default for Configuration { enabled: true, http_enabled: true, acl_check_enabled: true, + service_contract_address: None, self_secret: None, admin_public: None, nodes: BTreeMap::new(), diff --git 
a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index da698e5e0..020dde700 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -433,6 +433,7 @@ pub mod tests { let tempdir = TempDir::new("").unwrap(); let config = ServiceConfiguration { listener_address: None, + service_contract_address: None, acl_check_enabled: true, data_path: tempdir.path().display().to_string(), cluster_config: ClusterConfiguration { diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 3bf6dde06..e996c8295 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -66,7 +66,7 @@ use ethcore::client::Client; use ethsync::SyncProvider; pub use types::all::{ServerKeyId, EncryptedDocumentKey, RequestSignature, Public, - Error, NodeAddress, ServiceConfiguration, ClusterConfiguration}; + Error, NodeAddress, ContractAddress, ServiceConfiguration, ClusterConfiguration}; pub use traits::{NodeKeyPair, KeyServer}; pub use self::node_key_pair::{PlainNodeKeyPair, KeyStoreNodeKeyPair}; @@ -81,20 +81,24 @@ pub fn start(client: Arc, sync: Arc, self_key_pair: Arc Some(listener::http_listener::KeyServerHttpListener::start(listener_address, key_server.clone())?), None => None, }; - let service_contract = Arc::new(listener::service_contract::OnChainServiceContract::new(&client, &sync, self_key_pair.clone())); - let contract_listener = listener::service_contract_listener::ServiceContractListener::new(listener::service_contract_listener::ServiceContractListenerParams { - contract: service_contract, - key_server: key_server.clone(), - self_key_pair: self_key_pair, - key_server_set: key_server_set, - cluster: cluster, - key_storage: key_storage, + let contract_listener = config.service_contract_address.map(|service_contract_address| { + let service_contract = Arc::new(listener::service_contract::OnChainServiceContract::new(&client, &sync, service_contract_address, self_key_pair.clone())); + let contract_listener = 
listener::service_contract_listener::ServiceContractListener::new(listener::service_contract_listener::ServiceContractListenerParams { + contract: service_contract, + key_server: key_server.clone(), + self_key_pair: self_key_pair, + key_server_set: key_server_set, + cluster: cluster, + key_storage: key_storage, + }); + client.add_notify(contract_listener.clone()); + contract_listener }); - client.add_notify(contract_listener.clone()); - let listener = listener::Listener::new(key_server, http_listener, Some(contract_listener)); - Ok(Box::new(listener)) + Ok(Box::new(listener::Listener::new(key_server, http_listener, contract_listener))) } diff --git a/secret_store/src/listener/service_contract.rs b/secret_store/src/listener/service_contract.rs index cf716846d..28167e487 100644 --- a/secret_store/src/listener/service_contract.rs +++ b/secret_store/src/listener/service_contract.rs @@ -26,7 +26,7 @@ use hash::keccak; use bigint::hash::H256; use bigint::prelude::U256; use listener::service_contract_listener::ServiceTask; -use {ServerKeyId, NodeKeyPair}; +use {ServerKeyId, NodeKeyPair, ContractAddress}; /// Name of the SecretStore contract in the registry. const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service"; @@ -60,6 +60,8 @@ pub struct OnChainServiceContract { sync: Weak, /// This node key pair. self_key_pair: Arc, + /// Contract addresss. + address: ContractAddress, /// Contract. contract: RwLock>, } @@ -80,19 +82,27 @@ struct PendingRequestsIterator { impl OnChainServiceContract { /// Create new on-chain service contract. 
- pub fn new(client: &Arc, sync: &Arc, self_key_pair: Arc) -> Self { - let contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) - .map(|address| { + pub fn new(client: &Arc, sync: &Arc, address: ContractAddress, self_key_pair: Arc) -> Self { + let contract_addr = match &address { + &ContractAddress::Registry => client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) + .map(|address| { + trace!(target: "secretstore", "{}: installing service contract from address {}", + self_key_pair.public(), address); + address + }) + .unwrap_or_default(), + &ContractAddress::Address(ref address) => { trace!(target: "secretstore", "{}: installing service contract from address {}", self_key_pair.public(), address); - address - }) - .unwrap_or_default(); + address.clone() + }, + }; OnChainServiceContract { client: Arc::downgrade(client), sync: Arc::downgrade(sync), self_key_pair: self_key_pair, + address: address, contract: RwLock::new(Arc::new(SecretStoreService::new(contract_addr))), } } @@ -100,18 +110,20 @@ impl OnChainServiceContract { impl ServiceContract for OnChainServiceContract { fn update(&self) { - if let (Some(client), Some(sync)) = (self.client.upgrade(), self.sync.upgrade()) { - // do nothing until synced - if sync.status().is_syncing(client.queue_info()) { - return; - } + if let &ContractAddress::Registry = &self.address { + if let (Some(client), Some(sync)) = (self.client.upgrade(), self.sync.upgrade()) { + // do nothing until synced + if sync.status().is_syncing(client.queue_info()) { + return; + } - // update contract address from registry - let service_contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()).unwrap_or_default(); - if self.contract.read().address != service_contract_addr { - trace!(target: "secretstore", "{}: installing service contract from address {}", - self.self_key_pair.public(), service_contract_addr); - *self.contract.write() = 
Arc::new(SecretStoreService::new(service_contract_addr)); + // update contract address from registry + let service_contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()).unwrap_or_default(); + if self.contract.read().address != service_contract_addr { + trace!(target: "secretstore", "{}: installing service contract from address {}", + self.self_key_pair.public(), service_contract_addr); + *self.contract.write() = Arc::new(SecretStoreService::new(service_contract_addr)); + } } } } diff --git a/secret_store/src/types/all.rs b/secret_store/src/types/all.rs index 7c746f2a0..8738ba032 100644 --- a/secret_store/src/types/all.rs +++ b/secret_store/src/types/all.rs @@ -61,11 +61,22 @@ pub struct NodeAddress { pub port: u16, } +/// Contract address. +#[derive(Debug, Clone)] +pub enum ContractAddress { + /// Address is read from registry. + Registry, + /// Address is specified. + Address(ethkey::Address), +} + /// Secret store configuration #[derive(Debug)] pub struct ServiceConfiguration { /// HTTP listener address. If None, HTTP API is disabled. pub listener_address: Option, + /// Service contract address. If None, service contract API is disabled. + pub service_contract_address: Option, /// Is ACL check enabled. If false, everyone has access to all keys. Useful for tests only. 
pub acl_check_enabled: bool, /// Data directory path for secret store From 009e350fc7b44005a45478ed0cae509cf03651a8 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 24 Nov 2017 13:17:08 +0300 Subject: [PATCH 24/42] SecretStore: ACL checker now requires blockchain to be fully synchronized --- secret_store/src/acl_storage.rs | 41 +++++++++++++++++++++------------ secret_store/src/lib.rs | 2 +- 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/secret_store/src/acl_storage.rs b/secret_store/src/acl_storage.rs index c06929e94..8d629f022 100644 --- a/secret_store/src/acl_storage.rs +++ b/secret_store/src/acl_storage.rs @@ -20,6 +20,7 @@ use futures::{future, Future}; use parking_lot::{Mutex, RwLock}; use ethkey::public_to_address; use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; +use ethsync::SyncProvider; use native_contracts::SecretStoreAclStorage; use bigint::hash::H256; use util::Address; @@ -44,6 +45,8 @@ pub struct OnChainAclStorage { struct CachedContract { /// Blockchain client. client: Weak, + /// Sync provider. + sync: Weak, /// Contract address. contract_addr: Option
, /// Contract at given address. @@ -57,9 +60,9 @@ pub struct DummyAclStorage { } impl OnChainAclStorage { - pub fn new(client: &Arc) -> Arc { + pub fn new(client: &Arc, sync: &Arc) -> Arc { let acl_storage = Arc::new(OnChainAclStorage { - contract: Mutex::new(CachedContract::new(client)), + contract: Mutex::new(CachedContract::new(client, sync)), }); client.add_notify(acl_storage.clone()); acl_storage @@ -81,9 +84,10 @@ impl ChainNotify for OnChainAclStorage { } impl CachedContract { - pub fn new(client: &Arc) -> Self { + pub fn new(client: &Arc, sync: &Arc) -> Self { CachedContract { client: Arc::downgrade(client), + sync: Arc::downgrade(sync), contract_addr: None, contract: None, } @@ -105,19 +109,26 @@ impl CachedContract { } pub fn check(&mut self, public: &Public, document: &ServerKeyId) -> Result { - match self.contract.as_ref() { - Some(contract) => { - let address = public_to_address(&public); - let do_call = |a, d| future::done( - self.client - .upgrade() - .ok_or("Calling contract without client".into()) - .and_then(|c| c.call_contract(BlockId::Latest, a, d))); - contract.check_permissions(do_call, address, document.clone()) - .map_err(|err| Error::Internal(err)) - .wait() + match (self.client.upgrade(), self.sync.upgrade()) { + (Some(client), Some(sync)) => { + // we can not tell if access to document is allowed until fully synchronized + if sync.status().is_syncing(client.queue_info()) { + return Err(Error::Internal("Trying to check access by non-synchronized client".to_owned())); + } + + // call contract to check accesss + match self.contract.as_ref() { + Some(contract) => { + let address = public_to_address(&public); + let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d)); + contract.check_permissions(do_call, address, document.clone()) + .map_err(|err| Error::Internal(err)) + .wait() + }, + None => Err(Error::Internal("ACL checker contract is not configured".to_owned())), + } }, - None => Err(Error::Internal("ACL checker 
contract is not configured".to_owned())), + _ => Err(Error::Internal("Calling ACL contract without client".into())), } } } diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index e996c8295..662495021 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -73,7 +73,7 @@ pub use self::node_key_pair::{PlainNodeKeyPair, KeyStoreNodeKeyPair}; /// Start new key server instance pub fn start(client: Arc, sync: Arc, self_key_pair: Arc, config: ServiceConfiguration) -> Result, Error> { let acl_storage: Arc = if config.acl_check_enabled { - acl_storage::OnChainAclStorage::new(&client/*, &sync*/) // TODO: return false until fully synced + acl_storage::OnChainAclStorage::new(&client, &sync) } else { Arc::new(acl_storage::DummyAclStorage::default()) }; From 37973f9112c605a03f30f6e4a0825f74baae15fb Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 24 Nov 2017 13:21:39 +0300 Subject: [PATCH 25/42] SecretStore: do not update servers set until fully synchronized --- secret_store/src/key_server_set.rs | 17 +++++++++++++---- secret_store/src/lib.rs | 2 +- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/secret_store/src/key_server_set.rs b/secret_store/src/key_server_set.rs index 8a94e9f42..04647db41 100644 --- a/secret_store/src/key_server_set.rs +++ b/secret_store/src/key_server_set.rs @@ -21,6 +21,7 @@ use futures::{future, Future}; use parking_lot::Mutex; use ethcore::filter::Filter; use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; +use ethsync::SyncProvider; use native_contracts::KeyServerSet as KeyServerSetContract; use hash::keccak; use bigint::hash::H256; @@ -56,6 +57,8 @@ pub struct OnChainKeyServerSet { struct CachedContract { /// Blockchain client. client: Weak, + /// Sync provider. + sync: Weak, /// Contract address. contract_addr: Option
, /// Active set of key servers. @@ -63,8 +66,8 @@ struct CachedContract { } impl OnChainKeyServerSet { - pub fn new(client: &Arc, key_servers: BTreeMap) -> Result, Error> { - let mut cached_contract = CachedContract::new(client, key_servers)?; + pub fn new(client: &Arc, sync: &Arc, key_servers: BTreeMap) -> Result, Error> { + let mut cached_contract = CachedContract::new(client, sync, key_servers)?; let key_server_contract_address = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned()); // only initialize from contract if it is installed. otherwise - use default nodes // once the contract is installed, all default nodes are lost (if not in the contract' set) @@ -95,9 +98,10 @@ impl ChainNotify for OnChainKeyServerSet { } impl CachedContract { - pub fn new(client: &Arc, key_servers: BTreeMap) -> Result { + pub fn new(client: &Arc, sync: &Arc, key_servers: BTreeMap) -> Result { Ok(CachedContract { client: Arc::downgrade(client), + sync: Arc::downgrade(sync), contract_addr: None, key_servers: key_servers.into_iter() .map(|(p, addr)| { @@ -110,7 +114,12 @@ impl CachedContract { } pub fn update(&mut self, enacted: Vec, retracted: Vec) { - if let Some(client) = self.client.upgrade() { + if let (Some(client), Some(sync)) = (self.client.upgrade(), self.sync.upgrade()) { + // do not update initial server set until fully synchronized + if sync.status().is_syncing(client.queue_info()) { + return; + } + let new_contract_addr = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned()); // new contract installed => read nodes set from the contract diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 662495021..ecb446892 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -77,7 +77,7 @@ pub fn start(client: Arc, sync: Arc, self_key_pair: Arc Date: Tue, 19 Dec 2017 11:02:13 +0300 Subject: [PATCH 26/42] SecretStore: return error 404 when there's no key shares for given key on all nodes --- 
.../key_version_negotiation_session.rs | 25 +++++++++++++++++-- .../src/key_server_cluster/cluster.rs | 15 ++++++----- secret_store/src/types/all.rs | 1 + 3 files changed, 31 insertions(+), 10 deletions(-) diff --git a/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs b/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs index b12269fe7..a7efccca5 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs +++ b/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs @@ -227,7 +227,7 @@ impl SessionImpl where T: SessionTransport { // try to complete session Self::try_complete(&self.core, &mut *data); if no_confirmations_required && data.state != SessionState::Finished { - return Err(Error::ConsensusUnreachable); + return Err(Error::MissingKeyShare); } else if data.state == SessionState::Finished { return Ok(()); } @@ -454,6 +454,8 @@ impl FastestResultComputer { impl SessionResultComputer for FastestResultComputer { fn compute_result(&self, threshold: Option, confirmations: &BTreeSet, versions: &BTreeMap>) -> Option> { match self.threshold.or(threshold) { + // if there's no versions at all && we're not waiting for confirmations anymore + _ if confirmations.is_empty() && versions.is_empty() => Some(Err(Error::MissingKeyShare)), // if we have key share on this node Some(threshold) => { // select version this node have, with enough participants @@ -489,6 +491,9 @@ impl SessionResultComputer for LargestSupportResultComputer { if !confirmations.is_empty() { return None; } + if versions.is_empty() { + return Some(Err(Error::MissingKeyShare)); + } versions.iter() .max_by_key(|&(_, ref n)| n.len()) @@ -507,7 +512,8 @@ mod tests { use key_server_cluster::cluster::tests::DummyCluster; use key_server_cluster::admin_sessions::ShareChangeSessionMeta; use key_server_cluster::message::{Message, KeyVersionNegotiationMessage, RequestKeyVersions, 
KeyVersions}; - use super::{SessionImpl, SessionTransport, SessionParams, FastestResultComputer, SessionState}; + use super::{SessionImpl, SessionTransport, SessionParams, FastestResultComputer, LargestSupportResultComputer, + SessionResultComputer, SessionState}; struct DummyTransport { cluster: Arc, @@ -722,4 +728,19 @@ mod tests { // we can't be sure that node has given key version because previous ShareAdd session could fail assert!(ml.session(0).data.lock().state != SessionState::Finished); } + + #[test] + fn fastest_computer_returns_missing_share_if_no_versions_returned() { + let computer = FastestResultComputer { + self_node_id: Default::default(), + threshold: None, + }; + assert_eq!(computer.compute_result(Some(10), &Default::default(), &Default::default()), Some(Err(Error::MissingKeyShare))); + } + + #[test] + fn largest_computer_returns_missing_share_if_no_versions_returned() { + let computer = LargestSupportResultComputer; + assert_eq!(computer.compute_result(Some(10), &Default::default(), &Default::default()), Some(Err(Error::MissingKeyShare))); + } } diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 0a975c275..7861a8c91 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -452,8 +452,8 @@ impl ClusterCore { let is_master_node = meta.self_node_id == meta.master_node_id; if is_master_node && session.is_finished() { data.sessions.negotiation_sessions.remove(&session.id()); - if let Ok((version, master)) = session.wait() { - match session.continue_action() { + match session.wait() { + Ok((version, master)) => match session.continue_action() { Some(ContinueAction::Decrypt(session, is_shadow_decryption)) => { let initialization_error = if data.self_key_pair.public() == &master { session.initialize(version, is_shadow_decryption) @@ -479,19 +479,18 @@ impl ClusterCore { } }, None => (), - } - } else { - match 
session.continue_action() { + }, + Err(error) => match session.continue_action() { Some(ContinueAction::Decrypt(session, _)) => { data.sessions.decryption_sessions.remove(&session.id()); - session.on_session_error(&meta.self_node_id, Error::ConsensusUnreachable); + session.on_session_error(&meta.self_node_id, error); }, Some(ContinueAction::Sign(session, _)) => { data.sessions.signing_sessions.remove(&session.id()); - session.on_session_error(&meta.self_node_id, Error::ConsensusUnreachable); + session.on_session_error(&meta.self_node_id, error); }, None => (), - } + }, } } } diff --git a/secret_store/src/types/all.rs b/secret_store/src/types/all.rs index 7c746f2a0..364694d02 100644 --- a/secret_store/src/types/all.rs +++ b/secret_store/src/types/all.rs @@ -136,6 +136,7 @@ impl From for Error { fn from(err: key_server_cluster::Error) -> Self { match err { key_server_cluster::Error::AccessDenied => Error::AccessDenied, + key_server_cluster::Error::MissingKeyShare => Error::DocumentNotFound, _ => Error::Internal(err.into()), } } From 794de9f74306e23909352c2b44d87cc1cb24b4ea Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 20 Dec 2017 14:50:46 +0300 Subject: [PATCH 27/42] SecretStore: fixing grumbles (part1) --- parity/cli/mod.rs | 2 +- parity/configuration.rs | 2 +- .../key_version_negotiation_session.rs | 3 +- .../admin_sessions/share_add_session.rs | 74 ++++++++++--------- .../key_server_cluster/cluster_sessions.rs | 56 +++++--------- secret_store/src/lib.rs | 2 +- secret_store/src/listener/http_listener.rs | 4 +- secret_store/src/listener/service_contract.rs | 16 ++-- .../src/listener/service_contract_listener.rs | 51 +++++++------ 9 files changed, 100 insertions(+), 110 deletions(-) diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 5e49890e0..330abe936 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -553,7 +553,7 @@ usage! 
{ ARG arg_secretstore_contract: (String) = "none", or |c: &Config| otry!(c.secretstore).service_contract.clone(), "--secretstore-contract=[SOURCE]", - "Secret Store Service contract source: none, registry (contract address is read from registry) or address.", + "Secret Store Service contract address source: none, registry (contract address is read from registry) or address.", ARG arg_secretstore_nodes: (String) = "", or |c: &Config| otry!(c.secretstore).nodes.as_ref().map(|vec| vec.join(",")), "--secretstore-nodes=[NODES]", diff --git a/parity/configuration.rs b/parity/configuration.rs index ec35020f5..b9a614c8b 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -1082,7 +1082,7 @@ impl Configuration { Ok(match self.args.arg_secretstore_contract.as_ref() { "none" => None, "registry" => Some(SecretStoreContractAddress::Registry), - a @ _ => Some(SecretStoreContractAddress::Address(a.parse().map_err(|e| format!("{}", e))?)), + a => Some(SecretStoreContractAddress::Address(a.parse().map_err(|e| format!("{}", e))?)), }) } diff --git a/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs b/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs index f86275f3a..105df299c 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs +++ b/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs @@ -203,9 +203,8 @@ impl SessionImpl where T: SessionTransport { self.core.completed.wait(&mut data); } - data.result.as_ref() + data.result.clone() .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") - .clone() } /// Initialize session. 
diff --git a/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs b/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs index 3156d6f87..39fd70cd4 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs +++ b/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs @@ -86,16 +86,8 @@ struct SessionData { pub version: Option, /// Consensus session. pub consensus_session: Option>, - /// NewKeyShare: threshold. - pub key_share_threshold: Option, - /// NewKeyShare: author. - pub key_share_author: Option, - /// NewKeyShare: joint public. - pub key_share_joint_public: Option, - /// NewKeyShare: Common (shared) encryption point. - pub key_share_common_point: Option, - /// NewKeyShare: Encrypted point. - pub key_share_encrypted_point: Option, + /// NewKeyShare (for nodes being added). + pub new_key_share: Option, /// Nodes id numbers. pub id_numbers: Option>>, /// Secret subshares received from nodes. @@ -104,6 +96,20 @@ struct SessionData { pub result: Option>, } +/// New key share. +struct NewKeyShare { + /// NewKeyShare: threshold. + pub threshold: usize, + /// NewKeyShare: author. + pub author: Public, + /// NewKeyShare: joint public. + pub joint_public: Public, + /// NewKeyShare: Common (shared) encryption point. + pub common_point: Option, + /// NewKeyShare: Encrypted point. + pub encrypted_point: Option, +} + /// Session state. 
#[derive(Debug, PartialEq)] enum SessionState { @@ -167,11 +173,7 @@ impl SessionImpl where T: SessionTransport { state: SessionState::ConsensusEstablishing, version: None, consensus_session: None, - key_share_threshold: None, - key_share_author: None, - key_share_joint_public: None, - key_share_common_point: None, - key_share_encrypted_point: None, + new_key_share: None, id_numbers: None, secret_subshares: None, result: None, @@ -427,9 +429,7 @@ impl SessionImpl where T: SessionTransport { } // we only expect this message once - if data.key_share_threshold.is_some() || data.key_share_author.is_some() || - data.key_share_common_point.is_some() || data.key_share_encrypted_point.is_some() || - data.key_share_joint_public.is_some() { + if data.new_key_share.is_some() { return Err(Error::InvalidStateForRequest); } @@ -444,11 +444,13 @@ impl SessionImpl where T: SessionTransport { // update data data.state = SessionState::WaitingForKeysDissemination; - data.key_share_threshold = Some(message.threshold); - data.key_share_author = Some(message.author.clone().into()); - data.key_share_joint_public = Some(message.joint_public.clone().into()); - data.key_share_common_point = message.common_point.clone().map(Into::into); - data.key_share_encrypted_point = message.encrypted_point.clone().map(Into::into); + data.new_key_share = Some(NewKeyShare { + threshold: message.threshold, + author: message.author.clone().into(), + joint_public: message.joint_public.clone().into(), + common_point: message.common_point.clone().map(Into::into), + encrypted_point: message.encrypted_point.clone().map(Into::into), + }); let id_numbers = data.id_numbers.as_mut() .expect("common key share data is expected after initialization; id_numers are filled during initialization; qed"); @@ -667,8 +669,9 @@ impl SessionImpl where T: SessionTransport { let id_numbers = data.id_numbers.as_ref().expect(explanation); let secret_subshares = data.secret_subshares.as_ref().expect(explanation); let threshold = 
core.key_share.as_ref().map(|ks| ks.threshold) - .unwrap_or_else(|| *data.key_share_threshold.as_ref() - .expect("computation occurs after receiving key share threshold if not having one already; qed")); + .unwrap_or_else(|| data.new_key_share.as_ref() + .expect("computation occurs after receiving key share threshold if not having one already; qed") + .threshold); let explanation = "id_numbers are checked to have Some value for every consensus group node when consensus is establishe; qed"; let sender_id_number = id_numbers[sender].as_ref().expect(explanation); @@ -694,16 +697,17 @@ impl SessionImpl where T: SessionTransport { let refreshed_key_version = DocumentKeyShareVersion::new(id_numbers.clone().into_iter().map(|(k, v)| (k.clone(), v.expect("id_numbers are checked to have Some value for every consensus group node when consensus is establishe; qed"))).collect(), secret_share); - let mut refreshed_key_share = core.key_share.as_ref().cloned().unwrap_or_else(|| DocumentKeyShare { - author: data.key_share_author.clone() - .expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed"), - threshold: data.key_share_threshold.clone() - .expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed"), - public: data.key_share_joint_public.clone() - .expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed"), - common_point: data.key_share_common_point.clone(), - encrypted_point: data.key_share_encrypted_point.clone(), - versions: Vec::new(), + let mut refreshed_key_share = core.key_share.as_ref().cloned().unwrap_or_else(|| { + let new_key_share = data.new_key_share.as_ref() + .expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed"); + DocumentKeyShare { + author: new_key_share.author.clone(), + threshold: new_key_share.threshold, + public: new_key_share.joint_public.clone(), 
+ common_point: new_key_share.common_point.clone(), + encrypted_point: new_key_share.encrypted_point.clone(), + versions: Vec::new(), + } }); refreshed_key_share.versions.push(refreshed_key_version); diff --git a/secret_store/src/key_server_cluster/cluster_sessions.rs b/secret_store/src/key_server_cluster/cluster_sessions.rs index 2e83c407d..cd07045df 100644 --- a/secret_store/src/key_server_cluster/cluster_sessions.rs +++ b/secret_store/src/key_server_cluster/cluster_sessions.rs @@ -294,25 +294,7 @@ impl ClusterSessionsContainer where S: ClusterSession, SC: C queue: VecDeque::new(), }; sessions.insert(session_id, queued_session); - - // notify listeners - let mut listeners = self.listeners.lock(); - let mut listener_index = 0; - loop { - if listener_index >= listeners.len() { - break; - } - - match listeners[listener_index].upgrade() { - Some(listener) => { - listener.on_session_inserted(session.clone()); - listener_index += 1; - }, - None => { - listeners.swap_remove(listener_index); - }, - } - } + self.notify_listeners(|l| l.on_session_inserted(session.clone())); Ok(session) } @@ -320,25 +302,7 @@ impl ClusterSessionsContainer where S: ClusterSession, SC: C pub fn remove(&self, session_id: &S::Id) { if let Some(session) = self.sessions.write().remove(session_id) { self.container_state.lock().on_session_completed(); - - // notify listeners - let mut listeners = self.listeners.lock(); - let mut listener_index = 0; - loop { - if listener_index >= listeners.len() { - break; - } - - match listeners[listener_index].upgrade() { - Some(listener) => { - listener.on_session_removed(session.session.clone()); - listener_index += 1; - }, - None => { - listeners.swap_remove(listener_index); - }, - } - } + self.notify_listeners(|l| l.on_session_removed(session.session.clone())); } } @@ -385,6 +349,22 @@ impl ClusterSessionsContainer where S: ClusterSession, SC: C } } } + + fn notify_listeners) -> ()>(&self, callback: F) { + let mut listeners = self.listeners.lock(); + let mut 
listener_index = 0; + while listener_index < listeners.len() { + match listeners[listener_index].upgrade() { + Some(listener) => { + callback(&*listener); + listener_index += 1; + }, + None => { + listeners.swap_remove(listener_index); + }, + } + } + } } impl ClusterSessionsContainer where S: ClusterSession, SC: ClusterSessionCreator, SessionId: From { diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index ecb446892..b2139c000 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -77,7 +77,7 @@ pub fn start(client: Arc, sync: Arc, self_key_pair: Arc, sync: &Arc, address: ContractAddress, self_key_pair: Arc) -> Self { - let contract_addr = match &address { - &ContractAddress::Registry => client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) + let contract_addr = match address { + ContractAddress::Registry => client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) .map(|address| { trace!(target: "secretstore", "{}: installing service contract from address {}", self_key_pair.public(), address); address }) .unwrap_or_default(), - &ContractAddress::Address(ref address) => { + ContractAddress::Address(ref address) => { trace!(target: "secretstore", "{}: installing service contract from address {}", self_key_pair.public(), address); address.clone() @@ -231,12 +231,10 @@ impl ServiceContract for OnChainServiceContract { )?; // send transaction - if contract.address != Default::default() { - client.transact_contract( - contract.address.clone(), - transaction_data - ).map_err(|e| format!("{}", e))?; - } + client.transact_contract( + contract.address.clone(), + transaction_data + ).map_err(|e| format!("{}", e))?; Ok(()) } diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index f11c5cce8..d9567125c 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -31,11 +31,11 @@ use 
key_storage::KeyStorage; use listener::service_contract::ServiceContract; use {ServerKeyId, NodeKeyPair, KeyServer}; -/// Retry interval (in blocks). Every RETRY_INTEVAL_BLOCKS blocks each KeyServer reads pending requests from +/// Retry interval (in blocks). Every RETRY_INTERVAL_BLOCKS blocks each KeyServer reads pending requests from /// service contract && tries to re-execute. The reason to have this mechanism is primarily because keys /// servers set change takes a lot of time + there could be some races, when blocks are coming to different /// KS at different times. This isn't intended to fix && respond to general session errors! -const RETRY_INTEVAL_BLOCKS: usize = 30; +const RETRY_INTERVAL_BLOCKS: usize = 30; /// Max failed retry requests (in single retry interval). The reason behind this constant is that if several /// pending requests have failed, then most probably other will fail too. @@ -255,7 +255,7 @@ impl ServiceContractListener { // only process request, which haven't been processed recently // there could be a lag when we've just generated server key && retrying on the same block // (or before our tx is mined) - state is not updated yet - if retry_data.generated_keys.contains(&server_key_id){ + if retry_data.generated_keys.contains(&server_key_id) { continue; } @@ -343,7 +343,7 @@ impl ChainNotify for ServiceContractListener { // schedule retry if received enough blocks since last retry // it maybe inaccurate when switching syncing/synced states, but that's ok - if self.data.last_retry.fetch_add(enacted_len, Ordering::Relaxed) >= RETRY_INTEVAL_BLOCKS { + if self.data.last_retry.fetch_add(enacted_len, Ordering::Relaxed) >= RETRY_INTERVAL_BLOCKS { self.data.tasks_queue.push(::std::iter::once(ServiceTask::Retry)); self.data.last_retry.store(0, Ordering::Relaxed); } @@ -356,22 +356,17 @@ impl ClusterSessionsListener for ServiceContractListener { // when it is started by this node, it is published from process_service_task if 
!is_processed_by_this_key_server(&*self.data.key_server_set, &*self.data.self_key_pair, &session.id()) { // by this time sesion must already be completed - either successfully, or not - debug_assert!(session.is_finished()); + assert!(session.is_finished()); // ignore result - the only thing that we can do is to log the error - let _ = session.wait(Some(Default::default())) + match session.wait(Some(Default::default())) .map_err(|e| format!("{}", e)) - .and_then(|server_key| Self::publish_server_key(&self.data, &session.id(), &server_key)) - .map(|_| { - trace!(target: "secretstore", "{}: completed foreign GenerateServerKey({}) request", - self.data.self_key_pair.public(), session.id()); - () - }) - .map_err(|error| { - warn!(target: "secretstore", "{}: failed to process GenerateServerKey({}) request with: {}", - self.data.self_key_pair.public(), session.id(), error); - error - }); + .and_then(|server_key| Self::publish_server_key(&self.data, &session.id(), &server_key)) { + Ok(_) => trace!(target: "secretstore", "{}: completed foreign GenerateServerKey({}) request", + self.data.self_key_pair.public(), session.id()), + Err(error) => warn!(target: "secretstore", "{}: failed to process GenerateServerKey({}) request with: {}", + self.data.self_key_pair.public(), session.id(), error), + } } } } @@ -417,9 +412,12 @@ impl TasksQueue { fn is_processed_by_this_key_server(key_server_set: &KeyServerSet, self_key_pair: &NodeKeyPair, server_key_id: &H256) -> bool { let servers = key_server_set.get(); let total_servers_count = servers.len(); - if total_servers_count == 0 { - return false; + match total_servers_count { + 0 => return false, + 1 => return true, + _ => (), } + let this_server_index = match servers.keys().enumerate().find(|&(_, s)| s == self_key_pair.public()) { Some((index, _)) => index, None => return false, @@ -480,13 +478,24 @@ mod tests { } #[test] - fn is_not_processed_by_this_key_server_when_not_a_part_of_servers_set() { + fn 
is_processed_by_this_key_server_with_single_server() { + let self_key_pair = Random.generate().unwrap(); assert_eq!(is_processed_by_this_key_server( + &MapKeyServerSet::new(vec![ + (self_key_pair.public().clone(), "127.0.0.1:8080".parse().unwrap()) + ].into_iter().collect()), + &PlainNodeKeyPair::new(self_key_pair), + &Default::default()), true); + } + + #[test] + fn is_not_processed_by_this_key_server_when_not_a_part_of_servers_set() { + assert!(is_processed_by_this_key_server( &MapKeyServerSet::new(vec![ (Random.generate().unwrap().public().clone(), "127.0.0.1:8080".parse().unwrap()) ].into_iter().collect()), &PlainNodeKeyPair::new(Random.generate().unwrap()), - &Default::default()), false); + &Default::default())); } #[test] From d7650e2b9c43de7d9916e0fa56f0849b706d3696 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 20 Dec 2017 16:02:21 +0300 Subject: [PATCH 28/42] SecretStore: TrustedClient --- secret_store/src/acl_storage.rs | 62 +++++++++---------- secret_store/src/key_server_set.rs | 52 ++++++++-------- secret_store/src/lib.rs | 8 ++- secret_store/src/listener/service_contract.rs | 41 +++++------- secret_store/src/trusted_client.rs | 57 +++++++++++++++++ 5 files changed, 131 insertions(+), 89 deletions(-) create mode 100644 secret_store/src/trusted_client.rs diff --git a/secret_store/src/acl_storage.rs b/secret_store/src/acl_storage.rs index 8d629f022..e5637f81c 100644 --- a/secret_store/src/acl_storage.rs +++ b/secret_store/src/acl_storage.rs @@ -14,17 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::sync::{Arc, Weak}; +use std::sync::Arc; use std::collections::{HashMap, HashSet}; use futures::{future, Future}; use parking_lot::{Mutex, RwLock}; use ethkey::public_to_address; -use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; -use ethsync::SyncProvider; +use ethcore::client::{BlockChainClient, BlockId, ChainNotify}; use native_contracts::SecretStoreAclStorage; use bigint::hash::H256; use util::Address; use bytes::Bytes; +use trusted_client::TrustedClient; use types::all::{Error, ServerKeyId, Public}; const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker"; @@ -44,9 +44,7 @@ pub struct OnChainAclStorage { /// Cached on-chain ACL storage contract. struct CachedContract { /// Blockchain client. - client: Weak, - /// Sync provider. - sync: Weak, + client: TrustedClient, /// Contract address. contract_addr: Option
, /// Contract at given address. @@ -60,12 +58,15 @@ pub struct DummyAclStorage { } impl OnChainAclStorage { - pub fn new(client: &Arc, sync: &Arc) -> Arc { + pub fn new(trusted_client: TrustedClient) -> Result, Error> { + let client = trusted_client.get_untrusted(); let acl_storage = Arc::new(OnChainAclStorage { - contract: Mutex::new(CachedContract::new(client, sync)), + contract: Mutex::new(CachedContract::new(trusted_client)), }); - client.add_notify(acl_storage.clone()); - acl_storage + client + .ok_or(Error::Internal("Constructing OnChainAclStorage without active Client".into()))? + .add_notify(acl_storage.clone()); + Ok(acl_storage) } } @@ -84,17 +85,16 @@ impl ChainNotify for OnChainAclStorage { } impl CachedContract { - pub fn new(client: &Arc, sync: &Arc) -> Self { + pub fn new(client: TrustedClient) -> Self { CachedContract { - client: Arc::downgrade(client), - sync: Arc::downgrade(sync), + client: client, contract_addr: None, contract: None, } } pub fn update(&mut self) { - if let Some(client) = self.client.upgrade() { + if let Some(client) = self.client.get() { let new_contract_addr = client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned()); if self.contract_addr.as_ref() != new_contract_addr.as_ref() { self.contract = new_contract_addr.map(|contract_addr| { @@ -109,26 +109,20 @@ impl CachedContract { } pub fn check(&mut self, public: &Public, document: &ServerKeyId) -> Result { - match (self.client.upgrade(), self.sync.upgrade()) { - (Some(client), Some(sync)) => { - // we can not tell if access to document is allowed until fully synchronized - if sync.status().is_syncing(client.queue_info()) { - return Err(Error::Internal("Trying to check access by non-synchronized client".to_owned())); - } - - // call contract to check accesss - match self.contract.as_ref() { - Some(contract) => { - let address = public_to_address(&public); - let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d)); - 
contract.check_permissions(do_call, address, document.clone()) - .map_err(|err| Error::Internal(err)) - .wait() - }, - None => Err(Error::Internal("ACL checker contract is not configured".to_owned())), - } - }, - _ => Err(Error::Internal("Calling ACL contract without client".into())), + if let Some(client) = self.client.get() { + // call contract to check accesss + match self.contract.as_ref() { + Some(contract) => { + let address = public_to_address(&public); + let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d)); + contract.check_permissions(do_call, address, document.clone()) + .map_err(|err| Error::Internal(err)) + .wait() + }, + None => Err(Error::Internal("ACL checker contract is not configured".to_owned())), + } + } else { + Err(Error::Internal("Calling ACL contract without trusted blockchain client".into())) } } } diff --git a/secret_store/src/key_server_set.rs b/secret_store/src/key_server_set.rs index 04647db41..e64123a7f 100644 --- a/secret_store/src/key_server_set.rs +++ b/secret_store/src/key_server_set.rs @@ -14,20 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::sync::{Arc, Weak}; +use std::sync::Arc; use std::net::SocketAddr; use std::collections::BTreeMap; use futures::{future, Future}; use parking_lot::Mutex; use ethcore::filter::Filter; use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; -use ethsync::SyncProvider; use native_contracts::KeyServerSet as KeyServerSetContract; use hash::keccak; use bigint::hash::H256; use util::Address; use bytes::Bytes; use types::all::{Error, Public, NodeAddress}; +use trusted_client::TrustedClient; const KEY_SERVER_SET_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_server_set"; @@ -56,9 +56,7 @@ pub struct OnChainKeyServerSet { /// Cached on-chain Key Server set contract. struct CachedContract { /// Blockchain client. - client: Weak, - /// Sync provider. 
- sync: Weak, + client: TrustedClient, /// Contract address. contract_addr: Option
, /// Active set of key servers. @@ -66,19 +64,14 @@ struct CachedContract { } impl OnChainKeyServerSet { - pub fn new(client: &Arc, sync: &Arc, key_servers: BTreeMap) -> Result, Error> { - let mut cached_contract = CachedContract::new(client, sync, key_servers)?; - let key_server_contract_address = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned()); - // only initialize from contract if it is installed. otherwise - use default nodes - // once the contract is installed, all default nodes are lost (if not in the contract' set) - if key_server_contract_address.is_some() { - cached_contract.read_from_registry(&*client, key_server_contract_address); - } - + pub fn new(trusted_client: TrustedClient, key_servers: BTreeMap) -> Result, Error> { + let client = trusted_client.get_untrusted(); let key_server_set = Arc::new(OnChainKeyServerSet { - contract: Mutex::new(cached_contract), + contract: Mutex::new(CachedContract::new(trusted_client, key_servers)?), }); - client.add_notify(key_server_set.clone()); + client + .ok_or(Error::Internal("Constructing OnChainKeyServerSet without active Client".into()))? + .add_notify(key_server_set.clone()); Ok(key_server_set) } } @@ -98,10 +91,9 @@ impl ChainNotify for OnChainKeyServerSet { } impl CachedContract { - pub fn new(client: &Arc, sync: &Arc, key_servers: BTreeMap) -> Result { - Ok(CachedContract { - client: Arc::downgrade(client), - sync: Arc::downgrade(sync), + pub fn new(client: TrustedClient, key_servers: BTreeMap) -> Result { + let mut cached_contract = CachedContract { + client: client, contract_addr: None, key_servers: key_servers.into_iter() .map(|(p, addr)| { @@ -110,16 +102,22 @@ impl CachedContract { Ok((p, addr)) }) .collect::, Error>>()?, - }) + }; + + if let Some(client) = cached_contract.client.get() { + let key_server_contract_address = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned()); + // only initialize from contract if it is installed. 
otherwise - use default nodes + // once the contract is installed, all default nodes are lost (if not in the contract' set) + if key_server_contract_address.is_some() { + cached_contract.read_from_registry(&*client, key_server_contract_address); + } + } + + Ok(cached_contract) } pub fn update(&mut self, enacted: Vec, retracted: Vec) { - if let (Some(client), Some(sync)) = (self.client.upgrade(), self.sync.upgrade()) { - // do not update initial server set until fully synchronized - if sync.status().is_syncing(client.queue_info()) { - return; - } - + if let Some(client) = self.client.get() { let new_contract_addr = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned()); // new contract installed => read nodes set from the contract diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index b2139c000..d2eb7b125 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -60,6 +60,7 @@ mod serialization; mod key_server_set; mod node_key_pair; mod listener; +mod trusted_client; use std::sync::Arc; use ethcore::client::Client; @@ -72,12 +73,13 @@ pub use self::node_key_pair::{PlainNodeKeyPair, KeyStoreNodeKeyPair}; /// Start new key server instance pub fn start(client: Arc, sync: Arc, self_key_pair: Arc, config: ServiceConfiguration) -> Result, Error> { + let trusted_client = trusted_client::TrustedClient::new(client.clone(), sync); let acl_storage: Arc = if config.acl_check_enabled { - acl_storage::OnChainAclStorage::new(&client, &sync) + acl_storage::OnChainAclStorage::new(trusted_client.clone())? 
} else { Arc::new(acl_storage::DummyAclStorage::default()) }; - let key_server_set = key_server_set::OnChainKeyServerSet::new(&client, &sync, config.cluster_config.nodes.clone())?; + let key_server_set = key_server_set::OnChainKeyServerSet::new(trusted_client.clone(), config.cluster_config.nodes.clone())?; let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?); let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set.clone(), self_key_pair.clone(), acl_storage, key_storage.clone())?); let cluster = key_server.cluster(); @@ -88,7 +90,7 @@ pub fn start(client: Arc, sync: Arc, self_key_pair: Arc None, }; let contract_listener = config.service_contract_address.map(|service_contract_address| { - let service_contract = Arc::new(listener::service_contract::OnChainServiceContract::new(&client, &sync, service_contract_address, self_key_pair.clone())); + let service_contract = Arc::new(listener::service_contract::OnChainServiceContract::new(trusted_client, service_contract_address, self_key_pair.clone())); let contract_listener = listener::service_contract_listener::ServiceContractListener::new(listener::service_contract_listener::ServiceContractListenerParams { contract: service_contract, key_server: key_server.clone(), diff --git a/secret_store/src/listener/service_contract.rs b/secret_store/src/listener/service_contract.rs index 7330571f6..0104836ac 100644 --- a/secret_store/src/listener/service_contract.rs +++ b/secret_store/src/listener/service_contract.rs @@ -14,18 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::sync::{Arc, Weak}; +use std::sync::Arc; use futures::{future, Future}; use parking_lot::RwLock; use ethcore::filter::Filter; use ethcore::client::{Client, BlockChainClient, BlockId}; use ethkey::{Public, Signature, public_to_address}; -use ethsync::SyncProvider; use native_contracts::SecretStoreService; use hash::keccak; use bigint::hash::H256; use bigint::prelude::U256; use listener::service_contract_listener::ServiceTask; +use trusted_client::TrustedClient; use {ServerKeyId, NodeKeyPair, ContractAddress}; /// Name of the SecretStore contract in the registry. @@ -55,9 +55,7 @@ pub trait ServiceContract: Send + Sync { /// On-chain service contract. pub struct OnChainServiceContract { /// Blockchain client. - client: Weak, - /// Sync provider. - sync: Weak, + client: TrustedClient, /// This node key pair. self_key_pair: Arc, /// Contract addresss. @@ -82,14 +80,14 @@ struct PendingRequestsIterator { impl OnChainServiceContract { /// Create new on-chain service contract. - pub fn new(client: &Arc, sync: &Arc, address: ContractAddress, self_key_pair: Arc) -> Self { + pub fn new(client: TrustedClient, address: ContractAddress, self_key_pair: Arc) -> Self { let contract_addr = match address { - ContractAddress::Registry => client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) + ContractAddress::Registry => client.get().and_then(|c| c.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()) .map(|address| { trace!(target: "secretstore", "{}: installing service contract from address {}", self_key_pair.public(), address); address - }) + })) .unwrap_or_default(), ContractAddress::Address(ref address) => { trace!(target: "secretstore", "{}: installing service contract from address {}", @@ -99,8 +97,7 @@ impl OnChainServiceContract { }; OnChainServiceContract { - client: Arc::downgrade(client), - sync: Arc::downgrade(sync), + client: client, self_key_pair: self_key_pair, address: address, contract: 
RwLock::new(Arc::new(SecretStoreService::new(contract_addr))), @@ -111,12 +108,7 @@ impl OnChainServiceContract { impl ServiceContract for OnChainServiceContract { fn update(&self) { if let &ContractAddress::Registry = &self.address { - if let (Some(client), Some(sync)) = (self.client.upgrade(), self.sync.upgrade()) { - // do nothing until synced - if sync.status().is_syncing(client.queue_info()) { - return; - } - + if let Some(client) = self.client.get() { // update contract address from registry let service_contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()).unwrap_or_default(); if self.contract.read().address != service_contract_addr { @@ -130,14 +122,12 @@ impl ServiceContract for OnChainServiceContract { fn is_actual(&self) -> bool { self.contract.read().address != Default::default() - && match (self.client.upgrade(), self.sync.upgrade()) { - (Some(client), Some(sync)) => !sync.status().is_syncing(client.queue_info()), - _ => false, - } + && self.client.get().is_some() } fn read_logs(&self, first_block: H256, last_block: H256) -> Box>> { - let client = match self.client.upgrade() { + // already bound to specific blocks => do not care about trust here + let client = match self.client.get_untrusted() { Some(client) => client, None => { warn!(target: "secretstore", "{}: client is offline during read_pending_requests call", @@ -165,10 +155,10 @@ impl ServiceContract for OnChainServiceContract { } fn read_pending_requests(&self) -> Box> { - let client = match self.client.upgrade() { + let client = match self.client.get() { Some(client) => client, None => { - warn!(target: "secretstore", "{}: client is offline during read_pending_requests call", + warn!(target: "secretstore", "{}: client is untrusted during read_pending_requests call", self.self_key_pair.public()); return Box::new(::std::iter::empty()); }, @@ -205,9 +195,10 @@ impl ServiceContract for OnChainServiceContract { // it is not an error, because key could be generated even 
without contract return Ok(()); } - let client = match self.client.upgrade() { + + let client = match self.client.get() { Some(client) => client, - None => return Err("client is required to publish key".into()), + None => return Err("trusted client is required to publish key".into()), }; // only publish key if contract waits for publication diff --git a/secret_store/src/trusted_client.rs b/secret_store/src/trusted_client.rs new file mode 100644 index 000000000..688e4099d --- /dev/null +++ b/secret_store/src/trusted_client.rs @@ -0,0 +1,57 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::{Arc, Weak}; +use ethcore::client::{Client, BlockChainClient}; +use ethsync::SyncProvider; + +#[derive(Clone)] +/// 'Trusted' client weak reference. +pub struct TrustedClient { + /// Blockchain client. + client: Weak, + /// Sync provider. + sync: Weak, +} + +impl TrustedClient { + /// Create new trusted client. + pub fn new(client: Arc, sync: Arc) -> Self { + TrustedClient { + client: Arc::downgrade(&client), + sync: Arc::downgrade(&sync), + } + } + + /// Get 'trusted' `Client` reference only if it is synchronized && trusted. 
+ pub fn get(&self) -> Option> { + self.client.upgrade() + .and_then(|client| self.sync.upgrade().map(|sync| (client, sync))) + .and_then(|(client, sync)| { + let is_synced = !sync.status().is_syncing(client.queue_info()); + let is_trusted = client.chain_info().security_level().is_full(); + match is_synced && is_trusted { + true => Some(client), + false => None, + } + }) + } + + /// Get untrusted `Client` reference. + pub fn get_untrusted(&self) -> Option> { + self.client.upgrade() + } +} From b7a744be5979a4f9ab77a4cc4c5926335e6eeedf Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 20 Dec 2017 17:05:32 +0300 Subject: [PATCH 29/42] SecretStore: require N confirmations in PendingRequestsIterator --- secret_store/src/listener/service_contract.rs | 57 +++++++++++-------- 1 file changed, 33 insertions(+), 24 deletions(-) diff --git a/secret_store/src/listener/service_contract.rs b/secret_store/src/listener/service_contract.rs index 0104836ac..bca739484 100644 --- a/secret_store/src/listener/service_contract.rs +++ b/secret_store/src/listener/service_contract.rs @@ -34,6 +34,9 @@ const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service"; /// Key server has been added to the set. const SERVER_KEY_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyRequested(bytes32,uint256)"; +/// Number of confirmations required before request can be processed. +const REQUEST_CONFIRMATIONS_REQUIRED: u64 = 3; + lazy_static! { static ref SERVER_KEY_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_REQUESTED_EVENT_NAME); } @@ -72,6 +75,8 @@ struct PendingRequestsIterator { contract: Arc, /// This node key pair. self_key_pair: Arc, + /// Block, this iterator is created for. + block: H256, /// Current request index. index: U256, /// Requests length. 
@@ -126,8 +131,7 @@ impl ServiceContract for OnChainServiceContract { } fn read_logs(&self, first_block: H256, last_block: H256) -> Box>> { - // already bound to specific blocks => do not care about trust here - let client = match self.client.get_untrusted() { + let client = match self.client.get() { Some(client) => client, None => { warn!(target: "secretstore", "{}: client is offline during read_pending_requests call", @@ -165,27 +169,32 @@ impl ServiceContract for OnChainServiceContract { }; let contract = self.contract.read(); - let length = match contract.address == Default::default() { - true => 0.into(), - false => { - let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d)); - contract.server_key_generation_requests_count(&do_call).wait() - .map_err(|error| { - warn!(target: "secretstore", "{}: call to server_key_generation_requests_count failed: {}", - self.self_key_pair.public(), error); - error - }) - .unwrap_or_default() - }, - }; - - Box::new(PendingRequestsIterator { - client: client, - contract: contract.clone(), - self_key_pair: self.self_key_pair.clone(), - index: 0.into(), - length: length, - }) + match contract.address == Default::default() { + true => Box::new(::std::iter::empty()), + false => client.block_number(BlockId::Latest) + .and_then(|b| b.checked_sub(REQUEST_CONFIRMATIONS_REQUIRED)) + .and_then(|b| client.block_hash(BlockId::Number(b))) + .and_then(|b| { + let do_call = |a, d| future::done(client.call_contract(BlockId::Hash(b.clone()), a, d)); + contract.server_key_generation_requests_count(&do_call).wait() + .map_err(|error| { + warn!(target: "secretstore", "{}: call to server_key_generation_requests_count failed: {}", + self.self_key_pair.public(), error); + error + }) + .map(|l| (b, l)) + .ok() + }) + .map(|(b, l)| Box::new(PendingRequestsIterator { + client: client, + contract: contract.clone(), + self_key_pair: self.self_key_pair.clone(), + block: b, + index: 0.into(), + length: l, + }) as Box>) + 
.unwrap_or_else(|| Box::new(::std::iter::empty())) + } } fn publish_server_key(&self, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> { @@ -243,7 +252,7 @@ impl Iterator for PendingRequestsIterator { self.index = self.index + 1.into(); let self_address = public_to_address(self.self_key_pair.public()); - let do_call = |a, d| future::done(self.client.call_contract(BlockId::Latest, a, d)); + let do_call = |a, d| future::done(self.client.call_contract(BlockId::Hash(self.block.clone()), a, d)); self.contract.get_server_key_id(&do_call, index).wait() .and_then(|server_key_id| self.contract.get_server_key_threshold(&do_call, server_key_id.clone()).wait() From 6efca8860a57fd9e305269e44bb791f9bddea2e6 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 20 Dec 2017 17:22:28 +0300 Subject: [PATCH 30/42] SecretStore: get rid of read_logs in ServiceContract --- secret_store/src/listener/service_contract.rs | 42 --------- .../src/listener/service_contract_listener.rs | 88 +------------------ 2 files changed, 1 insertion(+), 129 deletions(-) diff --git a/secret_store/src/listener/service_contract.rs b/secret_store/src/listener/service_contract.rs index bca739484..05aa7681a 100644 --- a/secret_store/src/listener/service_contract.rs +++ b/secret_store/src/listener/service_contract.rs @@ -17,7 +17,6 @@ use std::sync::Arc; use futures::{future, Future}; use parking_lot::RwLock; -use ethcore::filter::Filter; use ethcore::client::{Client, BlockChainClient, BlockId}; use ethkey::{Public, Signature, public_to_address}; use native_contracts::SecretStoreService; @@ -31,24 +30,15 @@ use {ServerKeyId, NodeKeyPair, ContractAddress}; /// Name of the SecretStore contract in the registry. const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service"; -/// Key server has been added to the set. 
-const SERVER_KEY_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyRequested(bytes32,uint256)"; - /// Number of confirmations required before request can be processed. const REQUEST_CONFIRMATIONS_REQUIRED: u64 = 3; -lazy_static! { - static ref SERVER_KEY_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_REQUESTED_EVENT_NAME); -} - /// Service contract trait. pub trait ServiceContract: Send + Sync { /// Update contract. fn update(&self); /// Is contract installed && up-to-date (i.e. chain is synced)? fn is_actual(&self) -> bool; - /// Read contract logs from given blocks. Returns topics of every entry. - fn read_logs(&self, first_block: H256, last_block: H256) -> Box>>; /// Publish generated key. fn read_pending_requests(&self) -> Box>; /// Publish server key. @@ -130,34 +120,6 @@ impl ServiceContract for OnChainServiceContract { && self.client.get().is_some() } - fn read_logs(&self, first_block: H256, last_block: H256) -> Box>> { - let client = match self.client.get() { - Some(client) => client, - None => { - warn!(target: "secretstore", "{}: client is offline during read_pending_requests call", - self.self_key_pair.public()); - return Box::new(::std::iter::empty()); - }, - }; - - // read server key generation requests - let contract_address = self.contract.read().address.clone(); - let request_logs = client.logs(Filter { - from_block: BlockId::Hash(first_block), - to_block: BlockId::Hash(last_block), - address: Some(vec![contract_address]), - topics: vec![ - Some(vec![*SERVER_KEY_REQUESTED_EVENT_NAME_HASH]), - None, - None, - None, - ], - limit: None, - }); - - Box::new(request_logs.into_iter().map(|log| log.entry.topics)) - } - fn read_pending_requests(&self) -> Box> { let client = match self.client.get() { Some(client) => client, @@ -296,10 +258,6 @@ pub mod tests { self.is_actual } - fn read_logs(&self, _first_block: H256, _last_block: H256) -> Box>> { - Box::new(self.logs.clone().into_iter()) - } - fn read_pending_requests(&self) -> Box> { 
Box::new(self.pending_requests.clone().into_iter()) } diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index d9567125c..9637756dc 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -147,31 +147,6 @@ impl ServiceContractListener { contract } - /// Process incoming events of service contract. - fn process_service_contract_events(&self, first: H256, last: H256) { - self.data.tasks_queue.push(self.data.contract.read_logs(first, last) - .filter_map(|topics| match topics.len() { - // when key is already generated && we have this key - 3 if self.data.key_storage.get(&topics[1]).map(|k| k.is_some()).unwrap_or_default() => { - Some(ServiceTask::RestoreServerKey( - topics[1], - )) - } - // when key is not yet generated && this node should be master of this key generation session - 3 if is_processed_by_this_key_server(&*self.data.key_server_set, &*self.data.self_key_pair, &topics[1]) => { - Some(ServiceTask::GenerateServerKey( - topics[1], - topics[2], - )) - }, - 3 => None, - l @ _ => { - warn!(target: "secretstore", "Ignoring ServerKeyRequested event with wrong number of params {}", l); - None - }, - })); - } - /// Service thread procedure. 
fn run_service_thread(data: Arc) { loop { @@ -326,23 +301,14 @@ impl Drop for ServiceContractListener { impl ChainNotify for ServiceContractListener { fn new_blocks(&self, _imported: Vec, _invalid: Vec, enacted: Vec, _retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { - let enacted_len = enacted.len(); - if enacted_len == 0 { - return; - } - self.data.contract.update(); if !self.data.contract.is_actual() { return; } - let reason = "enacted.len() != 0; qed"; - self.process_service_contract_events( - enacted.first().expect(reason).clone(), - enacted.last().expect(reason).clone()); - // schedule retry if received enough blocks since last retry // it maybe inaccurate when switching syncing/synced states, but that's ok + let enacted_len = enacted.len(); if self.data.last_retry.fetch_add(enacted_len, Ordering::Relaxed) >= RETRY_INTERVAL_BLOCKS { self.data.tasks_queue.push(::std::iter::once(ServiceTask::Retry)); self.data.last_retry.store(0, Ordering::Relaxed); @@ -623,58 +589,6 @@ mod tests { &"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true); } - #[test] - fn no_tasks_scheduled_when_no_contract_events() { - let listener = make_service_contract_listener(None, None, None); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); - listener.process_service_contract_events(Default::default(), Default::default()); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); - } - - #[test] - fn server_key_generation_is_scheduled_when_requested_key_is_unknnown() { - let mut contract = DummyServiceContract::default(); - contract.logs.push(vec![Default::default(), Default::default(), Default::default()]); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); - listener.process_service_contract_events(Default::default(), Default::default()); - 
assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 2); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().pop_back(), Some(ServiceTask::GenerateServerKey(Default::default(), Default::default()))); - } - - #[test] - fn no_new_tasks_scheduled_when_requested_key_is_unknown_and_request_belongs_to_other_key_server() { - let server_key_id = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap(); - let mut contract = DummyServiceContract::default(); - contract.logs.push(vec![Default::default(), server_key_id, Default::default()]); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); - listener.process_service_contract_events(Default::default(), Default::default()); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); - } - - #[test] - fn server_key_restore_is_scheduled_when_requested_key_is_knnown() { - let mut contract = DummyServiceContract::default(); - contract.logs.push(vec![Default::default(), Default::default(), Default::default()]); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); - listener.data.key_storage.insert(Default::default(), Default::default()).unwrap(); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); - listener.process_service_contract_events(Default::default(), Default::default()); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 2); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().pop_back(), Some(ServiceTask::RestoreServerKey(Default::default()))); - } - - #[test] - fn no_new_tasks_scheduled_when_wrong_number_of_topics_in_log() { - let mut contract = DummyServiceContract::default(); - contract.logs.push(vec![Default::default(), Default::default()]); - let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); - 
assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); - listener.process_service_contract_events(Default::default(), Default::default()); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); - } - #[test] fn generation_session_is_created_when_processing_generate_server_key_task() { let key_server = Arc::new(DummyKeyServer::default()); From b10d56738636fc7a57e94736a5484bd0a1531515 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 20 Dec 2017 19:11:37 +0300 Subject: [PATCH 31/42] SecretStore: ClusterSession::wait_session helper --- .../key_version_negotiation_session.rs | 8 +------ .../servers_set_change_session.rs | 8 +------ .../client_sessions/decryption_session.rs | 9 +------- .../client_sessions/encryption_session.rs | 12 +--------- .../client_sessions/generation_session.rs | 13 ++--------- .../client_sessions/signing_session.rs | 9 +------- .../key_server_cluster/cluster_sessions.rs | 22 +++++++++++++++++-- 7 files changed, 27 insertions(+), 54 deletions(-) diff --git a/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs b/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs index 105df299c..3ccfe6a58 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs +++ b/secret_store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs @@ -198,13 +198,7 @@ impl SessionImpl where T: SessionTransport { /// Wait for session completion. pub fn wait(&self) -> Result<(H256, NodeId), Error> { - let mut data = self.data.lock(); - if !data.result.is_some() { - self.core.completed.wait(&mut data); - } - - data.result.clone() - .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") + Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone()) } /// Initialize session. 
diff --git a/secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs b/secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs index 17a1468a2..93bdc27e6 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs +++ b/secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs @@ -207,13 +207,7 @@ impl SessionImpl { /// Wait for session completion. pub fn wait(&self) -> Result<(), Error> { - let mut data = self.data.lock(); - if !data.result.is_some() { - self.core.completed.wait(&mut data); - } - - data.result.clone() - .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") + Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone()) } /// Initialize servers set change session on master node. diff --git a/secret_store/src/key_server_cluster/client_sessions/decryption_session.rs b/secret_store/src/key_server_cluster/client_sessions/decryption_session.rs index 58ae20661..da2048ccf 100644 --- a/secret_store/src/key_server_cluster/client_sessions/decryption_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/decryption_session.rs @@ -202,14 +202,7 @@ impl SessionImpl { /// Wait for session completion. pub fn wait(&self) -> Result { - let mut data = self.data.lock(); - if !data.result.is_some() { - self.core.completed.wait(&mut data); - } - - data.result.as_ref() - .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") - .clone() + Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone()) } /// Delegate session to other node. 
diff --git a/secret_store/src/key_server_cluster/client_sessions/encryption_session.rs b/secret_store/src/key_server_cluster/client_sessions/encryption_session.rs index 1cc6ad9f3..ff173a968 100644 --- a/secret_store/src/key_server_cluster/client_sessions/encryption_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/encryption_session.rs @@ -132,17 +132,7 @@ impl SessionImpl { /// Wait for session completion. pub fn wait(&self, timeout: Option) -> Result<(), Error> { - let mut data = self.data.lock(); - if !data.result.is_some() { - match timeout { - None => self.completed.wait(&mut data), - Some(timeout) => { self.completed.wait_for(&mut data, timeout); }, - } - } - - data.result.as_ref() - .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") - .clone() + Self::wait_session(&self.completed, &self.data, timeout, |data| data.result.clone()) } diff --git a/secret_store/src/key_server_cluster/client_sessions/generation_session.rs b/secret_store/src/key_server_cluster/client_sessions/generation_session.rs index 84c1b43e5..92dee3709 100644 --- a/secret_store/src/key_server_cluster/client_sessions/generation_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/generation_session.rs @@ -223,17 +223,8 @@ impl SessionImpl { /// Wait for session completion. pub fn wait(&self, timeout: Option) -> Result { - let mut data = self.data.lock(); - if !data.joint_public_and_secret.is_some() { - match timeout { - None => self.completed.wait(&mut data), - Some(timeout) => { self.completed.wait_for(&mut data, timeout); }, - } - } - - data.joint_public_and_secret.clone() - .expect("checked above or waited for completed; completed is only signaled when joint_public.is_some(); qed") - .map(|p| p.0) + Self::wait_session(&self.completed, &self.data, timeout, |data| data.joint_public_and_secret.clone() + .map(|r| r.map(|r| r.0.clone()))) } /// Get generated public and secret (if any). 
diff --git a/secret_store/src/key_server_cluster/client_sessions/signing_session.rs b/secret_store/src/key_server_cluster/client_sessions/signing_session.rs index a0895ceb0..cd72cc8e7 100644 --- a/secret_store/src/key_server_cluster/client_sessions/signing_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/signing_session.rs @@ -207,14 +207,7 @@ impl SessionImpl { /// Wait for session completion. pub fn wait(&self) -> Result<(Secret, Secret), Error> { - let mut data = self.data.lock(); - if !data.result.is_some() { - self.core.completed.wait(&mut data); - } - - data.result.as_ref() - .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed") - .clone() + Self::wait_session(&self.core.completed, &self.data, None, |data| data.result.clone()) } /// Delegate session to other node. diff --git a/secret_store/src/key_server_cluster/cluster_sessions.rs b/secret_store/src/key_server_cluster/cluster_sessions.rs index cd07045df..3cba0e14c 100644 --- a/secret_store/src/key_server_cluster/cluster_sessions.rs +++ b/secret_store/src/key_server_cluster/cluster_sessions.rs @@ -18,7 +18,7 @@ use std::time; use std::sync::{Arc, Weak}; use std::sync::atomic::AtomicBool; use std::collections::{VecDeque, BTreeMap}; -use parking_lot::{Mutex, RwLock}; +use parking_lot::{Mutex, RwLock, Condvar}; use bigint::hash::H256; use ethkey::{Secret, Signature}; use key_server_cluster::{Error, NodeId, SessionId}; @@ -79,6 +79,25 @@ pub trait ClusterSession { fn on_session_error(&self, sender: &NodeId, error: Error); /// Process session message. fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error>; + + /// 'Wait for session completion' helper. 
+ fn wait_session Option>>(completion_event: &Condvar, session_data: &Mutex, timeout: Option, result_reader: F) -> Result { + let mut locked_data = session_data.lock(); + match result_reader(&locked_data) { + Some(result) => result, + None => { + match timeout { + None => completion_event.wait(&mut locked_data), + Some(timeout) => { + completion_event.wait_for(&mut locked_data, timeout); + }, + } + + result_reader(&locked_data) + .expect("waited for completion; completion is only signaled when result.is_some(); qed") + }, + } + } } /// Administrative session. @@ -529,7 +548,6 @@ pub fn create_cluster_view(data: &Arc, requires_all_connections: bo Ok(Arc::new(ClusterView::new(data.clone(), connected_nodes))) } - #[cfg(test)] mod tests { use std::sync::Arc; From ee1ce425468db8020c95ea0eba6b23a9588894e2 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 20 Dec 2017 19:27:47 +0300 Subject: [PATCH 32/42] SecretStore: extracted TasksQueue to separate file --- secret_store/src/listener/mod.rs | 1 + .../src/listener/service_contract_listener.rs | 58 +++-------------- secret_store/src/listener/tasks_queue.rs | 62 +++++++++++++++++++ 3 files changed, 70 insertions(+), 51 deletions(-) create mode 100644 secret_store/src/listener/tasks_queue.rs diff --git a/secret_store/src/listener/mod.rs b/secret_store/src/listener/mod.rs index 403eaf549..a29ee5cfd 100644 --- a/secret_store/src/listener/mod.rs +++ b/secret_store/src/listener/mod.rs @@ -17,6 +17,7 @@ pub mod http_listener; pub mod service_contract; pub mod service_contract_listener; +mod tasks_queue; use std::collections::BTreeSet; use std::sync::Arc; diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index 9637756dc..a6327ef3c 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -14,11 +14,11 @@ // You should have received a copy of the GNU General Public License 
// along with Parity. If not, see . -use std::collections::{VecDeque, HashSet}; +use std::collections::HashSet; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread; -use parking_lot::{Mutex, Condvar}; +use parking_lot::Mutex; use ethcore::client::ChainNotify; use ethkey::{Random, Generator, Public, sign}; use bytes::Bytes; @@ -29,6 +29,7 @@ use key_server_cluster::{ClusterClient, ClusterSessionsListener, ClusterSession} use key_server_cluster::generation_session::SessionImpl as GenerationSession; use key_storage::KeyStorage; use listener::service_contract::ServiceContract; +use listener::tasks_queue::TasksQueue; use {ServerKeyId, NodeKeyPair, KeyServer}; /// Retry interval (in blocks). Every RETRY_INTERVAL_BLOCKS blocks each KeyServer reads pending requests from @@ -75,7 +76,7 @@ struct ServiceContractListenerData { /// Retry-related data. pub retry_data: Mutex, /// Service tasks queue. - pub tasks_queue: Arc, + pub tasks_queue: Arc>, /// Service contract. pub contract: Arc, /// Key server reference. @@ -96,14 +97,6 @@ struct ServiceContractRetryData { pub generated_keys: HashSet, } -/// Service tasks queue. -struct TasksQueue { - /// Service event. - service_event: Condvar, - /// Service tasks queue. - service_tasks: Mutex>, -} - /// Service task. 
#[derive(Debug, Clone, PartialEq)] pub enum ServiceTask { @@ -130,7 +123,7 @@ impl ServiceContractListener { key_server_set: params.key_server_set, key_storage: params.key_storage, }); - data.tasks_queue.push(::std::iter::once(ServiceTask::Retry)); + data.tasks_queue.push(ServiceTask::Retry); // we are not starting thread when in test mode let service_handle = if cfg!(test) { @@ -292,7 +285,7 @@ impl ServiceContractListener { impl Drop for ServiceContractListener { fn drop(&mut self) { if let Some(service_handle) = self.service_handle.take() { - self.data.tasks_queue.shutdown(); + self.data.tasks_queue.push_front(ServiceTask::Shutdown); // ignore error as we are already closing let _ = service_handle.join(); } @@ -310,7 +303,7 @@ impl ChainNotify for ServiceContractListener { // it maybe inaccurate when switching syncing/synced states, but that's ok let enacted_len = enacted.len(); if self.data.last_retry.fetch_add(enacted_len, Ordering::Relaxed) >= RETRY_INTERVAL_BLOCKS { - self.data.tasks_queue.push(::std::iter::once(ServiceTask::Retry)); + self.data.tasks_queue.push(ServiceTask::Retry); self.data.last_retry.store(0, Ordering::Relaxed); } } @@ -337,43 +330,6 @@ impl ClusterSessionsListener for ServiceContractListener { } } -impl TasksQueue { - /// Create new tasks queue. - pub fn new() -> Self { - TasksQueue { - service_event: Condvar::new(), - service_tasks: Mutex::new(VecDeque::new()), - } - } - - /// Shutdown tasks queue. - pub fn shutdown(&self) { - let mut service_tasks = self.service_tasks.lock(); - service_tasks.push_front(ServiceTask::Shutdown); - self.service_event.notify_all(); - } - - //// Push new tasks to the queue. - pub fn push(&self, tasks: I) where I: Iterator { - let mut service_tasks = self.service_tasks.lock(); - service_tasks.extend(tasks); - if !service_tasks.is_empty() { - self.service_event.notify_all(); - } - } - - /// Wait for new task. 
- pub fn wait(&self) -> ServiceTask { - let mut service_tasks = self.service_tasks.lock(); - if service_tasks.is_empty() { - self.service_event.wait(&mut service_tasks); - } - - service_tasks.pop_front() - .expect("service_event is only fired when there are new tasks or is_shutdown == true; is_shutdown == false; qed") - } -} - /// Returns true when session, related to `server_key_id` must be started on this KeyServer. fn is_processed_by_this_key_server(key_server_set: &KeyServerSet, self_key_pair: &NodeKeyPair, server_key_id: &H256) -> bool { let servers = key_server_set.get(); diff --git a/secret_store/src/listener/tasks_queue.rs b/secret_store/src/listener/tasks_queue.rs new file mode 100644 index 000000000..f313c8431 --- /dev/null +++ b/secret_store/src/listener/tasks_queue.rs @@ -0,0 +1,62 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::collections::VecDeque; +use parking_lot::{Mutex, Condvar}; + +#[derive(Default)] +/// Service tasks queue. +pub struct TasksQueue { + /// Service event. + service_event: Condvar, + /// Service tasks queue. + service_tasks: Mutex>, +} + +impl TasksQueue { + /// Create new tasks queue. + pub fn new() -> Self { + TasksQueue { + service_event: Condvar::new(), + service_tasks: Mutex::new(VecDeque::new()), + } + } + + /// Push task to the front of queue. 
+ pub fn push_front(&self, task: Task) { + let mut service_tasks = self.service_tasks.lock(); + service_tasks.push_front(task); + self.service_event.notify_all(); + } + + /// Push task to the back of queue. + pub fn push(&self, task: Task) { + let mut service_tasks = self.service_tasks.lock(); + service_tasks.push_back(task); + self.service_event.notify_all(); + } + + /// Wait for new task. + pub fn wait(&self) -> Task { + let mut service_tasks = self.service_tasks.lock(); + if service_tasks.is_empty() { + self.service_event.wait(&mut service_tasks); + } + + service_tasks.pop_front() + .expect("service_event is only fired when there are new tasks or is_shutdown == true; is_shutdown == false; qed") + } +} From ff094e0a032aaf99e3338a41276a919de11bc1ba Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 21 Dec 2017 11:44:55 +0300 Subject: [PATCH 33/42] Revert "SecretStore: get rid of read_logs in ServiceContract" This reverts commit 6efca8860a57fd9e305269e44bb791f9bddea2e6. --- secret_store/src/listener/service_contract.rs | 42 +++++++++ .../src/listener/service_contract_listener.rs | 88 ++++++++++++++++++- 2 files changed, 129 insertions(+), 1 deletion(-) diff --git a/secret_store/src/listener/service_contract.rs b/secret_store/src/listener/service_contract.rs index 05aa7681a..bca739484 100644 --- a/secret_store/src/listener/service_contract.rs +++ b/secret_store/src/listener/service_contract.rs @@ -17,6 +17,7 @@ use std::sync::Arc; use futures::{future, Future}; use parking_lot::RwLock; +use ethcore::filter::Filter; use ethcore::client::{Client, BlockChainClient, BlockId}; use ethkey::{Public, Signature, public_to_address}; use native_contracts::SecretStoreService; @@ -30,15 +31,24 @@ use {ServerKeyId, NodeKeyPair, ContractAddress}; /// Name of the SecretStore contract in the registry. const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service"; +/// Key server has been added to the set. 
+const SERVER_KEY_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyRequested(bytes32,uint256)"; + /// Number of confirmations required before request can be processed. const REQUEST_CONFIRMATIONS_REQUIRED: u64 = 3; +lazy_static! { + static ref SERVER_KEY_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_REQUESTED_EVENT_NAME); +} + /// Service contract trait. pub trait ServiceContract: Send + Sync { /// Update contract. fn update(&self); /// Is contract installed && up-to-date (i.e. chain is synced)? fn is_actual(&self) -> bool; + /// Read contract logs from given blocks. Returns topics of every entry. + fn read_logs(&self, first_block: H256, last_block: H256) -> Box>>; /// Publish generated key. fn read_pending_requests(&self) -> Box>; /// Publish server key. @@ -120,6 +130,34 @@ impl ServiceContract for OnChainServiceContract { && self.client.get().is_some() } + fn read_logs(&self, first_block: H256, last_block: H256) -> Box>> { + let client = match self.client.get() { + Some(client) => client, + None => { + warn!(target: "secretstore", "{}: client is offline during read_pending_requests call", + self.self_key_pair.public()); + return Box::new(::std::iter::empty()); + }, + }; + + // read server key generation requests + let contract_address = self.contract.read().address.clone(); + let request_logs = client.logs(Filter { + from_block: BlockId::Hash(first_block), + to_block: BlockId::Hash(last_block), + address: Some(vec![contract_address]), + topics: vec![ + Some(vec![*SERVER_KEY_REQUESTED_EVENT_NAME_HASH]), + None, + None, + None, + ], + limit: None, + }); + + Box::new(request_logs.into_iter().map(|log| log.entry.topics)) + } + fn read_pending_requests(&self) -> Box> { let client = match self.client.get() { Some(client) => client, @@ -258,6 +296,10 @@ pub mod tests { self.is_actual } + fn read_logs(&self, _first_block: H256, _last_block: H256) -> Box>> { + Box::new(self.logs.clone().into_iter()) + } + fn read_pending_requests(&self) -> Box> { 
Box::new(self.pending_requests.clone().into_iter()) } diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index a6327ef3c..ebf9aff58 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -140,6 +140,31 @@ impl ServiceContractListener { contract } + /// Process incoming events of service contract. + fn process_service_contract_events(&self, first: H256, last: H256) { + self.data.tasks_queue.push(self.data.contract.read_logs(first, last) + .filter_map(|topics| match topics.len() { + // when key is already generated && we have this key + 3 if self.data.key_storage.get(&topics[1]).map(|k| k.is_some()).unwrap_or_default() => { + Some(ServiceTask::RestoreServerKey( + topics[1], + )) + } + // when key is not yet generated && this node should be master of this key generation session + 3 if is_processed_by_this_key_server(&*self.data.key_server_set, &*self.data.self_key_pair, &topics[1]) => { + Some(ServiceTask::GenerateServerKey( + topics[1], + topics[2], + )) + }, + 3 => None, + l @ _ => { + warn!(target: "secretstore", "Ignoring ServerKeyRequested event with wrong number of params {}", l); + None + }, + })); + } + /// Service thread procedure. 
fn run_service_thread(data: Arc) { loop { @@ -294,14 +319,23 @@ impl Drop for ServiceContractListener { impl ChainNotify for ServiceContractListener { fn new_blocks(&self, _imported: Vec, _invalid: Vec, enacted: Vec, _retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { + let enacted_len = enacted.len(); + if enacted_len == 0 { + return; + } + self.data.contract.update(); if !self.data.contract.is_actual() { return; } + let reason = "enacted.len() != 0; qed"; + self.process_service_contract_events( + enacted.first().expect(reason).clone(), + enacted.last().expect(reason).clone()); + // schedule retry if received enough blocks since last retry // it maybe inaccurate when switching syncing/synced states, but that's ok - let enacted_len = enacted.len(); if self.data.last_retry.fetch_add(enacted_len, Ordering::Relaxed) >= RETRY_INTERVAL_BLOCKS { self.data.tasks_queue.push(ServiceTask::Retry); self.data.last_retry.store(0, Ordering::Relaxed); @@ -545,6 +579,58 @@ mod tests { &"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap()), true); } + #[test] + fn no_tasks_scheduled_when_no_contract_events() { + let listener = make_service_contract_listener(None, None, None); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + listener.process_service_contract_events(Default::default(), Default::default()); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + } + + #[test] + fn server_key_generation_is_scheduled_when_requested_key_is_unknnown() { + let mut contract = DummyServiceContract::default(); + contract.logs.push(vec![Default::default(), Default::default(), Default::default()]); + let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + listener.process_service_contract_events(Default::default(), Default::default()); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 2); + 
assert_eq!(listener.data.tasks_queue.service_tasks.lock().pop_back(), Some(ServiceTask::GenerateServerKey(Default::default(), Default::default()))); + } + + #[test] + fn no_new_tasks_scheduled_when_requested_key_is_unknown_and_request_belongs_to_other_key_server() { + let server_key_id = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse().unwrap(); + let mut contract = DummyServiceContract::default(); + contract.logs.push(vec![Default::default(), server_key_id, Default::default()]); + let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + listener.process_service_contract_events(Default::default(), Default::default()); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + } + + #[test] + fn server_key_restore_is_scheduled_when_requested_key_is_knnown() { + let mut contract = DummyServiceContract::default(); + contract.logs.push(vec![Default::default(), Default::default(), Default::default()]); + let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); + listener.data.key_storage.insert(Default::default(), Default::default()).unwrap(); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + listener.process_service_contract_events(Default::default(), Default::default()); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 2); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().pop_back(), Some(ServiceTask::RestoreServerKey(Default::default()))); + } + + #[test] + fn no_new_tasks_scheduled_when_wrong_number_of_topics_in_log() { + let mut contract = DummyServiceContract::default(); + contract.logs.push(vec![Default::default(), Default::default()]); + let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + 
listener.process_service_contract_events(Default::default(), Default::default()); + assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + } + #[test] fn generation_session_is_created_when_processing_generate_server_key_task() { let key_server = Arc::new(DummyKeyServer::default()); From 9104d4673c16ae9fe83827562dc0a7aeb821b12b Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 21 Dec 2017 16:19:15 +0300 Subject: [PATCH 34/42] SecretStore: reorganize service contract read --- secret_store/src/listener/service_contract.rs | 114 ++++++++++++------ .../src/listener/service_contract_listener.rs | 46 ++++--- secret_store/src/listener/tasks_queue.rs | 24 +++- 3 files changed, 117 insertions(+), 67 deletions(-) diff --git a/secret_store/src/listener/service_contract.rs b/secret_store/src/listener/service_contract.rs index bca739484..c49bf49b7 100644 --- a/secret_store/src/listener/service_contract.rs +++ b/secret_store/src/listener/service_contract.rs @@ -43,12 +43,10 @@ lazy_static! { /// Service contract trait. pub trait ServiceContract: Send + Sync { - /// Update contract. - fn update(&self); - /// Is contract installed && up-to-date (i.e. chain is synced)? - fn is_actual(&self) -> bool; - /// Read contract logs from given blocks. Returns topics of every entry. - fn read_logs(&self, first_block: H256, last_block: H256) -> Box>>; + /// Update contract when new blocks are enacted. Returns true if contract is installed && up-to-date (i.e. chain is synced). + fn update(&self) -> bool; + /// Read recent contract logs. Returns topics of every entry. + fn read_logs(&self) -> Box>>; /// Publish generated key. fn read_pending_requests(&self) -> Box>; /// Publish server key. @@ -64,7 +62,15 @@ pub struct OnChainServiceContract { /// Contract addresss. address: ContractAddress, /// Contract. - contract: RwLock>, + data: RwLock, +} + +/// On-chain service contract data. +struct SecretStoreServiceData { + /// Contract. 
+ pub contract: Arc, + /// Last block we have read logs from. + pub last_log_block: Option, } /// Pending requests iterator. @@ -105,47 +111,72 @@ impl OnChainServiceContract { client: client, self_key_pair: self_key_pair, address: address, - contract: RwLock::new(Arc::new(SecretStoreService::new(contract_addr))), + data: RwLock::new(SecretStoreServiceData { + contract: Arc::new(SecretStoreService::new(contract_addr)), + last_log_block: None, + }), } } } impl ServiceContract for OnChainServiceContract { - fn update(&self) { + fn update(&self) -> bool { + // TODO [Sec]: registry_address currently reads from BlockId::Latest, instead of + // from block with REQUEST_CONFIRMATIONS_REQUIRED confirmations if let &ContractAddress::Registry = &self.address { if let Some(client) = self.client.get() { // update contract address from registry let service_contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()).unwrap_or_default(); - if self.contract.read().address != service_contract_addr { + if self.data.read().contract.address != service_contract_addr { trace!(target: "secretstore", "{}: installing service contract from address {}", self.self_key_pair.public(), service_contract_addr); - *self.contract.write() = Arc::new(SecretStoreService::new(service_contract_addr)); + self.data.write().contract = Arc::new(SecretStoreService::new(service_contract_addr)); } } } - } - fn is_actual(&self) -> bool { - self.contract.read().address != Default::default() + self.data.read().contract.address != Default::default() && self.client.get().is_some() } - fn read_logs(&self, first_block: H256, last_block: H256) -> Box>> { + fn read_logs(&self) -> Box>> { let client = match self.client.get() { Some(client) => client, None => { - warn!(target: "secretstore", "{}: client is offline during read_pending_requests call", + warn!(target: "secretstore", "{}: client is offline during read_logs call", self.self_key_pair.public()); return Box::new(::std::iter::empty()); }, }; 
+ // prepare range of blocks to read logs from + let (address, first_block, last_block) = { + let mut data = self.data.write(); + let address = data.contract.address.clone(); + let confirmed_block = match get_confirmed_block_hash(&*client, REQUEST_CONFIRMATIONS_REQUIRED) { + Some(confirmed_block) => confirmed_block, + None => return Box::new(::std::iter::empty()), // no block with enough confirmations + }; + let first_block = match data.last_log_block.take().and_then(|b| client.tree_route(&b, &confirmed_block)) { + // if we have a route from last_log_block to confirmed_block => search for logs on this route + // + // potentially this could lead us to reading same logs twice when reorganizing to the fork, which + // already has been canonical previosuly + // the worst thing that can happen in this case is spending some time reading unneeded data from SS db + Some(ref route) if route.index < route.blocks.len() => route.blocks[route.index], + // else we care only about confirmed block + _ => confirmed_block.clone(), + }; + + data.last_log_block = Some(confirmed_block.clone()); + (address, first_block, confirmed_block) + }; + // read server key generation requests - let contract_address = self.contract.read().address.clone(); let request_logs = client.logs(Filter { - from_block: BlockId::Hash(first_block), - to_block: BlockId::Hash(last_block), - address: Some(vec![contract_address]), + from_block: BlockId::Hash(first_block.clone()), + to_block: BlockId::Hash(last_block.clone()), + address: Some(vec![address]), topics: vec![ Some(vec![*SERVER_KEY_REQUESTED_EVENT_NAME_HASH]), None, @@ -155,6 +186,9 @@ impl ServiceContract for OnChainServiceContract { limit: None, }); + trace!(target: "secretstore", "{}: read {} events from service contract in blocks {}..{}", + self.self_key_pair.public(), request_logs.len(), first_block, last_block); + Box::new(request_logs.into_iter().map(|log| log.entry.topics)) } @@ -168,15 +202,15 @@ impl ServiceContract for OnChainServiceContract { 
}, }; - let contract = self.contract.read(); - match contract.address == Default::default() { + // we only need requests that are here for more than REQUEST_CONFIRMATIONS_REQUIRED blocks + // => we're reading from Latest - (REQUEST_CONFIRMATIONS_REQUIRED + 1) block + let data = self.data.read(); + match data.contract.address == Default::default() { true => Box::new(::std::iter::empty()), - false => client.block_number(BlockId::Latest) - .and_then(|b| b.checked_sub(REQUEST_CONFIRMATIONS_REQUIRED)) - .and_then(|b| client.block_hash(BlockId::Number(b))) + false => get_confirmed_block_hash(&*client, REQUEST_CONFIRMATIONS_REQUIRED + 1) .and_then(|b| { let do_call = |a, d| future::done(client.call_contract(BlockId::Hash(b.clone()), a, d)); - contract.server_key_generation_requests_count(&do_call).wait() + data.contract.server_key_generation_requests_count(&do_call).wait() .map_err(|error| { warn!(target: "secretstore", "{}: call to server_key_generation_requests_count failed: {}", self.self_key_pair.public(), error); @@ -187,7 +221,7 @@ impl ServiceContract for OnChainServiceContract { }) .map(|(b, l)| Box::new(PendingRequestsIterator { client: client, - contract: contract.clone(), + contract: data.contract.clone(), self_key_pair: self.self_key_pair.clone(), block: b, index: 0.into(), @@ -199,8 +233,8 @@ impl ServiceContract for OnChainServiceContract { fn publish_server_key(&self, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> { // only publish if contract address is set && client is online - let contract = self.contract.read(); - if contract.address == Default::default() { + let data = self.data.read(); + if data.contract.address == Default::default() { // it is not an error, because key could be generated even without contract return Ok(()); } @@ -215,7 +249,7 @@ impl ServiceContract for OnChainServiceContract { // or key has been requested using HTTP API let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d)); let 
self_address = public_to_address(self.self_key_pair.public()); - if contract.get_server_key_confirmation_status(&do_call, server_key_id.clone(), self_address).wait().unwrap_or(false) { + if data.contract.get_server_key_confirmation_status(&do_call, server_key_id.clone(), self_address).wait().unwrap_or(false) { return Ok(()); } @@ -223,7 +257,7 @@ impl ServiceContract for OnChainServiceContract { let server_key_hash = keccak(server_key); let signed_server_key = self.self_key_pair.sign(&server_key_hash).map_err(|e| format!("{}", e))?; let signed_server_key: Signature = signed_server_key.into_electrum().into(); - let transaction_data = contract.encode_server_key_generated_input(server_key_id.clone(), + let transaction_data = data.contract.encode_server_key_generated_input(server_key_id.clone(), server_key.to_vec(), signed_server_key.v(), signed_server_key.r().into(), @@ -232,7 +266,7 @@ impl ServiceContract for OnChainServiceContract { // send transaction client.transact_contract( - contract.address.clone(), + data.contract.address.clone(), transaction_data ).map_err(|e| format!("{}", e))?; @@ -271,6 +305,13 @@ impl Iterator for PendingRequestsIterator { } } +/// Get hash of the last block with at least n confirmations. 
+fn get_confirmed_block_hash(client: &Client, confirmations: u64) -> Option { + client.block_number(BlockId::Latest) + .and_then(|b| b.checked_sub(confirmations)) + .and_then(|b| client.block_hash(BlockId::Number(b))) +} + #[cfg(test)] pub mod tests { use parking_lot::Mutex; @@ -289,14 +330,11 @@ pub mod tests { } impl ServiceContract for DummyServiceContract { - fn update(&self) { + fn update(&self) -> bool { + true } - fn is_actual(&self) -> bool { - self.is_actual - } - - fn read_logs(&self, _first_block: H256, _last_block: H256) -> Box>> { + fn read_logs(&self) -> Box>> { Box::new(self.logs.clone().into_iter()) } diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs index ebf9aff58..9031461e9 100644 --- a/secret_store/src/listener/service_contract_listener.rs +++ b/secret_store/src/listener/service_contract_listener.rs @@ -141,8 +141,8 @@ impl ServiceContractListener { } /// Process incoming events of service contract. 
- fn process_service_contract_events(&self, first: H256, last: H256) { - self.data.tasks_queue.push(self.data.contract.read_logs(first, last) + fn process_service_contract_events(&self) { + self.data.tasks_queue.push_many(self.data.contract.read_logs() .filter_map(|topics| match topics.len() { // when key is already generated && we have this key 3 if self.data.key_storage.get(&topics[1]).map(|k| k.is_some()).unwrap_or_default() => { @@ -324,15 +324,11 @@ impl ChainNotify for ServiceContractListener { return; } - self.data.contract.update(); - if !self.data.contract.is_actual() { + if !self.data.contract.update() { return; } - let reason = "enacted.len() != 0; qed"; - self.process_service_contract_events( - enacted.first().expect(reason).clone(), - enacted.last().expect(reason).clone()); + self.process_service_contract_events(); // schedule retry if received enough blocks since last retry // it maybe inaccurate when switching syncing/synced states, but that's ok @@ -582,9 +578,9 @@ mod tests { #[test] fn no_tasks_scheduled_when_no_contract_events() { let listener = make_service_contract_listener(None, None, None); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); - listener.process_service_contract_events(Default::default(), Default::default()); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); + listener.process_service_contract_events(); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); } #[test] @@ -592,10 +588,10 @@ mod tests { let mut contract = DummyServiceContract::default(); contract.logs.push(vec![Default::default(), Default::default(), Default::default()]); let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); - listener.process_service_contract_events(Default::default(), Default::default()); - 
assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 2); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().pop_back(), Some(ServiceTask::GenerateServerKey(Default::default(), Default::default()))); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); + listener.process_service_contract_events(); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 2); + assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::GenerateServerKey(Default::default(), Default::default()))); } #[test] @@ -604,9 +600,9 @@ mod tests { let mut contract = DummyServiceContract::default(); contract.logs.push(vec![Default::default(), server_key_id, Default::default()]); let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); - listener.process_service_contract_events(Default::default(), Default::default()); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); + listener.process_service_contract_events(); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); } #[test] @@ -615,10 +611,10 @@ mod tests { contract.logs.push(vec![Default::default(), Default::default(), Default::default()]); let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); listener.data.key_storage.insert(Default::default(), Default::default()).unwrap(); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); - listener.process_service_contract_events(Default::default(), Default::default()); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 2); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().pop_back(), Some(ServiceTask::RestoreServerKey(Default::default()))); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); + listener.process_service_contract_events(); + 
assert_eq!(listener.data.tasks_queue.snapshot().len(), 2); + assert_eq!(listener.data.tasks_queue.snapshot().pop_back(), Some(ServiceTask::RestoreServerKey(Default::default()))); } #[test] @@ -626,9 +622,9 @@ mod tests { let mut contract = DummyServiceContract::default(); contract.logs.push(vec![Default::default(), Default::default()]); let listener = make_service_contract_listener(Some(Arc::new(contract)), None, None); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); - listener.process_service_contract_events(Default::default(), Default::default()); - assert_eq!(listener.data.tasks_queue.service_tasks.lock().len(), 1); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); + listener.process_service_contract_events(); + assert_eq!(listener.data.tasks_queue.snapshot().len(), 1); } #[test] diff --git a/secret_store/src/listener/tasks_queue.rs b/secret_store/src/listener/tasks_queue.rs index f313c8431..8e4700dbb 100644 --- a/secret_store/src/listener/tasks_queue.rs +++ b/secret_store/src/listener/tasks_queue.rs @@ -19,14 +19,14 @@ use parking_lot::{Mutex, Condvar}; #[derive(Default)] /// Service tasks queue. -pub struct TasksQueue { +pub struct TasksQueue { /// Service event. service_event: Condvar, /// Service tasks queue. service_tasks: Mutex>, } -impl TasksQueue { +impl TasksQueue where Task: Clone { /// Create new tasks queue. pub fn new() -> Self { TasksQueue { @@ -35,6 +35,12 @@ impl TasksQueue { } } + #[cfg(test)] + /// Get current tasks snapshot. + pub fn snapshot(&self) -> VecDeque { + self.service_tasks.lock().clone() + } + /// Push task to the front of queue. pub fn push_front(&self, task: Task) { let mut service_tasks = self.service_tasks.lock(); @@ -49,7 +55,17 @@ impl TasksQueue { self.service_event.notify_all(); } - /// Wait for new task. + /// Push task to the back of queue. 
+ pub fn push_many>(&self, tasks: I) { + let mut service_tasks = self.service_tasks.lock(); + let previous_len = service_tasks.len(); + service_tasks.extend(tasks); + if service_tasks.len() != previous_len { + self.service_event.notify_all(); + } + } + + /// Wait for new task (task is removed from the front of queue). pub fn wait(&self) -> Task { let mut service_tasks = self.service_tasks.lock(); if service_tasks.is_empty() { @@ -57,6 +73,6 @@ impl TasksQueue { } service_tasks.pop_front() - .expect("service_event is only fired when there are new tasks or is_shutdown == true; is_shutdown == false; qed") + .expect("service_event is only fired when there are new tasks; qed") } } From 8d15338c84738f45e139ad7198950702f78ffca5 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 21 Dec 2017 16:20:34 +0300 Subject: [PATCH 35/42] SecretStore: removed extra-tracing --- secret_store/src/listener/service_contract.rs | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/secret_store/src/listener/service_contract.rs b/secret_store/src/listener/service_contract.rs index c49bf49b7..deed4da3e 100644 --- a/secret_store/src/listener/service_contract.rs +++ b/secret_store/src/listener/service_contract.rs @@ -174,8 +174,8 @@ impl ServiceContract for OnChainServiceContract { // read server key generation requests let request_logs = client.logs(Filter { - from_block: BlockId::Hash(first_block.clone()), - to_block: BlockId::Hash(last_block.clone()), + from_block: BlockId::Hash(first_block), + to_block: BlockId::Hash(last_block), address: Some(vec![address]), topics: vec![ Some(vec![*SERVER_KEY_REQUESTED_EVENT_NAME_HASH]), @@ -186,20 +186,13 @@ impl ServiceContract for OnChainServiceContract { limit: None, }); - trace!(target: "secretstore", "{}: read {} events from service contract in blocks {}..{}", - self.self_key_pair.public(), request_logs.len(), first_block, last_block); - Box::new(request_logs.into_iter().map(|log| log.entry.topics)) } fn 
read_pending_requests(&self) -> Box> { let client = match self.client.get() { Some(client) => client, - None => { - warn!(target: "secretstore", "{}: client is untrusted during read_pending_requests call", - self.self_key_pair.public()); - return Box::new(::std::iter::empty()); - }, + None => return Box::new(::std::iter::empty()), }; // we only need requests that are here for more than REQUEST_CONFIRMATIONS_REQUIRED blocks From 9a5d0fed2c63136cd996e333e3b81d43ab708b97 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 21 Dec 2017 16:54:24 +0300 Subject: [PATCH 36/42] SecretStore: return error if http listener init has failed --- secret_store/src/listener/http_listener.rs | 9 +++++---- secret_store/src/types/all.rs | 3 +++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/secret_store/src/listener/http_listener.rs b/secret_store/src/listener/http_listener.rs index ba1bc7370..61595e150 100644 --- a/secret_store/src/listener/http_listener.rs +++ b/secret_store/src/listener/http_listener.rs @@ -85,10 +85,10 @@ impl KeyServerHttpListener { }); let listener_address = format!("{}:{}", listener_address.address, listener_address.port); - let http_server = HttpServer::http(&listener_address).expect("cannot start HttpServer"); - let http_server = http_server.handle(KeyServerHttpHandler { - handler: shared_handler.clone(), - }).expect("cannot start HttpServer"); + let http_server = HttpServer::http(&listener_address) + .and_then(|http_server| http_server.handle(KeyServerHttpHandler { + handler: shared_handler.clone(), + })).map_err(|err| Error::Hyper(format!("{}", err)))?; let listener = KeyServerHttpListener { http_server: http_server, @@ -234,6 +234,7 @@ fn return_error(mut res: HttpResponse, err: Error) { Error::BadSignature => *res.status_mut() = HttpStatusCode::BadRequest, Error::AccessDenied => *res.status_mut() = HttpStatusCode::Forbidden, Error::DocumentNotFound => *res.status_mut() = HttpStatusCode::NotFound, + Error::Hyper(_) => 
*res.status_mut() = HttpStatusCode::BadRequest, Error::Serde(_) => *res.status_mut() = HttpStatusCode::BadRequest, Error::Database(_) => *res.status_mut() = HttpStatusCode::InternalServerError, Error::Internal(_) => *res.status_mut() = HttpStatusCode::InternalServerError, diff --git a/secret_store/src/types/all.rs b/secret_store/src/types/all.rs index 8738ba032..db41e647f 100644 --- a/secret_store/src/types/all.rs +++ b/secret_store/src/types/all.rs @@ -44,6 +44,8 @@ pub enum Error { AccessDenied, /// Requested document not found DocumentNotFound, + /// Hyper error + Hyper(String), /// Serialization/deserialization error Serde(String), /// Database-related error @@ -118,6 +120,7 @@ impl fmt::Display for Error { Error::BadSignature => write!(f, "Bad signature"), Error::AccessDenied => write!(f, "Access dened"), Error::DocumentNotFound => write!(f, "Document not found"), + Error::Hyper(ref msg) => write!(f, "Hyper error: {}", msg), Error::Serde(ref msg) => write!(f, "Serialization error: {}", msg), Error::Database(ref msg) => write!(f, "Database error: {}", msg), Error::Internal(ref msg) => write!(f, "Internal error: {}", msg), From 2a73101fabfed1de4492edf5ff2bcaf43287dee5 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 27 Dec 2017 11:44:47 +0300 Subject: [PATCH 37/42] updated doc --- secret_store/src/listener/tasks_queue.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/secret_store/src/listener/tasks_queue.rs b/secret_store/src/listener/tasks_queue.rs index 8e4700dbb..e228d12ce 100644 --- a/secret_store/src/listener/tasks_queue.rs +++ b/secret_store/src/listener/tasks_queue.rs @@ -18,7 +18,7 @@ use std::collections::VecDeque; use parking_lot::{Mutex, Condvar}; #[derive(Default)] -/// Service tasks queue. +/// General deque-based tasks queue. pub struct TasksQueue { /// Service event. 
service_event: Condvar, From 74d2896397141c816e93de6b586683246c6c9a39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 27 Dec 2017 11:02:39 +0100 Subject: [PATCH 38/42] Fix version. --- util/version/Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/util/version/Cargo.toml b/util/version/Cargo.toml index 22bf07fb1..44f5a08f2 100644 --- a/util/version/Cargo.toml +++ b/util/version/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "parity-version" -version = "0.1.0" +# NOTE: this value is used for Parity version string. +version = "1.9.0" authors = ["Parity Technologies "] build = "build.rs" From 5b5dd85cf9687c908fa89c1f12b8bd1a06c3e3f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 27 Dec 2017 11:32:05 +0100 Subject: [PATCH 39/42] Fix lock file. --- Cargo.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 55e88f5e3..c3480664e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1921,7 +1921,7 @@ dependencies = [ "parity-rpc 1.9.0", "parity-rpc-client 1.4.0", "parity-updater 1.9.0", - "parity-version 0.1.0", + "parity-version 1.9.0", "parity-whisper 0.1.0", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "path 0.1.0", @@ -1967,7 +1967,7 @@ dependencies = [ "parity-hash-fetch 1.9.0", "parity-reactor 0.1.0", "parity-ui 1.9.0", - "parity-version 0.1.0", + "parity-version 1.9.0", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2115,7 +2115,7 @@ dependencies = [ "order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "parity-reactor 0.1.0", "parity-updater 1.9.0", - "parity-version 0.1.0", + "parity-version 1.9.0", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "pretty_assertions 0.1.2 
(registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2223,7 +2223,7 @@ dependencies = [ "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "parity-hash-fetch 1.9.0", "parity-reactor 0.1.0", - "parity-version 0.1.0", + "parity-version 1.9.0", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "path 0.1.0", "semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2232,7 +2232,7 @@ dependencies = [ [[package]] name = "parity-version" -version = "0.1.0" +version = "1.9.0" dependencies = [ "ethcore-bytes 0.1.0", "rlp 0.2.1", From fc0eb600f367ecaeb7307dd8ff520927b4e5855f Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 27 Dec 2017 15:21:31 +0300 Subject: [PATCH 40/42] checked_sub -> saturating_sub --- secret_store/src/listener/service_contract.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/secret_store/src/listener/service_contract.rs b/secret_store/src/listener/service_contract.rs index deed4da3e..b7e365bed 100644 --- a/secret_store/src/listener/service_contract.rs +++ b/secret_store/src/listener/service_contract.rs @@ -301,7 +301,7 @@ impl Iterator for PendingRequestsIterator { /// Get hash of the last block with at least n confirmations. fn get_confirmed_block_hash(client: &Client, confirmations: u64) -> Option { client.block_number(BlockId::Latest) - .and_then(|b| b.checked_sub(confirmations)) + .map(|b| b.saturating_sub(confirmations)) .and_then(|b| client.block_hash(BlockId::Number(b))) } From 26e4fc680c7bbe5654ba65627af5e6852f8a8235 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 27 Dec 2017 18:56:06 +0100 Subject: [PATCH 41/42] Fix default CORS settings. (#7387) * Fix default CORS settings. * Add info regarding special options. 
--- parity/cli/mod.rs | 16 ++++++++-------- parity/cli/tests/config.full.toml | 6 +++--- parity/configuration.rs | 20 +++++++++++++------- parity/ipfs.rs | 4 ++-- parity/rpc.rs | 6 +++--- 5 files changed, 29 insertions(+), 23 deletions(-) diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 60d391ee9..b225ee189 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -466,9 +466,9 @@ usage! { "--jsonrpc-threads=[THREADS]", "Turn on additional processing threads in all RPC servers. Setting this to non-zero value allows parallel cpu-heavy queries execution.", - ARG arg_jsonrpc_cors: (Option) = None, or |c: &Config| otry!(c.rpc).cors.clone(), + ARG arg_jsonrpc_cors: (String) = "none", or |c: &Config| otry!(c.rpc).cors.as_ref().map(|vec| vec.join(",")), "--jsonrpc-cors=[URL]", - "Specify CORS header for JSON-RPC API responses.", + "Specify CORS header for JSON-RPC API responses. Special options: \"all\", \"none\".", ARG arg_jsonrpc_server_threads: (Option) = None, or |c: &Config| otry!(c.rpc).server_threads, "--jsonrpc-server-threads=[NUM]", @@ -538,9 +538,9 @@ usage! { "--ipfs-api-hosts=[HOSTS]", "List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: \"all\", \"none\".", - ARG arg_ipfs_api_cors: (Option) = None, or |c: &Config| otry!(c.ipfs).cors.clone(), + ARG arg_ipfs_api_cors: (String) = "none", or |c: &Config| otry!(c.ipfs).cors.as_ref().map(|vec| vec.join(",")), "--ipfs-api-cors=[URL]", - "Specify CORS header for IPFS API responses.", + "Specify CORS header for IPFS API responses. 
Special options: \"all\", \"none\".", ["Secret store options"] FLAG flag_no_secretstore: (bool) = false, or |c: &Config| otry!(c.secretstore).disable.clone(), @@ -1052,7 +1052,7 @@ struct Rpc { disable: Option, port: Option, interface: Option, - cors: Option, + cors: Option>, apis: Option>, hosts: Option>, server_threads: Option, @@ -1108,7 +1108,7 @@ struct Ipfs { enable: Option, port: Option, interface: Option, - cors: Option, + cors: Option>, hosts: Option>, } @@ -1468,7 +1468,7 @@ mod tests { flag_no_jsonrpc: false, arg_jsonrpc_port: 8545u16, arg_jsonrpc_interface: "local".into(), - arg_jsonrpc_cors: Some("null".into()), + arg_jsonrpc_cors: "null".into(), arg_jsonrpc_apis: "web3,eth,net,parity,traces,rpc,secretstore".into(), arg_jsonrpc_hosts: "none".into(), arg_jsonrpc_server_threads: None, @@ -1507,7 +1507,7 @@ mod tests { flag_ipfs_api: false, arg_ipfs_api_port: 5001u16, arg_ipfs_api_interface: "local".into(), - arg_ipfs_api_cors: Some("null".into()), + arg_ipfs_api_cors: "null".into(), arg_ipfs_api_hosts: "none".into(), // -- Sealing/Mining Options diff --git a/parity/cli/tests/config.full.toml b/parity/cli/tests/config.full.toml index a49717085..6502da1a2 100644 --- a/parity/cli/tests/config.full.toml +++ b/parity/cli/tests/config.full.toml @@ -49,7 +49,7 @@ reserved_peers = "./path_to_file" disable = false port = 8545 interface = "local" -cors = "null" +cors = ["null"] apis = ["web3", "eth", "net", "parity", "traces", "rpc", "secretstore"] hosts = ["none"] @@ -76,7 +76,7 @@ path = "$HOME/.parity/dapps" user = "test_user" pass = "test_pass" -[secretstore] +[secretstore] disable = false disable_http = false disable_acl_check = false @@ -91,7 +91,7 @@ path = "$HOME/.parity/secretstore" enable = false port = 5001 interface = "local" -cors = "null" +cors = ["null"] hosts = ["none"] [mining] diff --git a/parity/configuration.rs b/parity/configuration.rs index 715e63db5..93a39bc6f 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -775,13 
+775,19 @@ impl Configuration { apis.join(",") } - fn cors(cors: Option<&String>) -> Option> { - cors.map(|ref c| c.split(',').map(Into::into).collect()) + fn cors(cors: &str) -> Option> { + match cors { + "none" => return Some(Vec::new()), + "*" | "all" | "any" => return None, + _ => {}, + } + + Some(cors.split(',').map(Into::into).collect()) } fn rpc_cors(&self) -> Option> { - let cors = self.args.arg_jsonrpc_cors.as_ref().or(self.args.arg_rpccorsdomain.as_ref()); - Self::cors(cors) + let cors = self.args.arg_rpccorsdomain.clone().unwrap_or_else(|| self.args.arg_jsonrpc_cors.to_owned()); + Self::cors(&cors) } fn ipfs_cors(&self) -> Option> { @@ -1458,7 +1464,7 @@ mod tests { assert_eq!(net.rpc_enabled, true); assert_eq!(net.rpc_interface, "0.0.0.0".to_owned()); assert_eq!(net.rpc_port, 8000); - assert_eq!(conf.rpc_cors(), Some(vec!["*".to_owned()])); + assert_eq!(conf.rpc_cors(), None); assert_eq!(conf.rpc_apis(), "web3,eth".to_owned()); } @@ -1525,8 +1531,8 @@ mod tests { let conf2 = parse(&["parity", "--ipfs-api-cors", "http://parity.io,http://something.io"]); // then - assert_eq!(conf0.ipfs_cors(), None); - assert_eq!(conf1.ipfs_cors(), Some(vec!["*".into()])); + assert_eq!(conf0.ipfs_cors(), Some(vec![])); + assert_eq!(conf1.ipfs_cors(), None); assert_eq!(conf2.ipfs_cors(), Some(vec!["http://parity.io".into(),"http://something.io".into()])); } diff --git a/parity/ipfs.rs b/parity/ipfs.rs index 45c3f7062..ac9a4662b 100644 --- a/parity/ipfs.rs +++ b/parity/ipfs.rs @@ -34,8 +34,8 @@ impl Default for Configuration { enabled: false, port: 5001, interface: "127.0.0.1".into(), - cors: None, - hosts: Some(Vec::new()), + cors: Some(vec![]), + hosts: Some(vec![]), } } } diff --git a/parity/rpc.rs b/parity/rpc.rs index 3a202b590..d17b77ccd 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -59,8 +59,8 @@ impl Default for HttpConfiguration { interface: "127.0.0.1".into(), port: 8545, apis: ApiSet::UnsafeContext, - cors: None, - hosts: Some(Vec::new()), + cors: 
Some(vec![]), + hosts: Some(vec![]), server_threads: 1, processing_threads: 4, } @@ -98,7 +98,7 @@ impl From for HttpConfiguration { interface: conf.interface, port: conf.port, apis: rpc_apis::ApiSet::UnsafeContext, - cors: None, + cors: Some(vec![]), hosts: conf.hosts, server_threads: 1, processing_threads: 0, From 48a15cecf849e6b496bbd9963726c351aeefe256 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 28 Dec 2017 15:12:19 +0100 Subject: [PATCH 42/42] Update bootnodes (#7363) * Updating mainnet bootnodes. * Add additional parity-beta bootnodes. * Restore old parity bootnodes and update foundation bootnodes --- ethcore/res/ethereum/foundation.json | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/ethcore/res/ethereum/foundation.json b/ethcore/res/ethereum/foundation.json index c7ff63bf6..95f330cfd 100644 --- a/ethcore/res/ethereum/foundation.json +++ b/ethcore/res/ethereum/foundation.json @@ -173,23 +173,36 @@ "stateRoot": "0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544" }, "nodes": [ + "enode://6a868ced2dec399c53f730261173638a93a40214cf299ccf4d42a76e3fa54701db410669e8006347a4b3a74fa090bb35af0320e4bc8d04cf5b7f582b1db285f5@163.172.131.191:30303", + "enode://66a483383882a518fcc59db6c017f9cd13c71261f13c8d7e67ed43adbbc82a932d88d2291f59be577e9425181fc08828dc916fdd053af935a9491edf9d6006ba@212.47.247.103:30303", + "enode://cd6611461840543d5b9c56fbf088736154c699c43973b3a1a32390cf27106f87e58a818a606ccb05f3866de95a4fe860786fea71bf891ea95f234480d3022aa3@163.172.157.114:30303", "enode://5a62f19d35c0da8b576c9414568c728d4744e6e9d436c0f9db27456400011414f515871f13a6b8e0468534b5116cfe765d7630f680f1707a38467940a9f62511@45.55.33.62:30303", "enode://605e04a43b1156966b3a3b66b980c87b7f18522f7f712035f84576016be909a2798a438b2b17b1a8c58db314d88539a77419ca4be36148c086900fba487c9d39@188.166.255.12:30303", 
"enode://dc72806c3aa8fda207c8c018aba8d6cf143728b3628b6ded8d5e8cdeb8aa05cbd53f710ecd014c9a8f0d1e98f2874bff8afb15a229202f510a9c0258d1f6d109@159.203.210.80:30303", + "enode://aafde2e81e035f417019a80f5342d1cd0e5bce97f83230fc57e1abbb3a9a5d6fb751446040c67261ed422324ffb69214567e181bb4ac0cc6e817451be0eaad1e@52.178.74.216:30303", + "enode://460e54d7e9a361d326a9e503b3879c6a1075e1bfb7ea919b512ea1fe841e65f82c5f87af028f14a7825be1c1260825d5326b93b43a5bc72e3214a99e0c4c7bd4@52.230.6.166:30303", + "enode://28faaf6b2e86694d8978b8e6986e7813951d7bd25201116fa77de893aabedd2a4a8d5832776905b4c3e616320506516d08239d82aeef4355f6878c3a701a6059@40.71.19.172:30303", + "enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303", + "enode://3f1d12044546b76342d59d4a05532c14b85aa669704bfe1f864fe079415aa2c02d743e03218e57a33fb94523adb54032871a6c51b2cc5514cb7c7e35b3ed0a99@13.93.211.84:30303", + "enode://78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d@191.235.84.50:30303", + "enode://158f8aab45f6d19c6cbf4a089c2670541a8da11978a2f90dbf6a502a4a3bab80d288afdbeb7ec0ef6d92de563767f3b1ea9e8e334ca711e9f8e2df5a0385e8e6@13.75.154.138:30303", + "enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303", + "enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303", + "enode://0cc5f5ffb5d9098c8b8c62325f3797f56509bff942704687b6530992ac706e2cb946b90a34f1f19548cd3c7baccbcaea354531e5983c7d1bc0dee16ce4b6440b@40.118.3.223:30305", + "enode://1c7a64d76c0334b0418c004af2f67c50e36a3be60b5e4790bdac0439d21603469a85fad36f2473c9a80eb043ae60936df905fa28f1ff614c3e5dc34f15dcd2dc@40.118.3.223:30308", + 
"enode://85c85d7143ae8bb96924f2b54f1b3e70d8c4d367af305325d30a61385a432f247d2c75c45c6b4a60335060d072d7f5b35dd1d4c45f76941f62a4f83b6e75daaf@40.118.3.223:30309", + "enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303", + "enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303", + "enode://4cd540b2c3292e17cff39922e864094bf8b0741fcc8c5dcea14957e389d7944c70278d872902e3d0345927f621547efa659013c400865485ab4bfa0c6596936f@138.201.144.135:30303", "enode://01f76fa0561eca2b9a7e224378dd854278735f1449793c46ad0c4e79e8775d080c21dcc455be391e90a98153c3b05dcc8935c8440de7b56fe6d67251e33f4e3c@51.15.42.252:30303", "enode://2c9059f05c352b29d559192fe6bca272d965c9f2290632a2cfda7f83da7d2634f3ec45ae3a72c54dd4204926fb8082dcf9686e0d7504257541c86fc8569bcf4b@163.172.171.38:30303", "enode://efe4f2493f4aff2d641b1db8366b96ddacfe13e7a6e9c8f8f8cf49f9cdba0fdf3258d8c8f8d0c5db529f8123c8f1d95f36d54d590ca1bb366a5818b9a4ba521c@163.172.187.252:30303", - "enode://cd6611461840543d5b9c56fbf088736154c699c43973b3a1a32390cf27106f87e58a818a606ccb05f3866de95a4fe860786fea71bf891ea95f234480d3022aa3@163.172.157.114:30303", "enode://bcc7240543fe2cf86f5e9093d05753dd83343f8fda7bf0e833f65985c73afccf8f981301e13ef49c4804491eab043647374df1c4adf85766af88a624ecc3330e@136.243.154.244:30303", "enode://ed4227681ca8c70beb2277b9e870353a9693f12e7c548c35df6bca6a956934d6f659999c2decb31f75ce217822eefca149ace914f1cbe461ed5a2ebaf9501455@88.212.206.70:30303", "enode://cadc6e573b6bc2a9128f2f635ac0db3353e360b56deef239e9be7e7fce039502e0ec670b595f6288c0d2116812516ad6b6ff8d5728ff45eba176989e40dead1e@37.128.191.230:30303", "enode://595a9a06f8b9bc9835c8723b6a82105aea5d55c66b029b6d44f229d6d135ac3ecdd3e9309360a961ea39d7bee7bac5d03564077a4e08823acc723370aace65ec@46.20.235.22:30303", 
"enode://029178d6d6f9f8026fc0bc17d5d1401aac76ec9d86633bba2320b5eed7b312980c0a210b74b20c4f9a8b0b2bf884b111fa9ea5c5f916bb9bbc0e0c8640a0f56c@216.158.85.185:30303", - "enode://fdd1b9bb613cfbc200bba17ce199a9490edc752a833f88d4134bf52bb0d858aa5524cb3ec9366c7a4ef4637754b8b15b5dc913e4ed9fdb6022f7512d7b63f181@212.47.247.103:30303", - "enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303", - "enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303", - "enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303", - "enode://4cd540b2c3292e17cff39922e864094bf8b0741fcc8c5dcea14957e389d7944c70278d872902e3d0345927f621547efa659013c400865485ab4bfa0c6596936f@138.201.144.135:30303" + "enode://fdd1b9bb613cfbc200bba17ce199a9490edc752a833f88d4134bf52bb0d858aa5524cb3ec9366c7a4ef4637754b8b15b5dc913e4ed9fdb6022f7512d7b63f181@212.47.247.103:30303" ], "accounts": { "0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },