From 18582d7b654382473a831eda59dc3d5139b39efe Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 30 Jun 2017 11:26:09 +0300 Subject: [PATCH 001/112] do not cache ACL storage contract --- secret_store/src/acl_storage.rs | 54 ++++++++++++++++++++++++++------- 1 file changed, 43 insertions(+), 11 deletions(-) diff --git a/secret_store/src/acl_storage.rs b/secret_store/src/acl_storage.rs index 816d100dc..8ddc9a6e3 100644 --- a/secret_store/src/acl_storage.rs +++ b/secret_store/src/acl_storage.rs @@ -20,6 +20,7 @@ use parking_lot::Mutex; use ethkey::public_to_address; use ethcore::client::{Client, BlockChainClient, BlockId}; use native_contracts::SecretStoreAclStorage; +use util::{H256, Address}; use types::all::{Error, ServerKeyId, Public}; const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker"; @@ -32,33 +33,64 @@ pub trait AclStorage: Send + Sync { /// On-chain ACL storage implementation. pub struct OnChainAclStorage { + /// Cached on-chain contract. + contract: Mutex, +} + +/// Cached on-chain ACL storage contract. +struct CachedContract { /// Blockchain client. client: Arc, - /// On-chain contract. - contract: Mutex>, + /// Hash of best block, when contract address has been read. + best_block_hash: Option, + /// Contract address. + contract_addr: Option
, + /// Contract at given address. + contract: Option, } impl OnChainAclStorage { pub fn new(client: Arc) -> Self { OnChainAclStorage { - client: client, - contract: Mutex::new(None), + contract: Mutex::new(CachedContract::new(client)), } } } impl AclStorage for OnChainAclStorage { fn check(&self, public: &Public, document: &ServerKeyId) -> Result { - let mut contract = self.contract.lock(); - if !contract.is_some() { - *contract = self.client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned()) - .and_then(|contract_addr| { + self.contract.lock().check(public, document) + } +} + +impl CachedContract { + pub fn new(client: Arc) -> Self { + CachedContract { + client: client, + best_block_hash: None, + contract_addr: None, + contract: None, + } + } + + pub fn check(&mut self, public: &Public, document: &ServerKeyId) -> Result { + let new_best_block_hash = self.client.best_block_header().hash(); + if self.best_block_hash.as_ref() != Some(&new_best_block_hash) { + let new_contract_addr = self.client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned()); + if self.contract_addr.as_ref() != new_contract_addr.as_ref() { + self.contract = new_contract_addr.map(|contract_addr| { trace!(target: "secretstore", "Configuring for ACL checker contract from {}", contract_addr); - Some(SecretStoreAclStorage::new(contract_addr)) - }) + SecretStoreAclStorage::new(contract_addr) + }); + + self.contract_addr = new_contract_addr; + } + + self.best_block_hash = Some(new_best_block_hash); } - if let Some(ref contract) = *contract { + + if let Some(contract) = self.contract.as_ref() { let address = public_to_address(&public); let do_call = |a, d| future::done(self.client.call_contract(BlockId::Latest, a, d)); contract.check_permissions(do_call, address, document.clone()) From 5cc40d45251196353bf2b99dd0ad09c38c9ecae1 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 14 Jul 2017 14:51:24 +0300 Subject: [PATCH 002/112] when error comes before initialization 
--- .../key_server_cluster/jobs/job_session.rs | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/secret_store/src/key_server_cluster/jobs/job_session.rs b/secret_store/src/key_server_cluster/jobs/job_session.rs index 7ae1da42a..6608397dd 100644 --- a/secret_store/src/key_server_cluster/jobs/job_session.rs +++ b/secret_store/src/key_server_cluster/jobs/job_session.rs @@ -299,22 +299,22 @@ impl JobSession where Executor: JobExe return Err(Error::ConsensusUnreachable); } - let active_data = self.data.active_data.as_mut() - .expect("we have checked that we are on master node; on master nodes active_data is filled during initialization; qed"); - if active_data.rejects.contains(node) { - return Ok(()); - } - if active_data.requests.remove(node) || active_data.responses.remove(node).is_some() { - active_data.rejects.insert(node.clone()); - if self.data.state == JobSessionState::Finished && active_data.responses.len() < self.meta.threshold + 1 { - self.data.state = JobSessionState::Active; - } - if active_data.requests.len() + active_data.responses.len() >= self.meta.threshold + 1 { + if let Some(active_data) = self.data.active_data.as_mut() { + if active_data.rejects.contains(node) { return Ok(()); } + if active_data.requests.remove(node) || active_data.responses.remove(node).is_some() { + active_data.rejects.insert(node.clone()); + if self.data.state == JobSessionState::Finished && active_data.responses.len() < self.meta.threshold + 1 { + self.data.state = JobSessionState::Active; + } + if active_data.requests.len() + active_data.responses.len() >= self.meta.threshold + 1 { + return Ok(()); + } - self.data.state = JobSessionState::Failed; - return Err(Error::ConsensusUnreachable); + self.data.state = JobSessionState::Failed; + return Err(Error::ConsensusUnreachable); + } } Ok(()) From 81de7e1075600dbaca46bc14baab6cf56ec7c4e0 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 19 Jul 2017 11:35:17 +0300 Subject: [PATCH 
003/112] initial KeyServerSet commit --- Cargo.lock | 1 + ethcore/native_contracts/build.rs | 2 + .../native_contracts/res/key_server_set.json | 1 + .../native_contracts/src/key_server_set.rs | 21 +++ ethcore/native_contracts/src/lib.rs | 2 + secret_store/Cargo.toml | 1 + secret_store/src/acl_storage.rs | 61 ++++---- secret_store/src/key_server.rs | 11 +- .../src/key_server_cluster/cluster.rs | 13 +- .../key_server_cluster/cluster_sessions.rs | 2 +- secret_store/src/key_server_cluster/mod.rs | 1 + secret_store/src/key_server_set.rs | 145 ++++++++++++++++++ secret_store/src/lib.rs | 10 +- 13 files changed, 230 insertions(+), 41 deletions(-) create mode 100644 ethcore/native_contracts/res/key_server_set.json create mode 100644 ethcore/native_contracts/src/key_server_set.rs create mode 100644 secret_store/src/key_server_set.rs diff --git a/Cargo.lock b/Cargo.lock index ad3cca5c7..ae33d7590 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -669,6 +669,7 @@ dependencies = [ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "native-contracts 0.1.0", "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/ethcore/native_contracts/build.rs b/ethcore/native_contracts/build.rs index cec830929..bcb64067c 100644 --- a/ethcore/native_contracts/build.rs +++ b/ethcore/native_contracts/build.rs @@ -21,6 +21,7 @@ use std::fs::File; use std::io::Write; // TODO: just walk the "res" directory and generate whole crate automatically. 
+const KEY_SERVER_SET_ABI: &'static str = include_str!("res/key_server_set.json"); const REGISTRY_ABI: &'static str = include_str!("res/registrar.json"); const URLHINT_ABI: &'static str = include_str!("res/urlhint.json"); const SERVICE_TRANSACTION_ABI: &'static str = include_str!("res/service_transaction.json"); @@ -45,6 +46,7 @@ fn build_test_contracts() { } fn main() { + build_file("KeyServerSet", KEY_SERVER_SET_ABI, "key_server_set.rs"); build_file("Registry", REGISTRY_ABI, "registry.rs"); build_file("Urlhint", URLHINT_ABI, "urlhint.rs"); build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs"); diff --git a/ethcore/native_contracts/res/key_server_set.json b/ethcore/native_contracts/res/key_server_set.json new file mode 100644 index 000000000..93f68837a --- /dev/null +++ b/ethcore/native_contracts/res/key_server_set.json @@ -0,0 +1 @@ +[{"constant":true,"inputs":[{"name":"","type":"uint256"}],"name":"keyServersList","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getKeyServerPublic","outputs":[{"name":"","type":"bytes"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"getKeyServers","outputs":[{"name":"","type":"address[]"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getKeyServerAddress","outputs":[{"name":"","type":"string"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"keyServer","type":"address"}],"name":"removeKeyServer","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"keyServerPublic","type":"bytes"},{"name":"keyServerIp","type":"s
tring"}],"name":"addKeyServer","outputs":[],"payable":false,"type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"keyServer","type":"address"}],"name":"KeyServerAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"keyServer","type":"address"}],"name":"KeyServerRemoved","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"old","type":"address"},{"indexed":true,"name":"current","type":"address"}],"name":"NewOwner","type":"event"}] \ No newline at end of file diff --git a/ethcore/native_contracts/src/key_server_set.rs b/ethcore/native_contracts/src/key_server_set.rs new file mode 100644 index 000000000..60b137aae --- /dev/null +++ b/ethcore/native_contracts/src/key_server_set.rs @@ -0,0 +1,21 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +#![allow(unused_mut, unused_variables, unused_imports)] + +//! Secret store Key Server set contract. 
+ +include!(concat!(env!("OUT_DIR"), "/key_server_set.rs")); diff --git a/ethcore/native_contracts/src/lib.rs b/ethcore/native_contracts/src/lib.rs index e35a4ec19..33cb91563 100644 --- a/ethcore/native_contracts/src/lib.rs +++ b/ethcore/native_contracts/src/lib.rs @@ -23,6 +23,7 @@ extern crate byteorder; extern crate ethabi; extern crate ethcore_util as util; +mod key_server_set; mod registry; mod urlhint; mod service_transaction; @@ -32,6 +33,7 @@ mod validator_report; pub mod test_contracts; +pub use self::key_server_set::KeyServerSet; pub use self::registry::Registry; pub use self::urlhint::Urlhint; pub use self::service_transaction::ServiceTransactionChecker; diff --git a/secret_store/Cargo.toml b/secret_store/Cargo.toml index eea49978d..19f342aa9 100644 --- a/secret_store/Cargo.toml +++ b/secret_store/Cargo.toml @@ -35,3 +35,4 @@ ethcore-logger = { path = "../logger" } ethcrypto = { path = "../ethcrypto" } ethkey = { path = "../ethkey" } native-contracts = { path = "../ethcore/native_contracts" } +lazy_static = "0.2" diff --git a/secret_store/src/acl_storage.rs b/secret_store/src/acl_storage.rs index 8ddc9a6e3..7ae72cbfc 100644 --- a/secret_store/src/acl_storage.rs +++ b/secret_store/src/acl_storage.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::sync::Arc; +use std::sync::{Arc, Weak}; use futures::{future, Future}; use parking_lot::Mutex; use ethkey::public_to_address; -use ethcore::client::{Client, BlockChainClient, BlockId}; +use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; use native_contracts::SecretStoreAclStorage; -use util::{H256, Address}; +use util::{H256, Address, Bytes}; use types::all::{Error, ServerKeyId, Public}; const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker"; @@ -40,9 +40,7 @@ pub struct OnChainAclStorage { /// Cached on-chain ACL storage contract. struct CachedContract { /// Blockchain client. 
- client: Arc, - /// Hash of best block, when contract address has been read. - best_block_hash: Option, + client: Weak, /// Contract address. contract_addr: Option
, /// Contract at given address. @@ -50,10 +48,12 @@ struct CachedContract { } impl OnChainAclStorage { - pub fn new(client: Arc) -> Self { - OnChainAclStorage { + pub fn new(client: &Arc) -> Arc { + let acl_storage = Arc::new(OnChainAclStorage { contract: Mutex::new(CachedContract::new(client)), - } + }); + client.add_notify(acl_storage.clone()); + acl_storage } } @@ -63,20 +63,24 @@ impl AclStorage for OnChainAclStorage { } } +impl ChainNotify for OnChainAclStorage { + fn new_blocks(&self, _imported: Vec, _invalid: Vec, _enacted: Vec, _retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { + self.contract.lock().update() + } +} + impl CachedContract { - pub fn new(client: Arc) -> Self { + pub fn new(client: &Arc) -> Self { CachedContract { - client: client, - best_block_hash: None, + client: Arc::downgrade(client), contract_addr: None, contract: None, } } - pub fn check(&mut self, public: &Public, document: &ServerKeyId) -> Result { - let new_best_block_hash = self.client.best_block_header().hash(); - if self.best_block_hash.as_ref() != Some(&new_best_block_hash) { - let new_contract_addr = self.client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned()); + pub fn update(&mut self) { + if let Some(client) = self.client.upgrade() { + let new_contract_addr = client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned()); if self.contract_addr.as_ref() != new_contract_addr.as_ref() { self.contract = new_contract_addr.map(|contract_addr| { trace!(target: "secretstore", "Configuring for ACL checker contract from {}", contract_addr); @@ -86,18 +90,23 @@ impl CachedContract { self.contract_addr = new_contract_addr; } - - self.best_block_hash = Some(new_best_block_hash); } + } - if let Some(contract) = self.contract.as_ref() { - let address = public_to_address(&public); - let do_call = |a, d| future::done(self.client.call_contract(BlockId::Latest, a, d)); - contract.check_permissions(do_call, address, document.clone()) - .map_err(|err| 
Error::Internal(err)) - .wait() - } else { - Err(Error::Internal("ACL checker contract is not configured".to_owned())) + pub fn check(&mut self, public: &Public, document: &ServerKeyId) -> Result { + match self.contract.as_ref() { + Some(contract) => { + let address = public_to_address(&public); + let do_call = |a, d| future::done( + self.client + .upgrade() + .ok_or("Calling contract without client".into()) + .and_then(|c| c.call_contract(BlockId::Latest, a, d))); + contract.check_permissions(do_call, address, document.clone()) + .map_err(|err| Error::Internal(err)) + .wait() + }, + None => Err(Error::Internal("ACL checker contract is not configured".to_owned())), } } } diff --git a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index fd4e154fa..969782ca2 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -24,6 +24,7 @@ use ethcrypto; use ethkey; use super::acl_storage::AclStorage; use super::key_storage::KeyStorage; +use super::key_server_set::KeyServerSet; use key_server_cluster::{math, ClusterCore}; use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer}; use types::all::{Error, Public, RequestSignature, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow, @@ -44,9 +45,9 @@ pub struct KeyServerCore { impl KeyServerImpl { /// Create new key server instance - pub fn new(config: &ClusterConfiguration, acl_storage: Arc, key_storage: Arc) -> Result { + pub fn new(config: &ClusterConfiguration, key_server_set: Arc, acl_storage: Arc, key_storage: Arc) -> Result { Ok(KeyServerImpl { - data: Arc::new(Mutex::new(KeyServerCore::new(config, acl_storage, key_storage)?)), + data: Arc::new(Mutex::new(KeyServerCore::new(config, key_server_set, acl_storage, key_storage)?)), }) } @@ -143,14 +144,12 @@ impl MessageSigner for KeyServerImpl { } impl KeyServerCore { - pub fn new(config: &ClusterConfiguration, acl_storage: Arc, key_storage: Arc) -> Result { + pub fn new(config: &ClusterConfiguration, 
key_server_set: Arc, acl_storage: Arc, key_storage: Arc) -> Result { let config = NetClusterConfiguration { threads: config.threads, self_key_pair: ethkey::KeyPair::from_secret_slice(&config.self_private)?, listen_address: (config.listener_address.address.clone(), config.listener_address.port), - nodes: config.nodes.iter() - .map(|(node_id, node_address)| (node_id.clone(), (node_address.address.clone(), node_address.port))) - .collect(), + key_server_set: key_server_set, allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes, acl_storage: acl_storage, key_storage: key_storage, diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index c86f30267..b929e835d 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -28,7 +28,7 @@ use tokio_core::reactor::{Handle, Remote, Interval}; use tokio_core::net::{TcpListener, TcpStream}; use ethkey::{Public, KeyPair, Signature, Random, Generator}; use util::H256; -use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage}; +use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, KeyServerSet}; use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, GenerationSessionWrapper, EncryptionSessionWrapper, DecryptionSessionWrapper, SigningSessionWrapper}; use key_server_cluster::message::{self, Message, ClusterMessage, GenerationMessage, EncryptionMessage, DecryptionMessage, @@ -102,8 +102,8 @@ pub struct ClusterConfiguration { pub self_key_pair: KeyPair, /// Interface to listen to. pub listen_address: (String, u16), - /// Cluster nodes. - pub nodes: BTreeMap, + /// Cluster nodes set. 
+ pub key_server_set: Arc, /// Reference to key storage pub key_storage: Arc, /// Reference to ACL storage @@ -671,9 +671,10 @@ impl ClusterConnections { connections: RwLock::new(BTreeMap::new()), }; - for (node_id, &(ref node_addr, node_port)) in config.nodes.iter().filter(|&(node_id, _)| node_id != config.self_key_pair.public()) { - let socket_address = make_socket_address(&node_addr, node_port)?; - connections.nodes.insert(node_id.clone(), socket_address); + let nodes = config.key_server_set.get(); + for (node_id, socket_address) in nodes.iter().filter(|&(node_id, _)| node_id != config.self_key_pair.public()) { + //let socket_address = make_socket_address(&node_addr, node_port)?; + connections.nodes.insert(node_id.clone(), socket_address.clone()); } Ok(connections) diff --git a/secret_store/src/key_server_cluster/cluster_sessions.rs b/secret_store/src/key_server_cluster/cluster_sessions.rs index f66ad972f..f8e4974b1 100644 --- a/secret_store/src/key_server_cluster/cluster_sessions.rs +++ b/secret_store/src/key_server_cluster/cluster_sessions.rs @@ -135,7 +135,7 @@ impl ClusterSessions { pub fn new(config: &ClusterConfiguration) -> Self { ClusterSessions { self_node_id: config.self_key_pair.public().clone(), - nodes: config.nodes.keys().cloned().collect(), + nodes: config.key_server_set.get().keys().cloned().collect(), acl_storage: config.acl_storage.clone(), key_storage: config.key_storage.clone(), generation_sessions: ClusterSessionsContainer::new(), diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index 71c505f95..11c32d528 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -23,6 +23,7 @@ use super::types::all::ServerKeyId; pub use super::types::all::{NodeId, EncryptedDocumentKeyShadow}; pub use super::acl_storage::AclStorage; pub use super::key_storage::{KeyStorage, DocumentKeyShare}; +pub use super::key_server_set::KeyServerSet; pub use 
super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic, SerializableMessageHash}; pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient}; pub use self::generation_session::Session as GenerationSession; diff --git a/secret_store/src/key_server_set.rs b/secret_store/src/key_server_set.rs new file mode 100644 index 000000000..22ffbdb07 --- /dev/null +++ b/secret_store/src/key_server_set.rs @@ -0,0 +1,145 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::{Arc, Weak}; +use std::net::SocketAddr; +use std::collections::HashMap; +use futures::{future, Future}; +use parking_lot::Mutex; +use ethcore::filter::Filter; +use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; +use native_contracts::KeyServerSet as KeyServerSetContract; +use util::{H256, Address, Bytes, Hashable}; +use types::all::Public; + +const KEY_SERVER_SET_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_server_set"; + +// TODO: ethabi should be able to generate this. +const ADDED_EVENT_NAME: &'static [u8] = &*b"KeyServerAdded()"; +const REMOVED_EVENT_NAME: &'static [u8] = &*b"KeyServerRemoved()"; + +lazy_static! 
{ + static ref ADDED_EVENT_NAME_HASH: H256 = ADDED_EVENT_NAME.sha3(); + static ref REMOVED_EVENT_NAME_HASH: H256 = REMOVED_EVENT_NAME.sha3(); +} + +/// Key Server set +pub trait KeyServerSet: Send + Sync { + /// Get set of configured key servers + fn get(&self) -> HashMap; +} + +/// On-chain Key Server set implementation. +pub struct OnChainKeyServerSet { + /// Cached on-chain contract. + contract: Mutex, +} + +/// Cached on-chain Key Server set contract. +struct CachedContract { + /// Blockchain client. + client: Weak, + /// Contract address. + contract_addr: Option
, + /// Active set of key servers. + key_servers: HashMap, +} + +impl OnChainKeyServerSet { + pub fn new(client: &Arc, key_servers: HashMap) -> Arc { + let key_server_set = Arc::new(OnChainKeyServerSet { + contract: Mutex::new(CachedContract::new(client, key_servers)), + }); + client.add_notify(key_server_set.clone()); + key_server_set + } +} + +impl KeyServerSet for OnChainKeyServerSet { + fn get(&self) -> HashMap { + self.contract.lock().get() + } +} + +impl ChainNotify for OnChainKeyServerSet { + fn new_blocks(&self, _imported: Vec, _invalid: Vec, enacted: Vec, retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { + self.contract.lock().update(enacted, retracted) + } +} + +impl CachedContract { + pub fn new(client: &Arc, key_servers: HashMap) -> Self { + CachedContract { + client: Arc::downgrade(client), + contract_addr: None, + key_servers: key_servers, + } + } + + pub fn update(&mut self, enacted: Vec, _retracted: Vec) { + if let Some(client) = self.client.upgrade() { + let new_contract_addr = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned()); + + // new contract installed + if self.contract_addr.as_ref() != new_contract_addr.as_ref() { +println!("=== Installing contract from address: {:?}", new_contract_addr); + self.key_servers = new_contract_addr.map(|contract_addr| { + trace!(target: "secretstore", "Configuring for key server set contract from {}", contract_addr); + + KeyServerSetContract::new(contract_addr) + }) + .map(|contract| { + let mut key_servers = HashMap::new(); + let do_call = |a, d| future::done(self.client.upgrade().ok_or("Calling contract without client".into()).and_then(|c| c.call_contract(BlockId::Latest, a, d))); + let key_servers_list = contract.get_key_servers(do_call).wait() + .map_err(|err| { trace!(target: "secretstore", "Error {} reading list of key servers from contract", err); err }) + .unwrap_or_default(); + for key_server in key_servers_list { + let key_server_public = 
contract.get_key_server_public( + |a, d| future::done(self.client.upgrade().ok_or("Calling contract without client".into()).and_then(|c| c.call_contract(BlockId::Latest, a, d))), key_server).wait() + .and_then(|p| if p.len() == 64 { Ok(Public::from_slice(&p)) } else { Err(format!("Invalid public length {}", p.len())) }); + let key_server_ip = contract.get_key_server_address( + |a, d| future::done(self.client.upgrade().ok_or("Calling contract without client".into()).and_then(|c| c.call_contract(BlockId::Latest, a, d))), key_server).wait() + .and_then(|a| a.parse().map_err(|e| format!("Invalid ip address: {}", e))); + if let (Ok(key_server_public), Ok(key_server_ip)) = (key_server_public, key_server_ip) { + key_servers.insert(key_server_public, key_server_ip); + } + } + key_servers + }) + .unwrap_or_default(); + + return; + } + + // check for events + for enacted_hash in enacted { + let filter = Filter { + from_block: BlockId::Hash(enacted_hash.clone()), + to_block: BlockId::Hash(enacted_hash.clone()), + address: self.contract_addr.clone().map(|a| vec![a]), + topics: vec![Some(vec![*ADDED_EVENT_NAME_HASH]), Some(vec![*REMOVED_EVENT_NAME_HASH])], + limit: None, + }; + println!("=== Number of filtered log entries: {}", client.logs(filter).len()); + } + } + } + + fn get(&self) -> HashMap { + self.key_servers.clone() + } +} diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index f8a74dd1a..235d0edd9 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -21,6 +21,8 @@ extern crate log; extern crate futures; extern crate futures_cpupool; extern crate hyper; +#[macro_use] +extern crate lazy_static; extern crate parking_lot; extern crate rustc_hex; extern crate serde; @@ -56,6 +58,7 @@ mod http_listener; mod key_server; mod key_storage; mod serialization; +mod key_server_set; use std::sync::Arc; use ethcore::client::Client; @@ -68,9 +71,12 @@ pub use traits::{KeyServer}; pub fn start(client: Arc, config: ServiceConfiguration) -> Result, Error> { 
use std::sync::Arc; - let acl_storage = Arc::new(acl_storage::OnChainAclStorage::new(client)); + let acl_storage = acl_storage::OnChainAclStorage::new(&client); + let key_server_set = key_server_set::OnChainKeyServerSet::new(&client, config.cluster_config.nodes.clone() + .into_iter() + .map(|a| (a.0, format!("{}:{}", a.1.address, a.1.port).parse().unwrap())).collect()); // TODO: remove after switching to enode:/// let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?); - let key_server = key_server::KeyServerImpl::new(&config.cluster_config, acl_storage, key_storage)?; + let key_server = key_server::KeyServerImpl::new(&config.cluster_config, key_server_set, acl_storage, key_storage)?; let listener = http_listener::KeyServerHttpListener::start(&config.listener_address, key_server)?; Ok(Box::new(listener)) } From 5080cc3c9edd822d3436df989bf453f89336a89d Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 19 Jul 2017 12:36:40 +0300 Subject: [PATCH 004/112] update_nodes_set in maintain --- .../src/key_server_cluster/cluster.rs | 70 ++++++++++++------- secret_store/src/key_server_set.rs | 16 ++--- 2 files changed, 52 insertions(+), 34 deletions(-) diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index b929e835d..3f381b6f0 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -158,9 +158,17 @@ pub struct ClusterConnections { /// Self node id. pub self_node_id: NodeId, /// All known other key servers. - pub nodes: BTreeMap, + pub key_server_set: Arc, + /// Connections data. + pub data: RwLock, +} + +/// Cluster connections data. +pub struct ClusterConnectionsData { + /// Active key servers set. + pub nodes: BTreeMap, /// Active connections to key servers. - pub connections: RwLock>>, + pub connections: BTreeMap>, } /// Cluster view core. 
@@ -354,6 +362,7 @@ impl ClusterCore { /// Try to connect to every disconnected node. fn connect_disconnected_nodes(data: Arc) { + data.connections.update_nodes_set(); for (node_id, node_address) in data.connections.disconnected_nodes() { if data.config.allow_connecting_to_higher_nodes || data.self_key_pair.public() < &node_id { ClusterCore::connect(data.clone(), node_address); @@ -665,34 +674,29 @@ impl ClusterCore { impl ClusterConnections { pub fn new(config: &ClusterConfiguration) -> Result { - let mut connections = ClusterConnections { + Ok(ClusterConnections { self_node_id: config.self_key_pair.public().clone(), - nodes: BTreeMap::new(), - connections: RwLock::new(BTreeMap::new()), - }; - - let nodes = config.key_server_set.get(); - for (node_id, socket_address) in nodes.iter().filter(|&(node_id, _)| node_id != config.self_key_pair.public()) { - //let socket_address = make_socket_address(&node_addr, node_port)?; - connections.nodes.insert(node_id.clone(), socket_address.clone()); - } - - Ok(connections) + key_server_set: config.key_server_set.clone(), + data: RwLock::new(ClusterConnectionsData { + nodes: config.key_server_set.get(), + connections: BTreeMap::new(), + }), + }) } pub fn cluster_state(&self) -> ClusterState { ClusterState { - connected: self.connections.read().keys().cloned().collect(), + connected: self.data.read().connections.keys().cloned().collect(), } } pub fn get(&self, node: &NodeId) -> Option> { - self.connections.read().get(node).cloned() + self.data.read().connections.get(node).cloned() } pub fn insert(&self, connection: Arc) -> bool { - let mut connections = self.connections.write(); - if connections.contains_key(connection.node_id()) { + let mut data = self.data.write(); + if data.connections.contains_key(connection.node_id()) { // we have already connected to the same node // the agreement is that node with lower id must establish connection to node with higher id if (&self.self_node_id < connection.node_id() && 
connection.is_inbound()) @@ -702,13 +706,13 @@ impl ClusterConnections { } trace!(target: "secretstore_net", "{}: inserting connection to {} at {}", self.self_node_id, connection.node_id(), connection.node_address()); - connections.insert(connection.node_id().clone(), connection); + data.connections.insert(connection.node_id().clone(), connection); true } pub fn remove(&self, node: &NodeId, is_inbound: bool) { - let mut connections = self.connections.write(); - if let Entry::Occupied(entry) = connections.entry(node.clone()) { + let mut data = self.data.write(); + if let Entry::Occupied(entry) = data.connections.entry(node.clone()) { if entry.get().is_inbound() != is_inbound { return; } @@ -719,20 +723,34 @@ impl ClusterConnections { } pub fn connected_nodes(&self) -> BTreeSet { - self.connections.read().keys().cloned().collect() + self.data.read().connections.keys().cloned().collect() } pub fn active_connections(&self)-> Vec> { - self.connections.read().values().cloned().collect() + self.data.read().connections.values().cloned().collect() } pub fn disconnected_nodes(&self) -> BTreeMap { - let connections = self.connections.read(); - self.nodes.iter() - .filter(|&(node_id, _)| !connections.contains_key(node_id)) + let data = self.data.read(); + data.nodes.iter() + .filter(|&(node_id, _)| !data.connections.contains_key(node_id)) .map(|(node_id, node_address)| (node_id.clone(), node_address.clone())) .collect() } + + pub fn update_nodes_set(&self) { + let mut data = self.data.write(); + let new_nodes = self.key_server_set.get(); + for obsolete_node in data.nodes.keys().cloned().collect::>() { + if !new_nodes.contains_key(&obsolete_node) { + data.nodes.remove(&obsolete_node); + data.connections.remove(&obsolete_node); + } + } + for (new_node_public, new_node_addr) in new_nodes { + data.nodes.insert(new_node_public, new_node_addr); + } + } } impl ClusterData { diff --git a/secret_store/src/key_server_set.rs b/secret_store/src/key_server_set.rs index 22ffbdb07..eecfa3091 
100644 --- a/secret_store/src/key_server_set.rs +++ b/secret_store/src/key_server_set.rs @@ -16,7 +16,7 @@ use std::sync::{Arc, Weak}; use std::net::SocketAddr; -use std::collections::HashMap; +use std::collections::BTreeMap; use futures::{future, Future}; use parking_lot::Mutex; use ethcore::filter::Filter; @@ -39,7 +39,7 @@ lazy_static! { /// Key Server set pub trait KeyServerSet: Send + Sync { /// Get set of configured key servers - fn get(&self) -> HashMap; + fn get(&self) -> BTreeMap; } /// On-chain Key Server set implementation. @@ -55,11 +55,11 @@ struct CachedContract { /// Contract address. contract_addr: Option
, /// Active set of key servers. - key_servers: HashMap, + key_servers: BTreeMap, } impl OnChainKeyServerSet { - pub fn new(client: &Arc, key_servers: HashMap) -> Arc { + pub fn new(client: &Arc, key_servers: BTreeMap) -> Arc { let key_server_set = Arc::new(OnChainKeyServerSet { contract: Mutex::new(CachedContract::new(client, key_servers)), }); @@ -69,7 +69,7 @@ impl OnChainKeyServerSet { } impl KeyServerSet for OnChainKeyServerSet { - fn get(&self) -> HashMap { + fn get(&self) -> BTreeMap { self.contract.lock().get() } } @@ -81,7 +81,7 @@ impl ChainNotify for OnChainKeyServerSet { } impl CachedContract { - pub fn new(client: &Arc, key_servers: HashMap) -> Self { + pub fn new(client: &Arc, key_servers: BTreeMap) -> Self { CachedContract { client: Arc::downgrade(client), contract_addr: None, @@ -102,7 +102,7 @@ println!("=== Installing contract from address: {:?}", new_contract_addr); KeyServerSetContract::new(contract_addr) }) .map(|contract| { - let mut key_servers = HashMap::new(); + let mut key_servers = BTreeMap::new(); let do_call = |a, d| future::done(self.client.upgrade().ok_or("Calling contract without client".into()).and_then(|c| c.call_contract(BlockId::Latest, a, d))); let key_servers_list = contract.get_key_servers(do_call).wait() .map_err(|err| { trace!(target: "secretstore", "Error {} reading list of key servers from contract", err); err }) @@ -139,7 +139,7 @@ println!("=== Installing contract from address: {:?}", new_contract_addr); } } - fn get(&self) -> HashMap { + fn get(&self) -> BTreeMap { self.key_servers.clone() } } From 7664ff5acd26d781eb9c4d5708d37735b9da01fc Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 19 Jul 2017 15:14:37 +0300 Subject: [PATCH 005/112] do not connect to self --- .../src/key_server_cluster/cluster.rs | 9 ++- secret_store/src/key_server_set.rs | 74 +++++++++++-------- 2 files changed, 51 insertions(+), 32 deletions(-) diff --git a/secret_store/src/key_server_cluster/cluster.rs 
b/secret_store/src/key_server_cluster/cluster.rs index 3f381b6f0..87c602eae 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -674,11 +674,14 @@ impl ClusterCore { impl ClusterConnections { pub fn new(config: &ClusterConfiguration) -> Result { + let mut nodes = config.key_server_set.get(); + nodes.remove(config.self_key_pair.public()); + Ok(ClusterConnections { self_node_id: config.self_key_pair.public().clone(), key_server_set: config.key_server_set.clone(), data: RwLock::new(ClusterConnectionsData { - nodes: config.key_server_set.get(), + nodes: nodes, connections: BTreeMap::new(), }), }) @@ -740,7 +743,9 @@ impl ClusterConnections { pub fn update_nodes_set(&self) { let mut data = self.data.write(); - let new_nodes = self.key_server_set.get(); + let mut new_nodes = self.key_server_set.get(); + new_nodes.remove(&self.self_node_id); + for obsolete_node in data.nodes.keys().cloned().collect::>() { if !new_nodes.contains_key(&obsolete_node) { data.nodes.remove(&obsolete_node); diff --git a/secret_store/src/key_server_set.rs b/secret_store/src/key_server_set.rs index eecfa3091..f9ec120fd 100644 --- a/secret_store/src/key_server_set.rs +++ b/secret_store/src/key_server_set.rs @@ -60,8 +60,15 @@ struct CachedContract { impl OnChainKeyServerSet { pub fn new(client: &Arc, key_servers: BTreeMap) -> Arc { + let mut cached_contract = CachedContract::new(client, key_servers); + let key_server_contract_address = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned()); + // only initialize from contract if it is installed. 
otherwise - use default nodes + if key_server_contract_address.is_some() { + cached_contract.read_from_registry(&*client, key_server_contract_address); + } + let key_server_set = Arc::new(OnChainKeyServerSet { - contract: Mutex::new(CachedContract::new(client, key_servers)), + contract: Mutex::new(cached_contract), }); client.add_notify(key_server_set.clone()); key_server_set @@ -76,6 +83,7 @@ impl KeyServerSet for OnChainKeyServerSet { impl ChainNotify for OnChainKeyServerSet { fn new_blocks(&self, _imported: Vec, _invalid: Vec, enacted: Vec, retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { +println!("=== new_blocks: imported {}, invalid: {}, enactd: {}, retracted: {}, sealed: {}, proposed: {}", _imported.len(), _invalid.len(), enacted.len(), retracted.len(), _sealed.len(), _proposed.len()); self.contract.lock().update(enacted, retracted) } } @@ -92,36 +100,10 @@ impl CachedContract { pub fn update(&mut self, enacted: Vec, _retracted: Vec) { if let Some(client) = self.client.upgrade() { let new_contract_addr = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned()); - +println!("=== Registry address = {:?}", new_contract_addr); // new contract installed if self.contract_addr.as_ref() != new_contract_addr.as_ref() { -println!("=== Installing contract from address: {:?}", new_contract_addr); - self.key_servers = new_contract_addr.map(|contract_addr| { - trace!(target: "secretstore", "Configuring for key server set contract from {}", contract_addr); - - KeyServerSetContract::new(contract_addr) - }) - .map(|contract| { - let mut key_servers = BTreeMap::new(); - let do_call = |a, d| future::done(self.client.upgrade().ok_or("Calling contract without client".into()).and_then(|c| c.call_contract(BlockId::Latest, a, d))); - let key_servers_list = contract.get_key_servers(do_call).wait() - .map_err(|err| { trace!(target: "secretstore", "Error {} reading list of key servers from contract", err); err }) - .unwrap_or_default(); - for key_server 
in key_servers_list { - let key_server_public = contract.get_key_server_public( - |a, d| future::done(self.client.upgrade().ok_or("Calling contract without client".into()).and_then(|c| c.call_contract(BlockId::Latest, a, d))), key_server).wait() - .and_then(|p| if p.len() == 64 { Ok(Public::from_slice(&p)) } else { Err(format!("Invalid public length {}", p.len())) }); - let key_server_ip = contract.get_key_server_address( - |a, d| future::done(self.client.upgrade().ok_or("Calling contract without client".into()).and_then(|c| c.call_contract(BlockId::Latest, a, d))), key_server).wait() - .and_then(|a| a.parse().map_err(|e| format!("Invalid ip address: {}", e))); - if let (Ok(key_server_public), Ok(key_server_ip)) = (key_server_public, key_server_ip) { - key_servers.insert(key_server_public, key_server_ip); - } - } - key_servers - }) - .unwrap_or_default(); - + self.read_from_registry(&*client, new_contract_addr); return; } @@ -139,7 +121,39 @@ println!("=== Installing contract from address: {:?}", new_contract_addr); } } - fn get(&self) -> BTreeMap { + pub fn get(&self) -> BTreeMap { self.key_servers.clone() } + + fn read_from_registry(&mut self, client: &Client, new_contract_address: Option
) { +println!("=== Installing contract from address: {:?}", new_contract_address); + self.key_servers = new_contract_address.map(|contract_addr| { + trace!(target: "secretstore", "Configuring for key server set contract from {}", contract_addr); + + KeyServerSetContract::new(contract_addr) + }) + .map(|contract| { + let mut key_servers = BTreeMap::new(); + let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d)); + let key_servers_list = contract.get_key_servers(do_call).wait() + .map_err(|err| { trace!(target: "secretstore", "Error {} reading list of key servers from contract", err); err }) + .unwrap_or_default(); + for key_server in key_servers_list { + let key_server_public = contract.get_key_server_public( + |a, d| future::done(client.call_contract(BlockId::Latest, a, d)), key_server).wait() + .and_then(|p| if p.len() == 64 { Ok(Public::from_slice(&p)) } else { Err(format!("Invalid public length {}", p.len())) }); + let key_server_ip = contract.get_key_server_address( + |a, d| future::done(client.call_contract(BlockId::Latest, a, d)), key_server).wait() + .and_then(|a| a.parse().map_err(|e| format!("Invalid ip address: {}", e))); + if let (Ok(key_server_public), Ok(key_server_ip)) = (key_server_public, key_server_ip) { +println!("=== PARSED {:?} {:?}", key_server_public, key_server_ip); + key_servers.insert(key_server_public, key_server_ip); + } +else { println!("=== ERROR parsing"); } + } + key_servers + }) + .unwrap_or_default(); + self.contract_addr = new_contract_address; + } } From 80b9e931f55f06034fc597a49174d13683f6288e Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 20 Jul 2017 12:19:29 +0300 Subject: [PATCH 006/112] fixed connection establishing --- .../src/key_server_cluster/cluster.rs | 44 ++++++++++++++++--- .../src/key_server_cluster/io/handshake.rs | 13 +++--- .../net/accept_connection.rs | 7 ++- secret_store/src/key_server_set.rs | 37 +++++++++------- 4 files changed, 67 insertions(+), 34 deletions(-) diff 
--git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 87c602eae..ff9ff9b22 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -289,8 +289,7 @@ impl ClusterCore { /// Accept connection future. fn accept_connection_future(handle: &Handle, data: Arc, stream: TcpStream, node_address: SocketAddr) -> BoxedEmptyFuture { - let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect(); - net_accept_connection(node_address, stream, handle, data.self_key_pair.clone(), disconnected_nodes) + net_accept_connection(node_address, stream, handle, data.self_key_pair.clone()) .then(move |result| ClusterCore::process_connection_result(data, true, result)) .then(|_| finished(())) .boxed() @@ -381,14 +380,16 @@ impl ClusterCore { finished(Ok(())).boxed() } }, - Ok(DeadlineStatus::Meet(Err(_))) => { + Ok(DeadlineStatus::Meet(Err(err))) => { + warn!(target: "secretstore_net", "{}: protocol error {} when establishind connection", data.self_key_pair.public(), err); finished(Ok(())).boxed() }, Ok(DeadlineStatus::Timeout) => { + warn!(target: "secretstore_net", "{}: timeout when establishind connection", data.self_key_pair.public()); finished(Ok(())).boxed() }, - Err(_) => { - // network error + Err(err) => { + warn!(target: "secretstore_net", "{}: network error {} when establishind connection", data.self_key_pair.public(), err); finished(Ok(())).boxed() }, } @@ -699,6 +700,12 @@ impl ClusterConnections { pub fn insert(&self, connection: Arc) -> bool { let mut data = self.data.write(); + if !data.nodes.contains_key(connection.node_id()) { + // incoming connections are checked here + trace!(target: "secretstore_net", "{}: ignoring unknown connection from {} at {}", self.self_node_id, connection.node_id(), connection.node_address()); + debug_assert!(connection.is_inbound()); + return false; + } if data.connections.contains_key(connection.node_id()) 
{ // we have already connected to the same node // the agreement is that node with lower id must establish connection to node with higher id @@ -746,14 +753,37 @@ impl ClusterConnections { let mut new_nodes = self.key_server_set.get(); new_nodes.remove(&self.self_node_id); + let mut num_added_nodes = 0; + let mut num_removed_nodes = 0; + let mut num_changed_nodes = 0; + for obsolete_node in data.nodes.keys().cloned().collect::>() { if !new_nodes.contains_key(&obsolete_node) { + if let Entry::Occupied(entry) = data.connections.entry(obsolete_node) { + trace!(target: "secretstore_net", "{}: removing connection to {} at {}", self.self_node_id, entry.get().node_id(), entry.get().node_address()); + entry.remove(); + } + data.nodes.remove(&obsolete_node); - data.connections.remove(&obsolete_node); + num_removed_nodes += 1; } } + for (new_node_public, new_node_addr) in new_nodes { - data.nodes.insert(new_node_public, new_node_addr); + match data.nodes.insert(new_node_public, new_node_addr) { + None => num_added_nodes += 1, + Some(old_node_addr) => if new_node_addr != old_node_addr { + if let Entry::Occupied(entry) = data.connections.entry(new_node_public) { + trace!(target: "secretstore_net", "{}: removing connection to {} at {}", self.self_node_id, entry.get().node_id(), entry.get().node_address()); + entry.remove(); + } + num_changed_nodes += 1; + }, + } + } + + if num_added_nodes != 0 || num_removed_nodes != 0 || num_changed_nodes != 0 { + trace!(target: "secretstore_net", "{}: updated nodes set: removed {}, added {}, changed {}", self.self_node_id, num_removed_nodes, num_added_nodes, num_changed_nodes); } } } diff --git a/secret_store/src/key_server_cluster/io/handshake.rs b/secret_store/src/key_server_cluster/io/handshake.rs index 38d8a6ac1..90f4d04cc 100644 --- a/secret_store/src/key_server_cluster/io/handshake.rs +++ b/secret_store/src/key_server_cluster/io/handshake.rs @@ -45,7 +45,7 @@ pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Resul 
state: state, self_key_pair: self_key_pair, self_confirmation_plain: self_confirmation_plain.unwrap_or(Default::default()), - trusted_nodes: trusted_nodes, + trusted_nodes: Some(trusted_nodes), other_node_id: None, other_confirmation_plain: None, shared_key: None, @@ -53,7 +53,7 @@ pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Resul } /// Wait for handshake procedure to be started by another node from the cluster. -pub fn accept_handshake(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { +pub fn accept_handshake(a: A, self_key_pair: KeyPair) -> Handshake where A: AsyncWrite + AsyncRead { let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); let (error, state) = match self_confirmation_plain.clone() { Ok(_) => (None, HandshakeState::ReceivePublicKey(read_message(a))), @@ -66,7 +66,7 @@ pub fn accept_handshake(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet state: state, self_key_pair: self_key_pair, self_confirmation_plain: self_confirmation_plain.unwrap_or(Default::default()), - trusted_nodes: trusted_nodes, + trusted_nodes: None, other_node_id: None, other_confirmation_plain: None, shared_key: None, @@ -89,7 +89,7 @@ pub struct Handshake { state: HandshakeState, self_key_pair: KeyPair, self_confirmation_plain: H256, - trusted_nodes: BTreeSet, + trusted_nodes: Option>, other_node_id: Option, other_confirmation_plain: Option, shared_key: Option, @@ -172,7 +172,8 @@ impl Future for Handshake where A: AsyncRead + AsyncWrite { Err(err) => return Ok((stream, Err(err.into())).into()), }; - if !self.trusted_nodes.contains(&*message.node_id) { + if !self.trusted_nodes.as_ref().map(|tn| tn.contains(&*message.node_id)).unwrap_or(true) { +println!("=== HANDSHAKE - INVALID NODE: self.trusted_nodes = {:?}, message.node_id = {:?}", self.trusted_nodes, message.node_id); return Ok((stream, Err(Error::InvalidNodeId)).into()); } @@ -300,7 +301,7 @@ mod 
tests { let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect(); let shared_key = compute_shared_key(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap(); - let mut handshake = accept_handshake(io, self_key_pair, trusted_nodes); + let mut handshake = accept_handshake(io, self_key_pair); handshake.set_self_confirmation_plain(self_confirmation_plain); let handshake_result = handshake.wait().unwrap(); diff --git a/secret_store/src/key_server_cluster/net/accept_connection.rs b/secret_store/src/key_server_cluster/net/accept_connection.rs index 0daa8b2da..339625f3f 100644 --- a/secret_store/src/key_server_cluster/net/accept_connection.rs +++ b/secret_store/src/key_server_cluster/net/accept_connection.rs @@ -17,19 +17,18 @@ use std::io; use std::net::SocketAddr; use std::time::Duration; -use std::collections::BTreeSet; use futures::{Future, Poll}; use tokio_core::reactor::Handle; use tokio_core::net::TcpStream; use ethkey::KeyPair; -use key_server_cluster::{Error, NodeId}; +use key_server_cluster::Error; use key_server_cluster::io::{accept_handshake, Handshake, Deadline, deadline}; use key_server_cluster::net::Connection; /// Create future for accepting incoming connection. 
-pub fn accept_connection(address: SocketAddr, stream: TcpStream, handle: &Handle, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Deadline { +pub fn accept_connection(address: SocketAddr, stream: TcpStream, handle: &Handle, self_key_pair: KeyPair) -> Deadline { let accept = AcceptConnection { - handshake: accept_handshake(stream, self_key_pair, trusted_nodes), + handshake: accept_handshake(stream, self_key_pair), address: address, }; diff --git a/secret_store/src/key_server_set.rs b/secret_store/src/key_server_set.rs index f9ec120fd..7b0bd5c9f 100644 --- a/secret_store/src/key_server_set.rs +++ b/secret_store/src/key_server_set.rs @@ -28,8 +28,8 @@ use types::all::Public; const KEY_SERVER_SET_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_server_set"; // TODO: ethabi should be able to generate this. -const ADDED_EVENT_NAME: &'static [u8] = &*b"KeyServerAdded()"; -const REMOVED_EVENT_NAME: &'static [u8] = &*b"KeyServerRemoved()"; +const ADDED_EVENT_NAME: &'static [u8] = &*b"KeyServerAdded(address)"; +const REMOVED_EVENT_NAME: &'static [u8] = &*b"KeyServerRemoved(address)"; lazy_static! 
{ static ref ADDED_EVENT_NAME_HASH: H256 = ADDED_EVENT_NAME.sha3(); @@ -83,7 +83,6 @@ impl KeyServerSet for OnChainKeyServerSet { impl ChainNotify for OnChainKeyServerSet { fn new_blocks(&self, _imported: Vec, _invalid: Vec, enacted: Vec, retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { -println!("=== new_blocks: imported {}, invalid: {}, enactd: {}, retracted: {}, sealed: {}, proposed: {}", _imported.len(), _invalid.len(), enacted.len(), retracted.len(), _sealed.len(), _proposed.len()); self.contract.lock().update(enacted, retracted) } } @@ -97,26 +96,33 @@ impl CachedContract { } } - pub fn update(&mut self, enacted: Vec, _retracted: Vec) { + pub fn update(&mut self, enacted: Vec, retracted: Vec) { if let Some(client) = self.client.upgrade() { let new_contract_addr = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned()); -println!("=== Registry address = {:?}", new_contract_addr); + // new contract installed if self.contract_addr.as_ref() != new_contract_addr.as_ref() { self.read_from_registry(&*client, new_contract_addr); return; } - // check for events - for enacted_hash in enacted { - let filter = Filter { - from_block: BlockId::Hash(enacted_hash.clone()), - to_block: BlockId::Hash(enacted_hash.clone()), + // check for contract events + let is_set_changed = self.contract_addr.is_some() && enacted.iter() + .chain(retracted.iter()) + .any(|block_hash| !client.logs(Filter { + from_block: BlockId::Hash(block_hash.clone()), + to_block: BlockId::Hash(block_hash.clone()), address: self.contract_addr.clone().map(|a| vec![a]), - topics: vec![Some(vec![*ADDED_EVENT_NAME_HASH]), Some(vec![*REMOVED_EVENT_NAME_HASH])], - limit: None, - }; - println!("=== Number of filtered log entries: {}", client.logs(filter).len()); + topics: vec![ + Some(vec![*ADDED_EVENT_NAME_HASH, *REMOVED_EVENT_NAME_HASH]), + None, + None, + None, + ], + limit: Some(1), + }).is_empty()); + if is_set_changed { + self.read_from_registry(&*client, new_contract_addr); } 
} } @@ -126,7 +132,6 @@ println!("=== Registry address = {:?}", new_contract_addr); } fn read_from_registry(&mut self, client: &Client, new_contract_address: Option
) { -println!("=== Installing contract from address: {:?}", new_contract_address); self.key_servers = new_contract_address.map(|contract_addr| { trace!(target: "secretstore", "Configuring for key server set contract from {}", contract_addr); @@ -146,10 +151,8 @@ println!("=== Installing contract from address: {:?}", new_contract_address); |a, d| future::done(client.call_contract(BlockId::Latest, a, d)), key_server).wait() .and_then(|a| a.parse().map_err(|e| format!("Invalid ip address: {}", e))); if let (Ok(key_server_public), Ok(key_server_ip)) = (key_server_public, key_server_ip) { -println!("=== PARSED {:?} {:?}", key_server_public, key_server_ip); key_servers.insert(key_server_public, key_server_ip); } -else { println!("=== ERROR parsing"); } } key_servers }) From 9a9c4f6ad6ee6e6beda9820ade823191987b98f9 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 20 Jul 2017 12:25:41 +0300 Subject: [PATCH 007/112] removed println --- secret_store/src/key_server_cluster/io/handshake.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/secret_store/src/key_server_cluster/io/handshake.rs b/secret_store/src/key_server_cluster/io/handshake.rs index 90f4d04cc..df8f6cbf7 100644 --- a/secret_store/src/key_server_cluster/io/handshake.rs +++ b/secret_store/src/key_server_cluster/io/handshake.rs @@ -173,7 +173,6 @@ impl Future for Handshake where A: AsyncRead + AsyncWrite { }; if !self.trusted_nodes.as_ref().map(|tn| tn.contains(&*message.node_id)).unwrap_or(true) { -println!("=== HANDSHAKE - INVALID NODE: self.trusted_nodes = {:?}, message.node_id = {:?}", self.trusted_nodes, message.node_id); return Ok((stream, Err(Error::InvalidNodeId)).into()); } From a35db9f45476d44d77870f5cd1e52764a7676d07 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 20 Jul 2017 12:55:52 +0300 Subject: [PATCH 008/112] improved KeyServerSet tracing --- secret_store/src/key_server_set.rs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git 
a/secret_store/src/key_server_set.rs b/secret_store/src/key_server_set.rs index 7b0bd5c9f..6b58e54de 100644 --- a/secret_store/src/key_server_set.rs +++ b/secret_store/src/key_server_set.rs @@ -27,8 +27,9 @@ use types::all::Public; const KEY_SERVER_SET_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_server_set"; -// TODO: ethabi should be able to generate this. +/// Key server has been added to the set. const ADDED_EVENT_NAME: &'static [u8] = &*b"KeyServerAdded(address)"; +/// Key server has been removed from the set. const REMOVED_EVENT_NAME: &'static [u8] = &*b"KeyServerRemoved(address)"; lazy_static! { @@ -63,6 +64,7 @@ impl OnChainKeyServerSet { let mut cached_contract = CachedContract::new(client, key_servers); let key_server_contract_address = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned()); // only initialize from contract if it is installed. otherwise - use default nodes + // once the contract is installed, all default nodes are lost (if not in the contract' set) if key_server_contract_address.is_some() { cached_contract.read_from_registry(&*client, key_server_contract_address); } @@ -100,7 +102,7 @@ impl CachedContract { if let Some(client) = self.client.upgrade() { let new_contract_addr = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned()); - // new contract installed + // new contract installed => read nodes set from the contract if self.contract_addr.as_ref() != new_contract_addr.as_ref() { self.read_from_registry(&*client, new_contract_addr); return; @@ -121,6 +123,7 @@ impl CachedContract { ], limit: Some(1), }).is_empty()); + // to simplify processing - just re-read the whole nodes set from the contract if is_set_changed { self.read_from_registry(&*client, new_contract_addr); } @@ -150,8 +153,12 @@ impl CachedContract { let key_server_ip = contract.get_key_server_address( |a, d| future::done(client.call_contract(BlockId::Latest, a, d)), key_server).wait() .and_then(|a| a.parse().map_err(|e| 
format!("Invalid ip address: {}", e))); - if let (Ok(key_server_public), Ok(key_server_ip)) = (key_server_public, key_server_ip) { - key_servers.insert(key_server_public, key_server_ip); + + // only add successfully parsed nodes + match (key_server_public, key_server_ip) { + (Ok(key_server_public), Ok(key_server_ip)) => { key_servers.insert(key_server_public, key_server_ip); }, + (Err(public_err), _) => warn!(target: "secretstore_net", "received invalid public from key server set contract: {}", public_err), + (_, Err(ip_err)) => warn!(target: "secretstore_net", "received invalid IP from key server set contract: {}", ip_err), } } key_servers From b9ad093d06fc11f1dba8d30d5986a4aa8dd93cbe Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 20 Jul 2017 13:15:16 +0300 Subject: [PATCH 009/112] moved parsing to KeyServerSet --- secret_store/src/key_server_set.rs | 22 ++++++++++++++-------- secret_store/src/lib.rs | 4 +--- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/secret_store/src/key_server_set.rs b/secret_store/src/key_server_set.rs index 6b58e54de..302f76196 100644 --- a/secret_store/src/key_server_set.rs +++ b/secret_store/src/key_server_set.rs @@ -23,7 +23,7 @@ use ethcore::filter::Filter; use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; use native_contracts::KeyServerSet as KeyServerSetContract; use util::{H256, Address, Bytes, Hashable}; -use types::all::Public; +use types::all::{Error, Public, NodeAddress}; const KEY_SERVER_SET_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_server_set"; @@ -60,8 +60,8 @@ struct CachedContract { } impl OnChainKeyServerSet { - pub fn new(client: &Arc, key_servers: BTreeMap) -> Arc { - let mut cached_contract = CachedContract::new(client, key_servers); + pub fn new(client: &Arc, key_servers: BTreeMap) -> Result, Error> { + let mut cached_contract = CachedContract::new(client, key_servers)?; let key_server_contract_address = 
client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned()); // only initialize from contract if it is installed. otherwise - use default nodes // once the contract is installed, all default nodes are lost (if not in the contract' set) @@ -73,7 +73,7 @@ impl OnChainKeyServerSet { contract: Mutex::new(cached_contract), }); client.add_notify(key_server_set.clone()); - key_server_set + Ok(key_server_set) } } @@ -90,12 +90,18 @@ impl ChainNotify for OnChainKeyServerSet { } impl CachedContract { - pub fn new(client: &Arc, key_servers: BTreeMap) -> Self { - CachedContract { + pub fn new(client: &Arc, key_servers: BTreeMap) -> Result { + Ok(CachedContract { client: Arc::downgrade(client), contract_addr: None, - key_servers: key_servers, - } + key_servers: key_servers.into_iter() + .map(|(p, addr)| { + let addr = format!("{}:{}", addr.address, addr.port).parse() + .map_err(|err| Error::Internal(format!("error parsing node address: {}", err)))?; + Ok((p, addr)) + }) + .collect::, Error>>()?, + }) } pub fn update(&mut self, enacted: Vec, retracted: Vec) { diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 235d0edd9..9750f7223 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -72,9 +72,7 @@ pub fn start(client: Arc, config: ServiceConfiguration) -> Result Date: Thu, 20 Jul 2017 13:28:31 +0300 Subject: [PATCH 010/112] re-read only when blockchain is changed --- secret_store/src/acl_storage.rs | 6 ++++-- secret_store/src/key_server_set.rs | 4 +++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/secret_store/src/acl_storage.rs b/secret_store/src/acl_storage.rs index 7ae72cbfc..37d5bcd25 100644 --- a/secret_store/src/acl_storage.rs +++ b/secret_store/src/acl_storage.rs @@ -64,8 +64,10 @@ impl AclStorage for OnChainAclStorage { } impl ChainNotify for OnChainAclStorage { - fn new_blocks(&self, _imported: Vec, _invalid: Vec, _enacted: Vec, _retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { - 
self.contract.lock().update() + fn new_blocks(&self, _imported: Vec, _invalid: Vec, enacted: Vec, retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { + if !enacted.is_empty() || !retracted.is_empty() { + self.contract.lock().update() + } } } diff --git a/secret_store/src/key_server_set.rs b/secret_store/src/key_server_set.rs index 302f76196..47f033db1 100644 --- a/secret_store/src/key_server_set.rs +++ b/secret_store/src/key_server_set.rs @@ -85,7 +85,9 @@ impl KeyServerSet for OnChainKeyServerSet { impl ChainNotify for OnChainKeyServerSet { fn new_blocks(&self, _imported: Vec, _invalid: Vec, enacted: Vec, retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { - self.contract.lock().update(enacted, retracted) + if !enacted.is_empty() || !retracted.is_empty() { + self.contract.lock().update(enacted, retracted) + } } } From 023e5b4b9086c85405746dc38b95fcd8c9786746 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 21 Jul 2017 10:49:10 +0300 Subject: [PATCH 011/112] do not try to connect if not a part of cluster --- secret_store/src/key_server_cluster/cluster.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index ff9ff9b22..831867029 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -751,7 +751,11 @@ impl ClusterConnections { pub fn update_nodes_set(&self) { let mut data = self.data.write(); let mut new_nodes = self.key_server_set.get(); - new_nodes.remove(&self.self_node_id); + // we do not need to connect to self + // + we do not need to try to connect to any other node if we are not the part of a cluster + if new_nodes.remove(&self.self_node_id).is_none() { + new_nodes.clear(); + } let mut num_added_nodes = 0; let mut num_removed_nodes = 0; From 5fb9652af56410a805da24e4f2177578f062be53 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 21 
Jul 2017 11:25:26 +0300 Subject: [PATCH 012/112] improved logging --- secret_store/src/key_server_cluster/cluster.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 831867029..649ad4bc9 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -715,7 +715,8 @@ impl ClusterConnections { } } - trace!(target: "secretstore_net", "{}: inserting connection to {} at {}", self.self_node_id, connection.node_id(), connection.node_address()); + trace!(target: "secretstore_net", "{}: inserting connection to {} at {}. Connected to {} of {} nodes", + self.self_node_id, connection.node_id(), connection.node_address(), data.connections.len() + 1, data.nodes.len()); data.connections.insert(connection.node_id().clone(), connection); true } @@ -787,7 +788,8 @@ impl ClusterConnections { } if num_added_nodes != 0 || num_removed_nodes != 0 || num_changed_nodes != 0 { - trace!(target: "secretstore_net", "{}: updated nodes set: removed {}, added {}, changed {}", self.self_node_id, num_removed_nodes, num_added_nodes, num_changed_nodes); + trace!(target: "secretstore_net", "{}: updated nodes set: removed {}, added {}, changed {}. 
Connected to {} of {} nodes", + self.self_node_id, num_removed_nodes, num_added_nodes, num_changed_nodes, data.connections.len(), data.nodes.len()); } } } From b31b067743a62068425a9e87eee6edd9864c83f5 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 24 Jul 2017 12:36:31 +0300 Subject: [PATCH 013/112] fixed tests --- secret_store/src/key_server.rs | 8 +++++- .../src/key_server_cluster/cluster.rs | 10 +++---- secret_store/src/key_server_cluster/mod.rs | 2 ++ secret_store/src/key_server_set.rs | 27 +++++++++++++++++++ 4 files changed, 41 insertions(+), 6 deletions(-) diff --git a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index 969782ca2..c83e460f3 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -192,10 +192,13 @@ impl Drop for KeyServerCore { pub mod tests { use std::time; use std::sync::Arc; + use std::net::SocketAddr; + use std::collections::BTreeMap; use ethcrypto; use ethkey::{self, Secret, Random, Generator}; use acl_storage::tests::DummyAclStorage; use key_storage::tests::DummyKeyStorage; + use key_server_set::tests::MapKeyServerSet; use key_server_cluster::math; use util::H256; use types::all::{Error, Public, ClusterConfiguration, NodeAddress, RequestSignature, ServerKeyId, @@ -253,8 +256,11 @@ pub mod tests { })).collect(), allow_connecting_to_higher_nodes: false, }).collect(); + let key_servers_set: BTreeMap = configs[0].nodes.iter() + .map(|(k, a)| (k.clone(), format!("{}:{}", a.address, a.port).parse().unwrap())) + .collect(); let key_servers: Vec<_> = configs.into_iter().map(|cfg| - KeyServerImpl::new(&cfg, Arc::new(DummyAclStorage::default()), Arc::new(DummyKeyStorage::default())).unwrap() + KeyServerImpl::new(&cfg, Arc::new(MapKeyServerSet::new(key_servers_set.clone())), Arc::new(DummyAclStorage::default()), Arc::new(DummyKeyStorage::default())).unwrap() ).collect(); // wait until connections are established. 
It is fast => do not bother with events here diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 649ad4bc9..d77a82431 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -989,7 +989,7 @@ pub mod tests { use parking_lot::Mutex; use tokio_core::reactor::Core; use ethkey::{Random, Generator, Public}; - use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage}; + use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet}; use key_server_cluster::message::Message; use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration}; use key_server_cluster::generation_session::{Session as GenerationSession, SessionState as GenerationSessionState}; @@ -1059,7 +1059,7 @@ pub mod tests { } pub fn all_connections_established(cluster: &Arc) -> bool { - cluster.config().nodes.keys() + cluster.config().key_server_set.get().keys() .filter(|p| *p != cluster.config().self_key_pair.public()) .all(|p| cluster.connection(p).is_some()) } @@ -1070,9 +1070,9 @@ pub mod tests { threads: 1, self_key_pair: key_pairs[i].clone(), listen_address: ("127.0.0.1".to_owned(), ports_begin + i as u16), - nodes: key_pairs.iter().enumerate() - .map(|(j, kp)| (kp.public().clone(), ("127.0.0.1".into(), ports_begin + j as u16))) - .collect(), + key_server_set: Arc::new(MapKeyServerSet::new(key_pairs.iter().enumerate() + .map(|(j, kp)| (kp.public().clone(), format!("127.0.0.1:{}", ports_begin + j as u16).parse().unwrap())) + .collect())), allow_connecting_to_higher_nodes: false, key_storage: Arc::new(DummyKeyStorage::default()), acl_storage: Arc::new(DummyAclStorage::default()), diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index 11c32d528..8f6ae4add 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ 
b/secret_store/src/key_server_cluster/mod.rs @@ -34,6 +34,8 @@ pub use self::decryption_session::Session as DecryptionSession; pub use super::key_storage::tests::DummyKeyStorage; #[cfg(test)] pub use super::acl_storage::tests::DummyAclStorage; +#[cfg(test)] +pub use super::key_server_set::tests::MapKeyServerSet; pub type SessionId = ServerKeyId; diff --git a/secret_store/src/key_server_set.rs b/secret_store/src/key_server_set.rs index 47f033db1..e17dceed5 100644 --- a/secret_store/src/key_server_set.rs +++ b/secret_store/src/key_server_set.rs @@ -175,3 +175,30 @@ impl CachedContract { self.contract_addr = new_contract_address; } } + +#[cfg(test)] +pub mod tests { + use std::collections::BTreeMap; + use std::net::SocketAddr; + use ethkey::Public; + use super::KeyServerSet; + + #[derive(Default)] + pub struct MapKeyServerSet { + nodes: BTreeMap, + } + + impl MapKeyServerSet { + pub fn new(nodes: BTreeMap) -> Self { + MapKeyServerSet { + nodes: nodes, + } + } + } + + impl KeyServerSet for MapKeyServerSet { + fn get(&self) -> BTreeMap { + self.nodes.clone() + } + } +} From 45f2b824110e1f7378217e8d6b73cffd261b6755 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 25 Jul 2017 09:24:54 +0300 Subject: [PATCH 014/112] NodeKeyPAir trait --- secret_store/src/key_server.rs | 20 +++--- .../src/key_server_cluster/cluster.rs | 10 +-- .../src/key_server_cluster/io/handshake.rs | 48 +++++++------ .../src/key_server_cluster/io/message.rs | 15 ++--- secret_store/src/key_server_cluster/io/mod.rs | 2 +- secret_store/src/key_server_cluster/mod.rs | 3 + .../net/accept_connection.rs | 6 +- .../src/key_server_cluster/net/connect.rs | 8 +-- secret_store/src/key_storage.rs | 2 +- secret_store/src/lib.rs | 8 ++- secret_store/src/node_key_pair.rs | 67 +++++++++++++++++++ secret_store/src/traits.rs | 12 ++++ secret_store/src/types/all.rs | 2 - 13 files changed, 146 insertions(+), 57 deletions(-) create mode 100644 secret_store/src/node_key_pair.rs diff --git 
a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index c83e460f3..0944dd37c 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -26,7 +26,7 @@ use super::acl_storage::AclStorage; use super::key_storage::KeyStorage; use super::key_server_set::KeyServerSet; use key_server_cluster::{math, ClusterCore}; -use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer}; +use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer, NodeKeyPair}; use types::all::{Error, Public, RequestSignature, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow, ClusterConfiguration, MessageHash, EncryptedMessageSignature}; use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration}; @@ -45,9 +45,9 @@ pub struct KeyServerCore { impl KeyServerImpl { /// Create new key server instance - pub fn new(config: &ClusterConfiguration, key_server_set: Arc, acl_storage: Arc, key_storage: Arc) -> Result { + pub fn new(config: &ClusterConfiguration, key_server_set: Arc, self_key_pair: Arc, acl_storage: Arc, key_storage: Arc) -> Result { Ok(KeyServerImpl { - data: Arc::new(Mutex::new(KeyServerCore::new(config, key_server_set, acl_storage, key_storage)?)), + data: Arc::new(Mutex::new(KeyServerCore::new(config, key_server_set, self_key_pair, acl_storage, key_storage)?)), }) } @@ -144,10 +144,10 @@ impl MessageSigner for KeyServerImpl { } impl KeyServerCore { - pub fn new(config: &ClusterConfiguration, key_server_set: Arc, acl_storage: Arc, key_storage: Arc) -> Result { + pub fn new(config: &ClusterConfiguration, key_server_set: Arc, self_key_pair: Arc, acl_storage: Arc, key_storage: Arc) -> Result { let config = NetClusterConfiguration { threads: config.threads, - self_key_pair: ethkey::KeyPair::from_secret_slice(&config.self_private)?, + self_key_pair: self_key_pair, listen_address: (config.listener_address.address.clone(), config.listener_address.port), key_server_set: key_server_set, 
allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes, @@ -198,6 +198,7 @@ pub mod tests { use ethkey::{self, Secret, Random, Generator}; use acl_storage::tests::DummyAclStorage; use key_storage::tests::DummyKeyStorage; + use node_key_pair::PlainNodeKeyPair; use key_server_set::tests::MapKeyServerSet; use key_server_cluster::math; use util::H256; @@ -244,7 +245,7 @@ pub mod tests { let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect(); let configs: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration { threads: 1, - self_private: (***key_pairs[i].secret()).into(), +// self_key_pair: Arc::new(PlainNodeKeyPair::new(key_pairs[i].clone())), listener_address: NodeAddress { address: "127.0.0.1".into(), port: start_port + (i as u16), @@ -259,8 +260,11 @@ pub mod tests { let key_servers_set: BTreeMap = configs[0].nodes.iter() .map(|(k, a)| (k.clone(), format!("{}:{}", a.address, a.port).parse().unwrap())) .collect(); - let key_servers: Vec<_> = configs.into_iter().map(|cfg| - KeyServerImpl::new(&cfg, Arc::new(MapKeyServerSet::new(key_servers_set.clone())), Arc::new(DummyAclStorage::default()), Arc::new(DummyKeyStorage::default())).unwrap() + let key_servers: Vec<_> = configs.into_iter().enumerate().map(|(i, cfg)| + KeyServerImpl::new(&cfg, Arc::new(MapKeyServerSet::new(key_servers_set.clone())), + Arc::new(PlainNodeKeyPair::new(key_pairs[i].clone())), + Arc::new(DummyAclStorage::default()), + Arc::new(DummyKeyStorage::default())).unwrap() ).collect(); // wait until connections are established. 
It is fast => do not bother with events here diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index d77a82431..155dd4a01 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -28,7 +28,7 @@ use tokio_core::reactor::{Handle, Remote, Interval}; use tokio_core::net::{TcpListener, TcpStream}; use ethkey::{Public, KeyPair, Signature, Random, Generator}; use util::H256; -use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, KeyServerSet}; +use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair}; use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, GenerationSessionWrapper, EncryptionSessionWrapper, DecryptionSessionWrapper, SigningSessionWrapper}; use key_server_cluster::message::{self, Message, ClusterMessage, GenerationMessage, EncryptionMessage, DecryptionMessage, @@ -99,7 +99,7 @@ pub struct ClusterConfiguration { /// Allow connecting to 'higher' nodes. pub allow_connecting_to_higher_nodes: bool, /// KeyPair this node holds. - pub self_key_pair: KeyPair, + pub self_key_pair: Arc, /// Interface to listen to. pub listen_address: (String, u16), /// Cluster nodes set. @@ -146,7 +146,7 @@ pub struct ClusterData { /// Handle to the cpu thread pool. pool: CpuPool, /// KeyPair this node holds. - self_key_pair: KeyPair, + self_key_pair: Arc, /// Connections data. connections: ClusterConnections, /// Active sessions data. 
@@ -989,7 +989,7 @@ pub mod tests { use parking_lot::Mutex; use tokio_core::reactor::Core; use ethkey::{Random, Generator, Public}; - use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet}; + use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet, PlainNodeKeyPair}; use key_server_cluster::message::Message; use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration}; use key_server_cluster::generation_session::{Session as GenerationSession, SessionState as GenerationSessionState}; @@ -1068,7 +1068,7 @@ pub mod tests { let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect(); let cluster_params: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration { threads: 1, - self_key_pair: key_pairs[i].clone(), + self_key_pair: Arc::new(PlainNodeKeyPair::new(key_pairs[i].clone())), listen_address: ("127.0.0.1".to_owned(), ports_begin + i as u16), key_server_set: Arc::new(MapKeyServerSet::new(key_pairs.iter().enumerate() .map(|(j, kp)| (kp.public().clone(), format!("127.0.0.1:{}", ports_begin + j as u16).parse().unwrap())) diff --git a/secret_store/src/key_server_cluster/io/handshake.rs b/secret_store/src/key_server_cluster/io/handshake.rs index df8f6cbf7..bf52ab798 100644 --- a/secret_store/src/key_server_cluster/io/handshake.rs +++ b/secret_store/src/key_server_cluster/io/handshake.rs @@ -15,24 +15,25 @@ // along with Parity. If not, see . 
use std::io; +use std::sync::Arc; use std::collections::BTreeSet; use futures::{Future, Poll, Async}; use tokio_io::{AsyncRead, AsyncWrite}; -use ethkey::{Random, Generator, KeyPair, Secret, sign, verify_public}; +use ethkey::{Random, Generator, KeyPair, verify_public}; use util::H256; -use key_server_cluster::{NodeId, Error}; +use key_server_cluster::{NodeId, Error, NodeKeyPair}; use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature}; use key_server_cluster::io::{write_message, write_encrypted_message, WriteMessage, ReadMessage, - read_message, read_encrypted_message, compute_shared_key}; + read_message, read_encrypted_message, fix_shared_key}; /// Start handshake procedure with another node from the cluster. -pub fn handshake(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { +pub fn handshake(a: A, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); handshake_with_plain_confirmation(a, self_confirmation_plain, self_key_pair, trusted_nodes) } /// Start handshake procedure with another node from the cluster and given plain confirmation. 
-pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Result, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { +pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Result, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { let (error, state) = match self_confirmation_plain.clone() .and_then(|c| Handshake::::make_public_key_message(self_key_pair.public().clone(), c)) { Ok(message) => (None, HandshakeState::SendPublicKey(write_message(a, message))), @@ -53,7 +54,7 @@ pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Resul } /// Wait for handshake procedure to be started by another node from the cluster. -pub fn accept_handshake(a: A, self_key_pair: KeyPair) -> Handshake where A: AsyncWrite + AsyncRead { +pub fn accept_handshake(a: A, self_key_pair: Arc) -> Handshake where A: AsyncWrite + AsyncRead { let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); let (error, state) = match self_confirmation_plain.clone() { Ok(_) => (None, HandshakeState::ReceivePublicKey(read_message(a))), @@ -87,7 +88,7 @@ pub struct Handshake { is_active: bool, error: Option<(A, Result)>, state: HandshakeState, - self_key_pair: KeyPair, + self_key_pair: Arc, self_confirmation_plain: H256, trusted_nodes: Option>, other_node_id: Option, @@ -117,9 +118,9 @@ impl Handshake where A: AsyncRead + AsyncWrite { }))) } - fn make_private_key_signature_message(secret: &Secret, confirmation_plain: &H256) -> Result { + fn make_private_key_signature_message(self_key_pair: &NodeKeyPair, confirmation_plain: &H256) -> Result { Ok(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature { - confirmation_signed: sign(secret, confirmation_plain)?.into(), + confirmation_signed: self_key_pair.sign(confirmation_plain)?.into(), }))) } } @@ -142,15 +143,15 @@ impl Future for Handshake where A: AsyncRead 
+ AsyncWrite { read_message(stream) ), Async::NotReady) } else { - self.shared_key = match compute_shared_key(self.self_key_pair.secret(), + self.shared_key = match self.self_key_pair.compute_shared_key( self.other_node_id.as_ref().expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; other_node_id is filled in ReceivePublicKey; qed") - ) { + ).map_err(Into::into).and_then(|sk| fix_shared_key(sk.secret())) { Ok(shared_key) => Some(shared_key), - Err(err) => return Ok((stream, Err(err)).into()), + Err(err) => return Ok((stream, Err(err.into())).into()), }; let message = match Handshake::::make_private_key_signature_message( - self.self_key_pair.secret(), + &*self.self_key_pair, self.other_confirmation_plain.as_ref().expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; other_confirmation_plain is filled in ReceivePublicKey; qed") ) { Ok(message) => message, @@ -179,15 +180,15 @@ impl Future for Handshake where A: AsyncRead + AsyncWrite { self.other_node_id = Some(message.node_id.into()); self.other_confirmation_plain = Some(message.confirmation_plain.into()); if self.is_active { - self.shared_key = match compute_shared_key(self.self_key_pair.secret(), + self.shared_key = match self.self_key_pair.compute_shared_key( self.other_node_id.as_ref().expect("filled couple of lines above; qed") - ) { + ).map_err(Into::into).and_then(|sk| fix_shared_key(sk.secret())) { Ok(shared_key) => Some(shared_key), - Err(err) => return Ok((stream, Err(err)).into()), + Err(err) => return Ok((stream, Err(err.into())).into()), }; let message = match Handshake::::make_private_key_signature_message( - self.self_key_pair.secret(), + &*self.self_key_pair, self.other_confirmation_plain.as_ref().expect("filled couple of lines above; qed") ) { Ok(message) => message, @@ -248,11 +249,14 @@ impl Future for Handshake where A: AsyncRead + AsyncWrite { #[cfg(test)] mod tests { + use std::sync::Arc; use std::collections::BTreeSet; use 
futures::Future; use ethkey::{Random, Generator, sign}; + use ethcrypto::ecdh::agree; use util::H256; - use key_server_cluster::io::message::compute_shared_key; + use key_server_cluster::PlainNodeKeyPair; + use key_server_cluster::io::message::fix_shared_key; use key_server_cluster::io::message::tests::TestIo; use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature}; use super::{handshake_with_plain_confirmation, accept_handshake, HandshakeResult}; @@ -283,9 +287,9 @@ mod tests { let (self_confirmation_plain, io) = prepare_test_io(); let self_key_pair = io.self_key_pair().clone(); let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect(); - let shared_key = compute_shared_key(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap(); + let shared_key = fix_shared_key(&agree(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap()).unwrap(); - let handshake = handshake_with_plain_confirmation(io, Ok(self_confirmation_plain), self_key_pair, trusted_nodes); + let handshake = handshake_with_plain_confirmation(io, Ok(self_confirmation_plain), Arc::new(PlainNodeKeyPair::new(self_key_pair)), trusted_nodes); let handshake_result = handshake.wait().unwrap(); assert_eq!(handshake_result.1, Ok(HandshakeResult { node_id: handshake_result.0.peer_public().clone(), @@ -298,9 +302,9 @@ mod tests { let (self_confirmation_plain, io) = prepare_test_io(); let self_key_pair = io.self_key_pair().clone(); let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect(); - let shared_key = compute_shared_key(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap(); + let shared_key = fix_shared_key(&agree(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap()).unwrap(); - let mut handshake = accept_handshake(io, self_key_pair); + let mut handshake = accept_handshake(io, Arc::new(PlainNodeKeyPair::new(self_key_pair))); 
handshake.set_self_confirmation_plain(self_confirmation_plain); let handshake_result = handshake.wait().unwrap(); diff --git a/secret_store/src/key_server_cluster/io/message.rs b/secret_store/src/key_server_cluster/io/message.rs index 49b71e39d..5a6b50a3e 100644 --- a/secret_store/src/key_server_cluster/io/message.rs +++ b/secret_store/src/key_server_cluster/io/message.rs @@ -19,9 +19,8 @@ use std::u16; use std::ops::Deref; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use serde_json; -use ethcrypto::ecdh::agree; use ethcrypto::ecies::{encrypt_single_message, decrypt_single_message}; -use ethkey::{Public, Secret, KeyPair}; +use ethkey::{Secret, KeyPair}; use ethkey::math::curve_order; use util::{H256, U256}; use key_server_cluster::Error; @@ -154,12 +153,11 @@ pub fn decrypt_message(key: &KeyPair, payload: Vec) -> Result, Error Ok(decrypt_single_message(key.secret(), &payload)?) } -/// Compute shared encryption key. -pub fn compute_shared_key(self_secret: &Secret, other_public: &Public) -> Result { +/// Fix shared encryption key. 
+pub fn fix_shared_key(shared_secret: &Secret) -> Result { // secret key created in agree function is invalid, as it is not calculated mod EC.field.n // => let's do it manually - let shared_secret = agree(self_secret, other_public)?; - let shared_secret: H256 = (*shared_secret).into(); + let shared_secret: H256 = (**shared_secret).into(); let shared_secret: U256 = shared_secret.into(); let shared_secret: H256 = (shared_secret % curve_order()).into(); let shared_key_pair = KeyPair::from_secret_slice(&*shared_secret)?; @@ -204,8 +202,9 @@ pub mod tests { use futures::Poll; use tokio_io::{AsyncRead, AsyncWrite}; use ethkey::{KeyPair, Public}; + use ethcrypto::ecdh::agree; use key_server_cluster::message::Message; - use super::{MESSAGE_HEADER_SIZE, MessageHeader, compute_shared_key, encrypt_message, serialize_message, + use super::{MESSAGE_HEADER_SIZE, MessageHeader, fix_shared_key, encrypt_message, serialize_message, serialize_header, deserialize_header}; pub struct TestIo { @@ -217,7 +216,7 @@ pub mod tests { impl TestIo { pub fn new(self_key_pair: KeyPair, peer_public: Public) -> Self { - let shared_key_pair = compute_shared_key(self_key_pair.secret(), &peer_public).unwrap(); + let shared_key_pair = fix_shared_key(&agree(self_key_pair.secret(), &peer_public).unwrap()).unwrap(); TestIo { self_key_pair: self_key_pair, peer_public: peer_public, diff --git a/secret_store/src/key_server_cluster/io/mod.rs b/secret_store/src/key_server_cluster/io/mod.rs index 57071038e..dfea33683 100644 --- a/secret_store/src/key_server_cluster/io/mod.rs +++ b/secret_store/src/key_server_cluster/io/mod.rs @@ -26,7 +26,7 @@ mod write_message; pub use self::deadline::{deadline, Deadline, DeadlineStatus}; pub use self::handshake::{handshake, accept_handshake, Handshake, HandshakeResult}; pub use self::message::{MessageHeader, SerializedMessage, serialize_message, deserialize_message, - encrypt_message, compute_shared_key}; + encrypt_message, fix_shared_key}; pub use 
self::read_header::{read_header, ReadHeader}; pub use self::read_payload::{read_payload, read_encrypted_payload, ReadPayload}; pub use self::read_message::{read_message, read_encrypted_message, ReadMessage}; diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index 8f6ae4add..102c3672f 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -20,6 +20,7 @@ use ethkey; use ethcrypto; use super::types::all::ServerKeyId; +pub use super::traits::NodeKeyPair; pub use super::types::all::{NodeId, EncryptedDocumentKeyShadow}; pub use super::acl_storage::AclStorage; pub use super::key_storage::{KeyStorage, DocumentKeyShare}; @@ -30,6 +31,8 @@ pub use self::generation_session::Session as GenerationSession; pub use self::encryption_session::Session as EncryptionSession; pub use self::decryption_session::Session as DecryptionSession; +#[cfg(test)] +pub use super::node_key_pair::PlainNodeKeyPair; #[cfg(test)] pub use super::key_storage::tests::DummyKeyStorage; #[cfg(test)] diff --git a/secret_store/src/key_server_cluster/net/accept_connection.rs b/secret_store/src/key_server_cluster/net/accept_connection.rs index 339625f3f..d85e492dd 100644 --- a/secret_store/src/key_server_cluster/net/accept_connection.rs +++ b/secret_store/src/key_server_cluster/net/accept_connection.rs @@ -15,18 +15,18 @@ // along with Parity. If not, see . use std::io; +use std::sync::Arc; use std::net::SocketAddr; use std::time::Duration; use futures::{Future, Poll}; use tokio_core::reactor::Handle; use tokio_core::net::TcpStream; -use ethkey::KeyPair; -use key_server_cluster::Error; +use key_server_cluster::{Error, NodeKeyPair}; use key_server_cluster::io::{accept_handshake, Handshake, Deadline, deadline}; use key_server_cluster::net::Connection; /// Create future for accepting incoming connection. 
-pub fn accept_connection(address: SocketAddr, stream: TcpStream, handle: &Handle, self_key_pair: KeyPair) -> Deadline { +pub fn accept_connection(address: SocketAddr, stream: TcpStream, handle: &Handle, self_key_pair: Arc) -> Deadline { let accept = AcceptConnection { handshake: accept_handshake(stream, self_key_pair), address: address, diff --git a/secret_store/src/key_server_cluster/net/connect.rs b/secret_store/src/key_server_cluster/net/connect.rs index 449168ab2..7515494e4 100644 --- a/secret_store/src/key_server_cluster/net/connect.rs +++ b/secret_store/src/key_server_cluster/net/connect.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::sync::Arc; use std::collections::BTreeSet; use std::io; use std::time::Duration; @@ -21,13 +22,12 @@ use std::net::SocketAddr; use futures::{Future, Poll, Async}; use tokio_core::reactor::Handle; use tokio_core::net::{TcpStream, TcpStreamNew}; -use ethkey::KeyPair; -use key_server_cluster::{Error, NodeId}; +use key_server_cluster::{Error, NodeId, NodeKeyPair}; use key_server_cluster::io::{handshake, Handshake, Deadline, deadline}; use key_server_cluster::net::Connection; /// Create future for connecting to other node. 
-pub fn connect(address: &SocketAddr, handle: &Handle, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Deadline { +pub fn connect(address: &SocketAddr, handle: &Handle, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Deadline { let connect = Connect { state: ConnectState::TcpConnect(TcpStream::connect(address, handle)), address: address.clone(), @@ -48,7 +48,7 @@ enum ConnectState { pub struct Connect { state: ConnectState, address: SocketAddr, - self_key_pair: KeyPair, + self_key_pair: Arc, trusted_nodes: BTreeSet, } diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index d5af7a5fa..18c61c1bf 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -241,7 +241,7 @@ pub mod tests { data_path: path.as_str().to_owned(), cluster_config: ClusterConfiguration { threads: 1, - self_private: (**Random.generate().unwrap().secret().clone()).into(), + //self_private: (**Random.generate().unwrap().secret().clone()).into(), listener_address: NodeAddress { address: "0.0.0.0".to_owned(), port: 8083, diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 9750f7223..7e9897e60 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -59,22 +59,24 @@ mod key_server; mod key_storage; mod serialization; mod key_server_set; +mod node_key_pair; use std::sync::Arc; use ethcore::client::Client; pub use types::all::{ServerKeyId, EncryptedDocumentKey, RequestSignature, Public, Error, NodeAddress, ServiceConfiguration, ClusterConfiguration}; -pub use traits::{KeyServer}; +pub use traits::{NodeKeyPair, KeyServer}; +pub use self::node_key_pair::{PlainNodeKeyPair, KeyStoreNodeKeyPair}; /// Start new key server instance -pub fn start(client: Arc, config: ServiceConfiguration) -> Result, Error> { +pub fn start(client: Arc, self_key_pair: Arc, config: ServiceConfiguration) -> Result, Error> { use std::sync::Arc; let acl_storage = acl_storage::OnChainAclStorage::new(&client); let key_server_set = 
key_server_set::OnChainKeyServerSet::new(&client, config.cluster_config.nodes.clone())?; let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?); - let key_server = key_server::KeyServerImpl::new(&config.cluster_config, key_server_set, acl_storage, key_storage)?; + let key_server = key_server::KeyServerImpl::new(&config.cluster_config, key_server_set, self_key_pair, acl_storage, key_storage)?; let listener = http_listener::KeyServerHttpListener::start(&config.listener_address, key_server)?; Ok(Box::new(listener)) } diff --git a/secret_store/src/node_key_pair.rs b/secret_store/src/node_key_pair.rs new file mode 100644 index 000000000..8676dd16d --- /dev/null +++ b/secret_store/src/node_key_pair.rs @@ -0,0 +1,67 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::sync::Arc; +use ethcrypto::ecdh::agree; +use ethkey::{KeyPair, Public, Signature, Error as EthKeyError, sign}; +use ethcore::account_provider::AccountProvider; +use util::H256; +use traits::NodeKeyPair; + +pub struct PlainNodeKeyPair { + key_pair: KeyPair, +} + +pub struct KeyStoreNodeKeyPair { + _account_provider: Arc, +} + +impl PlainNodeKeyPair { + pub fn new(key_pair: KeyPair) -> Self { + PlainNodeKeyPair { + key_pair: key_pair, + } + } +} + +impl NodeKeyPair for PlainNodeKeyPair { + fn public(&self) -> &Public { + self.key_pair.public() + } + + fn sign(&self, data: &H256) -> Result { + sign(self.key_pair.secret(), data) + } + + fn compute_shared_key(&self, peer_public: &Public) -> Result { + agree(self.key_pair.secret(), peer_public).map_err(|e| EthKeyError::Custom(e.into())) + .and_then(KeyPair::from_secret) + } +} + +impl NodeKeyPair for KeyStoreNodeKeyPair { + fn public(&self) -> &Public { + unimplemented!() + } + + fn sign(&self, _data: &H256) -> Result { + unimplemented!() + } + + fn compute_shared_key(&self, _peer_public: &Public) -> Result { + unimplemented!() + } +} diff --git a/secret_store/src/traits.rs b/secret_store/src/traits.rs index 33a4eff3c..31da748e0 100644 --- a/secret_store/src/traits.rs +++ b/secret_store/src/traits.rs @@ -14,9 +14,21 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use ethkey::{KeyPair, Signature, Error as EthKeyError}; +use util::H256; use types::all::{Error, Public, ServerKeyId, MessageHash, EncryptedMessageSignature, RequestSignature, EncryptedDocumentKey, EncryptedDocumentKeyShadow}; +/// Node key pair. +pub trait NodeKeyPair: Send + Sync { + /// Public portion of key. + fn public(&self) -> &Public; + /// Sign data with node key. + fn sign(&self, data: &H256) -> Result; + /// Compute shared key to encrypt channel between two nodes. + fn compute_shared_key(&self, peer_public: &Public) -> Result; +} + /// Server key (SK) generator. 
pub trait ServerKeyGenerator { /// Generate new SK. diff --git a/secret_store/src/types/all.rs b/secret_store/src/types/all.rs index 54fc8acae..8dc92f175 100644 --- a/secret_store/src/types/all.rs +++ b/secret_store/src/types/all.rs @@ -83,8 +83,6 @@ pub struct ServiceConfiguration { pub struct ClusterConfiguration { /// Number of threads reserved by cluster. pub threads: usize, - /// Private key this node holds. - pub self_private: Vec, // holds ethkey::Secret /// This node address. pub listener_address: NodeAddress, /// All cluster nodes addresses. From fb68b0924ab7902f301d697643a10d27f50fb739 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 25 Jul 2017 09:56:23 +0300 Subject: [PATCH 015/112] fixed parity to use new trait --- parity/secretstore.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/parity/secretstore.rs b/parity/secretstore.rs index f215c937c..b29f43479 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -65,6 +65,7 @@ mod server { #[cfg(feature="secretstore")] mod server { + use std::sync::Arc; use ethcore_secretstore; use ethkey::KeyPair; use super::{Configuration, Dependencies}; @@ -86,7 +87,6 @@ mod server { data_path: conf.data_path.clone(), cluster_config: ethcore_secretstore::ClusterConfiguration { threads: 4, - self_private: (**self_secret).into(), listener_address: ethcore_secretstore::NodeAddress { address: conf.interface.clone(), port: conf.port, @@ -103,7 +103,8 @@ mod server { .map_err(|e| format!("valid secret is required when using secretstore. 
Error: {}", e))?; conf.cluster_config.nodes.insert(self_key_pair.public().clone(), conf.cluster_config.listener_address.clone()); - let key_server = ethcore_secretstore::start(deps.client, conf) + let node_key_pair = Arc::new(ethcore_secretstore::PlainNodeKeyPair::new(self_key_pair)); + let key_server = ethcore_secretstore::start(deps.client, node_key_pair, conf) .map_err(Into::::into)?; Ok(KeyServer { From 9e30d85fdc4c2c76d7fc0bada8302cff9b66a383 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 25 Jul 2017 10:19:48 +0300 Subject: [PATCH 016/112] continue integrating with parity --- parity/configuration.rs | 8 ++++---- parity/secretstore.rs | 29 +++++++++++++++++++---------- 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/parity/configuration.rs b/parity/configuration.rs index fe397dff5..e037defa2 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -41,7 +41,7 @@ use ethcore_logger::Config as LogConfig; use dir::{self, Directories, default_hypervisor_path, default_local_path, default_data_path}; use dapps::Configuration as DappsConfiguration; use ipfs::Configuration as IpfsConfiguration; -use secretstore::Configuration as SecretStoreConfiguration; +use secretstore::{Configuration as SecretStoreConfiguration, NodeSecretKey}; use updater::{UpdatePolicy, UpdateFilter, ReleaseTrack}; use run::RunCmd; use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, KillBlockchain, ExportState, DataFormat}; @@ -995,10 +995,10 @@ impl Configuration { self.interface(&self.args.flag_secretstore_http_interface) } - fn secretstore_self_secret(&self) -> Result, String> { + fn secretstore_self_secret(&self) -> Result, String> { match self.args.flag_secretstore_secret { - Some(ref s) => Ok(Some(s.parse() - .map_err(|e| format!("Invalid secret store secret: {}. Error: {:?}", s, e))?)), + Some(ref s) => Ok(Some(NodeSecretKey::Plain(s.parse() + .map_err(|e| format!("Invalid secret store secret: {}. 
Error: {:?}", s, e))?))), None => Ok(None), } } diff --git a/parity/secretstore.rs b/parity/secretstore.rs index b29f43479..7cdd26377 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -21,13 +21,20 @@ use ethcore::client::Client; use ethkey::{Secret, Public}; use helpers::replace_home; +#[derive(Debug, PartialEq, Clone)] +/// This node secret key. +pub enum NodeSecretKey { + /// Stored as plain text in configuration file. + Plain(Secret), +} + #[derive(Debug, PartialEq, Clone)] /// Secret store configuration pub struct Configuration { /// Is secret store functionality enabled? pub enabled: bool, /// This node secret. - pub self_secret: Option, + pub self_secret: Option, /// Other nodes IDs + addresses. pub nodes: BTreeMap, /// Interface to listen to @@ -66,9 +73,9 @@ mod server { #[cfg(feature="secretstore")] mod server { use std::sync::Arc; - use ethcore_secretstore; + use ethcore_secretstore::{self, NodeKeyPair}; use ethkey::KeyPair; - use super::{Configuration, Dependencies}; + use super::{Configuration, Dependencies, NodeSecretKey}; /// Key server pub struct KeyServer { @@ -77,8 +84,13 @@ mod server { impl KeyServer { /// Create new key server - pub fn new(conf: Configuration, deps: Dependencies) -> Result { - let self_secret = conf.self_secret.ok_or("self secret is required when using secretstore")?; + pub fn new(mut conf: Configuration, deps: Dependencies) -> Result { + let self_secret = match conf.self_secret.take() { + Some(NodeSecretKey::Plain(secret)) => Arc::new(ethcore_secretstore::PlainNodeKeyPair::new( + KeyPair::from_secret(secret).map_err(|e| format!("invalid secret: {}", e))?)), + None => return Err("self secret is required when using secretstore".into()), + }; + let mut conf = ethcore_secretstore::ServiceConfiguration { listener_address: ethcore_secretstore::NodeAddress { address: conf.http_interface.clone(), @@ -99,12 +111,9 @@ mod server { }, }; - let self_key_pair = KeyPair::from_secret(self_secret.clone()) - .map_err(|e| 
format!("valid secret is required when using secretstore. Error: {}", e))?; - conf.cluster_config.nodes.insert(self_key_pair.public().clone(), conf.cluster_config.listener_address.clone()); + conf.cluster_config.nodes.insert(self_secret.public().clone(), conf.cluster_config.listener_address.clone()); - let node_key_pair = Arc::new(ethcore_secretstore::PlainNodeKeyPair::new(self_key_pair)); - let key_server = ethcore_secretstore::start(deps.client, node_key_pair, conf) + let key_server = ethcore_secretstore::start(deps.client, self_secret, conf) .map_err(Into::::into)?; Ok(KeyServer { From 2e9df2c39dbb6790d6c43befcf5214bcf650eeb7 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 25 Jul 2017 16:30:24 +0300 Subject: [PATCH 017/112] updated parity for NodeKeyPair --- ethcore/src/account_provider/mod.rs | 5 +++++ parity/configuration.rs | 5 ++++- parity/run.rs | 4 +++- parity/secretstore.rs | 32 ++++++++++++++++++++++++++--- secret_store/src/node_key_pair.rs | 26 ++++++++++++++++++----- 5 files changed, 62 insertions(+), 10 deletions(-) diff --git a/ethcore/src/account_provider/mod.rs b/ethcore/src/account_provider/mod.rs index 249ca40af..769db692c 100755 --- a/ethcore/src/account_provider/mod.rs +++ b/ethcore/src/account_provider/mod.rs @@ -519,6 +519,11 @@ impl AccountProvider { } } + /// Returns account public key. + pub fn account_public(&self, address: Address, password: &str) -> Result { + self.sstore.public(&self.sstore.account_ref(&address)?, password) + } + /// Returns each account along with name and meta. 
pub fn set_account_name(&self, address: Address, name: String) -> Result<(), Error> { self.sstore.set_name(&self.sstore.account_ref(&address)?, name)?; diff --git a/parity/configuration.rs b/parity/configuration.rs index e037defa2..09dbfeedf 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -997,8 +997,11 @@ impl Configuration { fn secretstore_self_secret(&self) -> Result, String> { match self.args.flag_secretstore_secret { - Some(ref s) => Ok(Some(NodeSecretKey::Plain(s.parse() + Some(ref s) if s.len() == 64 => Ok(Some(NodeSecretKey::Plain(s.parse() .map_err(|e| format!("Invalid secret store secret: {}. Error: {:?}", s, e))?))), + Some(ref s) if s.len() == 40 => Ok(Some(NodeSecretKey::KeyStore(s.parse() + .map_err(|e| format!("Invalid secret store secret address: {}. Error: {:?}", s, e))?))), + Some(_) => Err(format!("Invalid secret store secret. Must be either existing account address, or hex-encoded private key")), None => Ok(None), } } diff --git a/parity/run.rs b/parity/run.rs index 30f4c8759..8fc5da405 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -489,7 +489,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R } // Attempt to sign in the engine signer. - if !passwords.into_iter().any(|p| miner.set_engine_signer(engine_signer, p).is_ok()) { + if !passwords.iter().any(|p| miner.set_engine_signer(engine_signer, (*p).clone()).is_ok()) { return Err(format!("No valid password for the consensus signer {}. 
{}", engine_signer, VERIFY_PASSWORD_HINT)); } } @@ -705,6 +705,8 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // secret store key server let secretstore_deps = secretstore::Dependencies { client: client.clone(), + account_provider: account_provider, + accounts_passwords: &passwords, }; let secretstore_key_server = secretstore::start(cmd.secretstore_conf.clone(), secretstore_deps)?; diff --git a/parity/secretstore.rs b/parity/secretstore.rs index 7cdd26377..def2cd1a6 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -17,15 +17,19 @@ use std::collections::BTreeMap; use std::sync::Arc; use dir::default_data_path; +use ethcore::account_provider::AccountProvider; use ethcore::client::Client; use ethkey::{Secret, Public}; use helpers::replace_home; +use util::Address; #[derive(Debug, PartialEq, Clone)] /// This node secret key. pub enum NodeSecretKey { /// Stored as plain text in configuration file. Plain(Secret), + /// Stored as account in key store. + KeyStore(Address), } #[derive(Debug, PartialEq, Clone)] @@ -50,9 +54,13 @@ pub struct Configuration { } /// Secret store dependencies -pub struct Dependencies { +pub struct Dependencies<'a> { /// Blockchain client. pub client: Arc, + /// Account provider. + pub account_provider: Arc, + /// Passed accounts passwords. 
+ pub accounts_passwords: &'a [String], } #[cfg(not(feature = "secretstore"))] @@ -73,7 +81,7 @@ mod server { #[cfg(feature="secretstore")] mod server { use std::sync::Arc; - use ethcore_secretstore::{self, NodeKeyPair}; + use ethcore_secretstore; use ethkey::KeyPair; use super::{Configuration, Dependencies, NodeSecretKey}; @@ -85,9 +93,27 @@ mod server { impl KeyServer { /// Create new key server pub fn new(mut conf: Configuration, deps: Dependencies) -> Result { - let self_secret = match conf.self_secret.take() { + let self_secret: Arc = match conf.self_secret.take() { Some(NodeSecretKey::Plain(secret)) => Arc::new(ethcore_secretstore::PlainNodeKeyPair::new( KeyPair::from_secret(secret).map_err(|e| format!("invalid secret: {}", e))?)), + Some(NodeSecretKey::KeyStore(account)) => { + // Check if account exists + if !deps.account_provider.has_account(account.clone()).unwrap_or(false) { + return Err(format!("Account {} passed as secret store node key is not found", account)); + } + + // Check if any passwords have been read from the password file(s) + if deps.accounts_passwords.is_empty() { + return Err(format!("No password found for the secret store node account {}", account)); + } + + // Attempt to sign in the engine signer. + let password = deps.accounts_passwords.iter() + .find(|p| deps.account_provider.sign(account.clone(), Some((*p).clone()), Default::default()).is_ok()) + .ok_or(format!("No valid password for the secret store node account {}", account))?; + Arc::new(ethcore_secretstore::KeyStoreNodeKeyPair::new(deps.account_provider, account, password.clone()) + .map_err(|e| format!("{}", e))?) 
+ }, None => return Err("self secret is required when using secretstore".into()), }; diff --git a/secret_store/src/node_key_pair.rs b/secret_store/src/node_key_pair.rs index 8676dd16d..556625079 100644 --- a/secret_store/src/node_key_pair.rs +++ b/secret_store/src/node_key_pair.rs @@ -18,7 +18,7 @@ use std::sync::Arc; use ethcrypto::ecdh::agree; use ethkey::{KeyPair, Public, Signature, Error as EthKeyError, sign}; use ethcore::account_provider::AccountProvider; -use util::H256; +use util::{Address, H256}; use traits::NodeKeyPair; pub struct PlainNodeKeyPair { @@ -26,7 +26,10 @@ pub struct PlainNodeKeyPair { } pub struct KeyStoreNodeKeyPair { - _account_provider: Arc, + account_provider: Arc, + address: Address, + public: Public, + password: String, } impl PlainNodeKeyPair { @@ -52,13 +55,26 @@ impl NodeKeyPair for PlainNodeKeyPair { } } +impl KeyStoreNodeKeyPair { + pub fn new(account_provider: Arc, address: Address, password: String) -> Result { + let public = account_provider.account_public(address.clone(), &password).map_err(|e| EthKeyError::Custom(format!("{}", e)))?; + Ok(KeyStoreNodeKeyPair { + account_provider: account_provider, + address: address, + public: public, + password: password, + }) + } +} + impl NodeKeyPair for KeyStoreNodeKeyPair { fn public(&self) -> &Public { - unimplemented!() + &self.public } - fn sign(&self, _data: &H256) -> Result { - unimplemented!() + fn sign(&self, data: &H256) -> Result { + self.account_provider.sign(self.address.clone(), Some(self.password.clone()), data.clone()) + .map_err(|e| EthKeyError::Custom(format!("{}", e))) } fn compute_shared_key(&self, _peer_public: &Public) -> Result { From eb895fbb3199cd81c887c3048ba2c2090f927ac6 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 25 Jul 2017 17:54:32 +0300 Subject: [PATCH 018/112] completed KeyStoreNodeKeyPair --- Cargo.lock | 1 + ethcore/src/account_provider/mod.rs | 7 +++++++ ethstore/Cargo.toml | 1 + ethstore/src/account/safe_account.rs | 9 ++++++++- 
ethstore/src/ethstore.rs | 12 ++++++++++++ ethstore/src/lib.rs | 1 + ethstore/src/secret_store.rs | 2 ++ secret_store/src/key_server.rs | 1 - secret_store/src/node_key_pair.rs | 5 +++-- 9 files changed, 35 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ae33d7590..0c049c948 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -794,6 +794,7 @@ name = "ethstore" version = "0.1.0" dependencies = [ "ethcore-bigint 0.1.3", + "ethcore-util 1.8.0", "ethcrypto 0.1.0", "ethkey 0.2.0", "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/ethcore/src/account_provider/mod.rs b/ethcore/src/account_provider/mod.rs index 769db692c..752cec964 100755 --- a/ethcore/src/account_provider/mod.rs +++ b/ethcore/src/account_provider/mod.rs @@ -702,6 +702,13 @@ impl AccountProvider { Ok(self.sstore.decrypt(&account, &password, shared_mac, message)?) } + /// Agree on shared key. + pub fn agree(&self, address: Address, password: Option, other_public: &Public) -> Result { + let account = self.sstore.account_ref(&address)?; + let password = password.map(Ok).unwrap_or_else(|| self.password(&account))?; + Ok(self.sstore.agree(&account, &password, other_public)?) + } + /// Returns the underlying `SecretStore` reference if one exists. pub fn list_geth_accounts(&self, testnet: bool) -> Vec
{ self.sstore.list_geth_accounts(testnet).into_iter().map(|a| Address::from(a).into()).collect() diff --git a/ethstore/Cargo.toml b/ethstore/Cargo.toml index 117332022..9d8d2fce5 100755 --- a/ethstore/Cargo.toml +++ b/ethstore/Cargo.toml @@ -19,6 +19,7 @@ itertools = "0.5" parking_lot = "0.4" ethcrypto = { path = "../ethcrypto" } ethcore-bigint = { path = "../util/bigint" } +ethcore-util = { path = "../util" } smallvec = "0.4" parity-wordlist = "1.0" tempdir = "0.3" diff --git a/ethstore/src/account/safe_account.rs b/ethstore/src/account/safe_account.rs index e0512fe8d..478b796e6 100755 --- a/ethstore/src/account/safe_account.rs +++ b/ethstore/src/account/safe_account.rs @@ -14,7 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use ethkey::{KeyPair, sign, Address, Signature, Message, Public}; +use ethkey::{KeyPair, sign, Address, Signature, Message, Public, Secret}; +use crypto::ecdh::agree; use {json, Error, crypto}; use account::Version; use super::crypto::Crypto; @@ -135,6 +136,12 @@ impl SafeAccount { crypto::ecies::decrypt(&secret, shared_mac, message).map_err(From::from) } + /// Agree on shared key. + pub fn agree(&self, password: &str, other: &Public) -> Result { + let secret = self.crypto.secret(password)?; + agree(&secret, other).map_err(From::from) + } + /// Derive public key. 
pub fn public(&self, password: &str) -> Result { let secret = self.crypto.secret(password)?; diff --git a/ethstore/src/ethstore.rs b/ethstore/src/ethstore.rs index 246671990..d32fa9f62 100755 --- a/ethstore/src/ethstore.rs +++ b/ethstore/src/ethstore.rs @@ -97,6 +97,10 @@ impl SimpleSecretStore for EthStore { self.store.sign_derived(account_ref, password, derivation, message) } + fn agree(&self, account: &StoreAccountRef, password: &str, other: &Public) -> Result { + self.store.agree(account, password, other) + } + fn decrypt(&self, account: &StoreAccountRef, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error> { let account = self.get(account)?; account.decrypt(password, shared_mac, message) @@ -509,6 +513,14 @@ impl SimpleSecretStore for EthMultiStore { Err(Error::InvalidPassword) } + fn agree(&self, account: &StoreAccountRef, password: &str, other: &Public) -> Result { + let accounts = self.get_matching(account, password)?; + for account in accounts { + return account.agree(password, other); + } + Err(Error::InvalidPassword) + } + fn create_vault(&self, name: &str, password: &str) -> Result<(), Error> { let is_vault_created = { // lock border let mut vaults = self.vaults.lock(); diff --git a/ethstore/src/lib.rs b/ethstore/src/lib.rs index 65935f89c..311e9e73a 100755 --- a/ethstore/src/lib.rs +++ b/ethstore/src/lib.rs @@ -35,6 +35,7 @@ extern crate ethcore_bigint as bigint; extern crate ethcrypto as crypto; extern crate ethkey as _ethkey; extern crate parity_wordlist; +extern crate ethcore_util as util; #[macro_use] extern crate log; diff --git a/ethstore/src/secret_store.rs b/ethstore/src/secret_store.rs index 2deae023e..e364245b7 100755 --- a/ethstore/src/secret_store.rs +++ b/ethstore/src/secret_store.rs @@ -60,6 +60,8 @@ pub trait SimpleSecretStore: Send + Sync { fn sign_derived(&self, account_ref: &StoreAccountRef, password: &str, derivation: Derivation, message: &Message) -> Result; /// Decrypt a messages with given account. 
fn decrypt(&self, account: &StoreAccountRef, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error>; + /// Agree on shared key. + fn agree(&self, account: &StoreAccountRef, password: &str, other: &Public) -> Result; /// Returns all accounts in this secret store. fn accounts(&self) -> Result, Error>; diff --git a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index 0944dd37c..6526bff68 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -245,7 +245,6 @@ pub mod tests { let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect(); let configs: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration { threads: 1, -// self_key_pair: Arc::new(PlainNodeKeyPair::new(key_pairs[i].clone())), listener_address: NodeAddress { address: "127.0.0.1".into(), port: start_port + (i as u16), diff --git a/secret_store/src/node_key_pair.rs b/secret_store/src/node_key_pair.rs index 556625079..ce6c88a07 100644 --- a/secret_store/src/node_key_pair.rs +++ b/secret_store/src/node_key_pair.rs @@ -77,7 +77,8 @@ impl NodeKeyPair for KeyStoreNodeKeyPair { .map_err(|e| EthKeyError::Custom(format!("{}", e))) } - fn compute_shared_key(&self, _peer_public: &Public) -> Result { - unimplemented!() + fn compute_shared_key(&self, peer_public: &Public) -> Result { + KeyPair::from_secret(self.account_provider.agree(self.address.clone(), Some(self.password.clone()), peer_public) + .map_err(|e| EthKeyError::Custom(format!("{}", e)))?) 
} } From 4938dfd971a6524310611fbb0da0f0fdd02e1a72 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 25 Jul 2017 17:57:27 +0300 Subject: [PATCH 019/112] removed comment --- secret_store/src/key_storage.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index 18c61c1bf..08ebe6e1c 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -241,7 +241,6 @@ pub mod tests { data_path: path.as_str().to_owned(), cluster_config: ClusterConfiguration { threads: 1, - //self_private: (**Random.generate().unwrap().secret().clone()).into(), listener_address: NodeAddress { address: "0.0.0.0".to_owned(), port: 8083, From 417a037ac517d1e783f6c42c4fbc9e475a7ffffc Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 26 Jul 2017 14:09:41 +0300 Subject: [PATCH 020/112] improved logging --- .../src/key_server_cluster/cluster.rs | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 155dd4a01..c267d1259 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -262,7 +262,7 @@ impl ClusterCore { fn connect_future(handle: &Handle, data: Arc, node_address: SocketAddr) -> BoxedEmptyFuture { let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect(); net_connect(&node_address, handle, data.self_key_pair.clone(), disconnected_nodes) - .then(move |result| ClusterCore::process_connection_result(data, false, result)) + .then(move |result| ClusterCore::process_connection_result(data, Some(node_address), result)) .then(|_| finished(())) .boxed() } @@ -290,7 +290,7 @@ impl ClusterCore { /// Accept connection future. 
fn accept_connection_future(handle: &Handle, data: Arc, stream: TcpStream, node_address: SocketAddr) -> BoxedEmptyFuture { net_accept_connection(node_address, stream, handle, data.self_key_pair.clone()) - .then(move |result| ClusterCore::process_connection_result(data, true, result)) + .then(move |result| ClusterCore::process_connection_result(data, None, result)) .then(|_| finished(())) .boxed() } @@ -370,10 +370,10 @@ impl ClusterCore { } /// Process connection future result. - fn process_connection_result(data: Arc, is_inbound: bool, result: Result>, io::Error>) -> IoFuture> { + fn process_connection_result(data: Arc, outbound_addr: Option, result: Result>, io::Error>) -> IoFuture> { match result { Ok(DeadlineStatus::Meet(Ok(connection))) => { - let connection = Connection::new(is_inbound, connection); + let connection = Connection::new(outbound_addr.is_none(), connection); if data.connections.insert(connection.clone()) { ClusterCore::process_connection_messages(data.clone(), connection) } else { @@ -381,15 +381,21 @@ impl ClusterCore { } }, Ok(DeadlineStatus::Meet(Err(err))) => { - warn!(target: "secretstore_net", "{}: protocol error {} when establishind connection", data.self_key_pair.public(), err); + warn!(target: "secretstore_net", "{}: protocol error {} when establishing {} connection{}", + data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" }, + outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); finished(Ok(())).boxed() }, Ok(DeadlineStatus::Timeout) => { - warn!(target: "secretstore_net", "{}: timeout when establishind connection", data.self_key_pair.public()); + warn!(target: "secretstore_net", "{}: timeout when establishing {} connection{}", + data.self_key_pair.public(), if outbound_addr.is_some() { "outbound" } else { "inbound" }, + outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); finished(Ok(())).boxed() }, Err(err) => { - warn!(target: "secretstore_net", "{}: network error 
{} when establishind connection", data.self_key_pair.public(), err); + warn!(target: "secretstore_net", "{}: network error {} when establishing {} connection{}", + data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" }, + outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); finished(Ok(())).boxed() }, } From dcfb8c1a10f6590c5b565e71ec491fb9fba038dc Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 26 Jul 2017 14:09:52 +0300 Subject: [PATCH 021/112] fixed generation session lags --- secret_store/src/key_server_cluster/generation_session.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/secret_store/src/key_server_cluster/generation_session.rs b/secret_store/src/key_server_cluster/generation_session.rs index e94d5bd35..0ba82524e 100644 --- a/secret_store/src/key_server_cluster/generation_session.rs +++ b/secret_store/src/key_server_cluster/generation_session.rs @@ -399,7 +399,7 @@ impl SessionImpl { // check state if data.state != SessionState::WaitingForKeysDissemination { match data.state { - SessionState::WaitingForInitializationComplete => return Err(Error::TooEarlyForRequest), + SessionState::WaitingForInitializationComplete | SessionState::WaitingForInitializationConfirm(_) => return Err(Error::TooEarlyForRequest), _ => return Err(Error::InvalidStateForRequest), } } From c466def1e8bc2e9a25c1cf5434cd18134200be68 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 27 Jul 2017 11:33:09 +0300 Subject: [PATCH 022/112] improved logging --- parity/secretstore.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/parity/secretstore.rs b/parity/secretstore.rs index def2cd1a6..0f23ffdf5 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -117,7 +117,8 @@ mod server { None => return Err("self secret is required when using secretstore".into()), }; - let mut conf = ethcore_secretstore::ServiceConfiguration { + let key_server_name = 
format!("{}:{}", conf.interface, conf.port); + let mut cconf = ethcore_secretstore::ServiceConfiguration { listener_address: ethcore_secretstore::NodeAddress { address: conf.http_interface.clone(), port: conf.http_port, @@ -137,10 +138,10 @@ mod server { }, }; - conf.cluster_config.nodes.insert(self_secret.public().clone(), conf.cluster_config.listener_address.clone()); + cconf.cluster_config.nodes.insert(self_secret.public().clone(), cconf.cluster_config.listener_address.clone()); - let key_server = ethcore_secretstore::start(deps.client, self_secret, conf) - .map_err(Into::::into)?; + let key_server = ethcore_secretstore::start(deps.client, self_secret, cconf) + .map_err(|e| format!("Error starting KeyServer {}: {}", key_server_name, e))?; Ok(KeyServer { _key_server: key_server, From 7c05a906d095a0e1c0f6cac16eec88247612856c Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 27 Jul 2017 13:29:09 +0300 Subject: [PATCH 023/112] cli option to disable SS HTTP API --- parity/cli/config.full.toml | 3 ++- parity/cli/mod.rs | 5 ++++ parity/cli/usage.txt | 1 + parity/configuration.rs | 6 +++++ parity/secretstore.rs | 7 ++++-- secret_store/src/http_listener.rs | 25 ++++++++++++------- .../key_server_cluster/generation_session.rs | 2 +- secret_store/src/key_storage.rs | 5 +--- secret_store/src/lib.rs | 2 +- secret_store/src/types/all.rs | 4 +-- 10 files changed, 40 insertions(+), 20 deletions(-) diff --git a/parity/cli/config.full.toml b/parity/cli/config.full.toml index 581871997..47ca9ffd8 100644 --- a/parity/cli/config.full.toml +++ b/parity/cli/config.full.toml @@ -76,8 +76,9 @@ path = "$HOME/.parity/dapps" user = "test_user" pass = "test_pass" -[secretstore] +[secretstore] disable = false +disable_http = false nodes = [] http_interface = "local" http_port = 8082 diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 262e054a2..27c5c40ff 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -216,6 +216,8 @@ usage! 
{ // Secret Store flag_no_secretstore: bool = false, or |c: &Config| otry!(c.secretstore).disable.clone(), + flag_no_secretstore_http: bool = false, + or |c: &Config| otry!(c.secretstore).disable_http.clone(), flag_secretstore_secret: Option = None, or |c: &Config| otry!(c.secretstore).self_secret.clone().map(Some), flag_secretstore_nodes: String = "", @@ -510,6 +512,7 @@ struct Dapps { #[derive(Default, Debug, PartialEq, Deserialize)] struct SecretStore { disable: Option, + disable_http: Option, self_secret: Option, nodes: Option>, interface: Option, @@ -779,6 +782,7 @@ mod tests { flag_no_dapps: false, flag_no_secretstore: false, + flag_no_secretstore_http: false, flag_secretstore_secret: None, flag_secretstore_nodes: "".into(), flag_secretstore_interface: "local".into(), @@ -1009,6 +1013,7 @@ mod tests { }), secretstore: Some(SecretStore { disable: None, + disable_http: None, self_secret: None, nodes: None, interface: None, diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index dc4796e05..38c76b71f 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -228,6 +228,7 @@ API and Console Options: Secret Store Options: --no-secretstore Disable Secret Store functionality. (default: {flag_no_secretstore}) + --no-secretstore-http Disable Secret Store HTTP API. (default: {flag_no_secretstore_http}) --secretstore-secret SECRET Hex-encoded secret key of this node. (required, default: {flag_secretstore_secret:?}). 
--secretstore-nodes NODES Comma-separated list of other secret store cluster nodes in form diff --git a/parity/configuration.rs b/parity/configuration.rs index 09dbfeedf..b17eeadef 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -586,6 +586,7 @@ impl Configuration { fn secretstore_config(&self) -> Result { Ok(SecretStoreConfiguration { enabled: self.secretstore_enabled(), + http_enabled: self.secretstore_http_enabled(), self_secret: self.secretstore_self_secret()?, nodes: self.secretstore_nodes()?, interface: self.secretstore_interface(), @@ -1050,6 +1051,10 @@ impl Configuration { !self.args.flag_no_secretstore && cfg!(feature = "secretstore") } + fn secretstore_http_enabled(&self) -> bool { + !self.args.flag_no_secretstore_http && cfg!(feature = "secretstore") + } + fn ui_enabled(&self) -> bool { if self.args.flag_force_ui { return true; @@ -1331,6 +1336,7 @@ mod tests { no_persistent_txqueue: false, }; expected.secretstore_conf.enabled = cfg!(feature = "secretstore"); + expected.secretstore_conf.http_enabled = cfg!(feature = "secretstore"); assert_eq!(conf.into_command().unwrap().cmd, Cmd::Run(expected)); } diff --git a/parity/secretstore.rs b/parity/secretstore.rs index 0f23ffdf5..ef577c988 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -37,6 +37,8 @@ pub enum NodeSecretKey { pub struct Configuration { /// Is secret store functionality enabled? pub enabled: bool, + /// Is HTTP API enabled? + pub http_enabled: bool, /// This node secret. pub self_secret: Option, /// Other nodes IDs + addresses. 
@@ -119,10 +121,10 @@ mod server { let key_server_name = format!("{}:{}", conf.interface, conf.port); let mut cconf = ethcore_secretstore::ServiceConfiguration { - listener_address: ethcore_secretstore::NodeAddress { + listener_address: if conf.http_enabled { Some(ethcore_secretstore::NodeAddress { address: conf.http_interface.clone(), port: conf.http_port, - }, + }) } else { None }, data_path: conf.data_path.clone(), cluster_config: ethcore_secretstore::ClusterConfiguration { threads: 4, @@ -157,6 +159,7 @@ impl Default for Configuration { let data_dir = default_data_path(); Configuration { enabled: true, + http_enabled: true, self_secret: None, nodes: BTreeMap::new(), interface: "127.0.0.1".to_owned(), diff --git a/secret_store/src/http_listener.rs b/secret_store/src/http_listener.rs index 1f7f14ede..86688618a 100644 --- a/secret_store/src/http_listener.rs +++ b/secret_store/src/http_listener.rs @@ -39,7 +39,7 @@ use types::all::{Error, Public, MessageHash, EncryptedMessageSignature, NodeAddr /// To sign message with server key: GET /{server_key_id}/{signature}/{message_hash} pub struct KeyServerHttpListener { - _http_server: HttpListening, + http_server: Option, handler: Arc>, } @@ -74,19 +74,26 @@ struct KeyServerSharedHttpHandler { impl KeyServerHttpListener where T: KeyServer + 'static { /// Start KeyServer http listener - pub fn start(listener_address: &NodeAddress, key_server: T) -> Result { + pub fn start(listener_address: Option, key_server: T) -> Result { let shared_handler = Arc::new(KeyServerSharedHttpHandler { key_server: key_server, }); - let handler = KeyServerHttpHandler { + /*let handler = KeyServerHttpHandler { handler: shared_handler.clone(), - }; + };*/ - let listener_addr: &str = &format!("{}:{}", listener_address.address, listener_address.port); + let http_server = listener_address + .map(|listener_address| format!("{}:{}", listener_address.address, listener_address.port)) + .map(|listener_address| 
HttpServer::http(&listener_address).expect("cannot start HttpServer")) + .map(|http_server| http_server.handle(KeyServerHttpHandler { + handler: shared_handler.clone(), + }).expect("cannot start HttpServer")); + + /*let listener_addr: &str = &format!("{}:{}", listener_address.address, listener_address.port); let http_server = HttpServer::http(&listener_addr).expect("cannot start HttpServer"); - let http_server = http_server.handle(handler).expect("cannot start HttpServer"); + let http_server = http_server.handle(handler).expect("cannot start HttpServer");*/ let listener = KeyServerHttpListener { - _http_server: http_server, + http_server: http_server, handler: shared_handler, }; Ok(listener) @@ -128,7 +135,7 @@ impl MessageSigner for KeyServerHttpListener where T: KeyServer + 'static impl Drop for KeyServerHttpListener where T: KeyServer + 'static { fn drop(&mut self) { // ignore error as we are dropping anyway - let _ = self._http_server.close(); + self.http_server.take().map(|mut s| { let _ = s.close(); }); } } @@ -318,7 +325,7 @@ mod tests { fn http_listener_successfully_drops() { let key_server = DummyKeyServer; let address = NodeAddress { address: "127.0.0.1".into(), port: 9000 }; - let listener = KeyServerHttpListener::start(&address, key_server).unwrap(); + let listener = KeyServerHttpListener::start(Some(address), key_server).unwrap(); drop(listener); } diff --git a/secret_store/src/key_server_cluster/generation_session.rs b/secret_store/src/key_server_cluster/generation_session.rs index 0ba82524e..ade78bc57 100644 --- a/secret_store/src/key_server_cluster/generation_session.rs +++ b/secret_store/src/key_server_cluster/generation_session.rs @@ -1104,7 +1104,7 @@ pub mod tests { secret1: math::generate_random_scalar().unwrap().into(), secret2: math::generate_random_scalar().unwrap().into(), publics: vec![math::generate_random_point().unwrap().into()], - }).unwrap_err(), Error::InvalidStateForRequest); + }).unwrap_err(), Error::TooEarlyForRequest); } #[test] 
diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index 08ebe6e1c..20b5eaf6c 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -234,10 +234,7 @@ pub mod tests { fn persistent_key_storage() { let path = RandomTempPath::create_dir(); let config = ServiceConfiguration { - listener_address: NodeAddress { - address: "0.0.0.0".to_owned(), - port: 8082, - }, + listener_address: None, data_path: path.as_str().to_owned(), cluster_config: ClusterConfiguration { threads: 1, diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 7e9897e60..6ead7c657 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -77,6 +77,6 @@ pub fn start(client: Arc, self_key_pair: Arc, config: Servi let key_server_set = key_server_set::OnChainKeyServerSet::new(&client, config.cluster_config.nodes.clone())?; let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?); let key_server = key_server::KeyServerImpl::new(&config.cluster_config, key_server_set, self_key_pair, acl_storage, key_storage)?; - let listener = http_listener::KeyServerHttpListener::start(&config.listener_address, key_server)?; + let listener = http_listener::KeyServerHttpListener::start(config.listener_address, key_server)?; Ok(Box::new(listener)) } diff --git a/secret_store/src/types/all.rs b/secret_store/src/types/all.rs index 8dc92f175..6bc0d9c87 100644 --- a/secret_store/src/types/all.rs +++ b/secret_store/src/types/all.rs @@ -69,8 +69,8 @@ pub struct NodeAddress { #[binary] /// Secret store configuration pub struct ServiceConfiguration { - /// HTTP listener address. - pub listener_address: NodeAddress, + /// HTTP listener address. If None, HTTP API is disabled. + pub listener_address: Option, /// Data directory path for secret store pub data_path: String, /// Cluster configuration. 
From c345bc3d856e48c3c37f0a56bbea7cc1eabc74dc Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 27 Jul 2017 15:48:07 +0300 Subject: [PATCH 024/112] cli option to disable SS ACL check --- parity/cli/config.full.toml | 1 + parity/cli/mod.rs | 5 ++ parity/cli/usage.txt | 1 + parity/configuration.rs | 5 ++ parity/secretstore.rs | 4 ++ secret_store/src/acl_storage.rs | 57 ++++++++----------- secret_store/src/key_server.rs | 2 +- .../key_server_cluster/decryption_session.rs | 2 +- secret_store/src/key_server_cluster/mod.rs | 2 +- .../src/key_server_cluster/signing_session.rs | 2 +- secret_store/src/key_storage.rs | 1 + secret_store/src/lib.rs | 6 +- secret_store/src/types/all.rs | 2 + 13 files changed, 53 insertions(+), 37 deletions(-) diff --git a/parity/cli/config.full.toml b/parity/cli/config.full.toml index 47ca9ffd8..75677ed6c 100644 --- a/parity/cli/config.full.toml +++ b/parity/cli/config.full.toml @@ -79,6 +79,7 @@ pass = "test_pass" [secretstore] disable = false disable_http = false +disable_acl_check = false nodes = [] http_interface = "local" http_port = 8082 diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 27c5c40ff..50032767a 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -218,6 +218,8 @@ usage! 
{ or |c: &Config| otry!(c.secretstore).disable.clone(), flag_no_secretstore_http: bool = false, or |c: &Config| otry!(c.secretstore).disable_http.clone(), + flag_no_secretstore_acl_check: bool = false, + or |c: &Config| otry!(c.secretstore).disable_acl_check.clone(), flag_secretstore_secret: Option = None, or |c: &Config| otry!(c.secretstore).self_secret.clone().map(Some), flag_secretstore_nodes: String = "", @@ -513,6 +515,7 @@ struct Dapps { struct SecretStore { disable: Option, disable_http: Option, + disable_acl_check: Option, self_secret: Option, nodes: Option>, interface: Option, @@ -783,6 +786,7 @@ mod tests { flag_no_secretstore: false, flag_no_secretstore_http: false, + flag_no_secretstore_acl_check: false, flag_secretstore_secret: None, flag_secretstore_nodes: "".into(), flag_secretstore_interface: "local".into(), @@ -1014,6 +1018,7 @@ mod tests { secretstore: Some(SecretStore { disable: None, disable_http: None, + disable_acl_check: None, self_secret: None, nodes: None, interface: None, diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 38c76b71f..da36c4a2b 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -229,6 +229,7 @@ API and Console Options: Secret Store Options: --no-secretstore Disable Secret Store functionality. (default: {flag_no_secretstore}) --no-secretstore-http Disable Secret Store HTTP API. (default: {flag_no_secretstore_http}) + --no-acl-check Disable ACL check (useful for test environments). (default: {flag_no_secretstore_acl_check}) --secretstore-secret SECRET Hex-encoded secret key of this node. (required, default: {flag_secretstore_secret:?}). 
--secretstore-nodes NODES Comma-separated list of other secret store cluster nodes in form diff --git a/parity/configuration.rs b/parity/configuration.rs index b17eeadef..0583bdb81 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -587,6 +587,7 @@ impl Configuration { Ok(SecretStoreConfiguration { enabled: self.secretstore_enabled(), http_enabled: self.secretstore_http_enabled(), + acl_check_enabled: self.secretstore_acl_check_enabled(), self_secret: self.secretstore_self_secret()?, nodes: self.secretstore_nodes()?, interface: self.secretstore_interface(), @@ -1055,6 +1056,10 @@ impl Configuration { !self.args.flag_no_secretstore_http && cfg!(feature = "secretstore") } + fn secretstore_acl_check_enabled(&self) -> bool { + !self.args.flag_no_secretstore_acl_check + } + fn ui_enabled(&self) -> bool { if self.args.flag_force_ui { return true; diff --git a/parity/secretstore.rs b/parity/secretstore.rs index ef577c988..8094ef323 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -39,6 +39,8 @@ pub struct Configuration { pub enabled: bool, /// Is HTTP API enabled? pub http_enabled: bool, + /// Is ACL check enabled. + pub acl_check_enabled: bool, /// This node secret. pub self_secret: Option, /// Other nodes IDs + addresses. @@ -126,6 +128,7 @@ mod server { port: conf.http_port, }) } else { None }, data_path: conf.data_path.clone(), + acl_check_enabled: conf.acl_check_enabled, cluster_config: ethcore_secretstore::ClusterConfiguration { threads: 4, listener_address: ethcore_secretstore::NodeAddress { @@ -160,6 +163,7 @@ impl Default for Configuration { Configuration { enabled: true, http_enabled: true, + acl_check_enabled: true, self_secret: None, nodes: BTreeMap::new(), interface: "127.0.0.1".to_owned(), diff --git a/secret_store/src/acl_storage.rs b/secret_store/src/acl_storage.rs index 37d5bcd25..0a3ee9276 100644 --- a/secret_store/src/acl_storage.rs +++ b/secret_store/src/acl_storage.rs @@ -15,8 +15,9 @@ // along with Parity. 
If not, see . use std::sync::{Arc, Weak}; +use std::collections::{HashMap, HashSet}; use futures::{future, Future}; -use parking_lot::Mutex; +use parking_lot::{Mutex, RwLock}; use ethkey::public_to_address; use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; use native_contracts::SecretStoreAclStorage; @@ -47,6 +48,12 @@ struct CachedContract { contract: Option, } +#[derive(Default, Debug)] +/// Dummy ACL storage implementation (check always passed). +pub struct DummyAclStorage { + prohibited: RwLock>>, +} + impl OnChainAclStorage { pub fn new(client: &Arc) -> Arc { let acl_storage = Arc::new(OnChainAclStorage { @@ -113,36 +120,22 @@ impl CachedContract { } } -#[cfg(test)] -pub mod tests { - use std::collections::{HashMap, HashSet}; - use parking_lot::RwLock; - use types::all::{Error, ServerKeyId, Public}; - use super::AclStorage; - - #[derive(Default, Debug)] - /// Dummy ACL storage implementation - pub struct DummyAclStorage { - prohibited: RwLock>>, - } - - impl DummyAclStorage { - #[cfg(test)] - /// Prohibit given requestor access to given document - pub fn prohibit(&self, public: Public, document: ServerKeyId) { - self.prohibited.write() - .entry(public) - .or_insert_with(Default::default) - .insert(document); - } - } - - impl AclStorage for DummyAclStorage { - fn check(&self, public: &Public, document: &ServerKeyId) -> Result { - Ok(self.prohibited.read() - .get(public) - .map(|docs| !docs.contains(document)) - .unwrap_or(true)) - } +impl DummyAclStorage { + #[cfg(test)] + /// Prohibit given requestor access to given document + pub fn prohibit(&self, public: Public, document: ServerKeyId) { + self.prohibited.write() + .entry(public) + .or_insert_with(Default::default) + .insert(document); + } +} + +impl AclStorage for DummyAclStorage { + fn check(&self, public: &Public, document: &ServerKeyId) -> Result { + Ok(self.prohibited.read() + .get(public) + .map(|docs| !docs.contains(document)) + .unwrap_or(true)) } } diff --git 
a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index 6526bff68..f9fad19df 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -196,7 +196,7 @@ pub mod tests { use std::collections::BTreeMap; use ethcrypto; use ethkey::{self, Secret, Random, Generator}; - use acl_storage::tests::DummyAclStorage; + use acl_storage::DummyAclStorage; use key_storage::tests::DummyKeyStorage; use node_key_pair::PlainNodeKeyPair; use key_server_set::tests::MapKeyServerSet; diff --git a/secret_store/src/key_server_cluster/decryption_session.rs b/secret_store/src/key_server_cluster/decryption_session.rs index 6a806bb92..afc73f858 100644 --- a/secret_store/src/key_server_cluster/decryption_session.rs +++ b/secret_store/src/key_server_cluster/decryption_session.rs @@ -467,7 +467,7 @@ impl Ord for DecryptionSessionId { mod tests { use std::sync::Arc; use std::collections::BTreeMap; - use super::super::super::acl_storage::tests::DummyAclStorage; + use super::super::super::acl_storage::DummyAclStorage; use ethkey::{self, KeyPair, Random, Generator, Public, Secret}; use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, Error, EncryptedDocumentKeyShadow, SessionMeta}; use key_server_cluster::cluster::tests::DummyCluster; diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index 102c3672f..4fcda1539 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -36,7 +36,7 @@ pub use super::node_key_pair::PlainNodeKeyPair; #[cfg(test)] pub use super::key_storage::tests::DummyKeyStorage; #[cfg(test)] -pub use super::acl_storage::tests::DummyAclStorage; +pub use super::acl_storage::DummyAclStorage; #[cfg(test)] pub use super::key_server_set::tests::MapKeyServerSet; diff --git a/secret_store/src/key_server_cluster/signing_session.rs b/secret_store/src/key_server_cluster/signing_session.rs index 00246ae64..e647c8b14 100644 --- 
a/secret_store/src/key_server_cluster/signing_session.rs +++ b/secret_store/src/key_server_cluster/signing_session.rs @@ -572,7 +572,7 @@ mod tests { use std::collections::{BTreeMap, VecDeque}; use ethkey::{self, Random, Generator, Public}; use util::H256; - use super::super::super::acl_storage::tests::DummyAclStorage; + use super::super::super::acl_storage::DummyAclStorage; use key_server_cluster::{NodeId, SessionId, SessionMeta, Error, KeyStorage}; use key_server_cluster::cluster::tests::DummyCluster; use key_server_cluster::generation_session::{Session as GenerationSession}; diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index 20b5eaf6c..2fad4cdf7 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -235,6 +235,7 @@ pub mod tests { let path = RandomTempPath::create_dir(); let config = ServiceConfiguration { listener_address: None, + acl_check_enabled: true, data_path: path.as_str().to_owned(), cluster_config: ClusterConfiguration { threads: 1, diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 6ead7c657..d7f35a55a 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -73,7 +73,11 @@ pub use self::node_key_pair::{PlainNodeKeyPair, KeyStoreNodeKeyPair}; pub fn start(client: Arc, self_key_pair: Arc, config: ServiceConfiguration) -> Result, Error> { use std::sync::Arc; - let acl_storage = acl_storage::OnChainAclStorage::new(&client); + let acl_storage: Arc = if config.acl_check_enabled { + acl_storage::OnChainAclStorage::new(&client) + } else { + Arc::new(acl_storage::DummyAclStorage::default()) + }; let key_server_set = key_server_set::OnChainKeyServerSet::new(&client, config.cluster_config.nodes.clone())?; let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?); let key_server = key_server::KeyServerImpl::new(&config.cluster_config, key_server_set, self_key_pair, acl_storage, key_storage)?; diff --git a/secret_store/src/types/all.rs 
b/secret_store/src/types/all.rs index 6bc0d9c87..6867c82f3 100644 --- a/secret_store/src/types/all.rs +++ b/secret_store/src/types/all.rs @@ -71,6 +71,8 @@ pub struct NodeAddress { pub struct ServiceConfiguration { /// HTTP listener address. If None, HTTP API is disabled. pub listener_address: Option, + /// Is ACL check enabled. If false, everyone has access to all keys. Useful for tests only. + pub acl_check_enabled: bool, /// Data directory path for secret store pub data_path: String, /// Cluster configuration. From 47c058a337f760e7f3435fc6d5dbaca12340da66 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 2 Aug 2017 12:05:47 +0300 Subject: [PATCH 025/112] fixed warning --- secret_store/src/key_server_cluster/io/deadline.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/secret_store/src/key_server_cluster/io/deadline.rs b/secret_store/src/key_server_cluster/io/deadline.rs index 501a69057..10f485b7d 100644 --- a/secret_store/src/key_server_cluster/io/deadline.rs +++ b/secret_store/src/key_server_cluster/io/deadline.rs @@ -19,7 +19,7 @@ use std::time::Duration; use futures::{Future, Select, BoxFuture, Poll, Async}; use tokio_core::reactor::{Handle, Timeout}; -type DeadlineBox where F: Future = BoxFuture, F::Error>; +type DeadlineBox = BoxFuture::Item>, ::Error>; /// Complete a passed future or fail if it is not completed within timeout. 
pub fn deadline(duration: Duration, handle: &Handle, future: F) -> Result, io::Error> From b0f9d73f6a55e705d96317f2ccdb011b6fedd1da Mon Sep 17 00:00:00 2001 From: debris Date: Thu, 3 Aug 2017 16:42:56 +0200 Subject: [PATCH 026/112] InstantSeal fix --- ethcore/src/engines/instant_seal.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ethcore/src/engines/instant_seal.rs b/ethcore/src/engines/instant_seal.rs index 114e27549..976f815e0 100644 --- a/ethcore/src/engines/instant_seal.rs +++ b/ethcore/src/engines/instant_seal.rs @@ -19,7 +19,7 @@ use util::Address; use builtin::Builtin; use engines::{Engine, Seal}; use spec::CommonParams; -use block::ExecutedBlock; +use block::{ExecutedBlock, IsBlock}; /// An engine which does not provide any consensus mechanism, just seals blocks internally. pub struct InstantSeal { @@ -56,8 +56,8 @@ impl Engine for InstantSeal { fn seals_internally(&self) -> Option { Some(true) } - fn generate_seal(&self, _block: &ExecutedBlock) -> Seal { - Seal::Regular(Vec::new()) + fn generate_seal(&self, block: &ExecutedBlock) -> Seal { + if block.transactions().is_empty() { Seal::None } else { Seal::Regular(Vec::new()) } } } From 6b3f5c977aafb753a360830cd1a797430a76ae8a Mon Sep 17 00:00:00 2001 From: NikVolf Date: Thu, 3 Aug 2017 21:35:51 +0300 Subject: [PATCH 027/112] overflow check in addition --- util/rlp/src/error.rs | 2 ++ util/rlp/src/untrusted_rlp.rs | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/util/rlp/src/error.rs b/util/rlp/src/error.rs index 6b15ea8a6..5113fdc17 100644 --- a/util/rlp/src/error.rs +++ b/util/rlp/src/error.rs @@ -30,6 +30,8 @@ pub enum DecoderError { RlpInvalidIndirection, /// Declared length is inconsistent with data specified after. RlpInconsistentLengthAndData, + /// Declared length is invalid and results in overflow + RlpInvalidLength, /// Custom rlp decoding error. 
Custom(&'static str), } diff --git a/util/rlp/src/untrusted_rlp.rs b/util/rlp/src/untrusted_rlp.rs index 16714fe98..e7eb44f5d 100644 --- a/util/rlp/src/untrusted_rlp.rs +++ b/util/rlp/src/untrusted_rlp.rs @@ -371,7 +371,8 @@ impl<'a> BasicDecoder<'a> { } let len = decode_usize(&bytes[1..begin_of_value])?; - let last_index_of_value = begin_of_value + len; + let last_index_of_value = begin_of_value.overflowing_add(len) + .ok_or(DecoderError::RlpInvalidLength)?; if bytes.len() < last_index_of_value { return Err(DecoderError::RlpInconsistentLengthAndData); } From d30e47a50e73cb8c7c1298615e431877a153c1b0 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Thu, 3 Aug 2017 21:40:16 +0300 Subject: [PATCH 028/112] add test --- util/rlp/src/untrusted_rlp.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/util/rlp/src/untrusted_rlp.rs b/util/rlp/src/untrusted_rlp.rs index e7eb44f5d..41149606c 100644 --- a/util/rlp/src/untrusted_rlp.rs +++ b/util/rlp/src/untrusted_rlp.rs @@ -371,7 +371,7 @@ impl<'a> BasicDecoder<'a> { } let len = decode_usize(&bytes[1..begin_of_value])?; - let last_index_of_value = begin_of_value.overflowing_add(len) + let last_index_of_value = begin_of_value.checked_add(len) .ok_or(DecoderError::RlpInvalidLength)?; if bytes.len() < last_index_of_value { return Err(DecoderError::RlpInconsistentLengthAndData); @@ -386,7 +386,7 @@ impl<'a> BasicDecoder<'a> { #[cfg(test)] mod tests { - use UntrustedRlp; + use {UntrustedRlp, DecoderError}; #[test] fn test_rlp_display() { @@ -395,4 +395,12 @@ mod tests { let rlp = UntrustedRlp::new(&data); assert_eq!(format!("{}", rlp), "[\"0x05\", \"0x010efbef67941f79b2\", \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\", \"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470\"]"); } + + #[test] + fn length_overflow() { + let bs = [0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe5]; + let rlp = UntrustedRlp::new(&bs); + let res: Result = 
rlp.as_val(); + assert_eq!(Err(DecoderError::RlpInvalidLength), res); + } } From 63f8cc350384b106741868a8b807a0516f1af741 Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 4 Aug 2017 13:06:01 +0200 Subject: [PATCH 029/112] price-info does not depend on util --- Cargo.lock | 2 +- price-info/Cargo.toml | 2 +- price-info/src/lib.rs | 205 +++++++++++++++++++++++++++++++- price-info/src/price_info.rs | 222 ----------------------------------- 4 files changed, 205 insertions(+), 226 deletions(-) delete mode 100644 price-info/src/price_info.rs diff --git a/Cargo.lock b/Cargo.lock index cc8c7d3c7..2487579d5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2208,10 +2208,10 @@ dependencies = [ name = "price-info" version = "1.7.0" dependencies = [ - "ethcore-util 1.8.0", "fetch 0.1.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/price-info/Cargo.toml b/price-info/Cargo.toml index b0226df8e..a6cc13eb7 100644 --- a/price-info/Cargo.toml +++ b/price-info/Cargo.toml @@ -13,4 +13,4 @@ log = "0.3" serde_json = "1.0" [dev-dependencies] -ethcore-util = { path = "../util" } +parking_lot = "0.4" diff --git a/price-info/src/lib.rs b/price-info/src/lib.rs index ec6fcfb5d..198711bb1 100644 --- a/price-info/src/lib.rs +++ b/price-info/src/lib.rs @@ -26,6 +26,207 @@ extern crate log; pub extern crate fetch; -mod price_info; +use std::cmp; +use std::fmt; +use std::io; +use std::io::Read; +use std::str::FromStr; -pub use price_info::*; +use fetch::{Client as FetchClient, Fetch}; +use futures::Future; +use serde_json::Value; + +/// Current ETH price information. +#[derive(Debug)] +pub struct PriceInfo { + /// Current ETH price in USD. + pub ethusd: f32, +} + +/// Price info error. 
+#[derive(Debug)] +pub enum Error { + /// The API returned an unexpected status code or content. + UnexpectedResponse(&'static str, String), + /// There was an error when trying to reach the API. + Fetch(fetch::Error), + /// IO error when reading API response. + Io(io::Error), +} + +impl From for Error { + fn from(err: io::Error) -> Self { Error::Io(err) } +} + +impl From for Error { + fn from(err: fetch::Error) -> Self { Error::Fetch(err) } +} + +/// A client to get the current ETH price using an external API. +pub struct Client { + api_endpoint: String, + fetch: F, +} + +impl fmt::Debug for Client { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("price_info::Client") + .field("api_endpoint", &self.api_endpoint) + .finish() + } +} + +impl cmp::PartialEq for Client { + fn eq(&self, other: &Client) -> bool { + self.api_endpoint == other.api_endpoint + } +} + +impl Client { + /// Creates a new instance of the `Client` given a `fetch::Client`. + pub fn new(fetch: F) -> Client { + let api_endpoint = "http://api.etherscan.io/api?module=stats&action=ethprice".to_owned(); + Client { api_endpoint, fetch } + } + + /// Gets the current ETH price and calls `set_price` with the result. 
+ pub fn get(&self, set_price: G) { + self.fetch.forget(self.fetch.fetch(&self.api_endpoint) + .map_err(|err| Error::Fetch(err)) + .and_then(move |mut response| { + let mut result = String::new(); + response.read_to_string(&mut result)?; + + if response.is_success() { + let value: Result = serde_json::from_str(&result); + if let Ok(v) = value { + let obj = v.pointer("/result/ethusd").and_then(|obj| { + match *obj { + Value::String(ref s) => FromStr::from_str(s).ok(), + _ => None, + } + }); + + if let Some(ethusd) = obj { + set_price(PriceInfo { ethusd }); + return Ok(()); + } + } + } + + let status = response.status().canonical_reason().unwrap_or("unknown"); + Err(Error::UnexpectedResponse(status, result)) + }) + .map_err(|err| { + warn!("Failed to auto-update latest ETH price: {:?}", err); + err + }) + ); + } +} + +#[cfg(test)] +mod test { + extern crate parking_lot; + + use self::parking_lot::Mutex; + use std::sync::Arc; + use std::sync::atomic::{AtomicBool, Ordering}; + use fetch; + use fetch::Fetch; + use futures; + use futures::future::{Future, FutureResult}; + use Client; + + #[derive(Clone)] + struct FakeFetch(Option, Arc>); + impl Fetch for FakeFetch { + type Result = FutureResult; + fn new() -> Result where Self: Sized { Ok(FakeFetch(None, Default::default())) } + fn fetch_with_abort(&self, url: &str, _abort: fetch::Abort) -> Self::Result { + assert_eq!(url, "http://api.etherscan.io/api?module=stats&action=ethprice"); + let mut val = self.1.lock(); + *val = *val + 1; + if let Some(ref response) = self.0 { + let data = ::std::io::Cursor::new(response.clone()); + futures::future::ok(fetch::Response::from_reader(data)) + } else { + futures::future::ok(fetch::Response::not_found()) + } + } + + // this guarantees that the calls to price_info::Client::get will block for execution + fn forget(&self, f: F) where + F: Future + Send + 'static, + I: Send + 'static, + E: Send + 'static { + let _ = f.wait(); + } + } + + fn price_info_ok(response: &str) -> Client { + 
Client::new(FakeFetch(Some(response.to_owned()), Default::default())) + } + + fn price_info_not_found() -> Client { + Client::new(FakeFetch::new().unwrap()) + } + + #[test] + fn should_get_price_info() { + // given + let response = r#"{ + "status": "1", + "message": "OK", + "result": { + "ethbtc": "0.0891", + "ethbtc_timestamp": "1499894236", + "ethusd": "209.55", + "ethusd_timestamp": "1499894229" + } + }"#; + + let price_info = price_info_ok(response); + + // when + price_info.get(|price| { + + // then + assert_eq!(price.ethusd, 209.55); + }); + } + + #[test] + fn should_not_call_set_price_if_response_is_malformed() { + // given + let response = "{}"; + + let price_info = price_info_ok(response); + let b = Arc::new(AtomicBool::new(false)); + + // when + let bb = b.clone(); + price_info.get(move |_| { + bb.store(true, Ordering::Relaxed); + }); + + // then + assert_eq!(b.load(Ordering::Relaxed), false); + } + + #[test] + fn should_not_call_set_price_if_response_is_invalid() { + // given + let price_info = price_info_not_found(); + let b = Arc::new(AtomicBool::new(false)); + + // when + let bb = b.clone(); + price_info.get(move |_| { + bb.store(true, Ordering::Relaxed); + }); + + // then + assert_eq!(b.load(Ordering::Relaxed), false); + } +} diff --git a/price-info/src/price_info.rs b/price-info/src/price_info.rs deleted file mode 100644 index 36ca033d2..000000000 --- a/price-info/src/price_info.rs +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use std::cmp; -use std::fmt; -use std::io; -use std::io::Read; -use std::str::FromStr; - -use fetch; -use fetch::{Client as FetchClient, Fetch}; -use futures::Future; -use serde_json; -use serde_json::Value; - -/// Current ETH price information. -#[derive(Debug)] -pub struct PriceInfo { - /// Current ETH price in USD. - pub ethusd: f32, -} - -/// Price info error. -#[derive(Debug)] -pub enum Error { - /// The API returned an unexpected status code or content. - UnexpectedResponse(&'static str, String), - /// There was an error when trying to reach the API. - Fetch(fetch::Error), - /// IO error when reading API response. - Io(io::Error), -} - -impl From for Error { - fn from(err: io::Error) -> Self { Error::Io(err) } -} - -impl From for Error { - fn from(err: fetch::Error) -> Self { Error::Fetch(err) } -} - -/// A client to get the current ETH price using an external API. -pub struct Client { - api_endpoint: String, - fetch: F, -} - -impl fmt::Debug for Client { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("price_info::Client") - .field("api_endpoint", &self.api_endpoint) - .finish() - } -} - -impl cmp::PartialEq for Client { - fn eq(&self, other: &Client) -> bool { - self.api_endpoint == other.api_endpoint - } -} - -impl Client { - /// Creates a new instance of the `Client` given a `fetch::Client`. - pub fn new(fetch: F) -> Client { - let api_endpoint = "http://api.etherscan.io/api?module=stats&action=ethprice".to_owned(); - Client { api_endpoint, fetch } - } - - /// Gets the current ETH price and calls `set_price` with the result. 
- pub fn get(&self, set_price: G) { - self.fetch.forget(self.fetch.fetch(&self.api_endpoint) - .map_err(|err| Error::Fetch(err)) - .and_then(move |mut response| { - let mut result = String::new(); - response.read_to_string(&mut result)?; - - if response.is_success() { - let value: Result = serde_json::from_str(&result); - if let Ok(v) = value { - let obj = v.pointer("/result/ethusd").and_then(|obj| { - match *obj { - Value::String(ref s) => FromStr::from_str(s).ok(), - _ => None, - } - }); - - if let Some(ethusd) = obj { - set_price(PriceInfo { ethusd }); - return Ok(()); - } - } - } - - let status = response.status().canonical_reason().unwrap_or("unknown"); - Err(Error::UnexpectedResponse(status, result)) - }) - .map_err(|err| { - warn!("Failed to auto-update latest ETH price: {:?}", err); - err - }) - ); - } -} - -#[cfg(test)] -mod test { - extern crate ethcore_util as util; - - use self::util::Mutex; - use std::sync::Arc; - use std::sync::atomic::{AtomicBool, Ordering}; - use fetch; - use fetch::Fetch; - use futures; - use futures::future::{Future, FutureResult}; - use price_info::Client; - - #[derive(Clone)] - struct FakeFetch(Option, Arc>); - impl Fetch for FakeFetch { - type Result = FutureResult; - fn new() -> Result where Self: Sized { Ok(FakeFetch(None, Default::default())) } - fn fetch_with_abort(&self, url: &str, _abort: fetch::Abort) -> Self::Result { - assert_eq!(url, "http://api.etherscan.io/api?module=stats&action=ethprice"); - let mut val = self.1.lock(); - *val = *val + 1; - if let Some(ref response) = self.0 { - let data = ::std::io::Cursor::new(response.clone()); - futures::future::ok(fetch::Response::from_reader(data)) - } else { - futures::future::ok(fetch::Response::not_found()) - } - } - - // this guarantees that the calls to price_info::Client::get will block for execution - fn forget(&self, f: F) where - F: Future + Send + 'static, - I: Send + 'static, - E: Send + 'static { - let _ = f.wait(); - } - } - - fn price_info_ok(response: &str) -> 
Client { - Client::new(FakeFetch(Some(response.to_owned()), Default::default())) - } - - fn price_info_not_found() -> Client { - Client::new(FakeFetch::new().unwrap()) - } - - #[test] - fn should_get_price_info() { - // given - let response = r#"{ - "status": "1", - "message": "OK", - "result": { - "ethbtc": "0.0891", - "ethbtc_timestamp": "1499894236", - "ethusd": "209.55", - "ethusd_timestamp": "1499894229" - } - }"#; - - let price_info = price_info_ok(response); - - // when - price_info.get(|price| { - - // then - assert_eq!(price.ethusd, 209.55); - }); - } - - #[test] - fn should_not_call_set_price_if_response_is_malformed() { - // given - let response = "{}"; - - let price_info = price_info_ok(response); - let b = Arc::new(AtomicBool::new(false)); - - // when - let bb = b.clone(); - price_info.get(move |_| { - bb.store(true, Ordering::Relaxed); - }); - - // then - assert_eq!(b.load(Ordering::Relaxed), false); - } - - #[test] - fn should_not_call_set_price_if_response_is_invalid() { - // given - let price_info = price_info_not_found(); - let b = Arc::new(AtomicBool::new(false)); - - // when - let bb = b.clone(); - price_info.get(move |_| { - bb.store(true, Ordering::Relaxed); - }); - - // then - assert_eq!(b.load(Ordering::Relaxed), false); - } -} From f72196f1bb904a2f8337af73c02e80f4d44c33b1 Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 4 Aug 2017 13:39:57 +0200 Subject: [PATCH 030/112] a bit more idiomatic price-info --- price-info/src/lib.rs | 40 ++++++++++++++++++++-------------------- util/fetch/src/client.rs | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/price-info/src/lib.rs b/price-info/src/lib.rs index 198711bb1..ba5719f40 100644 --- a/price-info/src/lib.rs +++ b/price-info/src/lib.rs @@ -30,7 +30,6 @@ use std::cmp; use std::fmt; use std::io; use std::io::Read; -use std::str::FromStr; use fetch::{Client as FetchClient, Fetch}; use futures::Future; @@ -46,8 +45,10 @@ pub struct PriceInfo { /// Price info error. 
#[derive(Debug)] pub enum Error { - /// The API returned an unexpected status code or content. - UnexpectedResponse(&'static str, String), + /// The API returned an unexpected status code. + StatusCode(&'static str), + /// The API returned an unexpected status content. + UnexpectedResponse(String), /// There was an error when trying to reach the API. Fetch(fetch::Error), /// IO error when reading API response. @@ -94,28 +95,27 @@ impl Client { self.fetch.forget(self.fetch.fetch(&self.api_endpoint) .map_err(|err| Error::Fetch(err)) .and_then(move |mut response| { + if !response.is_success() { + return Err(Error::StatusCode(response.status().canonical_reason().unwrap_or("unknown"))); + } let mut result = String::new(); response.read_to_string(&mut result)?; - if response.is_success() { - let value: Result = serde_json::from_str(&result); - if let Ok(v) = value { - let obj = v.pointer("/result/ethusd").and_then(|obj| { - match *obj { - Value::String(ref s) => FromStr::from_str(s).ok(), - _ => None, - } - }); + let value: Option = serde_json::from_str(&result).ok(); - if let Some(ethusd) = obj { - set_price(PriceInfo { ethusd }); - return Ok(()); - } - } + let ethusd = value + .as_ref() + .and_then(|value| value.pointer("/result/ethusd")) + .and_then(|obj| obj.as_str()) + .and_then(|s| s.parse().ok()); + + match ethusd { + Some(ethusd) => { + set_price(PriceInfo { ethusd }); + Ok(()) + }, + None => Err(Error::UnexpectedResponse(result)), } - - let status = response.status().canonical_reason().unwrap_or("unknown"); - Err(Error::UnexpectedResponse(status, result)) }) .map_err(|err| { warn!("Failed to auto-update latest ETH price: {:?}", err); diff --git a/util/fetch/src/client.rs b/util/fetch/src/client.rs index 64193639a..a8edcf482 100644 --- a/util/fetch/src/client.rs +++ b/util/fetch/src/client.rs @@ -297,7 +297,7 @@ impl Response { /// Returns `true` if response status code is successful. 
pub fn is_success(&self) -> bool { - self.status() == reqwest::StatusCode::Ok + self.status().is_success() } /// Returns `true` if content type of this response is `text/html` From 7ddfd2f0301b85c847df4bdaa1545a36d6533942 Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 4 Aug 2017 14:12:27 +0200 Subject: [PATCH 031/112] revert fetch is_success changes --- util/fetch/src/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/fetch/src/client.rs b/util/fetch/src/client.rs index a8edcf482..64193639a 100644 --- a/util/fetch/src/client.rs +++ b/util/fetch/src/client.rs @@ -297,7 +297,7 @@ impl Response { /// Returns `true` if response status code is successful. pub fn is_success(&self) -> bool { - self.status().is_success() + self.status() == reqwest::StatusCode::Ok } /// Returns `true` if content type of this response is `text/html` From 35bfbc39f858f82961ccfa2bb3024dea90d5ca1e Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 4 Aug 2017 15:45:47 +0200 Subject: [PATCH 032/112] native-contracts crate does not depend on util any more --- Cargo.lock | 2 +- ethcore/native_contracts/Cargo.toml | 2 +- ethcore/native_contracts/generator/src/lib.rs | 42 +++++++++---------- ethcore/native_contracts/src/lib.rs | 2 +- 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cc8c7d3c7..92d3bd5c5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1580,7 +1580,7 @@ version = "0.1.0" dependencies = [ "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore-util 1.8.0", + "ethcore-bigint 0.1.3", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "native-contract-generator 0.1.0", ] diff --git a/ethcore/native_contracts/Cargo.toml b/ethcore/native_contracts/Cargo.toml index 5dc18c8f5..2f91a4848 100644 --- a/ethcore/native_contracts/Cargo.toml +++ b/ethcore/native_contracts/Cargo.toml @@ -9,7 +9,7 @@ build = 
"build.rs" ethabi = "2.0" futures = "0.1" byteorder = "1.0" -ethcore-util = { path = "../../util" } +ethcore-bigint = { path = "../../util/bigint" } [build-dependencies] native-contract-generator = { path = "generator" } diff --git a/ethcore/native_contracts/generator/src/lib.rs b/ethcore/native_contracts/generator/src/lib.rs index 793ad6085..996ee4969 100644 --- a/ethcore/native_contracts/generator/src/lib.rs +++ b/ethcore/native_contracts/generator/src/lib.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see . //! Rust code contract generator. -//! The code generated will require a dependence on the `ethcore-util`, +//! The code generated will require a dependence on the `ethcore-bigint::prelude`, //! `ethabi`, `byteorder`, and `futures` crates. //! This currently isn't hygienic, so compilation of generated code may fail //! due to missing crates or name collisions. This will change when @@ -48,14 +48,14 @@ pub fn generate_module(struct_name: &str, abi: &str) -> Result { use byteorder::{{BigEndian, ByteOrder}}; use futures::{{future, Future, IntoFuture, BoxFuture}}; use ethabi::{{Contract, Interface, Token, Event}}; -use util; +use bigint; /// Generated Rust bindings to an Ethereum contract. #[derive(Clone, Debug)] pub struct {name} {{ contract: Contract, /// Address to make calls to. - pub address: util::Address, + pub address: bigint::prelude::H160, }} const ABI: &'static str = r#"{abi_str}"#; @@ -63,7 +63,7 @@ const ABI: &'static str = r#"{abi_str}"#; impl {name} {{ /// Create a new instance of `{name}` with an address. /// Calls can be made, given a callback for dispatching calls asynchronously. 
- pub fn new(address: util::Address) -> Self {{ + pub fn new(address: bigint::prelude::H160) -> Self {{ let contract = Contract::new(Interface::load(ABI.as_bytes()) .expect("ABI checked at generation-time; qed")); {name} {{ @@ -108,7 +108,7 @@ fn generate_functions(contract: &Contract) -> Result { /// Outputs: {abi_outputs:?} pub fn {snake_name}(&self, call: F, {params}) -> BoxFuture<{output_type}, String> where - F: FnOnce(util::Address, Vec) -> U, + F: FnOnce(bigint::prelude::H160, Vec) -> U, U: IntoFuture, Error=String>, U::Future: Send + 'static {{ @@ -217,8 +217,8 @@ fn output_params_codegen(outputs: &[ParamType]) -> Result<(String, String), Para // create code for an argument type from param type. fn rust_type(input: ParamType) -> Result { Ok(match input { - ParamType::Address => "util::Address".into(), - ParamType::FixedBytes(len) if len <= 32 => format!("util::H{}", len * 8), + ParamType::Address => "bigint::prelude::H160".into(), + ParamType::FixedBytes(len) if len <= 32 => format!("bigint::prelude::H{}", len * 8), ParamType::Bytes | ParamType::FixedBytes(_) => "Vec".into(), ParamType::Int(width) => match width { 8 | 16 | 32 | 64 => format!("i{}", width), @@ -226,7 +226,7 @@ fn rust_type(input: ParamType) -> Result { }, ParamType::Uint(width) => match width { 8 | 16 | 32 | 64 => format!("u{}", width), - 128 | 160 | 256 => format!("util::U{}", width), + 128 | 160 | 256 => format!("bigint::prelude::U{}", width), _ => return Err(ParamType::Uint(width)), }, ParamType::Bool => "bool".into(), @@ -259,8 +259,8 @@ fn tokenize(name: &str, input: ParamType) -> (bool, String) { }, ParamType::Uint(width) => format!( "let mut r = [0; 32]; {}.to_big_endian(&mut r); Token::Uint(r)", - if width <= 64 { format!("util::U256::from({} as u64)", name) } - else { format!("util::U256::from({})", name) } + if width <= 64 { format!("bigint::prelude::U256::from({} as u64)", name) } + else { format!("bigint::prelude::U256::from({})", name) } ), ParamType::Bool => 
format!("Token::Bool({})", name), ParamType::String => format!("Token::String({})", name), @@ -281,11 +281,11 @@ fn tokenize(name: &str, input: ParamType) -> (bool, String) { // panics on unsupported types. fn detokenize(name: &str, output_type: ParamType) -> String { match output_type { - ParamType::Address => format!("{}.to_address().map(util::H160)", name), + ParamType::Address => format!("{}.to_address().map(bigint::prelude::H160)", name), ParamType::Bytes => format!("{}.to_bytes()", name), ParamType::FixedBytes(len) if len <= 32 => { // ensure no panic on slice too small. - let read_hash = format!("b.resize({}, 0); util::H{}::from_slice(&b[..{}])", + let read_hash = format!("b.resize({}, 0); bigint::prelude::H{}::from_slice(&b[..{}])", len, len * 8, len); format!("{}.to_fixed_bytes().map(|mut b| {{ {} }})", @@ -302,8 +302,8 @@ fn detokenize(name: &str, output_type: ParamType) -> String { } ParamType::Uint(width) => { let read_uint = match width { - 8 | 16 | 32 | 64 => format!("util::U256(u).low_u64() as u{}", width), - _ => format!("util::U{}::from(&u[..])", width), + 8 | 16 | 32 | 64 => format!("bigint::prelude::U256(u).low_u64() as u{}", width), + _ => format!("bigint::prelude::U{}::from(&u[..])", width), }; format!("{}.to_uint().map(|u| {})", name, read_uint) @@ -328,30 +328,30 @@ mod tests { #[test] fn input_types() { assert_eq!(::input_params_codegen(&[]).unwrap().0, ""); - assert_eq!(::input_params_codegen(&[ParamType::Address]).unwrap().0, "param_0: util::Address, "); + assert_eq!(::input_params_codegen(&[ParamType::Address]).unwrap().0, "param_0: bigint::prelude::H160, "); assert_eq!(::input_params_codegen(&[ParamType::Address, ParamType::Bytes]).unwrap().0, - "param_0: util::Address, param_1: Vec, "); + "param_0: bigint::prelude::H160, param_1: Vec, "); } #[test] fn output_types() { assert_eq!(::output_params_codegen(&[]).unwrap().0, "()"); - assert_eq!(::output_params_codegen(&[ParamType::Address]).unwrap().0, "(util::Address)"); + 
assert_eq!(::output_params_codegen(&[ParamType::Address]).unwrap().0, "(bigint::prelude::H160)"); assert_eq!(::output_params_codegen(&[ParamType::Address, ParamType::Array(Box::new(ParamType::Bytes))]).unwrap().0, - "(util::Address, Vec>)"); + "(bigint::prelude::H160, Vec>)"); } #[test] fn rust_type() { - assert_eq!(::rust_type(ParamType::FixedBytes(32)).unwrap(), "util::H256"); + assert_eq!(::rust_type(ParamType::FixedBytes(32)).unwrap(), "bigint::prelude::H256"); assert_eq!(::rust_type(ParamType::Array(Box::new(ParamType::FixedBytes(32)))).unwrap(), - "Vec"); + "Vec"); assert_eq!(::rust_type(ParamType::Uint(64)).unwrap(), "u64"); assert!(::rust_type(ParamType::Uint(63)).is_err()); assert_eq!(::rust_type(ParamType::Int(32)).unwrap(), "i32"); - assert_eq!(::rust_type(ParamType::Uint(256)).unwrap(), "util::U256"); + assert_eq!(::rust_type(ParamType::Uint(256)).unwrap(), "bigint::prelude::U256"); } // codegen tests will need bootstrapping of some kind. diff --git a/ethcore/native_contracts/src/lib.rs b/ethcore/native_contracts/src/lib.rs index e35a4ec19..9b73ace79 100644 --- a/ethcore/native_contracts/src/lib.rs +++ b/ethcore/native_contracts/src/lib.rs @@ -21,7 +21,7 @@ extern crate futures; extern crate byteorder; extern crate ethabi; -extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; mod registry; mod urlhint; From f157461ee19740c8d013bb9885d3aa0aa5f91a8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 4 Aug 2017 15:58:14 +0200 Subject: [PATCH 033/112] Multi-call RPC (#6195) * Removing duplicated pending state accessors in miner. * Merge miner+client call. * Multicall & multicall RPC. * Sensible defaults. * Fix tests. 
--- ethcore/src/client/client.rs | 126 ++++++++++++++---- ethcore/src/client/test_client.rs | 31 +++-- ethcore/src/client/traits.rs | 6 +- ethcore/src/engines/tendermint/mod.rs | 2 +- ethcore/src/miner/miner.rs | 113 ++++------------ ethcore/src/miner/mod.rs | 21 +-- ethcore/types/src/block_status.rs | 2 + rpc/src/v1/impls/eth.rs | 75 +++-------- rpc/src/v1/impls/light/parity.rs | 6 +- rpc/src/v1/impls/light/trace.rs | 12 +- rpc/src/v1/impls/parity.rs | 28 +++- rpc/src/v1/impls/traces.rs | 30 ++++- rpc/src/v1/tests/helpers/miner_service.rs | 41 +----- rpc/src/v1/tests/mocked/eth.rs | 5 +- rpc/src/v1/tests/mocked/parity.rs | 41 +++++- rpc/src/v1/tests/mocked/traces.rs | 10 ++ rpc/src/v1/traits/parity.rs | 6 +- rpc/src/v1/traits/traces.rs | 12 +- rpc/src/v1/types/mod.rs | 4 + .../src/key_server_cluster/io/deadline.rs | 4 +- sync/src/block_sync.rs | 2 +- sync/src/chain.rs | 2 +- 22 files changed, 308 insertions(+), 271 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 94a5e09f3..7141c7bdb 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -906,7 +906,7 @@ impl Client { pub fn state_at(&self, id: BlockId) -> Option> { // fast path for latest state. 
match id.clone() { - BlockId::Pending => return self.miner.pending_state().or_else(|| Some(self.state())), + BlockId::Pending => return self.miner.pending_state(self.chain.read().best_block_number()).or_else(|| Some(self.state())), BlockId::Latest => return Some(self.state()), _ => {}, } @@ -1055,19 +1055,20 @@ impl Client { self.history } - fn block_hash(chain: &BlockChain, id: BlockId) -> Option { + fn block_hash(chain: &BlockChain, miner: &Miner, id: BlockId) -> Option { match id { BlockId::Hash(hash) => Some(hash), BlockId::Number(number) => chain.block_hash(number), BlockId::Earliest => chain.block_hash(0), - BlockId::Latest | BlockId::Pending => Some(chain.best_block_hash()), + BlockId::Latest => Some(chain.best_block_hash()), + BlockId::Pending => miner.pending_block_header(chain.best_block_number()).map(|header| header.hash()) } } fn transaction_address(&self, id: TransactionId) -> Option { match id { TransactionId::Hash(ref hash) => self.chain.read().transaction_address(hash), - TransactionId::Location(id, index) => Self::block_hash(&self.chain.read(), id).map(|hash| TransactionAddress { + TransactionId::Location(id, index) => Self::block_hash(&self.chain.read(), &self.miner, id).map(|hash| TransactionAddress { block_hash: hash, index: index, }) @@ -1110,6 +1111,31 @@ impl Client { data: data, }.fake_sign(from) } + + fn do_call(&self, env_info: &EnvInfo, state: &mut State, increase_balance: bool, t: &SignedTransaction, analytics: CallAnalytics) -> Result { + let original_state = if analytics.state_diffing { Some(state.clone()) } else { None }; + + // give the sender a sufficient balance (if calling in pending block) + if increase_balance { + let sender = t.sender(); + let balance = state.balance(&sender).map_err(ExecutionError::from)?; + let needed_balance = t.value + t.gas * t.gas_price; + if balance < needed_balance { + state.add_balance(&sender, &(needed_balance - balance), state::CleanupMode::NoEmpty) + .map_err(ExecutionError::from)?; + } + } + + let 
options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; + let mut ret = Executive::new(state, env_info, &*self.engine).transact_virtual(t, options)?; + + // TODO gav move this into Executive. + if let Some(original) = original_state { + ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?); + } + + Ok(ret) + } } impl snapshot::DatabaseRestore for Client { @@ -1134,23 +1160,31 @@ impl snapshot::DatabaseRestore for Client { } impl BlockChainClient for Client { - fn call(&self, t: &SignedTransaction, block: BlockId, analytics: CallAnalytics) -> Result { + fn call(&self, transaction: &SignedTransaction, analytics: CallAnalytics, block: BlockId) -> Result { let mut env_info = self.env_info(block).ok_or(CallError::StatePruned)?; env_info.gas_limit = U256::max_value(); // that's just a copy of the state. let mut state = self.state_at(block).ok_or(CallError::StatePruned)?; - let original_state = if analytics.state_diffing { Some(state.clone()) } else { None }; - let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; - let mut ret = Executive::new(&mut state, &env_info, &*self.engine).transact_virtual(t, options)?; + self.do_call(&env_info, &mut state, block == BlockId::Pending, transaction, analytics) + } - // TODO gav move this into Executive. - if let Some(original) = original_state { - ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?); + fn call_many(&self, transactions: &[(SignedTransaction, CallAnalytics)], block: BlockId) -> Result, CallError> { + let mut env_info = self.env_info(block).ok_or(CallError::StatePruned)?; + env_info.gas_limit = U256::max_value(); + + // that's just a copy of the state. 
+ let mut state = self.state_at(block).ok_or(CallError::StatePruned)?; + let mut results = Vec::with_capacity(transactions.len()); + + for &(ref t, analytics) in transactions { + let ret = self.do_call(&env_info, &mut state, block == BlockId::Pending, t, analytics)?; + env_info.gas_used = ret.cumulative_gas_used; + results.push(ret); } - Ok(ret) + Ok(results) } fn estimate_gas(&self, t: &SignedTransaction, block: BlockId) -> Result { @@ -1303,7 +1337,16 @@ impl BlockChainClient for Client { fn block_header(&self, id: BlockId) -> Option<::encoded::Header> { let chain = self.chain.read(); - Self::block_hash(&chain, id).and_then(|hash| chain.block_header_data(&hash)) + + if let BlockId::Pending = id { + if let Some(block) = self.miner.pending_block(chain.best_block_number()) { + return Some(encoded::Header::new(block.header.rlp(Seal::Without))); + } + // fall back to latest + return self.block_header(BlockId::Latest); + } + + Self::block_hash(&chain, &self.miner, id).and_then(|hash| chain.block_header_data(&hash)) } fn block_number(&self, id: BlockId) -> Option { @@ -1311,30 +1354,48 @@ impl BlockChainClient for Client { BlockId::Number(number) => Some(number), BlockId::Hash(ref hash) => self.chain.read().block_number(hash), BlockId::Earliest => Some(0), - BlockId::Latest | BlockId::Pending => Some(self.chain.read().best_block_number()), + BlockId::Latest => Some(self.chain.read().best_block_number()), + BlockId::Pending => Some(self.chain.read().best_block_number() + 1), } } fn block_body(&self, id: BlockId) -> Option { let chain = self.chain.read(); - Self::block_hash(&chain, id).and_then(|hash| chain.block_body(&hash)) + + if let BlockId::Pending = id { + if let Some(block) = self.miner.pending_block(chain.best_block_number()) { + return Some(encoded::Body::new(BlockChain::block_to_body(&block.rlp_bytes(Seal::Without)))); + } + // fall back to latest + return self.block_body(BlockId::Latest); + } + + Self::block_hash(&chain, &self.miner, id).and_then(|hash| 
chain.block_body(&hash)) } fn block(&self, id: BlockId) -> Option { + let chain = self.chain.read(); + if let BlockId::Pending = id { - if let Some(block) = self.miner.pending_block() { + if let Some(block) = self.miner.pending_block(chain.best_block_number()) { return Some(encoded::Block::new(block.rlp_bytes(Seal::Without))); } + // fall back to latest + return self.block(BlockId::Latest); } - let chain = self.chain.read(); - Self::block_hash(&chain, id).and_then(|hash| { + + Self::block_hash(&chain, &self.miner, id).and_then(|hash| { chain.block(&hash) }) } fn block_status(&self, id: BlockId) -> BlockStatus { + if let BlockId::Pending = id { + return BlockStatus::Pending; + } + let chain = self.chain.read(); - match Self::block_hash(&chain, id) { + match Self::block_hash(&chain, &self.miner, id) { Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain, Some(hash) => self.block_queue.status(&hash).into(), None => BlockStatus::Unknown @@ -1342,13 +1403,18 @@ impl BlockChainClient for Client { } fn block_total_difficulty(&self, id: BlockId) -> Option { - if let BlockId::Pending = id { - if let Some(block) = self.miner.pending_block() { - return Some(*block.header.difficulty() + self.block_total_difficulty(BlockId::Latest).expect("blocks in chain have details; qed")); - } - } let chain = self.chain.read(); - Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty) + if let BlockId::Pending = id { + let latest_difficulty = self.block_total_difficulty(BlockId::Latest).expect("blocks in chain have details; qed"); + let pending_difficulty = self.miner.pending_block_header(chain.best_block_number()).map(|header| *header.difficulty()); + if let Some(difficulty) = pending_difficulty { + return Some(difficulty + latest_difficulty); + } + // fall back to latest + return Some(latest_difficulty); + } + + Self::block_hash(&chain, &self.miner, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty) } fn 
nonce(&self, address: &Address, id: BlockId) -> Option { @@ -1361,7 +1427,7 @@ impl BlockChainClient for Client { fn block_hash(&self, id: BlockId) -> Option { let chain = self.chain.read(); - Self::block_hash(&chain, id) + Self::block_hash(&chain, &self.miner, id) } fn code(&self, address: &Address, id: BlockId) -> Option> { @@ -1526,7 +1592,8 @@ impl BlockChainClient for Client { if self.chain.read().is_known(&unverified.hash()) { return Err(BlockImportError::Import(ImportError::AlreadyInChain)); } - if self.block_status(BlockId::Hash(unverified.parent_hash())) == BlockStatus::Unknown { + let status = self.block_status(BlockId::Hash(unverified.parent_hash())); + if status == BlockStatus::Unknown || status == BlockStatus::Pending { return Err(BlockImportError::Block(BlockError::UnknownParent(unverified.parent_hash()))); } } @@ -1540,7 +1607,8 @@ impl BlockChainClient for Client { if self.chain.read().is_known(&header.hash()) { return Err(BlockImportError::Import(ImportError::AlreadyInChain)); } - if self.block_status(BlockId::Hash(header.parent_hash())) == BlockStatus::Unknown { + let status = self.block_status(BlockId::Hash(header.parent_hash())); + if status == BlockStatus::Unknown || status == BlockStatus::Pending { return Err(BlockImportError::Block(BlockError::UnknownParent(header.parent_hash()))); } } @@ -1686,7 +1754,7 @@ impl BlockChainClient for Client { fn call_contract(&self, block_id: BlockId, address: Address, data: Bytes) -> Result { let transaction = self.contract_call_tx(block_id, address, data); - self.call(&transaction, block_id, Default::default()) + self.call(&transaction, Default::default(), block_id) .map_err(|e| format!("{:?}", e)) .map(|executed| { executed.output diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 38b0b50f4..2a205868d 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -401,10 +401,18 @@ impl MiningBlockChainClient for TestBlockChainClient { } impl 
BlockChainClient for TestBlockChainClient { - fn call(&self, _t: &SignedTransaction, _block: BlockId, _analytics: CallAnalytics) -> Result { + fn call(&self, _t: &SignedTransaction, _analytics: CallAnalytics, _block: BlockId) -> Result { self.execution_result.read().clone().unwrap() } + fn call_many(&self, txs: &[(SignedTransaction, CallAnalytics)], block: BlockId) -> Result, CallError> { + let mut res = Vec::with_capacity(txs.len()); + for &(ref tx, analytics) in txs { + res.push(self.call(tx, analytics, block)?); + } + Ok(res) + } + fn estimate_gas(&self, _t: &SignedTransaction, _block: BlockId) -> Result { Ok(21000.into()) } @@ -423,7 +431,7 @@ impl BlockChainClient for TestBlockChainClient { fn nonce(&self, address: &Address, id: BlockId) -> Option { match id { - BlockId::Latest => Some(self.nonces.read().get(address).cloned().unwrap_or(self.spec.params().account_start_nonce)), + BlockId::Latest | BlockId::Pending => Some(self.nonces.read().get(address).cloned().unwrap_or(self.spec.params().account_start_nonce)), _ => None, } } @@ -438,16 +446,15 @@ impl BlockChainClient for TestBlockChainClient { fn code(&self, address: &Address, id: BlockId) -> Option> { match id { - BlockId::Latest => Some(self.code.read().get(address).cloned()), + BlockId::Latest | BlockId::Pending => Some(self.code.read().get(address).cloned()), _ => None, } } fn balance(&self, address: &Address, id: BlockId) -> Option { - if let BlockId::Latest = id { - Some(self.balances.read().get(address).cloned().unwrap_or_else(U256::zero)) - } else { - None + match id { + BlockId::Latest | BlockId::Pending => Some(self.balances.read().get(address).cloned().unwrap_or_else(U256::zero)), + _ => None, } } @@ -456,10 +463,9 @@ impl BlockChainClient for TestBlockChainClient { } fn storage_at(&self, address: &Address, position: &H256, id: BlockId) -> Option { - if let BlockId::Latest = id { - Some(self.storage.read().get(&(address.clone(), position.clone())).cloned().unwrap_or_else(H256::new)) - } else { - 
None + match id { + BlockId::Latest | BlockId::Pending => Some(self.storage.read().get(&(address.clone(), position.clone())).cloned().unwrap_or_else(H256::new)), + _ => None, } } @@ -548,7 +554,8 @@ impl BlockChainClient for TestBlockChainClient { match id { BlockId::Number(number) if (number as usize) < self.blocks.read().len() => BlockStatus::InChain, BlockId::Hash(ref hash) if self.blocks.read().get(hash).is_some() => BlockStatus::InChain, - BlockId::Latest | BlockId::Pending | BlockId::Earliest => BlockStatus::InChain, + BlockId::Latest | BlockId::Earliest => BlockStatus::InChain, + BlockId::Pending => BlockStatus::Pending, _ => BlockStatus::Unknown, } } diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index e76da60bd..8e1bd8b18 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -182,7 +182,11 @@ pub trait BlockChainClient : Sync + Send { fn logs(&self, filter: Filter) -> Vec; /// Makes a non-persistent transaction call. - fn call(&self, t: &SignedTransaction, block: BlockId, analytics: CallAnalytics) -> Result; + fn call(&self, tx: &SignedTransaction, analytics: CallAnalytics, block: BlockId) -> Result; + + /// Makes multiple non-persistent but dependent transaction calls. + /// Returns a vector of successes or a failure if any of the transaction fails. + fn call_many(&self, txs: &[(SignedTransaction, CallAnalytics)], block: BlockId) -> Result, CallError>; /// Estimates how much gas will be necessary for a call. 
fn estimate_gas(&self, t: &SignedTransaction, block: BlockId) -> Result; diff --git a/ethcore/src/engines/tendermint/mod.rs b/ethcore/src/engines/tendermint/mod.rs index b65c79204..1c962d633 100644 --- a/ethcore/src/engines/tendermint/mod.rs +++ b/ethcore/src/engines/tendermint/mod.rs @@ -1050,7 +1050,7 @@ mod tests { client.miner().import_own_transaction(client.as_ref(), transaction.into()).unwrap(); // Propose - let proposal = Some(client.miner().pending_block().unwrap().header.bare_hash()); + let proposal = Some(client.miner().pending_block(0).unwrap().header.bare_hash()); // Propose timeout engine.step(); diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 7256f2ff5..02463d929 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -21,8 +21,8 @@ use std::sync::Arc; use util::*; use using_queue::{UsingQueue, GetAction}; use account_provider::{AccountProvider, SignError as AccountError}; -use state::{State, CleanupMode}; -use client::{MiningBlockChainClient, Executive, Executed, EnvInfo, TransactOptions, BlockId, CallAnalytics, TransactionId}; +use state::State; +use client::{MiningBlockChainClient, BlockId, TransactionId}; use client::TransactionImportResult; use executive::contract_address; use block::{ClosedBlock, IsBlock, Block}; @@ -39,7 +39,7 @@ use miner::local_transactions::{Status as LocalTransactionStatus}; use miner::service_transaction_checker::ServiceTransactionChecker; use price_info::{Client as PriceInfoClient, PriceInfo}; use price_info::fetch::Client as FetchClient; -use header::BlockNumber; +use header::{Header, BlockNumber}; /// Different possible definitions for pending transaction set. #[derive(Debug, PartialEq)] @@ -331,13 +331,28 @@ impl Miner { } /// Get `Some` `clone()` of the current pending block's state or `None` if we're not sealing. 
- pub fn pending_state(&self) -> Option> { - self.sealing_work.lock().queue.peek_last_ref().map(|b| b.block().fields().state.clone()) + pub fn pending_state(&self, latest_block_number: BlockNumber) -> Option> { + self.map_pending_block(|b| b.state().clone(), latest_block_number) } /// Get `Some` `clone()` of the current pending block or `None` if we're not sealing. - pub fn pending_block(&self) -> Option { - self.sealing_work.lock().queue.peek_last_ref().map(|b| b.to_base()) + pub fn pending_block(&self, latest_block_number: BlockNumber) -> Option { + self.map_pending_block(|b| b.to_base(), latest_block_number) + } + + /// Get `Some` `clone()` of the current pending block header or `None` if we're not sealing. + pub fn pending_block_header(&self, latest_block_number: BlockNumber) -> Option
{ + self.map_pending_block(|b| b.header().clone(), latest_block_number) + } + + fn map_pending_block(&self, f: F, latest_block_number: BlockNumber) -> Option where + F: FnOnce(&ClosedBlock) -> T, + { + self.from_pending_block( + latest_block_number, + || None, + |block| Some(f(block)), + ) } #[cfg_attr(feature="dev", allow(match_same_arms))] @@ -679,7 +694,7 @@ impl Miner { #[cfg_attr(feature="dev", allow(wrong_self_convention))] #[cfg_attr(feature="dev", allow(redundant_closure))] fn from_pending_block(&self, latest_block_number: BlockNumber, from_chain: F, map_block: G) -> H - where F: Fn() -> H, G: Fn(&ClosedBlock) -> H { + where F: Fn() -> H, G: FnOnce(&ClosedBlock) -> H { let sealing_work = self.sealing_work.lock(); sealing_work.queue.peek_last_ref().map_or_else( || from_chain(), @@ -717,84 +732,6 @@ impl MinerService for Miner { } } - fn call(&self, client: &MiningBlockChainClient, t: &SignedTransaction, analytics: CallAnalytics) -> Result { - let sealing_work = self.sealing_work.lock(); - match sealing_work.queue.peek_last_ref() { - Some(work) => { - let block = work.block(); - - // TODO: merge this code with client.rs's fn call somwhow. - let header = block.header(); - let last_hashes = Arc::new(client.last_hashes()); - let env_info = EnvInfo { - number: header.number(), - author: *header.author(), - timestamp: header.timestamp(), - difficulty: *header.difficulty(), - last_hashes: last_hashes, - gas_used: U256::zero(), - gas_limit: U256::max_value(), - }; - // that's just a copy of the state. 
- let mut state = block.state().clone(); - let original_state = if analytics.state_diffing { Some(state.clone()) } else { None }; - - let sender = t.sender(); - let balance = state.balance(&sender).map_err(ExecutionError::from)?; - let needed_balance = t.value + t.gas * t.gas_price; - if balance < needed_balance { - // give the sender a sufficient balance - state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty) - .map_err(ExecutionError::from)?; - } - let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; - let mut ret = Executive::new(&mut state, &env_info, &*self.engine).transact(t, options)?; - - // TODO gav move this into Executive. - if let Some(original) = original_state { - ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?); - } - - Ok(ret) - }, - None => client.call(t, BlockId::Latest, analytics) - } - } - - // TODO: The `chain.latest_x` actually aren't infallible, they just panic on corruption. - // TODO: return trie::Result here, or other. 
- fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> Option { - self.from_pending_block( - chain.chain_info().best_block_number, - || Some(chain.latest_balance(address)), - |b| b.block().fields().state.balance(address).ok(), - ) - } - - fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> Option { - self.from_pending_block( - chain.chain_info().best_block_number, - || Some(chain.latest_storage_at(address, position)), - |b| b.block().fields().state.storage_at(address, position).ok(), - ) - } - - fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> Option { - self.from_pending_block( - chain.chain_info().best_block_number, - || Some(chain.latest_nonce(address)), - |b| b.block().fields().state.nonce(address).ok(), - ) - } - - fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option> { - self.from_pending_block( - chain.chain_info().best_block_number, - || Some(chain.latest_code(address)), - |b| b.block().fields().state.code(address).ok().map(|c| c.map(|c| (&*c).clone())) - ) - } - fn set_author(&self, author: Address) { if self.engine.seals_internally().is_some() { let mut sealing_work = self.sealing_work.lock(); @@ -1466,14 +1403,14 @@ mod tests { miner.update_sealing(&*client); client.flush_queue(); - assert!(miner.pending_block().is_none()); + assert!(miner.pending_block(0).is_none()); assert_eq!(client.chain_info().best_block_number, 3 as BlockNumber); assert_eq!(miner.import_own_transaction(&*client, PendingTransaction::new(transaction_with_network_id(spec.network_id()).into(), None)).unwrap(), TransactionImportResult::Current); miner.update_sealing(&*client); client.flush_queue(); - assert!(miner.pending_block().is_none()); + assert!(miner.pending_block(0).is_none()); assert_eq!(client.chain_info().best_block_number, 4 as BlockNumber); } diff --git a/ethcore/src/miner/mod.rs b/ethcore/src/miner/mod.rs index 1c07f4fab..b4cb065fd 100644 --- a/ethcore/src/miner/mod.rs 
+++ b/ethcore/src/miner/mod.rs @@ -62,12 +62,12 @@ pub use self::stratum::{Stratum, Error as StratumError, Options as StratumOption use std::collections::BTreeMap; use util::{H256, U256, Address, Bytes}; -use client::{MiningBlockChainClient, Executed, CallAnalytics}; +use client::{MiningBlockChainClient}; use block::ClosedBlock; use header::BlockNumber; use receipt::{RichReceipt, Receipt}; -use error::{Error, CallError}; -use transaction::{UnverifiedTransaction, PendingTransaction, SignedTransaction}; +use error::{Error}; +use transaction::{UnverifiedTransaction, PendingTransaction}; /// Miner client API pub trait MinerService : Send + Sync { @@ -185,21 +185,6 @@ pub trait MinerService : Send + Sync { /// Suggested gas limit. fn sensible_gas_limit(&self) -> U256 { 21000.into() } - - /// Latest account balance in pending state. - fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> Option; - - /// Call into contract code using pending state. - fn call(&self, chain: &MiningBlockChainClient, t: &SignedTransaction, analytics: CallAnalytics) -> Result; - - /// Get storage value in pending state. - fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> Option; - - /// Get account nonce in pending state. - fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> Option; - - /// Get contract code in pending state. - fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option>; } /// Mining status diff --git a/ethcore/types/src/block_status.rs b/ethcore/types/src/block_status.rs index 937077795..d330b9ed1 100644 --- a/ethcore/types/src/block_status.rs +++ b/ethcore/types/src/block_status.rs @@ -23,6 +23,8 @@ pub enum BlockStatus { Queued, /// Known as bad. Bad, + /// Pending block. + Pending, /// Unknown. 
Unknown, } diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 6ae614db1..7c656c752 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -264,6 +264,7 @@ fn check_known(client: &C, number: BlockNumber) -> Result<(), Error> where C: match client.block_status(number.into()) { BlockStatus::InChain => Ok(()), + BlockStatus::Pending => Ok(()), _ => Err(errors::unknown_block()), } } @@ -361,20 +362,12 @@ impl Eth for EthClient where fn balance(&self, address: RpcH160, num: Trailing) -> BoxFuture { let address = address.into(); - let res = match num.unwrap_or_default() { - BlockNumber::Pending => { - match self.miner.balance(&*self.client, &address) { - Some(balance) => Ok(balance.into()), - None => Err(errors::database("latest balance missing")) - } - } - id => { - try_bf!(check_known(&*self.client, id.clone())); - match self.client.balance(&address, id.into()) { - Some(balance) => Ok(balance.into()), - None => Err(errors::state_pruned()), - } - } + let id = num.unwrap_or_default(); + + try_bf!(check_known(&*self.client, id.clone())); + let res = match self.client.balance(&address, id.into()) { + Some(balance) => Ok(balance.into()), + None => Err(errors::state_pruned()), }; future::done(res).boxed() @@ -384,20 +377,12 @@ impl Eth for EthClient where let address: Address = RpcH160::into(address); let position: U256 = RpcU256::into(pos); - let res = match num.unwrap_or_default() { - BlockNumber::Pending => { - match self.miner.storage_at(&*self.client, &address, &H256::from(position)) { - Some(s) => Ok(s.into()), - None => Err(errors::database("latest storage missing")) - } - } - id => { - try_bf!(check_known(&*self.client, id.clone())); - match self.client.storage_at(&address, &H256::from(position), id.into()) { - Some(s) => Ok(s.into()), - None => Err(errors::state_pruned()), - } - } + let id = num.unwrap_or_default(); + + try_bf!(check_known(&*self.client, id.clone())); + let res = match self.client.storage_at(&address, 
&H256::from(position), id.into()) { + Some(s) => Ok(s.into()), + None => Err(errors::state_pruned()), }; future::done(res).boxed() @@ -410,18 +395,12 @@ impl Eth for EthClient where BlockNumber::Pending if self.options.pending_nonce_from_queue => { let nonce = self.miner.last_nonce(&address) .map(|n| n + 1.into()) - .or_else(|| self.miner.nonce(&*self.client, &address)); + .or_else(|| self.client.nonce(&address, BlockNumber::Pending.into())); match nonce { Some(nonce) => Ok(nonce.into()), None => Err(errors::database("latest nonce missing")) } } - BlockNumber::Pending => { - match self.miner.nonce(&*self.client, &address) { - Some(nonce) => Ok(nonce.into()), - None => Err(errors::database("latest nonce missing")) - } - } id => { try_bf!(check_known(&*self.client, id.clone())); match self.client.nonce(&address, id.into()) { @@ -468,20 +447,12 @@ impl Eth for EthClient where fn code_at(&self, address: RpcH160, num: Trailing) -> BoxFuture { let address: Address = RpcH160::into(address); - let res = match num.unwrap_or_default() { - BlockNumber::Pending => { - match self.miner.code(&*self.client, &address) { - Some(code) => Ok(code.map_or_else(Bytes::default, Bytes::new)), - None => Err(errors::database("latest code missing")) - } - } - id => { - try_bf!(check_known(&*self.client, id.clone())); - match self.client.code(&address, id.into()) { - Some(code) => Ok(code.map_or_else(Bytes::default, Bytes::new)), - None => Err(errors::state_pruned()), - } - } + let id = num.unwrap_or_default(); + try_bf!(check_known(&*self.client, id.clone())); + + let res = match self.client.code(&address, id.into()) { + Some(code) => Ok(code.map_or_else(Bytes::default, Bytes::new)), + None => Err(errors::state_pruned()), }; future::done(res).boxed() @@ -648,10 +619,8 @@ impl Eth for EthClient where Err(e) => return future::err(e).boxed(), }; - let result = match num.unwrap_or_default() { - BlockNumber::Pending => self.miner.call(&*self.client, &signed, Default::default()), - num => 
self.client.call(&signed, num.into(), Default::default()), - }; + let num = num.unwrap_or_default(); + let result = self.client.call(&signed, Default::default(), num.into()); future::done(result .map(|b| b.output.into()) diff --git a/rpc/src/v1/impls/light/parity.rs b/rpc/src/v1/impls/light/parity.rs index 481cdb136..62a7721fc 100644 --- a/rpc/src/v1/impls/light/parity.rs +++ b/rpc/src/v1/impls/light/parity.rs @@ -39,7 +39,7 @@ use v1::helpers::light_fetch::LightFetch; use v1::metadata::Metadata; use v1::traits::Parity; use v1::types::{ - Bytes, U256, H160, H256, H512, + Bytes, U256, H160, H256, H512, CallRequest, Peers, Transaction, RpcSettings, Histogram, TransactionStats, LocalTransactionStatus, BlockNumber, ConsensusCapability, VersionInfo, @@ -389,4 +389,8 @@ impl Parity for ParityClient { fn ipfs_cid(&self, content: Bytes) -> Result { ipfs::cid(content) } + + fn call(&self, _requests: Vec, _block: Trailing) -> BoxFuture, Error> { + future::err(errors::light_unimplemented(None)).boxed() + } } diff --git a/rpc/src/v1/impls/light/trace.rs b/rpc/src/v1/impls/light/trace.rs index 00d19dc24..81ed592c5 100644 --- a/rpc/src/v1/impls/light/trace.rs +++ b/rpc/src/v1/impls/light/trace.rs @@ -20,7 +20,7 @@ use jsonrpc_core::Error; use jsonrpc_macros::Trailing; use v1::traits::Traces; use v1::helpers::errors; -use v1::types::{TraceFilter, LocalizedTrace, BlockNumber, Index, CallRequest, Bytes, TraceResults, H256}; +use v1::types::{TraceFilter, LocalizedTrace, BlockNumber, Index, CallRequest, Bytes, TraceResults, TraceOptions, H256}; /// Traces api implementation. // TODO: all calling APIs should be possible w. proved remote TX execution. 
@@ -43,15 +43,19 @@ impl Traces for TracesClient { Err(errors::light_unimplemented(None)) } - fn call(&self, _request: CallRequest, _flags: Vec, _block: Trailing) -> Result { + fn call(&self, _request: CallRequest, _flags: TraceOptions, _block: Trailing) -> Result { Err(errors::light_unimplemented(None)) } - fn raw_transaction(&self, _raw_transaction: Bytes, _flags: Vec, _block: Trailing) -> Result { + fn call_many(&self, _request: Vec<(CallRequest, TraceOptions)>, _block: Trailing) -> Result, Error> { Err(errors::light_unimplemented(None)) } - fn replay_transaction(&self, _transaction_hash: H256, _flags: Vec) -> Result { + fn raw_transaction(&self, _raw_transaction: Bytes, _flags: TraceOptions, _block: Trailing) -> Result { + Err(errors::light_unimplemented(None)) + } + + fn replay_transaction(&self, _transaction_hash: H256, _flags: TraceOptions) -> Result { Err(errors::light_unimplemented(None)) } } diff --git a/rpc/src/v1/impls/parity.rs b/rpc/src/v1/impls/parity.rs index 2cffdd4e6..d855d7e17 100644 --- a/rpc/src/v1/impls/parity.rs +++ b/rpc/src/v1/impls/parity.rs @@ -28,22 +28,23 @@ use crypto::ecies; use ethkey::{Brain, Generator}; use ethstore::random_phrase; use ethsync::{SyncProvider, ManageNetwork}; +use ethcore::account_provider::AccountProvider; +use ethcore::client::{MiningBlockChainClient}; use ethcore::ids::BlockId; use ethcore::miner::MinerService; -use ethcore::client::{MiningBlockChainClient}; use ethcore::mode::Mode; -use ethcore::account_provider::AccountProvider; +use ethcore::transaction::SignedTransaction; use updater::{Service as UpdateService}; use crypto::DEFAULT_MAC; use jsonrpc_core::Error; use jsonrpc_macros::Trailing; -use v1::helpers::{self, errors, ipfs, SigningQueue, SignerService, NetworkSettings}; +use v1::helpers::{self, errors, fake_sign, ipfs, SigningQueue, SignerService, NetworkSettings}; use v1::helpers::accounts::unwrap_provider; use v1::metadata::Metadata; use v1::traits::Parity; use v1::types::{ - Bytes, U256, H160, H256, 
H512, + Bytes, U256, H160, H256, H512, CallRequest, Peers, Transaction, RpcSettings, Histogram, TransactionStats, LocalTransactionStatus, BlockNumber, ConsensusCapability, VersionInfo, @@ -409,4 +410,23 @@ impl Parity for ParityClient where fn ipfs_cid(&self, content: Bytes) -> Result { ipfs::cid(content) } + + fn call(&self, requests: Vec, block: Trailing) -> BoxFuture, Error> { + let requests: Result, Error> = requests + .into_iter() + .map(|request| Ok(( + fake_sign::sign_call(&self.client, &self.miner, request.into())?, + Default::default() + ))) + .collect(); + + let block = block.unwrap_or_default(); + let requests = try_bf!(requests); + + let result = self.client.call_many(&requests, block.into()) + .map(|res| res.into_iter().map(|res| res.output.into()).collect()) + .map_err(errors::call); + + future::done(result).boxed() + } } diff --git a/rpc/src/v1/impls/traces.rs b/rpc/src/v1/impls/traces.rs index 2c65f4403..31a0e889e 100644 --- a/rpc/src/v1/impls/traces.rs +++ b/rpc/src/v1/impls/traces.rs @@ -27,9 +27,9 @@ use jsonrpc_core::Error; use jsonrpc_macros::Trailing; use v1::traits::Traces; use v1::helpers::{errors, fake_sign}; -use v1::types::{TraceFilter, LocalizedTrace, BlockNumber, Index, CallRequest, Bytes, TraceResults, H256}; +use v1::types::{TraceFilter, LocalizedTrace, BlockNumber, Index, CallRequest, Bytes, TraceResults, TraceOptions, H256}; -fn to_call_analytics(flags: Vec) -> CallAnalytics { +fn to_call_analytics(flags: TraceOptions) -> CallAnalytics { CallAnalytics { transaction_tracing: flags.contains(&("trace".to_owned())), vm_tracing: flags.contains(&("vmTrace".to_owned())), @@ -79,29 +79,45 @@ impl Traces for TracesClient where C: MiningBlockChainClient + 'stat .map(LocalizedTrace::from)) } - fn call(&self, request: CallRequest, flags: Vec, block: Trailing) -> Result { + fn call(&self, request: CallRequest, flags: TraceOptions, block: Trailing) -> Result { let block = block.unwrap_or_default(); let request = CallRequest::into(request); let 
signed = fake_sign::sign_call(&self.client, &self.miner, request)?; - self.client.call(&signed, block.into(), to_call_analytics(flags)) + self.client.call(&signed, to_call_analytics(flags), block.into()) .map(TraceResults::from) .map_err(errors::call) } - fn raw_transaction(&self, raw_transaction: Bytes, flags: Vec, block: Trailing) -> Result { + fn call_many(&self, requests: Vec<(CallRequest, TraceOptions)>, block: Trailing) -> Result, Error> { + let block = block.unwrap_or_default(); + + let requests = requests.into_iter() + .map(|(request, flags)| { + let request = CallRequest::into(request); + let signed = fake_sign::sign_call(&self.client, &self.miner, request)?; + Ok((signed, to_call_analytics(flags))) + }) + .collect::, _>>()?; + + self.client.call_many(&requests, block.into()) + .map(|results| results.into_iter().map(TraceResults::from).collect()) + .map_err(errors::call) + } + + fn raw_transaction(&self, raw_transaction: Bytes, flags: TraceOptions, block: Trailing) -> Result { let block = block.unwrap_or_default(); let tx = UntrustedRlp::new(&raw_transaction.into_vec()).as_val().map_err(|e| errors::invalid_params("Transaction is not valid RLP", e))?; let signed = SignedTransaction::new(tx).map_err(errors::transaction)?; - self.client.call(&signed, block.into(), to_call_analytics(flags)) + self.client.call(&signed, to_call_analytics(flags), block.into()) .map(TraceResults::from) .map_err(errors::call) } - fn replay_transaction(&self, transaction_hash: H256, flags: Vec) -> Result { + fn replay_transaction(&self, transaction_hash: H256, flags: TraceOptions) -> Result { self.client.replay(TransactionId::Hash(transaction_hash.into()), to_call_analytics(flags)) .map(TraceResults::from) .map_err(errors::call) diff --git a/rpc/src/v1/tests/helpers/miner_service.rs b/rpc/src/v1/tests/helpers/miner_service.rs index ef9b5724b..7139d636b 100644 --- a/rpc/src/v1/tests/helpers/miner_service.rs +++ b/rpc/src/v1/tests/helpers/miner_service.rs @@ -19,9 +19,9 @@ use 
std::collections::{BTreeMap, HashMap}; use std::collections::hash_map::Entry; use util::{Address, H256, Bytes, U256, RwLock, Mutex}; -use ethcore::error::{Error, CallError}; -use ethcore::client::{MiningBlockChainClient, Executed, CallAnalytics}; -use ethcore::block::{ClosedBlock, IsBlock}; +use ethcore::error::Error; +use ethcore::client::MiningBlockChainClient; +use ethcore::block::ClosedBlock; use ethcore::header::BlockNumber; use ethcore::transaction::{UnverifiedTransaction, SignedTransaction, PendingTransaction}; use ethcore::receipt::{Receipt, RichReceipt}; @@ -280,41 +280,6 @@ impl MinerService for TestMinerService { unimplemented!(); } - fn balance(&self, _chain: &MiningBlockChainClient, address: &Address) -> Option { - self.latest_closed_block.lock() - .as_ref() - .map(|b| b.block().fields().state.balance(address)) - .map(|b| b.ok()) - .unwrap_or(Some(U256::default())) - } - - fn call(&self, _chain: &MiningBlockChainClient, _t: &SignedTransaction, _analytics: CallAnalytics) -> Result { - unimplemented!(); - } - - fn storage_at(&self, _chain: &MiningBlockChainClient, address: &Address, position: &H256) -> Option { - self.latest_closed_block.lock() - .as_ref() - .map(|b| b.block().fields().state.storage_at(address, position)) - .map(|s| s.ok()) - .unwrap_or(Some(H256::default())) - } - - fn nonce(&self, _chain: &MiningBlockChainClient, address: &Address) -> Option { - // we assume all transactions are in a pending block, ignoring the - // reality of gas limits. 
- Some(self.last_nonce(address).unwrap_or(U256::zero())) - } - - fn code(&self, _chain: &MiningBlockChainClient, address: &Address) -> Option> { - self.latest_closed_block.lock() - .as_ref() - .map(|b| b.block().fields().state.code(address)) - .map(|c| c.ok()) - .unwrap_or(None) - .map(|c| c.map(|c| (&*c).clone())) - } - fn sensible_gas_price(&self) -> U256 { 20000000000u64.into() } diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index eb5a7d4e9..f935b93e2 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -432,10 +432,7 @@ fn rpc_eth_balance_pending() { "id": 1 }"#; - // the TestMinerService doesn't communicate with the the TestBlockChainClient in any way. - // if this returns zero, we know that the "pending" call is being properly forwarded to the - // miner. - let response = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x5","id":1}"#; assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); } diff --git a/rpc/src/v1/tests/mocked/parity.rs b/rpc/src/v1/tests/mocked/parity.rs index aeeb7902d..b5cdada6f 100644 --- a/rpc/src/v1/tests/mocked/parity.rs +++ b/rpc/src/v1/tests/mocked/parity.rs @@ -16,10 +16,10 @@ use std::sync::Arc; use ethcore_logger::RotatingLogger; -use util::Address; +use util::{Address, U256}; use ethsync::ManageNetwork; use ethcore::account_provider::AccountProvider; -use ethcore::client::{TestBlockChainClient}; +use ethcore::client::{TestBlockChainClient, Executed}; use ethcore::miner::LocalTransactionStatus; use ethstore::ethkey::{Generator, Random}; @@ -504,3 +504,40 @@ fn rpc_parity_cid() { assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } + +#[test] +fn rpc_parity_call() { + let deps = Dependencies::new(); + deps.client.set_execution_result(Ok(Executed { + exception: None, + gas: U256::zero(), + gas_used: U256::from(0xff30), + refunded: U256::from(0x5), + cumulative_gas_used: U256::zero(), 
+ logs: vec![], + contracts_created: vec![], + output: vec![0x12, 0x34, 0xff], + trace: vec![], + vm_trace: None, + state_diff: None, + })); + let io = deps.default_client(); + + let request = r#"{ + "jsonrpc": "2.0", + "method": "parity_call", + "params": [[{ + "from": "0xb60e8dd61c5d32be8058bb8eb970870f07233155", + "to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567", + "gas": "0x76c0", + "gasPrice": "0x9184e72a000", + "value": "0x9184e72a", + "data": "0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675" + }], + "latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":["0x1234ff"],"id":1}"#; + + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); +} diff --git a/rpc/src/v1/tests/mocked/traces.rs b/rpc/src/v1/tests/mocked/traces.rs index f64142f8c..50890648c 100644 --- a/rpc/src/v1/tests/mocked/traces.rs +++ b/rpc/src/v1/tests/mocked/traces.rs @@ -170,6 +170,16 @@ fn rpc_trace_call() { assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); } +#[test] +fn rpc_trace_multi_call() { + let tester = io(); + + let request = r#"{"jsonrpc":"2.0","method":"trace_callMany","params":[[[{}, ["stateDiff", "vmTrace", "trace"]]]],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":[{"output":"0x010203","stateDiff":null,"trace":[],"vmTrace":null}],"id":1}"#; + + assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); +} + #[test] fn rpc_trace_call_state_pruned() { let tester = io(); diff --git a/rpc/src/v1/traits/parity.rs b/rpc/src/v1/traits/parity.rs index 92904aa40..a0b1a6e99 100644 --- a/rpc/src/v1/traits/parity.rs +++ b/rpc/src/v1/traits/parity.rs @@ -23,7 +23,7 @@ use jsonrpc_macros::Trailing; use futures::BoxFuture; use v1::types::{ - H160, H256, H512, U256, Bytes, + H160, H256, H512, U256, Bytes, CallRequest, Peers, Transaction, RpcSettings, Histogram, TransactionStats, LocalTransactionStatus, BlockNumber, ConsensusCapability, VersionInfo, @@ -203,5 
+203,9 @@ build_rpc_trait! { /// Get IPFS CIDv0 given protobuf encoded bytes. #[rpc(name = "parity_cidV0")] fn ipfs_cid(&self, Bytes) -> Result; + + /// Call contract, returning the output data. + #[rpc(async, name = "parity_call")] + fn call(&self, Vec, Trailing) -> BoxFuture, Error>; } } diff --git a/rpc/src/v1/traits/traces.rs b/rpc/src/v1/traits/traces.rs index 64b37ac1d..568788af2 100644 --- a/rpc/src/v1/traits/traces.rs +++ b/rpc/src/v1/traits/traces.rs @@ -18,7 +18,7 @@ use jsonrpc_core::Error; use jsonrpc_macros::Trailing; -use v1::types::{TraceFilter, LocalizedTrace, BlockNumber, Index, CallRequest, Bytes, TraceResults, H256}; +use v1::types::{TraceFilter, LocalizedTrace, BlockNumber, Index, CallRequest, Bytes, TraceResults, H256, TraceOptions}; build_rpc_trait! { /// Traces specific rpc interface. @@ -41,14 +41,18 @@ build_rpc_trait! { /// Executes the given call and returns a number of possible traces for it. #[rpc(name = "trace_call")] - fn call(&self, CallRequest, Vec, Trailing) -> Result; + fn call(&self, CallRequest, TraceOptions, Trailing) -> Result; + + /// Executes all given calls and returns a number of possible traces for each of it. + #[rpc(name = "trace_callMany")] + fn call_many(&self, Vec<(CallRequest, TraceOptions)>, Trailing) -> Result, Error>; /// Executes the given raw transaction and returns a number of possible traces for it. #[rpc(name = "trace_rawTransaction")] - fn raw_transaction(&self, Bytes, Vec, Trailing) -> Result; + fn raw_transaction(&self, Bytes, TraceOptions, Trailing) -> Result; /// Executes the transaction with the given hash and returns a number of possible traces for it. 
#[rpc(name = "trace_replayTransaction")] - fn replay_transaction(&self, H256, Vec) -> Result; + fn replay_transaction(&self, H256, TraceOptions) -> Result; } } diff --git a/rpc/src/v1/types/mod.rs b/rpc/src/v1/types/mod.rs index 3e463f958..1407ebcf2 100644 --- a/rpc/src/v1/types/mod.rs +++ b/rpc/src/v1/types/mod.rs @@ -78,3 +78,7 @@ pub use self::transaction_request::TransactionRequest; pub use self::transaction_condition::TransactionCondition; pub use self::uint::{U128, U256}; pub use self::work::Work; + +// TODO [ToDr] Refactor to a proper type Vec of enums? +/// Expected tracing type. +pub type TraceOptions = Vec; diff --git a/secret_store/src/key_server_cluster/io/deadline.rs b/secret_store/src/key_server_cluster/io/deadline.rs index 501a69057..aea339ca9 100644 --- a/secret_store/src/key_server_cluster/io/deadline.rs +++ b/secret_store/src/key_server_cluster/io/deadline.rs @@ -19,7 +19,7 @@ use std::time::Duration; use futures::{Future, Select, BoxFuture, Poll, Async}; use tokio_core::reactor::{Handle, Timeout}; -type DeadlineBox where F: Future = BoxFuture, F::Error>; +type DeadlineBox = BoxFuture::Item>, ::Error>; /// Complete a passed future or fail if it is not completed within timeout. 
pub fn deadline(duration: Duration, handle: &Handle, future: F) -> Result, io::Error> @@ -82,4 +82,4 @@ mod tests { core.turn(Some(Duration::from_millis(3))); assert_eq!(deadline.wait().unwrap(), DeadlineStatus::Meet(())); } -} \ No newline at end of file +} diff --git a/sync/src/block_sync.rs b/sync/src/block_sync.rs index 8215b775b..92c8df429 100644 --- a/sync/src/block_sync.rs +++ b/sync/src/block_sync.rs @@ -267,7 +267,7 @@ impl BlockDownloader { BlockStatus::Bad => { return Err(BlockDownloaderImportError::Invalid); }, - BlockStatus::Unknown => { + BlockStatus::Unknown | BlockStatus::Pending => { headers.push(hdr.as_raw().to_vec()); hashes.push(hash); } diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 38872b24f..d6937381c 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -995,7 +995,7 @@ impl ChainSync { BlockStatus::Queued => { trace!(target: "sync", "New hash block already queued {:?}", hash); }, - BlockStatus::Unknown => { + BlockStatus::Unknown | BlockStatus::Pending => { new_hashes.push(hash.clone()); if number > max_height { trace!(target: "sync", "New unknown block hash {:?}", hash); From e43b1084c325e32a2ec73ae313b8ac0bcb38538c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 4 Aug 2017 21:43:31 +0200 Subject: [PATCH 034/112] format instant change proofs correctly --- ethcore/src/engines/authority_round/mod.rs | 1 + ethcore/src/engines/tendermint/mod.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/ethcore/src/engines/authority_round/mod.rs b/ethcore/src/engines/authority_round/mod.rs index 18389f168..e7284bfbe 100644 --- a/ethcore/src/engines/authority_round/mod.rs +++ b/ethcore/src/engines/authority_round/mod.rs @@ -690,6 +690,7 @@ impl Engine for AuthorityRound { // apply immediate transitions. 
if let Some(change) = self.validators.is_epoch_end(first, chain_head) { + let change = combine_proofs(chain_head.number(), &change, &[]); return Some(change) } diff --git a/ethcore/src/engines/tendermint/mod.rs b/ethcore/src/engines/tendermint/mod.rs index 1c962d633..cc75e99c3 100644 --- a/ethcore/src/engines/tendermint/mod.rs +++ b/ethcore/src/engines/tendermint/mod.rs @@ -643,6 +643,7 @@ impl Engine for Tendermint { let first = chain_head.number() == 0; if let Some(change) = self.validators.is_epoch_end(first, chain_head) { + let change = combine_proofs(chain_head.number(), &change, &[]); return Some(change) } else if let Some(pending) = transition_store(chain_head.hash()) { let signal_number = chain_head.number(); From ee07bf29ce8bf12db7bed261f61d928f8d526994 Mon Sep 17 00:00:00 2001 From: Pieter Vander Vennet Date: Sun, 6 Aug 2017 18:33:54 +0200 Subject: [PATCH 035/112] Fixed typo (s/seems is/seems) --- js/src/dapps/localtx/Application/application.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/src/dapps/localtx/Application/application.js b/js/src/dapps/localtx/Application/application.js index 8efadcf1a..27dc41b3e 100644 --- a/js/src/dapps/localtx/Application/application.js +++ b/js/src/dapps/localtx/Application/application.js @@ -151,7 +151,7 @@ export default class Application extends Component { if (!transactions.length) { return ( -

The queue seems is empty.

+

The queue seems empty.

); } From 82f90085ee6f5496db7af3b8873f48cf3258ead8 Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Mon, 7 Aug 2017 00:53:11 +0000 Subject: [PATCH 036/112] [ci skip] js-precompiled 20170807-004826 --- Cargo.lock | 2 +- js/package-lock.json | 2 +- js/package.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cc8c7d3c7..48ab1e4ac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2052,7 +2052,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#06f77d96f1b1a771d643f07b60c802d448b6415c" +source = "git+https://github.com/paritytech/js-precompiled.git#dfb9367a495d5ca3eac3c92a4197cf8652756d37" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package-lock.json b/js/package-lock.json index 6324f5ab8..46c7ae176 100644 --- a/js/package-lock.json +++ b/js/package-lock.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.8.9", + "version": "1.8.10", "lockfileVersion": 1, "requires": true, "dependencies": { diff --git a/js/package.json b/js/package.json index e317ee6c1..d0f8a037b 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.8.9", + "version": "1.8.10", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", From 455ecf700ca57d5b2d9c0bd27a95cef799cd0ea1 Mon Sep 17 00:00:00 2001 From: debris Date: Mon, 7 Aug 2017 10:06:02 +0200 Subject: [PATCH 037/112] updated tiny-keccak to 1.3 --- Cargo.lock | 24 ++++++++++++------------ ethcrypto/Cargo.toml | 2 +- ethkey/Cargo.toml | 2 +- ethstore/Cargo.toml | 2 +- evmjit/Cargo.toml | 2 +- js/wasm/ethkey/Cargo.toml | 2 +- util/bloomable/Cargo.toml | 2 +- util/network/Cargo.toml | 2 +- whisper/Cargo.toml | 2 +- 9 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cc8c7d3c7..f07073354 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -195,7 +195,7 @@ name = "bloomable" version = "0.1.0" dependencies = [ "ethcore-bigint 0.1.3", - "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -470,7 +470,7 @@ dependencies = [ "serde 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -702,7 +702,7 @@ dependencies = [ "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -787,7 +787,7 @@ dependencies = [ "sha3 0.1.0", "target_info 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "vergen 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -800,7 +800,7 @@ dependencies = [ "ethkey 0.2.0", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "subtle 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -826,7 +826,7 @@ 
dependencies = [ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -862,7 +862,7 @@ dependencies = [ "smallvec 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -943,7 +943,7 @@ dependencies = [ name = "evmjit" version = "1.8.0" dependencies = [ - "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1545,7 +1545,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "ring 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", - "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2113,7 +2113,7 @@ dependencies = [ "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2863,7 +2863,7 @@ dependencies = [ [[package]] name = "tiny-keccak" -version = "1.2.1" +version = "1.3.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -3446,7 +3446,7 @@ dependencies = [ "checksum thread-id 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4437c97558c70d129e40629a5b385b3fb1ffac301e63941335e4d354081ec14a" "checksum thread_local 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c85048c6260d17cf486ceae3282d9fb6b90be220bf5b28c400f5485ffc29f0c7" "checksum time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "3c7ec6d62a20df54e07ab3b78b9a3932972f4b7981de295563686849eb3989af" -"checksum tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b50173faa6ee499206f77b189d7ff3bef40f6969f228c9ec22b82080df9aa41" +"checksum tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d52d12ad79e4063e0cb0ca5efa202ed7244b6ce4d25f4d3abe410b2a66128292" "checksum tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "99e958104a67877907c1454386d5482fe8e965a55d60be834a15a44328e7dc76" "checksum tokio-io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "48f55df1341bb92281f229a6030bc2abffde2c7a44c6d6b802b7687dd8be0775" "checksum tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)" = "" diff --git a/ethcrypto/Cargo.toml b/ethcrypto/Cargo.toml index 8e5131974..a84032f26 100644 --- a/ethcrypto/Cargo.toml +++ b/ethcrypto/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] [dependencies] rust-crypto = "0.2.36" -tiny-keccak = "1.2" +tiny-keccak = "1.3" eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" } ethkey = { path = "../ethkey" } ethcore-bigint = { path = "../util/bigint" } diff --git a/ethkey/Cargo.toml b/ethkey/Cargo.toml index 342410adc..519accadf 100644 --- a/ethkey/Cargo.toml +++ b/ethkey/Cargo.toml @@ -6,7 +6,7 @@ authors = ["Parity Technologies "] [dependencies] rand = "0.3.14" lazy_static = "0.2" -tiny-keccak = "1.2" +tiny-keccak = "1.3" eth-secp256k1 = { git = 
"https://github.com/paritytech/rust-secp256k1" } rustc-hex = "1.0" ethcore-bigint = { path = "../util/bigint" } diff --git a/ethstore/Cargo.toml b/ethstore/Cargo.toml index 117332022..200dec366 100755 --- a/ethstore/Cargo.toml +++ b/ethstore/Cargo.toml @@ -13,7 +13,7 @@ serde_json = "1.0" serde_derive = "1.0" rustc-hex = "1.0" rust-crypto = "0.2.36" -tiny-keccak = "1.0" +tiny-keccak = "1.3" time = "0.1.34" itertools = "0.5" parking_lot = "0.4" diff --git a/evmjit/Cargo.toml b/evmjit/Cargo.toml index e4daf3dae..dbc0cd51a 100644 --- a/evmjit/Cargo.toml +++ b/evmjit/Cargo.toml @@ -7,4 +7,4 @@ authors = ["Parity Technologies "] crate-type = ["dylib"] [dependencies] -tiny-keccak = "1.2" +tiny-keccak = "1.3" diff --git a/js/wasm/ethkey/Cargo.toml b/js/wasm/ethkey/Cargo.toml index 8391f8b0b..8eb9159e9 100644 --- a/js/wasm/ethkey/Cargo.toml +++ b/js/wasm/ethkey/Cargo.toml @@ -9,7 +9,7 @@ authors = ["Parity Technologies "] members = [] [dependencies] -tiny-keccak = "1.0" +tiny-keccak = "1.3" tiny-secp256k1 = "0.1" libc = { version = "0.2.14", default-features = false } diff --git a/util/bloomable/Cargo.toml b/util/bloomable/Cargo.toml index 46009d381..f85b67943 100644 --- a/util/bloomable/Cargo.toml +++ b/util/bloomable/Cargo.toml @@ -7,4 +7,4 @@ authors = ["debris "] ethcore-bigint = { path = "../bigint" } [dev-dependencies] -tiny-keccak = "1.2.1" +tiny-keccak = "1.3" diff --git a/util/network/Cargo.toml b/util/network/Cargo.toml index 7c44aff7f..2d571d632 100644 --- a/util/network/Cargo.toml +++ b/util/network/Cargo.toml @@ -12,7 +12,7 @@ mio = "0.6.8" bytes = "0.4" rand = "0.3.12" time = "0.1.34" -tiny-keccak = "1.0" +tiny-keccak = "1.3" rust-crypto = "0.2.34" slab = "0.2" clippy = { version = "0.0.103", optional = true} diff --git a/whisper/Cargo.toml b/whisper/Cargo.toml index 7b74c2990..b4898f00d 100644 --- a/whisper/Cargo.toml +++ b/whisper/Cargo.toml @@ -25,7 +25,7 @@ serde_json = "1.0" slab = "0.3" smallvec = "0.4" time = "0.1" -tiny-keccak = "1.2.1" +tiny-keccak = 
"1.3" jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } From 67ccfa1da1b8f228bf6e9f460551baaaf913177d Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Mon, 7 Aug 2017 09:43:01 +0000 Subject: [PATCH 038/112] [ci skip] js-precompiled 20170807-093816 --- Cargo.lock | 2 +- js/package-lock.json | 2 +- js/package.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 946507428..a66df64da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2052,7 +2052,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#dfb9367a495d5ca3eac3c92a4197cf8652756d37" +source = "git+https://github.com/paritytech/js-precompiled.git#dd9b92d9d8c244678e15163347f9adb2e2560959" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package-lock.json b/js/package-lock.json index 46c7ae176..7514c3c29 100644 --- a/js/package-lock.json +++ b/js/package-lock.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.8.10", + "version": "1.8.11", "lockfileVersion": 1, "requires": true, "dependencies": { diff --git a/js/package.json b/js/package.json index d0f8a037b..3c399db87 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.8.10", + "version": "1.8.11", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", From a20892e5e61439bccc16ea1901e5fc81dd1baabd Mon Sep 17 00:00:00 2001 From: Afri <5chdn@users.noreply.github.com> Date: Mon, 7 Aug 2017 18:12:00 +0200 Subject: [PATCH 039/112] Fix the README badges (#6229) * Update README.md * Set latest release to 1.7 --- README.md | 48 +++++++++++++++++------------------------------- 1 file changed, 17 insertions(+), 31 deletions(-) diff --git 
a/README.md b/README.md index 37b06f01c..16758bc27 100644 --- a/README.md +++ b/README.md @@ -1,55 +1,41 @@ -# [Parity](https://parity.io/parity.html) -### Fast, light, and robust Ethereum implementation +# [Parity](https://parity.io/parity.html) - fast, light, and robust Ethereum client -### [Download latest release](https://github.com/paritytech/parity/releases) +[![build status](https://gitlab.parity.io/parity/parity/badges/master/build.svg)](https://gitlab.parity.io/parity/parity/commits/master) +[![Snap Status](https://build.snapcraft.io/badge/paritytech/parity.svg)](https://build.snapcraft.io/user/paritytech/parity) +[![GPLv3](https://img.shields.io/badge/license-GPL%20v3-green.svg)](https://www.gnu.org/licenses/gpl-3.0.en.html) -[![build status](https://gitlab.parity.io/parity/parity/badges/master/build.svg)](https://gitlab.parity.io/parity/parity/commits/master) [![Coverage Status][coveralls-image]][coveralls-url] [![GPLv3][license-image]][license-url] [![Snap Status](https://build.snapcraft.io/badge/paritytech/parity.svg)](https://build.snapcraft.io/user/paritytech/parity) +- [Download the latest release here.](https://github.com/paritytech/parity/releases) ### Join the chat! -Parity [![Join the chat at https://gitter.im/ethcore/parity][gitter-image]][gitter-url] and -parity.js [![Join the chat at https://gitter.im/ethcore/parity.js](https://badges.gitter.im/ethcore/parity.js.svg)](https://gitter.im/ethcore/parity.js?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -[Internal Documentation][doc-url] - - -Be sure to check out [our wiki][wiki-url] for more information. 
- -[coveralls-image]: https://coveralls.io/repos/github/paritytech/parity/badge.svg?branch=master -[coveralls-url]: https://coveralls.io/github/paritytech/parity?branch=master -[gitter-image]: https://badges.gitter.im/Join%20Chat.svg -[gitter-url]: https://gitter.im/ethcore/parity?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge -[license-image]: https://img.shields.io/badge/license-GPL%20v3-green.svg -[license-url]: https://www.gnu.org/licenses/gpl-3.0.en.html -[doc-url]: https://paritytech.github.io/parity/ethcore/index.html -[wiki-url]: https://github.com/paritytech/parity/wiki +Get in touch with us on Gitter: +[![Gitter: Parity](https://img.shields.io/badge/gitter-parity-4AB495.svg)](https://gitter.im/paritytech/parity) +[![Gitter: Parity.js](https://img.shields.io/badge/gitter-parity.js-4AB495.svg)](https://gitter.im/paritytech/parity.js) +[![Gitter: Parity/Miners](https://img.shields.io/badge/gitter-parity/miners-4AB495.svg)](https://gitter.im/paritytech/parity/miners) +[![Gitter: Parity-PoA](https://img.shields.io/badge/gitter-parity--poa-4AB495.svg)](https://gitter.im/paritytech/parity-poa) +Be sure to check out [our wiki](https://github.com/paritytech/parity/wiki) and the [internal documentation](https://paritytech.github.io/parity/ethcore/index.html) for more information. ---- - ## About Parity -Parity's goal is to be the fastest, lightest, and most secure Ethereum client. We are developing Parity using the sophisticated and -cutting-edge Rust programming language. Parity is licensed under the GPLv3, and can be used for all your Ethereum needs. +Parity's goal is to be the fastest, lightest, and most secure Ethereum client. We are developing Parity using the sophisticated and cutting-edge Rust programming language. Parity is licensed under the GPLv3, and can be used for all your Ethereum needs. + +Parity comes with a built-in wallet. 
To access [Parity Wallet](http://web3.site/) simply go to http://web3.site/ (if you don't have access to the internet, but still want to use the service, you can also use http://127.0.0.1:8180/). It includes various functionality allowing you to: -Parity comes with a built-in wallet. To access [Parity Wallet](http://web3.site/) simply go to http://web3.site/ (if you don't have access to the internet, but still want to use the service, you can also use http://127.0.0.1:8180/). It -includes various functionality allowing you to: - create and manage your Ethereum accounts; - manage your Ether and any Ethereum tokens; - create and register your own tokens; - and much more. -By default, Parity will also run a JSONRPC server on `127.0.0.1:8545`. This is fully configurable and supports a number -of RPC APIs. +By default, Parity will also run a JSONRPC server on `127.0.0.1:8545`. This is fully configurable and supports a number of RPC APIs. -If you run into an issue while using parity, feel free to file one in this repository -or hop on our [gitter chat room][gitter-url] to ask a question. We are glad to help! +If you run into an issue while using parity, feel free to file one in this repository or hop on our [gitter chat room](https://gitter.im/paritytech/parity) to ask a question. We are glad to help! **For security-critical issues**, please refer to the security policy outlined in `SECURITY.MD`. -Parity's current release is 1.6. You can download it at https://github.com/paritytech/parity/releases or follow the instructions -below to build from source. +Parity's current release is 1.7. You can download it at https://github.com/paritytech/parity/releases or follow the instructions below to build from source. 
---- From 872e5537bb8a42a1ed1088e569276c3f30cc976f Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 7 Aug 2017 19:54:05 +0300 Subject: [PATCH 040/112] SecretStore: do not cache ACL contract + on-chain key servers configuration (#6107) * do not cache ACL storage contract * when error comes before initialization * initial KeyServerSet commit * update_nodes_set in maintain * do not connect to self * fixed connection establishing * removed println * improved KeyServerSet tracing * moved parsing to KeyServerSet * re-read only when blockchain is changed * do not try to connect if not a part of cluster * improved logging * fixed tests --- Cargo.lock | 1 + ethcore/native_contracts/build.rs | 2 + .../native_contracts/res/key_server_set.json | 1 + .../native_contracts/src/key_server_set.rs | 21 ++ ethcore/native_contracts/src/lib.rs | 2 + secret_store/Cargo.toml | 1 + secret_store/src/acl_storage.rs | 91 +++++--- secret_store/src/key_server.rs | 19 +- .../src/key_server_cluster/cluster.rs | 138 ++++++++---- .../key_server_cluster/cluster_sessions.rs | 2 +- .../src/key_server_cluster/io/handshake.rs | 12 +- .../key_server_cluster/jobs/job_session.rs | 26 +-- secret_store/src/key_server_cluster/mod.rs | 3 + .../net/accept_connection.rs | 7 +- secret_store/src/key_server_set.rs | 204 ++++++++++++++++++ secret_store/src/lib.rs | 8 +- 16 files changed, 442 insertions(+), 96 deletions(-) create mode 100644 ethcore/native_contracts/res/key_server_set.json create mode 100644 ethcore/native_contracts/src/key_server_set.rs create mode 100644 secret_store/src/key_server_set.rs diff --git a/Cargo.lock b/Cargo.lock index a66df64da..e705e39f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -723,6 +723,7 @@ dependencies = [ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 0.2.8 
(registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "native-contracts 0.1.0", "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/ethcore/native_contracts/build.rs b/ethcore/native_contracts/build.rs index cec830929..bcb64067c 100644 --- a/ethcore/native_contracts/build.rs +++ b/ethcore/native_contracts/build.rs @@ -21,6 +21,7 @@ use std::fs::File; use std::io::Write; // TODO: just walk the "res" directory and generate whole crate automatically. +const KEY_SERVER_SET_ABI: &'static str = include_str!("res/key_server_set.json"); const REGISTRY_ABI: &'static str = include_str!("res/registrar.json"); const URLHINT_ABI: &'static str = include_str!("res/urlhint.json"); const SERVICE_TRANSACTION_ABI: &'static str = include_str!("res/service_transaction.json"); @@ -45,6 +46,7 @@ fn build_test_contracts() { } fn main() { + build_file("KeyServerSet", KEY_SERVER_SET_ABI, "key_server_set.rs"); build_file("Registry", REGISTRY_ABI, "registry.rs"); build_file("Urlhint", URLHINT_ABI, "urlhint.rs"); build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs"); diff --git a/ethcore/native_contracts/res/key_server_set.json b/ethcore/native_contracts/res/key_server_set.json new file mode 100644 index 000000000..93f68837a --- /dev/null +++ b/ethcore/native_contracts/res/key_server_set.json @@ -0,0 +1 @@ 
+[{"constant":true,"inputs":[{"name":"","type":"uint256"}],"name":"keyServersList","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getKeyServerPublic","outputs":[{"name":"","type":"bytes"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"getKeyServers","outputs":[{"name":"","type":"address[]"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getKeyServerAddress","outputs":[{"name":"","type":"string"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"keyServer","type":"address"}],"name":"removeKeyServer","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"keyServerPublic","type":"bytes"},{"name":"keyServerIp","type":"string"}],"name":"addKeyServer","outputs":[],"payable":false,"type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"keyServer","type":"address"}],"name":"KeyServerAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"keyServer","type":"address"}],"name":"KeyServerRemoved","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"old","type":"address"},{"indexed":true,"name":"current","type":"address"}],"name":"NewOwner","type":"event"}] \ No newline at end of file diff --git a/ethcore/native_contracts/src/key_server_set.rs b/ethcore/native_contracts/src/key_server_set.rs new file mode 100644 index 000000000..60b137aae --- /dev/null +++ b/ethcore/native_contracts/src/key_server_set.rs @@ -0,0 +1,21 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +#![allow(unused_mut, unused_variables, unused_imports)] + +//! Secret store Key Server set contract. + +include!(concat!(env!("OUT_DIR"), "/key_server_set.rs")); diff --git a/ethcore/native_contracts/src/lib.rs b/ethcore/native_contracts/src/lib.rs index e35a4ec19..33cb91563 100644 --- a/ethcore/native_contracts/src/lib.rs +++ b/ethcore/native_contracts/src/lib.rs @@ -23,6 +23,7 @@ extern crate byteorder; extern crate ethabi; extern crate ethcore_util as util; +mod key_server_set; mod registry; mod urlhint; mod service_transaction; @@ -32,6 +33,7 @@ mod validator_report; pub mod test_contracts; +pub use self::key_server_set::KeyServerSet; pub use self::registry::Registry; pub use self::urlhint::Urlhint; pub use self::service_transaction::ServiceTransactionChecker; diff --git a/secret_store/Cargo.toml b/secret_store/Cargo.toml index eea49978d..19f342aa9 100644 --- a/secret_store/Cargo.toml +++ b/secret_store/Cargo.toml @@ -35,3 +35,4 @@ ethcore-logger = { path = "../logger" } ethcrypto = { path = "../ethcrypto" } ethkey = { path = "../ethkey" } native-contracts = { path = "../ethcore/native_contracts" } +lazy_static = "0.2" diff --git a/secret_store/src/acl_storage.rs b/secret_store/src/acl_storage.rs index 816d100dc..37d5bcd25 100644 --- a/secret_store/src/acl_storage.rs +++ b/secret_store/src/acl_storage.rs @@ -14,12 +14,13 @@ // You should have received a copy of 
the GNU General Public License // along with Parity. If not, see . -use std::sync::Arc; +use std::sync::{Arc, Weak}; use futures::{future, Future}; use parking_lot::Mutex; use ethkey::public_to_address; -use ethcore::client::{Client, BlockChainClient, BlockId}; +use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; use native_contracts::SecretStoreAclStorage; +use util::{H256, Address, Bytes}; use types::all::{Error, ServerKeyId, Public}; const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker"; @@ -32,40 +33,82 @@ pub trait AclStorage: Send + Sync { /// On-chain ACL storage implementation. pub struct OnChainAclStorage { + /// Cached on-chain contract. + contract: Mutex, +} + +/// Cached on-chain ACL storage contract. +struct CachedContract { /// Blockchain client. - client: Arc, - /// On-chain contract. - contract: Mutex>, + client: Weak, + /// Contract address. + contract_addr: Option
, + /// Contract at given address. + contract: Option, } impl OnChainAclStorage { - pub fn new(client: Arc) -> Self { - OnChainAclStorage { - client: client, - contract: Mutex::new(None), - } + pub fn new(client: &Arc) -> Arc { + let acl_storage = Arc::new(OnChainAclStorage { + contract: Mutex::new(CachedContract::new(client)), + }); + client.add_notify(acl_storage.clone()); + acl_storage } } impl AclStorage for OnChainAclStorage { fn check(&self, public: &Public, document: &ServerKeyId) -> Result { - let mut contract = self.contract.lock(); - if !contract.is_some() { - *contract = self.client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned()) - .and_then(|contract_addr| { + self.contract.lock().check(public, document) + } +} + +impl ChainNotify for OnChainAclStorage { + fn new_blocks(&self, _imported: Vec, _invalid: Vec, enacted: Vec, retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { + if !enacted.is_empty() || !retracted.is_empty() { + self.contract.lock().update() + } + } +} + +impl CachedContract { + pub fn new(client: &Arc) -> Self { + CachedContract { + client: Arc::downgrade(client), + contract_addr: None, + contract: None, + } + } + + pub fn update(&mut self) { + if let Some(client) = self.client.upgrade() { + let new_contract_addr = client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned()); + if self.contract_addr.as_ref() != new_contract_addr.as_ref() { + self.contract = new_contract_addr.map(|contract_addr| { trace!(target: "secretstore", "Configuring for ACL checker contract from {}", contract_addr); - Some(SecretStoreAclStorage::new(contract_addr)) - }) + SecretStoreAclStorage::new(contract_addr) + }); + + self.contract_addr = new_contract_addr; + } } - if let Some(ref contract) = *contract { - let address = public_to_address(&public); - let do_call = |a, d| future::done(self.client.call_contract(BlockId::Latest, a, d)); - contract.check_permissions(do_call, address, document.clone()) - .map_err(|err| 
Error::Internal(err)) - .wait() - } else { - Err(Error::Internal("ACL checker contract is not configured".to_owned())) + } + + pub fn check(&mut self, public: &Public, document: &ServerKeyId) -> Result { + match self.contract.as_ref() { + Some(contract) => { + let address = public_to_address(&public); + let do_call = |a, d| future::done( + self.client + .upgrade() + .ok_or("Calling contract without client".into()) + .and_then(|c| c.call_contract(BlockId::Latest, a, d))); + contract.check_permissions(do_call, address, document.clone()) + .map_err(|err| Error::Internal(err)) + .wait() + }, + None => Err(Error::Internal("ACL checker contract is not configured".to_owned())), } } } diff --git a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index fd4e154fa..c83e460f3 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -24,6 +24,7 @@ use ethcrypto; use ethkey; use super::acl_storage::AclStorage; use super::key_storage::KeyStorage; +use super::key_server_set::KeyServerSet; use key_server_cluster::{math, ClusterCore}; use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer}; use types::all::{Error, Public, RequestSignature, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow, @@ -44,9 +45,9 @@ pub struct KeyServerCore { impl KeyServerImpl { /// Create new key server instance - pub fn new(config: &ClusterConfiguration, acl_storage: Arc, key_storage: Arc) -> Result { + pub fn new(config: &ClusterConfiguration, key_server_set: Arc, acl_storage: Arc, key_storage: Arc) -> Result { Ok(KeyServerImpl { - data: Arc::new(Mutex::new(KeyServerCore::new(config, acl_storage, key_storage)?)), + data: Arc::new(Mutex::new(KeyServerCore::new(config, key_server_set, acl_storage, key_storage)?)), }) } @@ -143,14 +144,12 @@ impl MessageSigner for KeyServerImpl { } impl KeyServerCore { - pub fn new(config: &ClusterConfiguration, acl_storage: Arc, key_storage: Arc) -> Result { + pub fn new(config: 
&ClusterConfiguration, key_server_set: Arc, acl_storage: Arc, key_storage: Arc) -> Result { let config = NetClusterConfiguration { threads: config.threads, self_key_pair: ethkey::KeyPair::from_secret_slice(&config.self_private)?, listen_address: (config.listener_address.address.clone(), config.listener_address.port), - nodes: config.nodes.iter() - .map(|(node_id, node_address)| (node_id.clone(), (node_address.address.clone(), node_address.port))) - .collect(), + key_server_set: key_server_set, allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes, acl_storage: acl_storage, key_storage: key_storage, @@ -193,10 +192,13 @@ impl Drop for KeyServerCore { pub mod tests { use std::time; use std::sync::Arc; + use std::net::SocketAddr; + use std::collections::BTreeMap; use ethcrypto; use ethkey::{self, Secret, Random, Generator}; use acl_storage::tests::DummyAclStorage; use key_storage::tests::DummyKeyStorage; + use key_server_set::tests::MapKeyServerSet; use key_server_cluster::math; use util::H256; use types::all::{Error, Public, ClusterConfiguration, NodeAddress, RequestSignature, ServerKeyId, @@ -254,8 +256,11 @@ pub mod tests { })).collect(), allow_connecting_to_higher_nodes: false, }).collect(); + let key_servers_set: BTreeMap = configs[0].nodes.iter() + .map(|(k, a)| (k.clone(), format!("{}:{}", a.address, a.port).parse().unwrap())) + .collect(); let key_servers: Vec<_> = configs.into_iter().map(|cfg| - KeyServerImpl::new(&cfg, Arc::new(DummyAclStorage::default()), Arc::new(DummyKeyStorage::default())).unwrap() + KeyServerImpl::new(&cfg, Arc::new(MapKeyServerSet::new(key_servers_set.clone())), Arc::new(DummyAclStorage::default()), Arc::new(DummyKeyStorage::default())).unwrap() ).collect(); // wait until connections are established. 
It is fast => do not bother with events here diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index c86f30267..d77a82431 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -28,7 +28,7 @@ use tokio_core::reactor::{Handle, Remote, Interval}; use tokio_core::net::{TcpListener, TcpStream}; use ethkey::{Public, KeyPair, Signature, Random, Generator}; use util::H256; -use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage}; +use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, KeyServerSet}; use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, GenerationSessionWrapper, EncryptionSessionWrapper, DecryptionSessionWrapper, SigningSessionWrapper}; use key_server_cluster::message::{self, Message, ClusterMessage, GenerationMessage, EncryptionMessage, DecryptionMessage, @@ -102,8 +102,8 @@ pub struct ClusterConfiguration { pub self_key_pair: KeyPair, /// Interface to listen to. pub listen_address: (String, u16), - /// Cluster nodes. - pub nodes: BTreeMap, + /// Cluster nodes set. + pub key_server_set: Arc, /// Reference to key storage pub key_storage: Arc, /// Reference to ACL storage @@ -158,9 +158,17 @@ pub struct ClusterConnections { /// Self node id. pub self_node_id: NodeId, /// All known other key servers. - pub nodes: BTreeMap, + pub key_server_set: Arc, + /// Connections data. + pub data: RwLock, +} + +/// Cluster connections data. +pub struct ClusterConnectionsData { + /// Active key servers set. + pub nodes: BTreeMap, /// Active connections to key servers. - pub connections: RwLock>>, + pub connections: BTreeMap>, } /// Cluster view core. @@ -281,8 +289,7 @@ impl ClusterCore { /// Accept connection future. 
fn accept_connection_future(handle: &Handle, data: Arc, stream: TcpStream, node_address: SocketAddr) -> BoxedEmptyFuture { - let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect(); - net_accept_connection(node_address, stream, handle, data.self_key_pair.clone(), disconnected_nodes) + net_accept_connection(node_address, stream, handle, data.self_key_pair.clone()) .then(move |result| ClusterCore::process_connection_result(data, true, result)) .then(|_| finished(())) .boxed() @@ -354,6 +361,7 @@ impl ClusterCore { /// Try to connect to every disconnected node. fn connect_disconnected_nodes(data: Arc) { + data.connections.update_nodes_set(); for (node_id, node_address) in data.connections.disconnected_nodes() { if data.config.allow_connecting_to_higher_nodes || data.self_key_pair.public() < &node_id { ClusterCore::connect(data.clone(), node_address); @@ -372,14 +380,16 @@ impl ClusterCore { finished(Ok(())).boxed() } }, - Ok(DeadlineStatus::Meet(Err(_))) => { + Ok(DeadlineStatus::Meet(Err(err))) => { + warn!(target: "secretstore_net", "{}: protocol error {} when establishing connection", data.self_key_pair.public(), err); finished(Ok(())).boxed() }, Ok(DeadlineStatus::Timeout) => { + warn!(target: "secretstore_net", "{}: timeout when establishing connection", data.self_key_pair.public()); finished(Ok(())).boxed() }, - Err(_) => { - // network error + Err(err) => { + warn!(target: "secretstore_net", "{}: network error {} when establishing connection", data.self_key_pair.public(), err); finished(Ok(())).boxed() }, } @@ -665,33 +675,38 @@ impl ClusterCore { impl ClusterConnections { pub fn new(config: &ClusterConfiguration) -> Result { - let mut connections = ClusterConnections { + let mut nodes = config.key_server_set.get(); + nodes.remove(config.self_key_pair.public()); + + Ok(ClusterConnections { self_node_id: config.self_key_pair.public().clone(), - nodes: BTreeMap::new(), - connections: RwLock::new(BTreeMap::new()), - }; - - for 
(node_id, &(ref node_addr, node_port)) in config.nodes.iter().filter(|&(node_id, _)| node_id != config.self_key_pair.public()) { - let socket_address = make_socket_address(&node_addr, node_port)?; - connections.nodes.insert(node_id.clone(), socket_address); - } - - Ok(connections) + key_server_set: config.key_server_set.clone(), + data: RwLock::new(ClusterConnectionsData { + nodes: nodes, + connections: BTreeMap::new(), + }), + }) } pub fn cluster_state(&self) -> ClusterState { ClusterState { - connected: self.connections.read().keys().cloned().collect(), + connected: self.data.read().connections.keys().cloned().collect(), } } pub fn get(&self, node: &NodeId) -> Option> { - self.connections.read().get(node).cloned() + self.data.read().connections.get(node).cloned() } pub fn insert(&self, connection: Arc) -> bool { - let mut connections = self.connections.write(); - if connections.contains_key(connection.node_id()) { + let mut data = self.data.write(); + if !data.nodes.contains_key(connection.node_id()) { + // incoming connections are checked here + trace!(target: "secretstore_net", "{}: ignoring unknown connection from {} at {}", self.self_node_id, connection.node_id(), connection.node_address()); + debug_assert!(connection.is_inbound()); + return false; + } + if data.connections.contains_key(connection.node_id()) { // we have already connected to the same node // the agreement is that node with lower id must establish connection to node with higher id if (&self.self_node_id < connection.node_id() && connection.is_inbound()) @@ -700,14 +715,15 @@ impl ClusterConnections { } } - trace!(target: "secretstore_net", "{}: inserting connection to {} at {}", self.self_node_id, connection.node_id(), connection.node_address()); - connections.insert(connection.node_id().clone(), connection); + trace!(target: "secretstore_net", "{}: inserting connection to {} at {}. 
Connected to {} of {} nodes", + self.self_node_id, connection.node_id(), connection.node_address(), data.connections.len() + 1, data.nodes.len()); + data.connections.insert(connection.node_id().clone(), connection); true } pub fn remove(&self, node: &NodeId, is_inbound: bool) { - let mut connections = self.connections.write(); - if let Entry::Occupied(entry) = connections.entry(node.clone()) { + let mut data = self.data.write(); + if let Entry::Occupied(entry) = data.connections.entry(node.clone()) { if entry.get().is_inbound() != is_inbound { return; } @@ -718,20 +734,64 @@ impl ClusterConnections { } pub fn connected_nodes(&self) -> BTreeSet { - self.connections.read().keys().cloned().collect() + self.data.read().connections.keys().cloned().collect() } pub fn active_connections(&self)-> Vec> { - self.connections.read().values().cloned().collect() + self.data.read().connections.values().cloned().collect() } pub fn disconnected_nodes(&self) -> BTreeMap { - let connections = self.connections.read(); - self.nodes.iter() - .filter(|&(node_id, _)| !connections.contains_key(node_id)) + let data = self.data.read(); + data.nodes.iter() + .filter(|&(node_id, _)| !data.connections.contains_key(node_id)) .map(|(node_id, node_address)| (node_id.clone(), node_address.clone())) .collect() } + + pub fn update_nodes_set(&self) { + let mut data = self.data.write(); + let mut new_nodes = self.key_server_set.get(); + // we do not need to connect to self + // + we do not need to try to connect to any other node if we are not the part of a cluster + if new_nodes.remove(&self.self_node_id).is_none() { + new_nodes.clear(); + } + + let mut num_added_nodes = 0; + let mut num_removed_nodes = 0; + let mut num_changed_nodes = 0; + + for obsolete_node in data.nodes.keys().cloned().collect::>() { + if !new_nodes.contains_key(&obsolete_node) { + if let Entry::Occupied(entry) = data.connections.entry(obsolete_node) { + trace!(target: "secretstore_net", "{}: removing connection to {} at {}", 
self.self_node_id, entry.get().node_id(), entry.get().node_address()); + entry.remove(); + } + + data.nodes.remove(&obsolete_node); + num_removed_nodes += 1; + } + } + + for (new_node_public, new_node_addr) in new_nodes { + match data.nodes.insert(new_node_public, new_node_addr) { + None => num_added_nodes += 1, + Some(old_node_addr) => if new_node_addr != old_node_addr { + if let Entry::Occupied(entry) = data.connections.entry(new_node_public) { + trace!(target: "secretstore_net", "{}: removing connection to {} at {}", self.self_node_id, entry.get().node_id(), entry.get().node_address()); + entry.remove(); + } + num_changed_nodes += 1; + }, + } + } + + if num_added_nodes != 0 || num_removed_nodes != 0 || num_changed_nodes != 0 { + trace!(target: "secretstore_net", "{}: updated nodes set: removed {}, added {}, changed {}. Connected to {} of {} nodes", + self.self_node_id, num_removed_nodes, num_added_nodes, num_changed_nodes, data.connections.len(), data.nodes.len()); + } + } } impl ClusterData { @@ -929,7 +989,7 @@ pub mod tests { use parking_lot::Mutex; use tokio_core::reactor::Core; use ethkey::{Random, Generator, Public}; - use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage}; + use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet}; use key_server_cluster::message::Message; use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration}; use key_server_cluster::generation_session::{Session as GenerationSession, SessionState as GenerationSessionState}; @@ -999,7 +1059,7 @@ pub mod tests { } pub fn all_connections_established(cluster: &Arc) -> bool { - cluster.config().nodes.keys() + cluster.config().key_server_set.get().keys() .filter(|p| *p != cluster.config().self_key_pair.public()) .all(|p| cluster.connection(p).is_some()) } @@ -1010,9 +1070,9 @@ pub mod tests { threads: 1, self_key_pair: key_pairs[i].clone(), listen_address: ("127.0.0.1".to_owned(), ports_begin + i 
as u16), - nodes: key_pairs.iter().enumerate() - .map(|(j, kp)| (kp.public().clone(), ("127.0.0.1".into(), ports_begin + j as u16))) - .collect(), + key_server_set: Arc::new(MapKeyServerSet::new(key_pairs.iter().enumerate() + .map(|(j, kp)| (kp.public().clone(), format!("127.0.0.1:{}", ports_begin + j as u16).parse().unwrap())) + .collect())), allow_connecting_to_higher_nodes: false, key_storage: Arc::new(DummyKeyStorage::default()), acl_storage: Arc::new(DummyAclStorage::default()), diff --git a/secret_store/src/key_server_cluster/cluster_sessions.rs b/secret_store/src/key_server_cluster/cluster_sessions.rs index f66ad972f..f8e4974b1 100644 --- a/secret_store/src/key_server_cluster/cluster_sessions.rs +++ b/secret_store/src/key_server_cluster/cluster_sessions.rs @@ -135,7 +135,7 @@ impl ClusterSessions { pub fn new(config: &ClusterConfiguration) -> Self { ClusterSessions { self_node_id: config.self_key_pair.public().clone(), - nodes: config.nodes.keys().cloned().collect(), + nodes: config.key_server_set.get().keys().cloned().collect(), acl_storage: config.acl_storage.clone(), key_storage: config.key_storage.clone(), generation_sessions: ClusterSessionsContainer::new(), diff --git a/secret_store/src/key_server_cluster/io/handshake.rs b/secret_store/src/key_server_cluster/io/handshake.rs index 38d8a6ac1..df8f6cbf7 100644 --- a/secret_store/src/key_server_cluster/io/handshake.rs +++ b/secret_store/src/key_server_cluster/io/handshake.rs @@ -45,7 +45,7 @@ pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Resul state: state, self_key_pair: self_key_pair, self_confirmation_plain: self_confirmation_plain.unwrap_or(Default::default()), - trusted_nodes: trusted_nodes, + trusted_nodes: Some(trusted_nodes), other_node_id: None, other_confirmation_plain: None, shared_key: None, @@ -53,7 +53,7 @@ pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Resul } /// Wait for handshake procedure to be started by another node from the cluster. 
-pub fn accept_handshake(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { +pub fn accept_handshake(a: A, self_key_pair: KeyPair) -> Handshake where A: AsyncWrite + AsyncRead { let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); let (error, state) = match self_confirmation_plain.clone() { Ok(_) => (None, HandshakeState::ReceivePublicKey(read_message(a))), @@ -66,7 +66,7 @@ pub fn accept_handshake(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet state: state, self_key_pair: self_key_pair, self_confirmation_plain: self_confirmation_plain.unwrap_or(Default::default()), - trusted_nodes: trusted_nodes, + trusted_nodes: None, other_node_id: None, other_confirmation_plain: None, shared_key: None, @@ -89,7 +89,7 @@ pub struct Handshake { state: HandshakeState, self_key_pair: KeyPair, self_confirmation_plain: H256, - trusted_nodes: BTreeSet, + trusted_nodes: Option>, other_node_id: Option, other_confirmation_plain: Option, shared_key: Option, @@ -172,7 +172,7 @@ impl Future for Handshake where A: AsyncRead + AsyncWrite { Err(err) => return Ok((stream, Err(err.into())).into()), }; - if !self.trusted_nodes.contains(&*message.node_id) { + if !self.trusted_nodes.as_ref().map(|tn| tn.contains(&*message.node_id)).unwrap_or(true) { return Ok((stream, Err(Error::InvalidNodeId)).into()); } @@ -300,7 +300,7 @@ mod tests { let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect(); let shared_key = compute_shared_key(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap(); - let mut handshake = accept_handshake(io, self_key_pair, trusted_nodes); + let mut handshake = accept_handshake(io, self_key_pair); handshake.set_self_confirmation_plain(self_confirmation_plain); let handshake_result = handshake.wait().unwrap(); diff --git a/secret_store/src/key_server_cluster/jobs/job_session.rs b/secret_store/src/key_server_cluster/jobs/job_session.rs 
index 7ae1da42a..6608397dd 100644 --- a/secret_store/src/key_server_cluster/jobs/job_session.rs +++ b/secret_store/src/key_server_cluster/jobs/job_session.rs @@ -299,22 +299,22 @@ impl JobSession where Executor: JobExe return Err(Error::ConsensusUnreachable); } - let active_data = self.data.active_data.as_mut() - .expect("we have checked that we are on master node; on master nodes active_data is filled during initialization; qed"); - if active_data.rejects.contains(node) { - return Ok(()); - } - if active_data.requests.remove(node) || active_data.responses.remove(node).is_some() { - active_data.rejects.insert(node.clone()); - if self.data.state == JobSessionState::Finished && active_data.responses.len() < self.meta.threshold + 1 { - self.data.state = JobSessionState::Active; - } - if active_data.requests.len() + active_data.responses.len() >= self.meta.threshold + 1 { + if let Some(active_data) = self.data.active_data.as_mut() { + if active_data.rejects.contains(node) { return Ok(()); } + if active_data.requests.remove(node) || active_data.responses.remove(node).is_some() { + active_data.rejects.insert(node.clone()); + if self.data.state == JobSessionState::Finished && active_data.responses.len() < self.meta.threshold + 1 { + self.data.state = JobSessionState::Active; + } + if active_data.requests.len() + active_data.responses.len() >= self.meta.threshold + 1 { + return Ok(()); + } - self.data.state = JobSessionState::Failed; - return Err(Error::ConsensusUnreachable); + self.data.state = JobSessionState::Failed; + return Err(Error::ConsensusUnreachable); + } } Ok(()) diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index 71c505f95..8f6ae4add 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -23,6 +23,7 @@ use super::types::all::ServerKeyId; pub use super::types::all::{NodeId, EncryptedDocumentKeyShadow}; pub use super::acl_storage::AclStorage; pub use 
super::key_storage::{KeyStorage, DocumentKeyShare}; +pub use super::key_server_set::KeyServerSet; pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic, SerializableMessageHash}; pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient}; pub use self::generation_session::Session as GenerationSession; @@ -33,6 +34,8 @@ pub use self::decryption_session::Session as DecryptionSession; pub use super::key_storage::tests::DummyKeyStorage; #[cfg(test)] pub use super::acl_storage::tests::DummyAclStorage; +#[cfg(test)] +pub use super::key_server_set::tests::MapKeyServerSet; pub type SessionId = ServerKeyId; diff --git a/secret_store/src/key_server_cluster/net/accept_connection.rs b/secret_store/src/key_server_cluster/net/accept_connection.rs index 0daa8b2da..339625f3f 100644 --- a/secret_store/src/key_server_cluster/net/accept_connection.rs +++ b/secret_store/src/key_server_cluster/net/accept_connection.rs @@ -17,19 +17,18 @@ use std::io; use std::net::SocketAddr; use std::time::Duration; -use std::collections::BTreeSet; use futures::{Future, Poll}; use tokio_core::reactor::Handle; use tokio_core::net::TcpStream; use ethkey::KeyPair; -use key_server_cluster::{Error, NodeId}; +use key_server_cluster::Error; use key_server_cluster::io::{accept_handshake, Handshake, Deadline, deadline}; use key_server_cluster::net::Connection; /// Create future for accepting incoming connection. 
-pub fn accept_connection(address: SocketAddr, stream: TcpStream, handle: &Handle, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Deadline { +pub fn accept_connection(address: SocketAddr, stream: TcpStream, handle: &Handle, self_key_pair: KeyPair) -> Deadline { let accept = AcceptConnection { - handshake: accept_handshake(stream, self_key_pair, trusted_nodes), + handshake: accept_handshake(stream, self_key_pair), address: address, }; diff --git a/secret_store/src/key_server_set.rs b/secret_store/src/key_server_set.rs new file mode 100644 index 000000000..e17dceed5 --- /dev/null +++ b/secret_store/src/key_server_set.rs @@ -0,0 +1,204 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::{Arc, Weak}; +use std::net::SocketAddr; +use std::collections::BTreeMap; +use futures::{future, Future}; +use parking_lot::Mutex; +use ethcore::filter::Filter; +use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; +use native_contracts::KeyServerSet as KeyServerSetContract; +use util::{H256, Address, Bytes, Hashable}; +use types::all::{Error, Public, NodeAddress}; + +const KEY_SERVER_SET_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_server_set"; + +/// Key server has been added to the set. +const ADDED_EVENT_NAME: &'static [u8] = &*b"KeyServerAdded(address)"; +/// Key server has been removed from the set. 
+const REMOVED_EVENT_NAME: &'static [u8] = &*b"KeyServerRemoved(address)"; + +lazy_static! { + static ref ADDED_EVENT_NAME_HASH: H256 = ADDED_EVENT_NAME.sha3(); + static ref REMOVED_EVENT_NAME_HASH: H256 = REMOVED_EVENT_NAME.sha3(); +} + +/// Key Server set +pub trait KeyServerSet: Send + Sync { + /// Get set of configured key servers + fn get(&self) -> BTreeMap; +} + +/// On-chain Key Server set implementation. +pub struct OnChainKeyServerSet { + /// Cached on-chain contract. + contract: Mutex, +} + +/// Cached on-chain Key Server set contract. +struct CachedContract { + /// Blockchain client. + client: Weak, + /// Contract address. + contract_addr: Option
, + /// Active set of key servers. + key_servers: BTreeMap, +} + +impl OnChainKeyServerSet { + pub fn new(client: &Arc, key_servers: BTreeMap) -> Result, Error> { + let mut cached_contract = CachedContract::new(client, key_servers)?; + let key_server_contract_address = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned()); + // only initialize from contract if it is installed. otherwise - use default nodes + // once the contract is installed, all default nodes are lost (if not in the contract' set) + if key_server_contract_address.is_some() { + cached_contract.read_from_registry(&*client, key_server_contract_address); + } + + let key_server_set = Arc::new(OnChainKeyServerSet { + contract: Mutex::new(cached_contract), + }); + client.add_notify(key_server_set.clone()); + Ok(key_server_set) + } +} + +impl KeyServerSet for OnChainKeyServerSet { + fn get(&self) -> BTreeMap { + self.contract.lock().get() + } +} + +impl ChainNotify for OnChainKeyServerSet { + fn new_blocks(&self, _imported: Vec, _invalid: Vec, enacted: Vec, retracted: Vec, _sealed: Vec, _proposed: Vec, _duration: u64) { + if !enacted.is_empty() || !retracted.is_empty() { + self.contract.lock().update(enacted, retracted) + } + } +} + +impl CachedContract { + pub fn new(client: &Arc, key_servers: BTreeMap) -> Result { + Ok(CachedContract { + client: Arc::downgrade(client), + contract_addr: None, + key_servers: key_servers.into_iter() + .map(|(p, addr)| { + let addr = format!("{}:{}", addr.address, addr.port).parse() + .map_err(|err| Error::Internal(format!("error parsing node address: {}", err)))?; + Ok((p, addr)) + }) + .collect::, Error>>()?, + }) + } + + pub fn update(&mut self, enacted: Vec, retracted: Vec) { + if let Some(client) = self.client.upgrade() { + let new_contract_addr = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned()); + + // new contract installed => read nodes set from the contract + if self.contract_addr.as_ref() != new_contract_addr.as_ref() { 
+ self.read_from_registry(&*client, new_contract_addr); + return; + } + + // check for contract events + let is_set_changed = self.contract_addr.is_some() && enacted.iter() + .chain(retracted.iter()) + .any(|block_hash| !client.logs(Filter { + from_block: BlockId::Hash(block_hash.clone()), + to_block: BlockId::Hash(block_hash.clone()), + address: self.contract_addr.clone().map(|a| vec![a]), + topics: vec![ + Some(vec![*ADDED_EVENT_NAME_HASH, *REMOVED_EVENT_NAME_HASH]), + None, + None, + None, + ], + limit: Some(1), + }).is_empty()); + // to simplify processing - just re-read the whole nodes set from the contract + if is_set_changed { + self.read_from_registry(&*client, new_contract_addr); + } + } + } + + pub fn get(&self) -> BTreeMap { + self.key_servers.clone() + } + + fn read_from_registry(&mut self, client: &Client, new_contract_address: Option
) { + self.key_servers = new_contract_address.map(|contract_addr| { + trace!(target: "secretstore", "Configuring for key server set contract from {}", contract_addr); + + KeyServerSetContract::new(contract_addr) + }) + .map(|contract| { + let mut key_servers = BTreeMap::new(); + let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d)); + let key_servers_list = contract.get_key_servers(do_call).wait() + .map_err(|err| { trace!(target: "secretstore", "Error {} reading list of key servers from contract", err); err }) + .unwrap_or_default(); + for key_server in key_servers_list { + let key_server_public = contract.get_key_server_public( + |a, d| future::done(client.call_contract(BlockId::Latest, a, d)), key_server).wait() + .and_then(|p| if p.len() == 64 { Ok(Public::from_slice(&p)) } else { Err(format!("Invalid public length {}", p.len())) }); + let key_server_ip = contract.get_key_server_address( + |a, d| future::done(client.call_contract(BlockId::Latest, a, d)), key_server).wait() + .and_then(|a| a.parse().map_err(|e| format!("Invalid ip address: {}", e))); + + // only add successfully parsed nodes + match (key_server_public, key_server_ip) { + (Ok(key_server_public), Ok(key_server_ip)) => { key_servers.insert(key_server_public, key_server_ip); }, + (Err(public_err), _) => warn!(target: "secretstore_net", "received invalid public from key server set contract: {}", public_err), + (_, Err(ip_err)) => warn!(target: "secretstore_net", "received invalid IP from key server set contract: {}", ip_err), + } + } + key_servers + }) + .unwrap_or_default(); + self.contract_addr = new_contract_address; + } +} + +#[cfg(test)] +pub mod tests { + use std::collections::BTreeMap; + use std::net::SocketAddr; + use ethkey::Public; + use super::KeyServerSet; + + #[derive(Default)] + pub struct MapKeyServerSet { + nodes: BTreeMap, + } + + impl MapKeyServerSet { + pub fn new(nodes: BTreeMap) -> Self { + MapKeyServerSet { + nodes: nodes, + } + } + } + + impl 
KeyServerSet for MapKeyServerSet { + fn get(&self) -> BTreeMap { + self.nodes.clone() + } + } +} diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index f8a74dd1a..9750f7223 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -21,6 +21,8 @@ extern crate log; extern crate futures; extern crate futures_cpupool; extern crate hyper; +#[macro_use] +extern crate lazy_static; extern crate parking_lot; extern crate rustc_hex; extern crate serde; @@ -56,6 +58,7 @@ mod http_listener; mod key_server; mod key_storage; mod serialization; +mod key_server_set; use std::sync::Arc; use ethcore::client::Client; @@ -68,9 +71,10 @@ pub use traits::{KeyServer}; pub fn start(client: Arc, config: ServiceConfiguration) -> Result, Error> { use std::sync::Arc; - let acl_storage = Arc::new(acl_storage::OnChainAclStorage::new(client)); + let acl_storage = acl_storage::OnChainAclStorage::new(&client); + let key_server_set = key_server_set::OnChainKeyServerSet::new(&client, config.cluster_config.nodes.clone())?; let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?); - let key_server = key_server::KeyServerImpl::new(&config.cluster_config, acl_storage, key_storage)?; + let key_server = key_server::KeyServerImpl::new(&config.cluster_config, key_server_set, acl_storage, key_storage)?; let listener = http_listener::KeyServerHttpListener::start(&config.listener_address, key_server)?; Ok(Box::new(listener)) } From afbda7baa8a17add547d7b39635bb8d29a88716f Mon Sep 17 00:00:00 2001 From: Afri <5chdn@users.noreply.github.com> Date: Tue, 8 Aug 2017 20:18:00 +0200 Subject: [PATCH 041/112] Bump master to 1.8.0 (#6256) * Bump master to 1.8.0 * Use jsonrpc-core to avoid breaking master * Update Cargo.lock --- Cargo.lock | 2 +- Cargo.toml | 2 +- mac/Parity.pkgproj | 2 +- mac/Parity/Info.plist | 2 +- nsis/installer.nsi | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e705e39f9..82fbf0ae9 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -1787,7 +1787,7 @@ dependencies = [ [[package]] name = "parity" -version = "1.7.0" +version = "1.8.0" dependencies = [ "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/Cargo.toml b/Cargo.toml index 03d9c9664..3adaf62d4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Parity Ethereum client" name = "parity" -version = "1.7.0" +version = "1.8.0" license = "GPL-3.0" authors = ["Parity Technologies "] build = "build.rs" diff --git a/mac/Parity.pkgproj b/mac/Parity.pkgproj index cc7810dba..1c87af629 100755 --- a/mac/Parity.pkgproj +++ b/mac/Parity.pkgproj @@ -462,7 +462,7 @@ OVERWRITE_PERMISSIONS VERSION - 1.7.0 + 1.8.0 UUID 2DCD5B81-7BAF-4DA1-9251-6274B089FD36 diff --git a/mac/Parity/Info.plist b/mac/Parity/Info.plist index 017939aec..dba951778 100644 --- a/mac/Parity/Info.plist +++ b/mac/Parity/Info.plist @@ -17,7 +17,7 @@ CFBundlePackageType APPL CFBundleShortVersionString - 1.6 + 1.8 CFBundleVersion 1 LSApplicationCategoryType diff --git a/nsis/installer.nsi b/nsis/installer.nsi index 7173beccc..5b7940302 100644 --- a/nsis/installer.nsi +++ b/nsis/installer.nsi @@ -9,7 +9,7 @@ !define COMPANYNAME "Parity" !define DESCRIPTION "Fast, light, robust Ethereum implementation" !define VERSIONMAJOR 1 -!define VERSIONMINOR 7 +!define VERSIONMINOR 8 !define VERSIONBUILD 0 !define ARGS "--warp" !define FIRST_START_ARGS "ui --warp --mode=passive" From 72fa6a79a244d6aa396704a59c935f21e99d39c0 Mon Sep 17 00:00:00 2001 From: Afri <5chdn@users.noreply.github.com> Date: Wed, 9 Aug 2017 08:43:40 +0200 Subject: [PATCH 042/112] Add GitHub issue templates. 
(#6259) * Prepare GH templates for contributors * Add GH issue template * Add homebrew as option --- .github/ISSUE_TEMPLATE.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE.md diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 000000000..f4b5311d5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,12 @@ +_Before filing a new issue, please **provide the following information**._ + +> I'm running: +> +> - **Parity version**: 0.0.0 +> - **Operating system**: Windows / MacOS / Linux +> - **And installed**: via installer / homebrew / binaries / from source + +_Your issue description goes here below. Try to include **actual** vs. **expected behavior** and **steps to reproduce** the issue._ + +--- + From e93466c897bf2a2eeb8e47b916417bf1f7e0990b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 9 Aug 2017 08:45:07 +0200 Subject: [PATCH 043/112] Using multiple NTP servers (#6173) * Small improvements to time estimation. * Allow multiple NTP servers to be used. * Removing boxing. * Be nice. * Be nicer. * Update list of servers and add reference. 
--- dapps/src/api/api.rs | 3 +- dapps/src/api/time.rs | 158 +++++++++++++++++++++++++-------- dapps/src/lib.rs | 14 +-- dapps/src/tests/helpers/mod.rs | 2 +- parity/cli/config.toml | 2 +- parity/cli/mod.rs | 10 +-- parity/cli/usage.txt | 8 +- parity/configuration.rs | 29 ++++-- parity/dapps.rs | 27 +++--- parity/rpc.rs | 9 +- parity/run.rs | 4 +- 11 files changed, 188 insertions(+), 78 deletions(-) diff --git a/dapps/src/api/api.rs b/dapps/src/api/api.rs index 3f1c50de8..7bd7fa049 100644 --- a/dapps/src/api/api.rs +++ b/dapps/src/api/api.rs @@ -21,7 +21,7 @@ use hyper::method::Method; use hyper::status::StatusCode; use api::{response, types}; -use api::time::TimeChecker; +use api::time::{TimeChecker, MAX_DRIFT}; use apps::fetcher::Fetcher; use handlers::{self, extract_url}; use endpoint::{Endpoint, Handler, EndpointPath}; @@ -122,7 +122,6 @@ impl RestApiRouter { // Check time let time = { - const MAX_DRIFT: i64 = 500; let (status, message, details) = match time { Ok(Ok(diff)) if diff < MAX_DRIFT && diff > -MAX_DRIFT => { (HealthStatus::Ok, "".into(), diff) diff --git a/dapps/src/api/time.rs b/dapps/src/api/time.rs index 3117f4cc9..06b9cee7f 100644 --- a/dapps/src/api/time.rs +++ b/dapps/src/api/time.rs @@ -33,11 +33,13 @@ use std::io; use std::{fmt, mem, time}; -use std::sync::Arc; use std::collections::VecDeque; +use std::sync::atomic::{self, AtomicUsize}; +use std::sync::Arc; use futures::{self, Future, BoxFuture}; -use futures_cpupool::CpuPool; +use futures::future::{self, IntoFuture}; +use futures_cpupool::{CpuPool, CpuFuture}; use ntp; use time::{Duration, Timespec}; use util::RwLock; @@ -45,6 +47,8 @@ use util::RwLock; /// Time checker error. #[derive(Debug, Clone, PartialEq)] pub enum Error { + /// No servers are currently available for a query. + NoServersAvailable, /// There was an error when trying to reach the NTP server. Ntp(String), /// IO error when reading NTP response. 
@@ -56,6 +60,7 @@ impl fmt::Display for Error { use self::Error::*; match *self { + NoServersAvailable => write!(fmt, "No NTP servers available"), Ntp(ref err) => write!(fmt, "NTP error: {}", err), Io(ref err) => write!(fmt, "Connection Error: {}", err), } @@ -72,58 +77,123 @@ impl From for Error { /// NTP time drift checker. pub trait Ntp { + /// Returned Future. + type Future: IntoFuture; + /// Returns the current time drift. - fn drift(&self) -> BoxFuture; + fn drift(&self) -> Self::Future; +} + +const SERVER_MAX_POLL_INTERVAL_SECS: u64 = 60; +#[derive(Debug)] +struct Server { + pub address: String, + next_call: RwLock, + failures: AtomicUsize, +} + +impl Server { + pub fn is_available(&self) -> bool { + *self.next_call.read() < time::Instant::now() + } + + pub fn report_success(&self) { + self.failures.store(0, atomic::Ordering::SeqCst); + self.update_next_call(1) + } + + pub fn report_failure(&self) { + let errors = self.failures.fetch_add(1, atomic::Ordering::SeqCst); + self.update_next_call(1 << errors) + } + + fn update_next_call(&self, delay: usize) { + *self.next_call.write() = time::Instant::now() + time::Duration::from_secs(delay as u64 * SERVER_MAX_POLL_INTERVAL_SECS); + } +} + +impl> From for Server { + fn from(t: T) -> Self { + Server { + address: t.as_ref().to_owned(), + next_call: RwLock::new(time::Instant::now()), + failures: Default::default(), + } + } } /// NTP client using the SNTP algorithm for calculating drift. 
#[derive(Clone)] pub struct SimpleNtp { - address: Arc, + addresses: Vec>, pool: CpuPool, } impl fmt::Debug for SimpleNtp { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Ntp {{ address: {} }}", self.address) + f + .debug_struct("SimpleNtp") + .field("addresses", &self.addresses) + .finish() } } impl SimpleNtp { - fn new(address: &str, pool: CpuPool) -> SimpleNtp { + fn new>(addresses: &[T], pool: CpuPool) -> SimpleNtp { SimpleNtp { - address: Arc::new(address.to_owned()), + addresses: addresses.iter().map(Server::from).map(Arc::new).collect(), pool: pool, } } } impl Ntp for SimpleNtp { - fn drift(&self) -> BoxFuture { - let address = self.address.clone(); - if &*address == "none" { - return futures::future::err(Error::Ntp("NTP server is not provided.".into())).boxed(); - } + type Future = future::Either< + CpuFuture, + future::FutureResult, + >; - self.pool.spawn_fn(move || { - let packet = ntp::request(&*address)?; - let dest_time = ::time::now_utc().to_timespec(); - let orig_time = Timespec::from(packet.orig_time); - let recv_time = Timespec::from(packet.recv_time); - let transmit_time = Timespec::from(packet.transmit_time); + fn drift(&self) -> Self::Future { + use self::future::Either::{A, B}; - let drift = ((recv_time - orig_time) + (transmit_time - dest_time)) / 2; + let server = self.addresses.iter().find(|server| server.is_available()); + server.map(|server| { + let server = server.clone(); + A(self.pool.spawn_fn(move || { + debug!(target: "dapps", "Fetching time from {}.", server.address); - Ok(drift) - }).boxed() + match ntp::request(&server.address) { + Ok(packet) => { + let dest_time = ::time::now_utc().to_timespec(); + let orig_time = Timespec::from(packet.orig_time); + let recv_time = Timespec::from(packet.recv_time); + let transmit_time = Timespec::from(packet.transmit_time); + + let drift = ((recv_time - orig_time) + (transmit_time - dest_time)) / 2; + + server.report_success(); + Ok(drift) + }, + Err(err) => { + 
server.report_failure(); + Err(err.into()) + }, + } + })) + }).unwrap_or_else(|| B(future::err(Error::NoServersAvailable))) } } // NOTE In a positive scenario first results will be seen after: -// MAX_RESULTS * UPDATE_TIMEOUT_OK_SECS seconds. -const MAX_RESULTS: usize = 7; -const UPDATE_TIMEOUT_OK_SECS: u64 = 30; -const UPDATE_TIMEOUT_ERR_SECS: u64 = 2; +// MAX_RESULTS * UPDATE_TIMEOUT_INCOMPLETE_SECS seconds. +const MAX_RESULTS: usize = 4; +const UPDATE_TIMEOUT_OK_SECS: u64 = 6 * 60 * 60; +const UPDATE_TIMEOUT_WARN_SECS: u64 = 15 * 60; +const UPDATE_TIMEOUT_ERR_SECS: u64 = 60; +const UPDATE_TIMEOUT_INCOMPLETE_SECS: u64 = 10; + +/// Maximal valid time drift. +pub const MAX_DRIFT: i64 = 500; #[derive(Debug, Clone)] /// A time checker. @@ -134,13 +204,13 @@ pub struct TimeChecker { impl TimeChecker { /// Creates new time checker given the NTP server address. - pub fn new(ntp_address: String, pool: CpuPool) -> Self { + pub fn new>(ntp_addresses: &[T], pool: CpuPool) -> Self { let last_result = Arc::new(RwLock::new( // Assume everything is ok at the very beginning. (time::Instant::now(), vec![Ok(0)].into()) )); - let ntp = SimpleNtp::new(&ntp_address, pool); + let ntp = SimpleNtp::new(ntp_addresses, pool); TimeChecker { ntp, @@ -149,22 +219,34 @@ impl TimeChecker { } } -impl TimeChecker { +impl TimeChecker where ::Future: Send + 'static { /// Updates the time pub fn update(&self) -> BoxFuture { + trace!(target: "dapps", "Updating time from NTP."); let last_result = self.last_result.clone(); - self.ntp.drift().then(move |res| { + self.ntp.drift().into_future().then(move |res| { + let res = res.map(|d| d.num_milliseconds()); + + if let Err(Error::NoServersAvailable) = res { + debug!(target: "dapps", "No NTP servers available. Selecting an older result."); + return select_result(last_result.read().1.iter()); + } + + // Update the results. 
let mut results = mem::replace(&mut last_result.write().1, VecDeque::new()); + let has_all_results = results.len() >= MAX_RESULTS; let valid_till = time::Instant::now() + time::Duration::from_secs( - if res.is_ok() && results.len() == MAX_RESULTS { - UPDATE_TIMEOUT_OK_SECS - } else { - UPDATE_TIMEOUT_ERR_SECS + match res { + Ok(time) if has_all_results && time < MAX_DRIFT => UPDATE_TIMEOUT_OK_SECS, + Ok(_) if has_all_results => UPDATE_TIMEOUT_WARN_SECS, + Err(_) if has_all_results => UPDATE_TIMEOUT_ERR_SECS, + _ => UPDATE_TIMEOUT_INCOMPLETE_SECS, } ); + trace!(target: "dapps", "New time drift received: {:?}", res); // Push the result. - results.push_back(res.map(|d| d.num_milliseconds())); + results.push_back(res); while results.len() > MAX_RESULTS { results.pop_front(); } @@ -209,7 +291,7 @@ mod tests { use std::cell::{Cell, RefCell}; use std::time::Instant; use time::Duration; - use futures::{self, BoxFuture, Future}; + use futures::{future, Future}; use super::{Ntp, TimeChecker, Error}; use util::RwLock; @@ -224,9 +306,11 @@ mod tests { } impl Ntp for FakeNtp { - fn drift(&self) -> BoxFuture { + type Future = future::FutureResult; + + fn drift(&self) -> Self::Future { self.1.set(self.1.get() + 1); - futures::future::ok(self.0.borrow_mut().pop().expect("Unexpected call to drift().")).boxed() + future::ok(self.0.borrow_mut().pop().expect("Unexpected call to drift().")) } } diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 0cb7024cc..f34c24cae 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -130,7 +130,7 @@ impl Middleware { /// Creates new middleware for UI server. 
pub fn ui( - ntp_server: &str, + ntp_servers: &[String], pool: CpuPool, remote: Remote, dapps_domain: &str, @@ -146,7 +146,7 @@ impl Middleware { ).embeddable_on(None).allow_dapps(false)); let special = { let mut special = special_endpoints( - ntp_server, + ntp_servers, pool, content_fetcher.clone(), remote.clone(), @@ -171,7 +171,7 @@ impl Middleware { /// Creates new Dapps server middleware. pub fn dapps( - ntp_server: &str, + ntp_servers: &[String], pool: CpuPool, remote: Remote, ui_address: Option<(String, u16)>, @@ -203,7 +203,7 @@ impl Middleware { let special = { let mut special = special_endpoints( - ntp_server, + ntp_servers, pool, content_fetcher.clone(), remote.clone(), @@ -237,8 +237,8 @@ impl http::RequestMiddleware for Middleware { } } -fn special_endpoints( - ntp_server: &str, +fn special_endpoints>( + ntp_servers: &[T], pool: CpuPool, content_fetcher: Arc, remote: Remote, @@ -250,7 +250,7 @@ fn special_endpoints( special.insert(router::SpecialEndpoint::Api, Some(api::RestApi::new( content_fetcher, sync_status, - api::TimeChecker::new(ntp_server.into(), pool), + api::TimeChecker::new(ntp_servers, pool), remote, ))); special diff --git a/dapps/src/tests/helpers/mod.rs b/dapps/src/tests/helpers/mod.rs index 2d9d5f341..38dd82de6 100644 --- a/dapps/src/tests/helpers/mod.rs +++ b/dapps/src/tests/helpers/mod.rs @@ -255,7 +255,7 @@ impl Server { fetch: F, ) -> Result { let middleware = Middleware::dapps( - "pool.ntp.org:123", + &["0.pool.ntp.org:123".into(), "1.pool.ntp.org:123".into()], CpuPool::new(4), remote, signer_address, diff --git a/parity/cli/config.toml b/parity/cli/config.toml index 4af4ca076..08da653de 100644 --- a/parity/cli/config.toml +++ b/parity/cli/config.toml @@ -78,7 +78,7 @@ disable_periodic = true jit = false [misc] -ntp_server = "pool.ntp.org:123" +ntp_servers = ["0.parity.pool.ntp.org:123"] logging = "own_tx=trace" log_file = "/var/log/parity.log" color = true diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 
c10069b02..b978918ce 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -359,8 +359,8 @@ usage! { or |c: &Config| otry!(c.vm).jit.clone(), // -- Miscellaneous Options - flag_ntp_server: String = "none", - or |c: &Config| otry!(c.misc).ntp_server.clone(), + flag_ntp_servers: String = "0.parity.pool.ntp.org:123,1.parity.pool.ntp.org:123,2.parity.pool.ntp.org:123,3.parity.pool.ntp.org:123", + or |c: &Config| otry!(c.misc).ntp_servers.clone().map(|vec| vec.join(",")), flag_logging: Option = None, or |c: &Config| otry!(c.misc).logging.clone().map(Some), flag_log_file: Option = None, @@ -606,7 +606,7 @@ struct VM { #[derive(Default, Debug, PartialEq, Deserialize)] struct Misc { - ntp_server: Option, + ntp_servers: Option>, logging: Option, log_file: Option, color: Option, @@ -919,7 +919,7 @@ mod tests { flag_dapps_apis_all: None, // -- Miscellaneous Options - flag_ntp_server: "none".into(), + flag_ntp_servers: "0.parity.pool.ntp.org:123,1.parity.pool.ntp.org:123,2.parity.pool.ntp.org:123,3.parity.pool.ntp.org:123".into(), flag_version: false, flag_logging: Some("own_tx=trace".into()), flag_log_file: Some("/var/log/parity.log".into()), @@ -1098,7 +1098,7 @@ mod tests { jit: Some(false), }), misc: Some(Misc { - ntp_server: Some("pool.ntp.org:123".into()), + ntp_servers: Some(vec!["0.parity.pool.ntp.org:123".into()]), logging: Some("own_tx=trace".into()), log_file: Some("/var/log/parity.log".into()), color: Some(true), diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index c1d1ab9de..cb090dced 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -78,7 +78,7 @@ Operating Options: Convenience Options: -c --config CONFIG Specify a configuration. CONFIG may be either a - configuration file or a preset: dev, insecure, dev-insecure, + configuration file or a preset: dev, insecure, dev-insecure, mining, or non-standard-ports. (default: {flag_config}). --ports-shift SHIFT Add SHIFT to all port numbers Parity is listening on. 
@@ -483,8 +483,10 @@ Internal Options: --can-restart Executable will auto-restart if exiting with 69. Miscellaneous Options: - --ntp-server HOST NTP server to provide current time (host:port). Used to verify node health. - (default: {flag_ntp_server}) + --ntp-servers HOSTS Comma separated list of NTP servers to provide current time (host:port). + Used to verify node health. Parity uses pool.ntp.org NTP servers, + consider joining the pool: http://www.pool.ntp.org/join.html + (default: {flag_ntp_servers}) -l --logging LOGGING Specify the logging level. Must conform to the same format as RUST_LOG. (default: {flag_logging:?}) --log-file FILENAME Specify a filename into which logging should be diff --git a/parity/configuration.rs b/parity/configuration.rs index 7a80f8f93..b28d2608e 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -551,10 +551,14 @@ impl Configuration { Ok(options) } + fn ntp_servers(&self) -> Vec { + self.args.flag_ntp_servers.split(",").map(str::to_owned).collect() + } + fn ui_config(&self) -> UiConfiguration { UiConfiguration { enabled: self.ui_enabled(), - ntp_server: self.args.flag_ntp_server.clone(), + ntp_servers: self.ntp_servers(), interface: self.ui_interface(), port: self.args.flag_ports_shift + self.args.flag_ui_port, hosts: self.ui_hosts(), @@ -564,7 +568,7 @@ impl Configuration { fn dapps_config(&self) -> DappsConfiguration { DappsConfiguration { enabled: self.dapps_enabled(), - ntp_server: self.args.flag_ntp_server.clone(), + ntp_servers: self.ntp_servers(), dapps_path: PathBuf::from(self.directories().dapps), extra_dapps: if self.args.cmd_dapp { self.args.arg_path.iter().map(|path| PathBuf::from(path)).collect() @@ -1278,7 +1282,12 @@ mod tests { support_token_api: true }, UiConfiguration { enabled: true, - ntp_server: "none".into(), + ntp_servers: vec![ + "0.parity.pool.ntp.org:123".into(), + "1.parity.pool.ntp.org:123".into(), + "2.parity.pool.ntp.org:123".into(), + "3.parity.pool.ntp.org:123".into(), + ], 
interface: "127.0.0.1".into(), port: 8180, hosts: Some(vec![]), @@ -1521,10 +1530,16 @@ mod tests { let conf3 = parse(&["parity", "--ui-path", "signer", "--ui-interface", "test"]); // then + let ntp_servers = vec![ + "0.parity.pool.ntp.org:123".into(), + "1.parity.pool.ntp.org:123".into(), + "2.parity.pool.ntp.org:123".into(), + "3.parity.pool.ntp.org:123".into(), + ]; assert_eq!(conf0.directories().signer, "signer".to_owned()); assert_eq!(conf0.ui_config(), UiConfiguration { enabled: true, - ntp_server: "none".into(), + ntp_servers: ntp_servers.clone(), interface: "127.0.0.1".into(), port: 8180, hosts: Some(vec![]), @@ -1533,7 +1548,7 @@ mod tests { assert_eq!(conf1.directories().signer, "signer".to_owned()); assert_eq!(conf1.ui_config(), UiConfiguration { enabled: true, - ntp_server: "none".into(), + ntp_servers: ntp_servers.clone(), interface: "127.0.0.1".into(), port: 8180, hosts: Some(vec![]), @@ -1543,7 +1558,7 @@ mod tests { assert_eq!(conf2.directories().signer, "signer".to_owned()); assert_eq!(conf2.ui_config(), UiConfiguration { enabled: true, - ntp_server: "none".into(), + ntp_servers: ntp_servers.clone(), interface: "127.0.0.1".into(), port: 3123, hosts: Some(vec![]), @@ -1552,7 +1567,7 @@ mod tests { assert_eq!(conf3.directories().signer, "signer".to_owned()); assert_eq!(conf3.ui_config(), UiConfiguration { enabled: true, - ntp_server: "none".into(), + ntp_servers: ntp_servers.clone(), interface: "test".into(), port: 8180, hosts: Some(vec![]), diff --git a/parity/dapps.rs b/parity/dapps.rs index cec3765f2..a67b3fe3a 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -36,7 +36,7 @@ use util::{Bytes, Address}; #[derive(Debug, PartialEq, Clone)] pub struct Configuration { pub enabled: bool, - pub ntp_server: String, + pub ntp_servers: Vec, pub dapps_path: PathBuf, pub extra_dapps: Vec, pub extra_embed_on: Vec<(String, u16)>, @@ -47,7 +47,12 @@ impl Default for Configuration { let data_dir = default_data_path(); Configuration { enabled: true, - 
ntp_server: "none".into(), + ntp_servers: vec![ + "0.parity.pool.ntp.org:123".into(), + "1.parity.pool.ntp.org:123".into(), + "2.parity.pool.ntp.org:123".into(), + "3.parity.pool.ntp.org:123".into(), + ], dapps_path: replace_home(&data_dir, "$BASE/dapps").into(), extra_dapps: vec![], extra_embed_on: vec![], @@ -158,7 +163,7 @@ pub fn new(configuration: Configuration, deps: Dependencies) -> Result Result Result, String> { +pub fn new_ui(enabled: bool, ntp_servers: &[String], deps: Dependencies) -> Result, String> { if !enabled { return Ok(None); } server::ui_middleware( deps, - ntp_server, + ntp_servers, rpc::DAPPS_DOMAIN, ).map(Some) } @@ -204,7 +209,7 @@ mod server { pub fn dapps_middleware( _deps: Dependencies, - _ntp_server: &str, + _ntp_servers: &[String], _dapps_path: PathBuf, _extra_dapps: Vec, _dapps_domain: &str, @@ -215,7 +220,7 @@ mod server { pub fn ui_middleware( _deps: Dependencies, - _ntp_server: &str, + _ntp_servers: &[String], _dapps_domain: &str, ) -> Result { Err("Your Parity version has been compiled without UI support.".into()) @@ -241,7 +246,7 @@ mod server { pub fn dapps_middleware( deps: Dependencies, - ntp_server: &str, + ntp_servers: &[String], dapps_path: PathBuf, extra_dapps: Vec, dapps_domain: &str, @@ -252,7 +257,7 @@ mod server { let web_proxy_tokens = Arc::new(move |token| signer.web_proxy_access_token_domain(&token)); Ok(parity_dapps::Middleware::dapps( - ntp_server, + ntp_servers, deps.pool, parity_remote, deps.ui_address, @@ -269,12 +274,12 @@ mod server { pub fn ui_middleware( deps: Dependencies, - ntp_server: &str, + ntp_servers: &[String], dapps_domain: &str, ) -> Result { let parity_remote = parity_reactor::Remote::new(deps.remote.clone()); Ok(parity_dapps::Middleware::ui( - ntp_server, + ntp_servers, deps.pool, parity_remote, dapps_domain, diff --git a/parity/rpc.rs b/parity/rpc.rs index b15c331d6..9173e7b2f 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -73,7 +73,7 @@ impl Default for HttpConfiguration { #[derive(Debug, 
PartialEq, Clone)] pub struct UiConfiguration { pub enabled: bool, - pub ntp_server: String, + pub ntp_servers: Vec, pub interface: String, pub port: u16, pub hosts: Option>, @@ -107,7 +107,12 @@ impl Default for UiConfiguration { fn default() -> Self { UiConfiguration { enabled: true && cfg!(feature = "ui-enabled"), - ntp_server: "none".into(), + ntp_servers: vec![ + "0.parity.pool.ntp.org:123".into(), + "1.parity.pool.ntp.org:123".into(), + "2.parity.pool.ntp.org:123".into(), + "3.parity.pool.ntp.org:123".into(), + ], port: 8180, interface: "127.0.0.1".into(), hosts: Some(vec![]), diff --git a/parity/run.rs b/parity/run.rs index c08294a42..0c5f11bbb 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -311,7 +311,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> }; let dapps_middleware = dapps::new(cmd.dapps_conf.clone(), dapps_deps.clone())?; - let ui_middleware = dapps::new_ui(cmd.ui_conf.enabled, &cmd.ui_conf.ntp_server, dapps_deps)?; + let ui_middleware = dapps::new_ui(cmd.ui_conf.enabled, &cmd.ui_conf.ntp_servers, dapps_deps)?; // start RPCs let dapps_service = dapps::service(&dapps_middleware); @@ -687,7 +687,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R } }; let dapps_middleware = dapps::new(cmd.dapps_conf.clone(), dapps_deps.clone())?; - let ui_middleware = dapps::new_ui(cmd.ui_conf.enabled, &cmd.ui_conf.ntp_server, dapps_deps)?; + let ui_middleware = dapps::new_ui(cmd.ui_conf.enabled, &cmd.ui_conf.ntp_servers, dapps_deps)?; let dapps_service = dapps::service(&dapps_middleware); let deps_for_rpc_apis = Arc::new(rpc_apis::FullDependencies { From 78b3d71745776606aa592f2fbc6819c9d2dd12bc Mon Sep 17 00:00:00 2001 From: Marek Kotewicz Date: Wed, 9 Aug 2017 10:57:23 +0200 Subject: [PATCH 044/112] propagate stratum submit share error upstream, fixes #6258 (#6260) --- ethcore/src/miner/stratum.rs | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git 
a/ethcore/src/miner/stratum.rs b/ethcore/src/miner/stratum.rs index e419247f0..0031bb715 100644 --- a/ethcore/src/miner/stratum.rs +++ b/ethcore/src/miner/stratum.rs @@ -32,7 +32,6 @@ use util::Mutex; use miner::{self, Miner, MinerService}; use client::Client; use block::IsBlock; -use std::str::FromStr; use rlp::encode; /// Configures stratum server options. @@ -60,7 +59,7 @@ impl SubmitPayload { return Err(PayloadError::ArgumentsAmountUnexpected(payload.len())); } - let nonce = match H64::from_str(clean_0x(&payload[0])) { + let nonce = match clean_0x(&payload[0]).parse::() { Ok(nonce) => nonce, Err(e) => { warn!(target: "stratum", "submit_work ({}): invalid nonce ({:?})", &payload[0], e); @@ -68,7 +67,7 @@ impl SubmitPayload { } }; - let pow_hash = match H256::from_str(clean_0x(&payload[1])) { + let pow_hash = match clean_0x(&payload[1]).parse::() { Ok(pow_hash) => pow_hash, Err(e) => { warn!(target: "stratum", "submit_work ({}): invalid hash ({:?})", &payload[1], e); @@ -76,7 +75,7 @@ impl SubmitPayload { } }; - let mix_hash = match H256::from_str(clean_0x(&payload[2])) { + let mix_hash = match clean_0x(&payload[2]).parse::() { Ok(mix_hash) => mix_hash, Err(e) => { warn!(target: "stratum", "submit_work ({}): invalid mix-hash ({:?})", &payload[2], e); @@ -133,7 +132,7 @@ impl JobDispatcher for StratumJobDispatcher { fn submit(&self, payload: Vec) -> Result<(), StratumServiceError> { let payload = SubmitPayload::from_args(payload).map_err(|e| - StratumServiceError::Dispatch(format!("{}", e)) + StratumServiceError::Dispatch(e.to_string()) )?; trace!( @@ -144,14 +143,16 @@ impl JobDispatcher for StratumJobDispatcher { payload.mix_hash, ); - self.with_core_void(|client, miner| { + self.with_core_result(|client, miner| { let seal = vec![encode(&payload.mix_hash).into_vec(), encode(&payload.nonce).into_vec()]; - if let Err(e) = miner.submit_seal(&*client, payload.pow_hash, seal) { - warn!(target: "stratum", "submit_seal error: {:?}", e); - }; - }); - - Ok(()) + match 
miner.submit_seal(&*client, payload.pow_hash, seal) { + Ok(_) => Ok(()), + Err(e) => { + warn!(target: "stratum", "submit_seal error: {:?}", e); + Err(StratumServiceError::Dispatch(e.to_string())) + } + } + }) } } @@ -181,8 +182,11 @@ impl StratumJobDispatcher { self.client.upgrade().and_then(|client| self.miner.upgrade().and_then(|miner| (f)(client, miner))) } - fn with_core_void(&self, f: F) where F: Fn(Arc, Arc) { - self.client.upgrade().map(|client| self.miner.upgrade().map(|miner| (f)(client, miner))); + fn with_core_result(&self, f: F) -> Result<(), StratumServiceError> where F: Fn(Arc, Arc) -> Result<(), StratumServiceError> { + match (self.client.upgrade(), self.miner.upgrade()) { + (Some(client), Some(miner)) => f(client, miner), + _ => Ok(()), + } } } @@ -230,7 +234,7 @@ impl Stratum { let dispatcher = Arc::new(StratumJobDispatcher::new(miner, client)); let stratum_svc = StratumService::start( - &SocketAddr::new(IpAddr::from_str(&options.listen_addr)?, options.port), + &SocketAddr::new(options.listen_addr.parse::()?, options.port), dispatcher.clone(), options.secret.clone(), )?; From d209100a60ed7d4f2dcaa1d2b8e0e510ff95fe34 Mon Sep 17 00:00:00 2001 From: Alexey Date: Wed, 9 Aug 2017 11:57:54 +0300 Subject: [PATCH 045/112] Wasm storage read test (#6255) * Test for read from prepopulated storage implemented * wasm-tests submodule update * fix identation * comment for storage_read added --- ethcore/res/wasm-tests | 2 +- ethcore/wasm/src/tests.rs | 38 ++++++++++++++++++++++++++++++++------ 2 files changed, 33 insertions(+), 7 deletions(-) diff --git a/ethcore/res/wasm-tests b/ethcore/res/wasm-tests index 04c9d84c5..18dad7912 160000 --- a/ethcore/res/wasm-tests +++ b/ethcore/res/wasm-tests @@ -1 +1 @@ -Subproject commit 04c9d84c5fe5c3ad707be58664c7e72b97cc9996 +Subproject commit 18dad7912cf42937ff725ffd944d01ff7ca1cce7 diff --git a/ethcore/wasm/src/tests.rs b/ethcore/wasm/src/tests.rs index c12dc8935..401b4db09 100644 --- a/ethcore/wasm/src/tests.rs +++ 
b/ethcore/wasm/src/tests.rs @@ -378,13 +378,39 @@ fn realloc() { let mut ext = FakeExt::new(); let (gas_left, result) = { - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { panic!("Realloc should return payload"); }, - GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), - } + let mut interpreter = wasm_interpreter(); + let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => { panic!("Realloc should return payload"); }, + GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), + } }; assert_eq!(gas_left, U256::from(98326)); assert_eq!(result, vec![0u8; 2]); } + +// Tests that contract's ability to read from a storage +// Test prepopulates address into storage, than executes a contract which read that address from storage and write this address into result +#[test] +fn storage_read() { + let code = load_sample!("storage_read.wasm"); + let address: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); + + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + let mut ext = FakeExt::new(); + ext.store.insert("0100000000000000000000000000000000000000000000000000000000000000".into(), address.into()); + + let (gas_left, result) = { + let mut interpreter = wasm_interpreter(); + let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => { panic!("storage_read should return payload"); }, + GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), + } + }; + + assert_eq!(gas_left, U256::from(99752)); + assert_eq!(Address::from(&result[12..32]), address); +} From 
33ba5b63f394a83619d5288e4b94f3f5a82351cb Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 9 Aug 2017 12:09:40 +0300 Subject: [PATCH 046/112] SecretStore: encrypt messages using private key from key store (#6146) * do not cache ACL storage contract * when error comes before initialization * initial KeyServerSet commit * update_nodes_set in maintain * do not connect to self * fixed connection establishing * removed println * improved KeyServerSet tracing * moved parsing to KeyServerSet * re-read only when blockchain is changed * do not try to connect if not a part of cluster * improved logging * fixed tests * NodeKeyPAir trait * fixed parity to use new trait * continue integrating with parity * updated parity for NodeKeyPair * completed KeyStoreNodeKeyPair * removed comment * removed dependency && style --- ethcore/src/account_provider/mod.rs | 12 +++ ethstore/src/account/safe_account.rs | 9 +- ethstore/src/ethstore.rs | 24 ++++-- ethstore/src/secret_store.rs | 2 + parity/configuration.rs | 11 ++- parity/run.rs | 4 +- parity/secretstore.rs | 56 ++++++++++--- secret_store/src/key_server.rs | 19 +++-- .../src/key_server_cluster/cluster.rs | 10 +-- .../src/key_server_cluster/io/handshake.rs | 48 ++++++----- .../src/key_server_cluster/io/message.rs | 15 ++-- secret_store/src/key_server_cluster/io/mod.rs | 2 +- secret_store/src/key_server_cluster/mod.rs | 3 + .../net/accept_connection.rs | 6 +- .../src/key_server_cluster/net/connect.rs | 8 +- secret_store/src/key_storage.rs | 1 - secret_store/src/lib.rs | 8 +- secret_store/src/node_key_pair.rs | 84 +++++++++++++++++++ secret_store/src/traits.rs | 12 +++ secret_store/src/types/all.rs | 2 - 20 files changed, 257 insertions(+), 79 deletions(-) create mode 100644 secret_store/src/node_key_pair.rs diff --git a/ethcore/src/account_provider/mod.rs b/ethcore/src/account_provider/mod.rs index 249ca40af..752cec964 100755 --- a/ethcore/src/account_provider/mod.rs +++ b/ethcore/src/account_provider/mod.rs @@ -519,6 
+519,11 @@ impl AccountProvider { } } + /// Returns account public key. + pub fn account_public(&self, address: Address, password: &str) -> Result { + self.sstore.public(&self.sstore.account_ref(&address)?, password) + } + /// Returns each account along with name and meta. pub fn set_account_name(&self, address: Address, name: String) -> Result<(), Error> { self.sstore.set_name(&self.sstore.account_ref(&address)?, name)?; @@ -697,6 +702,13 @@ impl AccountProvider { Ok(self.sstore.decrypt(&account, &password, shared_mac, message)?) } + /// Agree on shared key. + pub fn agree(&self, address: Address, password: Option, other_public: &Public) -> Result { + let account = self.sstore.account_ref(&address)?; + let password = password.map(Ok).unwrap_or_else(|| self.password(&account))?; + Ok(self.sstore.agree(&account, &password, other_public)?) + } + /// Returns the underlying `SecretStore` reference if one exists. pub fn list_geth_accounts(&self, testnet: bool) -> Vec
{ self.sstore.list_geth_accounts(testnet).into_iter().map(|a| Address::from(a).into()).collect() diff --git a/ethstore/src/account/safe_account.rs b/ethstore/src/account/safe_account.rs index e0512fe8d..478b796e6 100755 --- a/ethstore/src/account/safe_account.rs +++ b/ethstore/src/account/safe_account.rs @@ -14,7 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use ethkey::{KeyPair, sign, Address, Signature, Message, Public}; +use ethkey::{KeyPair, sign, Address, Signature, Message, Public, Secret}; +use crypto::ecdh::agree; use {json, Error, crypto}; use account::Version; use super::crypto::Crypto; @@ -135,6 +136,12 @@ impl SafeAccount { crypto::ecies::decrypt(&secret, shared_mac, message).map_err(From::from) } + /// Agree on shared key. + pub fn agree(&self, password: &str, other: &Public) -> Result { + let secret = self.crypto.secret(password)?; + agree(&secret, other).map_err(From::from) + } + /// Derive public key. pub fn public(&self, password: &str) -> Result { let secret = self.crypto.secret(password)?; diff --git a/ethstore/src/ethstore.rs b/ethstore/src/ethstore.rs index 246671990..f3bb24071 100755 --- a/ethstore/src/ethstore.rs +++ b/ethstore/src/ethstore.rs @@ -97,6 +97,10 @@ impl SimpleSecretStore for EthStore { self.store.sign_derived(account_ref, password, derivation, message) } + fn agree(&self, account: &StoreAccountRef, password: &str, other: &Public) -> Result { + self.store.agree(account, password, other) + } + fn decrypt(&self, account: &StoreAccountRef, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error> { let account = self.get(account)?; account.decrypt(password, shared_mac, message) @@ -495,18 +499,26 @@ impl SimpleSecretStore for EthMultiStore { fn sign(&self, account: &StoreAccountRef, password: &str, message: &Message) -> Result { let accounts = self.get_matching(account, password)?; - for account in accounts { - return account.sign(password, message); + 
match accounts.first() { + Some(ref account) => account.sign(password, message), + None => Err(Error::InvalidPassword), } - Err(Error::InvalidPassword) } fn decrypt(&self, account: &StoreAccountRef, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error> { let accounts = self.get_matching(account, password)?; - for account in accounts { - return account.decrypt(password, shared_mac, message); + match accounts.first() { + Some(ref account) => account.decrypt(password, shared_mac, message), + None => Err(Error::InvalidPassword), + } + } + + fn agree(&self, account: &StoreAccountRef, password: &str, other: &Public) -> Result { + let accounts = self.get_matching(account, password)?; + match accounts.first() { + Some(ref account) => account.agree(password, other), + None => Err(Error::InvalidPassword), } - Err(Error::InvalidPassword) } fn create_vault(&self, name: &str, password: &str) -> Result<(), Error> { diff --git a/ethstore/src/secret_store.rs b/ethstore/src/secret_store.rs index 2deae023e..e364245b7 100755 --- a/ethstore/src/secret_store.rs +++ b/ethstore/src/secret_store.rs @@ -60,6 +60,8 @@ pub trait SimpleSecretStore: Send + Sync { fn sign_derived(&self, account_ref: &StoreAccountRef, password: &str, derivation: Derivation, message: &Message) -> Result; /// Decrypt a messages with given account. fn decrypt(&self, account: &StoreAccountRef, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error>; + /// Agree on shared key. + fn agree(&self, account: &StoreAccountRef, password: &str, other: &Public) -> Result; /// Returns all accounts in this secret store. 
fn accounts(&self) -> Result, Error>; diff --git a/parity/configuration.rs b/parity/configuration.rs index b28d2608e..be355f394 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -41,7 +41,7 @@ use ethcore_logger::Config as LogConfig; use dir::{self, Directories, default_hypervisor_path, default_local_path, default_data_path}; use dapps::Configuration as DappsConfiguration; use ipfs::Configuration as IpfsConfiguration; -use secretstore::Configuration as SecretStoreConfiguration; +use secretstore::{Configuration as SecretStoreConfiguration, NodeSecretKey}; use updater::{UpdatePolicy, UpdateFilter, ReleaseTrack}; use run::RunCmd; use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, KillBlockchain, ExportState, DataFormat}; @@ -993,10 +993,13 @@ impl Configuration { self.interface(&self.args.flag_secretstore_http_interface) } - fn secretstore_self_secret(&self) -> Result, String> { + fn secretstore_self_secret(&self) -> Result, String> { match self.args.flag_secretstore_secret { - Some(ref s) => Ok(Some(s.parse() - .map_err(|e| format!("Invalid secret store secret: {}. Error: {:?}", s, e))?)), + Some(ref s) if s.len() == 64 => Ok(Some(NodeSecretKey::Plain(s.parse() + .map_err(|e| format!("Invalid secret store secret: {}. Error: {:?}", s, e))?))), + Some(ref s) if s.len() == 40 => Ok(Some(NodeSecretKey::KeyStore(s.parse() + .map_err(|e| format!("Invalid secret store secret address: {}. Error: {:?}", s, e))?))), + Some(_) => Err(format!("Invalid secret store secret. Must be either existing account address, or hex-encoded private key")), None => Ok(None), } } diff --git a/parity/run.rs b/parity/run.rs index 0c5f11bbb..fbf59e680 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -507,7 +507,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R } // Attempt to sign in the engine signer. 
- if !passwords.into_iter().any(|p| miner.set_engine_signer(engine_signer, p).is_ok()) { + if !passwords.iter().any(|p| miner.set_engine_signer(engine_signer, (*p).clone()).is_ok()) { return Err(format!("No valid password for the consensus signer {}. {}", engine_signer, VERIFY_PASSWORD_HINT)); } } @@ -734,6 +734,8 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // secret store key server let secretstore_deps = secretstore::Dependencies { client: client.clone(), + account_provider: account_provider, + accounts_passwords: &passwords, }; let secretstore_key_server = secretstore::start(cmd.secretstore_conf.clone(), secretstore_deps)?; diff --git a/parity/secretstore.rs b/parity/secretstore.rs index f215c937c..def2cd1a6 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -17,9 +17,20 @@ use std::collections::BTreeMap; use std::sync::Arc; use dir::default_data_path; +use ethcore::account_provider::AccountProvider; use ethcore::client::Client; use ethkey::{Secret, Public}; use helpers::replace_home; +use util::Address; + +#[derive(Debug, PartialEq, Clone)] +/// This node secret key. +pub enum NodeSecretKey { + /// Stored as plain text in configuration file. + Plain(Secret), + /// Stored as account in key store. + KeyStore(Address), +} #[derive(Debug, PartialEq, Clone)] /// Secret store configuration @@ -27,7 +38,7 @@ pub struct Configuration { /// Is secret store functionality enabled? pub enabled: bool, /// This node secret. - pub self_secret: Option, + pub self_secret: Option, /// Other nodes IDs + addresses. pub nodes: BTreeMap, /// Interface to listen to @@ -43,9 +54,13 @@ pub struct Configuration { } /// Secret store dependencies -pub struct Dependencies { +pub struct Dependencies<'a> { /// Blockchain client. pub client: Arc, + /// Account provider. + pub account_provider: Arc, + /// Passed accounts passwords. 
+ pub accounts_passwords: &'a [String], } #[cfg(not(feature = "secretstore"))] @@ -65,9 +80,10 @@ mod server { #[cfg(feature="secretstore")] mod server { + use std::sync::Arc; use ethcore_secretstore; use ethkey::KeyPair; - use super::{Configuration, Dependencies}; + use super::{Configuration, Dependencies, NodeSecretKey}; /// Key server pub struct KeyServer { @@ -76,8 +92,31 @@ mod server { impl KeyServer { /// Create new key server - pub fn new(conf: Configuration, deps: Dependencies) -> Result { - let self_secret = conf.self_secret.ok_or("self secret is required when using secretstore")?; + pub fn new(mut conf: Configuration, deps: Dependencies) -> Result { + let self_secret: Arc = match conf.self_secret.take() { + Some(NodeSecretKey::Plain(secret)) => Arc::new(ethcore_secretstore::PlainNodeKeyPair::new( + KeyPair::from_secret(secret).map_err(|e| format!("invalid secret: {}", e))?)), + Some(NodeSecretKey::KeyStore(account)) => { + // Check if account exists + if !deps.account_provider.has_account(account.clone()).unwrap_or(false) { + return Err(format!("Account {} passed as secret store node key is not found", account)); + } + + // Check if any passwords have been read from the password file(s) + if deps.accounts_passwords.is_empty() { + return Err(format!("No password found for the secret store node account {}", account)); + } + + // Attempt to sign in the engine signer. + let password = deps.accounts_passwords.iter() + .find(|p| deps.account_provider.sign(account.clone(), Some((*p).clone()), Default::default()).is_ok()) + .ok_or(format!("No valid password for the secret store node account {}", account))?; + Arc::new(ethcore_secretstore::KeyStoreNodeKeyPair::new(deps.account_provider, account, password.clone()) + .map_err(|e| format!("{}", e))?) 
+ }, + None => return Err("self secret is required when using secretstore".into()), + }; + let mut conf = ethcore_secretstore::ServiceConfiguration { listener_address: ethcore_secretstore::NodeAddress { address: conf.http_interface.clone(), @@ -86,7 +125,6 @@ mod server { data_path: conf.data_path.clone(), cluster_config: ethcore_secretstore::ClusterConfiguration { threads: 4, - self_private: (**self_secret).into(), listener_address: ethcore_secretstore::NodeAddress { address: conf.interface.clone(), port: conf.port, @@ -99,11 +137,9 @@ mod server { }, }; - let self_key_pair = KeyPair::from_secret(self_secret.clone()) - .map_err(|e| format!("valid secret is required when using secretstore. Error: {}", e))?; - conf.cluster_config.nodes.insert(self_key_pair.public().clone(), conf.cluster_config.listener_address.clone()); + conf.cluster_config.nodes.insert(self_secret.public().clone(), conf.cluster_config.listener_address.clone()); - let key_server = ethcore_secretstore::start(deps.client, conf) + let key_server = ethcore_secretstore::start(deps.client, self_secret, conf) .map_err(Into::::into)?; Ok(KeyServer { diff --git a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index c83e460f3..6526bff68 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -26,7 +26,7 @@ use super::acl_storage::AclStorage; use super::key_storage::KeyStorage; use super::key_server_set::KeyServerSet; use key_server_cluster::{math, ClusterCore}; -use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer}; +use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer, NodeKeyPair}; use types::all::{Error, Public, RequestSignature, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow, ClusterConfiguration, MessageHash, EncryptedMessageSignature}; use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration}; @@ -45,9 +45,9 @@ pub struct KeyServerCore { impl KeyServerImpl { /// 
Create new key server instance - pub fn new(config: &ClusterConfiguration, key_server_set: Arc, acl_storage: Arc, key_storage: Arc) -> Result { + pub fn new(config: &ClusterConfiguration, key_server_set: Arc, self_key_pair: Arc, acl_storage: Arc, key_storage: Arc) -> Result { Ok(KeyServerImpl { - data: Arc::new(Mutex::new(KeyServerCore::new(config, key_server_set, acl_storage, key_storage)?)), + data: Arc::new(Mutex::new(KeyServerCore::new(config, key_server_set, self_key_pair, acl_storage, key_storage)?)), }) } @@ -144,10 +144,10 @@ impl MessageSigner for KeyServerImpl { } impl KeyServerCore { - pub fn new(config: &ClusterConfiguration, key_server_set: Arc, acl_storage: Arc, key_storage: Arc) -> Result { + pub fn new(config: &ClusterConfiguration, key_server_set: Arc, self_key_pair: Arc, acl_storage: Arc, key_storage: Arc) -> Result { let config = NetClusterConfiguration { threads: config.threads, - self_key_pair: ethkey::KeyPair::from_secret_slice(&config.self_private)?, + self_key_pair: self_key_pair, listen_address: (config.listener_address.address.clone(), config.listener_address.port), key_server_set: key_server_set, allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes, @@ -198,6 +198,7 @@ pub mod tests { use ethkey::{self, Secret, Random, Generator}; use acl_storage::tests::DummyAclStorage; use key_storage::tests::DummyKeyStorage; + use node_key_pair::PlainNodeKeyPair; use key_server_set::tests::MapKeyServerSet; use key_server_cluster::math; use util::H256; @@ -244,7 +245,6 @@ pub mod tests { let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect(); let configs: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration { threads: 1, - self_private: (***key_pairs[i].secret()).into(), listener_address: NodeAddress { address: "127.0.0.1".into(), port: start_port + (i as u16), @@ -259,8 +259,11 @@ pub mod tests { let key_servers_set: BTreeMap = configs[0].nodes.iter() .map(|(k, a)| (k.clone(), format!("{}:{}", 
a.address, a.port).parse().unwrap())) .collect(); - let key_servers: Vec<_> = configs.into_iter().map(|cfg| - KeyServerImpl::new(&cfg, Arc::new(MapKeyServerSet::new(key_servers_set.clone())), Arc::new(DummyAclStorage::default()), Arc::new(DummyKeyStorage::default())).unwrap() + let key_servers: Vec<_> = configs.into_iter().enumerate().map(|(i, cfg)| + KeyServerImpl::new(&cfg, Arc::new(MapKeyServerSet::new(key_servers_set.clone())), + Arc::new(PlainNodeKeyPair::new(key_pairs[i].clone())), + Arc::new(DummyAclStorage::default()), + Arc::new(DummyKeyStorage::default())).unwrap() ).collect(); // wait until connections are established. It is fast => do not bother with events here diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index d77a82431..155dd4a01 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -28,7 +28,7 @@ use tokio_core::reactor::{Handle, Remote, Interval}; use tokio_core::net::{TcpListener, TcpStream}; use ethkey::{Public, KeyPair, Signature, Random, Generator}; use util::H256; -use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, KeyServerSet}; +use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair}; use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, GenerationSessionWrapper, EncryptionSessionWrapper, DecryptionSessionWrapper, SigningSessionWrapper}; use key_server_cluster::message::{self, Message, ClusterMessage, GenerationMessage, EncryptionMessage, DecryptionMessage, @@ -99,7 +99,7 @@ pub struct ClusterConfiguration { /// Allow connecting to 'higher' nodes. pub allow_connecting_to_higher_nodes: bool, /// KeyPair this node holds. - pub self_key_pair: KeyPair, + pub self_key_pair: Arc, /// Interface to listen to. pub listen_address: (String, u16), /// Cluster nodes set. 
@@ -146,7 +146,7 @@ pub struct ClusterData { /// Handle to the cpu thread pool. pool: CpuPool, /// KeyPair this node holds. - self_key_pair: KeyPair, + self_key_pair: Arc, /// Connections data. connections: ClusterConnections, /// Active sessions data. @@ -989,7 +989,7 @@ pub mod tests { use parking_lot::Mutex; use tokio_core::reactor::Core; use ethkey::{Random, Generator, Public}; - use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet}; + use key_server_cluster::{NodeId, SessionId, Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet, PlainNodeKeyPair}; use key_server_cluster::message::Message; use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration}; use key_server_cluster::generation_session::{Session as GenerationSession, SessionState as GenerationSessionState}; @@ -1068,7 +1068,7 @@ pub mod tests { let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect(); let cluster_params: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration { threads: 1, - self_key_pair: key_pairs[i].clone(), + self_key_pair: Arc::new(PlainNodeKeyPair::new(key_pairs[i].clone())), listen_address: ("127.0.0.1".to_owned(), ports_begin + i as u16), key_server_set: Arc::new(MapKeyServerSet::new(key_pairs.iter().enumerate() .map(|(j, kp)| (kp.public().clone(), format!("127.0.0.1:{}", ports_begin + j as u16).parse().unwrap())) diff --git a/secret_store/src/key_server_cluster/io/handshake.rs b/secret_store/src/key_server_cluster/io/handshake.rs index df8f6cbf7..bf52ab798 100644 --- a/secret_store/src/key_server_cluster/io/handshake.rs +++ b/secret_store/src/key_server_cluster/io/handshake.rs @@ -15,24 +15,25 @@ // along with Parity. If not, see . 
use std::io; +use std::sync::Arc; use std::collections::BTreeSet; use futures::{Future, Poll, Async}; use tokio_io::{AsyncRead, AsyncWrite}; -use ethkey::{Random, Generator, KeyPair, Secret, sign, verify_public}; +use ethkey::{Random, Generator, KeyPair, verify_public}; use util::H256; -use key_server_cluster::{NodeId, Error}; +use key_server_cluster::{NodeId, Error, NodeKeyPair}; use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature}; use key_server_cluster::io::{write_message, write_encrypted_message, WriteMessage, ReadMessage, - read_message, read_encrypted_message, compute_shared_key}; + read_message, read_encrypted_message, fix_shared_key}; /// Start handshake procedure with another node from the cluster. -pub fn handshake(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { +pub fn handshake(a: A, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); handshake_with_plain_confirmation(a, self_confirmation_plain, self_key_pair, trusted_nodes) } /// Start handshake procedure with another node from the cluster and given plain confirmation. 
-pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Result, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { +pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Result, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { let (error, state) = match self_confirmation_plain.clone() .and_then(|c| Handshake::::make_public_key_message(self_key_pair.public().clone(), c)) { Ok(message) => (None, HandshakeState::SendPublicKey(write_message(a, message))), @@ -53,7 +54,7 @@ pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Resul } /// Wait for handshake procedure to be started by another node from the cluster. -pub fn accept_handshake(a: A, self_key_pair: KeyPair) -> Handshake where A: AsyncWrite + AsyncRead { +pub fn accept_handshake(a: A, self_key_pair: Arc) -> Handshake where A: AsyncWrite + AsyncRead { let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); let (error, state) = match self_confirmation_plain.clone() { Ok(_) => (None, HandshakeState::ReceivePublicKey(read_message(a))), @@ -87,7 +88,7 @@ pub struct Handshake { is_active: bool, error: Option<(A, Result)>, state: HandshakeState, - self_key_pair: KeyPair, + self_key_pair: Arc, self_confirmation_plain: H256, trusted_nodes: Option>, other_node_id: Option, @@ -117,9 +118,9 @@ impl Handshake where A: AsyncRead + AsyncWrite { }))) } - fn make_private_key_signature_message(secret: &Secret, confirmation_plain: &H256) -> Result { + fn make_private_key_signature_message(self_key_pair: &NodeKeyPair, confirmation_plain: &H256) -> Result { Ok(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature { - confirmation_signed: sign(secret, confirmation_plain)?.into(), + confirmation_signed: self_key_pair.sign(confirmation_plain)?.into(), }))) } } @@ -142,15 +143,15 @@ impl Future for Handshake where A: AsyncRead 
+ AsyncWrite { read_message(stream) ), Async::NotReady) } else { - self.shared_key = match compute_shared_key(self.self_key_pair.secret(), + self.shared_key = match self.self_key_pair.compute_shared_key( self.other_node_id.as_ref().expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; other_node_id is filled in ReceivePublicKey; qed") - ) { + ).map_err(Into::into).and_then(|sk| fix_shared_key(sk.secret())) { Ok(shared_key) => Some(shared_key), - Err(err) => return Ok((stream, Err(err)).into()), + Err(err) => return Ok((stream, Err(err.into())).into()), }; let message = match Handshake::::make_private_key_signature_message( - self.self_key_pair.secret(), + &*self.self_key_pair, self.other_confirmation_plain.as_ref().expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; other_confirmation_plain is filled in ReceivePublicKey; qed") ) { Ok(message) => message, @@ -179,15 +180,15 @@ impl Future for Handshake where A: AsyncRead + AsyncWrite { self.other_node_id = Some(message.node_id.into()); self.other_confirmation_plain = Some(message.confirmation_plain.into()); if self.is_active { - self.shared_key = match compute_shared_key(self.self_key_pair.secret(), + self.shared_key = match self.self_key_pair.compute_shared_key( self.other_node_id.as_ref().expect("filled couple of lines above; qed") - ) { + ).map_err(Into::into).and_then(|sk| fix_shared_key(sk.secret())) { Ok(shared_key) => Some(shared_key), - Err(err) => return Ok((stream, Err(err)).into()), + Err(err) => return Ok((stream, Err(err.into())).into()), }; let message = match Handshake::::make_private_key_signature_message( - self.self_key_pair.secret(), + &*self.self_key_pair, self.other_confirmation_plain.as_ref().expect("filled couple of lines above; qed") ) { Ok(message) => message, @@ -248,11 +249,14 @@ impl Future for Handshake where A: AsyncRead + AsyncWrite { #[cfg(test)] mod tests { + use std::sync::Arc; use std::collections::BTreeSet; use 
futures::Future; use ethkey::{Random, Generator, sign}; + use ethcrypto::ecdh::agree; use util::H256; - use key_server_cluster::io::message::compute_shared_key; + use key_server_cluster::PlainNodeKeyPair; + use key_server_cluster::io::message::fix_shared_key; use key_server_cluster::io::message::tests::TestIo; use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature}; use super::{handshake_with_plain_confirmation, accept_handshake, HandshakeResult}; @@ -283,9 +287,9 @@ mod tests { let (self_confirmation_plain, io) = prepare_test_io(); let self_key_pair = io.self_key_pair().clone(); let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect(); - let shared_key = compute_shared_key(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap(); + let shared_key = fix_shared_key(&agree(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap()).unwrap(); - let handshake = handshake_with_plain_confirmation(io, Ok(self_confirmation_plain), self_key_pair, trusted_nodes); + let handshake = handshake_with_plain_confirmation(io, Ok(self_confirmation_plain), Arc::new(PlainNodeKeyPair::new(self_key_pair)), trusted_nodes); let handshake_result = handshake.wait().unwrap(); assert_eq!(handshake_result.1, Ok(HandshakeResult { node_id: handshake_result.0.peer_public().clone(), @@ -298,9 +302,9 @@ mod tests { let (self_confirmation_plain, io) = prepare_test_io(); let self_key_pair = io.self_key_pair().clone(); let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect(); - let shared_key = compute_shared_key(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap(); + let shared_key = fix_shared_key(&agree(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap()).unwrap(); - let mut handshake = accept_handshake(io, self_key_pair); + let mut handshake = accept_handshake(io, Arc::new(PlainNodeKeyPair::new(self_key_pair))); 
handshake.set_self_confirmation_plain(self_confirmation_plain); let handshake_result = handshake.wait().unwrap(); diff --git a/secret_store/src/key_server_cluster/io/message.rs b/secret_store/src/key_server_cluster/io/message.rs index 49b71e39d..5a6b50a3e 100644 --- a/secret_store/src/key_server_cluster/io/message.rs +++ b/secret_store/src/key_server_cluster/io/message.rs @@ -19,9 +19,8 @@ use std::u16; use std::ops::Deref; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use serde_json; -use ethcrypto::ecdh::agree; use ethcrypto::ecies::{encrypt_single_message, decrypt_single_message}; -use ethkey::{Public, Secret, KeyPair}; +use ethkey::{Secret, KeyPair}; use ethkey::math::curve_order; use util::{H256, U256}; use key_server_cluster::Error; @@ -154,12 +153,11 @@ pub fn decrypt_message(key: &KeyPair, payload: Vec) -> Result, Error Ok(decrypt_single_message(key.secret(), &payload)?) } -/// Compute shared encryption key. -pub fn compute_shared_key(self_secret: &Secret, other_public: &Public) -> Result { +/// Fix shared encryption key. 
+pub fn fix_shared_key(shared_secret: &Secret) -> Result { // secret key created in agree function is invalid, as it is not calculated mod EC.field.n // => let's do it manually - let shared_secret = agree(self_secret, other_public)?; - let shared_secret: H256 = (*shared_secret).into(); + let shared_secret: H256 = (**shared_secret).into(); let shared_secret: U256 = shared_secret.into(); let shared_secret: H256 = (shared_secret % curve_order()).into(); let shared_key_pair = KeyPair::from_secret_slice(&*shared_secret)?; @@ -204,8 +202,9 @@ pub mod tests { use futures::Poll; use tokio_io::{AsyncRead, AsyncWrite}; use ethkey::{KeyPair, Public}; + use ethcrypto::ecdh::agree; use key_server_cluster::message::Message; - use super::{MESSAGE_HEADER_SIZE, MessageHeader, compute_shared_key, encrypt_message, serialize_message, + use super::{MESSAGE_HEADER_SIZE, MessageHeader, fix_shared_key, encrypt_message, serialize_message, serialize_header, deserialize_header}; pub struct TestIo { @@ -217,7 +216,7 @@ pub mod tests { impl TestIo { pub fn new(self_key_pair: KeyPair, peer_public: Public) -> Self { - let shared_key_pair = compute_shared_key(self_key_pair.secret(), &peer_public).unwrap(); + let shared_key_pair = fix_shared_key(&agree(self_key_pair.secret(), &peer_public).unwrap()).unwrap(); TestIo { self_key_pair: self_key_pair, peer_public: peer_public, diff --git a/secret_store/src/key_server_cluster/io/mod.rs b/secret_store/src/key_server_cluster/io/mod.rs index 57071038e..dfea33683 100644 --- a/secret_store/src/key_server_cluster/io/mod.rs +++ b/secret_store/src/key_server_cluster/io/mod.rs @@ -26,7 +26,7 @@ mod write_message; pub use self::deadline::{deadline, Deadline, DeadlineStatus}; pub use self::handshake::{handshake, accept_handshake, Handshake, HandshakeResult}; pub use self::message::{MessageHeader, SerializedMessage, serialize_message, deserialize_message, - encrypt_message, compute_shared_key}; + encrypt_message, fix_shared_key}; pub use 
self::read_header::{read_header, ReadHeader}; pub use self::read_payload::{read_payload, read_encrypted_payload, ReadPayload}; pub use self::read_message::{read_message, read_encrypted_message, ReadMessage}; diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index 8f6ae4add..102c3672f 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -20,6 +20,7 @@ use ethkey; use ethcrypto; use super::types::all::ServerKeyId; +pub use super::traits::NodeKeyPair; pub use super::types::all::{NodeId, EncryptedDocumentKeyShadow}; pub use super::acl_storage::AclStorage; pub use super::key_storage::{KeyStorage, DocumentKeyShare}; @@ -30,6 +31,8 @@ pub use self::generation_session::Session as GenerationSession; pub use self::encryption_session::Session as EncryptionSession; pub use self::decryption_session::Session as DecryptionSession; +#[cfg(test)] +pub use super::node_key_pair::PlainNodeKeyPair; #[cfg(test)] pub use super::key_storage::tests::DummyKeyStorage; #[cfg(test)] diff --git a/secret_store/src/key_server_cluster/net/accept_connection.rs b/secret_store/src/key_server_cluster/net/accept_connection.rs index 339625f3f..d85e492dd 100644 --- a/secret_store/src/key_server_cluster/net/accept_connection.rs +++ b/secret_store/src/key_server_cluster/net/accept_connection.rs @@ -15,18 +15,18 @@ // along with Parity. If not, see . use std::io; +use std::sync::Arc; use std::net::SocketAddr; use std::time::Duration; use futures::{Future, Poll}; use tokio_core::reactor::Handle; use tokio_core::net::TcpStream; -use ethkey::KeyPair; -use key_server_cluster::Error; +use key_server_cluster::{Error, NodeKeyPair}; use key_server_cluster::io::{accept_handshake, Handshake, Deadline, deadline}; use key_server_cluster::net::Connection; /// Create future for accepting incoming connection. 
-pub fn accept_connection(address: SocketAddr, stream: TcpStream, handle: &Handle, self_key_pair: KeyPair) -> Deadline { +pub fn accept_connection(address: SocketAddr, stream: TcpStream, handle: &Handle, self_key_pair: Arc) -> Deadline { let accept = AcceptConnection { handshake: accept_handshake(stream, self_key_pair), address: address, diff --git a/secret_store/src/key_server_cluster/net/connect.rs b/secret_store/src/key_server_cluster/net/connect.rs index 449168ab2..7515494e4 100644 --- a/secret_store/src/key_server_cluster/net/connect.rs +++ b/secret_store/src/key_server_cluster/net/connect.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::sync::Arc; use std::collections::BTreeSet; use std::io; use std::time::Duration; @@ -21,13 +22,12 @@ use std::net::SocketAddr; use futures::{Future, Poll, Async}; use tokio_core::reactor::Handle; use tokio_core::net::{TcpStream, TcpStreamNew}; -use ethkey::KeyPair; -use key_server_cluster::{Error, NodeId}; +use key_server_cluster::{Error, NodeId, NodeKeyPair}; use key_server_cluster::io::{handshake, Handshake, Deadline, deadline}; use key_server_cluster::net::Connection; /// Create future for connecting to other node. 
-pub fn connect(address: &SocketAddr, handle: &Handle, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Deadline { +pub fn connect(address: &SocketAddr, handle: &Handle, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Deadline { let connect = Connect { state: ConnectState::TcpConnect(TcpStream::connect(address, handle)), address: address.clone(), @@ -48,7 +48,7 @@ enum ConnectState { pub struct Connect { state: ConnectState, address: SocketAddr, - self_key_pair: KeyPair, + self_key_pair: Arc, trusted_nodes: BTreeSet, } diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index d5af7a5fa..08ebe6e1c 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -241,7 +241,6 @@ pub mod tests { data_path: path.as_str().to_owned(), cluster_config: ClusterConfiguration { threads: 1, - self_private: (**Random.generate().unwrap().secret().clone()).into(), listener_address: NodeAddress { address: "0.0.0.0".to_owned(), port: 8083, diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 9750f7223..7e9897e60 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -59,22 +59,24 @@ mod key_server; mod key_storage; mod serialization; mod key_server_set; +mod node_key_pair; use std::sync::Arc; use ethcore::client::Client; pub use types::all::{ServerKeyId, EncryptedDocumentKey, RequestSignature, Public, Error, NodeAddress, ServiceConfiguration, ClusterConfiguration}; -pub use traits::{KeyServer}; +pub use traits::{NodeKeyPair, KeyServer}; +pub use self::node_key_pair::{PlainNodeKeyPair, KeyStoreNodeKeyPair}; /// Start new key server instance -pub fn start(client: Arc, config: ServiceConfiguration) -> Result, Error> { +pub fn start(client: Arc, self_key_pair: Arc, config: ServiceConfiguration) -> Result, Error> { use std::sync::Arc; let acl_storage = acl_storage::OnChainAclStorage::new(&client); let key_server_set = key_server_set::OnChainKeyServerSet::new(&client, config.cluster_config.nodes.clone())?; 
let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?); - let key_server = key_server::KeyServerImpl::new(&config.cluster_config, key_server_set, acl_storage, key_storage)?; + let key_server = key_server::KeyServerImpl::new(&config.cluster_config, key_server_set, self_key_pair, acl_storage, key_storage)?; let listener = http_listener::KeyServerHttpListener::start(&config.listener_address, key_server)?; Ok(Box::new(listener)) } diff --git a/secret_store/src/node_key_pair.rs b/secret_store/src/node_key_pair.rs new file mode 100644 index 000000000..ce6c88a07 --- /dev/null +++ b/secret_store/src/node_key_pair.rs @@ -0,0 +1,84 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::sync::Arc; +use ethcrypto::ecdh::agree; +use ethkey::{KeyPair, Public, Signature, Error as EthKeyError, sign}; +use ethcore::account_provider::AccountProvider; +use util::{Address, H256}; +use traits::NodeKeyPair; + +pub struct PlainNodeKeyPair { + key_pair: KeyPair, +} + +pub struct KeyStoreNodeKeyPair { + account_provider: Arc, + address: Address, + public: Public, + password: String, +} + +impl PlainNodeKeyPair { + pub fn new(key_pair: KeyPair) -> Self { + PlainNodeKeyPair { + key_pair: key_pair, + } + } +} + +impl NodeKeyPair for PlainNodeKeyPair { + fn public(&self) -> &Public { + self.key_pair.public() + } + + fn sign(&self, data: &H256) -> Result { + sign(self.key_pair.secret(), data) + } + + fn compute_shared_key(&self, peer_public: &Public) -> Result { + agree(self.key_pair.secret(), peer_public).map_err(|e| EthKeyError::Custom(e.into())) + .and_then(KeyPair::from_secret) + } +} + +impl KeyStoreNodeKeyPair { + pub fn new(account_provider: Arc, address: Address, password: String) -> Result { + let public = account_provider.account_public(address.clone(), &password).map_err(|e| EthKeyError::Custom(format!("{}", e)))?; + Ok(KeyStoreNodeKeyPair { + account_provider: account_provider, + address: address, + public: public, + password: password, + }) + } +} + +impl NodeKeyPair for KeyStoreNodeKeyPair { + fn public(&self) -> &Public { + &self.public + } + + fn sign(&self, data: &H256) -> Result { + self.account_provider.sign(self.address.clone(), Some(self.password.clone()), data.clone()) + .map_err(|e| EthKeyError::Custom(format!("{}", e))) + } + + fn compute_shared_key(&self, peer_public: &Public) -> Result { + KeyPair::from_secret(self.account_provider.agree(self.address.clone(), Some(self.password.clone()), peer_public) + .map_err(|e| EthKeyError::Custom(format!("{}", e)))?) 
+ } +} diff --git a/secret_store/src/traits.rs b/secret_store/src/traits.rs index 33a4eff3c..31da748e0 100644 --- a/secret_store/src/traits.rs +++ b/secret_store/src/traits.rs @@ -14,9 +14,21 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use ethkey::{KeyPair, Signature, Error as EthKeyError}; +use util::H256; use types::all::{Error, Public, ServerKeyId, MessageHash, EncryptedMessageSignature, RequestSignature, EncryptedDocumentKey, EncryptedDocumentKeyShadow}; +/// Node key pair. +pub trait NodeKeyPair: Send + Sync { + /// Public portion of key. + fn public(&self) -> &Public; + /// Sign data with node key. + fn sign(&self, data: &H256) -> Result; + /// Compute shared key to encrypt channel between two nodes. + fn compute_shared_key(&self, peer_public: &Public) -> Result; +} + /// Server key (SK) generator. pub trait ServerKeyGenerator { /// Generate new SK. diff --git a/secret_store/src/types/all.rs b/secret_store/src/types/all.rs index 54fc8acae..8dc92f175 100644 --- a/secret_store/src/types/all.rs +++ b/secret_store/src/types/all.rs @@ -83,8 +83,6 @@ pub struct ServiceConfiguration { pub struct ClusterConfiguration { /// Number of threads reserved by cluster. pub threads: usize, - /// Private key this node holds. - pub self_private: Vec, // holds ethkey::Secret /// This node address. pub listener_address: NodeAddress, /// All cluster nodes addresses. 
From 45087599efdf8ac5f80f6e1df112d747131b91c0 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 9 Aug 2017 12:33:41 +0300 Subject: [PATCH 047/112] lost commit --- Cargo.lock | 1 - ethstore/Cargo.toml | 1 - ethstore/src/lib.rs | 1 - 3 files changed, 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f5ed0a21..e2b4da1eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -847,7 +847,6 @@ name = "ethstore" version = "0.1.0" dependencies = [ "ethcore-bigint 0.1.3", - "ethcore-util 1.8.0", "ethcrypto 0.1.0", "ethkey 0.2.0", "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/ethstore/Cargo.toml b/ethstore/Cargo.toml index 0f91a663c..200dec366 100755 --- a/ethstore/Cargo.toml +++ b/ethstore/Cargo.toml @@ -19,7 +19,6 @@ itertools = "0.5" parking_lot = "0.4" ethcrypto = { path = "../ethcrypto" } ethcore-bigint = { path = "../util/bigint" } -ethcore-util = { path = "../util" } smallvec = "0.4" parity-wordlist = "1.0" tempdir = "0.3" diff --git a/ethstore/src/lib.rs b/ethstore/src/lib.rs index 311e9e73a..65935f89c 100755 --- a/ethstore/src/lib.rs +++ b/ethstore/src/lib.rs @@ -35,7 +35,6 @@ extern crate ethcore_bigint as bigint; extern crate ethcrypto as crypto; extern crate ethkey as _ethkey; extern crate parity_wordlist; -extern crate ethcore_util as util; #[macro_use] extern crate log; From a8a11e56dbd3f9727f741d65c0da381cb1e220c0 Mon Sep 17 00:00:00 2001 From: Marek Kotewicz Date: Wed, 9 Aug 2017 16:40:52 +0200 Subject: [PATCH 048/112] updated jsonrpc (#6264) --- Cargo.lock | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e2b4da1eb..fd472676c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1217,7 +1217,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699" +source = 
"git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" dependencies = [ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1229,7 +1229,7 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" dependencies = [ "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1242,7 +1242,7 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" dependencies = [ "bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1255,7 +1255,7 @@ dependencies = [ [[package]] name = "jsonrpc-macros" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1265,7 +1265,7 @@ dependencies = [ [[package]] name = "jsonrpc-minihttp-server" version = "7.0.0" -source = 
"git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" dependencies = [ "bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1280,7 +1280,7 @@ dependencies = [ [[package]] name = "jsonrpc-pubsub" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1290,7 +1290,7 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" dependencies = [ "bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", "globset 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1303,7 +1303,7 @@ dependencies = [ [[package]] name = "jsonrpc-tcp-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" dependencies = [ "bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1317,7 +1317,7 @@ dependencies = [ [[package]] name = "jsonrpc-ws-server" version = "7.0.0" 
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", From cc95edf4dce6cdc40ab013744837f5602d55a8cb Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 9 Aug 2017 18:17:28 +0300 Subject: [PATCH 049/112] fixed grumbles --- parity/secretstore.rs | 5 +++++ secret_store/src/http_listener.rs | 6 ------ secret_store/src/key_server_cluster/decryption_session.rs | 2 +- secret_store/src/key_server_cluster/signing_session.rs | 2 +- secret_store/src/key_storage.rs | 2 +- 5 files changed, 8 insertions(+), 9 deletions(-) diff --git a/parity/secretstore.rs b/parity/secretstore.rs index 8094ef323..eb5922540 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -87,6 +87,7 @@ mod server { use std::sync::Arc; use ethcore_secretstore; use ethkey::KeyPair; + use ansi_term::Colour::Red; use super::{Configuration, Dependencies, NodeSecretKey}; /// Key server @@ -97,6 +98,10 @@ mod server { impl KeyServer { /// Create new key server pub fn new(mut conf: Configuration, deps: Dependencies) -> Result { + if !conf.acl_check_enabled { + warn!("Running SecretStore with disabled ACL check: {}", Red.bold().paint("everyone has access to stored keys")); + } + let self_secret: Arc = match conf.self_secret.take() { Some(NodeSecretKey::Plain(secret)) => Arc::new(ethcore_secretstore::PlainNodeKeyPair::new( KeyPair::from_secret(secret).map_err(|e| format!("invalid secret: {}", e))?)), diff --git a/secret_store/src/http_listener.rs b/secret_store/src/http_listener.rs index 86688618a..883389365 100644 --- a/secret_store/src/http_listener.rs +++ b/secret_store/src/http_listener.rs @@ -78,9 +78,6 @@ impl 
KeyServerHttpListener where T: KeyServer + 'static { let shared_handler = Arc::new(KeyServerSharedHttpHandler { key_server: key_server, }); - /*let handler = KeyServerHttpHandler { - handler: shared_handler.clone(), - };*/ let http_server = listener_address .map(|listener_address| format!("{}:{}", listener_address.address, listener_address.port)) @@ -89,9 +86,6 @@ impl KeyServerHttpListener where T: KeyServer + 'static { handler: shared_handler.clone(), }).expect("cannot start HttpServer")); - /*let listener_addr: &str = &format!("{}:{}", listener_address.address, listener_address.port); - let http_server = HttpServer::http(&listener_addr).expect("cannot start HttpServer"); - let http_server = http_server.handle(handler).expect("cannot start HttpServer");*/ let listener = KeyServerHttpListener { http_server: http_server, handler: shared_handler, diff --git a/secret_store/src/key_server_cluster/decryption_session.rs b/secret_store/src/key_server_cluster/decryption_session.rs index afc73f858..bc3c6aad0 100644 --- a/secret_store/src/key_server_cluster/decryption_session.rs +++ b/secret_store/src/key_server_cluster/decryption_session.rs @@ -467,7 +467,7 @@ impl Ord for DecryptionSessionId { mod tests { use std::sync::Arc; use std::collections::BTreeMap; - use super::super::super::acl_storage::DummyAclStorage; + use acl_storage::DummyAclStorage; use ethkey::{self, KeyPair, Random, Generator, Public, Secret}; use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, Error, EncryptedDocumentKeyShadow, SessionMeta}; use key_server_cluster::cluster::tests::DummyCluster; diff --git a/secret_store/src/key_server_cluster/signing_session.rs b/secret_store/src/key_server_cluster/signing_session.rs index e647c8b14..e56306142 100644 --- a/secret_store/src/key_server_cluster/signing_session.rs +++ b/secret_store/src/key_server_cluster/signing_session.rs @@ -572,7 +572,7 @@ mod tests { use std::collections::{BTreeMap, VecDeque}; use ethkey::{self, Random, Generator, Public}; use 
util::H256; - use super::super::super::acl_storage::DummyAclStorage; + use acl_storage::DummyAclStorage; use key_server_cluster::{NodeId, SessionId, SessionMeta, Error, KeyStorage}; use key_server_cluster::cluster::tests::DummyCluster; use key_server_cluster::generation_session::{Session as GenerationSession}; diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index 2fad4cdf7..fdbb5fa40 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -200,7 +200,7 @@ pub mod tests { use devtools::RandomTempPath; use ethkey::{Random, Generator, Public, Secret}; use util::Database; - use super::super::types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, ServerKeyId}; + use types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, ServerKeyId}; use super::{DB_META_KEY_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare, SerializableDocumentKeyShareV0, SerializableDocumentKeyShareV1, upgrade_db}; From d6eb05382672e026545bf9a9ca2747c37f5c10a6 Mon Sep 17 00:00:00 2001 From: Nicolas Gotchac Date: Wed, 9 Aug 2017 19:06:14 +0200 Subject: [PATCH 050/112] Add support for ConsenSys multisig wallet (#6153) * First draft of ConsenSys wallet * Fix transfer store // WIP Consensys Wallet * Rename walletABI JSON file * Fix linting * Fix wrong daylimit in wallet modal * Confirm/Revoke ConsensysWallet txs * Linting * Change of settings for the Multisig Wallet --- .../abi/consensys-multisig-wallet.json | 510 ++++++++++++++++++ ...t.json => foundation-multisig-wallet.json} | 0 js/src/contracts/abi/index.js | 2 +- js/src/modals/AddContract/types.js | 4 +- .../modals/CreateWallet/createWalletStore.js | 6 +- js/src/modals/Transfer/store.js | 305 ++++------- .../WalletSettings/walletSettingsStore.js | 86 +-- js/src/redux/providers/personalActions.js | 2 +- js/src/redux/providers/walletActions.js | 260 ++------- js/src/util/tx.js | 74 +-- js/src/util/wallets.js | 426 +++++---------- 
js/src/util/wallets/consensys-wallet.js | 354 ++++++++++++ js/src/util/wallets/foundation-wallet.js | 500 +++++++++++++++++ js/src/util/wallets/pending-contracts.js | 49 ++ js/src/util/wallets/updates.js | 21 + 15 files changed, 1818 insertions(+), 781 deletions(-) create mode 100644 js/src/contracts/abi/consensys-multisig-wallet.json rename js/src/contracts/abi/{wallet.json => foundation-multisig-wallet.json} (100%) create mode 100644 js/src/util/wallets/consensys-wallet.js create mode 100644 js/src/util/wallets/foundation-wallet.js create mode 100644 js/src/util/wallets/pending-contracts.js create mode 100644 js/src/util/wallets/updates.js diff --git a/js/src/contracts/abi/consensys-multisig-wallet.json b/js/src/contracts/abi/consensys-multisig-wallet.json new file mode 100644 index 000000000..79623637d --- /dev/null +++ b/js/src/contracts/abi/consensys-multisig-wallet.json @@ -0,0 +1,510 @@ +[ + { + "constant": true, + "inputs": [ + { + "name": "", + "type": "uint256" + } + ], + "name": "owners", + "outputs": [ + { + "name": "", + "type": "address" + } + ], + "payable": false, + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "owner", + "type": "address" + } + ], + "name": "removeOwner", + "outputs": [], + "payable": false, + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "transactionId", + "type": "uint256" + } + ], + "name": "revokeConfirmation", + "outputs": [], + "payable": false, + "type": "function" + }, + { + "constant": true, + "inputs": [ + { + "name": "", + "type": "address" + } + ], + "name": "isOwner", + "outputs": [ + { + "name": "", + "type": "bool" + } + ], + "payable": false, + "type": "function" + }, + { + "constant": true, + "inputs": [ + { + "name": "", + "type": "uint256" + }, + { + "name": "", + "type": "address" + } + ], + "name": "confirmations", + "outputs": [ + { + "name": "", + "type": "bool" + } + ], + "payable": false, + "type": "function" + }, + { + "constant": true, + 
"inputs": [ + { + "name": "pending", + "type": "bool" + }, + { + "name": "executed", + "type": "bool" + } + ], + "name": "getTransactionCount", + "outputs": [ + { + "name": "count", + "type": "uint256" + } + ], + "payable": false, + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "owner", + "type": "address" + } + ], + "name": "addOwner", + "outputs": [], + "payable": false, + "type": "function" + }, + { + "constant": true, + "inputs": [ + { + "name": "transactionId", + "type": "uint256" + } + ], + "name": "isConfirmed", + "outputs": [ + { + "name": "", + "type": "bool" + } + ], + "payable": false, + "type": "function" + }, + { + "constant": true, + "inputs": [ + { + "name": "transactionId", + "type": "uint256" + } + ], + "name": "getConfirmationCount", + "outputs": [ + { + "name": "count", + "type": "uint256" + } + ], + "payable": false, + "type": "function" + }, + { + "constant": true, + "inputs": [ + { + "name": "", + "type": "uint256" + } + ], + "name": "transactions", + "outputs": [ + { + "name": "destination", + "type": "address" + }, + { + "name": "value", + "type": "uint256" + }, + { + "name": "data", + "type": "bytes" + }, + { + "name": "executed", + "type": "bool" + } + ], + "payable": false, + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "getOwners", + "outputs": [ + { + "name": "", + "type": "address[]" + } + ], + "payable": false, + "type": "function" + }, + { + "constant": true, + "inputs": [ + { + "name": "from", + "type": "uint256" + }, + { + "name": "to", + "type": "uint256" + }, + { + "name": "pending", + "type": "bool" + }, + { + "name": "executed", + "type": "bool" + } + ], + "name": "getTransactionIds", + "outputs": [ + { + "name": "_transactionIds", + "type": "uint256[]" + } + ], + "payable": false, + "type": "function" + }, + { + "constant": true, + "inputs": [ + { + "name": "transactionId", + "type": "uint256" + } + ], + "name": "getConfirmations", + "outputs": [ + { + "name": 
"_confirmations", + "type": "address[]" + } + ], + "payable": false, + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "transactionCount", + "outputs": [ + { + "name": "", + "type": "uint256" + } + ], + "payable": false, + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "_required", + "type": "uint256" + } + ], + "name": "changeRequirement", + "outputs": [], + "payable": false, + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "transactionId", + "type": "uint256" + } + ], + "name": "confirmTransaction", + "outputs": [], + "payable": false, + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "destination", + "type": "address" + }, + { + "name": "value", + "type": "uint256" + }, + { + "name": "data", + "type": "bytes" + } + ], + "name": "submitTransaction", + "outputs": [ + { + "name": "transactionId", + "type": "uint256" + } + ], + "payable": false, + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "MAX_OWNER_COUNT", + "outputs": [ + { + "name": "", + "type": "uint256" + } + ], + "payable": false, + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "required", + "outputs": [ + { + "name": "", + "type": "uint256" + } + ], + "payable": false, + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "owner", + "type": "address" + }, + { + "name": "newOwner", + "type": "address" + } + ], + "name": "replaceOwner", + "outputs": [], + "payable": false, + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "transactionId", + "type": "uint256" + } + ], + "name": "executeTransaction", + "outputs": [], + "payable": false, + "type": "function" + }, + { + "inputs": [ + { + "name": "_owners", + "type": "address[]" + }, + { + "name": "_required", + "type": "uint256" + } + ], + "payable": false, + "type": "constructor" + }, + { + "payable": true, + "type": "fallback" + }, + { + 
"anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "sender", + "type": "address" + }, + { + "indexed": true, + "name": "transactionId", + "type": "uint256" + } + ], + "name": "Confirmation", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "sender", + "type": "address" + }, + { + "indexed": true, + "name": "transactionId", + "type": "uint256" + } + ], + "name": "Revocation", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "transactionId", + "type": "uint256" + } + ], + "name": "Submission", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "transactionId", + "type": "uint256" + } + ], + "name": "Execution", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "transactionId", + "type": "uint256" + } + ], + "name": "ExecutionFailure", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "sender", + "type": "address" + }, + { + "indexed": false, + "name": "value", + "type": "uint256" + } + ], + "name": "Deposit", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "owner", + "type": "address" + } + ], + "name": "OwnerAddition", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "owner", + "type": "address" + } + ], + "name": "OwnerRemoval", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "name": "required", + "type": "uint256" + } + ], + "name": "RequirementChange", + "type": "event" + } +] diff --git a/js/src/contracts/abi/wallet.json b/js/src/contracts/abi/foundation-multisig-wallet.json similarity index 100% rename from js/src/contracts/abi/wallet.json rename to js/src/contracts/abi/foundation-multisig-wallet.json diff --git a/js/src/contracts/abi/index.js b/js/src/contracts/abi/index.js index 
8985d869e..f475cce07 100644 --- a/js/src/contracts/abi/index.js +++ b/js/src/contracts/abi/index.js @@ -28,4 +28,4 @@ export registry2 from './registry2.json'; export signaturereg from './signaturereg.json'; export smsverification from './sms-verification.json'; export tokenreg from './tokenreg.json'; -export wallet from './wallet.json'; +export foundationWallet from './foundation-multisig-wallet.json'; diff --git a/js/src/modals/AddContract/types.js b/js/src/modals/AddContract/types.js index b229fc7ac..dd1e20fbc 100644 --- a/js/src/modals/AddContract/types.js +++ b/js/src/modals/AddContract/types.js @@ -17,7 +17,7 @@ import React from 'react'; import { FormattedMessage } from 'react-intl'; -import { eip20, wallet } from '~/contracts/abi'; +import { eip20, foundationWallet } from '~/contracts/abi'; const ABI_TYPES = [ { @@ -72,7 +72,7 @@ const ABI_TYPES = [ ), readOnly: true, type: 'multisig', - value: JSON.stringify(wallet) + value: JSON.stringify(foundationWallet) }, { description: ( diff --git a/js/src/modals/CreateWallet/createWalletStore.js b/js/src/modals/CreateWallet/createWalletStore.js index d614e8041..26ed5816c 100644 --- a/js/src/modals/CreateWallet/createWalletStore.js +++ b/js/src/modals/CreateWallet/createWalletStore.js @@ -21,7 +21,7 @@ import { FormattedMessage } from 'react-intl'; import Contract from '~/api/contract'; import Contracts from '~/contracts'; -import { wallet as walletAbi } from '~/contracts/abi'; +import { foundationWallet as walletAbi } from '~/contracts/abi'; import { wallet as walletCode, walletLibrary as walletLibraryCode, walletLibraryRegKey, fullWalletCode } from '~/contracts/code/wallet'; import { validateUint, validateAddress, validateName } from '~/util/validation'; @@ -163,11 +163,11 @@ export default class CreateWalletStore { WalletsUtils.fetchOwners(walletContract), WalletsUtils.fetchDailylimit(walletContract) ]) - .then(([ require, owners, dailylimit ]) => { + .then(([ require, owners, daylimit ]) => { transaction(() => { 
this.wallet.owners = owners; this.wallet.required = require.toNumber(); - this.wallet.dailylimit = dailylimit.limit; + this.wallet.daylimit = daylimit.limit; this.wallet = this.getWalletWithMeta(this.wallet); }); diff --git a/js/src/modals/Transfer/store.js b/js/src/modals/Transfer/store.js index 737bab778..eaccf4f40 100644 --- a/js/src/modals/Transfer/store.js +++ b/js/src/modals/Transfer/store.js @@ -18,12 +18,12 @@ import { noop } from 'lodash'; import { observable, computed, action, transaction } from 'mobx'; import BigNumber from 'bignumber.js'; -import { eip20 as tokenAbi, wallet as walletAbi } from '~/contracts/abi'; +import { eip20 as tokenAbi } from '~/contracts/abi'; import { fromWei } from '~/api/util/wei'; -import Contract from '~/api/contract'; import ERRORS from './errors'; -import { DEFAULT_GAS, DEFAULT_GASPRICE, MAX_GAS_ESTIMATION } from '~/util/constants'; +import { DEFAULT_GAS } from '~/util/constants'; import { ETH_TOKEN } from '~/util/tokens'; +import { getTxOptions } from '~/util/tx'; import GasPriceStore from '~/ui/GasPriceEditor/store'; import { getLogger, LOG_KEYS } from '~/config'; @@ -92,7 +92,6 @@ export default class TransferStore { if (this.isWallet) { this.wallet = props.wallet; - this.walletContract = new Contract(this.api, walletAbi); } if (senders) { @@ -115,19 +114,13 @@ export default class TransferStore { @computed get isValid () { const detailsValid = !this.recipientError && !this.valueError && !this.totalError && !this.senderError; const extrasValid = !this.gasStore.errorGas && !this.gasStore.errorPrice && !this.gasStore.conditionBlockError && !this.totalError; - const verifyValid = !this.passwordError; switch (this.stage) { case 0: return detailsValid; case 1: - return this.extras - ? 
extrasValid - : verifyValid; - - case 2: - return verifyValid; + return extrasValid; } } @@ -263,16 +256,21 @@ export default class TransferStore { if (this.isWallet && !valueError) { const { last, limit, spent } = this.wallet.dailylimit; - const remains = fromWei(limit.minus(spent)); - const today = Math.round(Date.now() / (24 * 3600 * 1000)); - const isResetable = last.lt(today); - if ((!isResetable && remains.lt(value)) || fromWei(limit).lt(value)) { - // already spent too much today - this.walletWarning = WALLET_WARNING_SPENT_TODAY_LIMIT; - } else if (this.walletWarning) { - // all ok - this.walletWarning = null; + // Don't show a warning if the limit is 0 + // (will always need confirmations) + if (limit.gt(0)) { + const remains = fromWei(limit.minus(spent)); + const today = Math.round(Date.now() / (24 * 3600 * 1000)); + const willResetLimit = last.lt(today); + + if ((!willResetLimit && remains.lt(value)) || fromWei(limit).lt(value)) { + // already spent too much today + this.walletWarning = WALLET_WARNING_SPENT_TODAY_LIMIT; + } else if (this.walletWarning) { + // all ok + this.walletWarning = null; + } } } @@ -312,24 +310,16 @@ export default class TransferStore { }); } - getBalance (forceSender = false) { - if (this.isWallet && !forceSender) { - return this.balance; - } - - const balance = this.senders - ? this.sendersBalances[this.sender] - : this.balance; - - return balance; - } - /** * Return the balance of the selected token * (in WEI for ETH, without formating for other tokens) */ - getTokenBalance (token = this.token, forceSender = false) { - return new BigNumber(this.balance[token.id] || 0); + getTokenBalance (token = this.token, address = this.account.address) { + const balance = address === this.account.address + ? 
this.balance + : this.sendersBalances[address]; + + return new BigNumber(balance[token.id] || 0); } getTokenValue (token = this.token, value = this.value, inverse = false) { @@ -348,54 +338,30 @@ export default class TransferStore { return _value.mul(token.format); } - getValues (_gasTotal) { - const gasTotal = new BigNumber(_gasTotal || 0); + getValue () { const { valueAll, isEth, isWallet } = this; - log.debug('@getValues', 'gas', gasTotal.toFormat()); - if (!valueAll) { const value = this.getTokenValue(); - // If it's a token or a wallet, eth is the estimated gas, - // and value is the user input - if (!isEth || isWallet) { - return { - eth: gasTotal, - token: value - }; - } - - // Otherwise, eth is the sum of the gas and the user input - const totalEthValue = gasTotal.plus(value); - - return { - eth: totalEthValue, - token: value - }; + return value; } - // If it's the total balance that needs to be sent, send the total balance - // if it's not a proper ETH transfer + const balance = this.getTokenBalance(); + if (!isEth || isWallet) { - const tokenBalance = this.getTokenBalance(); - - return { - eth: gasTotal, - token: tokenBalance - }; + return balance; } - // Otherwise, substract the gas estimate - const availableEth = this.getTokenBalance(ETH_TOKEN); - const totalEthValue = availableEth.gt(gasTotal) - ? availableEth.minus(gasTotal) + // substract the gas estimate + const gasTotal = new BigNumber(this.gasStore.price || 0) + .mul(new BigNumber(this.gasStore.gas || 0)); + + const totalEthValue = balance.gt(gasTotal) + ? 
balance.minus(gasTotal) : new BigNumber(0); - return { - eth: totalEthValue.plus(gasTotal), - token: totalEthValue - }; + return totalEthValue; } getFormattedTokenValue (tokenValue) { @@ -403,160 +369,125 @@ export default class TransferStore { } @action recalculate = (redo = false) => { - const { account } = this; + const { account, balance } = this; - if (!account || !this.balance) { + if (!account || !balance) { return; } - const balance = this.getBalance(); + return this.getTransactionOptions() + .then((options) => { + const gasTotal = options.gas.mul(options.gasPrice); - if (!balance) { - return; - } + const tokenValue = this.getValue(); + const ethValue = options.value.add(gasTotal); - const gasTotal = new BigNumber(this.gasStore.price || 0).mul(new BigNumber(this.gasStore.gas || 0)); + const tokenBalance = this.getTokenBalance(); + const ethBalance = this.getTokenBalance(ETH_TOKEN, options.from); - const ethBalance = this.getTokenBalance(ETH_TOKEN, true); - const tokenBalance = this.getTokenBalance(); - const { eth, token } = this.getValues(gasTotal); + let totalError = null; + let valueError = null; - let totalError = null; - let valueError = null; + if (tokenValue.gt(tokenBalance)) { + valueError = ERRORS.largeAmount; + } - if (eth.gt(ethBalance)) { - totalError = ERRORS.largeAmount; - } + if (ethValue.gt(ethBalance)) { + totalError = ERRORS.largeAmount; + } - if (token && token.gt(tokenBalance)) { - valueError = ERRORS.largeAmount; - } + log.debug('@recalculate', { + eth: ethValue.toFormat(), + token: tokenValue.toFormat(), + ethBalance: ethBalance.toFormat(), + tokenBalance: tokenBalance.toFormat(), + gasTotal: gasTotal.toFormat() + }); - log.debug('@recalculate', { - eth: eth.toFormat(), - token: token.toFormat(), - ethBalance: ethBalance.toFormat(), - tokenBalance: tokenBalance.toFormat(), - gasTotal: gasTotal.toFormat() - }); + transaction(() => { + this.totalError = totalError; + this.valueError = valueError; + 
this.gasStore.setErrorTotal(totalError); + this.gasStore.setEthValue(options.value); - transaction(() => { - this.totalError = totalError; - this.valueError = valueError; - this.gasStore.setErrorTotal(totalError); - this.gasStore.setEthValue(eth.sub(gasTotal)); + this.total = fromWei(ethValue).toFixed(); - this.total = this.api.util.fromWei(eth).toFixed(); + const nextValue = this.getFormattedTokenValue(tokenValue); + let prevValue; - const nextValue = this.getFormattedTokenValue(token); - let prevValue; + try { + prevValue = new BigNumber(this.value || 0); + } catch (error) { + prevValue = new BigNumber(0); + } - try { - prevValue = new BigNumber(this.value || 0); - } catch (error) { - prevValue = new BigNumber(0); - } + // Change the input only if necessary + if (!nextValue.eq(prevValue)) { + this.value = nextValue.toString(); + } - // Change the input only if necessary - if (!nextValue.eq(prevValue)) { - this.value = nextValue.toString(); - } - - // Re Calculate gas once more to be sure - if (redo) { - return this.recalculateGas(false); - } - }); - } - - send () { - const { options, values } = this._getTransferParams(); - - log.debug('@send', 'transfer value', options.value && options.value.toFormat()); - - return this._getTransferMethod().postTransaction(options, values); - } - - _estimateGas (forceToken = false) { - const { options, values } = this._getTransferParams(true, forceToken); - - return this._getTransferMethod(true, forceToken).estimateGas(options, values); + // Re Calculate gas once more to be sure + if (redo) { + return this.recalculateGas(false); + } + }); + }); } estimateGas () { - return this._estimateGas(); + return this.getTransactionOptions() + .then((options) => { + return this.api.eth.estimateGas(options); + }); } - _getTransferMethod (gas = false, forceToken = false) { - const { isEth, isWallet } = this; + send () { + return this.getTransactionOptions() + .then((options) => { + log.debug('@send', 'transfer value', options.value && 
options.value.toFormat()); - if (isEth && !isWallet && !forceToken) { - return gas ? this.api.eth : this.api.parity; - } - - if (isWallet && !forceToken) { - return this.wallet.instance.execute; - } - - return this.tokenContract.at(this.token.address).instance.transfer; + return this.api.parity.postTransaction(options); + }); } - _getData (gas = false) { - const { isEth, isWallet } = this; + getTransactionOptions () { + const [ func, options, values ] = this._getTransactionArgs(); - if (!isWallet || isEth) { - return this.data && this.data.length ? this.data : ''; - } - - const func = this._getTransferMethod(gas, true); - const { options, values } = this._getTransferParams(gas, true); - - return this.tokenContract.at(this.token.address).getCallData(func, options, values); + return getTxOptions(this.api, func, options, values) + .then((_options) => { + delete _options.sender; + return _options; + }); } - _getTransferParams (gas = false, forceToken = false) { - const { isEth, isWallet } = this; - - const to = (isEth && !isWallet) ? this.recipient - : (this.isWallet ? this.wallet.address : this.token.address); + _getTransactionArgs () { + const { isEth } = this; + const value = this.getValue(); const options = this.gasStore.overrideTransaction({ - from: this.sender || this.account.address, - to + from: this.account.address, + sender: this.sender }); - if (gas) { - options.gas = MAX_GAS_ESTIMATION; - } - - const gasTotal = new BigNumber(options.gas || DEFAULT_GAS).mul(options.gasPrice || DEFAULT_GASPRICE); - const { token } = this.getValues(gasTotal); - - if (isEth && !isWallet && !forceToken) { - options.value = token; - options.data = this._getData(gas); - - return { options, values: [] }; - } - - if (isWallet && !forceToken) { - const to = isEth ? this.recipient : this.token.address; - const value = isEth ? 
token : new BigNumber(0); - - const values = [ - to, value, - this._getData(gas) - ]; - - return { options, values }; + // A simple ETH transfer + if (isEth) { + options.value = value; + options.data = this.data || ''; + options.to = this.recipient; + + return [ null, options ]; } + // A token transfer + const tokenContract = this.tokenContract.at(this.token.address); const values = [ this.recipient, - token.toFixed(0) + value ]; - return { options, values }; + options.to = this.token.address; + + return [ tokenContract.instance.transfer, options, values ]; } _validatePositiveNumber (num) { diff --git a/js/src/modals/WalletSettings/walletSettingsStore.js b/js/src/modals/WalletSettings/walletSettingsStore.js index d31ec9eb2..c3adb812e 100644 --- a/js/src/modals/WalletSettings/walletSettingsStore.js +++ b/js/src/modals/WalletSettings/walletSettingsStore.js @@ -20,6 +20,7 @@ import BigNumber from 'bignumber.js'; import { validateUint, validateAddress } from '~/util/validation'; import { DEFAULT_GAS, MAX_GAS_ESTIMATION } from '~/util/constants'; +import WalletsUtils from '~/util/wallets'; const STEPS = { EDIT: { title: 'wallet settings' }, @@ -220,8 +221,6 @@ export default class WalletSettingsStore { this.api = api; this.step = this.stepsKeys[0]; - this.walletInstance = wallet.instance; - this.initialWallet = { address: wallet.address, owners: wallet.owners, @@ -280,72 +279,43 @@ export default class WalletSettingsStore { @action send = () => { const changes = this.changes; - const walletInstance = this.walletInstance; - Promise.all(changes.map((change) => this.sendChange(change, walletInstance))); + Promise.all(changes.map((change) => this.sendChange(change))); this.onClose(); } - @action sendChange = (change, walletInstance) => { - const { method, values } = this.getChangeMethod(change, walletInstance); + @action sendChange = (change) => { + const { api, initialWallet } = this; - const options = { - from: this.wallet.sender, - to: this.initialWallet.address, - gas: 
MAX_GAS_ESTIMATION - }; - - return method - .estimateGas(options, values) - .then((gasEst) => { - let gas = gasEst; - - if (gas.gt(DEFAULT_GAS)) { - gas = gas.mul(1.2); + WalletsUtils.getChangeMethod(api, initialWallet.address, change) + .then((changeMethod) => { + if (!changeMethod) { + return; } - options.gas = gas; - return method.postTransaction(options, values); + const { method, values } = changeMethod; + + const options = { + from: this.wallet.sender, + to: initialWallet.address, + gas: MAX_GAS_ESTIMATION + }; + + return method + .estimateGas(options, values) + .then((gasEst) => { + let gas = gasEst; + + if (gas.gt(DEFAULT_GAS)) { + gas = gas.mul(1.2); + } + options.gas = gas; + + return method.postTransaction(options, values); + }); }); } - getChangeMethod = (change, walletInstance) => { - if (change.type === 'require') { - return { - method: walletInstance.changeRequirement, - values: [ change.value ] - }; - } - - if (change.type === 'dailylimit') { - return { - method: walletInstance.setDailyLimit, - values: [ change.value ] - }; - } - - if (change.type === 'add_owner') { - return { - method: walletInstance.addOwner, - values: [ change.value ] - }; - } - - if (change.type === 'change_owner') { - return { - method: walletInstance.changeOwner, - values: [ change.value.from, change.value.to ] - }; - } - - if (change.type === 'remove_owner') { - return { - method: walletInstance.removeOwner, - values: [ change.value ] - }; - } - } - @action validateWallet = (_wallet) => { const senderValidation = validateAddress(_wallet.sender); const requireValidation = validateUint(_wallet.require); diff --git a/js/src/redux/providers/personalActions.js b/js/src/redux/providers/personalActions.js index 6200537c3..f747fa92e 100644 --- a/js/src/redux/providers/personalActions.js +++ b/js/src/redux/providers/personalActions.js @@ -23,7 +23,7 @@ import { attachWallets } from './walletActions'; import Contract from '~/api/contract'; import MethodDecodingStore from 
'~/ui/MethodDecoding/methodDecodingStore'; import WalletsUtils from '~/util/wallets'; -import { wallet as WalletAbi } from '~/contracts/abi'; +import { foundationWallet as WalletAbi } from '~/contracts/abi'; export function personalAccountsInfo (accountsInfo) { const accounts = {}; diff --git a/js/src/redux/providers/walletActions.js b/js/src/redux/providers/walletActions.js index b31a2b35b..58a8faca6 100644 --- a/js/src/redux/providers/walletActions.js +++ b/js/src/redux/providers/walletActions.js @@ -17,19 +17,17 @@ import { isEqual, uniq } from 'lodash'; import Contract from '~/api/contract'; -import { bytesToHex, toHex } from '~/api/util/format'; import { ERROR_CODES } from '~/api/transport/error'; -import { wallet as WALLET_ABI } from '~/contracts/abi'; -import { MAX_GAS_ESTIMATION } from '~/util/constants'; +import { foundationWallet as WALLET_ABI } from '~/contracts/abi'; import WalletsUtils from '~/util/wallets'; - import { newError } from '~/ui/Errors/actions'; - -const UPDATE_OWNERS = 'owners'; -const UPDATE_REQUIRE = 'require'; -const UPDATE_DAILYLIMIT = 'dailylimit'; -const UPDATE_TRANSACTIONS = 'transactions'; -const UPDATE_CONFIRMATIONS = 'confirmations'; +import { + UPDATE_OWNERS, + UPDATE_REQUIRE, + UPDATE_DAILYLIMIT, + UPDATE_TRANSACTIONS, + UPDATE_CONFIRMATIONS +} from '~/util/wallets/updates'; export function confirmOperation (address, owner, operation) { return modifyOperation('confirm', address, owner, operation); @@ -39,41 +37,25 @@ export function revokeOperation (address, owner, operation) { return modifyOperation('revoke', address, owner, operation); } -function modifyOperation (method, address, owner, operation) { +function modifyOperation (modification, address, owner, operation) { return (dispatch, getState) => { const { api } = getState(); - const contract = new Contract(api, WALLET_ABI).at(address); - - const options = { - from: owner, - gas: MAX_GAS_ESTIMATION - }; - - const values = [ operation ]; 
dispatch(setOperationPendingState(address, operation, true)); - contract.instance[method] - .estimateGas(options, values) - .then((gas) => { - options.gas = gas.mul(1.2); - return contract.instance[method].postTransaction(options, values); - }) + WalletsUtils.postModifyOperation(api, address, modification, owner, operation) .then((requestId) => { - return api - .pollMethod('parity_checkRequest', requestId) - .catch((e) => { - dispatch(setOperationPendingState(address, operation, false)); - if (e.code === ERROR_CODES.REQUEST_REJECTED) { - return; - } - - throw e; - }); + return api.pollMethod('parity_checkRequest', requestId); }) .catch((error) => { - dispatch(setOperationPendingState(address, operation, false)); + if (error.code === ERROR_CODES.REQUEST_REJECTED) { + return; + } + dispatch(newError(error)); + }) + .then(() => { + dispatch(setOperationPendingState(address, operation, false)); }); }; } @@ -97,14 +79,18 @@ export function attachWallets (_wallets) { return dispatch(updateWallets({ wallets: {}, walletsAddresses: [], filterSubId: null })); } - const filterOptions = { - fromBlock: 0, - toBlock: 'latest', - address: nextAddresses - }; - + // Filter the logs from the current block api.eth - .newFilter(filterOptions) + .blockNumber() + .then((block) => { + const filterOptions = { + fromBlock: block, + toBlock: 'latest', + address: nextAddresses + }; + + return api.eth.newFilter(filterOptions); + }) .then((filterId) => { dispatch(updateWallets({ wallets: _wallets, walletsAddresses: nextAddresses, filterSubId: filterId })); }) @@ -142,7 +128,6 @@ export function load (api) { api.eth .getFilterChanges(filterSubId) - .then((logs) => contract.parseEventLogs(logs)) .then((logs) => { parseLogs(logs)(dispatch, getState); }) @@ -292,202 +277,57 @@ function fetchWalletDailylimit (contract) { } function fetchWalletConfirmations (contract, _operations, _owners = null, _transactions = null, getState) { - const walletInstance = contract.instance; - const wallet = 
getState().wallet.wallets[contract.address]; const owners = _owners || (wallet && wallet.owners) || null; const transactions = _transactions || (wallet && wallet.transactions) || null; - // Full load if no operations given, or if the one given aren't loaded yet - const fullLoad = !Array.isArray(_operations) || _operations - .filter((op) => !wallet.confirmations.find((conf) => conf.operation === op)) - .length > 0; + const cache = { owners, transactions }; - let promise; - - if (fullLoad) { - promise = walletInstance - .ConfirmationNeeded - .getAllLogs() - .then((logs) => { - return logs.map((log) => ({ - initiator: log.params.initiator.value, - to: log.params.to.value, - data: log.params.data.value, - value: log.params.value.value, - operation: bytesToHex(log.params.operation.value), - transactionIndex: log.transactionIndex, - transactionHash: log.transactionHash, - blockNumber: log.blockNumber, - confirmedBy: [] - })); - }) - .then((logs) => { - return logs.sort((logA, logB) => { - const comp = logA.blockNumber.comparedTo(logB.blockNumber); - - if (comp !== 0) { - return comp; - } - - return logA.transactionIndex.comparedTo(logB.transactionIndex); - }); - }) - .then((confirmations) => { - if (confirmations.length === 0) { - return confirmations; - } - - // Only fetch confirmations for operations not - // yet confirmed (ie. 
not yet a transaction) - if (transactions) { - const operations = transactions - .filter((t) => t.operation) - .map((t) => t.operation); - - return confirmations.filter((confirmation) => { - return !operations.includes(confirmation.operation); - }); - } - - return confirmations; - }); - } else { - const { confirmations } = wallet; - const nextConfirmations = confirmations - .filter((conf) => _operations.includes(conf.operation)); - - promise = Promise.resolve(nextConfirmations); - } - - return promise + return WalletsUtils.fetchPendingTransactions(contract, cache) .then((confirmations) => { - if (confirmations.length === 0) { - return confirmations; - } - - const uniqConfirmations = Object.values( - confirmations.reduce((confirmations, confirmation) => { - confirmations[confirmation.operation] = confirmation; - return confirmations; - }, {}) - ); - - const operations = uniqConfirmations.map((conf) => conf.operation); - - return Promise - .all(operations.map((op) => fetchOperationConfirmations(contract, op, owners))) - .then((confirmedBys) => { - uniqConfirmations.forEach((_, index) => { - uniqConfirmations[index].confirmedBy = confirmedBys[index]; - }); - - return uniqConfirmations; - }); - }) - .then((confirmations) => { - const prevConfirmations = wallet.confirmations || []; - const nextConfirmations = prevConfirmations - .filter((conA) => !confirmations.find((conB) => conB.operation === conA.operation)) - .concat(confirmations) - .map((conf) => ({ - ...conf, - pending: false - })); - return { key: UPDATE_CONFIRMATIONS, - value: nextConfirmations + value: confirmations }; }); } -function fetchOperationConfirmations (contract, operation, owners = null) { - if (!owners) { - console.warn('[fetchOperationConfirmations] try to provide the owners for the Wallet', contract.address); - } - - const walletInstance = contract.instance; - - const promise = owners - ? 
Promise.resolve({ value: owners }) - : fetchWalletOwners(contract); - - return promise - .then((result) => { - const owners = result.value; - - return Promise - .all(owners.map((owner) => walletInstance.hasConfirmed.call({}, [ operation, owner ]))) - .then((data) => { - return owners.filter((owner, index) => data[index]); - }); - }); -} - function parseLogs (logs) { return (dispatch, getState) => { if (!logs || logs.length === 0) { return; } - const WalletSignatures = WalletsUtils.getWalletSignatures(); - + const { api } = getState(); const updates = {}; - logs.forEach((log) => { - const { address, topics } = log; - const eventSignature = toHex(topics[0]); - const prev = updates[address] || { - [ UPDATE_DAILYLIMIT ]: true, - address - }; + const promises = logs.map((log) => { + const { address } = log; - switch (eventSignature) { - case WalletSignatures.OwnerChanged: - case WalletSignatures.OwnerAdded: - case WalletSignatures.OwnerRemoved: - updates[address] = { - ...prev, - [ UPDATE_OWNERS ]: true + return WalletsUtils.logToUpdate(api, address, log) + .then((update) => { + const prev = updates[address] || { + [ UPDATE_DAILYLIMIT ]: true, + address }; - return; - case WalletSignatures.RequirementChanged: - updates[address] = { - ...prev, - [ UPDATE_REQUIRE ]: true - }; - return; + if (update[UPDATE_CONFIRMATIONS]) { + const operations = (prev[UPDATE_CONFIRMATIONS] || []).concat(update[UPDATE_CONFIRMATIONS]); - case WalletSignatures.ConfirmationNeeded: - case WalletSignatures.Confirmation: - case WalletSignatures.Revoke: - const operation = bytesToHex(log.params.operation.value); + update[UPDATE_CONFIRMATIONS] = uniq(operations); + } updates[address] = { ...prev, - [ UPDATE_CONFIRMATIONS ]: uniq( - (prev[UPDATE_CONFIRMATIONS] || []).concat(operation) - ) + ...update }; - - return; - - case WalletSignatures.Deposit: - case WalletSignatures.SingleTransact: - case WalletSignatures.MultiTransact: - case WalletSignatures.Old.SingleTransact: - case 
WalletSignatures.Old.MultiTransact: - updates[address] = { - ...prev, - [ UPDATE_TRANSACTIONS ]: true - }; - return; - } + }); }); - fetchWalletsInfo(updates)(dispatch, getState); + return Promise.all(promises) + .then(() => { + fetchWalletsInfo(updates)(dispatch, getState); + }); }; } diff --git a/js/src/util/tx.js b/js/src/util/tx.js index 9ab8b6599..e325e6024 100644 --- a/js/src/util/tx.js +++ b/js/src/util/tx.js @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +import BigNumber from 'bignumber.js'; + import WalletsUtils from '~/util/wallets'; /** @@ -71,11 +73,14 @@ const isValidReceipt = (receipt) => { return receipt && receipt.blockNumber && receipt.blockNumber.gt(0); }; -function getTxArgs (func, options, values = []) { - const { contract } = func; - const { api } = contract; +export function getTxOptions (api, func, _options, values = []) { + const options = { ..._options }; const address = options.from; + if (func && func.contract) { + options.to = options.to || func.contract.address; + } + if (!address) { return Promise.resolve({ func, options, values }); } @@ -87,8 +92,9 @@ function getTxArgs (func, options, values = []) { return { func, options, values }; } - options.data = contract.getCallData(func, options, values); - options.to = options.to || contract.address; + if (func && func.contract) { + options.data = func.contract.getCallData(func, options, values); + } if (!options.to) { return { func, options, values }; @@ -103,24 +109,35 @@ function getTxArgs (func, options, values = []) { return callArgs; }); + }) + .then(({ func, options, values }) => { + if (func) { + options.data = func.contract.getCallData(func, options, values); + } + + if (!options.value) { + options.value = new BigNumber(0); + } + + return options; }); } export function estimateGas (_func, _options, _values = []) { - return getTxArgs(_func, _options, _values) - .then((callArgs) => { - const { func, options, 
values } = callArgs; + const { api } = _func.contract; - return func._estimateGas(options, values); + return getTxOptions(api, _func, _options, _values) + .then((options) => { + return api.eth.estimateGas(options); }); } export function postTransaction (_func, _options, _values = []) { - return getTxArgs(_func, _options, _values) - .then((callArgs) => { - const { func, options, values } = callArgs; + const { api } = _func.contract; - return func._postTransaction(options, values); + return getTxOptions(api, _func, _options, _values) + .then((options) => { + return api.parity.postTransaction(options); }); } @@ -182,42 +199,35 @@ export function deploy (contract, options, values, skipGasEstimate = false) { } export function parseTransactionReceipt (api, options, receipt) { - const { metadata } = options; - const address = options.from; - if (receipt.gasUsed.eq(options.gas)) { const error = new Error(`Contract not deployed, gasUsed == ${options.gas.toFixed(0)}`); return Promise.reject(error); } - const logs = WalletsUtils.parseLogs(api, receipt.logs || []); + // If regular contract creation, only validate the contract + if (receipt.contractAddress) { + return validateContract(api, receipt.contractAddress); + } - const confirmationLog = logs.find((log) => log.event === 'ConfirmationNeeded'); - const transactionLog = logs.find((log) => log.event === 'SingleTransact'); + // Otherwise, needs to check for a contract deployment + // from a multisig wallet + const walletResult = WalletsUtils.parseTransactionLogs(api, options, receipt.logs || []); - if (!confirmationLog && !transactionLog && !receipt.contractAddress) { + if (!walletResult) { const error = new Error('Something went wrong in the contract deployment...'); return Promise.reject(error); } - // Confirmations are needed from the other owners - if (confirmationLog) { - const operationHash = api.util.bytesToHex(confirmationLog.params.operation.value); - - // Add the contract to pending contracts - 
WalletsUtils.addPendingContract(address, operationHash, metadata); + if (walletResult.pending) { return Promise.resolve(null); } - if (transactionLog) { - // Set the contract address in the receipt - receipt.contractAddress = transactionLog.params.created.value; - } - - const contractAddress = receipt.contractAddress; + return validateContract(api, walletResult.contractAddress); +} +function validateContract (api, contractAddress) { return api.eth .getCode(contractAddress) .then((code) => { diff --git a/js/src/util/wallets.js b/js/src/util/wallets.js index e90f4115f..6b1c29d01 100644 --- a/js/src/util/wallets.js +++ b/js/src/util/wallets.js @@ -15,95 +15,96 @@ // along with Parity. If not, see . import BigNumber from 'bignumber.js'; -import { intersection, range, uniq } from 'lodash'; -import store from 'store'; +import { intersection } from 'lodash'; -import Abi from '~/abi'; -import Contract from '~/api/contract'; -import { bytesToHex, toHex } from '~/api/util/format'; -import { validateAddress } from '~/util/validation'; -import WalletAbi from '~/contracts/abi/wallet.json'; -import OldWalletAbi from '~/contracts/abi/old-wallet.json'; +import ConsensysWalletUtils from './wallets/consensys-wallet'; +import FoundationWalletUtils from './wallets/foundation-wallet'; -const LS_PENDING_CONTRACTS_KEY = '_parity::wallets::pendingContracts'; +const CONSENSYS_WALLET = 'CONSENSYS_WALLET'; +const FOUNDATION_WALLET = 'FOUNDATION_WALLET'; const _cachedWalletLookup = {}; +const _cachedWalletTypes = {}; let _cachedAccounts = {}; -const walletAbi = new Abi(WalletAbi); -const oldWalletAbi = new Abi(OldWalletAbi); - -const walletEvents = walletAbi.events.reduce((events, event) => { - events[event.name] = event; - return events; -}, {}); - -const oldWalletEvents = oldWalletAbi.events.reduce((events, event) => { - events[event.name] = event; - return events; -}, {}); - -const WalletSignatures = { - OwnerChanged: toHex(walletEvents.OwnerChanged.signature), - OwnerAdded: 
toHex(walletEvents.OwnerAdded.signature), - OwnerRemoved: toHex(walletEvents.OwnerRemoved.signature), - RequirementChanged: toHex(walletEvents.RequirementChanged.signature), - Confirmation: toHex(walletEvents.Confirmation.signature), - Revoke: toHex(walletEvents.Revoke.signature), - Deposit: toHex(walletEvents.Deposit.signature), - SingleTransact: toHex(walletEvents.SingleTransact.signature), - MultiTransact: toHex(walletEvents.MultiTransact.signature), - ConfirmationNeeded: toHex(walletEvents.ConfirmationNeeded.signature), - - Old: { - SingleTransact: toHex(oldWalletEvents.SingleTransact.signature), - MultiTransact: toHex(oldWalletEvents.MultiTransact.signature) - } -}; - export default class WalletsUtils { - static getWalletSignatures () { - return WalletSignatures; - } - - static getPendingContracts () { - return store.get(LS_PENDING_CONTRACTS_KEY) || {}; - } - - static setPendingContracts (contracts = {}) { - return store.set(LS_PENDING_CONTRACTS_KEY, contracts); - } - - static removePendingContract (operationHash) { - const nextContracts = WalletsUtils.getPendingContracts(); - - delete nextContracts[operationHash]; - WalletsUtils.setPendingContracts(nextContracts); - } - - static addPendingContract (address, operationHash, metadata) { - const nextContracts = { - ...WalletsUtils.getPendingContracts(), - [ operationHash ]: { - address, - metadata, - operationHash - } - }; - - WalletsUtils.setPendingContracts(nextContracts); - } - static cacheAccounts (accounts) { _cachedAccounts = accounts; } - static getCallArgs (api, options, values = []) { - const walletContract = new Contract(api, WalletAbi); - const walletAddress = options.from; + static delegateCall (api, address, method, args = []) { + return WalletsUtils.getWalletType(api, address) + .then((walletType) => { + if (walletType === CONSENSYS_WALLET) { + return ConsensysWalletUtils[method].apply(null, args); + } + + return FoundationWalletUtils[method].apply(null, args); + }); + } + + static fetchDailylimit 
(walletContract) { + const { api } = walletContract; return WalletsUtils - .fetchOwners(walletContract.at(walletAddress)) + .delegateCall(api, walletContract.address, 'fetchDailylimit', [ walletContract ]); + } + + static fetchOwners (walletContract) { + const { api } = walletContract; + + return WalletsUtils + .delegateCall(api, walletContract.address, 'fetchOwners', [ walletContract ]); + } + + static fetchRequire (walletContract) { + const { api } = walletContract; + + return WalletsUtils + .delegateCall(api, walletContract.address, 'fetchRequire', [ walletContract ]); + } + + static fetchPendingTransactions (walletContract, cache) { + const { api } = walletContract; + + return WalletsUtils + .delegateCall(api, walletContract.address, 'fetchPendingTransactions', [ walletContract, cache ]); + } + + static fetchTransactions (walletContract) { + const { api } = walletContract; + + return WalletsUtils + .delegateCall(api, walletContract.address, 'fetchTransactions', [ walletContract ]) + .then((transactions) => { + return transactions.sort((txA, txB) => { + const comp = txB.blockNumber.comparedTo(txA.blockNumber); + + if (comp !== 0) { + return comp; + } + + return txB.transactionIndex.comparedTo(txA.transactionIndex); + }); + }); + } + + static getCallArgs (api, options, values = []) { + const walletAddress = options.from; + let walletContract; + let submitMethod; + + return Promise + .all([ + WalletsUtils.getWalletContract(api, walletAddress), + WalletsUtils.delegateCall(api, walletAddress, 'getSubmitMethod') + ]) + .then(([ _walletContract, _submitMethod ]) => { + walletContract = _walletContract; + submitMethod = _submitMethod; + + return WalletsUtils.fetchOwners(walletContract); + }) .then((owners) => { const addresses = Object.keys(_cachedAccounts); const ownerAddress = intersection(addresses, owners).pop(); @@ -121,12 +122,12 @@ export default class WalletsUtils { const nextValues = [ to, value, data ]; const nextOptions = { ..._options, - from: ownerAddress, 
+ from: options.sender || ownerAddress, to: walletAddress, value: new BigNumber(0) }; - const execFunc = walletContract.instance.execute; + const execFunc = walletContract.instance[submitMethod]; const callArgs = { func: execFunc, options: nextOptions, values: nextValues }; if (!account.wallet) { @@ -139,6 +140,11 @@ export default class WalletsUtils { }); } + static getChangeMethod (api, address, change) { + return WalletsUtils + .delegateCall(api, address, 'getChangeMethod', [ api, address, change ]); + } + static getDeployArgs (contract, options, values) { const { api } = contract; const func = contract.constructors[0]; @@ -158,10 +164,41 @@ export default class WalletsUtils { }); } - static parseLogs (api, logs = []) { - const walletContract = new Contract(api, WalletAbi); + static getWalletContract (api, address) { + return WalletsUtils + .delegateCall(api, address, 'getWalletContract', [ api ]) + .then((walletContract) => { + return walletContract.at(address); + }); + } - return walletContract.parseEventLogs(logs); + static getWalletType (api, address) { + if (_cachedWalletTypes[address] === undefined) { + _cachedWalletTypes[address] = Promise.resolve(null) + .then((result) => { + if (result) { + return result; + } + + return FoundationWalletUtils.isWallet(api, address) + .then((isWallet) => isWallet && FOUNDATION_WALLET); + }) + .then((result) => { + if (result) { + return result; + } + + return ConsensysWalletUtils.isWallet(api, address) + .then((isWallet) => isWallet && CONSENSYS_WALLET); + }) + .then((result) => { + _cachedWalletTypes[address] = result || null; + + return _cachedWalletTypes[address]; + }); + } + + return Promise.resolve(_cachedWalletTypes[address]); } /** @@ -175,20 +212,8 @@ export default class WalletsUtils { } if (!_cachedWalletLookup[address]) { - const walletContract = new Contract(api, WalletAbi); - - _cachedWalletLookup[address] = walletContract - .at(address) - .instance - .m_numOwners - .call() - .then((result) => { - if (!result 
|| result.equals(0)) { - return false; - } - - return true; - }) + _cachedWalletLookup[address] = WalletsUtils.getWalletType(api, address) + .then((walletType) => walletType !== null) .then((bool) => { _cachedWalletLookup[address] = Promise.resolve(bool); return bool; @@ -198,207 +223,34 @@ export default class WalletsUtils { return _cachedWalletLookup[address]; } - static fetchRequire (walletContract) { - return walletContract.instance.m_required.call(); - } - - static fetchOwners (walletContract) { - const walletInstance = walletContract.instance; - - return walletInstance - .m_numOwners.call() - .then((mNumOwners) => { - const promises = range(mNumOwners.toNumber()) - .map((idx) => walletInstance.getOwner.call({}, [ idx ])); - - return Promise - .all(promises) - .then((owners) => { - const uniqOwners = uniq(owners); - - // If all owners are the zero account : must be Mist wallet contract - if (uniqOwners.length === 1 && /^(0x)?0*$/.test(owners[0])) { - return WalletsUtils.fetchMistOwners(walletContract, mNumOwners.toNumber()); - } - - return owners; - }) - .then((owners) => uniq(owners)); - }); - } - - static fetchMistOwners (walletContract, mNumOwners) { - const walletAddress = walletContract.address; - + static logToUpdate (api, address, log) { return WalletsUtils - .getMistOwnersOffset(walletContract) - .then((result) => { - if (!result || result.offset === -1) { - return []; - } - - const owners = [ result.address ]; - - if (mNumOwners === 1) { - return owners; - } - - const initOffset = result.offset + 1; - let promise = Promise.resolve(); - - range(initOffset, initOffset + mNumOwners - 1).forEach((offset) => { - promise = promise - .then(() => { - return walletContract.api.eth.getStorageAt(walletAddress, offset); - }) - .then((result) => { - const resultAddress = '0x' + (result || '').slice(-40); - const { address } = validateAddress(resultAddress); - - owners.push(address); - }); - }); - - return promise.then(() => owners); - }); + .delegateCall(api, 
address, 'logToUpdate', [ log ]); } - static getMistOwnersOffset (walletContract, offset = 3) { - return walletContract.api.eth - .getStorageAt(walletContract.address, offset) - .then((result) => { - if (result && !/^(0x)?0*$/.test(result)) { - const resultAddress = '0x' + result.slice(-40); - const { address, addressError } = validateAddress(resultAddress); - - if (!addressError) { - return { offset, address }; - } - } - - if (offset >= 100) { - return { offset: -1 }; - } - - return WalletsUtils.getMistOwnersOffset(walletContract, offset + 1); - }); + static parseTransactionLogs (api, options, rawLogs) { + return WalletsUtils + .delegateCall(api, options.from, 'parseTransactionLogs', [ api, options, rawLogs ]); } - static fetchDailylimit (walletContract) { - const walletInstance = walletContract.instance; + static postModifyOperation (api, walletAddress, modification, owner, operation) { + const options = { from: owner }; + const values = [ operation ]; return Promise .all([ - walletInstance.m_dailyLimit.call(), - walletInstance.m_spentToday.call(), - walletInstance.m_lastDay.call() + WalletsUtils + .getWalletContract(api, walletAddress), + WalletsUtils + .delegateCall(api, walletAddress, 'getModifyOperationMethod', [ modification ]) ]) - .then(([ limit, spent, last ]) => ({ - limit, spent, last - })); - } - - static fetchTransactions (walletContract) { - const { api } = walletContract; - const pendingContracts = WalletsUtils.getPendingContracts(); - - return walletContract - .getAllLogs({ - topics: [ [ - WalletSignatures.SingleTransact, - WalletSignatures.MultiTransact, - WalletSignatures.Deposit, - WalletSignatures.Old.SingleTransact, - WalletSignatures.Old.MultiTransact - ] ] - }) - .then((logs) => { - return logs.sort((logA, logB) => { - const comp = logB.blockNumber.comparedTo(logA.blockNumber); - - if (comp !== 0) { - return comp; - } - - return logB.transactionIndex.comparedTo(logA.transactionIndex); - }); - }) - .then((logs) => { - const transactions = 
logs.map((log) => { - const signature = toHex(log.topics[0]); - - const value = log.params.value.value; - const from = signature === WalletSignatures.Deposit - ? log.params['_from'].value - : walletContract.address; - - const to = signature === WalletSignatures.Deposit - ? walletContract.address - : log.params.to.value; - - const transaction = { - transactionHash: log.transactionHash, - blockNumber: log.blockNumber, - from, to, value - }; - - if (log.params.created && log.params.created.value && !/^(0x)?0*$/.test(log.params.created.value)) { - transaction.creates = log.params.created.value; - delete transaction.to; - } - - if (log.params.operation) { - const operation = bytesToHex(log.params.operation.value); - - // Add the pending contract to the contracts - if (pendingContracts[operation]) { - const { metadata } = pendingContracts[operation]; - const contractName = metadata.name; - - metadata.blockNumber = log.blockNumber; - - // The contract creation might not be in the same log, - // but must be in the same transaction (eg. 
Contract creation - // from Wallet within a Wallet) - api.eth - .getTransactionReceipt(log.transactionHash) - .then((transactionReceipt) => { - const transactionLogs = WalletsUtils.parseLogs(api, transactionReceipt.logs); - const creationLog = transactionLogs.find((log) => { - return log.params.created && !/^(0x)?0*$/.test(log.params.created.value); - }); - - if (!creationLog) { - return false; - } - - const contractAddress = creationLog.params.created.value; - - return Promise - .all([ - api.parity.setAccountName(contractAddress, contractName), - api.parity.setAccountMeta(contractAddress, metadata) - ]) - .then(() => { - WalletsUtils.removePendingContract(operation); - }); - }) - .catch((error) => { - console.error('adding wallet contract', error); - }); - } - - transaction.operation = operation; - } - - if (log.params.data) { - transaction.data = log.params.data.value; - } - - return transaction; - }); - - return transactions; + .then(([ wallet, method ]) => { + return wallet.instance[method] + .estimateGas(options, values) + .then((gas) => { + options.gas = gas.mul(1.5); + return wallet.instance[method].postTransaction(options, values); + }); }); } } diff --git a/js/src/util/wallets/consensys-wallet.js b/js/src/util/wallets/consensys-wallet.js new file mode 100644 index 000000000..9f9f9d5fa --- /dev/null +++ b/js/src/util/wallets/consensys-wallet.js @@ -0,0 +1,354 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +import BigNumber from 'bignumber.js'; + +import Abi from '~/abi'; +import Contract from '~/api/contract'; +import { toHex } from '~/api/util/format'; + +import WalletAbi from '~/contracts/abi/consensys-multisig-wallet.json'; + +import { + UPDATE_OWNERS, + UPDATE_REQUIRE, + UPDATE_TRANSACTIONS, + UPDATE_CONFIRMATIONS +} from './updates'; + +const WALLET_CONTRACT = new Contract({}, WalletAbi); +const WALLET_ABI = new Abi(WalletAbi); + +const walletEvents = WALLET_ABI.events.reduce((events, event) => { + events[event.name] = event; + return events; +}, {}); + +const WalletSignatures = { + Confirmation: toHex(walletEvents.Confirmation.signature), + Revocation: toHex(walletEvents.Revocation.signature), + Deposit: toHex(walletEvents.Deposit.signature), + Execution: toHex(walletEvents.Execution.signature), + OwnerAddition: toHex(walletEvents.OwnerAddition.signature), + OwnerRemoval: toHex(walletEvents.OwnerRemoval.signature), + RequirementChange: toHex(walletEvents.RequirementChange.signature), + Submission: toHex(walletEvents.Submission.signature) +}; + +export default class ConsensysWalletUtils { + static fetchOwners (inWallet) { + const wallet = new Contract(inWallet.api, WalletAbi).at(inWallet.address); + + return wallet.instance.getOwners.call() + .then((owners) => { + return owners.map((token) => token.value); + }); + } + + static fetchPendingTransactions (inWallet) { + const wallet = new Contract(inWallet.api, WalletAbi).at(inWallet.address); + + let transactions; + let txIds; + + // Get pending and not exectued transactions + return wallet.instance.getTransactionCount + .call({}, [ true, false ]) + .then((txCount) => { + // Get all the pending transactions + const fromId = 0; + const toId = txCount; + + return wallet.instance.getTransactionIds + .call({}, [ fromId, toId, true, false ]); + }) + .then((_txIds) => { + txIds = _txIds.map((token) => 
token.value); + + const promises = txIds.map((txId) => { + return wallet.instance.transactions + .call({}, [ txId ]); + }); + + return Promise.all(promises); + }) + .then((transactions) => { + return transactions.map((transaction, index) => { + const [ destination, value, data ] = transaction; + const id = toHex(txIds[index]); + + return { + to: destination, + data, + value, + operation: id + }; + }); + }) + .then((_transactions) => { + transactions = _transactions; + + return wallet + .getAllLogs({ + topics: [ + WalletSignatures.Submission, + txIds.map((txId) => toHex(txId)) + ] + }); + }) + .then((logs) => { + transactions.forEach((tx) => { + const log = logs + .find((log) => { + const id = toHex(log.params.transactionId.value); + + return id === tx.operation; + }); + + if (!log) { + console.warn('could not find a Submission log for this operation', tx); + return; + } + + tx.transactionIndex = log.transactionIndex; + tx.transactionHash = log.transactionHash; + tx.blockNumber = log.blockNumber; + }); + + const confirmationsPromises = transactions.map((tx) => { + return wallet.instance.getConfirmations + .call({}, [ tx.operation ]) + .then((owners) => { + return owners.map((token) => token.value); + }); + }); + + return Promise.all(confirmationsPromises); + }) + .then((confirmations) => { + transactions.forEach((tx, index) => { + tx.confirmedBy = confirmations[index]; + }); + + return transactions; + }); + } + + static fetchRequire (inWallet) { + const wallet = new Contract(inWallet.api, WalletAbi).at(inWallet.address); + + return wallet.instance.required.call(); + } + + static fetchTransactions (inWallet) { + const wallet = new Contract(inWallet.api, WalletAbi).at(inWallet.address); + + let transactions; + let txIds; + + return wallet.instance.getTransactionCount + .call({}, [ false, true ]) + .then((txCount) => { + // Get the 20 last transactions + const fromId = Math.max(0, txCount - 20); + const toId = txCount; + + return wallet.instance.getTransactionIds + 
.call({}, [ fromId, toId, false, true ]); + }) + .then((_txIds) => { + txIds = _txIds.map((token) => token.value); + + const promises = txIds.map((txId) => { + return wallet.instance.transactions + .call({}, [ txId ]); + }); + + return Promise.all(promises); + }) + .then((transactions) => { + return transactions.map((transaction, index) => { + const [ destination, value, data, executed ] = transaction; + const id = toHex(txIds[index]); + + return { + destination, value, data, executed, id + }; + }); + }) + .then((_transactions) => { + transactions = _transactions; + + const depositLogs = wallet + .getAllLogs({ + topics: [ WalletSignatures.Deposit ] + }); + + const executionLogs = wallet + .getAllLogs({ + topics: [ WalletSignatures.Execution, txIds ] + }); + + return Promise.all([ depositLogs, executionLogs ]); + }) + .then(([ depositLogs, executionLogs ]) => { + const logs = [].concat(depositLogs, executionLogs); + + return logs.map((log) => { + const signature = toHex(log.topics[0]); + + const transaction = { + transactionHash: log.transactionHash, + blockNumber: log.blockNumber + }; + + if (signature === WalletSignatures.Deposit) { + transaction.from = log.params.sender.value; + transaction.value = log.params.value.value; + transaction.to = wallet.address; + } else { + const txId = toHex(log.params.transactionId.value); + const tx = transactions.find((tx) => tx.id === txId); + + transaction.from = wallet.address; + transaction.to = tx.destination; + transaction.value = tx.value; + transaction.data = tx.data; + transaction.operation = toHex(tx.id); + } + + return transaction; + }); + }); + } + + static getChangeMethod (api, address, change) { + const wallet = new Contract(api, WalletAbi).at(address); + const walletInstance = wallet.instance; + + let data = ''; + + if (change.type === 'require') { + const func = walletInstance.changeRequirement; + + data = wallet.getCallData(func, {}, [ change.value ]); + } + + if (change.type === 'add_owner') { + const func = 
walletInstance.addOwner; + + data = wallet.getCallData(func, {}, [ change.value ]); + } + + if (change.type === 'change_owner') { + const func = walletInstance.replaceOwner; + + data = wallet.getCallData(func, {}, [ change.value.from, change.value.to ]); + } + + if (change.type === 'remove_owner') { + const func = walletInstance.removeOwner; + + data = wallet.getCallData(func, {}, [ change.value ]); + } + + const method = walletInstance.submitTransaction; + const values = [ address, 0, data ]; + + return { method, values }; + } + + static getModifyOperationMethod (modification) { + switch (modification) { + case 'confirm': + return 'confirmTransaction'; + + case 'revoke': + return 'revokeConfirmation'; + + default: + return ''; + } + } + + static getSubmitMethod () { + return 'submitTransaction'; + } + + static getWalletContract (api) { + return new Contract(api, WalletAbi); + } + + static getWalletSignatures () { + return WalletSignatures; + } + + static fetchDailylimit () { + return { + last: new BigNumber(0), + limit: new BigNumber(0), + spent: new BigNumber(0) + }; + } + + static isWallet (api, address) { + const wallet = new Contract(api, WalletAbi).at(address); + + return ConsensysWalletUtils.fetchRequire(wallet) + .then((result) => { + if (!result || result.equals(0)) { + return false; + } + + return true; + }); + } + + static logToUpdate (log) { + const eventSignature = toHex(log.topics[0]); + + switch (eventSignature) { + case WalletSignatures.OwnerAddition: + case WalletSignatures.OwnerRemoval: + return { [ UPDATE_OWNERS ]: true }; + + case WalletSignatures.RequirementChange: + return { [ UPDATE_REQUIRE ]: true }; + + case WalletSignatures.Deposit: + case WalletSignatures.Execution: + return { [ UPDATE_TRANSACTIONS ]: true }; + + case WalletSignatures.Submission: + case WalletSignatures.Confirmation: + case WalletSignatures.Revocation: + const parsedLog = WALLET_CONTRACT.parseEventLogs([ log ])[0]; + const operation = 
toHex(parsedLog.params.transactionId.value); + + return { [ UPDATE_CONFIRMATIONS ]: operation }; + + default: + return {}; + } + } + + /** + * This type of wallet cannot create any contract... + */ + static parseTransactionLogs (api, options, rawLogs) { + return null; + } +} diff --git a/js/src/util/wallets/foundation-wallet.js b/js/src/util/wallets/foundation-wallet.js new file mode 100644 index 000000000..4fb1cfe22 --- /dev/null +++ b/js/src/util/wallets/foundation-wallet.js @@ -0,0 +1,500 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +import { range, uniq } from 'lodash'; + +import Abi from '~/abi'; +import Contract from '~/api/contract'; +import { bytesToHex, toHex } from '~/api/util/format'; +import { validateAddress } from '~/util/validation'; + +import WalletAbi from '~/contracts/abi/foundation-multisig-wallet.json'; +import OldWalletAbi from '~/contracts/abi/old-wallet.json'; + +import PendingContracts from './pending-contracts'; +import { + UPDATE_OWNERS, + UPDATE_REQUIRE, + UPDATE_TRANSACTIONS, + UPDATE_CONFIRMATIONS +} from './updates'; + +const WALLET_CONTRACT = new Contract({}, WalletAbi); +const WALLET_ABI = new Abi(WalletAbi); +const OLD_WALLET_ABI = new Abi(OldWalletAbi); + +const walletEvents = WALLET_ABI.events.reduce((events, event) => { + events[event.name] = event; + return events; +}, {}); + +const oldWalletEvents = OLD_WALLET_ABI.events.reduce((events, event) => { + events[event.name] = event; + return events; +}, {}); + +const WalletSignatures = { + OwnerChanged: toHex(walletEvents.OwnerChanged.signature), + OwnerAdded: toHex(walletEvents.OwnerAdded.signature), + OwnerRemoved: toHex(walletEvents.OwnerRemoved.signature), + RequirementChanged: toHex(walletEvents.RequirementChanged.signature), + Confirmation: toHex(walletEvents.Confirmation.signature), + Revoke: toHex(walletEvents.Revoke.signature), + Deposit: toHex(walletEvents.Deposit.signature), + SingleTransact: toHex(walletEvents.SingleTransact.signature), + MultiTransact: toHex(walletEvents.MultiTransact.signature), + ConfirmationNeeded: toHex(walletEvents.ConfirmationNeeded.signature), + + Old: { + SingleTransact: toHex(oldWalletEvents.SingleTransact.signature), + MultiTransact: toHex(oldWalletEvents.MultiTransact.signature) + } +}; + +export default class FoundationWalletUtils { + static fetchConfirmations (walletContract, operation, _owners = null) { + const ownersPromise = _owners + ? 
Promise.resolve(_owners) + : FoundationWalletUtils.fetchOwners(walletContract); + + return ownersPromise + .then((owners) => { + const promises = owners.map((owner) => { + return walletContract.instance.hasConfirmed.call({}, [ operation, owner ]); + }); + + return Promise + .all(promises) + .then((data) => { + return owners.filter((_, index) => data[index]); + }); + }); + } + + static fetchDailylimit (walletContract) { + const walletInstance = walletContract.instance; + + return Promise + .all([ + walletInstance.m_dailyLimit.call(), + walletInstance.m_spentToday.call(), + walletInstance.m_lastDay.call() + ]) + .then(([ limit, spent, last ]) => ({ + limit, spent, last + })); + } + + static fetchOwners (walletContract) { + const walletInstance = walletContract.instance; + + return walletInstance + .m_numOwners.call() + .then((mNumOwners) => { + const promises = range(mNumOwners.toNumber()) + .map((idx) => walletInstance.getOwner.call({}, [ idx ])); + + return Promise + .all(promises) + .then((_owners) => { + const owners = validateOwners(_owners); + + // If all owners are the zero account : must be Mist wallet contract + if (!owners) { + return fetchMistOwners(walletContract, mNumOwners.toNumber()); + } + + return owners; + }); + }); + } + + static fetchPendingTransactions (walletContract, cache = {}) { + const { owners, transactions } = cache; + + return walletContract + .instance + .ConfirmationNeeded + .getAllLogs() + .then((logs) => { + return logs.map((log) => ({ + initiator: log.params.initiator.value, + to: log.params.to.value, + data: log.params.data.value, + value: log.params.value.value, + operation: bytesToHex(log.params.operation.value), + transactionIndex: log.transactionIndex, + transactionHash: log.transactionHash, + blockNumber: log.blockNumber, + confirmedBy: [] + })); + }) + .then((logs) => { + return logs.sort((logA, logB) => { + const comp = logA.blockNumber.comparedTo(logB.blockNumber); + + if (comp !== 0) { + return comp; + } + + return 
logA.transactionIndex.comparedTo(logB.transactionIndex); + }); + }) + .then((pendingTxs) => { + if (pendingTxs.length === 0) { + return pendingTxs; + } + + // Only fetch confirmations for operations not + // yet confirmed (ie. not yet a transaction) + if (transactions) { + const operations = transactions + .filter((t) => t.operation) + .map((t) => t.operation); + + return pendingTxs.filter((pendingTx) => { + return !operations.includes(pendingTx.operation); + }); + } + + return pendingTxs; + }) + .then((pendingTxs) => { + const promises = pendingTxs.map((tx) => { + return FoundationWalletUtils + .fetchConfirmations(walletContract, tx.operation, owners) + .then((confirmedBy) => { + tx.confirmedBy = confirmedBy; + + return tx; + }); + }); + + return Promise.all(promises); + }); + } + + static fetchRequire (wallet) { + return wallet.instance.m_required.call(); + } + + static fetchTransactions (walletContract) { + const { api } = walletContract; + + return walletContract + .getAllLogs({ + topics: [ [ + WalletSignatures.SingleTransact, + WalletSignatures.MultiTransact, + WalletSignatures.Deposit, + WalletSignatures.Old.SingleTransact, + WalletSignatures.Old.MultiTransact + ] ] + }) + .then((logs) => { + const transactions = logs.map((log) => { + const signature = toHex(log.topics[0]); + + const value = log.params.value.value; + const from = signature === WalletSignatures.Deposit + ? log.params['_from'].value + : walletContract.address; + + const to = signature === WalletSignatures.Deposit + ? 
walletContract.address + : log.params.to.value; + + const transaction = { + transactionHash: log.transactionHash, + blockNumber: log.blockNumber, + from, to, value + }; + + if (log.params.created && log.params.created.value && !/^(0x)?0*$/.test(log.params.created.value)) { + transaction.creates = log.params.created.value; + delete transaction.to; + } + + if (log.params.operation) { + transaction.operation = bytesToHex(log.params.operation.value); + checkPendingOperation(api, log, transaction.operation); + } + + if (log.params.data) { + transaction.data = log.params.data.value; + } + + return transaction; + }); + + return transactions; + }); + } + + static getChangeMethod (api, address, change) { + const wallet = new Contract(api, WalletAbi).at(address); + const walletInstance = wallet.instance; + + if (change.type === 'require') { + return { + method: walletInstance.changeRequirement, + values: [ change.value ] + }; + } + + if (change.type === 'dailylimit') { + return { + method: walletInstance.setDailyLimit, + values: [ change.value ] + }; + } + + if (change.type === 'add_owner') { + return { + method: walletInstance.addOwner, + values: [ change.value ] + }; + } + + if (change.type === 'change_owner') { + return { + method: walletInstance.changeOwner, + values: [ change.value.from, change.value.to ] + }; + } + + if (change.type === 'remove_owner') { + return { + method: walletInstance.removeOwner, + values: [ change.value ] + }; + } + } + + static getModifyOperationMethod (modification) { + switch (modification) { + case 'confirm': + return 'confirm'; + + case 'revoke': + return 'revoke'; + + default: + return ''; + } + } + + static getSubmitMethod () { + return 'execute'; + } + + static getWalletContract (api) { + return new Contract(api, WalletAbi); + } + + static getWalletSignatures () { + return WalletSignatures; + } + + static isWallet (api, address) { + const walletContract = new Contract(api, WalletAbi); + + return walletContract + .at(address) + .instance 
+ .m_numOwners + .call() + .then((result) => { + if (!result || result.equals(0)) { + return false; + } + + return true; + }); + } + + static logToUpdate (log) { + const eventSignature = toHex(log.topics[0]); + + switch (eventSignature) { + case WalletSignatures.OwnerChanged: + case WalletSignatures.OwnerAdded: + case WalletSignatures.OwnerRemoved: + return { [ UPDATE_OWNERS ]: true }; + + case WalletSignatures.RequirementChanged: + return { [ UPDATE_REQUIRE ]: true }; + + case WalletSignatures.ConfirmationNeeded: + case WalletSignatures.Confirmation: + case WalletSignatures.Revoke: + const parsedLog = WALLET_CONTRACT.parseEventLogs([ log ])[0]; + const operation = bytesToHex(parsedLog.params.operation.value); + + return { [ UPDATE_CONFIRMATIONS ]: operation }; + + case WalletSignatures.Deposit: + case WalletSignatures.SingleTransact: + case WalletSignatures.MultiTransact: + case WalletSignatures.Old.SingleTransact: + case WalletSignatures.Old.MultiTransact: + return { [ UPDATE_TRANSACTIONS ]: true }; + + default: + return {}; + } + } + + static parseLogs (api, logs = []) { + const walletContract = new Contract(api, WalletAbi); + + return walletContract.parseEventLogs(logs); + } + + static parseTransactionLogs (api, options, rawLogs) { + const { metadata } = options; + const address = options.from; + const logs = FoundationWalletUtils.parseLogs(api, rawLogs); + + const confirmationLog = logs.find((log) => log.event === 'ConfirmationNeeded'); + const transactionLog = logs.find((log) => log.event === 'SingleTransact'); + + if (!confirmationLog && !transactionLog) { + return null; + } + + // Confirmations are needed from the other owners + if (confirmationLog) { + const operationHash = bytesToHex(confirmationLog.params.operation.value); + + // Add the contract to pending contracts + PendingContracts.addPendingContract(address, operationHash, metadata); + + return { pending: true }; + } + + return { contractAddress: transactionLog.params.created.value }; + } +} + 
+function checkPendingOperation (api, log, operation) { + const pendingContracts = PendingContracts.getPendingContracts(); + + // Add the pending contract to the contracts + if (pendingContracts[operation]) { + const { metadata } = pendingContracts[operation]; + const contractName = metadata.name; + + metadata.blockNumber = log.blockNumber; + + // The contract creation might not be in the same log, + // but must be in the same transaction (eg. Contract creation + // from Wallet within a Wallet) + api.eth + .getTransactionReceipt(log.transactionHash) + .then((transactionReceipt) => { + const transactionLogs = FoundationWalletUtils.parseLogs(api, transactionReceipt.logs); + const creationLog = transactionLogs.find((log) => { + return log.params.created && !/^(0x)?0*$/.test(log.params.created.value); + }); + + if (!creationLog) { + return false; + } + + const contractAddress = creationLog.params.created.value; + + return Promise + .all([ + api.parity.setAccountName(contractAddress, contractName), + api.parity.setAccountMeta(contractAddress, metadata) + ]) + .then(() => { + PendingContracts.removePendingContract(operation); + }); + }) + .catch((error) => { + console.error('adding wallet contract', error); + }); + } +} + +function fetchMistOwners (walletContract, mNumOwners) { + const walletAddress = walletContract.address; + + return getMistOwnersOffset(walletContract) + .then((result) => { + if (!result || result.offset === -1) { + return []; + } + + const owners = [ result.address ]; + + if (mNumOwners === 1) { + return owners; + } + + const initOffset = result.offset + 1; + let promise = Promise.resolve(); + + range(initOffset, initOffset + mNumOwners - 1).forEach((offset) => { + promise = promise + .then(() => { + return walletContract.api.eth.getStorageAt(walletAddress, offset); + }) + .then((result) => { + const resultAddress = '0x' + (result || '').slice(-40); + const { address } = validateAddress(resultAddress); + + owners.push(address); + }); + }); + + return 
promise.then(() => owners); + }); +} + +function getMistOwnersOffset (walletContract, offset = 3) { + return walletContract.api.eth + .getStorageAt(walletContract.address, offset) + .then((result) => { + if (result && !/^(0x)?0*$/.test(result)) { + const resultAddress = '0x' + result.slice(-40); + const { address, addressError } = validateAddress(resultAddress); + + if (!addressError) { + return { offset, address }; + } + } + + if (offset >= 100) { + return { offset: -1 }; + } + + return getMistOwnersOffset(walletContract, offset + 1); + }); +} + +function validateOwners (owners) { + const uniqOwners = uniq(owners); + + // If all owners are the zero account : must be Mist wallet contract + if (uniqOwners.length === 1 && /^(0x)?0*$/.test(owners[0])) { + return null; + } + + return uniqOwners; +} diff --git a/js/src/util/wallets/pending-contracts.js b/js/src/util/wallets/pending-contracts.js new file mode 100644 index 000000000..8eef273e2 --- /dev/null +++ b/js/src/util/wallets/pending-contracts.js @@ -0,0 +1,49 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +import store from 'store'; + +const LS_PENDING_CONTRACTS_KEY = '_parity::wallets::pendingContracts'; + +export default class PendingContracts { + static getPendingContracts () { + return store.get(LS_PENDING_CONTRACTS_KEY) || {}; + } + + static setPendingContracts (contracts = {}) { + return store.set(LS_PENDING_CONTRACTS_KEY, contracts); + } + + static removePendingContract (operationHash) { + const nextContracts = PendingContracts.getPendingContracts(); + + delete nextContracts[operationHash]; + PendingContracts.setPendingContracts(nextContracts); + } + + static addPendingContract (address, operationHash, metadata) { + const nextContracts = { + ...PendingContracts.getPendingContracts(), + [ operationHash ]: { + address, + metadata, + operationHash + } + }; + + PendingContracts.setPendingContracts(nextContracts); + } +} diff --git a/js/src/util/wallets/updates.js b/js/src/util/wallets/updates.js new file mode 100644 index 000000000..a739652fc --- /dev/null +++ b/js/src/util/wallets/updates.js @@ -0,0 +1,21 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +export const UPDATE_OWNERS = 'owners'; +export const UPDATE_REQUIRE = 'require'; +export const UPDATE_DAILYLIMIT = 'dailylimit'; +export const UPDATE_TRANSACTIONS = 'transactions'; +export const UPDATE_CONFIRMATIONS = 'confirmations'; From 7d17d77254950a559f98ec8f3f941393a1863eca Mon Sep 17 00:00:00 2001 From: Craig O'Connor Date: Wed, 9 Aug 2017 11:06:40 -0600 Subject: [PATCH 051/112] Dapp refresh (#5752) * RwLock * getting there * argh * parking_lot * rpc * wax on wax off * almost there * remove lock * write over read * works * linting * small updates * dissapearing act * router update * complete * one m * grumbles1 * grumbles part II * parking_lot->util * missed test case * fied package-lock.json * small fixes * 404 tests failing * cleanup * cleanup 2 * updates and the likes * play * simplify filter * f-ing bugs * read->write * Address own grumbles. * Fix test. --- dapps/src/apps/mod.rs | 12 +++--- dapps/src/endpoint.rs | 3 +- dapps/src/lib.rs | 57 +++++++++++++++++++++------- dapps/src/router.rs | 51 ++++++++++++++++--------- dapps/src/tests/fetch.rs | 2 +- dapps/src/tests/redirection.rs | 1 - dapps/src/web.rs | 2 - js/package-lock.json | 4 +- js/src/api/rpc/parity/parity.js | 5 +++ js/src/jsonrpc/interfaces/parity.js | 11 ++++++ js/src/views/Dapps/dapps.js | 13 ++++++- js/src/views/Dapps/dappsStore.js | 19 +++++++++- parity/dapps.rs | 7 +++- rpc/src/v1/helpers/dapps.rs | 10 +---- rpc/src/v1/impls/light/parity_set.rs | 4 ++ rpc/src/v1/impls/parity_set.rs | 4 ++ rpc/src/v1/tests/helpers/dapps.rs | 4 ++ rpc/src/v1/traits/parity_set.rs | 4 ++ 18 files changed, 157 insertions(+), 56 deletions(-) diff --git a/dapps/src/apps/mod.rs b/dapps/src/apps/mod.rs index 376b8a36f..c38c6784a 100644 --- a/dapps/src/apps/mod.rs +++ b/dapps/src/apps/mod.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::collections::BTreeMap; use std::path::PathBuf; use std::sync::Arc; @@ -30,8 +29,8 @@ use {WebProxyTokens, ParentFrameSettings}; mod app; mod cache; -mod fs; mod ui; +pub mod fs; pub mod fetcher; pub mod manifest; @@ -64,9 +63,10 @@ pub fn all_endpoints( web_proxy_tokens: Arc, remote: Remote, fetch: F, -) -> Endpoints { +) -> (Vec, Endpoints) { // fetch fs dapps at first to avoid overwriting builtins - let mut pages = fs::local_endpoints(dapps_path, embeddable.clone()); + let mut pages = fs::local_endpoints(dapps_path.clone(), embeddable.clone()); + let local_endpoints: Vec = pages.keys().cloned().collect(); for path in extra_dapps { if let Some((id, endpoint)) = fs::local_endpoint(path.clone(), embeddable.clone()) { pages.insert(id, endpoint); @@ -80,10 +80,10 @@ pub fn all_endpoints( pages.insert("proxy".into(), ProxyPac::boxed(embeddable.clone(), dapps_domain.to_owned())); pages.insert(WEB_PATH.into(), Web::boxed(embeddable.clone(), web_proxy_tokens.clone(), remote.clone(), fetch.clone())); - Arc::new(pages) + (local_endpoints, pages) } -fn insert(pages: &mut BTreeMap>, id: &str, embed_at: Embeddable) { +fn insert(pages: &mut Endpoints, id: &str, embed_at: Embeddable) { pages.insert(id.to_owned(), Box::new(match embed_at { Embeddable::Yes(address) => PageEndpoint::new_safe_to_embed(T::default(), address), Embeddable::No => PageEndpoint::new(T::default()), diff --git a/dapps/src/endpoint.rs b/dapps/src/endpoint.rs index ea8fd0a38..ea5825b74 100644 --- a/dapps/src/endpoint.rs +++ b/dapps/src/endpoint.rs @@ -16,7 +16,6 @@ //! 
URL Endpoint traits -use std::sync::Arc; use std::collections::BTreeMap; use hyper::{self, server, net}; @@ -39,7 +38,7 @@ pub struct EndpointInfo { pub icon_url: String, } -pub type Endpoints = Arc>>; +pub type Endpoints = BTreeMap>; pub type Handler = server::Handler + Send; pub trait Endpoint : Send + Sync { diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index f34c24cae..412e69daf 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -69,9 +69,11 @@ mod web; #[cfg(test)] mod tests; +use std::collections::HashMap; +use std::mem; use std::path::PathBuf; use std::sync::Arc; -use std::collections::HashMap; +use util::RwLock; use jsonrpc_http_server::{self as http, hyper, Origin}; @@ -101,31 +103,54 @@ impl WebProxyTokens for F where F: Fn(String) -> Option + Send + Sync } /// Current supported endpoints. +#[derive(Default, Clone)] pub struct Endpoints { - endpoints: endpoint::Endpoints, + local_endpoints: Arc>>, + endpoints: Arc>, + dapps_path: PathBuf, + embeddable: Option, } impl Endpoints { /// Returns a current list of app endpoints. pub fn list(&self) -> Vec { - self.endpoints.iter().filter_map(|(ref k, ref e)| { + self.endpoints.read().iter().filter_map(|(ref k, ref e)| { e.info().map(|ref info| apps::App::from_info(k, info)) }).collect() } + + /// Check for any changes in the local dapps folder and update. 
+ pub fn refresh_local_dapps(&self) { + let new_local = apps::fs::local_endpoints(&self.dapps_path, self.embeddable.clone()); + let old_local = mem::replace(&mut *self.local_endpoints.write(), new_local.keys().cloned().collect()); + let (_, to_remove): (_, Vec<_>) = old_local + .into_iter() + .partition(|k| new_local.contains_key(&k.clone())); + + let mut endpoints = self.endpoints.write(); + // remove the dead dapps + for k in to_remove { + endpoints.remove(&k); + } + // new dapps to be added + for (k, v) in new_local { + if !endpoints.contains_key(&k) { + endpoints.insert(k, v); + } + } + } } /// Dapps server as `jsonrpc-http-server` request middleware. pub struct Middleware { + endpoints: Endpoints, router: router::Router, - endpoints: endpoint::Endpoints, } impl Middleware { /// Get local endpoints handle. - pub fn endpoints(&self) -> Endpoints { - Endpoints { - endpoints: self.endpoints.clone(), - } + pub fn endpoints(&self) -> &Endpoints { + &self.endpoints } /// Creates new middleware for UI server. 
@@ -164,8 +189,8 @@ impl Middleware { ); Middleware { - router: router, endpoints: Default::default(), + router: router, } } @@ -191,8 +216,8 @@ impl Middleware { remote.clone(), fetch.clone(), ).embeddable_on(embeddable.clone()).allow_dapps(true)); - let endpoints = apps::all_endpoints( - dapps_path, + let (local_endpoints, endpoints) = apps::all_endpoints( + dapps_path.clone(), extra_dapps, dapps_domain, embeddable.clone(), @@ -200,6 +225,12 @@ impl Middleware { remote.clone(), fetch.clone(), ); + let endpoints = Endpoints { + endpoints: Arc::new(RwLock::new(endpoints)), + dapps_path, + local_endpoints: Arc::new(RwLock::new(local_endpoints)), + embeddable: embeddable.clone(), + }; let special = { let mut special = special_endpoints( @@ -225,8 +256,8 @@ impl Middleware { ); Middleware { - router: router, - endpoints: endpoints, + endpoints, + router, } } } diff --git a/dapps/src/router.rs b/dapps/src/router.rs index 5cf92ff7e..2b74d51df 100644 --- a/dapps/src/router.rs +++ b/dapps/src/router.rs @@ -28,7 +28,8 @@ use jsonrpc_http_server as http; use apps; use apps::fetcher::Fetcher; -use endpoint::{Endpoint, Endpoints, EndpointPath, Handler}; +use endpoint::{Endpoint, EndpointPath, Handler}; +use Endpoints; use handlers; use Embeddable; @@ -50,26 +51,27 @@ pub struct Router { dapps_domain: String, } -impl http::RequestMiddleware for Router { - fn on_request(&self, req: &server::Request, control: &Control) -> http::RequestMiddlewareAction { +impl Router { + fn resolve_request(&self, req: &server::Request, control: Control, refresh_dapps: bool) -> (bool, Option>) { // Choose proper handler depending on path / domain let url = handlers::extract_url(req); let endpoint = extract_endpoint(&url, &self.dapps_domain); let referer = extract_referer_endpoint(req, &self.dapps_domain); let is_utils = endpoint.1 == SpecialEndpoint::Utils; - let is_origin_set = req.headers().get::().is_some(); let is_get_request = *req.method() == hyper::Method::Get; let is_head_request = 
*req.method() == hyper::Method::Head; + let has_dapp = |dapp: &str| self.endpoints + .as_ref() + .map_or(false, |endpoints| endpoints.endpoints.read().contains_key(dapp)); trace!(target: "dapps", "Routing request to {:?}. Details: {:?}", url, req); - - let control = control.clone(); debug!(target: "dapps", "Handling endpoint request: {:?}", endpoint); - let handler: Option> = match (endpoint.0, endpoint.1, referer) { + + (is_utils, match (endpoint.0, endpoint.1, referer) { // Handle invalid web requests that we can recover from (ref path, SpecialEndpoint::None, Some((ref referer, ref referer_url))) if referer.app_id == apps::WEB_PATH - && self.endpoints.as_ref().map(|ep| ep.contains_key(apps::WEB_PATH)).unwrap_or(false) + && has_dapp(apps::WEB_PATH) && !is_web_endpoint(path) => { @@ -88,11 +90,13 @@ impl http::RequestMiddleware for Router { .map(|special| special.to_async_handler(path.clone().unwrap_or_default(), control)) }, // Then delegate to dapp - (Some(ref path), _, _) if self.endpoints.as_ref().map(|ep| ep.contains_key(&path.app_id)).unwrap_or(false) => { + (Some(ref path), _, _) if has_dapp(&path.app_id) => { trace!(target: "dapps", "Resolving to local/builtin dapp."); Some(self.endpoints .as_ref() .expect("endpoints known to be set; qed") + .endpoints + .read() .get(&path.app_id) .expect("endpoints known to contain key; qed") .to_async_handler(path.clone(), control)) @@ -110,13 +114,19 @@ impl http::RequestMiddleware for Router { => { trace!(target: "dapps", "Resolving to 404."); - Some(Box::new(handlers::ContentHandler::error( - hyper::StatusCode::NotFound, - "404 Not Found", - "Requested content was not found.", - None, - self.embeddable_on.clone(), - ))) + if refresh_dapps { + debug!(target: "dapps", "Refreshing dapps and re-trying."); + self.endpoints.as_ref().map(|endpoints| endpoints.refresh_local_dapps()); + return self.resolve_request(req, control, false) + } else { + Some(Box::new(handlers::ContentHandler::error( + hyper::StatusCode::NotFound, + 
"404 Not Found", + "Requested content was not found.", + None, + self.embeddable_on.clone(), + ))) + } }, // Any other GET|HEAD requests to home page. _ if (is_get_request || is_head_request) && self.special.contains_key(&SpecialEndpoint::Home) => { @@ -130,8 +140,15 @@ impl http::RequestMiddleware for Router { trace!(target: "dapps", "Resolving to RPC call."); None } - }; + }) + } +} +impl http::RequestMiddleware for Router { + fn on_request(&self, req: &server::Request, control: &Control) -> http::RequestMiddlewareAction { + let control = control.clone(); + let is_origin_set = req.headers().get::().is_some(); + let (is_utils, handler) = self.resolve_request(req, control, self.endpoints.is_some()); match handler { Some(handler) => http::RequestMiddlewareAction::Respond { should_validate_hosts: !is_utils, diff --git a/dapps/src/tests/fetch.rs b/dapps/src/tests/fetch.rs index 8abc86196..f12323155 100644 --- a/dapps/src/tests/fetch.rs +++ b/dapps/src/tests/fetch.rs @@ -39,7 +39,7 @@ fn should_resolve_dapp() { // then response.assert_status("HTTP/1.1 404 Not Found"); - assert_eq!(registrar.calls.lock().len(), 2); + assert_eq!(registrar.calls.lock().len(), 4); assert_security_headers_for_embed(&response.headers); } diff --git a/dapps/src/tests/redirection.rs b/dapps/src/tests/redirection.rs index 1e9b039e2..81d3ec76c 100644 --- a/dapps/src/tests/redirection.rs +++ b/dapps/src/tests/redirection.rs @@ -204,4 +204,3 @@ fn should_serve_utils() { assert_eq!(response.body.contains("function(){"), true); assert_security_headers(&response.headers); } - diff --git a/dapps/src/web.rs b/dapps/src/web.rs index 637a5287e..5222f51b5 100644 --- a/dapps/src/web.rs +++ b/dapps/src/web.rs @@ -241,5 +241,3 @@ impl server::Handler for WebHandler { } } } - - diff --git a/js/package-lock.json b/js/package-lock.json index 7514c3c29..9573976da 100644 --- a/js/package-lock.json +++ b/js/package-lock.json @@ -7722,7 +7722,7 @@ "minimatch": { "version": "3.0.4", "resolved": 
"https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "integrity": "sha1-UWbihkV/AzBgZL5Ul+jbsMPTIIM=", "requires": { "brace-expansion": "1.1.8" } @@ -10081,7 +10081,7 @@ "react-qr-reader": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/react-qr-reader/-/react-qr-reader-1.1.3.tgz", - "integrity": "sha512-ruBF8KaSwUW9nbzjO4rA7/HOCGYZuNUz9od7uBRy8SRBi24nwxWWmwa2z8R6vPGDRglA0y2Qk1aVBuC1olTnHw==", + "integrity": "sha1-dDmnZvyZPLj17u/HLCnblh1AswI=", "requires": { "jsqr": "git+https://github.com/JodusNodus/jsQR.git#5ba1acefa1cbb9b2bc92b49f503f2674e2ec212b", "prop-types": "15.5.10", diff --git a/js/src/api/rpc/parity/parity.js b/js/src/api/rpc/parity/parity.js index 4fdaf5b1b..c2681b3fb 100644 --- a/js/src/api/rpc/parity/parity.js +++ b/js/src/api/rpc/parity/parity.js @@ -95,6 +95,11 @@ export default class Parity { .execute('parity_dappsList'); } + dappsRefresh () { + return this._transport + .execute('parity_dappsRefresh'); + } + dappsUrl () { return this._transport .execute('parity_dappsUrl'); diff --git a/js/src/jsonrpc/interfaces/parity.js b/js/src/jsonrpc/interfaces/parity.js index d1ade602b..d80a0c5a2 100644 --- a/js/src/jsonrpc/interfaces/parity.js +++ b/js/src/jsonrpc/interfaces/parity.js @@ -164,6 +164,17 @@ export default { } }, + dappsRefresh: { + subdoc: SUBDOC_SET, + desc: 'Returns a boolean value upon success and error upon failure', + params: [], + returns: { + type: Boolean, + desc: 'True for success. 
error details for failure', + example: true + } + }, + dappsUrl: { section: SECTION_NODE, desc: 'Returns the hostname and the port of dapps/rpc server, error if not enabled.', diff --git a/js/src/views/Dapps/dapps.js b/js/src/views/Dapps/dapps.js index 5e5efedf7..54706f352 100644 --- a/js/src/views/Dapps/dapps.js +++ b/js/src/views/Dapps/dapps.js @@ -24,7 +24,7 @@ import { connect } from 'react-redux'; import { DappPermissions, DappsVisible } from '~/modals'; import PermissionStore from '~/modals/DappPermissions/store'; import { Actionbar, Button, DappCard, Page, SectionList } from '~/ui'; -import { LockedIcon, VisibleIcon } from '~/ui/Icons'; +import { LockedIcon, RefreshIcon, VisibleIcon } from '~/ui/Icons'; import DappsStore from './dappsStore'; @@ -90,6 +90,17 @@ class Dapps extends Component { /> } buttons={ [ +