diff --git a/accounts/ethstore/cli/src/main.rs b/accounts/ethstore/cli/src/main.rs index 0f5644063..8fc0054be 100644 --- a/accounts/ethstore/cli/src/main.rs +++ b/accounts/ethstore/cli/src/main.rs @@ -163,7 +163,7 @@ fn main() { } } -fn key_dir(location: &str, password: Option) -> Result, Error> { +fn key_dir(location: &str, password: Option) -> Result, Error> { let dir: RootDiskDirectory = match location { "geth" => RootDiskDirectory::create(dir::geth(false))?, "geth-test" => RootDiskDirectory::create(dir::geth(true))?, diff --git a/accounts/ethstore/src/ethstore.rs b/accounts/ethstore/src/ethstore.rs index 89bb14ba6..36416c5e7 100644 --- a/accounts/ethstore/src/ethstore.rs +++ b/accounts/ethstore/src/ethstore.rs @@ -709,7 +709,7 @@ mod tests { } struct RootDiskDirectoryGuard { - pub key_dir: Option>, + pub key_dir: Option>, _path: TempDir, } diff --git a/ethcore/wasm/run/src/runner.rs b/ethcore/wasm/run/src/runner.rs index f43b4fa57..45e9c15b9 100644 --- a/ethcore/wasm/run/src/runner.rs +++ b/ethcore/wasm/run/src/runner.rs @@ -115,7 +115,7 @@ impl fmt::Display for Fail { } pub fn construct( - ext: &mut vm::Ext, + ext: &mut dyn vm::Ext, source: Vec, arguments: Vec, sender: H160, diff --git a/miner/src/pool/replace.rs b/miner/src/pool/replace.rs index 0655af599..9ed15bad2 100644 --- a/miner/src/pool/replace.rs +++ b/miner/src/pool/replace.rs @@ -133,7 +133,7 @@ mod tests { verified_tx } - fn should_replace(replace: &ShouldReplace, old: VerifiedTransaction, new: VerifiedTransaction) -> Choice { + fn should_replace(replace: &dyn ShouldReplace, old: VerifiedTransaction, new: VerifiedTransaction) -> Choice { let old_tx = txpool::Transaction { insertion_id: 0, transaction: Arc::new(old) }; let new_tx = txpool::Transaction { insertion_id: 0, transaction: Arc::new(new) }; let old = ReplaceTransaction::new(&old_tx, Default::default()); diff --git a/parity-clib/src/lib.rs b/parity-clib/src/lib.rs index bbb60ec2d..238bdaadf 100644 --- a/parity-clib/src/lib.rs +++ b/parity-clib/src/lib.rs @@ -243,7 +243,7 @@ pub unsafe extern fn parity_set_logger( } // WebSocket event loop -fn parity_ws_worker(client: &RunningClient, query: &str, callback: Arc) -> *const c_void { +fn parity_ws_worker(client: &RunningClient, query: &str, callback: Arc) -> *const c_void { let (tx, mut rx) = mpsc::channel(1); let session = Arc::new(PubSubSession::new(tx)); let query_future = client.rpc_query(query, Some(session.clone())); @@ -274,7 +274,7 @@ fn parity_ws_worker(client: &RunningClient, query: &str, callback: Arc } // RPC event loop that runs for at most `timeout_ms` -fn parity_rpc_worker(client: &RunningClient, query: &str, callback: Arc, timeout_ms: u64) { +fn parity_rpc_worker(client: &RunningClient, query: &str, callback: Arc, timeout_ms: u64) { let cb = callback.clone(); let query = client.rpc_query(query, None).map(move |response| { let response = response.unwrap_or_else(|| error::EMPTY.to_string()); diff --git a/secret-store/src/key_server.rs b/secret-store/src/key_server.rs index f93f92d5a..233a9b3db 100644 --- a/secret-store/src/key_server.rs +++ b/secret-store/src/key_server.rs @@ -37,13 +37,13 @@ pub struct KeyServerImpl { /// Secret store key server data. 
pub struct KeyServerCore { - cluster: Arc, + cluster: Arc, } impl KeyServerImpl { /// Create new key server instance - pub fn new(config: &ClusterConfiguration, key_server_set: Arc, self_key_pair: Arc, - acl_storage: Arc, key_storage: Arc, executor: Executor) -> Result + pub fn new(config: &ClusterConfiguration, key_server_set: Arc, self_key_pair: Arc, + acl_storage: Arc, key_storage: Arc, executor: Executor) -> Result { Ok(KeyServerImpl { data: Arc::new(Mutex::new(KeyServerCore::new(config, key_server_set, self_key_pair, acl_storage, key_storage, executor)?)), @@ -51,7 +51,7 @@ impl KeyServerImpl { } /// Get cluster client reference. - pub fn cluster(&self) -> Arc { + pub fn cluster(&self) -> Arc { self.data.lock().cluster.clone() } } @@ -64,7 +64,7 @@ impl AdminSessionsServer for KeyServerImpl { old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet, - ) -> Box + Send> { + ) -> Box + Send> { return_session(self.data.lock().cluster .new_servers_set_change_session(None, None, new_servers_set, old_set_signature, new_set_signature)) } @@ -76,7 +76,7 @@ impl ServerKeyGenerator for KeyServerImpl { key_id: ServerKeyId, author: Requester, threshold: usize, - ) -> Box + Send> { + ) -> Box + Send> { // recover requestor' address key from signature let address = author.address(&key_id).map_err(Error::InsufficientRequesterData); @@ -89,7 +89,7 @@ impl ServerKeyGenerator for KeyServerImpl { &self, key_id: ServerKeyId, author: Requester, - ) -> Box + Send> { + ) -> Box + Send> { // recover requestor' public key from signature let session_and_address = author .address(&key_id) @@ -121,7 +121,7 @@ impl DocumentKeyServer for KeyServerImpl { author: Requester, common_point: Public, encrypted_document_key: Public, - ) -> Box + Send> { + ) -> Box + Send> { // store encrypted key return_session(self.data.lock().cluster.new_encryption_session(key_id, author.clone(), common_point, encrypted_document_key)) @@ -132,7 +132,7 @@ impl DocumentKeyServer for KeyServerImpl { key_id: ServerKeyId, author: Requester, threshold: usize, - ) -> Box + Send> { + ) -> Box + Send> { // recover requestor' public key from signature let public = result(author.public(&key_id).map_err(Error::InsufficientRequesterData)); @@ -174,7 +174,7 @@ impl DocumentKeyServer for KeyServerImpl { &self, key_id: ServerKeyId, requester: Requester, - ) -> Box + Send> { + ) -> Box + Send> { // recover requestor' public key from signature let public = result(requester.public(&key_id).map_err(Error::InsufficientRequesterData)); @@ -200,7 +200,7 @@ impl DocumentKeyServer for KeyServerImpl { &self, key_id: ServerKeyId, requester: Requester, - ) -> Box + Send> { + ) -> Box + Send> { return_session(self.data.lock().cluster.new_decryption_session(key_id, None, requester.clone(), None, true, false)) } @@ -212,7 +212,7 @@ impl MessageSigner for KeyServerImpl { key_id: ServerKeyId, requester: Requester, message: MessageHash, - ) -> Box + Send> { + ) -> Box + Send> { // recover requestor' public key from signature let public = result(requester.public(&key_id).map_err(Error::InsufficientRequesterData)); @@ -246,7 +246,7 @@ impl MessageSigner for KeyServerImpl { key_id: ServerKeyId, requester: Requester, message: MessageHash, - ) -> Box + Send> { + ) -> Box + Send> { // recover requestor' public key from signature let public = result(requester.public(&key_id).map_err(Error::InsufficientRequesterData)); @@ -269,8 +269,8 @@ impl MessageSigner for KeyServerImpl { } impl KeyServerCore { - pub fn new(config: 
&ClusterConfiguration, key_server_set: Arc, self_key_pair: Arc, - acl_storage: Arc, key_storage: Arc, executor: Executor) -> Result + pub fn new(config: &ClusterConfiguration, key_server_set: Arc, self_key_pair: Arc, + acl_storage: Arc, key_storage: Arc, executor: Executor) -> Result { let cconfig = NetClusterConfiguration { self_key_pair: self_key_pair.clone(), @@ -298,7 +298,7 @@ impl KeyServerCore { fn return_session( session: Result, Error>, -) -> Box + Send> { +) -> Box + Send> { match session { Ok(session) => Box::new(session.into_wait_future()), Err(error) => Box::new(err(error)) @@ -340,7 +340,7 @@ pub mod tests { _old_set_signature: RequestSignature, _new_set_signature: RequestSignature, _new_servers_set: BTreeSet, - ) -> Box + Send> { + ) -> Box + Send> { unimplemented!("test-only") } } @@ -351,7 +351,7 @@ pub mod tests { _key_id: ServerKeyId, _author: Requester, _threshold: usize, - ) -> Box + Send> { + ) -> Box + Send> { unimplemented!("test-only") } @@ -359,7 +359,7 @@ pub mod tests { &self, _key_id: ServerKeyId, _author: Requester, - ) -> Box + Send> { + ) -> Box + Send> { unimplemented!("test-only") } } @@ -371,7 +371,7 @@ pub mod tests { _author: Requester, _common_point: Public, _encrypted_document_key: Public, - ) -> Box + Send> { + ) -> Box + Send> { unimplemented!("test-only") } @@ -380,7 +380,7 @@ pub mod tests { _key_id: ServerKeyId, _author: Requester, _threshold: usize, - ) -> Box + Send> { + ) -> Box + Send> { unimplemented!("test-only") } @@ -388,7 +388,7 @@ pub mod tests { &self, _key_id: ServerKeyId, _requester: Requester, - ) -> Box + Send> { + ) -> Box + Send> { unimplemented!("test-only") } @@ -396,7 +396,7 @@ pub mod tests { &self, _key_id: ServerKeyId, _requester: Requester, - ) -> Box + Send> { + ) -> Box + Send> { unimplemented!("test-only") } } @@ -407,7 +407,7 @@ pub mod tests { _key_id: ServerKeyId, _requester: Requester, _message: MessageHash, - ) -> Box + Send> { + ) -> Box + Send> { unimplemented!("test-only") } @@ -416,7 +416,7 @@ pub mod tests { _key_id: ServerKeyId, _requester: Requester, _message: MessageHash, - ) -> Box + Send> { + ) -> Box + Send> { unimplemented!("test-only") } } diff --git a/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs b/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs index 8db046326..841aa889e 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs @@ -83,7 +83,7 @@ struct SessionCore { /// Key share. pub key_share: Option, /// Session result computer. - pub result_computer: Arc, + pub result_computer: Arc, /// Session transport. pub transport: T, /// Session nonce. @@ -119,7 +119,7 @@ pub struct SessionParams { /// Key share. pub key_share: Option, /// Session result computer. - pub result_computer: Arc, + pub result_computer: Arc, /// Session transport to communicate to other cluster nodes. pub transport: T, /// Session nonce. @@ -140,7 +140,7 @@ enum SessionState { /// Isolated session transport. pub struct IsolatedSessionTransport { /// Cluster. - pub cluster: Arc, + pub cluster: Arc, /// Key id. pub key_id: SessionId, /// Sub session id. 
@@ -859,7 +859,7 @@ mod tests { versions: vec![version_id.clone().into()] })), Err(Error::InvalidMessage)); } - + run_test(CommonKeyData { threshold: 2, author: Default::default(), diff --git a/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs b/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs index 299d02121..5dcc7ea70 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs @@ -83,9 +83,9 @@ struct SessionCore { /// Servers set change session meta (id is computed from new_nodes_set). pub meta: ShareChangeSessionMeta, /// Cluster which allows this node to send messages to other nodes in the cluster. - pub cluster: Arc, + pub cluster: Arc, /// Keys storage. - pub key_storage: Arc, + pub key_storage: Arc, /// Session-level nonce. pub nonce: u64, /// All known nodes. @@ -136,9 +136,9 @@ pub struct SessionParams { /// Session meta (artificial). pub meta: ShareChangeSessionMeta, /// Cluster. - pub cluster: Arc, + pub cluster: Arc, /// Keys storage. - pub key_storage: Arc, + pub key_storage: Arc, /// Session nonce. pub nonce: u64, /// All known nodes. @@ -158,7 +158,7 @@ struct ServersSetChangeConsensusTransport { /// Migration id (if part of auto-migration process). migration_id: Option, /// Cluster. - cluster: Arc, + cluster: Arc, } /// Unknown sessions job transport. @@ -168,7 +168,7 @@ struct UnknownSessionsJobTransport { /// Session-level nonce. nonce: u64, /// Cluster. - cluster: Arc, + cluster: Arc, } /// Key version negotiation transport. @@ -178,7 +178,7 @@ struct ServersSetChangeKeyVersionNegotiationTransport { /// Session-level nonce. nonce: u64, /// Cluster. - cluster: Arc, + cluster: Arc, } impl SessionImpl { @@ -292,7 +292,7 @@ impl SessionImpl { self.on_session_error(sender, message.error.clone()); Ok(()) }, - &ServersSetChangeMessage::ServersSetChangeCompleted(ref message) => + &ServersSetChangeMessage::ServersSetChangeCompleted(ref message) => self.on_session_completed(sender, message), } } @@ -893,7 +893,7 @@ impl SessionImpl { /// Complete servers set change session. fn complete_session(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { debug_assert_eq!(core.meta.self_node_id, core.meta.master_node_id); - + // send completion notification core.cluster.broadcast(Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(ServersSetChangeCompleted { session: core.meta.id.clone().into(), diff --git a/secret-store/src/key_server_cluster/admin_sessions/sessions_queue.rs b/secret-store/src/key_server_cluster/admin_sessions/sessions_queue.rs index 91d3bc7b8..3346f0d2c 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/sessions_queue.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/sessions_queue.rs @@ -28,7 +28,7 @@ pub struct SessionsQueue { impl SessionsQueue { /// Create new sessions queue. 
- pub fn new(key_storage: &Arc, unknown_sessions: BTreeSet) -> Self { + pub fn new(key_storage: &Arc, unknown_sessions: BTreeSet) -> Self { // TODO [Opt]: // 1) known sessions - change to iter // 2) unknown sesions - request chunk-by-chunk diff --git a/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs b/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs index ef7882d68..c190396a5 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs @@ -69,7 +69,7 @@ struct SessionCore { /// Session transport to communicate to other cluster nodes. pub transport: T, /// Key storage. - pub key_storage: Arc, + pub key_storage: Arc, /// Administrator public key. pub admin_public: Option, /// Session completion signal. @@ -131,7 +131,7 @@ pub struct SessionParams { /// Session transport. pub transport: T, /// Key storage. - pub key_storage: Arc, + pub key_storage: Arc, /// Administrator public key. pub admin_public: Option, /// Session nonce. @@ -154,7 +154,7 @@ pub struct IsolatedSessionTransport { /// Id numbers of all new nodes. id_numbers: Option>>, /// Cluster. - cluster: Arc, + cluster: Arc, } impl SessionImpl where T: SessionTransport { @@ -817,7 +817,7 @@ impl ClusterSession for SessionImpl where T: SessionTransport { } impl IsolatedSessionTransport { - pub fn new(session_id: SessionId, version: Option, nonce: u64, cluster: Arc) -> Self { + pub fn new(session_id: SessionId, version: Option, nonce: u64, cluster: Arc) -> Self { IsolatedSessionTransport { session: session_id, version: version, diff --git a/secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs b/secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs index d6af236d4..18d2671f5 100644 --- a/secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs +++ b/secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs @@ -43,9 +43,9 @@ pub struct ShareChangeSession { /// Share change session meta. meta: ShareChangeSessionMeta, /// Cluster. - cluster: Arc, + cluster: Arc, /// Key storage. - key_storage: Arc, + key_storage: Arc, /// Key version. key_version: H256, /// Nodes that have reported version ownership. @@ -82,9 +82,9 @@ pub struct ShareChangeSessionParams { /// Share change session meta. pub meta: ShareChangeSessionMeta, /// Cluster. - pub cluster: Arc, + pub cluster: Arc, /// Keys storage. - pub key_storage: Arc, + pub key_storage: Arc, /// Session plan. pub plan: ShareChangeSessionPlan, } @@ -97,7 +97,7 @@ pub struct ShareChangeTransport { /// Session nonce. nonce: u64, /// Cluster. - cluster: Arc, + cluster: Arc, } impl ShareChangeSession { @@ -201,7 +201,7 @@ impl ShareChangeSession { } impl ShareChangeTransport { - pub fn new(session_id: SessionId, nonce: u64, cluster: Arc) -> Self { + pub fn new(session_id: SessionId, nonce: u64, cluster: Arc) -> Self { ShareChangeTransport { session_id: session_id, nonce: nonce, diff --git a/secret-store/src/key_server_cluster/client_sessions/decryption_session.rs b/secret-store/src/key_server_cluster/client_sessions/decryption_session.rs index 1bda0bc33..46691f58b 100644 --- a/secret-store/src/key_server_cluster/client_sessions/decryption_session.rs +++ b/secret-store/src/key_server_cluster/client_sessions/decryption_session.rs @@ -56,7 +56,7 @@ struct SessionCore { /// Key share. 
pub key_share: Option, /// Cluster which allows this node to send messages to other nodes in the cluster. - pub cluster: Arc, + pub cluster: Arc, /// Session-level nonce. pub nonce: u64, /// Session completion signal. @@ -98,9 +98,9 @@ pub struct SessionParams { /// Key share. pub key_share: Option, /// ACL storage. - pub acl_storage: Arc, + pub acl_storage: Arc, /// Cluster. - pub cluster: Arc, + pub cluster: Arc, /// Session nonce. pub nonce: u64, } @@ -118,7 +118,7 @@ struct DecryptionConsensusTransport { /// Selected key version (on master node). version: Option, /// Cluster. - cluster: Arc, + cluster: Arc, } /// Decryption job transport @@ -134,7 +134,7 @@ struct DecryptionJobTransport { /// Master node id. master_node_id: NodeId, /// Cluster. - cluster: Arc, + cluster: Arc, } /// Session delegation status. diff --git a/secret-store/src/key_server_cluster/client_sessions/encryption_session.rs b/secret-store/src/key_server_cluster/client_sessions/encryption_session.rs index bdead62e5..461810013 100644 --- a/secret-store/src/key_server_cluster/client_sessions/encryption_session.rs +++ b/secret-store/src/key_server_cluster/client_sessions/encryption_session.rs @@ -44,9 +44,9 @@ pub struct SessionImpl { /// Encrypted data. encrypted_data: Option, /// Key storage. - key_storage: Arc, + key_storage: Arc, /// Cluster which allows this node to send messages to other nodes in the cluster. - cluster: Arc, + cluster: Arc, /// Session nonce. nonce: u64, /// Session completion signal. @@ -64,9 +64,9 @@ pub struct SessionParams { /// Encrypted data (result of running generation_session::SessionImpl). pub encrypted_data: Option, /// Key storage. - pub key_storage: Arc, + pub key_storage: Arc, /// Cluster - pub cluster: Arc, + pub cluster: Arc, /// Session nonce. pub nonce: u64, } @@ -331,7 +331,7 @@ pub fn check_encrypted_data(key_share: Option<&DocumentKeyShare>) -> Result<(), } /// Update key share with encrypted document key. -pub fn update_encrypted_data(key_storage: &Arc, key_id: ServerKeyId, mut key_share: DocumentKeyShare, author: Address, common_point: Public, encrypted_point: Public) -> Result<(), Error> { +pub fn update_encrypted_data(key_storage: &Arc, key_id: ServerKeyId, mut key_share: DocumentKeyShare, author: Address, common_point: Public, encrypted_point: Public) -> Result<(), Error> { // author must be the same if key_share.author != author { return Err(Error::AccessDenied); diff --git a/secret-store/src/key_server_cluster/client_sessions/generation_session.rs b/secret-store/src/key_server_cluster/client_sessions/generation_session.rs index 02d33f5db..806173854 100644 --- a/secret-store/src/key_server_cluster/client_sessions/generation_session.rs +++ b/secret-store/src/key_server_cluster/client_sessions/generation_session.rs @@ -42,9 +42,9 @@ pub struct SessionImpl { /// Public identifier of this node. self_node_id: NodeId, /// Key storage. - key_storage: Option>, + key_storage: Option>, /// Cluster which allows this node to send messages to other nodes in the cluster. - cluster: Arc, + cluster: Arc, /// Session-level nonce. nonce: u64, /// Mutable session data. @@ -60,9 +60,9 @@ pub struct SessionParams { /// Id of node, on which this session is running. pub self_node_id: Public, /// Key storage. - pub key_storage: Option>, + pub key_storage: Option>, /// Cluster - pub cluster: Arc, + pub cluster: Arc, /// Session nonce. 
pub nonce: Option, } diff --git a/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs b/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs index d3c801af6..4f913a536 100644 --- a/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs +++ b/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs @@ -56,7 +56,7 @@ struct SessionCore { /// Key share. pub key_share: Option, /// Cluster which allows this node to send messages to other nodes in the cluster. - pub cluster: Arc, + pub cluster: Arc, /// Session-level nonce. pub nonce: u64, /// Session completion signal. @@ -112,9 +112,9 @@ pub struct SessionParams { /// Key share. pub key_share: Option, /// ACL storage. - pub acl_storage: Arc, + pub acl_storage: Arc, /// Cluster - pub cluster: Arc, + pub cluster: Arc, /// Session nonce. pub nonce: u64, } @@ -130,7 +130,7 @@ struct SigningConsensusTransport { /// Selected key version (on master node). version: Option, /// Cluster. - cluster: Arc, + cluster: Arc, } /// Signing key generation transport. @@ -142,7 +142,7 @@ struct NonceGenerationTransport, + cluster: Arc, /// Other nodes ids. other_nodes_ids: BTreeSet, /// Message mapping function. @@ -158,7 +158,7 @@ struct SigningJobTransport { /// Session-level nonce. nonce: u64, /// Cluster. - cluster: Arc, + cluster: Arc, } /// Session delegation status. diff --git a/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs b/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs index ff901fc15..ae0aa69d4 100644 --- a/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs +++ b/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs @@ -57,7 +57,7 @@ struct SessionCore { /// Key share. pub key_share: Option, /// Cluster which allows this node to send messages to other nodes in the cluster. - pub cluster: Arc, + pub cluster: Arc, /// Session-level nonce. pub nonce: u64, /// SessionImpl completion signal. @@ -106,9 +106,9 @@ pub struct SessionParams { /// Key share. pub key_share: Option, /// ACL storage. - pub acl_storage: Arc, + pub acl_storage: Arc, /// Cluster - pub cluster: Arc, + pub cluster: Arc, /// Session nonce. pub nonce: u64, } @@ -124,7 +124,7 @@ struct SigningConsensusTransport { /// Selected key version (on master node). version: Option, /// Cluster. - cluster: Arc, + cluster: Arc, } /// Signing key generation transport. @@ -132,7 +132,7 @@ struct SessionKeyGenerationTransport { /// Session access key. access_key: Secret, /// Cluster. - cluster: Arc, + cluster: Arc, /// Session-level nonce. nonce: u64, /// Other nodes ids. @@ -148,7 +148,7 @@ struct SigningJobTransport { /// Session-level nonce. nonce: u64, /// Cluster. - cluster: Arc, + cluster: Arc, } /// Session delegation status. diff --git a/secret-store/src/key_server_cluster/cluster.rs b/secret-store/src/key_server_cluster/cluster.rs index c6ffd446e..f1d347091 100644 --- a/secret-store/src/key_server_cluster/cluster.rs +++ b/secret-store/src/key_server_cluster/cluster.rs @@ -104,11 +104,11 @@ pub trait ClusterClient: Send + Sync { ) -> Result, Error>; /// Listen for new generation sessions. - fn add_generation_listener(&self, listener: Arc>); + fn add_generation_listener(&self, listener: Arc>); /// Listen for new decryption sessions. - fn add_decryption_listener(&self, listener: Arc>); + fn add_decryption_listener(&self, listener: Arc>); /// Listen for new key version negotiation sessions. 
- fn add_key_version_negotiation_listener(&self, listener: Arc>>); + fn add_key_version_negotiation_listener(&self, listener: Arc>>); /// Ask node to make 'faulty' generation sessions. #[cfg(test)] @@ -143,13 +143,13 @@ pub trait Cluster: Send + Sync { #[derive(Clone)] pub struct ClusterConfiguration { /// KeyPair this node holds. - pub self_key_pair: Arc, + pub self_key_pair: Arc, /// Cluster nodes set. - pub key_server_set: Arc, + pub key_server_set: Arc, /// Reference to key storage - pub key_storage: Arc, + pub key_storage: Arc, /// Reference to ACL storage - pub acl_storage: Arc, + pub acl_storage: Arc, /// Administrator public key. pub admin_public: Option, /// Do not remove sessions from container. @@ -172,8 +172,8 @@ pub struct ClusterClientImpl { pub struct ClusterView { configured_nodes_count: usize, connected_nodes: BTreeSet, - connections: Arc, - self_key_pair: Arc, + connections: Arc, + self_key_pair: Arc, } /// Cross-thread shareable cluster data. @@ -181,15 +181,15 @@ pub struct ClusterData { /// Cluster configuration. pub config: ClusterConfiguration, /// KeyPair this node holds. - pub self_key_pair: Arc, + pub self_key_pair: Arc, /// Connections data. pub connections: C, /// Active sessions data. pub sessions: Arc, // Messages processor. - pub message_processor: Arc, + pub message_processor: Arc, /// Link between servers set chnage session and the connections manager. - pub servers_set_change_creator_connector: Arc, + pub servers_set_change_creator_connector: Arc, } /// Create new network-backed cluster. @@ -206,7 +206,7 @@ pub fn new_network_cluster( connections: BTreeMap::new(), })); - let connection_trigger: Box = match net_config.auto_migrate_enabled { + let connection_trigger: Box = match net_config.auto_migrate_enabled { false => Box::new(SimpleConnectionTrigger::with_config(&config)), true if config.admin_public.is_none() => Box::new(ConnectionTriggerWithMigration::with_config(&config)), true => return Err(Error::Internal( @@ -264,9 +264,9 @@ pub fn new_test_cluster( impl ClusterCore { pub fn new( sessions: Arc, - message_processor: Arc, + message_processor: Arc, connections: C, - servers_set_change_creator_connector: Arc, + servers_set_change_creator_connector: Arc, config: ClusterConfiguration, ) -> Result, Error> { Ok(Arc::new(ClusterCore { @@ -282,7 +282,7 @@ impl ClusterCore { } /// Create new client interface. 
- pub fn client(&self) -> Arc { + pub fn client(&self) -> Arc { Arc::new(ClusterClientImpl::new(self.data.clone())) } @@ -293,7 +293,7 @@ impl ClusterCore { } #[cfg(test)] - pub fn view(&self) -> Result, Error> { + pub fn view(&self) -> Result, Error> { let connections = self.data.connections.provider(); let mut connected_nodes = connections.connected_nodes()?; let disconnected_nodes = connections.disconnected_nodes(); @@ -311,8 +311,8 @@ impl ClusterCore { impl ClusterView { pub fn new( - self_key_pair: Arc, - connections: Arc, + self_key_pair: Arc, + connections: Arc, nodes: BTreeSet, configured_nodes_count: usize ) -> Self { @@ -555,15 +555,15 @@ impl ClusterClient for ClusterClientImpl { }) } - fn add_generation_listener(&self, listener: Arc>) { + fn add_generation_listener(&self, listener: Arc>) { self.data.sessions.generation_sessions.add_listener(listener); } - fn add_decryption_listener(&self, listener: Arc>) { + fn add_decryption_listener(&self, listener: Arc>) { self.data.sessions.decryption_sessions.add_listener(listener); } - fn add_key_version_negotiation_listener(&self, listener: Arc>>) { + fn add_key_version_negotiation_listener(&self, listener: Arc>>) { self.data.sessions.negotiation_sessions.add_listener(listener); } @@ -597,10 +597,10 @@ pub struct ServersSetChangeParams { } pub fn new_servers_set_change_session( - self_key_pair: Arc, + self_key_pair: Arc, sessions: &ClusterSessions, - connections: Arc, - servers_set_change_creator_connector: Arc, + connections: Arc, + servers_set_change_creator_connector: Arc, params: ServersSetChangeParams, ) -> Result, Error> { let session_id = match params.session_id { @@ -757,9 +757,9 @@ pub mod tests { unimplemented!("test-only") } - fn add_generation_listener(&self, _listener: Arc>) {} - fn add_decryption_listener(&self, _listener: Arc>) {} - fn add_key_version_negotiation_listener(&self, _listener: Arc>>) {} + fn add_generation_listener(&self, _listener: Arc>) {} + fn add_decryption_listener(&self, _listener: Arc>) {} + fn add_key_version_negotiation_listener(&self, _listener: Arc>>) {} fn make_faulty_generation_sessions(&self) { unimplemented!("test-only") } fn generation_session(&self, _session_id: &SessionId) -> Option> { unimplemented!("test-only") } diff --git a/secret-store/src/key_server_cluster/cluster_connections.rs b/secret-store/src/key_server_cluster/cluster_connections.rs index b484e6d8e..bf8c24afd 100644 --- a/secret-store/src/key_server_cluster/cluster_connections.rs +++ b/secret-store/src/key_server_cluster/cluster_connections.rs @@ -37,7 +37,7 @@ pub trait Connection: Send + Sync { /// Connections manager. Responsible for keeping us connected to all required nodes. pub trait ConnectionManager: 'static + Send + Sync { /// Returns shared reference to connections provider. - fn provider(&self) -> Arc; + fn provider(&self) -> Arc; /// Try to reach all disconnected nodes immediately. This method is exposed mostly for /// tests, where all 'nodes' are starting listening for incoming connections first and /// only after this, they're actually start connecting to each other. @@ -55,7 +55,7 @@ pub trait ConnectionProvider: Send + Sync { /// Returns the set of currently disconnected nodes. fn disconnected_nodes(&self) -> BTreeSet; /// Returns the reference to the active node connection or None if the node is not connected. 
- fn connection(&self, node: &NodeId) -> Option>; + fn connection(&self, node: &NodeId) -> Option>; } #[cfg(test)] @@ -110,7 +110,7 @@ pub mod tests { } impl ConnectionManager for Arc { - fn provider(&self) -> Arc { + fn provider(&self) -> Arc { self.clone() } @@ -129,7 +129,7 @@ pub mod tests { self.disconnected_nodes.lock().clone() } - fn connection(&self, node: &NodeId) -> Option> { + fn connection(&self, node: &NodeId) -> Option> { match self.connected_nodes.lock().contains(node) { true => Some(Arc::new(TestConnection { from: self.node, diff --git a/secret-store/src/key_server_cluster/cluster_connections_net.rs b/secret-store/src/key_server_cluster/cluster_connections_net.rs index bda7f7dd2..75c0a36fa 100644 --- a/secret-store/src/key_server_cluster/cluster_connections_net.rs +++ b/secret-store/src/key_server_cluster/cluster_connections_net.rs @@ -38,7 +38,7 @@ use key_server_cluster::net::{accept_connection as io_accept_connection, connect as io_connect, Connection as IoConnection}; /// Empty future. -pub type BoxedEmptyFuture = Box + Send>; +pub type BoxedEmptyFuture = Box + Send>; /// Maintain interval (seconds). Every MAINTAIN_INTERVAL seconds node: /// 1) checks if connected nodes are responding to KeepAlive messages @@ -79,11 +79,11 @@ struct NetConnectionsData { /// Reference to tokio task executor. executor: Executor, /// Key pair of this node. - self_key_pair: Arc, + self_key_pair: Arc, /// Network messages processor. - message_processor: Arc, + message_processor: Arc, /// Connections trigger. - trigger: Mutex>, + trigger: Mutex>, /// Mutable connection data. container: Arc>, } @@ -121,8 +121,8 @@ impl NetConnectionsManager { /// Create new network connections manager. pub fn new( executor: Executor, - message_processor: Arc, - trigger: Box, + message_processor: Arc, + trigger: Box, container: Arc>, config: &ClusterConfiguration, net_config: NetConnectionsManagerConfig, @@ -153,7 +153,7 @@ impl NetConnectionsManager { } impl ConnectionManager for NetConnectionsManager { - fn provider(&self) -> Arc { + fn provider(&self) -> Arc { self.data.container.clone() } @@ -180,7 +180,7 @@ impl ConnectionProvider for RwLock { .collect() } - fn connection(&self, node: &NodeId) -> Option> { + fn connection(&self, node: &NodeId) -> Option> { match self.read().connections.get(node).cloned() { Some(connection) => Some(connection), None => None, @@ -302,7 +302,7 @@ impl NetConnectionsData { trace!(target: "secretstore_net", "{}: removing connection to {} at {}", self.self_key_pair.public(), node_id, entry.get().node_address()); entry.remove_entry(); - + true } else { false diff --git a/secret-store/src/key_server_cluster/cluster_message_processor.rs b/secret-store/src/key_server_cluster/cluster_message_processor.rs index 0624d50b1..d0ab712b6 100644 --- a/secret-store/src/key_server_cluster/cluster_message_processor.rs +++ b/secret-store/src/key_server_cluster/cluster_message_processor.rs @@ -32,7 +32,7 @@ pub trait MessageProcessor: Send + Sync { /// Process disconnect from the remote node. fn process_disconnect(&self, node: &NodeId); /// Process single message from the connection. - fn process_connection_message(&self, connection: Arc, message: Message); + fn process_connection_message(&self, connection: Arc, message: Message); /// Start servers set change session. This is typically used by ConnectionManager when /// it detects that auto-migration session needs to be started. @@ -49,19 +49,19 @@ pub trait MessageProcessor: Send + Sync { /// Bridge between ConnectionManager and ClusterSessions. 
pub struct SessionsMessageProcessor { - self_key_pair: Arc, - servers_set_change_creator_connector: Arc, + self_key_pair: Arc, + servers_set_change_creator_connector: Arc, sessions: Arc, - connections: Arc, + connections: Arc, } impl SessionsMessageProcessor { /// Create new instance of SessionsMessageProcessor. pub fn new( - self_key_pair: Arc, - servers_set_change_creator_connector: Arc, + self_key_pair: Arc, + servers_set_change_creator_connector: Arc, sessions: Arc, - connections: Arc, + connections: Arc, ) -> Self { SessionsMessageProcessor { self_key_pair, @@ -75,7 +75,7 @@ impl SessionsMessageProcessor { fn process_message>( &self, sessions: &ClusterSessionsContainer, - connection: Arc, + connection: Arc, mut message: Message, ) -> Option> where @@ -198,7 +198,7 @@ impl SessionsMessageProcessor { } /// Process single cluster message from the connection. - fn process_cluster_message(&self, connection: Arc, message: ClusterMessage) { + fn process_cluster_message(&self, connection: Arc, message: ClusterMessage) { match message { ClusterMessage::KeepAlive(_) => { let msg = Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse { @@ -220,7 +220,7 @@ impl MessageProcessor for SessionsMessageProcessor { self.sessions.on_connection_timeout(node); } - fn process_connection_message(&self, connection: Arc, message: Message) { + fn process_connection_message(&self, connection: Arc, message: Message) { trace!(target: "secretstore_net", "{}: received message {} from {}", self.self_key_pair.public(), message, connection.node_id()); diff --git a/secret-store/src/key_server_cluster/cluster_sessions.rs b/secret-store/src/key_server_cluster/cluster_sessions.rs index 888499202..3db72395d 100644 --- a/secret-store/src/key_server_cluster/cluster_sessions.rs +++ b/secret-store/src/key_server_cluster/cluster_sessions.rs @@ -188,7 +188,7 @@ pub struct ClusterSessionsContainer>>, /// Listeners. Lock order: sessions -> listeners. - listeners: Mutex>>>, + listeners: Mutex>>>, /// Sessions container state. container_state: Arc>, /// Do not actually remove sessions. @@ -200,7 +200,7 @@ pub struct QueuedSession { /// Session master. pub master: NodeId, /// Cluster view. - pub cluster_view: Arc, + pub cluster_view: Arc, /// Last keep alive time. pub last_keep_alive_time: Instant, /// Last received message time. @@ -224,7 +224,7 @@ pub enum ClusterSessionsContainerState { impl ClusterSessions { /// Create new cluster sessions container. 
- pub fn new(config: &ClusterConfiguration, servers_set_change_session_creator_connector: Arc) -> Self { + pub fn new(config: &ClusterConfiguration, servers_set_change_session_creator_connector: Arc) -> Self { let container_state = Arc::new(Mutex::new(ClusterSessionsContainerState::Idle)); let creator_core = Arc::new(SessionCreatorCore::new(config)); ClusterSessions { @@ -320,7 +320,7 @@ impl ClusterSessionsContainer where S: ClusterSession, SC: Cluster } } - pub fn add_listener(&self, listener: Arc>) { + pub fn add_listener(&self, listener: Arc>) { self.listeners.lock().push(Arc::downgrade(&listener)); } @@ -347,7 +347,7 @@ impl ClusterSessionsContainer where S: ClusterSession, SC: Cluster pub fn insert( &self, - cluster: Arc, + cluster: Arc, master: NodeId, session_id: S::Id, session_nonce: Option, @@ -439,7 +439,7 @@ impl ClusterSessionsContainer where S: ClusterSession, SC: Cluster } } - fn notify_listeners) -> ()>(&self, callback: F) { + fn notify_listeners) -> ()>(&self, callback: F) { let mut listeners = self.listeners.lock(); let mut listener_index = 0; while listener_index < listeners.len() { @@ -621,7 +621,7 @@ impl WaitableSession { } } - pub fn into_wait_future(self) -> Box + Send> { + pub fn into_wait_future(self) -> Box + Send> { Box::new(self.oneshot .map_err(|e| Error::Internal(e.to_string())) .and_then(|res| res)) @@ -647,7 +647,7 @@ impl CompletionSignal { } } -pub fn create_cluster_view(self_key_pair: Arc, connections: Arc, requires_all_connections: bool) -> Result, Error> { +pub fn create_cluster_view(self_key_pair: Arc, connections: Arc, requires_all_connections: bool) -> Result, Error> { let mut connected_nodes = connections.connected_nodes()?; let disconnected_nodes = connections.disconnected_nodes(); diff --git a/secret-store/src/key_server_cluster/cluster_sessions_creator.rs b/secret-store/src/key_server_cluster/cluster_sessions_creator.rs index 20d007d6f..d0559be48 100644 --- a/secret-store/src/key_server_cluster/cluster_sessions_creator.rs +++ b/secret-store/src/key_server_cluster/cluster_sessions_creator.rs @@ -56,7 +56,7 @@ pub trait ClusterSessionCreator { /// Create cluster session. fn create( &self, - cluster: Arc, + cluster: Arc, master: NodeId, nonce: Option, id: S::Id, @@ -74,9 +74,9 @@ pub struct SessionCreatorCore { /// Self node id. self_node_id: NodeId, /// Reference to key storage - key_storage: Arc, + key_storage: Arc, /// Reference to ACL storage - acl_storage: Arc, + acl_storage: Arc, /// Always-increasing sessions counter. 
Is used as session nonce to prevent replay attacks: /// 1) during handshake, KeyServers generate new random key to encrypt messages /// => there's no way to use messages from previous connections for replay attacks @@ -153,7 +153,7 @@ impl ClusterSessionCreator for GenerationSessionCreator { fn create( &self, - cluster: Arc, + cluster: Arc, master: NodeId, nonce: Option, id: SessionId, @@ -198,7 +198,7 @@ impl ClusterSessionCreator for EncryptionSessionCreator { fn create( &self, - cluster: Arc, + cluster: Arc, master: NodeId, nonce: Option, id: SessionId, @@ -248,7 +248,7 @@ impl ClusterSessionCreator for DecryptionSessionCreator { fn create( &self, - cluster: Arc, + cluster: Arc, master: NodeId, nonce: Option, id: SessionIdWithSubSession, @@ -305,7 +305,7 @@ impl ClusterSessionCreator for SchnorrSigningSessionC fn create( &self, - cluster: Arc, + cluster: Arc, master: NodeId, nonce: Option, id: SessionIdWithSubSession, @@ -359,7 +359,7 @@ impl ClusterSessionCreator for EcdsaSigningSessionCreat })) } - fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: SessionIdWithSubSession, requester: Option) -> Result, Error> { + fn create(&self, cluster: Arc, master: NodeId, nonce: Option, id: SessionIdWithSubSession, requester: Option) -> Result, Error> { let encrypted_data = self.core.read_key_share(&id.id)?; let nonce = self.core.check_session_nonce(&master, nonce)?; let (session, oneshot) = EcdsaSigningSessionImpl::new(EcdsaSigningSessionParams { @@ -403,7 +403,7 @@ impl ClusterSessionCreator, + cluster: Arc, master: NodeId, nonce: Option, id: SessionIdWithSubSession, @@ -445,7 +445,7 @@ pub struct AdminSessionCreator { /// Administrator public. pub admin_public: Option, /// Servers set change sessions creator connector. - pub servers_set_change_session_creator_connector: Arc, + pub servers_set_change_session_creator_connector: Arc, } impl ClusterSessionCreator for AdminSessionCreator { @@ -476,7 +476,7 @@ impl ClusterSessionCreator for AdminSessionCreator { fn create( &self, - cluster: Arc, + cluster: Arc, master: NodeId, nonce: Option, id: SessionId, diff --git a/secret-store/src/key_server_cluster/connection_trigger.rs b/secret-store/src/key_server_cluster/connection_trigger.rs index 7b3649861..d8ad995a9 100644 --- a/secret-store/src/key_server_cluster/connection_trigger.rs +++ b/secret-store/src/key_server_cluster/connection_trigger.rs @@ -52,7 +52,7 @@ pub trait ConnectionTrigger: Send + Sync { /// Maintain active connections. fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer); /// Return connector for the servers set change session creator. - fn servers_set_change_creator_connector(&self) -> Arc; + fn servers_set_change_creator_connector(&self) -> Arc; } /// Servers set change session creator connector. @@ -67,11 +67,11 @@ pub trait ServersSetChangeSessionCreatorConnector: Send + Sync { /// Simple connection trigger, which only keeps connections to current_set. pub struct SimpleConnectionTrigger { /// Key server set cluster. - key_server_set: Arc, + key_server_set: Arc, /// Trigger connections. connections: TriggerConnections, /// Servers set change session creator connector. - connector: Arc, + connector: Arc, } /// Simple Servers set change session creator connector, which will just return @@ -93,7 +93,7 @@ pub enum ConnectionsAction { /// Trigger connections. pub struct TriggerConnections { /// This node key pair. 
- pub self_key_pair: Arc, + pub self_key_pair: Arc, } impl SimpleConnectionTrigger { @@ -103,7 +103,7 @@ impl SimpleConnectionTrigger { } /// Create new simple connection trigger. - pub fn new(key_server_set: Arc, self_key_pair: Arc, admin_public: Option) -> Self { + pub fn new(key_server_set: Arc, self_key_pair: Arc, admin_public: Option) -> Self { SimpleConnectionTrigger { key_server_set: key_server_set, connections: TriggerConnections { @@ -139,7 +139,7 @@ impl ConnectionTrigger for SimpleConnectionTrigger { self.connections.maintain(ConnectionsAction::ConnectToCurrentSet, connections, &self.key_server_set.snapshot()) } - fn servers_set_change_creator_connector(&self) -> Arc { + fn servers_set_change_creator_connector(&self) -> Arc { self.connector.clone() } } diff --git a/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs b/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs index b4dcfad63..00ea42571 100644 --- a/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs +++ b/secret-store/src/key_server_cluster/connection_trigger_with_migration.rs @@ -33,9 +33,9 @@ use {NodeKeyPair}; /// Key servers set change trigger with automated migration procedure. pub struct ConnectionTriggerWithMigration { /// This node key pair. - self_key_pair: Arc, + self_key_pair: Arc, /// Key server set. - key_server_set: Arc, + key_server_set: Arc, /// Last server set state. snapshot: KeyServerSetSnapshot, /// Required connections action. @@ -105,9 +105,9 @@ struct TriggerSession { /// Servers set change session creator connector. connector: Arc, /// This node key pair. - self_key_pair: Arc, + self_key_pair: Arc, /// Key server set. - key_server_set: Arc, + key_server_set: Arc, } impl ConnectionTriggerWithMigration { @@ -117,7 +117,7 @@ impl ConnectionTriggerWithMigration { } /// Create new trigge with migration. - pub fn new(key_server_set: Arc, self_key_pair: Arc) -> Self { + pub fn new(key_server_set: Arc, self_key_pair: Arc) -> Self { let snapshot = key_server_set.snapshot(); let migration = snapshot.migration.clone(); @@ -203,7 +203,7 @@ impl ConnectionTrigger for ConnectionTriggerWithMigration { } } - fn servers_set_change_creator_connector(&self) -> Arc { + fn servers_set_change_creator_connector(&self) -> Arc { self.session.connector.clone() } } diff --git a/secret-store/src/key_server_cluster/io/deadline.rs b/secret-store/src/key_server_cluster/io/deadline.rs index 7c0893257..80c3cc3e6 100644 --- a/secret-store/src/key_server_cluster/io/deadline.rs +++ b/secret-store/src/key_server_cluster/io/deadline.rs @@ -19,7 +19,7 @@ use std::time::Duration; use futures::{Future, Poll}; use tokio::timer::timeout::{Timeout, Error as TimeoutError}; -type DeadlineBox = Box = Box::Item>, Error = TimeoutError<::Error> > + Send>; diff --git a/secret-store/src/key_server_cluster/io/handshake.rs b/secret-store/src/key_server_cluster/io/handshake.rs index f378cba09..b266d8681 100644 --- a/secret-store/src/key_server_cluster/io/handshake.rs +++ b/secret-store/src/key_server_cluster/io/handshake.rs @@ -46,14 +46,14 @@ use key_server_cluster::io::{write_message, write_encrypted_message, WriteMessag read_message, read_encrypted_message, fix_shared_key}; /// Start handshake procedure with another node from the cluster. 
-pub fn handshake(a: A, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { +pub fn handshake(a: A, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { let init_data = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into) .and_then(|cp| Random.generate().map(|kp| (cp, kp)).map_err(Into::into)); handshake_with_init_data(a, init_data, self_key_pair, trusted_nodes) } /// Start handshake procedure with another node from the cluster and given plain confirmation + session key pair. -pub fn handshake_with_init_data(a: A, init_data: Result<(H256, KeyPair), Error>, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { +pub fn handshake_with_init_data(a: A, init_data: Result<(H256, KeyPair), Error>, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Handshake where A: AsyncWrite + AsyncRead { let handshake_input_data = init_data .and_then(|(cp, kp)| sign(kp.secret(), &cp).map(|sp| (cp, kp, sp)).map_err(Into::into)) .and_then(|(cp, kp, sp)| Handshake::::make_public_key_message(self_key_pair.public().clone(), cp.clone(), sp).map(|msg| (cp, kp, msg))); @@ -79,7 +79,7 @@ pub fn handshake_with_init_data(a: A, init_data: Result<(H256, KeyPair), Erro } /// Wait for handshake procedure to be started by another node from the cluster. -pub fn accept_handshake(a: A, self_key_pair: Arc) -> Handshake where A: AsyncWrite + AsyncRead { +pub fn accept_handshake(a: A, self_key_pair: Arc) -> Handshake where A: AsyncWrite + AsyncRead { let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); let handshake_input_data = self_confirmation_plain .and_then(|cp| Random.generate().map(|kp| (cp, kp)).map_err(Into::into)); @@ -118,7 +118,7 @@ pub struct Handshake { is_active: bool, error: Option<(A, Result)>, state: HandshakeState, - self_key_pair: Arc, + self_key_pair: Arc, self_session_key_pair: Option, self_confirmation_plain: H256, trusted_nodes: Option>, @@ -156,7 +156,7 @@ impl Handshake where A: AsyncRead + AsyncWrite { }))) } - fn make_private_key_signature_message(self_key_pair: &NodeKeyPair, confirmation_plain: &H256) -> Result { + fn make_private_key_signature_message(self_key_pair: &dyn NodeKeyPair, confirmation_plain: &H256) -> Result { Ok(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature { confirmation_signed: self_key_pair.sign(confirmation_plain)?.into(), }))) diff --git a/secret-store/src/key_server_cluster/jobs/key_access_job.rs b/secret-store/src/key_server_cluster/jobs/key_access_job.rs index 075d7320f..46cb00db2 100644 --- a/secret-store/src/key_server_cluster/jobs/key_access_job.rs +++ b/secret-store/src/key_server_cluster/jobs/key_access_job.rs @@ -26,13 +26,13 @@ pub struct KeyAccessJob { /// Has key share? has_key_share: bool, /// ACL storage. - acl_storage: Arc, + acl_storage: Arc, /// Requester data. 
requester: Option, } impl KeyAccessJob { - pub fn new_on_slave(id: SessionId, acl_storage: Arc) -> Self { + pub fn new_on_slave(id: SessionId, acl_storage: Arc) -> Self { KeyAccessJob { id: id, has_key_share: true, @@ -41,7 +41,7 @@ impl KeyAccessJob { } } - pub fn new_on_master(id: SessionId, acl_storage: Arc, requester: Requester) -> Self { + pub fn new_on_master(id: SessionId, acl_storage: Arc, requester: Requester) -> Self { KeyAccessJob { id: id, has_key_share: true, @@ -76,7 +76,7 @@ impl JobExecutor for KeyAccessJob { if !self.has_key_share { return Ok(JobPartialRequestAction::Reject(false)); } - + self.requester = Some(partial_request.clone()); self.acl_storage.check(partial_request.address(&self.id).map_err(Error::InsufficientRequesterData)?, &self.id) .map(|is_confirmed| if is_confirmed { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) }) diff --git a/secret-store/src/key_server_cluster/jobs/unknown_sessions_job.rs b/secret-store/src/key_server_cluster/jobs/unknown_sessions_job.rs index 33eca6583..0b11934fb 100644 --- a/secret-store/src/key_server_cluster/jobs/unknown_sessions_job.rs +++ b/secret-store/src/key_server_cluster/jobs/unknown_sessions_job.rs @@ -24,18 +24,18 @@ pub struct UnknownSessionsJob { /// Target node id. target_node_id: Option, /// Keys storage. - key_storage: Arc, + key_storage: Arc, } impl UnknownSessionsJob { - pub fn new_on_slave(key_storage: Arc) -> Self { + pub fn new_on_slave(key_storage: Arc) -> Self { UnknownSessionsJob { target_node_id: None, key_storage: key_storage, } } - pub fn new_on_master(key_storage: Arc, self_node_id: NodeId) -> Self { + pub fn new_on_master(key_storage: Arc, self_node_id: NodeId) -> Self { UnknownSessionsJob { target_node_id: Some(self_node_id), key_storage: key_storage, diff --git a/secret-store/src/key_server_cluster/net/accept_connection.rs b/secret-store/src/key_server_cluster/net/accept_connection.rs index 3b66fe1d7..88ee9b3d5 100644 --- a/secret-store/src/key_server_cluster/net/accept_connection.rs +++ b/secret-store/src/key_server_cluster/net/accept_connection.rs @@ -25,7 +25,7 @@ use key_server_cluster::io::{accept_handshake, Handshake, Deadline, deadline}; use key_server_cluster::net::Connection; /// Create future for accepting incoming connection. -pub fn accept_connection(stream: TcpStream, self_key_pair: Arc) -> Deadline { +pub fn accept_connection(stream: TcpStream, self_key_pair: Arc) -> Deadline { // TODO: This could fail so it would be better either to accept the // address as a separate argument or return a result. let address = stream.peer_addr().expect("Unable to determine tcp peer address"); diff --git a/secret-store/src/key_server_cluster/net/connect.rs b/secret-store/src/key_server_cluster/net/connect.rs index 3c2cbc269..c5be9be0e 100644 --- a/secret-store/src/key_server_cluster/net/connect.rs +++ b/secret-store/src/key_server_cluster/net/connect.rs @@ -26,7 +26,7 @@ use key_server_cluster::io::{handshake, Handshake, Deadline, deadline}; use key_server_cluster::net::Connection; /// Create future for connecting to other node. 
-pub fn connect(address: &SocketAddr, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Deadline { +pub fn connect(address: &SocketAddr, self_key_pair: Arc, trusted_nodes: BTreeSet) -> Deadline { let connect = Connect { state: ConnectState::TcpConnect(TcpStream::connect(address)), address: address.clone(), @@ -47,7 +47,7 @@ enum ConnectState { pub struct Connect { state: ConnectState, address: SocketAddr, - self_key_pair: Arc, + self_key_pair: Arc, trusted_nodes: BTreeSet, } diff --git a/secret-store/src/key_server_set.rs b/secret-store/src/key_server_set.rs index c69be8ce7..7aa1f8b10 100644 --- a/secret-store/src/key_server_set.rs +++ b/secret-store/src/key_server_set.rs @@ -121,11 +121,11 @@ struct CachedContract { /// Previous confirm migration transaction. confirm_migration_tx: Option, /// This node key pair. - self_key_pair: Arc, + self_key_pair: Arc, } impl OnChainKeyServerSet { - pub fn new(trusted_client: TrustedClient, contract_address_source: Option, self_key_pair: Arc, auto_migrate_enabled: bool, key_servers: BTreeMap) -> Result, Error> { + pub fn new(trusted_client: TrustedClient, contract_address_source: Option, self_key_pair: Arc, auto_migrate_enabled: bool, key_servers: BTreeMap) -> Result, Error> { let client = trusted_client.get_untrusted(); let key_server_set = Arc::new(OnChainKeyServerSet { contract: Mutex::new(CachedContract::new(trusted_client, contract_address_source, self_key_pair, auto_migrate_enabled, key_servers)?), @@ -232,7 +232,7 @@ impl ) -> Result, String>> KeyServerSubset for NewKeySe } impl CachedContract { - pub fn new(client: TrustedClient, contract_address_source: Option, self_key_pair: Arc, auto_migrate_enabled: bool, key_servers: BTreeMap) -> Result { + pub fn new(client: TrustedClient, contract_address_source: Option, self_key_pair: Arc, auto_migrate_enabled: bool, key_servers: BTreeMap) -> Result { let server_set = match contract_address_source.is_none() { true => key_servers.into_iter() .map(|(p, addr)| { @@ -471,7 +471,7 @@ impl CachedContract { key_servers } - fn update_number_of_confirmations_if_required(&mut self, client: &BlockChainClient) { + fn update_number_of_confirmations_if_required(&mut self, client: &dyn BlockChainClient) { if !self.auto_migrate_enabled { return; } @@ -574,11 +574,11 @@ fn update_last_transaction_block(client: &Client, migration_id: &H256, previous_ true } -fn latest_block_hash(client: &BlockChainClient) -> H256 { +fn latest_block_hash(client: &dyn BlockChainClient) -> H256 { client.block_hash(BlockId::Latest).unwrap_or_default() } -fn block_confirmations(client: &BlockChainClient, block: H256) -> Option { +fn block_confirmations(client: &dyn BlockChainClient, block: H256) -> Option { client.block_number(BlockId::Hash(block)) .and_then(|block| client.block_number(BlockId::Latest).map(|last_block| (block, last_block))) .map(|(block, last_block)| last_block - block) diff --git a/secret-store/src/key_storage.rs b/secret-store/src/key_storage.rs index 8ba36f708..d670417f9 100644 --- a/secret-store/src/key_storage.rs +++ b/secret-store/src/key_storage.rs @@ -72,17 +72,17 @@ pub trait KeyStorage: Send + Sync { /// Check if storage contains document encryption key fn contains(&self, document: &ServerKeyId) -> bool; /// Iterate through storage - fn iter<'a>(&'a self) -> Box + 'a>; + fn iter<'a>(&'a self) -> Box + 'a>; } /// Persistent document encryption keys storage pub struct PersistentKeyStorage { - db: Arc, + db: Arc, } /// Persistent document encryption keys storage iterator pub struct PersistentKeyStorageIterator<'a> { - 
iter: Box, Box<[u8]>)> + 'a>, + iter: Box, Box<[u8]>)> + 'a>, } /// V3 of encrypted key share, as it is stored by key storage on the single key server. @@ -115,7 +115,7 @@ struct SerializableDocumentKeyShareVersionV3 { impl PersistentKeyStorage { /// Create new persistent document encryption keys storage - pub fn new(db: Arc) -> Result { + pub fn new(db: Arc) -> Result { let db = upgrade_db(db)?; Ok(PersistentKeyStorage { @@ -124,7 +124,7 @@ impl PersistentKeyStorage { } } -fn upgrade_db(db: Arc) -> Result, Error> { +fn upgrade_db(db: Arc) -> Result, Error> { let version = db.get(None, DB_META_KEY_VERSION)?; let version = version.and_then(|v| v.get(0).cloned()); match version { @@ -185,7 +185,7 @@ impl KeyStorage for PersistentKeyStorage { .unwrap_or(false) } - fn iter<'a>(&'a self) -> Box + 'a> { + fn iter<'a>(&'a self) -> Box + 'a> { Box::new(PersistentKeyStorageIterator { iter: self.db.iter(None), }) @@ -336,7 +336,7 @@ pub mod tests { self.keys.read().contains_key(document) } - fn iter<'a>(&'a self) -> Box + 'a> { + fn iter<'a>(&'a self) -> Box + 'a> { Box::new(self.keys.read().clone().into_iter()) } } diff --git a/secret-store/src/lib.rs b/secret-store/src/lib.rs index faba91f39..a9d387fb6 100644 --- a/secret-store/src/lib.rs +++ b/secret-store/src/lib.rs @@ -91,11 +91,11 @@ pub use self::node_key_pair::PlainNodeKeyPair; pub use self::node_key_pair::KeyStoreNodeKeyPair; /// Start new key server instance -pub fn start(client: Arc, sync: Arc, miner: Arc, self_key_pair: Arc, mut config: ServiceConfiguration, - db: Arc, executor: Executor) -> Result, Error> +pub fn start(client: Arc, sync: Arc, miner: Arc, self_key_pair: Arc, mut config: ServiceConfiguration, + db: Arc, executor: Executor) -> Result, Error> { let trusted_client = trusted_client::TrustedClient::new(self_key_pair.clone(), client.clone(), sync, miner); - let acl_storage: Arc = match config.acl_check_contract_address.take() { + let acl_storage: Arc = match config.acl_check_contract_address.take() { Some(acl_check_contract_address) => acl_storage::OnChainAclStorage::new(trusted_client.clone(), acl_check_contract_address)?, None => Arc::new(acl_storage::DummyAclStorage::default()), }; @@ -106,7 +106,7 @@ pub fn start(client: Arc, sync: Arc, miner: Arc, se let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set.clone(), self_key_pair.clone(), acl_storage.clone(), key_storage.clone(), executor.clone())?); let cluster = key_server.cluster(); - let key_server: Arc = key_server; + let key_server: Arc = key_server; // prepare HTTP listener let http_listener = match config.listener_address { @@ -123,7 +123,7 @@ pub fn start(client: Arc, sync: Arc, miner: Arc, se address, self_key_pair.clone())); - let mut contracts: Vec> = Vec::new(); + let mut contracts: Vec> = Vec::new(); config.service_contract_address.map(|address| create_service_contract(address, listener::service_contract::SERVICE_CONTRACT_REGISTRY_NAME.to_owned(), @@ -150,7 +150,7 @@ pub fn start(client: Arc, sync: Arc, miner: Arc, se listener::ApiMask { document_key_shadow_retrieval_requests: true, ..Default::default() })) .map(|l| contracts.push(l)); - let contract: Option> = match contracts.len() { + let contract: Option> = match contracts.len() { 0 => None, 1 => Some(contracts.pop().expect("contract.len() is 1; qed")), _ => Some(Arc::new(listener::service_contract_aggregate::OnChainServiceContractAggregate::new(contracts))), diff --git a/secret-store/src/listener/http_listener.rs b/secret-store/src/listener/http_listener.rs index 
bdf813b40..93dbed7c4 100644 --- a/secret-store/src/listener/http_listener.rs +++ b/secret-store/src/listener/http_listener.rs @@ -89,13 +89,13 @@ struct KeyServerHttpHandler { /// Shared http handler struct KeyServerSharedHttpHandler { - key_server: Weak, + key_server: Weak, } impl KeyServerHttpListener { /// Start KeyServer http listener - pub fn start(listener_address: NodeAddress, cors_domains: Option>, key_server: Weak, executor: Executor) -> Result { + pub fn start(listener_address: NodeAddress, cors_domains: Option>, key_server: Weak, executor: Executor) -> Result { let shared_handler = Arc::new(KeyServerSharedHttpHandler { key_server: key_server, }); @@ -130,7 +130,7 @@ impl KeyServerHttpListener { } impl KeyServerHttpHandler { - fn key_server(&self) -> Result, Error> { + fn key_server(&self) -> Result, Error> { self.handler.key_server.upgrade() .ok_or_else(|| Error::Internal("KeyServer is already destroyed".into())) } @@ -142,7 +142,7 @@ impl KeyServerHttpHandler { path: &str, req_body: &[u8], cors: AllowCors, - ) -> Box, Error=hyper::Error> + Send> { + ) -> Box, Error=hyper::Error> + Send> { match parse_request(&req_method, &path, &req_body) { Request::GenerateServerKey(document, signature, threshold) => Box::new(result(self.key_server()) @@ -219,7 +219,7 @@ impl Service for KeyServerHttpHandler { type ReqBody = Body; type ResBody = Body; type Error = hyper::Error; - type Future = Box, Error=Self::Error> + Send>; + type Future = Box, Error=Self::Error> + Send>; fn call(&mut self, req: HttpRequest) -> Self::Future { let cors = cors::get_cors_allow_origin( @@ -462,7 +462,7 @@ mod tests { #[test] fn http_listener_successfully_drops() { - let key_server: Arc = Arc::new(DummyKeyServer::default()); + let key_server: Arc = Arc::new(DummyKeyServer::default()); let address = NodeAddress { address: "127.0.0.1".into(), port: 9000 }; let runtime = Runtime::with_thread_count(1); let listener = KeyServerHttpListener::start(address, None, Arc::downgrade(&key_server), diff --git a/secret-store/src/listener/mod.rs b/secret-store/src/listener/mod.rs index f260c28ed..50d09624b 100644 --- a/secret-store/src/listener/mod.rs +++ b/secret-store/src/listener/mod.rs @@ -42,7 +42,7 @@ pub struct ApiMask { /// Combined HTTP + service contract listener. pub struct Listener { - key_server: Arc, + key_server: Arc, _http: Option, _contract: Option>, } @@ -61,7 +61,7 @@ impl ApiMask { impl Listener { /// Create new listener. 
- pub fn new(key_server: Arc, http: Option, contract: Option>) -> Self { + pub fn new(key_server: Arc, http: Option, contract: Option>) -> Self { Self { key_server: key_server, _http: http, @@ -78,7 +78,7 @@ impl ServerKeyGenerator for Listener { key_id: ServerKeyId, author: Requester, threshold: usize, - ) -> Box + Send> { + ) -> Box + Send> { self.key_server.generate_key(key_id, author, threshold) } @@ -86,7 +86,7 @@ impl ServerKeyGenerator for Listener { &self, key_id: ServerKeyId, author: Requester, - ) -> Box + Send> { + ) -> Box + Send> { self.key_server.restore_key_public(key_id, author) } } @@ -98,7 +98,7 @@ impl DocumentKeyServer for Listener { author: Requester, common_point: Public, encrypted_document_key: Public, - ) -> Box + Send> { + ) -> Box + Send> { self.key_server.store_document_key(key_id, author, common_point, encrypted_document_key) } @@ -107,7 +107,7 @@ impl DocumentKeyServer for Listener { key_id: ServerKeyId, author: Requester, threshold: usize, - ) -> Box + Send> { + ) -> Box + Send> { self.key_server.generate_document_key(key_id, author, threshold) } @@ -115,7 +115,7 @@ impl DocumentKeyServer for Listener { &self, key_id: ServerKeyId, requester: Requester, - ) -> Box + Send> { + ) -> Box + Send> { self.key_server.restore_document_key(key_id, requester) } @@ -123,7 +123,7 @@ impl DocumentKeyServer for Listener { &self, key_id: ServerKeyId, requester: Requester, - ) -> Box + Send> { + ) -> Box + Send> { self.key_server.restore_document_key_shadow(key_id, requester) } } @@ -134,7 +134,7 @@ impl MessageSigner for Listener { key_id: ServerKeyId, requester: Requester, message: MessageHash, - ) -> Box + Send> { + ) -> Box + Send> { self.key_server.sign_message_schnorr(key_id, requester, message) } @@ -143,7 +143,7 @@ impl MessageSigner for Listener { key_id: ServerKeyId, requester: Requester, message: MessageHash, - ) -> Box + Send> { + ) -> Box + Send> { self.key_server.sign_message_ecdsa(key_id, requester, message) } } @@ -154,7 +154,7 @@ impl AdminSessionsServer for Listener { old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet, - ) -> Box + Send> { + ) -> Box + Send> { self.key_server.change_servers_set(old_set_signature, new_set_signature, new_servers_set) } } diff --git a/secret-store/src/listener/service_contract.rs b/secret-store/src/listener/service_contract.rs index 3d24ff798..580a6f382 100644 --- a/secret-store/src/listener/service_contract.rs +++ b/secret-store/src/listener/service_contract.rs @@ -70,9 +70,9 @@ pub trait ServiceContract: Send + Sync { /// Update contract when new blocks are enacted. Returns true if contract is installed && up-to-date (i.e. chain is synced). fn update(&self) -> bool; /// Read recent contract logs. Returns topics of every entry. - fn read_logs(&self) -> Box>; + fn read_logs(&self) -> Box>; /// Publish generated key. - fn read_pending_requests(&self) -> Box>; + fn read_pending_requests(&self) -> Box>; /// Publish generated server key. fn publish_generated_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public) -> Result<(), String>; /// Publish server key generation error. @@ -100,7 +100,7 @@ pub struct OnChainServiceContract { /// Blockchain client. client: TrustedClient, /// This node key pair. - self_key_pair: Arc, + self_key_pair: Arc, /// Contract registry name (if any). name: String, /// Contract address source. @@ -138,7 +138,7 @@ struct DocumentKeyShadowRetrievalService; impl OnChainServiceContract { /// Create new on-chain service contract. 
- pub fn new(mask: ApiMask, client: TrustedClient, name: String, address_source: ContractAddress, self_key_pair: Arc) -> Self { + pub fn new(mask: ApiMask, client: TrustedClient, name: String, address_source: ContractAddress, self_key_pair: Arc) -> Self { let contract = OnChainServiceContract { mask: mask, client: client, @@ -191,8 +191,8 @@ impl OnChainServiceContract { /// Create task-specific pending requests iterator. fn create_pending_requests_iterator< C: 'static + Fn(&Client, &Address, &BlockId) -> Result, - R: 'static + Fn(&NodeKeyPair, &Client, &Address, &BlockId, U256) -> Result<(bool, ServiceTask), String> - >(&self, client: Arc, contract_address: &Address, block: &BlockId, get_count: C, read_item: R) -> Box> { + R: 'static + Fn(&dyn NodeKeyPair, &Client, &Address, &BlockId, U256) -> Result<(bool, ServiceTask), String> + >(&self, client: Arc, contract_address: &Address, block: &BlockId, get_count: C, read_item: R) -> Box> { get_count(&*client, contract_address, block) .map(|count| { let client = client.clone(); @@ -209,7 +209,7 @@ impl OnChainServiceContract { .ok(), index: 0.into(), length: count, - }) as Box> + }) as Box> }) .map_err(|error| { warn!(target: "secretstore", "{}: creating pending requests iterator failed: {}", @@ -240,7 +240,7 @@ impl ServiceContract for OnChainServiceContract { self.update_contract_address() && self.client.get().is_some() } - fn read_logs(&self) -> Box> { + fn read_logs(&self) -> Box> { let client = match self.client.get() { Some(client) => client, None => { @@ -310,7 +310,7 @@ impl ServiceContract for OnChainServiceContract { }).collect::>().into_iter()) } - fn read_pending_requests(&self) -> Box> { + fn read_pending_requests(&self) -> Box> { let client = match self.client.get() { Some(client) => client, None => return Box::new(::std::iter::empty()), @@ -327,7 +327,7 @@ impl ServiceContract for OnChainServiceContract { let iter = match self.mask.server_key_generation_requests { true => Box::new(self.create_pending_requests_iterator(client.clone(), &contract_address, &block, &ServerKeyGenerationService::read_pending_requests_count, - &ServerKeyGenerationService::read_pending_request)) as Box>, + &ServerKeyGenerationService::read_pending_request)) as Box>, false => Box::new(::std::iter::empty()), }; let iter = match self.mask.server_key_retrieval_requests { @@ -484,7 +484,7 @@ impl ServerKeyGenerationService { } /// Read pending request. - fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> { + fn read_pending_request(self_key_pair: &dyn NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> { let self_address = public_to_address(self_key_pair.public()); let (encoded, decoder) = service::functions::get_server_key_generation_request::call(index); @@ -544,7 +544,7 @@ impl ServerKeyRetrievalService { } /// Read pending request. 
- fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> { + fn read_pending_request(self_key_pair: &dyn NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> { let self_address = public_to_address(self_key_pair.public()); let (encoded, decoder) = service::functions::get_server_key_retrieval_request::call(index); @@ -607,7 +607,7 @@ impl DocumentKeyStoreService { } /// Read pending request. - fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> { + fn read_pending_request(self_key_pair: &dyn NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> { let self_address = public_to_address(self_key_pair.public()); let (encoded, decoder) = service::functions::get_document_key_store_request::call(index); let (server_key_id, author, common_point, encrypted_point) = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?) @@ -687,7 +687,7 @@ impl DocumentKeyShadowRetrievalService { } /// Read pending request. - fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> { + fn read_pending_request(self_key_pair: &dyn NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> { let self_address = public_to_address(self_key_pair.public()); let (encoded, decoder) = service::functions::get_document_key_shadow_retrieval_request::call(index); @@ -781,11 +781,11 @@ pub mod tests { true } - fn read_logs(&self) -> Box> { + fn read_logs(&self) -> Box> { Box::new(self.logs.clone().into_iter()) } - fn read_pending_requests(&self) -> Box> { + fn read_pending_requests(&self) -> Box> { Box::new(self.pending_requests.clone().into_iter()) } diff --git a/secret-store/src/listener/service_contract_aggregate.rs b/secret-store/src/listener/service_contract_aggregate.rs index 29a4730e2..13e1a7968 100644 --- a/secret-store/src/listener/service_contract_aggregate.rs +++ b/secret-store/src/listener/service_contract_aggregate.rs @@ -25,12 +25,12 @@ use {ServerKeyId}; /// Aggregated on-chain service contract. pub struct OnChainServiceContractAggregate { /// All hosted service contracts. - contracts: Vec>, + contracts: Vec>, } impl OnChainServiceContractAggregate { /// Create new aggregated service contract listener. 
- pub fn new(contracts: Vec>) -> Self { + pub fn new(contracts: Vec>) -> Self { debug_assert!(contracts.len() > 1); OnChainServiceContractAggregate { contracts: contracts, @@ -47,15 +47,15 @@ impl ServiceContract for OnChainServiceContractAggregate { result } - fn read_logs(&self) -> Box> { + fn read_logs(&self) -> Box> { self.contracts.iter() - .fold(Box::new(::std::iter::empty()) as Box>, |i, c| + .fold(Box::new(::std::iter::empty()) as Box>, |i, c| Box::new(i.chain(c.read_logs()))) } - fn read_pending_requests(&self) -> Box> { + fn read_pending_requests(&self) -> Box> { self.contracts.iter() - .fold(Box::new(::std::iter::empty()) as Box>, |i, c| + .fold(Box::new(::std::iter::empty()) as Box>, |i, c| Box::new(i.chain(c.read_pending_requests()))) } diff --git a/secret-store/src/listener/service_contract_listener.rs b/secret-store/src/listener/service_contract_listener.rs index c0b306050..c5d540224 100644 --- a/secret-store/src/listener/service_contract_listener.rs +++ b/secret-store/src/listener/service_contract_listener.rs @@ -62,17 +62,17 @@ pub struct ServiceContractListener { /// Service contract listener parameters. pub struct ServiceContractListenerParams { /// Service contract. - pub contract: Arc, + pub contract: Arc, /// This node key pair. - pub self_key_pair: Arc, + pub self_key_pair: Arc, /// Key servers set. - pub key_server_set: Arc, + pub key_server_set: Arc, /// ACL storage reference. - pub acl_storage: Arc, + pub acl_storage: Arc, /// Cluster reference. - pub cluster: Arc, + pub cluster: Arc, /// Key storage reference. - pub key_storage: Arc, + pub key_storage: Arc, } /// Service contract listener data. @@ -84,17 +84,17 @@ struct ServiceContractListenerData { /// Service tasks queue. pub tasks_queue: Arc>, /// Service contract. - pub contract: Arc, + pub contract: Arc, /// ACL storage reference. - pub acl_storage: Arc, + pub acl_storage: Arc, /// Cluster client reference. - pub cluster: Arc, + pub cluster: Arc, /// This node key pair. - pub self_key_pair: Arc, + pub self_key_pair: Arc, /// Key servers set. - pub key_server_set: Arc, + pub key_server_set: Arc, /// Key storage reference. - pub key_storage: Arc, + pub key_storage: Arc, } @@ -561,7 +561,7 @@ fn log_service_task_result(task: &ServiceTask, self_id: &Public, result: Result< } /// Returns true when session, related to `server_key_id` must be started on `node`. 
-fn is_processed_by_this_key_server(key_server_set: &KeyServerSet, node: &NodeId, server_key_id: &H256) -> bool { +fn is_processed_by_this_key_server(key_server_set: &dyn KeyServerSet, node: &NodeId, server_key_id: &H256) -> bool { let servers = key_server_set.snapshot().current_set; let total_servers_count = servers.len(); match total_servers_count { @@ -613,7 +613,7 @@ mod tests { key_storage } - fn make_servers_set(is_isolated: bool) -> Arc { + fn make_servers_set(is_isolated: bool) -> Arc { Arc::new(MapKeyServerSet::new(is_isolated, vec![ ("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(), "127.0.0.1:8080".parse().unwrap()), @@ -624,7 +624,7 @@ mod tests { ].into_iter().collect())) } - fn make_service_contract_listener(contract: Option>, cluster: Option>, key_storage: Option>, acl_storage: Option>, servers_set: Option>) -> Arc { + fn make_service_contract_listener(contract: Option>, cluster: Option>, key_storage: Option>, acl_storage: Option>, servers_set: Option>) -> Arc { let contract = contract.unwrap_or_else(|| Arc::new(DummyServiceContract::default())); let cluster = cluster.unwrap_or_else(|| Arc::new(DummyClusterClient::default())); let key_storage = key_storage.unwrap_or_else(|| Arc::new(DummyKeyStorage::default())); diff --git a/secret-store/src/traits.rs b/secret-store/src/traits.rs index ed44e2503..149087d63 100644 --- a/secret-store/src/traits.rs +++ b/secret-store/src/traits.rs @@ -45,7 +45,7 @@ pub trait ServerKeyGenerator { key_id: ServerKeyId, author: Requester, threshold: usize, - ) -> Box + Send>; + ) -> Box + Send>; /// Retrieve public portion of previously generated SK. /// `key_id` is identifier of previously generated SK. /// `author` is the same author, that has created the server key. @@ -53,7 +53,7 @@ pub trait ServerKeyGenerator { &self, key_id: ServerKeyId, author: Requester, - ) -> Box + Send>; + ) -> Box + Send>; } /// Document key (DK) server. @@ -70,7 +70,7 @@ pub trait DocumentKeyServer: ServerKeyGenerator { author: Requester, common_point: Public, encrypted_document_key: Public, - ) -> Box + Send>; + ) -> Box + Send>; /// Generate and store both SK and DK. This is a shortcut for consequent calls of `generate_key` and `store_document_key`. /// The only difference is that DK is generated by DocumentKeyServer (which might be considered unsafe). /// `key_id` is the caller-provided identifier of generated SK. @@ -82,7 +82,7 @@ pub trait DocumentKeyServer: ServerKeyGenerator { key_id: ServerKeyId, author: Requester, threshold: usize, - ) -> Box + Send>; + ) -> Box + Send>; /// Restore previously stored DK. /// DK is decrypted on the key server (which might be considered unsafe), and then encrypted with caller public key. /// `key_id` is identifier of previously generated SK. @@ -92,7 +92,7 @@ pub trait DocumentKeyServer: ServerKeyGenerator { &self, key_id: ServerKeyId, requester: Requester, - ) -> Box + Send>; + ) -> Box + Send>; /// Restore previously stored DK. /// To decrypt DK on client: /// 1) use requestor secret key to decrypt secret coefficients from result.decrypt_shadows @@ -104,7 +104,7 @@ pub trait DocumentKeyServer: ServerKeyGenerator { &self, key_id: ServerKeyId, requester: Requester, - ) -> Box + Send>; + ) -> Box + Send>; } /// Message signer. 
@@ -119,7 +119,7 @@ pub trait MessageSigner: ServerKeyGenerator { key_id: ServerKeyId, requester: Requester, message: MessageHash, - ) -> Box + Send>; + ) -> Box + Send>; /// Generate ECDSA signature for message with previously generated SK. /// WARNING: only possible when SK was generated using t <= 2 * N. /// `key_id` is the caller-provided identifier of generated SK. @@ -131,7 +131,7 @@ pub trait MessageSigner: ServerKeyGenerator { key_id: ServerKeyId, signature: Requester, message: MessageHash, - ) -> Box + Send>; + ) -> Box + Send>; } /// Administrative sessions server. @@ -145,7 +145,7 @@ pub trait AdminSessionsServer { old_set_signature: RequestSignature, new_set_signature: RequestSignature, new_servers_set: BTreeSet, - ) -> Box + Send>; + ) -> Box + Send>; } /// Key server. diff --git a/secret-store/src/trusted_client.rs b/secret-store/src/trusted_client.rs index a058f0564..6811d1670 100644 --- a/secret-store/src/trusted_client.rs +++ b/secret-store/src/trusted_client.rs @@ -33,11 +33,11 @@ use {Error, NodeKeyPair, ContractAddress}; /// 'Trusted' client weak reference. pub struct TrustedClient { /// This key server node key pair. - self_key_pair: Arc, + self_key_pair: Arc, /// Blockchain client. client: Weak, /// Sync provider. - sync: Weak, + sync: Weak, /// Miner service. miner: Weak, } diff --git a/util/fetch/src/client.rs b/util/fetch/src/client.rs index 696b624ef..68aca5398 100644 --- a/util/fetch/src/client.rs +++ b/util/fetch/src/client.rs @@ -871,7 +871,7 @@ mod test { type ReqBody = hyper::Body; type ResBody = hyper::Body; type Error = Error; - type Future = Box, Error=Self::Error> + Send + 'static>; + type Future = Box, Error=Self::Error> + Send + 'static>; fn call(&mut self, req: hyper::Request) -> Self::Future { match req.uri().path() {
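
Every hunk in the patch above makes the same mechanical change: bare trait objects (Arc<Trait>, Box<Trait>, &Trait, Weak<Trait>, boxed Iterator/Future return types) are rewritten with the explicit `dyn Trait` syntax expected by the Rust 2018 edition, with no behavioural difference. The following is a minimal illustrative sketch of that pattern, not code from this patch; the names Storage, MemStorage, Service, dump and keys are hypothetical stand-ins for the traits and structs touched above.

    use std::sync::Arc;

    trait Storage: Send + Sync {
        fn get(&self, key: &str) -> Option<String>;
    }

    struct MemStorage;

    impl Storage for MemStorage {
        fn get(&self, _key: &str) -> Option<String> {
            None
        }
    }

    // Before this kind of change the field would read `storage: Arc<Storage>` and the
    // free functions would take `&Storage` / return `Box<Iterator<...>>`; only the
    // trait-object syntax changes, the generated code is identical.
    struct Service {
        storage: Arc<dyn Storage>,
    }

    fn dump(storage: &dyn Storage) -> Option<String> {
        storage.get("example")
    }

    fn keys(storage: &dyn Storage) -> Box<dyn Iterator<Item = String>> {
        Box::new(storage.get("example").into_iter())
    }

    fn main() {
        // Arc::new(MemStorage) coerces to Arc<dyn Storage> because of the field type.
        let service = Service { storage: Arc::new(MemStorage) };
        println!("{:?}", dump(&*service.storage));
        println!("{}", keys(&*service.storage).count());
    }

The point of the explicit syntax is readability: every place where a call goes through dynamic dispatch behind a pointer is now visibly marked `dyn`, which is why the diff touches signatures and struct fields only and never function bodies or tests' logic.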