Fix deprecated trait objects without an explicit dyn (#11112)

This commit is contained in:
Juan Aguilar 2019-10-02 10:55:31 +02:00 committed by David
parent d243b15ae0
commit ad633de6d9
41 changed files with 264 additions and 264 deletions

View File

@ -163,7 +163,7 @@ fn main() {
}
}
fn key_dir(location: &str, password: Option<Password>) -> Result<Box<KeyDirectory>, Error> {
fn key_dir(location: &str, password: Option<Password>) -> Result<Box<dyn KeyDirectory>, Error> {
let dir: RootDiskDirectory = match location {
"geth" => RootDiskDirectory::create(dir::geth(false))?,
"geth-test" => RootDiskDirectory::create(dir::geth(true))?,

View File

@ -709,7 +709,7 @@ mod tests {
}
struct RootDiskDirectoryGuard {
pub key_dir: Option<Box<KeyDirectory>>,
pub key_dir: Option<Box<dyn KeyDirectory>>,
_path: TempDir,
}

View File

@ -115,7 +115,7 @@ impl fmt::Display for Fail {
}
pub fn construct(
ext: &mut vm::Ext,
ext: &mut dyn vm::Ext,
source: Vec<u8>,
arguments: Vec<u8>,
sender: H160,

View File

@ -133,7 +133,7 @@ mod tests {
verified_tx
}
fn should_replace(replace: &ShouldReplace<VerifiedTransaction>, old: VerifiedTransaction, new: VerifiedTransaction) -> Choice {
fn should_replace(replace: &dyn ShouldReplace<VerifiedTransaction>, old: VerifiedTransaction, new: VerifiedTransaction) -> Choice {
let old_tx = txpool::Transaction { insertion_id: 0, transaction: Arc::new(old) };
let new_tx = txpool::Transaction { insertion_id: 0, transaction: Arc::new(new) };
let old = ReplaceTransaction::new(&old_tx, Default::default());

View File

@ -243,7 +243,7 @@ pub unsafe extern fn parity_set_logger(
}
// WebSocket event loop
fn parity_ws_worker(client: &RunningClient, query: &str, callback: Arc<Callback>) -> *const c_void {
fn parity_ws_worker(client: &RunningClient, query: &str, callback: Arc<dyn Callback>) -> *const c_void {
let (tx, mut rx) = mpsc::channel(1);
let session = Arc::new(PubSubSession::new(tx));
let query_future = client.rpc_query(query, Some(session.clone()));
@ -274,7 +274,7 @@ fn parity_ws_worker(client: &RunningClient, query: &str, callback: Arc<Callback>
}
// RPC event loop that runs for at most `timeout_ms`
fn parity_rpc_worker(client: &RunningClient, query: &str, callback: Arc<Callback>, timeout_ms: u64) {
fn parity_rpc_worker(client: &RunningClient, query: &str, callback: Arc<dyn Callback>, timeout_ms: u64) {
let cb = callback.clone();
let query = client.rpc_query(query, None).map(move |response| {
let response = response.unwrap_or_else(|| error::EMPTY.to_string());

View File

@ -37,13 +37,13 @@ pub struct KeyServerImpl {
/// Secret store key server data.
pub struct KeyServerCore {
cluster: Arc<ClusterClient>,
cluster: Arc<dyn ClusterClient>,
}
impl KeyServerImpl {
/// Create new key server instance
pub fn new(config: &ClusterConfiguration, key_server_set: Arc<KeyServerSet>, self_key_pair: Arc<NodeKeyPair>,
acl_storage: Arc<AclStorage>, key_storage: Arc<KeyStorage>, executor: Executor) -> Result<Self, Error>
pub fn new(config: &ClusterConfiguration, key_server_set: Arc<dyn KeyServerSet>, self_key_pair: Arc<dyn NodeKeyPair>,
acl_storage: Arc<dyn AclStorage>, key_storage: Arc<dyn KeyStorage>, executor: Executor) -> Result<Self, Error>
{
Ok(KeyServerImpl {
data: Arc::new(Mutex::new(KeyServerCore::new(config, key_server_set, self_key_pair, acl_storage, key_storage, executor)?)),
@ -51,7 +51,7 @@ impl KeyServerImpl {
}
/// Get cluster client reference.
pub fn cluster(&self) -> Arc<ClusterClient> {
pub fn cluster(&self) -> Arc<dyn ClusterClient> {
self.data.lock().cluster.clone()
}
}
@ -64,7 +64,7 @@ impl AdminSessionsServer for KeyServerImpl {
old_set_signature: RequestSignature,
new_set_signature: RequestSignature,
new_servers_set: BTreeSet<NodeId>,
) -> Box<Future<Item=(), Error=Error> + Send> {
) -> Box<dyn Future<Item=(), Error=Error> + Send> {
return_session(self.data.lock().cluster
.new_servers_set_change_session(None, None, new_servers_set, old_set_signature, new_set_signature))
}
@ -76,7 +76,7 @@ impl ServerKeyGenerator for KeyServerImpl {
key_id: ServerKeyId,
author: Requester,
threshold: usize,
) -> Box<Future<Item=Public, Error=Error> + Send> {
) -> Box<dyn Future<Item=Public, Error=Error> + Send> {
// recover requestor's address key from signature
let address = author.address(&key_id).map_err(Error::InsufficientRequesterData);
@ -89,7 +89,7 @@ impl ServerKeyGenerator for KeyServerImpl {
&self,
key_id: ServerKeyId,
author: Requester,
) -> Box<Future<Item=Public, Error=Error> + Send> {
) -> Box<dyn Future<Item=Public, Error=Error> + Send> {
// recover requestor's public key from signature
let session_and_address = author
.address(&key_id)
@ -121,7 +121,7 @@ impl DocumentKeyServer for KeyServerImpl {
author: Requester,
common_point: Public,
encrypted_document_key: Public,
) -> Box<Future<Item=(), Error=Error> + Send> {
) -> Box<dyn Future<Item=(), Error=Error> + Send> {
// store encrypted key
return_session(self.data.lock().cluster.new_encryption_session(key_id,
author.clone(), common_point, encrypted_document_key))
@ -132,7 +132,7 @@ impl DocumentKeyServer for KeyServerImpl {
key_id: ServerKeyId,
author: Requester,
threshold: usize,
) -> Box<Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
// recover requestor's public key from signature
let public = result(author.public(&key_id).map_err(Error::InsufficientRequesterData));
@ -174,7 +174,7 @@ impl DocumentKeyServer for KeyServerImpl {
&self,
key_id: ServerKeyId,
requester: Requester,
) -> Box<Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
// recover requestor's public key from signature
let public = result(requester.public(&key_id).map_err(Error::InsufficientRequesterData));
@ -200,7 +200,7 @@ impl DocumentKeyServer for KeyServerImpl {
&self,
key_id: ServerKeyId,
requester: Requester,
) -> Box<Future<Item=EncryptedDocumentKeyShadow, Error=Error> + Send> {
) -> Box<dyn Future<Item=EncryptedDocumentKeyShadow, Error=Error> + Send> {
return_session(self.data.lock().cluster.new_decryption_session(key_id,
None, requester.clone(), None, true, false))
}
@ -212,7 +212,7 @@ impl MessageSigner for KeyServerImpl {
key_id: ServerKeyId,
requester: Requester,
message: MessageHash,
) -> Box<Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
// recover requestor's public key from signature
let public = result(requester.public(&key_id).map_err(Error::InsufficientRequesterData));
@ -246,7 +246,7 @@ impl MessageSigner for KeyServerImpl {
key_id: ServerKeyId,
requester: Requester,
message: MessageHash,
) -> Box<Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
// recover requestor's public key from signature
let public = result(requester.public(&key_id).map_err(Error::InsufficientRequesterData));
@ -269,8 +269,8 @@ impl MessageSigner for KeyServerImpl {
}
impl KeyServerCore {
pub fn new(config: &ClusterConfiguration, key_server_set: Arc<KeyServerSet>, self_key_pair: Arc<NodeKeyPair>,
acl_storage: Arc<AclStorage>, key_storage: Arc<KeyStorage>, executor: Executor) -> Result<Self, Error>
pub fn new(config: &ClusterConfiguration, key_server_set: Arc<dyn KeyServerSet>, self_key_pair: Arc<dyn NodeKeyPair>,
acl_storage: Arc<dyn AclStorage>, key_storage: Arc<dyn KeyStorage>, executor: Executor) -> Result<Self, Error>
{
let cconfig = NetClusterConfiguration {
self_key_pair: self_key_pair.clone(),
@ -298,7 +298,7 @@ impl KeyServerCore {
fn return_session<S: ClusterSession>(
session: Result<WaitableSession<S>, Error>,
) -> Box<Future<Item=S::SuccessfulResult, Error=Error> + Send> {
) -> Box<dyn Future<Item=S::SuccessfulResult, Error=Error> + Send> {
match session {
Ok(session) => Box::new(session.into_wait_future()),
Err(error) => Box::new(err(error))
@ -340,7 +340,7 @@ pub mod tests {
_old_set_signature: RequestSignature,
_new_set_signature: RequestSignature,
_new_servers_set: BTreeSet<NodeId>,
) -> Box<Future<Item=(), Error=Error> + Send> {
) -> Box<dyn Future<Item=(), Error=Error> + Send> {
unimplemented!("test-only")
}
}
@ -351,7 +351,7 @@ pub mod tests {
_key_id: ServerKeyId,
_author: Requester,
_threshold: usize,
) -> Box<Future<Item=Public, Error=Error> + Send> {
) -> Box<dyn Future<Item=Public, Error=Error> + Send> {
unimplemented!("test-only")
}
@ -359,7 +359,7 @@ pub mod tests {
&self,
_key_id: ServerKeyId,
_author: Requester,
) -> Box<Future<Item=Public, Error=Error> + Send> {
) -> Box<dyn Future<Item=Public, Error=Error> + Send> {
unimplemented!("test-only")
}
}
@ -371,7 +371,7 @@ pub mod tests {
_author: Requester,
_common_point: Public,
_encrypted_document_key: Public,
) -> Box<Future<Item=(), Error=Error> + Send> {
) -> Box<dyn Future<Item=(), Error=Error> + Send> {
unimplemented!("test-only")
}
@ -380,7 +380,7 @@ pub mod tests {
_key_id: ServerKeyId,
_author: Requester,
_threshold: usize,
) -> Box<Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
unimplemented!("test-only")
}
@ -388,7 +388,7 @@ pub mod tests {
&self,
_key_id: ServerKeyId,
_requester: Requester,
) -> Box<Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
unimplemented!("test-only")
}
@ -396,7 +396,7 @@ pub mod tests {
&self,
_key_id: ServerKeyId,
_requester: Requester,
) -> Box<Future<Item=EncryptedDocumentKeyShadow, Error=Error> + Send> {
) -> Box<dyn Future<Item=EncryptedDocumentKeyShadow, Error=Error> + Send> {
unimplemented!("test-only")
}
}
@ -407,7 +407,7 @@ pub mod tests {
_key_id: ServerKeyId,
_requester: Requester,
_message: MessageHash,
) -> Box<Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
unimplemented!("test-only")
}
@ -416,7 +416,7 @@ pub mod tests {
_key_id: ServerKeyId,
_requester: Requester,
_message: MessageHash,
) -> Box<Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
unimplemented!("test-only")
}
}

View File

@ -83,7 +83,7 @@ struct SessionCore<T: SessionTransport> {
/// Key share.
pub key_share: Option<DocumentKeyShare>,
/// Session result computer.
pub result_computer: Arc<SessionResultComputer>,
pub result_computer: Arc<dyn SessionResultComputer>,
/// Session transport.
pub transport: T,
/// Session nonce.
@ -119,7 +119,7 @@ pub struct SessionParams<T: SessionTransport> {
/// Key share.
pub key_share: Option<DocumentKeyShare>,
/// Session result computer.
pub result_computer: Arc<SessionResultComputer>,
pub result_computer: Arc<dyn SessionResultComputer>,
/// Session transport to communicate to other cluster nodes.
pub transport: T,
/// Session nonce.
@ -140,7 +140,7 @@ enum SessionState {
/// Isolated session transport.
pub struct IsolatedSessionTransport {
/// Cluster.
pub cluster: Arc<Cluster>,
pub cluster: Arc<dyn Cluster>,
/// Key id.
pub key_id: SessionId,
/// Sub session id.
@ -859,7 +859,7 @@ mod tests {
versions: vec![version_id.clone().into()]
})), Err(Error::InvalidMessage));
}
run_test(CommonKeyData {
threshold: 2,
author: Default::default(),

View File

@ -83,9 +83,9 @@ struct SessionCore {
/// Servers set change session meta (id is computed from new_nodes_set).
pub meta: ShareChangeSessionMeta,
/// Cluster which allows this node to send messages to other nodes in the cluster.
pub cluster: Arc<Cluster>,
pub cluster: Arc<dyn Cluster>,
/// Keys storage.
pub key_storage: Arc<KeyStorage>,
pub key_storage: Arc<dyn KeyStorage>,
/// Session-level nonce.
pub nonce: u64,
/// All known nodes.
@ -136,9 +136,9 @@ pub struct SessionParams {
/// Session meta (artificial).
pub meta: ShareChangeSessionMeta,
/// Cluster.
pub cluster: Arc<Cluster>,
pub cluster: Arc<dyn Cluster>,
/// Keys storage.
pub key_storage: Arc<KeyStorage>,
pub key_storage: Arc<dyn KeyStorage>,
/// Session nonce.
pub nonce: u64,
/// All known nodes.
@ -158,7 +158,7 @@ struct ServersSetChangeConsensusTransport {
/// Migration id (if part of auto-migration process).
migration_id: Option<H256>,
/// Cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
}
/// Unknown sessions job transport.
@ -168,7 +168,7 @@ struct UnknownSessionsJobTransport {
/// Session-level nonce.
nonce: u64,
/// Cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
}
/// Key version negotiation transport.
@ -178,7 +178,7 @@ struct ServersSetChangeKeyVersionNegotiationTransport {
/// Session-level nonce.
nonce: u64,
/// Cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
}
impl SessionImpl {
@ -292,7 +292,7 @@ impl SessionImpl {
self.on_session_error(sender, message.error.clone());
Ok(())
},
&ServersSetChangeMessage::ServersSetChangeCompleted(ref message) =>
&ServersSetChangeMessage::ServersSetChangeCompleted(ref message) =>
self.on_session_completed(sender, message),
}
}
@ -893,7 +893,7 @@ impl SessionImpl {
/// Complete servers set change session.
fn complete_session(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> {
debug_assert_eq!(core.meta.self_node_id, core.meta.master_node_id);
// send completion notification
core.cluster.broadcast(Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(ServersSetChangeCompleted {
session: core.meta.id.clone().into(),

View File

@ -28,7 +28,7 @@ pub struct SessionsQueue {
impl SessionsQueue {
/// Create new sessions queue.
pub fn new(key_storage: &Arc<KeyStorage>, unknown_sessions: BTreeSet<SessionId>) -> Self {
pub fn new(key_storage: &Arc<dyn KeyStorage>, unknown_sessions: BTreeSet<SessionId>) -> Self {
// TODO [Opt]:
// 1) known sessions - change to iter
// 2) unknown sessions - request chunk-by-chunk

View File

@ -69,7 +69,7 @@ struct SessionCore<T: SessionTransport> {
/// Session transport to communicate to other cluster nodes.
pub transport: T,
/// Key storage.
pub key_storage: Arc<KeyStorage>,
pub key_storage: Arc<dyn KeyStorage>,
/// Administrator public key.
pub admin_public: Option<Public>,
/// Session completion signal.
@ -131,7 +131,7 @@ pub struct SessionParams<T: SessionTransport> {
/// Session transport.
pub transport: T,
/// Key storage.
pub key_storage: Arc<KeyStorage>,
pub key_storage: Arc<dyn KeyStorage>,
/// Administrator public key.
pub admin_public: Option<Public>,
/// Session nonce.
@ -154,7 +154,7 @@ pub struct IsolatedSessionTransport {
/// Id numbers of all new nodes.
id_numbers: Option<BTreeMap<NodeId, Option<Secret>>>,
/// Cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
}
impl<T> SessionImpl<T> where T: SessionTransport {
@ -817,7 +817,7 @@ impl<T> ClusterSession for SessionImpl<T> where T: SessionTransport {
}
impl IsolatedSessionTransport {
pub fn new(session_id: SessionId, version: Option<H256>, nonce: u64, cluster: Arc<Cluster>) -> Self {
pub fn new(session_id: SessionId, version: Option<H256>, nonce: u64, cluster: Arc<dyn Cluster>) -> Self {
IsolatedSessionTransport {
session: session_id,
version: version,

View File

@ -43,9 +43,9 @@ pub struct ShareChangeSession {
/// Share change session meta.
meta: ShareChangeSessionMeta,
/// Cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
/// Key storage.
key_storage: Arc<KeyStorage>,
key_storage: Arc<dyn KeyStorage>,
/// Key version.
key_version: H256,
/// Nodes that have reported version ownership.
@ -82,9 +82,9 @@ pub struct ShareChangeSessionParams {
/// Share change session meta.
pub meta: ShareChangeSessionMeta,
/// Cluster.
pub cluster: Arc<Cluster>,
pub cluster: Arc<dyn Cluster>,
/// Keys storage.
pub key_storage: Arc<KeyStorage>,
pub key_storage: Arc<dyn KeyStorage>,
/// Session plan.
pub plan: ShareChangeSessionPlan,
}
@ -97,7 +97,7 @@ pub struct ShareChangeTransport {
/// Session nonce.
nonce: u64,
/// Cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
}
impl ShareChangeSession {
@ -201,7 +201,7 @@ impl ShareChangeSession {
}
impl ShareChangeTransport {
pub fn new(session_id: SessionId, nonce: u64, cluster: Arc<Cluster>) -> Self {
pub fn new(session_id: SessionId, nonce: u64, cluster: Arc<dyn Cluster>) -> Self {
ShareChangeTransport {
session_id: session_id,
nonce: nonce,

View File

@ -56,7 +56,7 @@ struct SessionCore {
/// Key share.
pub key_share: Option<DocumentKeyShare>,
/// Cluster which allows this node to send messages to other nodes in the cluster.
pub cluster: Arc<Cluster>,
pub cluster: Arc<dyn Cluster>,
/// Session-level nonce.
pub nonce: u64,
/// Session completion signal.
@ -98,9 +98,9 @@ pub struct SessionParams {
/// Key share.
pub key_share: Option<DocumentKeyShare>,
/// ACL storage.
pub acl_storage: Arc<AclStorage>,
pub acl_storage: Arc<dyn AclStorage>,
/// Cluster.
pub cluster: Arc<Cluster>,
pub cluster: Arc<dyn Cluster>,
/// Session nonce.
pub nonce: u64,
}
@ -118,7 +118,7 @@ struct DecryptionConsensusTransport {
/// Selected key version (on master node).
version: Option<H256>,
/// Cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
}
/// Decryption job transport
@ -134,7 +134,7 @@ struct DecryptionJobTransport {
/// Master node id.
master_node_id: NodeId,
/// Cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
}
/// Session delegation status.

View File

@ -44,9 +44,9 @@ pub struct SessionImpl {
/// Encrypted data.
encrypted_data: Option<DocumentKeyShare>,
/// Key storage.
key_storage: Arc<KeyStorage>,
key_storage: Arc<dyn KeyStorage>,
/// Cluster which allows this node to send messages to other nodes in the cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
/// Session nonce.
nonce: u64,
/// Session completion signal.
@ -64,9 +64,9 @@ pub struct SessionParams {
/// Encrypted data (result of running generation_session::SessionImpl).
pub encrypted_data: Option<DocumentKeyShare>,
/// Key storage.
pub key_storage: Arc<KeyStorage>,
pub key_storage: Arc<dyn KeyStorage>,
/// Cluster
pub cluster: Arc<Cluster>,
pub cluster: Arc<dyn Cluster>,
/// Session nonce.
pub nonce: u64,
}
@ -331,7 +331,7 @@ pub fn check_encrypted_data(key_share: Option<&DocumentKeyShare>) -> Result<(),
}
/// Update key share with encrypted document key.
pub fn update_encrypted_data(key_storage: &Arc<KeyStorage>, key_id: ServerKeyId, mut key_share: DocumentKeyShare, author: Address, common_point: Public, encrypted_point: Public) -> Result<(), Error> {
pub fn update_encrypted_data(key_storage: &Arc<dyn KeyStorage>, key_id: ServerKeyId, mut key_share: DocumentKeyShare, author: Address, common_point: Public, encrypted_point: Public) -> Result<(), Error> {
// author must be the same
if key_share.author != author {
return Err(Error::AccessDenied);

View File

@ -42,9 +42,9 @@ pub struct SessionImpl {
/// Public identifier of this node.
self_node_id: NodeId,
/// Key storage.
key_storage: Option<Arc<KeyStorage>>,
key_storage: Option<Arc<dyn KeyStorage>>,
/// Cluster which allows this node to send messages to other nodes in the cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
/// Session-level nonce.
nonce: u64,
/// Mutable session data.
@ -60,9 +60,9 @@ pub struct SessionParams {
/// Id of node, on which this session is running.
pub self_node_id: Public,
/// Key storage.
pub key_storage: Option<Arc<KeyStorage>>,
pub key_storage: Option<Arc<dyn KeyStorage>>,
/// Cluster
pub cluster: Arc<Cluster>,
pub cluster: Arc<dyn Cluster>,
/// Session nonce.
pub nonce: Option<u64>,
}

View File

@ -56,7 +56,7 @@ struct SessionCore {
/// Key share.
pub key_share: Option<DocumentKeyShare>,
/// Cluster which allows this node to send messages to other nodes in the cluster.
pub cluster: Arc<Cluster>,
pub cluster: Arc<dyn Cluster>,
/// Session-level nonce.
pub nonce: u64,
/// Session completion signal.
@ -112,9 +112,9 @@ pub struct SessionParams {
/// Key share.
pub key_share: Option<DocumentKeyShare>,
/// ACL storage.
pub acl_storage: Arc<AclStorage>,
pub acl_storage: Arc<dyn AclStorage>,
/// Cluster
pub cluster: Arc<Cluster>,
pub cluster: Arc<dyn Cluster>,
/// Session nonce.
pub nonce: u64,
}
@ -130,7 +130,7 @@ struct SigningConsensusTransport {
/// Selected key version (on master node).
version: Option<H256>,
/// Cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
}
/// Signing key generation transport.
@ -142,7 +142,7 @@ struct NonceGenerationTransport<F: Fn(SessionId, Secret, u64, GenerationMessage)
/// Session-level nonce.
nonce: u64,
/// Cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
/// Other nodes ids.
other_nodes_ids: BTreeSet<NodeId>,
/// Message mapping function.
@ -158,7 +158,7 @@ struct SigningJobTransport {
/// Session-level nonce.
nonce: u64,
/// Cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
}
/// Session delegation status.

View File

@ -57,7 +57,7 @@ struct SessionCore {
/// Key share.
pub key_share: Option<DocumentKeyShare>,
/// Cluster which allows this node to send messages to other nodes in the cluster.
pub cluster: Arc<Cluster>,
pub cluster: Arc<dyn Cluster>,
/// Session-level nonce.
pub nonce: u64,
/// SessionImpl completion signal.
@ -106,9 +106,9 @@ pub struct SessionParams {
/// Key share.
pub key_share: Option<DocumentKeyShare>,
/// ACL storage.
pub acl_storage: Arc<AclStorage>,
pub acl_storage: Arc<dyn AclStorage>,
/// Cluster
pub cluster: Arc<Cluster>,
pub cluster: Arc<dyn Cluster>,
/// Session nonce.
pub nonce: u64,
}
@ -124,7 +124,7 @@ struct SigningConsensusTransport {
/// Selected key version (on master node).
version: Option<H256>,
/// Cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
}
/// Signing key generation transport.
@ -132,7 +132,7 @@ struct SessionKeyGenerationTransport {
/// Session access key.
access_key: Secret,
/// Cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
/// Session-level nonce.
nonce: u64,
/// Other nodes ids.
@ -148,7 +148,7 @@ struct SigningJobTransport {
/// Session-level nonce.
nonce: u64,
/// Cluster.
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
}
/// Session delegation status.

View File

@ -104,11 +104,11 @@ pub trait ClusterClient: Send + Sync {
) -> Result<WaitableSession<AdminSession>, Error>;
/// Listen for new generation sessions.
fn add_generation_listener(&self, listener: Arc<ClusterSessionsListener<GenerationSession>>);
fn add_generation_listener(&self, listener: Arc<dyn ClusterSessionsListener<GenerationSession>>);
/// Listen for new decryption sessions.
fn add_decryption_listener(&self, listener: Arc<ClusterSessionsListener<DecryptionSession>>);
fn add_decryption_listener(&self, listener: Arc<dyn ClusterSessionsListener<DecryptionSession>>);
/// Listen for new key version negotiation sessions.
fn add_key_version_negotiation_listener(&self, listener: Arc<ClusterSessionsListener<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>);
fn add_key_version_negotiation_listener(&self, listener: Arc<dyn ClusterSessionsListener<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>);
/// Ask node to make 'faulty' generation sessions.
#[cfg(test)]
@ -143,13 +143,13 @@ pub trait Cluster: Send + Sync {
#[derive(Clone)]
pub struct ClusterConfiguration {
/// KeyPair this node holds.
pub self_key_pair: Arc<NodeKeyPair>,
pub self_key_pair: Arc<dyn NodeKeyPair>,
/// Cluster nodes set.
pub key_server_set: Arc<KeyServerSet>,
pub key_server_set: Arc<dyn KeyServerSet>,
/// Reference to key storage
pub key_storage: Arc<KeyStorage>,
pub key_storage: Arc<dyn KeyStorage>,
/// Reference to ACL storage
pub acl_storage: Arc<AclStorage>,
pub acl_storage: Arc<dyn AclStorage>,
/// Administrator public key.
pub admin_public: Option<Public>,
/// Do not remove sessions from container.
@ -172,8 +172,8 @@ pub struct ClusterClientImpl<C: ConnectionManager> {
pub struct ClusterView {
configured_nodes_count: usize,
connected_nodes: BTreeSet<NodeId>,
connections: Arc<ConnectionProvider>,
self_key_pair: Arc<NodeKeyPair>,
connections: Arc<dyn ConnectionProvider>,
self_key_pair: Arc<dyn NodeKeyPair>,
}
/// Cross-thread shareable cluster data.
@ -181,15 +181,15 @@ pub struct ClusterData<C: ConnectionManager> {
/// Cluster configuration.
pub config: ClusterConfiguration,
/// KeyPair this node holds.
pub self_key_pair: Arc<NodeKeyPair>,
pub self_key_pair: Arc<dyn NodeKeyPair>,
/// Connections data.
pub connections: C,
/// Active sessions data.
pub sessions: Arc<ClusterSessions>,
// Messages processor.
pub message_processor: Arc<MessageProcessor>,
pub message_processor: Arc<dyn MessageProcessor>,
/// Link between servers set change session and the connections manager.
pub servers_set_change_creator_connector: Arc<ServersSetChangeSessionCreatorConnector>,
pub servers_set_change_creator_connector: Arc<dyn ServersSetChangeSessionCreatorConnector>,
}
/// Create new network-backed cluster.
@ -206,7 +206,7 @@ pub fn new_network_cluster(
connections: BTreeMap::new(),
}));
let connection_trigger: Box<ConnectionTrigger> = match net_config.auto_migrate_enabled {
let connection_trigger: Box<dyn ConnectionTrigger> = match net_config.auto_migrate_enabled {
false => Box::new(SimpleConnectionTrigger::with_config(&config)),
true if config.admin_public.is_none() => Box::new(ConnectionTriggerWithMigration::with_config(&config)),
true => return Err(Error::Internal(
@ -264,9 +264,9 @@ pub fn new_test_cluster(
impl<C: ConnectionManager> ClusterCore<C> {
pub fn new(
sessions: Arc<ClusterSessions>,
message_processor: Arc<MessageProcessor>,
message_processor: Arc<dyn MessageProcessor>,
connections: C,
servers_set_change_creator_connector: Arc<ServersSetChangeSessionCreatorConnector>,
servers_set_change_creator_connector: Arc<dyn ServersSetChangeSessionCreatorConnector>,
config: ClusterConfiguration,
) -> Result<Arc<Self>, Error> {
Ok(Arc::new(ClusterCore {
@ -282,7 +282,7 @@ impl<C: ConnectionManager> ClusterCore<C> {
}
/// Create new client interface.
pub fn client(&self) -> Arc<ClusterClient> {
pub fn client(&self) -> Arc<dyn ClusterClient> {
Arc::new(ClusterClientImpl::new(self.data.clone()))
}
@ -293,7 +293,7 @@ impl<C: ConnectionManager> ClusterCore<C> {
}
#[cfg(test)]
pub fn view(&self) -> Result<Arc<Cluster>, Error> {
pub fn view(&self) -> Result<Arc<dyn Cluster>, Error> {
let connections = self.data.connections.provider();
let mut connected_nodes = connections.connected_nodes()?;
let disconnected_nodes = connections.disconnected_nodes();
@ -311,8 +311,8 @@ impl<C: ConnectionManager> ClusterCore<C> {
impl ClusterView {
pub fn new(
self_key_pair: Arc<NodeKeyPair>,
connections: Arc<ConnectionProvider>,
self_key_pair: Arc<dyn NodeKeyPair>,
connections: Arc<dyn ConnectionProvider>,
nodes: BTreeSet<NodeId>,
configured_nodes_count: usize
) -> Self {
@ -555,15 +555,15 @@ impl<C: ConnectionManager> ClusterClient for ClusterClientImpl<C> {
})
}
fn add_generation_listener(&self, listener: Arc<ClusterSessionsListener<GenerationSession>>) {
fn add_generation_listener(&self, listener: Arc<dyn ClusterSessionsListener<GenerationSession>>) {
self.data.sessions.generation_sessions.add_listener(listener);
}
fn add_decryption_listener(&self, listener: Arc<ClusterSessionsListener<DecryptionSession>>) {
fn add_decryption_listener(&self, listener: Arc<dyn ClusterSessionsListener<DecryptionSession>>) {
self.data.sessions.decryption_sessions.add_listener(listener);
}
fn add_key_version_negotiation_listener(&self, listener: Arc<ClusterSessionsListener<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>) {
fn add_key_version_negotiation_listener(&self, listener: Arc<dyn ClusterSessionsListener<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>) {
self.data.sessions.negotiation_sessions.add_listener(listener);
}
@ -597,10 +597,10 @@ pub struct ServersSetChangeParams {
}
pub fn new_servers_set_change_session(
self_key_pair: Arc<NodeKeyPair>,
self_key_pair: Arc<dyn NodeKeyPair>,
sessions: &ClusterSessions,
connections: Arc<ConnectionProvider>,
servers_set_change_creator_connector: Arc<ServersSetChangeSessionCreatorConnector>,
connections: Arc<dyn ConnectionProvider>,
servers_set_change_creator_connector: Arc<dyn ServersSetChangeSessionCreatorConnector>,
params: ServersSetChangeParams,
) -> Result<WaitableSession<AdminSession>, Error> {
let session_id = match params.session_id {
@ -757,9 +757,9 @@ pub mod tests {
unimplemented!("test-only")
}
fn add_generation_listener(&self, _listener: Arc<ClusterSessionsListener<GenerationSession>>) {}
fn add_decryption_listener(&self, _listener: Arc<ClusterSessionsListener<DecryptionSession>>) {}
fn add_key_version_negotiation_listener(&self, _listener: Arc<ClusterSessionsListener<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>) {}
fn add_generation_listener(&self, _listener: Arc<dyn ClusterSessionsListener<GenerationSession>>) {}
fn add_decryption_listener(&self, _listener: Arc<dyn ClusterSessionsListener<DecryptionSession>>) {}
fn add_key_version_negotiation_listener(&self, _listener: Arc<dyn ClusterSessionsListener<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>) {}
fn make_faulty_generation_sessions(&self) { unimplemented!("test-only") }
fn generation_session(&self, _session_id: &SessionId) -> Option<Arc<GenerationSession>> { unimplemented!("test-only") }

View File

@ -37,7 +37,7 @@ pub trait Connection: Send + Sync {
/// Connections manager. Responsible for keeping us connected to all required nodes.
pub trait ConnectionManager: 'static + Send + Sync {
/// Returns shared reference to connections provider.
fn provider(&self) -> Arc<ConnectionProvider>;
fn provider(&self) -> Arc<dyn ConnectionProvider>;
/// Try to reach all disconnected nodes immediately. This method is exposed mostly for
/// tests, where all 'nodes' are starting listening for incoming connections first and
/// only after this, they're actually start connecting to each other.
@ -55,7 +55,7 @@ pub trait ConnectionProvider: Send + Sync {
/// Returns the set of currently disconnected nodes.
fn disconnected_nodes(&self) -> BTreeSet<NodeId>;
/// Returns the reference to the active node connection or None if the node is not connected.
fn connection(&self, node: &NodeId) -> Option<Arc<Connection>>;
fn connection(&self, node: &NodeId) -> Option<Arc<dyn Connection>>;
}
#[cfg(test)]
@ -110,7 +110,7 @@ pub mod tests {
}
impl ConnectionManager for Arc<TestConnections> {
fn provider(&self) -> Arc<ConnectionProvider> {
fn provider(&self) -> Arc<dyn ConnectionProvider> {
self.clone()
}
@ -129,7 +129,7 @@ pub mod tests {
self.disconnected_nodes.lock().clone()
}
fn connection(&self, node: &NodeId) -> Option<Arc<Connection>> {
fn connection(&self, node: &NodeId) -> Option<Arc<dyn Connection>> {
match self.connected_nodes.lock().contains(node) {
true => Some(Arc::new(TestConnection {
from: self.node,

View File

@ -38,7 +38,7 @@ use key_server_cluster::net::{accept_connection as io_accept_connection,
connect as io_connect, Connection as IoConnection};
/// Empty future.
pub type BoxedEmptyFuture = Box<Future<Item = (), Error = ()> + Send>;
pub type BoxedEmptyFuture = Box<dyn Future<Item = (), Error = ()> + Send>;
/// Maintain interval (seconds). Every MAINTAIN_INTERVAL seconds node:
/// 1) checks if connected nodes are responding to KeepAlive messages
@ -79,11 +79,11 @@ struct NetConnectionsData {
/// Reference to tokio task executor.
executor: Executor,
/// Key pair of this node.
self_key_pair: Arc<NodeKeyPair>,
self_key_pair: Arc<dyn NodeKeyPair>,
/// Network messages processor.
message_processor: Arc<MessageProcessor>,
message_processor: Arc<dyn MessageProcessor>,
/// Connections trigger.
trigger: Mutex<Box<ConnectionTrigger>>,
trigger: Mutex<Box<dyn ConnectionTrigger>>,
/// Mutable connection data.
container: Arc<RwLock<NetConnectionsContainer>>,
}
@ -121,8 +121,8 @@ impl NetConnectionsManager {
/// Create new network connections manager.
pub fn new(
executor: Executor,
message_processor: Arc<MessageProcessor>,
trigger: Box<ConnectionTrigger>,
message_processor: Arc<dyn MessageProcessor>,
trigger: Box<dyn ConnectionTrigger>,
container: Arc<RwLock<NetConnectionsContainer>>,
config: &ClusterConfiguration,
net_config: NetConnectionsManagerConfig,
@ -153,7 +153,7 @@ impl NetConnectionsManager {
}
impl ConnectionManager for NetConnectionsManager {
fn provider(&self) -> Arc<ConnectionProvider> {
fn provider(&self) -> Arc<dyn ConnectionProvider> {
self.data.container.clone()
}
@ -180,7 +180,7 @@ impl ConnectionProvider for RwLock<NetConnectionsContainer> {
.collect()
}
fn connection(&self, node: &NodeId) -> Option<Arc<Connection>> {
fn connection(&self, node: &NodeId) -> Option<Arc<dyn Connection>> {
match self.read().connections.get(node).cloned() {
Some(connection) => Some(connection),
None => None,
@ -302,7 +302,7 @@ impl NetConnectionsData {
trace!(target: "secretstore_net", "{}: removing connection to {} at {}",
self.self_key_pair.public(), node_id, entry.get().node_address());
entry.remove_entry();
true
} else {
false

View File

@ -32,7 +32,7 @@ pub trait MessageProcessor: Send + Sync {
/// Process disconnect from the remote node.
fn process_disconnect(&self, node: &NodeId);
/// Process single message from the connection.
fn process_connection_message(&self, connection: Arc<Connection>, message: Message);
fn process_connection_message(&self, connection: Arc<dyn Connection>, message: Message);
/// Start servers set change session. This is typically used by ConnectionManager when
/// it detects that auto-migration session needs to be started.
@ -49,19 +49,19 @@ pub trait MessageProcessor: Send + Sync {
/// Bridge between ConnectionManager and ClusterSessions.
pub struct SessionsMessageProcessor {
self_key_pair: Arc<NodeKeyPair>,
servers_set_change_creator_connector: Arc<ServersSetChangeSessionCreatorConnector>,
self_key_pair: Arc<dyn NodeKeyPair>,
servers_set_change_creator_connector: Arc<dyn ServersSetChangeSessionCreatorConnector>,
sessions: Arc<ClusterSessions>,
connections: Arc<ConnectionProvider>,
connections: Arc<dyn ConnectionProvider>,
}
impl SessionsMessageProcessor {
/// Create new instance of SessionsMessageProcessor.
pub fn new(
self_key_pair: Arc<NodeKeyPair>,
servers_set_change_creator_connector: Arc<ServersSetChangeSessionCreatorConnector>,
self_key_pair: Arc<dyn NodeKeyPair>,
servers_set_change_creator_connector: Arc<dyn ServersSetChangeSessionCreatorConnector>,
sessions: Arc<ClusterSessions>,
connections: Arc<ConnectionProvider>,
connections: Arc<dyn ConnectionProvider>,
) -> Self {
SessionsMessageProcessor {
self_key_pair,
@ -75,7 +75,7 @@ impl SessionsMessageProcessor {
fn process_message<S: ClusterSession, SC: ClusterSessionCreator<S>>(
&self,
sessions: &ClusterSessionsContainer<S, SC>,
connection: Arc<Connection>,
connection: Arc<dyn Connection>,
mut message: Message,
) -> Option<Arc<S>>
where
@ -198,7 +198,7 @@ impl SessionsMessageProcessor {
}
/// Process single cluster message from the connection.
fn process_cluster_message(&self, connection: Arc<Connection>, message: ClusterMessage) {
fn process_cluster_message(&self, connection: Arc<dyn Connection>, message: ClusterMessage) {
match message {
ClusterMessage::KeepAlive(_) => {
let msg = Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse {
@ -220,7 +220,7 @@ impl MessageProcessor for SessionsMessageProcessor {
self.sessions.on_connection_timeout(node);
}
fn process_connection_message(&self, connection: Arc<Connection>, message: Message) {
fn process_connection_message(&self, connection: Arc<dyn Connection>, message: Message) {
trace!(target: "secretstore_net", "{}: received message {} from {}",
self.self_key_pair.public(), message, connection.node_id());

View File

@ -188,7 +188,7 @@ pub struct ClusterSessionsContainer<S: ClusterSession, SC: ClusterSessionCreator
/// Active sessions.
sessions: RwLock<BTreeMap<S::Id, QueuedSession<S>>>,
/// Listeners. Lock order: sessions -> listeners.
listeners: Mutex<Vec<Weak<ClusterSessionsListener<S>>>>,
listeners: Mutex<Vec<Weak<dyn ClusterSessionsListener<S>>>>,
/// Sessions container state.
container_state: Arc<Mutex<ClusterSessionsContainerState>>,
/// Do not actually remove sessions.
@ -200,7 +200,7 @@ pub struct QueuedSession<S> {
/// Session master.
pub master: NodeId,
/// Cluster view.
pub cluster_view: Arc<Cluster>,
pub cluster_view: Arc<dyn Cluster>,
/// Last keep alive time.
pub last_keep_alive_time: Instant,
/// Last received message time.
@ -224,7 +224,7 @@ pub enum ClusterSessionsContainerState {
impl ClusterSessions {
/// Create new cluster sessions container.
pub fn new(config: &ClusterConfiguration, servers_set_change_session_creator_connector: Arc<ServersSetChangeSessionCreatorConnector>) -> Self {
pub fn new(config: &ClusterConfiguration, servers_set_change_session_creator_connector: Arc<dyn ServersSetChangeSessionCreatorConnector>) -> Self {
let container_state = Arc::new(Mutex::new(ClusterSessionsContainerState::Idle));
let creator_core = Arc::new(SessionCreatorCore::new(config));
ClusterSessions {
@ -320,7 +320,7 @@ impl<S, SC> ClusterSessionsContainer<S, SC> where S: ClusterSession, SC: Cluster
}
}
pub fn add_listener(&self, listener: Arc<ClusterSessionsListener<S>>) {
pub fn add_listener(&self, listener: Arc<dyn ClusterSessionsListener<S>>) {
self.listeners.lock().push(Arc::downgrade(&listener));
}
@ -347,7 +347,7 @@ impl<S, SC> ClusterSessionsContainer<S, SC> where S: ClusterSession, SC: Cluster
pub fn insert(
&self,
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
master: NodeId,
session_id: S::Id,
session_nonce: Option<u64>,
@ -439,7 +439,7 @@ impl<S, SC> ClusterSessionsContainer<S, SC> where S: ClusterSession, SC: Cluster
}
}
fn notify_listeners<F: Fn(&ClusterSessionsListener<S>) -> ()>(&self, callback: F) {
fn notify_listeners<F: Fn(&dyn ClusterSessionsListener<S>) -> ()>(&self, callback: F) {
let mut listeners = self.listeners.lock();
let mut listener_index = 0;
while listener_index < listeners.len() {
@ -621,7 +621,7 @@ impl<S: ClusterSession> WaitableSession<S> {
}
}
pub fn into_wait_future(self) -> Box<Future<Item=S::SuccessfulResult, Error=Error> + Send> {
pub fn into_wait_future(self) -> Box<dyn Future<Item=S::SuccessfulResult, Error=Error> + Send> {
Box::new(self.oneshot
.map_err(|e| Error::Internal(e.to_string()))
.and_then(|res| res))
@ -647,7 +647,7 @@ impl<T> CompletionSignal<T> {
}
}
pub fn create_cluster_view(self_key_pair: Arc<NodeKeyPair>, connections: Arc<ConnectionProvider>, requires_all_connections: bool) -> Result<Arc<Cluster>, Error> {
pub fn create_cluster_view(self_key_pair: Arc<dyn NodeKeyPair>, connections: Arc<dyn ConnectionProvider>, requires_all_connections: bool) -> Result<Arc<dyn Cluster>, Error> {
let mut connected_nodes = connections.connected_nodes()?;
let disconnected_nodes = connections.disconnected_nodes();

View File

@ -56,7 +56,7 @@ pub trait ClusterSessionCreator<S: ClusterSession> {
/// Create cluster session.
fn create(
&self,
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
master: NodeId,
nonce: Option<u64>,
id: S::Id,
@ -74,9 +74,9 @@ pub struct SessionCreatorCore {
/// Self node id.
self_node_id: NodeId,
/// Reference to key storage
key_storage: Arc<KeyStorage>,
key_storage: Arc<dyn KeyStorage>,
/// Reference to ACL storage
acl_storage: Arc<AclStorage>,
acl_storage: Arc<dyn AclStorage>,
/// Always-increasing sessions counter. Is used as session nonce to prevent replay attacks:
/// 1) during handshake, KeyServers generate new random key to encrypt messages
/// => there's no way to use messages from previous connections for replay attacks
@ -153,7 +153,7 @@ impl ClusterSessionCreator<GenerationSessionImpl> for GenerationSessionCreator {
fn create(
&self,
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
master: NodeId,
nonce: Option<u64>,
id: SessionId,
@ -198,7 +198,7 @@ impl ClusterSessionCreator<EncryptionSessionImpl> for EncryptionSessionCreator {
fn create(
&self,
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
master: NodeId,
nonce: Option<u64>,
id: SessionId,
@ -248,7 +248,7 @@ impl ClusterSessionCreator<DecryptionSessionImpl> for DecryptionSessionCreator {
fn create(
&self,
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
master: NodeId,
nonce: Option<u64>,
id: SessionIdWithSubSession,
@ -305,7 +305,7 @@ impl ClusterSessionCreator<SchnorrSigningSessionImpl> for SchnorrSigningSessionC
fn create(
&self,
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
master: NodeId,
nonce: Option<u64>,
id: SessionIdWithSubSession,
@ -359,7 +359,7 @@ impl ClusterSessionCreator<EcdsaSigningSessionImpl> for EcdsaSigningSessionCreat
}))
}
fn create(&self, cluster: Arc<Cluster>, master: NodeId, nonce: Option<u64>, id: SessionIdWithSubSession, requester: Option<Requester>) -> Result<WaitableSession<EcdsaSigningSessionImpl>, Error> {
fn create(&self, cluster: Arc<dyn Cluster>, master: NodeId, nonce: Option<u64>, id: SessionIdWithSubSession, requester: Option<Requester>) -> Result<WaitableSession<EcdsaSigningSessionImpl>, Error> {
let encrypted_data = self.core.read_key_share(&id.id)?;
let nonce = self.core.check_session_nonce(&master, nonce)?;
let (session, oneshot) = EcdsaSigningSessionImpl::new(EcdsaSigningSessionParams {
@ -403,7 +403,7 @@ impl ClusterSessionCreator<KeyVersionNegotiationSessionImpl<VersionNegotiationTr
fn create(
&self,
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
master: NodeId,
nonce: Option<u64>,
id: SessionIdWithSubSession,
@ -445,7 +445,7 @@ pub struct AdminSessionCreator {
/// Administrator public.
pub admin_public: Option<Public>,
/// Servers set change sessions creator connector.
pub servers_set_change_session_creator_connector: Arc<ServersSetChangeSessionCreatorConnector>,
pub servers_set_change_session_creator_connector: Arc<dyn ServersSetChangeSessionCreatorConnector>,
}
impl ClusterSessionCreator<AdminSession> for AdminSessionCreator {
@ -476,7 +476,7 @@ impl ClusterSessionCreator<AdminSession> for AdminSessionCreator {
fn create(
&self,
cluster: Arc<Cluster>,
cluster: Arc<dyn Cluster>,
master: NodeId,
nonce: Option<u64>,
id: SessionId,

View File

@ -52,7 +52,7 @@ pub trait ConnectionTrigger: Send + Sync {
/// Maintain active connections.
fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer);
/// Return connector for the servers set change session creator.
fn servers_set_change_creator_connector(&self) -> Arc<ServersSetChangeSessionCreatorConnector>;
fn servers_set_change_creator_connector(&self) -> Arc<dyn ServersSetChangeSessionCreatorConnector>;
}
/// Servers set change session creator connector.
@ -67,11 +67,11 @@ pub trait ServersSetChangeSessionCreatorConnector: Send + Sync {
/// Simple connection trigger, which only keeps connections to current_set.
pub struct SimpleConnectionTrigger {
/// Key server set cluster.
key_server_set: Arc<KeyServerSet>,
key_server_set: Arc<dyn KeyServerSet>,
/// Trigger connections.
connections: TriggerConnections,
/// Servers set change session creator connector.
connector: Arc<ServersSetChangeSessionCreatorConnector>,
connector: Arc<dyn ServersSetChangeSessionCreatorConnector>,
}
/// Simple Servers set change session creator connector, which will just return
@ -93,7 +93,7 @@ pub enum ConnectionsAction {
/// Trigger connections.
pub struct TriggerConnections {
/// This node key pair.
pub self_key_pair: Arc<NodeKeyPair>,
pub self_key_pair: Arc<dyn NodeKeyPair>,
}
impl SimpleConnectionTrigger {
@ -103,7 +103,7 @@ impl SimpleConnectionTrigger {
}
/// Create new simple connection trigger.
pub fn new(key_server_set: Arc<KeyServerSet>, self_key_pair: Arc<NodeKeyPair>, admin_public: Option<Public>) -> Self {
pub fn new(key_server_set: Arc<dyn KeyServerSet>, self_key_pair: Arc<dyn NodeKeyPair>, admin_public: Option<Public>) -> Self {
SimpleConnectionTrigger {
key_server_set: key_server_set,
connections: TriggerConnections {
@ -139,7 +139,7 @@ impl ConnectionTrigger for SimpleConnectionTrigger {
self.connections.maintain(ConnectionsAction::ConnectToCurrentSet, connections, &self.key_server_set.snapshot())
}
fn servers_set_change_creator_connector(&self) -> Arc<ServersSetChangeSessionCreatorConnector> {
fn servers_set_change_creator_connector(&self) -> Arc<dyn ServersSetChangeSessionCreatorConnector> {
self.connector.clone()
}
}

View File

@ -33,9 +33,9 @@ use {NodeKeyPair};
/// Key servers set change trigger with automated migration procedure.
pub struct ConnectionTriggerWithMigration {
/// This node key pair.
self_key_pair: Arc<NodeKeyPair>,
self_key_pair: Arc<dyn NodeKeyPair>,
/// Key server set.
key_server_set: Arc<KeyServerSet>,
key_server_set: Arc<dyn KeyServerSet>,
/// Last server set state.
snapshot: KeyServerSetSnapshot,
/// Required connections action.
@ -105,9 +105,9 @@ struct TriggerSession {
/// Servers set change session creator connector.
connector: Arc<ServersSetChangeSessionCreatorConnectorWithMigration>,
/// This node key pair.
self_key_pair: Arc<NodeKeyPair>,
self_key_pair: Arc<dyn NodeKeyPair>,
/// Key server set.
key_server_set: Arc<KeyServerSet>,
key_server_set: Arc<dyn KeyServerSet>,
}
impl ConnectionTriggerWithMigration {
@ -117,7 +117,7 @@ impl ConnectionTriggerWithMigration {
}
/// Create new trigge with migration.
pub fn new(key_server_set: Arc<KeyServerSet>, self_key_pair: Arc<NodeKeyPair>) -> Self {
pub fn new(key_server_set: Arc<dyn KeyServerSet>, self_key_pair: Arc<dyn NodeKeyPair>) -> Self {
let snapshot = key_server_set.snapshot();
let migration = snapshot.migration.clone();
@ -203,7 +203,7 @@ impl ConnectionTrigger for ConnectionTriggerWithMigration {
}
}
fn servers_set_change_creator_connector(&self) -> Arc<ServersSetChangeSessionCreatorConnector> {
fn servers_set_change_creator_connector(&self) -> Arc<dyn ServersSetChangeSessionCreatorConnector> {
self.session.connector.clone()
}
}

View File

@ -19,7 +19,7 @@ use std::time::Duration;
use futures::{Future, Poll};
use tokio::timer::timeout::{Timeout, Error as TimeoutError};
type DeadlineBox<F> = Box<Future<
type DeadlineBox<F> = Box<dyn Future<
Item = DeadlineStatus<<F as Future>::Item>,
Error = TimeoutError<<F as Future>::Error>
> + Send>;

View File

@ -46,14 +46,14 @@ use key_server_cluster::io::{write_message, write_encrypted_message, WriteMessag
read_message, read_encrypted_message, fix_shared_key};
/// Start handshake procedure with another node from the cluster.
pub fn handshake<A>(a: A, self_key_pair: Arc<NodeKeyPair>, trusted_nodes: BTreeSet<NodeId>) -> Handshake<A> where A: AsyncWrite + AsyncRead {
pub fn handshake<A>(a: A, self_key_pair: Arc<dyn NodeKeyPair>, trusted_nodes: BTreeSet<NodeId>) -> Handshake<A> where A: AsyncWrite + AsyncRead {
let init_data = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into)
.and_then(|cp| Random.generate().map(|kp| (cp, kp)).map_err(Into::into));
handshake_with_init_data(a, init_data, self_key_pair, trusted_nodes)
}
/// Start handshake procedure with another node from the cluster and given plain confirmation + session key pair.
pub fn handshake_with_init_data<A>(a: A, init_data: Result<(H256, KeyPair), Error>, self_key_pair: Arc<NodeKeyPair>, trusted_nodes: BTreeSet<NodeId>) -> Handshake<A> where A: AsyncWrite + AsyncRead {
pub fn handshake_with_init_data<A>(a: A, init_data: Result<(H256, KeyPair), Error>, self_key_pair: Arc<dyn NodeKeyPair>, trusted_nodes: BTreeSet<NodeId>) -> Handshake<A> where A: AsyncWrite + AsyncRead {
let handshake_input_data = init_data
.and_then(|(cp, kp)| sign(kp.secret(), &cp).map(|sp| (cp, kp, sp)).map_err(Into::into))
.and_then(|(cp, kp, sp)| Handshake::<A>::make_public_key_message(self_key_pair.public().clone(), cp.clone(), sp).map(|msg| (cp, kp, msg)));
@ -79,7 +79,7 @@ pub fn handshake_with_init_data<A>(a: A, init_data: Result<(H256, KeyPair), Erro
}
/// Wait for handshake procedure to be started by another node from the cluster.
pub fn accept_handshake<A>(a: A, self_key_pair: Arc<NodeKeyPair>) -> Handshake<A> where A: AsyncWrite + AsyncRead {
pub fn accept_handshake<A>(a: A, self_key_pair: Arc<dyn NodeKeyPair>) -> Handshake<A> where A: AsyncWrite + AsyncRead {
let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into);
let handshake_input_data = self_confirmation_plain
.and_then(|cp| Random.generate().map(|kp| (cp, kp)).map_err(Into::into));
@ -118,7 +118,7 @@ pub struct Handshake<A> {
is_active: bool,
error: Option<(A, Result<HandshakeResult, Error>)>,
state: HandshakeState<A>,
self_key_pair: Arc<NodeKeyPair>,
self_key_pair: Arc<dyn NodeKeyPair>,
self_session_key_pair: Option<KeyPair>,
self_confirmation_plain: H256,
trusted_nodes: Option<BTreeSet<NodeId>>,
@ -156,7 +156,7 @@ impl<A> Handshake<A> where A: AsyncRead + AsyncWrite {
})))
}
fn make_private_key_signature_message(self_key_pair: &NodeKeyPair, confirmation_plain: &H256) -> Result<Message, Error> {
fn make_private_key_signature_message(self_key_pair: &dyn NodeKeyPair, confirmation_plain: &H256) -> Result<Message, Error> {
Ok(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature {
confirmation_signed: self_key_pair.sign(confirmation_plain)?.into(),
})))

View File

@ -26,13 +26,13 @@ pub struct KeyAccessJob {
/// Has key share?
has_key_share: bool,
/// ACL storage.
acl_storage: Arc<AclStorage>,
acl_storage: Arc<dyn AclStorage>,
/// Requester data.
requester: Option<Requester>,
}
impl KeyAccessJob {
pub fn new_on_slave(id: SessionId, acl_storage: Arc<AclStorage>) -> Self {
pub fn new_on_slave(id: SessionId, acl_storage: Arc<dyn AclStorage>) -> Self {
KeyAccessJob {
id: id,
has_key_share: true,
@ -41,7 +41,7 @@ impl KeyAccessJob {
}
}
pub fn new_on_master(id: SessionId, acl_storage: Arc<AclStorage>, requester: Requester) -> Self {
pub fn new_on_master(id: SessionId, acl_storage: Arc<dyn AclStorage>, requester: Requester) -> Self {
KeyAccessJob {
id: id,
has_key_share: true,
@ -76,7 +76,7 @@ impl JobExecutor for KeyAccessJob {
if !self.has_key_share {
return Ok(JobPartialRequestAction::Reject(false));
}
self.requester = Some(partial_request.clone());
self.acl_storage.check(partial_request.address(&self.id).map_err(Error::InsufficientRequesterData)?, &self.id)
.map(|is_confirmed| if is_confirmed { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) })

View File

@ -24,18 +24,18 @@ pub struct UnknownSessionsJob {
/// Target node id.
target_node_id: Option<NodeId>,
/// Keys storage.
key_storage: Arc<KeyStorage>,
key_storage: Arc<dyn KeyStorage>,
}
impl UnknownSessionsJob {
pub fn new_on_slave(key_storage: Arc<KeyStorage>) -> Self {
pub fn new_on_slave(key_storage: Arc<dyn KeyStorage>) -> Self {
UnknownSessionsJob {
target_node_id: None,
key_storage: key_storage,
}
}
pub fn new_on_master(key_storage: Arc<KeyStorage>, self_node_id: NodeId) -> Self {
pub fn new_on_master(key_storage: Arc<dyn KeyStorage>, self_node_id: NodeId) -> Self {
UnknownSessionsJob {
target_node_id: Some(self_node_id),
key_storage: key_storage,

View File

@ -25,7 +25,7 @@ use key_server_cluster::io::{accept_handshake, Handshake, Deadline, deadline};
use key_server_cluster::net::Connection;
/// Create future for accepting incoming connection.
pub fn accept_connection(stream: TcpStream, self_key_pair: Arc<NodeKeyPair>) -> Deadline<AcceptConnection> {
pub fn accept_connection(stream: TcpStream, self_key_pair: Arc<dyn NodeKeyPair>) -> Deadline<AcceptConnection> {
// TODO: This could fail so it would be better either to accept the
// address as a separate argument or return a result.
let address = stream.peer_addr().expect("Unable to determine tcp peer address");

View File

@ -26,7 +26,7 @@ use key_server_cluster::io::{handshake, Handshake, Deadline, deadline};
use key_server_cluster::net::Connection;
/// Create future for connecting to other node.
pub fn connect(address: &SocketAddr, self_key_pair: Arc<NodeKeyPair>, trusted_nodes: BTreeSet<NodeId>) -> Deadline<Connect> {
pub fn connect(address: &SocketAddr, self_key_pair: Arc<dyn NodeKeyPair>, trusted_nodes: BTreeSet<NodeId>) -> Deadline<Connect> {
let connect = Connect {
state: ConnectState::TcpConnect(TcpStream::connect(address)),
address: address.clone(),
@ -47,7 +47,7 @@ enum ConnectState {
pub struct Connect {
state: ConnectState,
address: SocketAddr,
self_key_pair: Arc<NodeKeyPair>,
self_key_pair: Arc<dyn NodeKeyPair>,
trusted_nodes: BTreeSet<NodeId>,
}

View File

@ -121,11 +121,11 @@ struct CachedContract {
/// Previous confirm migration transaction.
confirm_migration_tx: Option<PreviousMigrationTransaction>,
/// This node key pair.
self_key_pair: Arc<NodeKeyPair>,
self_key_pair: Arc<dyn NodeKeyPair>,
}
impl OnChainKeyServerSet {
pub fn new(trusted_client: TrustedClient, contract_address_source: Option<ContractAddress>, self_key_pair: Arc<NodeKeyPair>, auto_migrate_enabled: bool, key_servers: BTreeMap<Public, NodeAddress>) -> Result<Arc<Self>, Error> {
pub fn new(trusted_client: TrustedClient, contract_address_source: Option<ContractAddress>, self_key_pair: Arc<dyn NodeKeyPair>, auto_migrate_enabled: bool, key_servers: BTreeMap<Public, NodeAddress>) -> Result<Arc<Self>, Error> {
let client = trusted_client.get_untrusted();
let key_server_set = Arc::new(OnChainKeyServerSet {
contract: Mutex::new(CachedContract::new(trusted_client, contract_address_source, self_key_pair, auto_migrate_enabled, key_servers)?),
@ -232,7 +232,7 @@ impl <F: Fn(Vec<u8>) -> Result<Vec<u8>, String>> KeyServerSubset<F> for NewKeySe
}
impl CachedContract {
pub fn new(client: TrustedClient, contract_address_source: Option<ContractAddress>, self_key_pair: Arc<NodeKeyPair>, auto_migrate_enabled: bool, key_servers: BTreeMap<Public, NodeAddress>) -> Result<Self, Error> {
pub fn new(client: TrustedClient, contract_address_source: Option<ContractAddress>, self_key_pair: Arc<dyn NodeKeyPair>, auto_migrate_enabled: bool, key_servers: BTreeMap<Public, NodeAddress>) -> Result<Self, Error> {
let server_set = match contract_address_source.is_none() {
true => key_servers.into_iter()
.map(|(p, addr)| {
@ -471,7 +471,7 @@ impl CachedContract {
key_servers
}
fn update_number_of_confirmations_if_required(&mut self, client: &BlockChainClient) {
fn update_number_of_confirmations_if_required(&mut self, client: &dyn BlockChainClient) {
if !self.auto_migrate_enabled {
return;
}
@ -574,11 +574,11 @@ fn update_last_transaction_block(client: &Client, migration_id: &H256, previous_
true
}
fn latest_block_hash(client: &BlockChainClient) -> H256 {
fn latest_block_hash(client: &dyn BlockChainClient) -> H256 {
client.block_hash(BlockId::Latest).unwrap_or_default()
}
fn block_confirmations(client: &BlockChainClient, block: H256) -> Option<u64> {
fn block_confirmations(client: &dyn BlockChainClient, block: H256) -> Option<u64> {
client.block_number(BlockId::Hash(block))
.and_then(|block| client.block_number(BlockId::Latest).map(|last_block| (block, last_block)))
.map(|(block, last_block)| last_block - block)

View File

@ -72,17 +72,17 @@ pub trait KeyStorage: Send + Sync {
/// Check if storage contains document encryption key
fn contains(&self, document: &ServerKeyId) -> bool;
/// Iterate through storage
fn iter<'a>(&'a self) -> Box<Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a>;
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a>;
}
/// Persistent document encryption keys storage
pub struct PersistentKeyStorage {
db: Arc<KeyValueDB>,
db: Arc<dyn KeyValueDB>,
}
/// Persistent document encryption keys storage iterator
pub struct PersistentKeyStorageIterator<'a> {
iter: Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>,
iter: Box<dyn Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>,
}
/// V3 of encrypted key share, as it is stored by key storage on the single key server.
@ -115,7 +115,7 @@ struct SerializableDocumentKeyShareVersionV3 {
impl PersistentKeyStorage {
/// Create new persistent document encryption keys storage
pub fn new(db: Arc<KeyValueDB>) -> Result<Self, Error> {
pub fn new(db: Arc<dyn KeyValueDB>) -> Result<Self, Error> {
let db = upgrade_db(db)?;
Ok(PersistentKeyStorage {
@ -124,7 +124,7 @@ impl PersistentKeyStorage {
}
}
fn upgrade_db(db: Arc<KeyValueDB>) -> Result<Arc<KeyValueDB>, Error> {
fn upgrade_db(db: Arc<dyn KeyValueDB>) -> Result<Arc<dyn KeyValueDB>, Error> {
let version = db.get(None, DB_META_KEY_VERSION)?;
let version = version.and_then(|v| v.get(0).cloned());
match version {
@ -185,7 +185,7 @@ impl KeyStorage for PersistentKeyStorage {
.unwrap_or(false)
}
fn iter<'a>(&'a self) -> Box<Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a> {
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a> {
Box::new(PersistentKeyStorageIterator {
iter: self.db.iter(None),
})
@ -336,7 +336,7 @@ pub mod tests {
self.keys.read().contains_key(document)
}
fn iter<'a>(&'a self) -> Box<Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a> {
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a> {
Box::new(self.keys.read().clone().into_iter())
}
}

View File

@ -91,11 +91,11 @@ pub use self::node_key_pair::PlainNodeKeyPair;
pub use self::node_key_pair::KeyStoreNodeKeyPair;
/// Start new key server instance
pub fn start(client: Arc<Client>, sync: Arc<SyncProvider>, miner: Arc<Miner>, self_key_pair: Arc<NodeKeyPair>, mut config: ServiceConfiguration,
db: Arc<KeyValueDB>, executor: Executor) -> Result<Box<KeyServer>, Error>
pub fn start(client: Arc<Client>, sync: Arc<dyn SyncProvider>, miner: Arc<Miner>, self_key_pair: Arc<dyn NodeKeyPair>, mut config: ServiceConfiguration,
db: Arc<dyn KeyValueDB>, executor: Executor) -> Result<Box<dyn KeyServer>, Error>
{
let trusted_client = trusted_client::TrustedClient::new(self_key_pair.clone(), client.clone(), sync, miner);
let acl_storage: Arc<acl_storage::AclStorage> = match config.acl_check_contract_address.take() {
let acl_storage: Arc<dyn acl_storage::AclStorage> = match config.acl_check_contract_address.take() {
Some(acl_check_contract_address) => acl_storage::OnChainAclStorage::new(trusted_client.clone(), acl_check_contract_address)?,
None => Arc::new(acl_storage::DummyAclStorage::default()),
};
@ -106,7 +106,7 @@ pub fn start(client: Arc<Client>, sync: Arc<SyncProvider>, miner: Arc<Miner>, se
let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set.clone(), self_key_pair.clone(),
acl_storage.clone(), key_storage.clone(), executor.clone())?);
let cluster = key_server.cluster();
let key_server: Arc<KeyServer> = key_server;
let key_server: Arc<dyn KeyServer> = key_server;
// prepare HTTP listener
let http_listener = match config.listener_address {
@ -123,7 +123,7 @@ pub fn start(client: Arc<Client>, sync: Arc<SyncProvider>, miner: Arc<Miner>, se
address,
self_key_pair.clone()));
let mut contracts: Vec<Arc<listener::service_contract::ServiceContract>> = Vec::new();
let mut contracts: Vec<Arc<dyn listener::service_contract::ServiceContract>> = Vec::new();
config.service_contract_address.map(|address|
create_service_contract(address,
listener::service_contract::SERVICE_CONTRACT_REGISTRY_NAME.to_owned(),
@ -150,7 +150,7 @@ pub fn start(client: Arc<Client>, sync: Arc<SyncProvider>, miner: Arc<Miner>, se
listener::ApiMask { document_key_shadow_retrieval_requests: true, ..Default::default() }))
.map(|l| contracts.push(l));
let contract: Option<Arc<listener::service_contract::ServiceContract>> = match contracts.len() {
let contract: Option<Arc<dyn listener::service_contract::ServiceContract>> = match contracts.len() {
0 => None,
1 => Some(contracts.pop().expect("contract.len() is 1; qed")),
_ => Some(Arc::new(listener::service_contract_aggregate::OnChainServiceContractAggregate::new(contracts))),

View File

@ -89,13 +89,13 @@ struct KeyServerHttpHandler {
/// Shared http handler
struct KeyServerSharedHttpHandler {
key_server: Weak<KeyServer>,
key_server: Weak<dyn KeyServer>,
}
impl KeyServerHttpListener {
/// Start KeyServer http listener
pub fn start(listener_address: NodeAddress, cors_domains: Option<Vec<String>>, key_server: Weak<KeyServer>, executor: Executor) -> Result<Self, Error> {
pub fn start(listener_address: NodeAddress, cors_domains: Option<Vec<String>>, key_server: Weak<dyn KeyServer>, executor: Executor) -> Result<Self, Error> {
let shared_handler = Arc::new(KeyServerSharedHttpHandler {
key_server: key_server,
});
@ -130,7 +130,7 @@ impl KeyServerHttpListener {
}
impl KeyServerHttpHandler {
fn key_server(&self) -> Result<Arc<KeyServer>, Error> {
fn key_server(&self) -> Result<Arc<dyn KeyServer>, Error> {
self.handler.key_server.upgrade()
.ok_or_else(|| Error::Internal("KeyServer is already destroyed".into()))
}
@ -142,7 +142,7 @@ impl KeyServerHttpHandler {
path: &str,
req_body: &[u8],
cors: AllowCors<AccessControlAllowOrigin>,
) -> Box<Future<Item=HttpResponse<Body>, Error=hyper::Error> + Send> {
) -> Box<dyn Future<Item=HttpResponse<Body>, Error=hyper::Error> + Send> {
match parse_request(&req_method, &path, &req_body) {
Request::GenerateServerKey(document, signature, threshold) =>
Box::new(result(self.key_server())
@ -219,7 +219,7 @@ impl Service for KeyServerHttpHandler {
type ReqBody = Body;
type ResBody = Body;
type Error = hyper::Error;
type Future = Box<Future<Item = HttpResponse<Self::ResBody>, Error=Self::Error> + Send>;
type Future = Box<dyn Future<Item = HttpResponse<Self::ResBody>, Error=Self::Error> + Send>;
fn call(&mut self, req: HttpRequest<Body>) -> Self::Future {
let cors = cors::get_cors_allow_origin(
@ -462,7 +462,7 @@ mod tests {
#[test]
fn http_listener_successfully_drops() {
let key_server: Arc<KeyServer> = Arc::new(DummyKeyServer::default());
let key_server: Arc<dyn KeyServer> = Arc::new(DummyKeyServer::default());
let address = NodeAddress { address: "127.0.0.1".into(), port: 9000 };
let runtime = Runtime::with_thread_count(1);
let listener = KeyServerHttpListener::start(address, None, Arc::downgrade(&key_server),

View File

@ -42,7 +42,7 @@ pub struct ApiMask {
/// Combined HTTP + service contract listener.
pub struct Listener {
key_server: Arc<KeyServer>,
key_server: Arc<dyn KeyServer>,
_http: Option<http_listener::KeyServerHttpListener>,
_contract: Option<Arc<service_contract_listener::ServiceContractListener>>,
}
@ -61,7 +61,7 @@ impl ApiMask {
impl Listener {
/// Create new listener.
pub fn new(key_server: Arc<KeyServer>, http: Option<http_listener::KeyServerHttpListener>, contract: Option<Arc<service_contract_listener::ServiceContractListener>>) -> Self {
pub fn new(key_server: Arc<dyn KeyServer>, http: Option<http_listener::KeyServerHttpListener>, contract: Option<Arc<service_contract_listener::ServiceContractListener>>) -> Self {
Self {
key_server: key_server,
_http: http,
@ -78,7 +78,7 @@ impl ServerKeyGenerator for Listener {
key_id: ServerKeyId,
author: Requester,
threshold: usize,
) -> Box<Future<Item=Public, Error=Error> + Send> {
) -> Box<dyn Future<Item=Public, Error=Error> + Send> {
self.key_server.generate_key(key_id, author, threshold)
}
@ -86,7 +86,7 @@ impl ServerKeyGenerator for Listener {
&self,
key_id: ServerKeyId,
author: Requester,
) -> Box<Future<Item=Public, Error=Error> + Send> {
) -> Box<dyn Future<Item=Public, Error=Error> + Send> {
self.key_server.restore_key_public(key_id, author)
}
}
@ -98,7 +98,7 @@ impl DocumentKeyServer for Listener {
author: Requester,
common_point: Public,
encrypted_document_key: Public,
) -> Box<Future<Item=(), Error=Error> + Send> {
) -> Box<dyn Future<Item=(), Error=Error> + Send> {
self.key_server.store_document_key(key_id, author, common_point, encrypted_document_key)
}
@ -107,7 +107,7 @@ impl DocumentKeyServer for Listener {
key_id: ServerKeyId,
author: Requester,
threshold: usize,
) -> Box<Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
self.key_server.generate_document_key(key_id, author, threshold)
}
@ -115,7 +115,7 @@ impl DocumentKeyServer for Listener {
&self,
key_id: ServerKeyId,
requester: Requester,
) -> Box<Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
self.key_server.restore_document_key(key_id, requester)
}
@ -123,7 +123,7 @@ impl DocumentKeyServer for Listener {
&self,
key_id: ServerKeyId,
requester: Requester,
) -> Box<Future<Item=EncryptedDocumentKeyShadow, Error=Error> + Send> {
) -> Box<dyn Future<Item=EncryptedDocumentKeyShadow, Error=Error> + Send> {
self.key_server.restore_document_key_shadow(key_id, requester)
}
}
@ -134,7 +134,7 @@ impl MessageSigner for Listener {
key_id: ServerKeyId,
requester: Requester,
message: MessageHash,
) -> Box<Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
self.key_server.sign_message_schnorr(key_id, requester, message)
}
@ -143,7 +143,7 @@ impl MessageSigner for Listener {
key_id: ServerKeyId,
requester: Requester,
message: MessageHash,
) -> Box<Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
self.key_server.sign_message_ecdsa(key_id, requester, message)
}
}
@ -154,7 +154,7 @@ impl AdminSessionsServer for Listener {
old_set_signature: RequestSignature,
new_set_signature: RequestSignature,
new_servers_set: BTreeSet<NodeId>,
) -> Box<Future<Item=(), Error=Error> + Send> {
) -> Box<dyn Future<Item=(), Error=Error> + Send> {
self.key_server.change_servers_set(old_set_signature, new_set_signature, new_servers_set)
}
}

View File

@ -70,9 +70,9 @@ pub trait ServiceContract: Send + Sync {
/// Update contract when new blocks are enacted. Returns true if contract is installed && up-to-date (i.e. chain is synced).
fn update(&self) -> bool;
/// Read recent contract logs. Returns topics of every entry.
fn read_logs(&self) -> Box<Iterator<Item=ServiceTask>>;
fn read_logs(&self) -> Box<dyn Iterator<Item=ServiceTask>>;
/// Publish generated key.
fn read_pending_requests(&self) -> Box<Iterator<Item=(bool, ServiceTask)>>;
fn read_pending_requests(&self) -> Box<dyn Iterator<Item=(bool, ServiceTask)>>;
/// Publish generated server key.
fn publish_generated_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public) -> Result<(), String>;
/// Publish server key generation error.
@ -100,7 +100,7 @@ pub struct OnChainServiceContract {
/// Blockchain client.
client: TrustedClient,
/// This node key pair.
self_key_pair: Arc<NodeKeyPair>,
self_key_pair: Arc<dyn NodeKeyPair>,
/// Contract registry name (if any).
name: String,
/// Contract address source.
@ -138,7 +138,7 @@ struct DocumentKeyShadowRetrievalService;
impl OnChainServiceContract {
/// Create new on-chain service contract.
pub fn new(mask: ApiMask, client: TrustedClient, name: String, address_source: ContractAddress, self_key_pair: Arc<NodeKeyPair>) -> Self {
pub fn new(mask: ApiMask, client: TrustedClient, name: String, address_source: ContractAddress, self_key_pair: Arc<dyn NodeKeyPair>) -> Self {
let contract = OnChainServiceContract {
mask: mask,
client: client,
@ -191,8 +191,8 @@ impl OnChainServiceContract {
/// Create task-specific pending requests iterator.
fn create_pending_requests_iterator<
C: 'static + Fn(&Client, &Address, &BlockId) -> Result<U256, String>,
R: 'static + Fn(&NodeKeyPair, &Client, &Address, &BlockId, U256) -> Result<(bool, ServiceTask), String>
>(&self, client: Arc<Client>, contract_address: &Address, block: &BlockId, get_count: C, read_item: R) -> Box<Iterator<Item=(bool, ServiceTask)>> {
R: 'static + Fn(&dyn NodeKeyPair, &Client, &Address, &BlockId, U256) -> Result<(bool, ServiceTask), String>
>(&self, client: Arc<Client>, contract_address: &Address, block: &BlockId, get_count: C, read_item: R) -> Box<dyn Iterator<Item=(bool, ServiceTask)>> {
get_count(&*client, contract_address, block)
.map(|count| {
let client = client.clone();
@ -209,7 +209,7 @@ impl OnChainServiceContract {
.ok(),
index: 0.into(),
length: count,
}) as Box<Iterator<Item=(bool, ServiceTask)>>
}) as Box<dyn Iterator<Item=(bool, ServiceTask)>>
})
.map_err(|error| {
warn!(target: "secretstore", "{}: creating pending requests iterator failed: {}",
@ -240,7 +240,7 @@ impl ServiceContract for OnChainServiceContract {
self.update_contract_address() && self.client.get().is_some()
}
fn read_logs(&self) -> Box<Iterator<Item=ServiceTask>> {
fn read_logs(&self) -> Box<dyn Iterator<Item=ServiceTask>> {
let client = match self.client.get() {
Some(client) => client,
None => {
@ -310,7 +310,7 @@ impl ServiceContract for OnChainServiceContract {
}).collect::<Vec<_>>().into_iter())
}
fn read_pending_requests(&self) -> Box<Iterator<Item=(bool, ServiceTask)>> {
fn read_pending_requests(&self) -> Box<dyn Iterator<Item=(bool, ServiceTask)>> {
let client = match self.client.get() {
Some(client) => client,
None => return Box::new(::std::iter::empty()),
@ -327,7 +327,7 @@ impl ServiceContract for OnChainServiceContract {
let iter = match self.mask.server_key_generation_requests {
true => Box::new(self.create_pending_requests_iterator(client.clone(), &contract_address, &block,
&ServerKeyGenerationService::read_pending_requests_count,
&ServerKeyGenerationService::read_pending_request)) as Box<Iterator<Item=(bool, ServiceTask)>>,
&ServerKeyGenerationService::read_pending_request)) as Box<dyn Iterator<Item=(bool, ServiceTask)>>,
false => Box::new(::std::iter::empty()),
};
let iter = match self.mask.server_key_retrieval_requests {
@ -484,7 +484,7 @@ impl ServerKeyGenerationService {
}
/// Read pending request.
fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
fn read_pending_request(self_key_pair: &dyn NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
let self_address = public_to_address(self_key_pair.public());
let (encoded, decoder) = service::functions::get_server_key_generation_request::call(index);
@ -544,7 +544,7 @@ impl ServerKeyRetrievalService {
}
/// Read pending request.
fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
fn read_pending_request(self_key_pair: &dyn NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
let self_address = public_to_address(self_key_pair.public());
let (encoded, decoder) = service::functions::get_server_key_retrieval_request::call(index);
@ -607,7 +607,7 @@ impl DocumentKeyStoreService {
}
/// Read pending request.
fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
fn read_pending_request(self_key_pair: &dyn NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
let self_address = public_to_address(self_key_pair.public());
let (encoded, decoder) = service::functions::get_document_key_store_request::call(index);
let (server_key_id, author, common_point, encrypted_point) = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?)
@ -687,7 +687,7 @@ impl DocumentKeyShadowRetrievalService {
}
/// Read pending request.
fn read_pending_request(self_key_pair: &NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
fn read_pending_request(self_key_pair: &dyn NodeKeyPair, client: &Client, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
let self_address = public_to_address(self_key_pair.public());
let (encoded, decoder) = service::functions::get_document_key_shadow_retrieval_request::call(index);
@ -781,11 +781,11 @@ pub mod tests {
true
}
fn read_logs(&self) -> Box<Iterator<Item=ServiceTask>> {
fn read_logs(&self) -> Box<dyn Iterator<Item=ServiceTask>> {
Box::new(self.logs.clone().into_iter())
}
fn read_pending_requests(&self) -> Box<Iterator<Item=(bool, ServiceTask)>> {
fn read_pending_requests(&self) -> Box<dyn Iterator<Item=(bool, ServiceTask)>> {
Box::new(self.pending_requests.clone().into_iter())
}

View File

@ -25,12 +25,12 @@ use {ServerKeyId};
/// Aggregated on-chain service contract.
pub struct OnChainServiceContractAggregate {
/// All hosted service contracts.
contracts: Vec<Arc<ServiceContract>>,
contracts: Vec<Arc<dyn ServiceContract>>,
}
impl OnChainServiceContractAggregate {
/// Create new aggregated service contract listener.
pub fn new(contracts: Vec<Arc<ServiceContract>>) -> Self {
pub fn new(contracts: Vec<Arc<dyn ServiceContract>>) -> Self {
debug_assert!(contracts.len() > 1);
OnChainServiceContractAggregate {
contracts: contracts,
@ -47,15 +47,15 @@ impl ServiceContract for OnChainServiceContractAggregate {
result
}
fn read_logs(&self) -> Box<Iterator<Item=ServiceTask>> {
fn read_logs(&self) -> Box<dyn Iterator<Item=ServiceTask>> {
self.contracts.iter()
.fold(Box::new(::std::iter::empty()) as Box<Iterator<Item=ServiceTask>>, |i, c|
.fold(Box::new(::std::iter::empty()) as Box<dyn Iterator<Item=ServiceTask>>, |i, c|
Box::new(i.chain(c.read_logs())))
}
fn read_pending_requests(&self) -> Box<Iterator<Item=(bool, ServiceTask)>> {
fn read_pending_requests(&self) -> Box<dyn Iterator<Item=(bool, ServiceTask)>> {
self.contracts.iter()
.fold(Box::new(::std::iter::empty()) as Box<Iterator<Item=(bool, ServiceTask)>>, |i, c|
.fold(Box::new(::std::iter::empty()) as Box<dyn Iterator<Item=(bool, ServiceTask)>>, |i, c|
Box::new(i.chain(c.read_pending_requests())))
}

View File

@ -62,17 +62,17 @@ pub struct ServiceContractListener {
/// Service contract listener parameters.
pub struct ServiceContractListenerParams {
/// Service contract.
pub contract: Arc<ServiceContract>,
pub contract: Arc<dyn ServiceContract>,
/// This node key pair.
pub self_key_pair: Arc<NodeKeyPair>,
pub self_key_pair: Arc<dyn NodeKeyPair>,
/// Key servers set.
pub key_server_set: Arc<KeyServerSet>,
pub key_server_set: Arc<dyn KeyServerSet>,
/// ACL storage reference.
pub acl_storage: Arc<AclStorage>,
pub acl_storage: Arc<dyn AclStorage>,
/// Cluster reference.
pub cluster: Arc<ClusterClient>,
pub cluster: Arc<dyn ClusterClient>,
/// Key storage reference.
pub key_storage: Arc<KeyStorage>,
pub key_storage: Arc<dyn KeyStorage>,
}
/// Service contract listener data.
@ -84,17 +84,17 @@ struct ServiceContractListenerData {
/// Service tasks queue.
pub tasks_queue: Arc<TasksQueue<ServiceTask>>,
/// Service contract.
pub contract: Arc<ServiceContract>,
pub contract: Arc<dyn ServiceContract>,
/// ACL storage reference.
pub acl_storage: Arc<AclStorage>,
pub acl_storage: Arc<dyn AclStorage>,
/// Cluster client reference.
pub cluster: Arc<ClusterClient>,
pub cluster: Arc<dyn ClusterClient>,
/// This node key pair.
pub self_key_pair: Arc<NodeKeyPair>,
pub self_key_pair: Arc<dyn NodeKeyPair>,
/// Key servers set.
pub key_server_set: Arc<KeyServerSet>,
pub key_server_set: Arc<dyn KeyServerSet>,
/// Key storage reference.
pub key_storage: Arc<KeyStorage>,
pub key_storage: Arc<dyn KeyStorage>,
}
@ -561,7 +561,7 @@ fn log_service_task_result(task: &ServiceTask, self_id: &Public, result: Result<
}
/// Returns true when session, related to `server_key_id` must be started on `node`.
fn is_processed_by_this_key_server(key_server_set: &KeyServerSet, node: &NodeId, server_key_id: &H256) -> bool {
fn is_processed_by_this_key_server(key_server_set: &dyn KeyServerSet, node: &NodeId, server_key_id: &H256) -> bool {
let servers = key_server_set.snapshot().current_set;
let total_servers_count = servers.len();
match total_servers_count {
@ -613,7 +613,7 @@ mod tests {
key_storage
}
fn make_servers_set(is_isolated: bool) -> Arc<KeyServerSet> {
fn make_servers_set(is_isolated: bool) -> Arc<dyn KeyServerSet> {
Arc::new(MapKeyServerSet::new(is_isolated, vec![
("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8".parse().unwrap(),
"127.0.0.1:8080".parse().unwrap()),
@ -624,7 +624,7 @@ mod tests {
].into_iter().collect()))
}
fn make_service_contract_listener(contract: Option<Arc<ServiceContract>>, cluster: Option<Arc<DummyClusterClient>>, key_storage: Option<Arc<KeyStorage>>, acl_storage: Option<Arc<AclStorage>>, servers_set: Option<Arc<KeyServerSet>>) -> Arc<ServiceContractListener> {
fn make_service_contract_listener(contract: Option<Arc<dyn ServiceContract>>, cluster: Option<Arc<DummyClusterClient>>, key_storage: Option<Arc<dyn KeyStorage>>, acl_storage: Option<Arc<dyn AclStorage>>, servers_set: Option<Arc<dyn KeyServerSet>>) -> Arc<ServiceContractListener> {
let contract = contract.unwrap_or_else(|| Arc::new(DummyServiceContract::default()));
let cluster = cluster.unwrap_or_else(|| Arc::new(DummyClusterClient::default()));
let key_storage = key_storage.unwrap_or_else(|| Arc::new(DummyKeyStorage::default()));

View File

@ -45,7 +45,7 @@ pub trait ServerKeyGenerator {
key_id: ServerKeyId,
author: Requester,
threshold: usize,
) -> Box<Future<Item=Public, Error=Error> + Send>;
) -> Box<dyn Future<Item=Public, Error=Error> + Send>;
/// Retrieve public portion of previously generated SK.
/// `key_id` is identifier of previously generated SK.
/// `author` is the same author, that has created the server key.
@ -53,7 +53,7 @@ pub trait ServerKeyGenerator {
&self,
key_id: ServerKeyId,
author: Requester,
) -> Box<Future<Item=Public, Error=Error> + Send>;
) -> Box<dyn Future<Item=Public, Error=Error> + Send>;
}
/// Document key (DK) server.
@ -70,7 +70,7 @@ pub trait DocumentKeyServer: ServerKeyGenerator {
author: Requester,
common_point: Public,
encrypted_document_key: Public,
) -> Box<Future<Item=(), Error=Error> + Send>;
) -> Box<dyn Future<Item=(), Error=Error> + Send>;
/// Generate and store both SK and DK. This is a shortcut for consequent calls of `generate_key` and `store_document_key`.
/// The only difference is that DK is generated by DocumentKeyServer (which might be considered unsafe).
/// `key_id` is the caller-provided identifier of generated SK.
@ -82,7 +82,7 @@ pub trait DocumentKeyServer: ServerKeyGenerator {
key_id: ServerKeyId,
author: Requester,
threshold: usize,
) -> Box<Future<Item=EncryptedDocumentKey, Error=Error> + Send>;
) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send>;
/// Restore previously stored DK.
/// DK is decrypted on the key server (which might be considered unsafe), and then encrypted with caller public key.
/// `key_id` is identifier of previously generated SK.
@ -92,7 +92,7 @@ pub trait DocumentKeyServer: ServerKeyGenerator {
&self,
key_id: ServerKeyId,
requester: Requester,
) -> Box<Future<Item=EncryptedDocumentKey, Error=Error> + Send>;
) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send>;
/// Restore previously stored DK.
/// To decrypt DK on client:
/// 1) use requestor secret key to decrypt secret coefficients from result.decrypt_shadows
@ -104,7 +104,7 @@ pub trait DocumentKeyServer: ServerKeyGenerator {
&self,
key_id: ServerKeyId,
requester: Requester,
) -> Box<Future<Item=EncryptedDocumentKeyShadow, Error=Error> + Send>;
) -> Box<dyn Future<Item=EncryptedDocumentKeyShadow, Error=Error> + Send>;
}
/// Message signer.
@ -119,7 +119,7 @@ pub trait MessageSigner: ServerKeyGenerator {
key_id: ServerKeyId,
requester: Requester,
message: MessageHash,
) -> Box<Future<Item=EncryptedMessageSignature, Error=Error> + Send>;
) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send>;
/// Generate ECDSA signature for message with previously generated SK.
/// WARNING: only possible when SK was generated using t <= 2 * N.
/// `key_id` is the caller-provided identifier of generated SK.
@ -131,7 +131,7 @@ pub trait MessageSigner: ServerKeyGenerator {
key_id: ServerKeyId,
signature: Requester,
message: MessageHash,
) -> Box<Future<Item=EncryptedMessageSignature, Error=Error> + Send>;
) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send>;
}
/// Administrative sessions server.
@ -145,7 +145,7 @@ pub trait AdminSessionsServer {
old_set_signature: RequestSignature,
new_set_signature: RequestSignature,
new_servers_set: BTreeSet<NodeId>,
) -> Box<Future<Item=(), Error=Error> + Send>;
) -> Box<dyn Future<Item=(), Error=Error> + Send>;
}
/// Key server.

View File

@ -33,11 +33,11 @@ use {Error, NodeKeyPair, ContractAddress};
/// 'Trusted' client weak reference.
pub struct TrustedClient {
/// This key server node key pair.
self_key_pair: Arc<NodeKeyPair>,
self_key_pair: Arc<dyn NodeKeyPair>,
/// Blockchain client.
client: Weak<Client>,
/// Sync provider.
sync: Weak<SyncProvider>,
sync: Weak<dyn SyncProvider>,
/// Miner service.
miner: Weak<Miner>,
}

View File

@ -871,7 +871,7 @@ mod test {
type ReqBody = hyper::Body;
type ResBody = hyper::Body;
type Error = Error;
type Future = Box<Future<Item=hyper::Response<Self::ResBody>, Error=Self::Error> + Send + 'static>;
type Future = Box<dyn Future<Item=hyper::Response<Self::ResBody>, Error=Self::Error> + Send + 'static>;
fn call(&mut self, req: hyper::Request<hyper::Body>) -> Self::Future {
match req.uri().path() {