Switch usage of Secret Store to the external lib (#11487)

parent 3357cfb3e5
commit 9477bae6dc

Cargo.lock (generated): 74 lines changed
@@ -1582,43 +1582,6 @@ dependencies = [
  "vm",
 ]
 
-[[package]]
-name = "ethcore-secretstore"
-version = "1.0.0"
-dependencies = [
- "byteorder",
- "env_logger 0.5.13",
- "ethabi",
- "ethabi-contract",
- "ethabi-derive",
- "ethereum-types",
- "ethkey",
- "futures",
- "hyper",
- "jsonrpc-server-utils",
- "keccak-hash",
- "kvdb",
- "kvdb-rocksdb",
- "lazy_static",
- "libsecp256k1",
- "log",
- "parity-bytes",
- "parity-crypto",
- "parity-runtime",
- "parking_lot 0.10.0",
- "percent-encoding 2.1.0",
- "rustc-hex 1.0.0",
- "serde",
- "serde_derive",
- "serde_json",
- "tempdir",
- "tiny-keccak 1.5.0",
- "tokio",
- "tokio-io",
- "tokio-service",
- "url 2.1.0",
-]
-
 [[package]]
 name = "ethcore-service"
 version = "0.1.0"
@@ -3303,7 +3266,6 @@ dependencies = [
  "ethcore-miner",
  "ethcore-network",
  "ethcore-private-tx",
- "ethcore-secretstore",
  "ethcore-service",
  "ethcore-sync",
  "ethereum-types",
@@ -3333,6 +3295,7 @@ dependencies = [
  "parity-path",
  "parity-rpc",
  "parity-runtime",
+ "parity-secretstore",
  "parity-updater",
  "parity-util-mem",
  "parity-version",
@@ -3533,6 +3496,41 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "parity-secretstore"
+version = "1.0.0"
+source = "git+https://github.com/paritytech/secret-store?rev=ebe751d#ebe751db6af07425d2e1823ac05a84d0fafe3dad"
+dependencies = [
+ "byteorder",
+ "ethabi",
+ "ethabi-contract",
+ "ethabi-derive",
+ "ethereum-types",
+ "futures",
+ "hyper",
+ "jsonrpc-server-utils",
+ "keccak-hash",
+ "kvdb",
+ "kvdb-rocksdb",
+ "lazy_static",
+ "libsecp256k1",
+ "log",
+ "parity-bytes",
+ "parity-crypto",
+ "parity-runtime",
+ "parking_lot 0.10.0",
+ "percent-encoding 2.1.0",
+ "rustc-hex 1.0.0",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "tiny-keccak 1.5.0",
+ "tokio",
+ "tokio-io",
+ "tokio-service",
+ "url 2.1.0",
+]
+
 [[package]]
 name = "parity-snappy"
 version = "0.1.0"
Cargo.toml:

@@ -30,7 +30,6 @@ ethcore-logger = { path = "parity/logger" }
 ethcore-miner = { path = "miner" }
 ethcore-network = { path = "util/network" }
 ethcore-private-tx = { path = "ethcore/private-tx" }
-ethcore-secretstore = { path = "secret-store", optional = true }
 ethcore-service = { path = "ethcore/service" }
 ethcore-sync = { path = "ethcore/sync" }
 ethereum-types = "0.8.0"
@@ -58,6 +57,7 @@ parity-local-store = { path = "miner/local-store" }
 parity-path = "0.1"
 parity-rpc = { path = "rpc" }
 parity-runtime = "0.1.1"
+parity-secretstore = { git = "https://github.com/paritytech/secret-store", rev = "ebe751d", optional = true }
 parity-updater = { path = "updater" }
 parity-util-mem = { version = "0.5.1", features = ["jemalloc-global"] }
 parity-version = { path = "util/version" }
@@ -99,7 +99,7 @@ test-heavy = ["ethcore/test-heavy"]
 evm-debug = ["ethcore/evm-debug"]
 evm-debug-tests = ["ethcore/evm-debug-tests"]
 slow-blocks = ["ethcore/slow-blocks"]
-secretstore = ["ethcore-secretstore", "accounts", "ethabi", "ethcore-call-contract"]
+secretstore = ["parity-secretstore", "accounts", "ethabi", "ethcore-call-contract"]
 final = ["parity-version/final"]
 deadlock_detection = ["parking_lot/deadlock_detection"]
 # to create a memory profile (requires nightly rust), use e.g.
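Note: `parity-secretstore` stays `optional = true` and the `secretstore` feature keeps its old name, so existing `--features secretstore` builds keep working; only the crate behind the feature changes. Downstream code keeps the same gate, e.g. (hypothetical helper, not part of this diff):

	// Compiled only when parity is built with the `secretstore` feature;
	// parity_secretstore is only linked in that configuration.
	#[cfg(feature = "secretstore")]
	fn secretstore_enabled() -> bool { true }

	#[cfg(not(feature = "secretstore"))]
	fn secretstore_enabled() -> bool { false }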
@@ -84,7 +84,7 @@ extern crate log as rlog;
 extern crate ethcore_accounts as accounts;
 
 #[cfg(feature = "secretstore")]
-extern crate ethcore_secretstore;
+extern crate parity_secretstore;
 
 #[cfg(feature = "secretstore")]
 extern crate ethabi;
@@ -37,7 +37,7 @@ use ethcore::miner::{Miner, MinerService};
 use parity_crypto::publickey::Error as EthKeyError;
 use sync::SyncProvider;
 use registrar::RegistrarClient;
-use ethcore_secretstore::{BlockId, BlockNumber, SecretStoreChain, NewBlocksNotify, SigningKeyPair, ContractAddress, Filter};
+use parity_secretstore::{BlockId, BlockNumber, SecretStoreChain, NewBlocksNotify, SigningKeyPair, ContractAddress, Filter};
 
 // TODO: Instead of a constant, make this based on consensus finality.
 /// Number of confirmations required before request can be processed.
@@ -22,7 +22,7 @@ use ethkey::Password;
 use parity_crypto::publickey::public_to_address;
 use ethereum_types::{H256, Address, Public};
 use parity_crypto::publickey::{Signature, Error as EthKeyError};
-use ethcore_secretstore::SigningKeyPair;
+use parity_secretstore::SigningKeyPair;
 
 pub struct KeyStoreNodeKeyPair {
 	account_provider: Arc<AccountProvider>,
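Note: `SigningKeyPair` is now the external crate's trait, with `KeyStoreNodeKeyPair` still adapting parity's account store to it. A minimal sketch of the impl's shape; the `public`/`password` fields and the error mapping are assumptions for illustration (the method set matches the deleted blockchain module further down in this diff):

	impl parity_secretstore::SigningKeyPair for KeyStoreNodeKeyPair {
		fn public(&self) -> &Public {
			&self.public // assumed field caching the account's public key
		}
		fn address(&self) -> Address {
			public_to_address(&self.public)
		}
		fn sign(&self, data: &H256) -> Result<Signature, EthKeyError> {
			// Delegates to the account provider; error mapping is illustrative.
			self.account_provider.sign(self.address(), Some(self.password.clone()), *data)
				.map_err(|e| EthKeyError::Custom(e.to_string()))
		}
	}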
@@ -123,7 +123,7 @@ mod server {
 #[cfg(feature = "secretstore")]
 mod server {
 	use std::sync::Arc;
-	use ethcore_secretstore;
+	use parity_secretstore;
 	use parity_crypto::publickey::KeyPair;
 	use ansi_term::Colour::{Red, White};
 	use super::{Configuration, Dependencies, NodeSecretKey, ContractAddress, Executor};
@@ -131,23 +131,23 @@ mod server {
 	#[cfg(feature = "accounts")]
 	use super::super::KeyStoreNodeKeyPair;
 
-	fn into_service_contract_address(address: ContractAddress) -> ethcore_secretstore::ContractAddress {
+	fn into_service_contract_address(address: ContractAddress) -> parity_secretstore::ContractAddress {
 		match address {
-			ContractAddress::Registry => ethcore_secretstore::ContractAddress::Registry,
-			ContractAddress::Address(address) => ethcore_secretstore::ContractAddress::Address(address),
+			ContractAddress::Registry => parity_secretstore::ContractAddress::Registry,
+			ContractAddress::Address(address) => parity_secretstore::ContractAddress::Address(address),
 		}
 	}
 
 	/// Key server
 	pub struct KeyServer {
-		_key_server: Box<dyn ethcore_secretstore::KeyServer>,
+		_key_server: Box<dyn parity_secretstore::KeyServer>,
 	}
 
 	impl KeyServer {
 		/// Create new key server
 		pub fn new(mut conf: Configuration, deps: Dependencies, executor: Executor) -> Result<Self, String> {
-			let self_secret: Arc<dyn ethcore_secretstore::SigningKeyPair> = match conf.self_secret.take() {
-				Some(NodeSecretKey::Plain(secret)) => Arc::new(ethcore_secretstore::PlainNodeKeyPair::new(
+			let self_secret: Arc<dyn parity_secretstore::SigningKeyPair> = match conf.self_secret.take() {
+				Some(NodeSecretKey::Plain(secret)) => Arc::new(parity_secretstore::PlainNodeKeyPair::new(
 					KeyPair::from_secret(secret).map_err(|e| format!("invalid secret: {}", e))?)),
 				#[cfg(feature = "accounts")]
 				Some(NodeSecretKey::KeyStore(account)) => {
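Note: the CLI's `ContractAddress` and the library's `ContractAddress` are structurally identical enums, so the configuration layer keeps its own type and converts only at this boundary rather than leaking the library type into argument parsing. The hunks below apply the conversion mechanically on every configured contract address:

	let acl = conf.acl_check_contract_address.map(into_service_contract_address);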
@@ -177,8 +177,8 @@ mod server {
 			}
 
 			let key_server_name = format!("{}:{}", conf.interface, conf.port);
-			let mut cconf = ethcore_secretstore::ServiceConfiguration {
-				listener_address: if conf.http_enabled { Some(ethcore_secretstore::NodeAddress {
+			let mut cconf = parity_secretstore::ServiceConfiguration {
+				listener_address: if conf.http_enabled { Some(parity_secretstore::NodeAddress {
 					address: conf.http_interface.clone(),
 					port: conf.http_port,
 				}) } else { None },
@@ -188,12 +188,12 @@ mod server {
 				service_contract_doc_store_address: conf.service_contract_doc_store_address.map(into_service_contract_address),
 				service_contract_doc_sretr_address: conf.service_contract_doc_sretr_address.map(into_service_contract_address),
 				acl_check_contract_address: conf.acl_check_contract_address.map(into_service_contract_address),
-				cluster_config: ethcore_secretstore::ClusterConfiguration {
-					listener_address: ethcore_secretstore::NodeAddress {
+				cluster_config: parity_secretstore::ClusterConfiguration {
+					listener_address: parity_secretstore::NodeAddress {
 						address: conf.interface.clone(),
 						port: conf.port,
 					},
-					nodes: conf.nodes.into_iter().map(|(p, (ip, port))| (p, ethcore_secretstore::NodeAddress {
+					nodes: conf.nodes.into_iter().map(|(p, (ip, port))| (p, parity_secretstore::NodeAddress {
 						address: ip,
 						port: port,
 					})).collect(),
@@ -207,9 +207,9 @@ mod server {
 
 			cconf.cluster_config.nodes.insert(self_secret.public().clone(), cconf.cluster_config.listener_address.clone());
 
-			let db = ethcore_secretstore::open_secretstore_db(&conf.data_path)?;
+			let db = parity_secretstore::open_secretstore_db(&conf.data_path)?;
 			let trusted_client = TrustedClient::new(self_secret.clone(), deps.client, deps.sync, deps.miner);
-			let key_server = ethcore_secretstore::start(trusted_client, self_secret, cconf, db, executor)
+			let key_server = parity_secretstore::start(trusted_client, self_secret, cconf, db, executor)
 				.map_err(|e| format!("Error starting KeyServer {}: {}", key_server_name, e))?;
 
 			Ok(KeyServer {
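Note: after this change the in-tree code is pure wiring: open the library's database, wrap parity's client stack in `TrustedClient` (the in-tree `SecretStoreChain` implementor; see the import hunk above), and hand both to the library's entry point. Condensed from the hunk above:

	// Boot sequence, exactly the calls shown in this hunk:
	let db = parity_secretstore::open_secretstore_db(&conf.data_path)?;
	let trusted_client = TrustedClient::new(self_secret.clone(), deps.client, deps.sync, deps.miner);
	let key_server = parity_secretstore::start(trusted_client, self_secret, cconf, db, executor)
		.map_err(|e| format!("Error starting KeyServer {}: {}", key_server_name, e))?;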
Deleted: secret-store/Cargo.toml (the in-tree crate's manifest; path taken from the old dependency entry above):

@@ -1,43 +0,0 @@
-[package]
-description = "Parity Ethereum (EthCore) Secret Store"
-name = "ethcore-secretstore"
-version = "1.0.0"
-license = "GPL-3.0"
-authors = ["Parity Technologies <admin@parity.io>"]
-
-[dependencies]
-byteorder = "1.0"
-ethabi = "9.0.1"
-ethabi-contract = "9.0.0"
-ethabi-derive = "9.0.1"
-ethereum-types = "0.8.0"
-ethkey = { path = "../accounts/ethkey", optional = true }
-futures = "0.1"
-hyper = { version = "0.12", default-features = false }
-keccak-hash = "0.4.0"
-kvdb = "0.4.0"
-kvdb-rocksdb = "0.5.0"
-lazy_static = "1.0"
-libsecp256k1 = { version = "0.3.5", default-features = false }
-log = "0.4"
-parity-bytes = "0.1"
-parity-crypto = { version = "0.5.0", features = ["publickey"] }
-parity-runtime = "0.1.1"
-parking_lot = "0.10.0"
-percent-encoding = "2.1.0"
-rustc-hex = "1.0"
-serde = "1.0"
-serde_derive = "1.0"
-serde_json = "1.0"
-tiny-keccak = "1.4"
-tokio = "0.1.22"
-tokio-io = "0.1"
-tokio-service = "0.1"
-url = "2.1.0"
-jsonrpc-server-utils = "14.0.3"
-
-[dev-dependencies]
-env_logger = "0.5"
-tempdir = "0.3"
-kvdb-rocksdb = "0.5.0"
-parity-runtime = { version = "0.1.1", features = ["test-helpers"] }
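Note: comparing this deleted manifest with the new `parity-secretstore` entry in Cargo.lock above, the optional `ethkey` dependency and the `env_logger`/`tempdir` dev-dependencies disappear from parity's lockfile; the remaining dependency set carries over unchanged.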
Deleted: res/acl_storage.json (consumed via use_contract! in the ACL module below):

@@ -1,3 +0,0 @@
-[
-	{"constant":true,"inputs":[{"name":"user","type":"address"},{"name":"document","type":"bytes32"}],"name":"checkPermissions","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}
-]
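Note: this three-line ABI (a single checkPermissions function) is what `use_contract!(acl_storage, "res/acl_storage.json")` in the module below compiles into a binding. The call pattern, lifted from that module with the error mapping simplified:

	// Encode the checkPermissions call, execute against the latest block,
	// decode the returned bool.
	let (encoded, decoder) = acl_storage::functions::check_permissions::call(requester, document.clone());
	let raw = client.call_contract(BlockId::Latest, contract_address, encoded)
		.map_err(Error::Internal)?;
	let allowed: bool = decoder.decode(&raw)
		.map_err(|e| Error::Internal(e.to_string()))?;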
Deleted ABI: key server set / migration contract:

@@ -1,24 +0,0 @@
-[
-	{"constant":true,"inputs":[],"name":"getMigrationMaster","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getMigrationKeyServerPublic","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":false,"inputs":[{"name":"id","type":"bytes32"}],"name":"startMigration","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
-	{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getCurrentKeyServerIndex","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getMigrationKeyServerAddress","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[],"name":"getMigrationId","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[],"name":"getNewKeyServers","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":false,"inputs":[{"name":"id","type":"bytes32"}],"name":"confirmMigration","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
-	{"constant":true,"inputs":[],"name":"getMigrationKeyServers","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"isMigrationConfirmed","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[],"name":"getCurrentKeyServersCount","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[],"name":"getCurrentKeyServers","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[],"name":"getCurrentLastChange","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getCurrentKeyServerPublic","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getNewKeyServerAddress","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getCurrentKeyServerAddress","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getNewKeyServerPublic","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[{"name":"index","type":"uint8"}],"name":"getCurrentKeyServer","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"anonymous":false,"inputs":[{"indexed":false,"name":"keyServer","type":"address"}],"name":"KeyServerAdded","type":"event"},
-	{"anonymous":false,"inputs":[{"indexed":false,"name":"keyServer","type":"address"}],"name":"KeyServerRemoved","type":"event"},
-	{"anonymous":false,"inputs":[],"name":"MigrationStarted","type":"event"},
-	{"anonymous":false,"inputs":[],"name":"MigrationCompleted","type":"event"}
-]
Deleted ABI: service contract (server key generation/retrieval and document key requests):

@@ -1,33 +0,0 @@
-[
-	{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"requireKeyServer","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},
-
-	{"constant":true,"inputs":[],"name":"serverKeyGenerationRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"serverKeyGenerationError","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
-	{"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getServerKeyGenerationRequest","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"address"},{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"}],"name":"serverKeyGenerated","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
-	{"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"keyServer","type":"address"}],"name":"isServerKeyGenerationResponseRequired","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"anonymous":false,"inputs":[{"indexed":false,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"author","type":"address"},{"indexed":false,"name":"threshold","type":"uint8"}],"name":"ServerKeyGenerationRequested","type":"event"},
-
-	{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"serverKeyRetrievalError","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
-	{"constant":true,"inputs":[],"name":"serverKeyRetrievalRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"keyServer","type":"address"}],"name":"isServerKeyRetrievalResponseRequired","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getServerKeyRetrievalRequest","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"},{"name":"threshold","type":"uint8"}],"name":"serverKeyRetrieved","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
-	{"anonymous":false,"inputs":[{"indexed":false,"name":"serverKeyId","type":"bytes32"}],"name":"ServerKeyRetrievalRequested","type":"event"},
-
-	{"constant":true,"inputs":[],"name":"documentKeyStoreRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"documentKeyStoreError","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
-	{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"documentKeyStored","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
-	{"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"keyServer","type":"address"}],"name":"isDocumentKeyStoreResponseRequired","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getDocumentKeyStoreRequest","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"address"},{"name":"","type":"bytes"},{"name":"","type":"bytes"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"anonymous":false,"inputs":[{"indexed":false,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"author","type":"address"},{"indexed":false,"name":"commonPoint","type":"bytes"},{"indexed":false,"name":"encryptedPoint","type":"bytes"}],"name":"DocumentKeyStoreRequested","type":"event"},
-
-	{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"requester","type":"address"},{"name":"commonPoint","type":"bytes"},{"name":"threshold","type":"uint8"}],"name":"documentKeyCommonRetrieved","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
-	{"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"keyServer","type":"address"},{"name":"requester","type":"address"}],"name":"isDocumentKeyShadowRetrievalResponseRequired","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"requester","type":"address"},{"name":"participants","type":"uint256"},{"name":"decryptedSecret","type":"bytes"},{"name":"shadow","type":"bytes"}],"name":"documentKeyPersonalRetrieved","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
-	{"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"requester","type":"address"}],"name":"documentKeyShadowRetrievalError","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
-	{"constant":true,"inputs":[],"name":"documentKeyShadowRetrievalRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getDocumentKeyShadowRetrievalRequest","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"bytes"},{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},
-	{"anonymous":false,"inputs":[{"indexed":false,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"requester","type":"address"}],"name":"DocumentKeyCommonRetrievalRequested","type":"event"},
-	{"anonymous":false,"inputs":[{"indexed":false,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"requesterPublic","type":"bytes"}],"name":"DocumentKeyPersonalRetrievalRequested","type":"event"}
-]
Deleted: the in-tree ACL storage module (AclStorage, OnChainAclStorage, DummyAclStorage):

@@ -1,140 +0,0 @@
-// Copyright 2015-2020 Parity Technologies (UK) Ltd.
-// This file is part of Parity Ethereum.
-
-// Parity Ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Ethereum is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
-
-use std::sync::Arc;
-use std::collections::{HashMap, HashSet};
-use parking_lot::{Mutex, RwLock};
-use ethereum_types::Address;
-use ethabi::FunctionOutputDecoder;
-use blockchain::{SecretStoreChain, NewBlocksNotify, ContractAddress, BlockId};
-use types::{Error, ServerKeyId};
-
-use_contract!(acl_storage, "res/acl_storage.json");
-
-const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker";
-
-/// ACL storage of Secret Store
-pub trait AclStorage: Send + Sync {
-	/// Check if requester can access document with hash `document`
-	fn check(&self, requester: Address, document: &ServerKeyId) -> Result<bool, Error>;
-}
-
-/// On-chain ACL storage implementation.
-pub struct OnChainAclStorage {
-	/// Cached on-chain contract.
-	contract: Mutex<CachedContract>,
-}
-
-/// Cached on-chain ACL storage contract.
-struct CachedContract {
-	/// Blockchain client.
-	client: Arc<dyn SecretStoreChain>,
-	/// Contract address source.
-	address_source: ContractAddress,
-	/// Current contract address.
-	contract_address: Option<Address>,
-}
-
-/// Dummy ACL storage implementation (check always passed).
-#[derive(Default, Debug)]
-pub struct DummyAclStorage {
-	prohibited: RwLock<HashMap<Address, HashSet<ServerKeyId>>>,
-}
-
-impl OnChainAclStorage {
-	pub fn new(trusted_client: Arc<dyn SecretStoreChain>, address_source: ContractAddress) -> Result<Arc<Self>, Error> {
-		let acl_storage = Arc::new(OnChainAclStorage {
-			contract: Mutex::new(CachedContract::new(trusted_client.clone(), address_source)),
-		});
-		trusted_client.add_listener(acl_storage.clone());
-		Ok(acl_storage)
-	}
-}
-
-impl AclStorage for OnChainAclStorage {
-	fn check(&self, requester: Address, document: &ServerKeyId) -> Result<bool, Error> {
-		self.contract.lock().check(requester, document)
-	}
-}
-
-impl NewBlocksNotify for OnChainAclStorage {
-	fn new_blocks(&self, _new_enacted_len: usize) {
-		self.contract.lock().update_contract_address()
-	}
-}
-
-impl CachedContract {
-	pub fn new(client: Arc<dyn SecretStoreChain>, address_source: ContractAddress) -> Self {
-		let mut contract = CachedContract {
-			client,
-			address_source,
-			contract_address: None,
-		};
-		contract.update_contract_address();
-		contract
-	}
-
-	pub fn update_contract_address(&mut self) {
-		let contract_address = self.client.read_contract_address(
-			ACL_CHECKER_CONTRACT_REGISTRY_NAME,
-			&self.address_source
-		);
-		if contract_address != self.contract_address {
-			trace!(target: "secretstore", "Configuring for ACL checker contract from address {:?}",
-				contract_address);
-
-			self.contract_address = contract_address;
-		}
-	}
-
-	pub fn check(&mut self, requester: Address, document: &ServerKeyId) -> Result<bool, Error> {
-		if self.client.is_trusted() {
-			// call contract to check access
-			match self.contract_address {
-				Some(contract_address) => {
-					let (encoded, decoder) = acl_storage::functions::check_permissions::call(requester, document.clone());
-					let d = self.client.call_contract(BlockId::Latest, contract_address, encoded)
-						.map_err(|e| Error::Internal(format!("ACL checker call error: {}", e.to_string())))?;
-					decoder.decode(&d)
-						.map_err(|e| Error::Internal(format!("ACL checker call error: {}", e.to_string())))
-				},
-				None => Err(Error::Internal("ACL checker contract is not configured".to_owned())),
-			}
-		} else {
-			Err(Error::Internal("Calling ACL contract without trusted blockchain client".into()))
-		}
-	}
-}
-
-impl DummyAclStorage {
-	/// Prohibit given requester access to given documents
-	#[cfg(test)]
-	pub fn prohibit(&self, requester: Address, document: ServerKeyId) {
-		self.prohibited.write()
-			.entry(requester)
-			.or_insert_with(Default::default)
-			.insert(document);
-	}
-}
-
-impl AclStorage for DummyAclStorage {
-	fn check(&self, requester: Address, document: &ServerKeyId) -> Result<bool, Error> {
-		Ok(self.prohibited.read()
-			.get(&requester)
-			.map(|docs| !docs.contains(document))
-			.unwrap_or(true))
-	}
-}
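Note on the file above: `DummyAclStorage` is a deny-list, permissive by default, while `OnChainAclStorage` refuses to answer at all unless the client is trusted and the checker contract is configured. The dummy's semantics, as a hypothetical test:

	let acl = DummyAclStorage::default();
	assert!(acl.check(requester, &document).unwrap());  // allowed by default
	acl.prohibit(requester, document.clone());          // test-only deny entry
	assert!(!acl.check(requester, &document).unwrap()); // now denied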
Deleted: the in-tree blockchain abstractions (SecretStoreChain, SigningKeyPair, NewBlocksNotify, Filter):

@@ -1,119 +0,0 @@
-// Copyright 2015-2020 Parity Technologies (UK) Ltd.
-// This file is part of Parity Ethereum.
-
-// Parity Ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Ethereum is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
-
-use std::sync::Arc;
-use bytes::Bytes;
-use ethereum_types::{H256, Address, Public};
-use ethabi::RawLog;
-use crypto::publickey::{Signature, Error as EthKeyError};
-
-/// Type for block number.
-/// Duplicated from ethcore types
-pub type BlockNumber = u64;
-
-/// Uniquely identifies block.
-/// Duplicated from ethcore types
-#[derive(Debug, PartialEq, Copy, Clone, Hash, Eq)]
-pub enum BlockId {
-	/// Block's sha3.
-	/// Querying by hash is always faster.
-	Hash(H256),
-	/// Block number within canon blockchain.
-	Number(BlockNumber),
-	/// Earliest block (genesis).
-	Earliest,
-	/// Latest mined block.
-	Latest,
-}
-
-/// Contract address.
-#[derive(Debug, Clone)]
-pub enum ContractAddress {
-	/// Address is read from registry.
-	Registry,
-	/// Address is specified.
-	Address(ethereum_types::Address),
-}
-
-/// Key pair with signing ability.
-pub trait SigningKeyPair: Send + Sync {
-	/// Public portion of key.
-	fn public(&self) -> &Public;
-	/// Address of key owner.
-	fn address(&self) -> Address;
-	/// Sign data with the key.
-	fn sign(&self, data: &H256) -> Result<Signature, EthKeyError>;
-}
-
-/// Wraps client ChainNotify in order to send signal about new blocks
-pub trait NewBlocksNotify: Send + Sync {
-	/// Fires when chain has new blocks.
-	/// Sends this signal only if contracts' update is required
-	fn new_blocks(&self, _new_enacted_len: usize) {
-		// does nothing by default
-	}
-}
-
-/// Blockchain logs Filter.
-#[derive(Debug, PartialEq)]
-pub struct Filter {
-	/// Blockchain will be searched from this block.
-	pub from_block: BlockId,
-
-	/// Search addresses.
-	///
-	/// If None, match all.
-	/// If specified, log must be produced by one of these addresses.
-	pub address: Option<Vec<Address>>,
-
-	/// Search topics.
-	///
-	/// If None, match all.
-	/// If specified, log must contain one of these topics.
-	pub topics: Vec<Option<Vec<H256>>>,
-}
-
-/// Blockchain representation for Secret Store
-pub trait SecretStoreChain: Send + Sync + 'static {
-	/// Adds listener for chain's NewBlocks event
-	fn add_listener(&self, target: Arc<dyn NewBlocksNotify>);
-
-	/// Check if the underlying chain is in the trusted state
-	fn is_trusted(&self) -> bool;
-
-	/// Transact contract.
-	fn transact_contract(&self, contract: Address, tx_data: Bytes) -> Result<(), EthKeyError>;
-
-	/// Read contract address. If address source is registry, the address is only returned if the current client
-	/// state is trusted. An address from the registry is read from the latest block with
-	/// REQUEST_CONFIRMATIONS_REQUIRED confirmations.
-	fn read_contract_address(&self, registry_name: &str, address: &ContractAddress) -> Option<Address>;
-
-	/// Call contract in the blockchain
-	fn call_contract(&self, block_id: BlockId, contract_address: Address, data: Bytes) -> Result<Bytes, String>;
-
-	/// Returns blockhash for block id
-	fn block_hash(&self, id: BlockId) -> Option<H256>;
-
-	/// Returns block number for block id
-	fn block_number(&self, id: BlockId) -> Option<BlockNumber>;
-
-	/// Retrieve last blockchain logs for the filter
-	fn retrieve_last_logs(&self, filter: Filter) -> Option<Vec<RawLog>>;
-
-	/// Get hash of the last block with predefined number of confirmations (depends on the chain).
-	fn get_confirmed_block_hash(&self) -> Option<H256>;
-}
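Note: these traits are the seam the whole migration turns on; they now live in `parity-secretstore`, and parity's in-tree `TrustedClient` (see the import hunks above) implements them against the real client stack. The full obligation, as a bodies-elided skeleton with signatures copied from the trait above (`MyChain` is a placeholder):

	// Skeleton only; the real implementor in parity is TrustedClient.
	struct MyChain;

	impl SecretStoreChain for MyChain {
		fn add_listener(&self, target: Arc<dyn NewBlocksNotify>) { unimplemented!() }
		fn is_trusted(&self) -> bool { unimplemented!() }
		fn transact_contract(&self, contract: Address, tx_data: Bytes) -> Result<(), EthKeyError> { unimplemented!() }
		fn read_contract_address(&self, registry_name: &str, address: &ContractAddress) -> Option<Address> { unimplemented!() }
		fn call_contract(&self, block_id: BlockId, contract_address: Address, data: Bytes) -> Result<Bytes, String> { unimplemented!() }
		fn block_hash(&self, id: BlockId) -> Option<H256> { unimplemented!() }
		fn block_number(&self, id: BlockId) -> Option<BlockNumber> { unimplemented!() }
		fn retrieve_last_logs(&self, filter: Filter) -> Option<Vec<RawLog>> { unimplemented!() }
		fn get_confirmed_block_hash(&self) -> Option<H256> { unimplemented!() }
	}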
Deleted: the in-tree key server implementation (KeyServerImpl and its session plumbing):

@@ -1,714 +0,0 @@
-// Copyright 2015-2020 Parity Technologies (UK) Ltd.
-// This file is part of Parity Ethereum.
-
-// Parity Ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Ethereum is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
-
-use std::collections::BTreeSet;
-use std::sync::Arc;
-use futures::{future::{err, result}, Future};
-use parking_lot::Mutex;
-use crypto::DEFAULT_MAC;
-use crypto::publickey::public_to_address;
-use parity_runtime::Executor;
-use super::acl_storage::AclStorage;
-use super::key_storage::KeyStorage;
-use super::key_server_set::KeyServerSet;
-use blockchain::SigningKeyPair;
-use key_server_cluster::{math, new_network_cluster, ClusterSession, WaitableSession};
-use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer};
-use types::{Error, Public, RequestSignature, Requester, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow,
-	ClusterConfiguration, MessageHash, EncryptedMessageSignature, NodeId};
-use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration, NetConnectionsManagerConfig};
-
-/// Secret store key server implementation
-pub struct KeyServerImpl {
-	data: Arc<Mutex<KeyServerCore>>,
-}
-
-/// Secret store key server data.
-pub struct KeyServerCore {
-	cluster: Arc<dyn ClusterClient>,
-}
-
-impl KeyServerImpl {
-	/// Create new key server instance
-	pub fn new(config: &ClusterConfiguration, key_server_set: Arc<dyn KeyServerSet>, self_key_pair: Arc<dyn SigningKeyPair>,
-		acl_storage: Arc<dyn AclStorage>, key_storage: Arc<dyn KeyStorage>, executor: Executor) -> Result<Self, Error>
-	{
-		Ok(KeyServerImpl {
-			data: Arc::new(Mutex::new(KeyServerCore::new(config, key_server_set, self_key_pair, acl_storage, key_storage, executor)?)),
-		})
-	}
-
-	/// Get cluster client reference.
-	pub fn cluster(&self) -> Arc<dyn ClusterClient> {
-		self.data.lock().cluster.clone()
-	}
-}
-
-impl KeyServer for KeyServerImpl {}
-
-impl AdminSessionsServer for KeyServerImpl {
-	fn change_servers_set(
-		&self,
-		old_set_signature: RequestSignature,
-		new_set_signature: RequestSignature,
-		new_servers_set: BTreeSet<NodeId>,
-	) -> Box<dyn Future<Item=(), Error=Error> + Send> {
-		return_session(self.data.lock().cluster
-			.new_servers_set_change_session(None, None, new_servers_set, old_set_signature, new_set_signature))
-	}
-}
-
-impl ServerKeyGenerator for KeyServerImpl {
-	fn generate_key(
-		&self,
-		key_id: ServerKeyId,
-		author: Requester,
-		threshold: usize,
-	) -> Box<dyn Future<Item=Public, Error=Error> + Send> {
-		// recover requester's address from signature
-		let address = author.address(&key_id).map_err(Error::InsufficientRequesterData);
-
-		// generate server key
-		return_session(address.and_then(|address| self.data.lock().cluster
-			.new_generation_session(key_id, None, address, threshold)))
-	}
-
-	fn restore_key_public(
-		&self,
-		key_id: ServerKeyId,
-		author: Requester,
-	) -> Box<dyn Future<Item=Public, Error=Error> + Send> {
-		// recover requester's public key from signature
-		let session_and_address = author
-			.address(&key_id)
-			.map_err(Error::InsufficientRequesterData)
-			.and_then(|address| self.data.lock().cluster.new_key_version_negotiation_session(key_id)
-				.map(|session| (session, address)));
-		let (session, address) = match session_and_address {
-			Ok((session, address)) => (session, address),
-			Err(error) => return Box::new(err(error)),
-		};
-
-		// negotiate key version && retrieve common key data
-		let core_session = session.session.clone();
-		Box::new(session.into_wait_future()
-			.and_then(move |_| core_session.common_key_data()
-				.map(|key_share| (key_share, address)))
-			.and_then(|(key_share, address)| if key_share.author == address {
-				Ok(key_share.public)
-			} else {
-				Err(Error::AccessDenied)
-			}))
-	}
-}
-
-impl DocumentKeyServer for KeyServerImpl {
-	fn store_document_key(
-		&self,
-		key_id: ServerKeyId,
-		author: Requester,
-		common_point: Public,
-		encrypted_document_key: Public,
-	) -> Box<dyn Future<Item=(), Error=Error> + Send> {
-		// store encrypted key
-		return_session(self.data.lock().cluster.new_encryption_session(key_id,
-			author.clone(), common_point, encrypted_document_key))
-	}
-
-	fn generate_document_key(
-		&self,
-		key_id: ServerKeyId,
-		author: Requester,
-		threshold: usize,
-	) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
-		// recover requester's public key from signature
-		let public = result(author.public(&key_id).map_err(Error::InsufficientRequesterData));
-
-		// generate server key
-		let data = self.data.clone();
-		let server_key = public.and_then(move |public| {
-			let data = data.lock();
-			let session = data.cluster.new_generation_session(key_id, None, public_to_address(&public), threshold);
-			result(session.map(|session| (public, session)))
-		})
-		.and_then(|(public, session)| session.into_wait_future().map(move |server_key| (public, server_key)));
-
-		// generate random document key
-		let document_key = server_key.and_then(|(public, server_key)|
-			result(math::generate_random_point()
-				.and_then(|document_key| math::encrypt_secret(&document_key, &server_key)
-					.map(|encrypted_document_key| (public, document_key, encrypted_document_key))))
-		);
-
-		// store document key in the storage
-		let data = self.data.clone();
-		let stored_document_key = document_key.and_then(move |(public, document_key, encrypted_document_key)| {
-			let data = data.lock();
-			let session = data.cluster.new_encryption_session(key_id,
-				author.clone(), encrypted_document_key.common_point, encrypted_document_key.encrypted_point);
-			result(session.map(|session| (public, document_key, session)))
-		})
-		.and_then(|(public, document_key, session)| session.into_wait_future().map(move |_| (public, document_key)));
-
-		// encrypt document key with requester's public key
-		let encrypted_document_key = stored_document_key
-			.and_then(|(public, document_key)| crypto::publickey::ecies::encrypt(&public, &DEFAULT_MAC, document_key.as_bytes())
-				.map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err))));
-
-		Box::new(encrypted_document_key)
-	}
-
-	fn restore_document_key(
-		&self,
-		key_id: ServerKeyId,
-		requester: Requester,
-	) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
-		// recover requester's public key from signature
-		let public = result(requester.public(&key_id).map_err(Error::InsufficientRequesterData));
-
-		// decrypt document key
-		let data = self.data.clone();
-		let stored_document_key = public.and_then(move |public| {
-			let data = data.lock();
-			let session = data.cluster.new_decryption_session(key_id, None, requester.clone(), None, false, false);
-			result(session.map(|session| (public, session)))
-		})
-		.and_then(|(public, session)| session.into_wait_future().map(move |document_key| (public, document_key)));
-
-		// encrypt document key with requester's public key
-		let encrypted_document_key = stored_document_key
-			.and_then(|(public, document_key)|
-				crypto::publickey::ecies::encrypt(&public, &DEFAULT_MAC, document_key.decrypted_secret.as_bytes())
-					.map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err))));
-
-		Box::new(encrypted_document_key)
-	}
-
-	fn restore_document_key_shadow(
-		&self,
-		key_id: ServerKeyId,
-		requester: Requester,
-	) -> Box<dyn Future<Item=EncryptedDocumentKeyShadow, Error=Error> + Send> {
-		return_session(self.data.lock().cluster.new_decryption_session(key_id,
-			None, requester.clone(), None, true, false))
-	}
-}
-
-impl MessageSigner for KeyServerImpl {
-	fn sign_message_schnorr(
-		&self,
-		key_id: ServerKeyId,
-		requester: Requester,
-		message: MessageHash,
-	) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
-		// recover requester's public key from signature
-		let public = result(requester.public(&key_id).map_err(Error::InsufficientRequesterData));
-
-		// sign message
-		let data = self.data.clone();
-		let signature = public.and_then(move |public| {
-			let data = data.lock();
-			let session = data.cluster.new_schnorr_signing_session(key_id, requester.clone().into(), None, message);
-			result(session.map(|session| (public, session)))
-		})
-		.and_then(|(public, session)| session.into_wait_future().map(move |signature| (public, signature)));
-
-		// compose two message signature components into single one
-		let combined_signature = signature.map(|(public, signature)| {
-			let mut combined_signature = [0; 64];
-			combined_signature[..32].clone_from_slice(signature.0.as_bytes());
-			combined_signature[32..].clone_from_slice(signature.1.as_bytes());
-			(public, combined_signature)
-		});
-
-		// encrypt signature with requester's public key
-		let encrypted_signature = combined_signature
-			.and_then(|(public, combined_signature)| crypto::publickey::ecies::encrypt(&public, &DEFAULT_MAC, &combined_signature)
-				.map_err(|err| Error::Internal(format!("Error encrypting message signature: {}", err))));
-
-		Box::new(encrypted_signature)
-	}
-
-	fn sign_message_ecdsa(
-		&self,
-		key_id: ServerKeyId,
-		requester: Requester,
-		message: MessageHash,
-	) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
-		// recover requester's public key from signature
-		let public = result(requester.public(&key_id).map_err(Error::InsufficientRequesterData));
-
-		// sign message
-		let data = self.data.clone();
-		let signature = public.and_then(move |public| {
-			let data = data.lock();
-			let session = data.cluster.new_ecdsa_signing_session(key_id, requester.clone().into(), None, message);
-			result(session.map(|session| (public, session)))
-		})
-		.and_then(|(public, session)| session.into_wait_future().map(move |signature| (public, signature)));
-
-		// encrypt combined signature with requester's public key
-		let encrypted_signature = signature
-			.and_then(|(public, signature)| crypto::publickey::ecies::encrypt(&public, &DEFAULT_MAC, &*signature)
-				.map_err(|err| Error::Internal(format!("Error encrypting message signature: {}", err))));
-
-		Box::new(encrypted_signature)
-	}
-}
-
-impl KeyServerCore {
-	pub fn new(config: &ClusterConfiguration, key_server_set: Arc<dyn KeyServerSet>, self_key_pair: Arc<dyn SigningKeyPair>,
-		acl_storage: Arc<dyn AclStorage>, key_storage: Arc<dyn KeyStorage>, executor: Executor) -> Result<Self, Error>
-	{
-		let cconfig = NetClusterConfiguration {
-			self_key_pair: self_key_pair.clone(),
-			key_server_set: key_server_set,
-			acl_storage: acl_storage,
-			key_storage: key_storage,
-			admin_public: config.admin_public,
-			preserve_sessions: false,
-		};
-		let net_config = NetConnectionsManagerConfig {
-			listen_address: (config.listener_address.address.clone(), config.listener_address.port),
-			allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes,
-			auto_migrate_enabled: config.auto_migrate_enabled,
-		};
-
-		let core = new_network_cluster(executor, cconfig, net_config)?;
-		let cluster = core.client();
-		core.run()?;
-
-		Ok(KeyServerCore {
-			cluster,
-		})
-	}
-}
-
-fn return_session<S: ClusterSession>(
-	session: Result<WaitableSession<S>, Error>,
-) -> Box<dyn Future<Item=S::SuccessfulResult, Error=Error> + Send> {
-	match session {
-		Ok(session) => Box::new(session.into_wait_future()),
-		Err(error) => Box::new(err(error))
-	}
-}
-
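Note: every trait method above funnels through `return_session`, which turns the cluster's synchronous session-creation `Result` into the boxed futures-0.1 future the public API exposes; the per-method bodies are variations on chaining `result(...)`, `and_then`, and `into_wait_future` around it. Callers drive the returned future however they like; the tests below do it synchronously:

	// futures-0.1 style: block the current thread until the session resolves
	// (hedged sketch; error handling depends on the caller).
	let public = key_server.generate_key(key_id, author, threshold).wait()?;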
#[cfg(test)]
|
|
||||||
pub mod tests {
|
|
||||||
use std::collections::BTreeSet;
|
|
||||||
use std::time;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::net::SocketAddr;
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
use futures::Future;
|
|
||||||
use crypto::DEFAULT_MAC;
|
|
||||||
use crypto::publickey::{Secret, Random, Generator, verify_public};
|
|
||||||
use acl_storage::DummyAclStorage;
|
|
||||||
use key_storage::KeyStorage;
|
|
||||||
use key_storage::tests::DummyKeyStorage;
|
|
||||||
use node_key_pair::PlainNodeKeyPair;
|
|
||||||
use key_server_set::tests::MapKeyServerSet;
|
|
||||||
use key_server_cluster::math;
|
|
||||||
use ethereum_types::{H256, H520};
|
|
||||||
use parity_runtime::Runtime;
|
|
||||||
use types::{Error, Public, ClusterConfiguration, NodeAddress, RequestSignature, ServerKeyId,
|
|
||||||
EncryptedDocumentKey, EncryptedDocumentKeyShadow, MessageHash, EncryptedMessageSignature,
|
|
||||||
Requester, NodeId};
|
|
||||||
use traits::{AdminSessionsServer, ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer};
|
|
||||||
use super::KeyServerImpl;
|
|
||||||
|
|
||||||
#[derive(Default)]
|
|
||||||
pub struct DummyKeyServer;
|
|
||||||
|
|
||||||
impl KeyServer for DummyKeyServer {}
|
|
||||||
|
|
||||||
impl AdminSessionsServer for DummyKeyServer {
|
|
||||||
fn change_servers_set(
|
|
||||||
&self,
|
|
||||||
_old_set_signature: RequestSignature,
|
|
||||||
_new_set_signature: RequestSignature,
|
|
||||||
_new_servers_set: BTreeSet<NodeId>,
|
|
||||||
) -> Box<dyn Future<Item=(), Error=Error> + Send> {
|
|
||||||
unimplemented!("test-only")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ServerKeyGenerator for DummyKeyServer {
|
|
||||||
fn generate_key(
|
|
||||||
&self,
|
|
||||||
_key_id: ServerKeyId,
|
|
||||||
_author: Requester,
|
|
||||||
_threshold: usize,
|
|
||||||
) -> Box<dyn Future<Item=Public, Error=Error> + Send> {
|
|
||||||
unimplemented!("test-only")
|
|
||||||
}
|
|
||||||
|
|
||||||
fn restore_key_public(
|
|
||||||
&self,
|
|
||||||
_key_id: ServerKeyId,
|
|
||||||
_author: Requester,
|
|
||||||
) -> Box<dyn Future<Item=Public, Error=Error> + Send> {
|
|
||||||
unimplemented!("test-only")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DocumentKeyServer for DummyKeyServer {
|
|
||||||
fn store_document_key(
|
|
||||||
&self,
|
|
||||||
_key_id: ServerKeyId,
|
|
||||||
_author: Requester,
|
|
||||||
_common_point: Public,
|
|
||||||
_encrypted_document_key: Public,
|
|
||||||
) -> Box<dyn Future<Item=(), Error=Error> + Send> {
|
|
||||||
unimplemented!("test-only")
|
|
||||||
}
|
|
||||||
|
|
||||||
fn generate_document_key(
|
|
||||||
&self,
|
|
||||||
_key_id: ServerKeyId,
|
|
||||||
_author: Requester,
|
|
||||||
_threshold: usize,
|
|
||||||
) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
|
|
||||||
unimplemented!("test-only")
|
|
||||||
}
|
|
||||||
|
|
||||||
fn restore_document_key(
|
|
||||||
&self,
|
|
||||||
_key_id: ServerKeyId,
|
|
||||||
_requester: Requester,
|
|
||||||
) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
|
|
||||||
unimplemented!("test-only")
|
|
||||||
}
|
|
||||||
|
|
||||||
fn restore_document_key_shadow(
|
|
||||||
&self,
|
|
||||||
_key_id: ServerKeyId,
|
|
||||||
_requester: Requester,
|
|
||||||
) -> Box<dyn Future<Item=EncryptedDocumentKeyShadow, Error=Error> + Send> {
|
|
||||||
unimplemented!("test-only")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl MessageSigner for DummyKeyServer {
|
|
||||||
fn sign_message_schnorr(
|
|
||||||
&self,
|
|
||||||
_key_id: ServerKeyId,
|
|
||||||
_requester: Requester,
|
|
||||||
_message: MessageHash,
|
|
||||||
) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
|
|
||||||
unimplemented!("test-only")
|
|
||||||
}
|
|
||||||
|
|
||||||
fn sign_message_ecdsa(
|
|
||||||
&self,
|
|
||||||
_key_id: ServerKeyId,
|
|
||||||
_requester: Requester,
|
|
||||||
_message: MessageHash,
|
|
||||||
) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
|
|
||||||
unimplemented!("test-only")
|
|
||||||
}
|
|
||||||
}

fn make_key_servers(start_port: u16, num_nodes: usize) -> (Vec<KeyServerImpl>, Vec<Arc<DummyKeyStorage>>, Runtime) {
	let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate()).collect();
	let configs: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration {
		listener_address: NodeAddress {
			address: "127.0.0.1".into(),
			port: start_port + (i as u16),
		},
		nodes: key_pairs.iter().enumerate().map(|(j, kp)| (kp.public().clone(),
			NodeAddress {
				address: "127.0.0.1".into(),
				port: start_port + (j as u16),
			})).collect(),
		key_server_set_contract_address: None,
		allow_connecting_to_higher_nodes: false,
		admin_public: None,
		auto_migrate_enabled: false,
	}).collect();
	let key_servers_set: BTreeMap<Public, SocketAddr> = configs[0].nodes.iter()
		.map(|(k, a)| (k.clone(), format!("{}:{}", a.address, a.port).parse().unwrap()))
		.collect();
	let key_storages = (0..num_nodes).map(|_| Arc::new(DummyKeyStorage::default())).collect::<Vec<_>>();
	let runtime = Runtime::with_thread_count(4);
	let key_servers: Vec<_> = configs.into_iter().enumerate().map(|(i, cfg)|
		KeyServerImpl::new(&cfg, Arc::new(MapKeyServerSet::new(false, key_servers_set.clone())),
			Arc::new(PlainNodeKeyPair::new(key_pairs[i].clone())),
			Arc::new(DummyAclStorage::default()),
			key_storages[i].clone(), runtime.executor()).unwrap()
	).collect();

	// wait until connections are established. It is fast => do not bother with events here
	let start = time::Instant::now();
	let mut tried_reconnections = false;
	loop {
		if key_servers.iter().all(|ks| ks.cluster().is_fully_connected()) {
			break;
		}

		let old_tried_reconnections = tried_reconnections;
		let mut fully_connected = true;
		for key_server in &key_servers {
			if !key_server.cluster().is_fully_connected() {
				fully_connected = false;
				if !old_tried_reconnections {
					tried_reconnections = true;
					key_server.cluster().connect();
				}
			}
		}
		if fully_connected {
			break;
		}
		if time::Instant::now() - start > time::Duration::from_millis(3000) {
			panic!("connections are not established in 3000ms");
		}
	}

	(key_servers, key_storages, runtime)
}
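
// The connection wait above is a recurring pattern in these tests: poll a readiness
// condition, optionally trigger one round of reconnects, and give up after a deadline.
// A minimal generic sketch of that pattern, assuming nothing beyond std (wait_until is
// an illustrative helper, not part of the original code):
#[allow(dead_code)]
fn wait_until(mut condition: impl FnMut() -> bool, deadline: std::time::Duration) -> bool {
	let start = std::time::Instant::now();
	while !condition() {
		if start.elapsed() > deadline {
			// timed out before the condition became true
			return false;
		}
		std::thread::sleep(std::time::Duration::from_millis(10));
	}
	true
}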

#[test]
fn document_key_generation_and_retrievement_works_over_network_with_single_node() {
	let _ = ::env_logger::try_init();
	let (key_servers, _, runtime) = make_key_servers(6070, 1);

	// generate document key
	let threshold = 0;
	let document = Random.generate().secret().clone();
	let secret = Random.generate().secret().clone();
	let signature: Requester = crypto::publickey::sign(&secret, &document).unwrap().into();
	let generated_key = key_servers[0].generate_document_key(
		*document,
		signature.clone(),
		threshold,
	).wait().unwrap();
	let generated_key = crypto::publickey::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap();

	// now let's try to retrieve key back
	for key_server in key_servers.iter() {
		let retrieved_key = key_server.restore_document_key(
			*document,
			signature.clone(),
		).wait().unwrap();
		let retrieved_key = crypto::publickey::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap();
		assert_eq!(retrieved_key, generated_key);
	}
	drop(runtime);
}
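
// Note on the flow above: the key server returns the document key ECIES-encrypted
// against the requester's public key (recovered from `signature`), so only the holder
// of `secret` can read it; that is why the test calls ecies::decrypt with `secret`
// and DEFAULT_MAC before comparing the generated and retrieved keys.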

#[test]
fn document_key_generation_and_retrievement_works_over_network_with_3_nodes() {
	let _ = ::env_logger::try_init();
	let (key_servers, key_storages, runtime) = make_key_servers(6080, 3);

	let test_cases = [0, 1, 2];
	for threshold in &test_cases {
		// generate document key
		let document = Random.generate().secret().clone();
		let secret = Random.generate().secret().clone();
		let signature: Requester = crypto::publickey::sign(&secret, &document).unwrap().into();
		let generated_key = key_servers[0].generate_document_key(
			*document,
			signature.clone(),
			*threshold,
		).wait().unwrap();
		let generated_key = crypto::publickey::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap();

		// now let's try to retrieve key back
		for (i, key_server) in key_servers.iter().enumerate() {
			let retrieved_key = key_server.restore_document_key(
				*document,
				signature.clone(),
			).wait().unwrap();
			let retrieved_key = crypto::publickey::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap();
			assert_eq!(retrieved_key, generated_key);

			let key_share = key_storages[i].get(&document).unwrap().unwrap();
			assert!(key_share.common_point.is_some());
			assert!(key_share.encrypted_point.is_some());
		}
	}
	drop(runtime);
}

#[test]
fn server_key_generation_and_storing_document_key_works_over_network_with_3_nodes() {
	let _ = ::env_logger::try_init();
	let (key_servers, _, runtime) = make_key_servers(6090, 3);

	let test_cases = [0, 1, 2];
	for threshold in &test_cases {
		// generate server key
		let server_key_id = Random.generate().secret().clone();
		let requestor_secret = Random.generate().secret().clone();
		let signature: Requester = crypto::publickey::sign(&requestor_secret, &server_key_id).unwrap().into();
		let server_public = key_servers[0].generate_key(
			*server_key_id,
			signature.clone(),
			*threshold,
		).wait().unwrap();

		// generate document key (this is done by KS client so that document key is unknown to any KS)
		let generated_key = Random.generate().public().clone();
		let encrypted_document_key = math::encrypt_secret(&generated_key, &server_public).unwrap();

		// store document key
		key_servers[0].store_document_key(*server_key_id, signature.clone(),
			encrypted_document_key.common_point, encrypted_document_key.encrypted_point).wait().unwrap();

		// now let's try to retrieve key back
		for key_server in key_servers.iter() {
			let retrieved_key = key_server.restore_document_key(*server_key_id, signature.clone()).wait().unwrap();
			let retrieved_key = crypto::publickey::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &retrieved_key).unwrap();
			let retrieved_key = Public::from_slice(&retrieved_key);
			assert_eq!(retrieved_key, generated_key);
		}
	}
	drop(runtime);
}

#[test]
fn server_key_generation_and_message_signing_works_over_network_with_3_nodes() {
	let _ = ::env_logger::try_init();
	let (key_servers, _, runtime) = make_key_servers(6100, 3);

	let test_cases = [0, 1, 2];
	for threshold in &test_cases {
		// generate server key
		let server_key_id = Random.generate().secret().clone();
		let requestor_secret = Random.generate().secret().clone();
		let signature: Requester = crypto::publickey::sign(&requestor_secret, &server_key_id).unwrap().into();
		let server_public = key_servers[0].generate_key(
			*server_key_id,
			signature.clone(),
			*threshold,
		).wait().unwrap();

		// sign message
		let message_hash = H256::from_low_u64_be(42);
		let combined_signature = key_servers[0].sign_message_schnorr(
			*server_key_id,
			signature,
			message_hash,
		).wait().unwrap();
		let combined_signature = crypto::publickey::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &combined_signature).unwrap();
		let signature_c = Secret::copy_from_slice(&combined_signature[..32]).unwrap();
		let signature_s = Secret::copy_from_slice(&combined_signature[32..]).unwrap();

		// check signature
		assert_eq!(math::verify_schnorr_signature(&server_public, &(signature_c, signature_s), &message_hash), Ok(true));
	}
	drop(runtime);
}
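
// Note: the decrypted combined Schnorr signature above is a 64-byte blob laid out as
// (c, s): bytes [0..32) carry the challenge `c` and bytes [32..64) the response `s`,
// which is exactly how the test splits it before calling verify_schnorr_signature.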

#[test]
fn decryption_session_is_delegated_when_node_does_not_have_key_share() {
	let _ = ::env_logger::try_init();
	let (key_servers, key_storages, runtime) = make_key_servers(6110, 3);

	// generate document key
	let threshold = 0;
	let document = Random.generate().secret().clone();
	let secret = Random.generate().secret().clone();
	let signature: Requester = crypto::publickey::sign(&secret, &document).unwrap().into();
	let generated_key = key_servers[0].generate_document_key(
		*document,
		signature.clone(),
		threshold,
	).wait().unwrap();
	let generated_key = crypto::publickey::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap();

	// remove key from node0
	key_storages[0].remove(&document).unwrap();

	// now let's try to retrieve the key by requesting it from node0, so that the session must be delegated
	let retrieved_key = key_servers[0].restore_document_key(*document, signature).wait().unwrap();
	let retrieved_key = crypto::publickey::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap();
	assert_eq!(retrieved_key, generated_key);
	drop(runtime);
}

#[test]
fn schnorr_signing_session_is_delegated_when_node_does_not_have_key_share() {
	let _ = ::env_logger::try_init();
	let (key_servers, key_storages, runtime) = make_key_servers(6114, 3);
	let threshold = 1;

	// generate server key
	let server_key_id = Random.generate().secret().clone();
	let requestor_secret = Random.generate().secret().clone();
	let signature: Requester = crypto::publickey::sign(&requestor_secret, &server_key_id).unwrap().into();
	let server_public = key_servers[0].generate_key(*server_key_id, signature.clone(), threshold).wait().unwrap();

	// remove key from node0
	key_storages[0].remove(&server_key_id).unwrap();

	// sign message
	let message_hash = H256::from_low_u64_be(42);
	let combined_signature = key_servers[0].sign_message_schnorr(
		*server_key_id,
		signature,
		message_hash,
	).wait().unwrap();
	let combined_signature = crypto::publickey::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &combined_signature).unwrap();
	let signature_c = Secret::copy_from_slice(&combined_signature[..32]).unwrap();
	let signature_s = Secret::copy_from_slice(&combined_signature[32..]).unwrap();

	// check signature
	assert_eq!(math::verify_schnorr_signature(&server_public, &(signature_c, signature_s), &message_hash), Ok(true));
	drop(runtime);
}

#[test]
fn ecdsa_signing_session_is_delegated_when_node_does_not_have_key_share() {
	let _ = ::env_logger::try_init();
	let (key_servers, key_storages, runtime) = make_key_servers(6117, 4);
	let threshold = 1;

	// generate server key
	let server_key_id = Random.generate().secret().clone();
	let requestor_secret = Random.generate().secret().clone();
	let signature = crypto::publickey::sign(&requestor_secret, &server_key_id).unwrap();
	let server_public = key_servers[0].generate_key(
		*server_key_id,
		signature.clone().into(),
		threshold,
	).wait().unwrap();

	// remove key from node0
	key_storages[0].remove(&server_key_id).unwrap();

	// sign message
	let message_hash = H256::random();
	let signature = key_servers[0].sign_message_ecdsa(
		*server_key_id,
		signature.clone().into(),
		message_hash,
	).wait().unwrap();
	let signature = crypto::publickey::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &signature).unwrap();
	let signature = H520::from_slice(&signature[0..65]);

	// check signature
	assert!(verify_public(&server_public, &signature.into(), &message_hash).unwrap());
	drop(runtime);
}

#[test]
fn servers_set_change_session_works_over_network() {
	// TODO [Test]
}
}
@ -1,969 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::collections::{BTreeSet, BTreeMap};
use ethereum_types::{Address, H256};
use crypto::publickey::Secret;
use futures::Oneshot;
use parking_lot::Mutex;
use key_server_cluster::{Error, SessionId, NodeId, DocumentKeyShare};
use key_server_cluster::cluster::Cluster;
use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession, CompletionSignal};
use key_server_cluster::decryption_session::SessionImpl as DecryptionSession;
use key_server_cluster::signing_session_ecdsa::SessionImpl as EcdsaSigningSession;
use key_server_cluster::signing_session_schnorr::SessionImpl as SchnorrSigningSession;
use key_server_cluster::message::{Message, KeyVersionNegotiationMessage, RequestKeyVersions,
	KeyVersions, KeyVersionsError, FailedKeyVersionContinueAction, CommonKeyData};
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;

// TODO [Opt]: change sessions so that versions are sent by chunks.
/// Number of versions sent in single message.
const VERSIONS_PER_MESSAGE: usize = 32;

/// Key version negotiation transport.
pub trait SessionTransport {
	/// Broadcast message to all nodes.
	fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error>;
	/// Send message to given node.
	fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error>;
}
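
// Any transport only has to forward messages to the cluster (see the
// IsolatedSessionTransport impl further down, and DummyTransport in the tests). As a
// hedged sketch, an alternative test transport could simply record outgoing messages
// instead of sending them (RecordingTransport is an illustrative name, not part of
// the original code):
#[allow(dead_code)]
struct RecordingTransport {
	sent: Mutex<Vec<KeyVersionNegotiationMessage>>,
}

impl SessionTransport for RecordingTransport {
	fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error> {
		// record instead of broadcasting
		self.sent.lock().push(message);
		Ok(())
	}

	fn send(&self, _node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> {
		// record instead of sending
		self.sent.lock().push(message);
		Ok(())
	}
}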

/// Key version negotiation result computer.
pub trait SessionResultComputer: Send + Sync {
	/// Compute result of session, if possible.
	fn compute_result(&self, threshold: Option<usize>, confirmations: &BTreeSet<NodeId>, versions: &BTreeMap<H256, BTreeSet<NodeId>>) -> Option<Result<(H256, NodeId), Error>>;
}
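
// Contract of compute_result, as exercised by the implementations below: None means
// "keep waiting for more confirmations", Some(Ok((version, node))) names the
// negotiated version together with a node that holds it, and Some(Err(_)) fails the
// session.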

/// Key discovery session API.
pub struct SessionImpl<T: SessionTransport> {
	/// Session core.
	core: SessionCore<T>,
	/// Session data.
	data: Mutex<SessionData>,
}

/// Action after key version is negotiated.
#[derive(Clone)]
pub enum ContinueAction {
	/// Decryption session + origin + is_shadow_decryption + is_broadcast_decryption.
	Decrypt(Arc<DecryptionSession>, Option<Address>, bool, bool),
	/// Schnorr signing session + message hash.
	SchnorrSign(Arc<SchnorrSigningSession>, H256),
	/// ECDSA signing session + message hash.
	EcdsaSign(Arc<EcdsaSigningSession>, H256),
}

/// Failed action after key version is negotiated.
#[derive(Clone, Debug, PartialEq)]
pub enum FailedContinueAction {
	/// Decryption origin + requester.
	Decrypt(Option<Address>, Address),
}

/// Immutable session data.
struct SessionCore<T: SessionTransport> {
	/// Session meta.
	pub meta: ShareChangeSessionMeta,
	/// Sub-session id.
	pub sub_session: Secret,
	/// Key share.
	pub key_share: Option<DocumentKeyShare>,
	/// Session result computer.
	pub result_computer: Arc<dyn SessionResultComputer>,
	/// Session transport.
	pub transport: T,
	/// Session nonce.
	pub nonce: u64,
	/// Session completion signal.
	pub completed: CompletionSignal<Option<(H256, NodeId)>>,
}

/// Mutable session data.
struct SessionData {
	/// Session state.
	pub state: SessionState,
	/// Initialization confirmations.
	pub confirmations: Option<BTreeSet<NodeId>>,
	/// Common key data that nodes have agreed upon.
	pub key_share: Option<DocumentKeyShare>,
	/// { Version => Nodes }
	pub versions: Option<BTreeMap<H256, BTreeSet<NodeId>>>,
	/// Session result.
	pub result: Option<Result<Option<(H256, NodeId)>, Error>>,
	/// Continue action.
	pub continue_with: Option<ContinueAction>,
	/// Failed continue action (reported in error message by master node).
	pub failed_continue_with: Option<FailedContinueAction>,
}

/// SessionImpl creation parameters.
pub struct SessionParams<T: SessionTransport> {
	/// Session meta.
	pub meta: ShareChangeSessionMeta,
	/// Sub-session id.
	pub sub_session: Secret,
	/// Key share.
	pub key_share: Option<DocumentKeyShare>,
	/// Session result computer.
	pub result_computer: Arc<dyn SessionResultComputer>,
	/// Session transport to communicate to other cluster nodes.
	pub transport: T,
	/// Session nonce.
	pub nonce: u64,
}

/// Key version negotiation session state.
#[derive(Debug, PartialEq)]
enum SessionState {
	/// Waiting for initialization.
	WaitingForInitialization,
	/// Waiting for responses.
	WaitingForResponses,
	/// Session is completed.
	Finished,
}

/// Isolated session transport.
pub struct IsolatedSessionTransport {
	/// Cluster.
	pub cluster: Arc<dyn Cluster>,
	/// Key id.
	pub key_id: SessionId,
	/// Sub session id.
	pub sub_session: Secret,
	/// Session-level nonce.
	pub nonce: u64,
}

/// Fastest session result computer. Computes first possible version that can be recovered on this node.
/// If there's no such version, selects version with the most support.
pub struct FastestResultComputer {
	/// This node id.
	self_node_id: NodeId,
	/// Threshold (if known).
	threshold: Option<usize>,
	/// Count of all configured key server nodes.
	configured_nodes_count: usize,
	/// Count of all connected key server nodes.
	connected_nodes_count: usize,
}

/// Selects version with most support, waiting for responses from all nodes.
pub struct LargestSupportResultComputer;

impl<T> SessionImpl<T> where T: SessionTransport {
	/// Create new session.
	pub fn new(params: SessionParams<T>) -> (Self, Oneshot<Result<Option<(H256, NodeId)>, Error>>) {
		let (completed, oneshot) = CompletionSignal::new();
		(SessionImpl {
			core: SessionCore {
				meta: params.meta,
				sub_session: params.sub_session,
				key_share: params.key_share.clone(),
				result_computer: params.result_computer,
				transport: params.transport,
				nonce: params.nonce,
				completed,
			},
			data: Mutex::new(SessionData {
				state: SessionState::WaitingForInitialization,
				confirmations: None,
				key_share: params.key_share.map(|key_share| DocumentKeyShare {
					threshold: key_share.threshold,
					author: key_share.author,
					public: key_share.public,
					..Default::default()
				}),
				versions: None,
				result: None,
				continue_with: None,
				failed_continue_with: None,
			})
		}, oneshot)
	}

	/// Return session meta.
	pub fn meta(&self) -> &ShareChangeSessionMeta {
		&self.core.meta
	}

	/// Return nodes that are known to hold the given key version.
	pub fn version_holders(&self, version: &H256) -> Result<BTreeSet<NodeId>, Error> {
		Ok(self.data.lock().versions.as_ref().ok_or(Error::InvalidStateForRequest)?
			.get(version).ok_or(Error::ServerKeyIsNotFound)?
			.clone())
	}

	/// Set continue action.
	pub fn set_continue_action(&self, action: ContinueAction) {
		self.data.lock().continue_with = Some(action);
	}

	/// Take continue action.
	pub fn take_continue_action(&self) -> Option<ContinueAction> {
		self.data.lock().continue_with.take()
	}

	/// Take failed continue action.
	pub fn take_failed_continue_action(&self) -> Option<FailedContinueAction> {
		self.data.lock().failed_continue_with.take()
	}

	/// Return session completion result (if available).
	pub fn result(&self) -> Option<Result<Option<(H256, NodeId)>, Error>> {
		self.data.lock().result.clone()
	}

	/// Retrieve common key data (author, threshold, public), if available.
	pub fn common_key_data(&self) -> Result<DocumentKeyShare, Error> {
		self.data.lock().key_share.clone()
			.ok_or(Error::InvalidStateForRequest)
	}

	/// Initialize session.
	pub fn initialize(&self, connected_nodes: BTreeSet<NodeId>) -> Result<(), Error> {
		// check state
		let mut data = self.data.lock();
		if data.state != SessionState::WaitingForInitialization {
			return Err(Error::InvalidStateForRequest);
		}

		// update state
		let mut confirmations = connected_nodes;
		let mut versions: BTreeMap<H256, BTreeSet<NodeId>> = BTreeMap::new();
		let received_own_confirmation = confirmations.remove(&self.core.meta.self_node_id);
		if received_own_confirmation {
			if let Some(key_share) = self.core.key_share.as_ref() {
				for version in &key_share.versions {
					versions.entry(version.hash.clone())
						.or_insert_with(Default::default)
						.insert(self.core.meta.self_node_id.clone());
				}
			}
		}

		// update state
		let no_confirmations_required = confirmations.is_empty();
		data.state = SessionState::WaitingForResponses;
		data.confirmations = Some(confirmations);
		data.versions = Some(versions);

		// try to complete session
		Self::try_complete(&self.core, &mut *data);
		if no_confirmations_required && data.state != SessionState::Finished {
			return Err(Error::ServerKeyIsNotFound);
		} else if data.state == SessionState::Finished {
			return Ok(());
		}

		// send requests
		let confirmations = data.confirmations.as_ref().expect("filled couple of lines above; qed");
		for connected_node in confirmations {
			self.core.transport.send(connected_node, KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions {
				session: self.core.meta.id.clone().into(),
				sub_session: self.core.sub_session.clone().into(),
				session_nonce: self.core.nonce,
			}))?;
		}

		Ok(())
	}
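
	// Flow summary: initialize() removes this (master) node's own id from
	// `confirmations`, seeds `versions` with the locally known ones, and then asks
	// every remaining node for its versions; each KeyVersions reply is folded in by
	// on_key_versions() below until the result computer can produce an answer.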

	/// Process single message.
	pub fn process_message(&self, sender: &NodeId, message: &KeyVersionNegotiationMessage) -> Result<(), Error> {
		if self.core.nonce != message.session_nonce() {
			return Err(Error::ReplayProtection);
		}

		match message {
			&KeyVersionNegotiationMessage::RequestKeyVersions(ref message) =>
				self.on_key_versions_request(sender, message),
			&KeyVersionNegotiationMessage::KeyVersions(ref message) =>
				self.on_key_versions(sender, message),
			&KeyVersionNegotiationMessage::KeyVersionsError(ref message) => {
				// remember failed continue action
				if let Some(FailedKeyVersionContinueAction::Decrypt(Some(ref origin), ref requester)) = message.continue_with {
					self.data.lock().failed_continue_with =
						Some(FailedContinueAction::Decrypt(Some(origin.clone().into()), requester.clone().into()));
				}

				self.on_session_error(sender, message.error.clone());
				Ok(())
			},
		}
	}

	/// Process key versions request.
	pub fn on_key_versions_request(&self, sender: &NodeId, _message: &RequestKeyVersions) -> Result<(), Error> {
		debug_assert!(sender != &self.core.meta.self_node_id);

		// check message
		if *sender != self.core.meta.master_node_id {
			return Err(Error::InvalidMessage);
		}

		// check state
		let mut data = self.data.lock();
		if data.state != SessionState::WaitingForInitialization {
			return Err(Error::InvalidStateForRequest);
		}

		// send response
		self.core.transport.send(sender, KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
			session: self.core.meta.id.clone().into(),
			sub_session: self.core.sub_session.clone().into(),
			session_nonce: self.core.nonce,
			key_common: self.core.key_share.as_ref().map(|key_share| CommonKeyData {
				threshold: key_share.threshold,
				author: key_share.author.into(),
				public: key_share.public.into(),
			}),
			versions: self.core.key_share.as_ref().map(|key_share|
				key_share.versions.iter().rev()
					.filter(|v| v.id_numbers.contains_key(sender))
					.chain(key_share.versions.iter().rev().filter(|v| !v.id_numbers.contains_key(sender)))
					.map(|v| v.hash.clone().into())
					.take(VERSIONS_PER_MESSAGE)
					.collect())
				.unwrap_or_else(|| Default::default())
		}))?;

		// update state
		data.state = SessionState::Finished;
		data.result = Some(Ok(None));
		self.core.completed.send(Ok(None));

		Ok(())
	}
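
	// Note: a non-master node is done as soon as it has replied; it finishes with
	// Ok(None) and never learns the negotiated version itself.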

	/// Process key versions response.
	pub fn on_key_versions(&self, sender: &NodeId, message: &KeyVersions) -> Result<(), Error> {
		debug_assert!(sender != &self.core.meta.self_node_id);

		// check state
		let mut data = self.data.lock();
		if data.state != SessionState::WaitingForResponses && data.state != SessionState::Finished {
			return Err(Error::InvalidStateForRequest);
		}
		let reason = "this field is filled on master node when initializing; this is initialized master node; qed";
		if !data.confirmations.as_mut().expect(reason).remove(sender) {
			return Err(Error::InvalidMessage);
		}

		// remember versions that the sender has
		{
			match message.key_common.as_ref() {
				Some(key_common) if data.key_share.is_none() => {
					data.key_share = Some(DocumentKeyShare {
						threshold: key_common.threshold,
						author: key_common.author.clone().into(),
						public: key_common.public.clone().into(),
						..Default::default()
					});
				},
				Some(key_common) => {
					let prev_key_share = data.key_share.as_ref()
						.expect("data.key_share.is_none() is matched by previous branch; qed");
					if prev_key_share.threshold != key_common.threshold ||
						prev_key_share.author.as_bytes() != key_common.author.as_bytes() ||
						prev_key_share.public.as_bytes() != key_common.public.as_bytes()
					{
						return Err(Error::InvalidMessage);
					}
				},
				None if message.versions.is_empty() => (),
				None => return Err(Error::InvalidMessage),
			}

			let versions = data.versions.as_mut().expect(reason);
			for version in &message.versions {
				versions.entry(version.clone().into())
					.or_insert_with(Default::default)
					.insert(sender.clone());
			}
		}

		// try to compute result
		if data.state != SessionState::Finished {
			Self::try_complete(&self.core, &mut *data);
		}

		Ok(())
	}

	/// Try to complete result && finish session.
	fn try_complete(core: &SessionCore<T>, data: &mut SessionData) {
		let reason = "this field is filled on master node when initializing; try_complete is only called on initialized master node; qed";
		let confirmations = data.confirmations.as_ref().expect(reason);
		let versions = data.versions.as_ref().expect(reason);
		let threshold = data.key_share.as_ref().map(|key_share| key_share.threshold);
		if let Some(result) = core.result_computer.compute_result(threshold, confirmations, versions) {
			// when the master node is processing a decryption service request, it starts with a key version negotiation session
			// if the negotiation fails, only the master node knows about it
			// => if the error is fatal, only the master will know about it and report it to the contract && the request will never be rejected
			// => let's broadcast the fatal error so that every other node knows about it and, if it trusts the master node,
			// reports the error to the contract
			if let (Some(continue_with), Err(error)) = (data.continue_with.as_ref(), result.as_ref()) {
				let origin = match *continue_with {
					ContinueAction::Decrypt(_, origin, _, _) => origin.clone(),
					_ => None,
				};

				let requester = match *continue_with {
					ContinueAction::Decrypt(ref session, _, _, _) => session.requester().and_then(|r| r.address(&core.meta.id).ok()),
					_ => None,
				};

				if origin.is_some() && requester.is_some() && !error.is_non_fatal() {
					let requester = requester.expect("checked in above condition; qed");
					data.failed_continue_with =
						Some(FailedContinueAction::Decrypt(origin.clone(), requester.clone()));

					let send_result = core.transport.broadcast(KeyVersionNegotiationMessage::KeyVersionsError(KeyVersionsError {
						session: core.meta.id.clone().into(),
						sub_session: core.sub_session.clone().into(),
						session_nonce: core.nonce,
						error: error.clone(),
						continue_with: Some(FailedKeyVersionContinueAction::Decrypt(
							origin.map(Into::into),
							requester.into(),
						)),
					}));

					if let Err(send_error) = send_result {
						warn!(target: "secretstore_net", "{}: failed to broadcast key version negotiation error {}: {}",
							core.meta.self_node_id, error, send_error);
					}
				}
			}

			let result = result.map(Some);
			data.state = SessionState::Finished;
			data.result = Some(result.clone());
			core.completed.send(result);
		}
	}
}

impl<T> ClusterSession for SessionImpl<T> where T: SessionTransport {
	type Id = SessionIdWithSubSession;
	type CreationData = ();
	type SuccessfulResult = Option<(H256, NodeId)>;

	fn type_name() -> &'static str {
		"version negotiation"
	}

	fn id(&self) -> SessionIdWithSubSession {
		SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.sub_session.clone())
	}

	fn is_finished(&self) -> bool {
		self.data.lock().state == SessionState::Finished
	}

	fn on_session_timeout(&self) {
		let mut data = self.data.lock();

		if data.confirmations.is_some() {
			data.confirmations.as_mut().expect("checked a line above; qed").clear();
			Self::try_complete(&self.core, &mut *data);
			if data.state != SessionState::Finished {
				warn!(target: "secretstore_net", "{}: key version negotiation session failed with timeout", self.core.meta.self_node_id);

				data.result = Some(Err(Error::ConsensusTemporaryUnreachable));
				self.core.completed.send(Err(Error::ConsensusTemporaryUnreachable));
			}
		}
	}

	fn on_node_timeout(&self, node: &NodeId) {
		self.on_session_error(node, Error::NodeDisconnected)
	}

	fn on_session_error(&self, node: &NodeId, error: Error) {
		let mut data = self.data.lock();

		if data.confirmations.is_some() {
			let is_waiting_for_confirmation = data.confirmations.as_mut().expect("checked a line above; qed").remove(node);
			if !is_waiting_for_confirmation {
				return;
			}

			Self::try_complete(&self.core, &mut *data);
			if data.state == SessionState::Finished {
				return;
			}
		}

		warn!(target: "secretstore_net", "{}: key version negotiation session failed because of {} from {}",
			self.core.meta.self_node_id, error, node);

		data.state = SessionState::Finished;
		data.result = Some(Err(error.clone()));
		self.core.completed.send(Err(error));
	}

	fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
		match *message {
			Message::KeyVersionNegotiation(ref message) => self.process_message(sender, message),
			_ => unreachable!("cluster checks message to be correct before passing; qed"),
		}
	}
}

impl SessionTransport for IsolatedSessionTransport {
	fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error> {
		self.cluster.broadcast(Message::KeyVersionNegotiation(message))
	}

	fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> {
		self.cluster.send(node, Message::KeyVersionNegotiation(message))
	}
}

impl FastestResultComputer {
	pub fn new(self_node_id: NodeId, key_share: Option<&DocumentKeyShare>, configured_nodes_count: usize, connected_nodes_count: usize) -> Self {
		let threshold = key_share.map(|ks| ks.threshold);
		FastestResultComputer {
			self_node_id,
			threshold,
			configured_nodes_count,
			connected_nodes_count,
		}
	}
}

impl SessionResultComputer for FastestResultComputer {
	fn compute_result(&self, threshold: Option<usize>, confirmations: &BTreeSet<NodeId>, versions: &BTreeMap<H256, BTreeSet<NodeId>>) -> Option<Result<(H256, NodeId), Error>> {
		match self.threshold.or(threshold) {
			// if there are no versions at all && we're not waiting for confirmations anymore
			_ if confirmations.is_empty() && versions.is_empty() => Some(Err(Error::ServerKeyIsNotFound)),
			// if we have key share on this node
			Some(threshold) => {
				// select a version this node has, with enough participants
				let has_key_share = self.threshold.is_some();
				let version = versions.iter().find(|&(_, ref n)| !has_key_share || n.contains(&self.self_node_id) && n.len() >= threshold + 1);
				// if there's no such version, wait for more confirmations
				match version {
					Some((version, nodes)) => Some(Ok((version.clone(), if has_key_share { self.self_node_id.clone() } else { nodes.iter().cloned().nth(0)
						.expect("version is only inserted when there's at least one owner; qed") }))),
					None if !confirmations.is_empty() => None,
					// otherwise - try to find any version
					None => Some(versions.iter()
						.find(|&(_, ref n)| n.len() >= threshold + 1)
						.map(|(version, nodes)| Ok((version.clone(), nodes.iter().cloned().nth(0)
							.expect("version is only inserted when there's at least one owner; qed"))))
						// if there's no version consensus among all connected nodes
						// AND we're connected to ALL configured nodes
						// OR there are less than required nodes for key restore
						// => this means that we can't restore key with CURRENT configuration => respond with fatal error
						// otherwise we could try later, after all nodes are connected
						.unwrap_or_else(|| Err(if self.configured_nodes_count == self.connected_nodes_count
							|| self.configured_nodes_count < threshold + 1 {
							Error::ConsensusUnreachable
						} else {
							Error::ConsensusTemporaryUnreachable
						}))),
				}
			},
			// if we do not have share, then wait for all confirmations
			None if !confirmations.is_empty() => None,
			// ...and select version with largest support
			None => Some(versions.iter()
				.max_by_key(|&(_, ref n)| n.len())
				.map(|(version, nodes)| Ok((version.clone(), nodes.iter().cloned().nth(0)
					.expect("version is only inserted when there's at least one owner; qed"))))
				.unwrap_or_else(|| Err(if self.configured_nodes_count == self.connected_nodes_count {
					Error::ConsensusUnreachable
				} else {
					Error::ConsensusTemporaryUnreachable
				}))),
		}
	}
}

impl SessionResultComputer for LargestSupportResultComputer {
	fn compute_result(&self, _threshold: Option<usize>, confirmations: &BTreeSet<NodeId>, versions: &BTreeMap<H256, BTreeSet<NodeId>>) -> Option<Result<(H256, NodeId), Error>> {
		if !confirmations.is_empty() {
			return None;
		}
		if versions.is_empty() {
			return Some(Err(Error::ServerKeyIsNotFound));
		}

		versions.iter()
			.max_by_key(|&(_, ref n)| n.len())
			.map(|(version, nodes)| Ok((version.clone(), nodes.iter().cloned().nth(0)
				.expect("version is only inserted when there's at least one owner; qed"))))
	}
}
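
// Illustration: with one version held by {A} and another held by {A, B},
// LargestSupportResultComputer picks the {A, B} version once every confirmation has
// arrived, while FastestResultComputer may answer earlier with the first version this
// node itself can recover.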

#[cfg(test)]
mod tests {
	use std::sync::Arc;
	use std::collections::{VecDeque, BTreeMap, BTreeSet};
	use ethereum_types::{H512, H160, Address};
	use crypto::publickey::public_to_address;
	use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage,
		DocumentKeyShare, DocumentKeyShareVersion};
	use key_server_cluster::math;
	use key_server_cluster::cluster::Cluster;
	use key_server_cluster::cluster::tests::DummyCluster;
	use key_server_cluster::cluster_sessions::ClusterSession;
	use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
	use key_server_cluster::decryption_session::create_default_decryption_session;
	use key_server_cluster::message::{
		Message, KeyVersionNegotiationMessage, RequestKeyVersions,
		CommonKeyData, KeyVersions,
	};
	use super::{
		SessionImpl, SessionTransport, SessionParams, FastestResultComputer, LargestSupportResultComputer,
		SessionResultComputer, SessionState, ContinueAction, FailedContinueAction,
	};

	struct DummyTransport {
		cluster: Arc<DummyCluster>,
	}

	impl SessionTransport for DummyTransport {
		fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error> {
			self.cluster.broadcast(Message::KeyVersionNegotiation(message))
		}

		fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> {
			self.cluster.send(node, Message::KeyVersionNegotiation(message))
		}
	}

	struct Node {
		pub cluster: Arc<DummyCluster>,
		pub key_storage: Arc<DummyKeyStorage>,
		pub session: SessionImpl<DummyTransport>,
	}

	struct MessageLoop {
		pub session_id: SessionId,
		pub nodes: BTreeMap<NodeId, Node>,
		pub queue: VecDeque<(NodeId, NodeId, Message)>,
	}

	impl MessageLoop {
		pub fn prepare_nodes(nodes_num: usize) -> BTreeMap<NodeId, Arc<DummyKeyStorage>> {
			(0..nodes_num).map(|_| (math::generate_random_point().unwrap(),
				Arc::new(DummyKeyStorage::default()))).collect()
		}

		pub fn empty(nodes_num: usize) -> Self {
			Self::new(Self::prepare_nodes(nodes_num))
		}

		pub fn new(nodes: BTreeMap<NodeId, Arc<DummyKeyStorage>>) -> Self {
			let master_node_id = nodes.keys().cloned().nth(0).unwrap();
			let sub_session = math::generate_random_scalar().unwrap();
			let all_nodes_ids: BTreeSet<_> = nodes.keys().cloned().collect();
			MessageLoop {
				session_id: Default::default(),
				nodes: nodes.iter().map(|(node_id, key_storage)| {
					let cluster = Arc::new(DummyCluster::new(node_id.clone()));
					cluster.add_nodes(all_nodes_ids.iter().cloned());
					(node_id.clone(), Node {
						cluster: cluster.clone(),
						key_storage: key_storage.clone(),
						session: SessionImpl::new(SessionParams {
							meta: ShareChangeSessionMeta {
								id: Default::default(),
								self_node_id: node_id.clone(),
								master_node_id: master_node_id.clone(),
								configured_nodes_count: nodes.len(),
								connected_nodes_count: nodes.len(),
							},
							sub_session: sub_session.clone(),
							key_share: key_storage.get(&Default::default()).unwrap(),
							result_computer: Arc::new(FastestResultComputer::new(
								node_id.clone(),
								key_storage.get(&Default::default()).unwrap().as_ref(),
								nodes.len(), nodes.len()
							)),
							transport: DummyTransport {
								cluster: cluster,
							},
							nonce: 0,
						}).0,
					})
				}).collect(),
				queue: VecDeque::new(),
			}
		}

		pub fn node_id(&self, idx: usize) -> &NodeId {
			self.nodes.keys().nth(idx).unwrap()
		}

		pub fn session(&self, idx: usize) -> &SessionImpl<DummyTransport> {
			&self.nodes.values().nth(idx).unwrap().session
		}

		pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> {
			self.nodes.values()
				.filter_map(|n| n.cluster.take_message().map(|m| (n.session.meta().self_node_id.clone(), m.0, m.1)))
				.nth(0)
				.or_else(|| self.queue.pop_front())
		}

		pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> {
			match msg.2 {
				Message::KeyVersionNegotiation(message) =>
					self.nodes[&msg.1].session.process_message(&msg.0, &message),
				_ => panic!("unexpected"),
			}
		}

		pub fn run(&mut self) {
			while let Some((from, to, message)) = self.take_message() {
				self.process_message((from, to, message)).unwrap();
			}
		}
	}
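
	// Typical driver for the tests below: build a loop, initialize the master session
	// with the full node set, then pump messages until the queue drains, e.g.
	//
	// 	let mut ml = MessageLoop::empty(3);
	// 	ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();
	// 	ml.run();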

	#[test]
	fn negotiation_fails_if_initialized_twice() {
		let ml = MessageLoop::empty(1);
		assert_eq!(ml.session(0).initialize(BTreeSet::new()), Ok(()));
		assert_eq!(ml.session(0).initialize(BTreeSet::new()), Err(Error::InvalidStateForRequest));
	}

	#[test]
	fn negotiation_fails_if_message_contains_wrong_nonce() {
		let ml = MessageLoop::empty(2);
		assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 100,
		})), Err(Error::ReplayProtection));
	}

	#[test]
	fn negotiation_fails_if_versions_request_received_from_non_master() {
		let ml = MessageLoop::empty(3);
		assert_eq!(ml.session(2).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
		})), Err(Error::InvalidMessage));
	}

	#[test]
	fn negotiation_fails_if_versions_request_received_twice() {
		let ml = MessageLoop::empty(2);
		assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
		})), Ok(()));
		assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
		})), Err(Error::InvalidStateForRequest));
	}

	#[test]
	fn negotiation_fails_if_versions_received_before_initialization() {
		let ml = MessageLoop::empty(2);
		assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
			key_common: Some(CommonKeyData {
				threshold: 10,
				author: Default::default(),
				public: Default::default(),
			}),
			versions: Vec::new(),
		})), Err(Error::InvalidStateForRequest));
	}

	#[test]
	fn negotiation_does_not_fails_if_versions_received_after_completion() {
		let ml = MessageLoop::empty(3);
		ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();
		assert_eq!(ml.session(0).data.lock().state, SessionState::WaitingForResponses);

		let version_id = (*math::generate_random_scalar().unwrap()).clone();
		assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
			key_common: Some(CommonKeyData {
				threshold: 0,
				author: Default::default(),
				public: Default::default(),
			}),
			versions: vec![version_id.clone().into()]
		})), Ok(()));
		assert_eq!(ml.session(0).data.lock().state, SessionState::Finished);

		assert_eq!(ml.session(0).process_message(ml.node_id(2), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
			key_common: Some(CommonKeyData {
				threshold: 0,
				author: Default::default(),
				public: Default::default(),
			}),
			versions: vec![version_id.clone().into()]
		})), Ok(()));
		assert_eq!(ml.session(0).data.lock().state, SessionState::Finished);
	}

	#[test]
	fn negotiation_fails_if_wrong_common_data_sent() {
		fn run_test(key_common: CommonKeyData) {
			let ml = MessageLoop::empty(3);
			ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();

			let version_id = (*math::generate_random_scalar().unwrap()).clone();
			assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
				session: Default::default(),
				sub_session: math::generate_random_scalar().unwrap().into(),
				session_nonce: 0,
				key_common: Some(CommonKeyData {
					threshold: 1,
					author: Default::default(),
					public: Default::default(),
				}),
				versions: vec![version_id.clone().into()]
			})), Ok(()));
			assert_eq!(ml.session(0).process_message(ml.node_id(2), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
				session: Default::default(),
				sub_session: math::generate_random_scalar().unwrap().into(),
				session_nonce: 0,
				key_common: Some(key_common),
				versions: vec![version_id.clone().into()]
			})), Err(Error::InvalidMessage));
		}

		run_test(CommonKeyData {
			threshold: 2,
			author: Default::default(),
			public: Default::default(),
		});

		run_test(CommonKeyData {
			threshold: 1,
			author: H160::from_low_u64_be(1).into(),
			public: Default::default(),
		});

		run_test(CommonKeyData {
			threshold: 1,
			author: H160::from_low_u64_be(2).into(),
			public: Default::default(),
		});
	}

	#[test]
	fn negotiation_fails_if_threshold_empty_when_versions_are_not_empty() {
		let ml = MessageLoop::empty(2);
		ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();

		let version_id = (*math::generate_random_scalar().unwrap()).clone();
		assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
			session: Default::default(),
			sub_session: math::generate_random_scalar().unwrap().into(),
			session_nonce: 0,
			key_common: None,
			versions: vec![version_id.clone().into()]
		})), Err(Error::InvalidMessage));
	}

	#[test]
	fn fast_negotiation_does_not_completes_instantly_when_enough_share_owners_are_connected() {
		let nodes = MessageLoop::prepare_nodes(2);
		let version_id = (*math::generate_random_scalar().unwrap()).clone();
		nodes.values().nth(0).unwrap().insert(Default::default(), DocumentKeyShare {
			author: H160::from_low_u64_be(2),
			threshold: 1,
			public: H512::from_low_u64_be(3),
			common_point: None,
			encrypted_point: None,
			versions: vec![DocumentKeyShareVersion {
				hash: version_id,
				id_numbers: vec![(nodes.keys().cloned().nth(0).unwrap(), math::generate_random_scalar().unwrap())].into_iter().collect(),
				secret_share: math::generate_random_scalar().unwrap(),
			}],
		}).unwrap();
		let ml = MessageLoop::new(nodes);
		ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();
		// we can't be sure that the node has the given key version, because a previous ShareAdd session could have failed
		assert!(ml.session(0).data.lock().state != SessionState::Finished);

		// check that upon completion, common key data is known
		assert_eq!(ml.session(0).common_key_data(), Ok(DocumentKeyShare {
			author: H160::from_low_u64_be(2),
			threshold: 1,
			public: H512::from_low_u64_be(3),
			..Default::default()
		}));
	}

	#[test]
	fn fastest_computer_returns_missing_share_if_no_versions_returned() {
		let computer = FastestResultComputer {
			self_node_id: Default::default(),
			threshold: None,
			configured_nodes_count: 1,
			connected_nodes_count: 1,
		};
		assert_eq!(computer.compute_result(Some(10), &Default::default(), &Default::default()), Some(Err(Error::ServerKeyIsNotFound)));
	}
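
	// A hedged sketch of the threshold rule (a version needs threshold + 1 holders
	// before FastestResultComputer will select it); this test is illustrative and was
	// not part of the original suite:
	#[test]
	fn fastest_computer_waits_for_enough_holders_sketch() {
		let self_node_id: NodeId = math::generate_random_point().unwrap();
		let other_node: NodeId = math::generate_random_point().unwrap();
		let computer = FastestResultComputer {
			self_node_id: self_node_id.clone(),
			threshold: Some(1),
			configured_nodes_count: 3,
			connected_nodes_count: 3,
		};
		// one confirmation is still outstanding and the only version has a single
		// holder (needs 1 + 1 = 2), so the computer keeps waiting (returns None)
		let confirmations: BTreeSet<NodeId> = vec![other_node].into_iter().collect();
		let mut versions = BTreeMap::new();
		versions.insert((*math::generate_random_scalar().unwrap()).clone(),
			vec![self_node_id].into_iter().collect());
		assert_eq!(computer.compute_result(None, &confirmations, &versions), None);
	}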

	#[test]
	fn largest_computer_returns_missing_share_if_no_versions_returned() {
		let computer = LargestSupportResultComputer;
		assert_eq!(computer.compute_result(Some(10), &Default::default(), &Default::default()), Some(Err(Error::ServerKeyIsNotFound)));
	}

	#[test]
	fn fatal_error_is_not_broadcasted_if_started_without_origin() {
		let mut ml = MessageLoop::empty(3);
		ml.session(0).set_continue_action(ContinueAction::Decrypt(create_default_decryption_session(), None, false, false));
		ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();
		ml.run();

		assert!(ml.nodes.values().all(|n| n.session.is_finished() &&
			n.session.take_failed_continue_action().is_none()));
	}

	#[test]
	fn fatal_error_is_broadcasted_if_started_with_origin() {
		let mut ml = MessageLoop::empty(3);
		ml.session(0).set_continue_action(ContinueAction::Decrypt(create_default_decryption_session(), Some(Address::from_low_u64_be(1)), true, true));
		ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();
		ml.run();

		// on all nodes session is completed
		assert!(ml.nodes.values().all(|n| n.session.is_finished()));

		// slave nodes have non-empty failed continue action
		assert!(ml.nodes.values().skip(1).all(|n| n.session.take_failed_continue_action()
			== Some(FailedContinueAction::Decrypt(Some(Address::from_low_u64_be(1)), public_to_address(&H512::from_low_u64_be(2))))));
	}
}
@ -1,53 +0,0 @@
|
|||||||
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

pub mod key_version_negotiation_session;
pub mod servers_set_change_session;
pub mod share_add_session;
pub mod share_change_session;

mod sessions_queue;

use key_server_cluster::{SessionId, NodeId, SessionMeta, Error};

/// Share change session metadata.
#[derive(Debug, Clone)]
pub struct ShareChangeSessionMeta {
	/// Key id.
	pub id: SessionId,
	/// Id of node, which has started this session.
	pub master_node_id: NodeId,
	/// Id of node, on which this session is running.
	pub self_node_id: NodeId,
	/// Count of all configured key server nodes.
	pub configured_nodes_count: usize,
	/// Count of all connected key server nodes.
	pub connected_nodes_count: usize,
}

impl ShareChangeSessionMeta {
	/// Convert to consensus session meta. `all_nodes_set` is the union of `old_nodes_set` && `new_nodes_set`.
	pub fn into_consensus_meta(self, all_nodes_set_len: usize) -> Result<SessionMeta, Error> {
		Ok(SessionMeta {
			id: self.id,
			master_node_id: self.master_node_id,
			self_node_id: self.self_node_id,
			threshold: all_nodes_set_len.checked_sub(1).ok_or(Error::ConsensusUnreachable)?,
			configured_nodes_count: self.configured_nodes_count,
			connected_nodes_count: self.connected_nodes_count,
		})
	}
}
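
For clarity, a minimal sketch of the conversion above (the `master` node id is a hypothetical value; everything else is defined in this file): the consensus threshold is simply the size of the combined nodes set minus one.

// Sketch: converting a 4-node share change meta into a consensus meta.
// With all_nodes_set_len == 4, the threshold becomes 3 (n - 1); a zero-length
// set would yield Error::ConsensusUnreachable via checked_sub.
let meta = ShareChangeSessionMeta {
	id: Default::default(),
	master_node_id: master,
	self_node_id: master,
	configured_nodes_count: 4,
	connected_nodes_count: 4,
};
let consensus_meta = meta.into_consensus_meta(4)?;
assert_eq!(consensus_meta.threshold, 3);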
File diff suppressed because it is too large
@@ -1,56 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::collections::{VecDeque, BTreeSet};
use key_server_cluster::{Error, SessionId, KeyStorage};

/// Queue of share change sessions.
pub struct SessionsQueue {
	/// Sessions, known on this node.
	known_sessions: VecDeque<SessionId>,
	/// Unknown sessions.
	unknown_sessions: VecDeque<SessionId>,
}

impl SessionsQueue {
	/// Create new sessions queue.
	pub fn new(key_storage: &Arc<dyn KeyStorage>, unknown_sessions: BTreeSet<SessionId>) -> Self {
		// TODO [Opt]:
		// 1) known sessions - change to iter
		// 2) unknown sessions - request chunk-by-chunk
		SessionsQueue {
			known_sessions: key_storage.iter().map(|(k, _)| k).collect(),
			unknown_sessions: unknown_sessions.into_iter().collect(),
		}
	}
}

impl Iterator for SessionsQueue {
	type Item = Result<SessionId, Error>;

	fn next(&mut self) -> Option<Self::Item> {
		if let Some(known_session) = self.known_sessions.pop_front() {
			return Some(Ok(known_session));
		}

		if let Some(unknown_session) = self.unknown_sessions.pop_front() {
			return Some(Ok(unknown_session));
		}

		None
	}
}
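
A small usage sketch of the iteration order (`key_storage` and `unknown_sessions` are assumed to be in scope): sessions known to this node are always drained before the unknown ones.

// Sketch: known sessions (read from key storage) are yielded first,
// then the unknown session ids supplied by the caller.
let queue = SessionsQueue::new(&key_storage, unknown_sessions);
for session_id in queue {
	let session_id = session_id?;
	// ...schedule a share change session for session_id...
}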
File diff suppressed because it is too large
@@ -1,333 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::collections::{BTreeSet, BTreeMap};
use ethereum_types::H256;
use crypto::publickey::Secret;
use key_server_cluster::{Error, NodeId, SessionId, ServerKeyId, KeyStorage};
use key_server_cluster::cluster::Cluster;
use key_server_cluster::cluster_sessions::ClusterSession;
use key_server_cluster::math;
use key_server_cluster::jobs::servers_set_change_access_job::ServersSetChangeAccessRequest;
use key_server_cluster::jobs::job_session::JobTransport;
use key_server_cluster::message::{Message, ServersSetChangeMessage, ServersSetChangeShareAddMessage};
use key_server_cluster::share_add_session::{SessionTransport as ShareAddSessionTransport,
	SessionImpl as ShareAddSessionImpl, SessionParams as ShareAddSessionParams};
use key_server_cluster::message::ShareAddMessage;
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;

/// Share change session for a single key. Brief overview:
/// 1) nodes that have already been removed from the cluster (isolated nodes) are removed from the session
/// 2) new shares are added to the session
/// 3) shares are moved between nodes
/// 4) shares are removed from nodes
pub struct ShareChangeSession {
	/// Servers set change session id.
	session_id: SessionId,
	/// Session nonce.
	nonce: u64,
	/// Share change session meta.
	meta: ShareChangeSessionMeta,
	/// Cluster.
	cluster: Arc<dyn Cluster>,
	/// Key storage.
	key_storage: Arc<dyn KeyStorage>,
	/// Key version.
	key_version: H256,
	/// Nodes that have reported version ownership.
	version_holders: Option<BTreeSet<NodeId>>,
	/// Consensus group to use in ShareAdd session.
	consensus_group: Option<BTreeSet<NodeId>>,
	/// Nodes to add shares for.
	new_nodes_map: Option<BTreeMap<NodeId, Option<Secret>>>,
	/// Share add session.
	share_add_session: Option<ShareAddSessionImpl<ShareChangeTransport>>,
	/// Is finished.
	is_finished: bool,
}

/// Share change session plan.
#[derive(Debug)]
pub struct ShareChangeSessionPlan {
	/// Key version that plan is valid for.
	pub key_version: H256,
	/// Nodes that have reported version ownership.
	pub version_holders: BTreeSet<NodeId>,
	/// Consensus group to use in ShareAdd session.
	pub consensus_group: BTreeSet<NodeId>,
	/// Nodes to add shares for.
	pub new_nodes_map: BTreeMap<NodeId, Option<Secret>>,
}

/// Session parameters.
pub struct ShareChangeSessionParams {
	/// Servers set change session id.
	pub session_id: SessionId,
	/// Session nonce.
	pub nonce: u64,
	/// Share change session meta.
	pub meta: ShareChangeSessionMeta,
	/// Cluster.
	pub cluster: Arc<dyn Cluster>,
	/// Key storage.
	pub key_storage: Arc<dyn KeyStorage>,
	/// Session plan.
	pub plan: ShareChangeSessionPlan,
}

/// Share add session transport.
#[derive(Clone)]
pub struct ShareChangeTransport {
	/// Servers set change session id.
	session_id: SessionId,
	/// Session nonce.
	nonce: u64,
	/// Cluster.
	cluster: Arc<dyn Cluster>,
}

impl ShareChangeSession {
	/// Create new share change session.
	pub fn new(params: ShareChangeSessionParams) -> Result<Self, Error> {
		// we can't create the sessions right away, because the key share is read when a session is created, but it can still be changed by a previous session
		let key_version = params.plan.key_version;
		let consensus_group = if !params.plan.consensus_group.is_empty() { Some(params.plan.consensus_group) } else { None };
		let version_holders = if !params.plan.version_holders.is_empty() { Some(params.plan.version_holders) } else { None };
		let new_nodes_map = if !params.plan.new_nodes_map.is_empty() { Some(params.plan.new_nodes_map) } else { None };
		debug_assert!(new_nodes_map.is_some());

		let is_finished = new_nodes_map.is_none();
		Ok(ShareChangeSession {
			session_id: params.session_id,
			nonce: params.nonce,
			meta: params.meta,
			cluster: params.cluster,
			key_storage: params.key_storage,
			key_version: key_version,
			version_holders: version_holders,
			consensus_group: consensus_group,
			new_nodes_map: new_nodes_map,
			share_add_session: None,
			is_finished: is_finished,
		})
	}

	/// Is finished?
	pub fn is_finished(&self) -> bool {
		self.is_finished
	}

	/// Is this the master node?
	pub fn is_master(&self) -> bool {
		self.meta.self_node_id == self.meta.master_node_id
	}

	/// Initialize session (on master node).
	pub fn initialize(&mut self) -> Result<(), Error> {
		self.proceed_to_next_state()
	}

	/// When share-add message is received.
	pub fn on_share_add_message(&mut self, sender: &NodeId, message: &ShareAddMessage) -> Result<(), Error> {
		if self.share_add_session.is_none() {
			self.create_share_add_session()?;
		}

		let change_state_needed = self.share_add_session.as_ref()
			.map(|share_add_session| {
				let was_finished = share_add_session.is_finished();
				share_add_session.process_message(sender, message)
					.map(|_| share_add_session.is_finished() && !was_finished)
			})
			.unwrap_or(Err(Error::InvalidMessage))?;
		if change_state_needed {
			self.proceed_to_next_state()?;
		}

		Ok(())
	}

	/// Create new share add session.
	fn create_share_add_session(&mut self) -> Result<(), Error> {
		let consensus_group = self.consensus_group.take().ok_or(Error::InvalidStateForRequest)?;
		let version_holders = self.version_holders.take().ok_or(Error::InvalidStateForRequest)?;
		let new_nodes_map = self.new_nodes_map.take().ok_or(Error::InvalidStateForRequest)?;
		let (share_add_session, _) = ShareAddSessionImpl::new(ShareAddSessionParams {
			meta: self.meta.clone(),
			nonce: self.nonce,
			transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()),
			key_storage: self.key_storage.clone(),
			admin_public: None,
		})?;
		share_add_session.set_consensus_output(&self.key_version, consensus_group, version_holders, new_nodes_map)?;
		self.share_add_session = Some(share_add_session);
		Ok(())
	}

	/// Proceed to the next state.
	fn proceed_to_next_state(&mut self) -> Result<(), Error> {
		if self.meta.self_node_id != self.meta.master_node_id {
			if self.new_nodes_map.is_none() {
				self.is_finished = true;
			}
			return Ok(());
		}

		if self.new_nodes_map.is_some() {
			self.create_share_add_session()?;
			return self.share_add_session.as_ref()
				.expect("either create_share_add_session fails, or session is created; qed")
				.initialize(None, None, None, None);
		}

		self.is_finished = true;

		Ok(())
	}
}
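
A hedged sketch of how the master node would drive one of these sessions (the `session_id`, `nonce`, `meta`, `cluster`, `key_storage` and `plan` values are assumed to come from the enclosing servers set change session):

// Sketch: master-side lifecycle of a single share change session.
let mut session = ShareChangeSession::new(ShareChangeSessionParams {
	session_id,
	nonce,
	meta,
	cluster,
	key_storage,
	plan, // produced by prepare_share_change_session_plan (below)
})?;
session.initialize()?; // creates and initializes the inner ShareAdd session
// subsequent ShareAdd messages are then fed in until completion:
// session.on_share_add_message(&sender, &message)?;
// session.is_finished() turns true once the ShareAdd session finishes.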

impl ShareChangeTransport {
	pub fn new(session_id: SessionId, nonce: u64, cluster: Arc<dyn Cluster>) -> Self {
		ShareChangeTransport {
			session_id: session_id,
			nonce: nonce,
			cluster: cluster,
		}
	}
}

impl JobTransport for ShareChangeTransport {
	type PartialJobRequest = ServersSetChangeAccessRequest;
	type PartialJobResponse = bool;

	fn send_partial_request(&self, _node: &NodeId, _request: ServersSetChangeAccessRequest) -> Result<(), Error> {
		unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed")
	}

	fn send_partial_response(&self, _node: &NodeId, _response: bool) -> Result<(), Error> {
		unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed")
	}
}

impl ShareAddSessionTransport for ShareChangeTransport {
	fn nodes(&self) -> BTreeSet<NodeId> {
		self.cluster.nodes()
	}

	fn set_master_data(&mut self, _consensus_group: BTreeSet<NodeId>, _version_holders: BTreeSet<NodeId>, _id_numbers: BTreeMap<NodeId, Option<Secret>>) {
		unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed")
	}

	fn send(&self, node: &NodeId, message: ShareAddMessage) -> Result<(), Error> {
		self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(ServersSetChangeShareAddMessage {
			session: self.session_id.clone().into(),
			session_nonce: self.nonce,
			message: message,
		})))
	}
}

/// Prepare a share change plan for moving from `old_key_version_owners` to `new_nodes_set`.
pub fn prepare_share_change_session_plan(cluster_nodes: &BTreeSet<NodeId>, threshold: usize, key_id: &ServerKeyId, key_version: H256, master: &NodeId, old_key_version_owners: &BTreeSet<NodeId>, new_nodes_set: &BTreeSet<NodeId>) -> Result<ShareChangeSessionPlan, Error> {
	// we can't do anything if there are not enough shares
	if old_key_version_owners.len() < threshold + 1 {
		warn!("cannot add shares to key {} with threshold {}: only {} share owners are available",
			key_id, threshold, old_key_version_owners.len());
		return Ok(ShareChangeSessionPlan {
			key_version: key_version,
			version_holders: Default::default(),
			consensus_group: Default::default(),
			new_nodes_map: Default::default(),
		});
	}

	// warn if we're losing the key
	if new_nodes_set.len() < threshold + 1 {
		warn!("losing key {} with threshold {}: only {} nodes left after servers set change session",
			key_id, threshold, new_nodes_set.len());
	}

	// make new nodes map, so that:
	// all non-isolated old nodes will have their id number preserved
	// all new nodes will have a new id number
	let mut new_nodes_map = new_nodes_set.difference(&old_key_version_owners)
		.map(|n| math::generate_random_scalar().map(|id| (n.clone(), Some(id))))
		.collect::<Result<BTreeMap<_, _>, _>>()?;
	if !new_nodes_map.is_empty() {
		for old_node in old_key_version_owners.iter().filter(|n| cluster_nodes.contains(n)) {
			new_nodes_map.insert(old_node.clone(), None);
		}
	}

	// select consensus group if there are some nodes to add
	let consensus_group = if !new_nodes_map.is_empty() {
		::std::iter::once(master.clone())
			.chain(old_key_version_owners.iter()
				.filter(|n| *n != master && cluster_nodes.contains(*n))
				.take(threshold)
				.cloned())
			.collect()
	} else {
		BTreeSet::new()
	};

	Ok(ShareChangeSessionPlan {
		key_version: key_version,
		version_holders: old_key_version_owners.clone(),
		consensus_group: consensus_group,
		new_nodes_map: new_nodes_map,
	})
}

impl ShareChangeSessionPlan {
	/// Is this an empty (nothing-to-do) plan?
	pub fn is_empty(&self) -> bool {
		self.new_nodes_map.is_empty()
	}
}

#[cfg(test)]
mod tests {
	use key_server_cluster::math;
	use super::prepare_share_change_session_plan;

	#[test]
	fn share_change_plan_creates_empty_plan() {
		let cluster_nodes: Vec<_> = (0..3).map(|_| math::generate_random_point().unwrap()).collect();
		let master = cluster_nodes[0].clone();
		let old_key_version_owners = cluster_nodes.iter().cloned().collect();
		let new_nodes_set = cluster_nodes.iter().cloned().collect();
		let plan = prepare_share_change_session_plan(&cluster_nodes.iter().cloned().collect(),
			1, &Default::default(), Default::default(), &master, &old_key_version_owners, &new_nodes_set).unwrap();

		assert!(plan.is_empty());
	}

	#[test]
	fn share_change_plan_adds_new_nodes() {
		let cluster_nodes: Vec<_> = (0..3).map(|_| math::generate_random_point().unwrap()).collect();
		let master = cluster_nodes[0].clone();
		let old_key_version_owners = cluster_nodes[0..2].iter().cloned().collect();
		let new_nodes_set = cluster_nodes.iter().cloned().collect();
		let plan = prepare_share_change_session_plan(&cluster_nodes.iter().cloned().collect(),
			1, &Default::default(), Default::default(), &master, &old_key_version_owners, &new_nodes_set).unwrap();

		assert!(!plan.is_empty());
		assert_eq!(old_key_version_owners, plan.consensus_group);
		assert_eq!(new_nodes_set, plan.new_nodes_map.keys().cloned().collect());
	}
}
File diff suppressed because it is too large
@@ -1,344 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::collections::BTreeMap;
use std::fmt::{Debug, Formatter, Error as FmtError};
use std::sync::Arc;
use futures::Oneshot;
use parking_lot::Mutex;
use ethereum_types::Address;
use crypto::publickey::Public;
use key_server_cluster::{Error, NodeId, SessionId, Requester, KeyStorage,
	DocumentKeyShare, ServerKeyId};
use key_server_cluster::cluster::Cluster;
use key_server_cluster::cluster_sessions::{ClusterSession, CompletionSignal};
use key_server_cluster::message::{Message, EncryptionMessage, InitializeEncryptionSession,
	ConfirmEncryptionInitialization, EncryptionSessionError};

/// Encryption (distributed key generation) session.
/// Based on the "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper:
/// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf
/// Brief overview:
/// 1) initialization: master node (which has received the request for storing the secret) initializes the session on all other nodes
/// 2) master node sends common_point + encrypted_point to all other nodes
/// 3) common_point + encrypted_point are saved on all nodes
/// 4) in case of error, previous values are restored
pub struct SessionImpl {
	/// Unique session id.
	id: SessionId,
	/// Public identifier of this node.
	self_node_id: NodeId,
	/// Encrypted data.
	encrypted_data: Option<DocumentKeyShare>,
	/// Key storage.
	key_storage: Arc<dyn KeyStorage>,
	/// Cluster which allows this node to send messages to other nodes in the cluster.
	cluster: Arc<dyn Cluster>,
	/// Session nonce.
	nonce: u64,
	/// Session completion signal.
	completed: CompletionSignal<()>,
	/// Mutable session data.
	data: Mutex<SessionData>,
}

/// SessionImpl creation parameters.
pub struct SessionParams {
	/// SessionImpl identifier.
	pub id: SessionId,
	/// Id of node, on which this session is running.
	pub self_node_id: Public,
	/// Encrypted data (result of running generation_session::SessionImpl).
	pub encrypted_data: Option<DocumentKeyShare>,
	/// Key storage.
	pub key_storage: Arc<dyn KeyStorage>,
	/// Cluster.
	pub cluster: Arc<dyn Cluster>,
	/// Session nonce.
	pub nonce: u64,
}

/// Mutable data of encryption (distributed key generation) session.
#[derive(Debug)]
struct SessionData {
	/// Current state of the session.
	state: SessionState,
	/// Node-specific data.
	nodes: BTreeMap<NodeId, NodeData>,
	/// Encryption session result.
	result: Option<Result<(), Error>>,
}

/// Mutable node-specific data.
#[derive(Debug, Clone)]
struct NodeData {
	// === Values, filled during initialization phase ===
	/// Flag marking that the node has confirmed session initialization.
	pub initialization_confirmed: bool,
}

/// Encryption (distributed key generation) session state.
#[derive(Debug, Clone, PartialEq)]
pub enum SessionState {
	// === Initialization states ===
	/// Every node starts in this state.
	WaitingForInitialization,
	/// Master node waits for every other node to confirm initialization.
	WaitingForInitializationConfirm,

	// === Final states of the session ===
	/// Encryption data is saved.
	Finished,
	/// Failed to save encryption data.
	Failed,
}

impl SessionImpl {
	/// Create new encryption session.
	pub fn new(params: SessionParams) -> Result<(Self, Oneshot<Result<(), Error>>), Error> {
		check_encrypted_data(params.encrypted_data.as_ref())?;

		let (completed, oneshot) = CompletionSignal::new();
		Ok((SessionImpl {
			id: params.id,
			self_node_id: params.self_node_id,
			encrypted_data: params.encrypted_data,
			key_storage: params.key_storage,
			cluster: params.cluster,
			nonce: params.nonce,
			completed,
			data: Mutex::new(SessionData {
				state: SessionState::WaitingForInitialization,
				nodes: BTreeMap::new(),
				result: None,
			}),
		}, oneshot))
	}

	/// Get this node's id.
	pub fn node(&self) -> &NodeId {
		&self.self_node_id
	}

	/// Start new session initialization. This must be called on the master node.
	pub fn initialize(&self, requester: Requester, common_point: Public, encrypted_point: Public) -> Result<(), Error> {
		let mut data = self.data.lock();

		// check state
		if data.state != SessionState::WaitingForInitialization {
			return Err(Error::InvalidStateForRequest);
		}

		// update state
		data.state = SessionState::WaitingForInitializationConfirm;
		data.nodes.extend(self.cluster.nodes().into_iter().map(|n| (n, NodeData {
			initialization_confirmed: &n == self.node(),
		})));

		// TODO [Sec]: id signature is not enough here, as it was already used in key generation
		// TODO [Reliability]: there could be a situation where some nodes have failed to store encrypted data
		// => potential problems during restore. some confirmation step is needed (2pc)?
		// save encryption data
		if let Some(encrypted_data) = self.encrypted_data.clone() {
			let requester_address = requester.address(&self.id).map_err(Error::InsufficientRequesterData)?;
			update_encrypted_data(&self.key_storage, self.id.clone(),
				encrypted_data, requester_address, common_point.clone(), encrypted_point.clone())?;
		}

		// start initialization
		if data.nodes.len() > 1 {
			self.cluster.broadcast(Message::Encryption(EncryptionMessage::InitializeEncryptionSession(InitializeEncryptionSession {
				session: self.id.clone().into(),
				session_nonce: self.nonce,
				requester: requester.into(),
				common_point: common_point.into(),
				encrypted_point: encrypted_point.into(),
			})))
		} else {
			data.state = SessionState::Finished;
			data.result = Some(Ok(()));
			self.completed.send(Ok(()));

			Ok(())
		}
	}

	/// When session initialization message is received.
	pub fn on_initialize_session(&self, sender: NodeId, message: &InitializeEncryptionSession) -> Result<(), Error> {
		debug_assert!(self.id == *message.session);
		debug_assert!(&sender != self.node());

		let mut data = self.data.lock();

		// check state
		if data.state != SessionState::WaitingForInitialization {
			return Err(Error::InvalidStateForRequest);
		}

		// check that the requester is the author of the encrypted data
		if let Some(encrypted_data) = self.encrypted_data.clone() {
			let requester: Requester = message.requester.clone().into();
			let requester_address = requester.address(&self.id).map_err(Error::InsufficientRequesterData)?;
			update_encrypted_data(&self.key_storage, self.id.clone(),
				encrypted_data, requester_address, message.common_point.clone().into(), message.encrypted_point.clone().into())?;
		}

		// update state
		data.state = SessionState::Finished;

		// send confirmation back to master node
		self.cluster.send(&sender, Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization(ConfirmEncryptionInitialization {
			session: self.id.clone().into(),
			session_nonce: self.nonce,
		})))
	}

	/// When session initialization confirmation message is received.
	pub fn on_confirm_initialization(&self, sender: NodeId, message: &ConfirmEncryptionInitialization) -> Result<(), Error> {
		debug_assert!(self.id == *message.session);
		debug_assert!(&sender != self.node());

		let mut data = self.data.lock();
		debug_assert!(data.nodes.contains_key(&sender));

		// check if all nodes have confirmed initialization
		data.nodes.get_mut(&sender)
			.expect("message is received from cluster; nodes contains all cluster nodes; qed")
			.initialization_confirmed = true;
		if !data.nodes.values().all(|n| n.initialization_confirmed) {
			return Ok(());
		}

		// update state
		data.state = SessionState::Finished;
		data.result = Some(Ok(()));
		self.completed.send(Ok(()));

		Ok(())
	}
}
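
The happy path of the protocol described above, sketched from the master node's side (the `id`, `self_node_id`, `encrypted_data`, `key_storage`, `cluster`, `nonce`, `requester`, `common_point` and `encrypted_point` values are assumed to come from a finished generation session and its surroundings):

// Sketch: master-side happy path of the encryption session.
// new() verifies via check_encrypted_data that no document key is stored yet;
// initialize() saves common_point/encrypted_point locally and broadcasts
// InitializeEncryptionSession; each slave saves the data and replies with
// ConfirmEncryptionInitialization; once all nodes have confirmed, the
// completion signal fires.
let (session, oneshot) = SessionImpl::new(SessionParams {
	id, self_node_id, encrypted_data, key_storage, cluster, nonce,
})?;
session.initialize(requester, common_point, encrypted_point)?;
// `oneshot` resolves with Ok(()) when every node has confirmed.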

impl ClusterSession for SessionImpl {
	type Id = SessionId;
	type CreationData = ();
	type SuccessfulResult = ();

	fn type_name() -> &'static str {
		"encryption"
	}

	fn id(&self) -> SessionId {
		self.id.clone()
	}

	fn is_finished(&self) -> bool {
		let data = self.data.lock();
		data.state == SessionState::Failed
			|| data.state == SessionState::Finished
	}

	fn on_node_timeout(&self, node: &NodeId) {
		let mut data = self.data.lock();

		warn!("{}: encryption session failed because connection to {} has timed out", self.node(), node);

		data.state = SessionState::Failed;
		data.result = Some(Err(Error::NodeDisconnected));
		self.completed.send(Err(Error::NodeDisconnected));
	}

	fn on_session_timeout(&self) {
		let mut data = self.data.lock();

		warn!("{}: encryption session failed with timeout", self.node());

		data.state = SessionState::Failed;
		data.result = Some(Err(Error::NodeDisconnected));
		self.completed.send(Err(Error::NodeDisconnected));
	}

	fn on_session_error(&self, node: &NodeId, error: Error) {
		// error in encryption session is considered fatal
		// => broadcast error if the error occurred on this node
		if *node == self.self_node_id {
			// do not bother processing the send error, as we are already processing an error
			let _ = self.cluster.broadcast(Message::Encryption(EncryptionMessage::EncryptionSessionError(EncryptionSessionError {
				session: self.id.clone().into(),
				session_nonce: self.nonce,
				error: error.clone().into(),
			})));
		}

		let mut data = self.data.lock();

		warn!("{}: encryption session failed with error: {} from {}", self.node(), error, node);

		data.state = SessionState::Failed;
		data.result = Some(Err(error.clone()));
		self.completed.send(Err(error));
	}

	fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
		if Some(self.nonce) != message.session_nonce() {
			return Err(Error::ReplayProtection);
		}

		match message {
			&Message::Encryption(ref message) => match message {
				&EncryptionMessage::InitializeEncryptionSession(ref message) =>
					self.on_initialize_session(sender.clone(), message),
				&EncryptionMessage::ConfirmEncryptionInitialization(ref message) =>
					self.on_confirm_initialization(sender.clone(), message),
				&EncryptionMessage::EncryptionSessionError(ref message) => {
					self.on_session_error(sender, message.error.clone());
					Ok(())
				},
			},
			_ => unreachable!("cluster checks message to be correct before passing; qed"),
		}
	}
}

impl Debug for SessionImpl {
	fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
		write!(f, "Encryption session {} on {}", self.id, self.self_node_id)
	}
}

/// Check that common_point and encrypted_point are not yet set in the key share.
pub fn check_encrypted_data(key_share: Option<&DocumentKeyShare>) -> Result<(), Error> {
	if let Some(key_share) = key_share {
		// check that common_point and encrypted_point are not set yet
		if key_share.common_point.is_some() || key_share.encrypted_point.is_some() {
			return Err(Error::DocumentKeyAlreadyStored);
		}
	}

	Ok(())
}

/// Update key share with encrypted document key.
pub fn update_encrypted_data(key_storage: &Arc<dyn KeyStorage>, key_id: ServerKeyId, mut key_share: DocumentKeyShare, author: Address, common_point: Public, encrypted_point: Public) -> Result<(), Error> {
	// author must be the same
	if key_share.author != author {
		return Err(Error::AccessDenied);
	}

	// save encryption data
	key_share.common_point = Some(common_point);
	key_share.encrypted_point = Some(encrypted_point);
	key_storage.update(key_id, key_share)
}
File diff suppressed because it is too large
@@ -1,21 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

pub mod decryption_session;
pub mod encryption_session;
pub mod generation_session;
pub mod signing_session_ecdsa;
pub mod signing_session_schnorr;
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,176 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::collections::BTreeSet;
use std::sync::Arc;
use key_server_cluster::{Error, NodeId};
use key_server_cluster::message::Message;

/// Connection to a single node. Provides basic information about the connected node and
/// allows sending messages to this node.
pub trait Connection: Send + Sync {
	/// Is this an inbound connection? This only matters when both nodes are simultaneously establishing
	/// two connections to each other. The agreement is that the inbound connection from the node with
	/// the lower NodeId is used and the other connection is closed.
	fn is_inbound(&self) -> bool;
	/// Returns id of the connected node.
	fn node_id(&self) -> &NodeId;
	/// Returns 'address' of the node to use in traces.
	fn node_address(&self) -> String;
	/// Send message to the connected node.
	fn send_message(&self, message: Message);
}
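
A small illustration of the duplicate-connection agreement described above (an illustrative helper only; the actual decision is made inside the net connections manager when inserting a connection):

// Sketch: when two nodes connect to each other simultaneously, exactly one
// connection survives: the one initiated by the node with the lower NodeId.
fn keep_connection(self_id: &NodeId, peer_id: &NodeId, is_inbound: bool) -> bool {
	if self_id < peer_id {
		// we are the lower node, so our outbound connection wins
		!is_inbound
	} else {
		// the peer is the lower node, so its connection (inbound to us) wins
		is_inbound
	}
}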

/// Connections manager. Responsible for keeping us connected to all required nodes.
pub trait ConnectionManager: 'static + Send + Sync {
	/// Returns shared reference to connections provider.
	fn provider(&self) -> Arc<dyn ConnectionProvider>;
	/// Try to reach all disconnected nodes immediately. This method is exposed mostly for
	/// tests, where all 'nodes' first start listening for incoming connections and
	/// only after that actually start connecting to each other.
	fn connect(&self);
}

/// Connections provider. Holds all active connections and the set of nodes that we need to
/// connect to. At any moment a connection could be lost and the set of connected/disconnected
/// nodes could change (on behalf of the connection manager).
/// Clone operation should be cheap (Arc).
pub trait ConnectionProvider: Send + Sync {
	/// Returns the set of currently connected nodes. Error is returned when our node is
	/// not a part of the cluster ('isolated' node).
	fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error>;
	/// Returns the set of currently disconnected nodes.
	fn disconnected_nodes(&self) -> BTreeSet<NodeId>;
	/// Returns the reference to the active node connection or None if the node is not connected.
	fn connection(&self, node: &NodeId) -> Option<Arc<dyn Connection>>;
}

#[cfg(test)]
pub mod tests {
	use std::collections::{BTreeSet, VecDeque};
	use std::sync::Arc;
	use std::sync::atomic::{AtomicBool, Ordering};
	use parking_lot::Mutex;
	use key_server_cluster::{Error, NodeId};
	use key_server_cluster::message::Message;
	use super::{ConnectionManager, Connection, ConnectionProvider};

	/// Shared messages queue.
	pub type MessagesQueue = Arc<Mutex<VecDeque<(NodeId, NodeId, Message)>>>;

	/// Single node connections.
	pub struct TestConnections {
		node: NodeId,
		is_isolated: AtomicBool,
		connected_nodes: Mutex<BTreeSet<NodeId>>,
		disconnected_nodes: Mutex<BTreeSet<NodeId>>,
		messages: MessagesQueue,
	}

	/// Single connection.
	pub struct TestConnection {
		from: NodeId,
		to: NodeId,
		messages: MessagesQueue,
	}

	impl TestConnections {
		pub fn isolate(&self) {
			let connected_nodes = ::std::mem::replace(&mut *self.connected_nodes.lock(), Default::default());
			self.is_isolated.store(true, Ordering::Relaxed);
			self.disconnected_nodes.lock().extend(connected_nodes)
		}

		pub fn disconnect(&self, node: NodeId) {
			self.connected_nodes.lock().remove(&node);
			self.disconnected_nodes.lock().insert(node);
		}

		pub fn exclude(&self, node: NodeId) {
			self.connected_nodes.lock().remove(&node);
			self.disconnected_nodes.lock().remove(&node);
		}

		pub fn include(&self, node: NodeId) {
			self.connected_nodes.lock().insert(node);
		}
	}

	impl ConnectionManager for Arc<TestConnections> {
		fn provider(&self) -> Arc<dyn ConnectionProvider> {
			self.clone()
		}

		fn connect(&self) {}
	}

	impl ConnectionProvider for TestConnections {
		fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error> {
			match self.is_isolated.load(Ordering::Relaxed) {
				false => Ok(self.connected_nodes.lock().clone()),
				true => Err(Error::NodeDisconnected),
			}
		}

		fn disconnected_nodes(&self) -> BTreeSet<NodeId> {
			self.disconnected_nodes.lock().clone()
		}

		fn connection(&self, node: &NodeId) -> Option<Arc<dyn Connection>> {
			match self.connected_nodes.lock().contains(node) {
				true => Some(Arc::new(TestConnection {
					from: self.node,
					to: *node,
					messages: self.messages.clone(),
				})),
				false => None,
			}
		}
	}

	impl Connection for TestConnection {
		fn is_inbound(&self) -> bool {
			false
		}

		fn node_id(&self) -> &NodeId {
			&self.to
		}

		fn node_address(&self) -> String {
			format!("{}", self.to)
		}

		fn send_message(&self, message: Message) {
			self.messages.lock().push_back((self.from, self.to, message))
		}
	}

	pub fn new_test_connections(
		messages: MessagesQueue,
		node: NodeId,
		mut nodes: BTreeSet<NodeId>
	) -> Arc<TestConnections> {
		let is_isolated = !nodes.remove(&node);
		Arc::new(TestConnections {
			node,
			is_isolated: AtomicBool::new(is_isolated),
			connected_nodes: Mutex::new(nodes),
			disconnected_nodes: Default::default(),
			messages,
		})
	}
}
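
A brief sketch of how these test helpers compose (hypothetical `node1`/`node2` ids):

// Sketch: wiring a two-node test cluster and isolating one node.
let messages: MessagesQueue = Default::default();
let nodes: BTreeSet<NodeId> = vec![node1, node2].into_iter().collect();
let connections = new_test_connections(messages.clone(), node1, nodes);
assert!(connections.provider().connected_nodes().is_ok());
connections.isolate();
assert!(connections.provider().connected_nodes().is_err()); // now 'isolated'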
@@ -1,543 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{BTreeMap, BTreeSet};
use std::collections::btree_map::Entry;
use std::io;
use std::net::{SocketAddr, IpAddr};
use std::sync::Arc;
use std::time::{Duration, Instant};
use futures::{future, Future, Stream};
use parking_lot::{Mutex, RwLock};
use tokio::net::{TcpListener, TcpStream};
use tokio::timer::{Interval, timeout::Error as TimeoutError};
use tokio_io::IoFuture;
use crypto::publickey::KeyPair;
use parity_runtime::Executor;
use blockchain::SigningKeyPair;
use key_server_cluster::{Error, NodeId, ClusterConfiguration};
use key_server_cluster::cluster_connections::{ConnectionProvider, Connection, ConnectionManager};
use key_server_cluster::connection_trigger::{Maintain, ConnectionTrigger};
use key_server_cluster::cluster_message_processor::MessageProcessor;
use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream,
	read_encrypted_message, WriteMessage, write_encrypted_message};
use key_server_cluster::message::{self, ClusterMessage, Message};
use key_server_cluster::net::{accept_connection as io_accept_connection,
	connect as io_connect, Connection as IoConnection};

/// Empty future.
pub type BoxedEmptyFuture = Box<dyn Future<Item = (), Error = ()> + Send>;

/// Maintain interval (seconds). Every MAINTAIN_INTERVAL seconds the node:
/// 1) checks if connected nodes are responding to KeepAlive messages
/// 2) tries to connect to disconnected nodes
/// 3) checks if enc/dec sessions have timed out
const MAINTAIN_INTERVAL: u64 = 10;

/// When no messages have been received from a node within KEEP_ALIVE_SEND_INTERVAL,
/// we must send a KeepAlive message to the node to check if it still responds to messages.
const KEEP_ALIVE_SEND_INTERVAL: Duration = Duration::from_secs(30);
/// When no messages have been received from a node within KEEP_ALIVE_DISCONNECT_INTERVAL,
/// we must treat this node as non-responding && disconnect from it.
const KEEP_ALIVE_DISCONNECT_INTERVAL: Duration = Duration::from_secs(60);
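
A sketch of the keep-alive decision these constants drive (the actual check lives in the maintenance loop further below; the helper and its string results are illustrative only):

// Sketch: per-connection keep-alive decision, evaluated every MAINTAIN_INTERVAL.
fn keep_alive_action(last_message_time: Instant, now: Instant) -> Option<&'static str> {
	let idle = now.duration_since(last_message_time);
	if idle > KEEP_ALIVE_DISCONNECT_INTERVAL {
		Some("disconnect") // node is considered non-responding
	} else if idle > KEEP_ALIVE_SEND_INTERVAL {
		Some("send KeepAlive") // probe the node
	} else {
		None // connection is considered healthy
	}
}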

/// Network connection manager configuration.
pub struct NetConnectionsManagerConfig {
	/// Allow connecting to 'higher' nodes.
	pub allow_connecting_to_higher_nodes: bool,
	/// Interface to listen to.
	pub listen_address: (String, u16),
	/// True if we should auto-start a servers set change session when the servers set changes.
	/// This will only work when the servers set is configured using the KeyServerSet contract.
	pub auto_migrate_enabled: bool,
}

/// Network connections manager.
pub struct NetConnectionsManager {
	/// Address we're listening on for incoming connections.
	listen_address: SocketAddr,
	/// Shared cluster connections data reference.
	data: Arc<NetConnectionsData>,
}

/// Network connections data. Shared among NetConnectionsManager and spawned futures.
struct NetConnectionsData {
	/// Allow connecting to 'higher' nodes.
	allow_connecting_to_higher_nodes: bool,
	/// Reference to tokio task executor.
	executor: Executor,
	/// Key pair of this node.
	self_key_pair: Arc<dyn SigningKeyPair>,
	/// Network messages processor.
	message_processor: Arc<dyn MessageProcessor>,
	/// Connections trigger.
	trigger: Mutex<Box<dyn ConnectionTrigger>>,
	/// Mutable connection data.
	container: Arc<RwLock<NetConnectionsContainer>>,
}

/// Network connections container. This is the only mutable data of NetConnectionsManager.
/// The set of nodes is mutated by the connection trigger and the connections set is also
/// mutated by spawned futures.
pub struct NetConnectionsContainer {
	/// Is this node isolated from the cluster?
	pub is_isolated: bool,
	/// Current key servers set.
	pub nodes: BTreeMap<NodeId, SocketAddr>,
	/// Active connections to key servers.
	pub connections: BTreeMap<NodeId, Arc<NetConnection>>,
}

/// Network connection to a single key server node.
pub struct NetConnection {
	executor: Executor,
	/// Id of the peer node.
	node_id: NodeId,
	/// Address of the peer node.
	node_address: SocketAddr,
	/// Is this inbound (true) or outbound (false) connection?
	is_inbound: bool,
	/// Key pair that is used to encrypt connection messages.
	key: KeyPair,
	/// Last message time.
	last_message_time: RwLock<Instant>,
	/// Underlying TCP stream.
	stream: SharedTcpStream,
}

impl NetConnectionsManager {
	/// Create new network connections manager.
	pub fn new(
		executor: Executor,
		message_processor: Arc<dyn MessageProcessor>,
		trigger: Box<dyn ConnectionTrigger>,
		container: Arc<RwLock<NetConnectionsContainer>>,
		config: &ClusterConfiguration,
		net_config: NetConnectionsManagerConfig,
	) -> Result<Self, Error> {
		let listen_address = make_socket_address(
			&net_config.listen_address.0,
			net_config.listen_address.1)?;

		Ok(NetConnectionsManager {
			listen_address,
			data: Arc::new(NetConnectionsData {
				allow_connecting_to_higher_nodes: net_config.allow_connecting_to_higher_nodes,
				executor,
				message_processor,
				self_key_pair: config.self_key_pair.clone(),
				trigger: Mutex::new(trigger),
				container,
			}),
		})
	}

	/// Start listening for connections and schedule connections maintenance.
	pub fn start(&self) -> Result<(), Error> {
		net_listen(&self.listen_address, self.data.clone())?;
		net_schedule_maintain(self.data.clone());
		Ok(())
	}
}

impl ConnectionManager for NetConnectionsManager {
	fn provider(&self) -> Arc<dyn ConnectionProvider> {
		self.data.container.clone()
	}

	fn connect(&self) {
		net_connect_disconnected(self.data.clone());
	}
}

impl ConnectionProvider for RwLock<NetConnectionsContainer> {
	fn connected_nodes(&self) -> Result<BTreeSet<NodeId>, Error> {
		let connections = self.read();
		if connections.is_isolated {
			return Err(Error::NodeDisconnected);
		}

		Ok(connections.connections.keys().cloned().collect())
	}

	fn disconnected_nodes(&self) -> BTreeSet<NodeId> {
		let connections = self.read();
		connections.nodes.keys()
			.filter(|node_id| !connections.connections.contains_key(node_id))
			.cloned()
			.collect()
	}

	fn connection(&self, node: &NodeId) -> Option<Arc<dyn Connection>> {
		match self.read().connections.get(node).cloned() {
			Some(connection) => Some(connection),
			None => None,
		}
	}
}

impl NetConnection {
	/// Create new connection.
	pub fn new(executor: Executor, is_inbound: bool, connection: IoConnection) -> NetConnection {
		NetConnection {
			executor,
			node_id: connection.node_id,
			node_address: connection.address,
			is_inbound: is_inbound,
			stream: connection.stream,
			key: connection.key,
			last_message_time: RwLock::new(Instant::now()),
		}
	}

	/// Get last message time.
	pub fn last_message_time(&self) -> Instant {
		*self.last_message_time.read()
	}

	/// Update last message time.
	pub fn set_last_message_time(&self, last_message_time: Instant) {
		*self.last_message_time.write() = last_message_time
	}

	/// Returns future that sends encrypted message over this connection.
	pub fn send_message_future(&self, message: Message) -> WriteMessage<SharedTcpStream> {
		write_encrypted_message(self.stream.clone(), &self.key, message)
	}

	/// Returns future that reads encrypted message from this connection.
	pub fn read_message_future(&self) -> ReadMessage<SharedTcpStream> {
		read_encrypted_message(self.stream.clone(), self.key.clone())
	}
}

impl Connection for NetConnection {
	fn is_inbound(&self) -> bool {
		self.is_inbound
	}

	fn node_id(&self) -> &NodeId {
		&self.node_id
	}

	fn node_address(&self) -> String {
		format!("{}", self.node_address)
	}

	fn send_message(&self, message: Message) {
		execute(&self.executor, self.send_message_future(message).then(|_| Ok(())));
	}
}

impl NetConnectionsData {
	/// Returns all active connections.
	pub fn active_connections(&self) -> Vec<Arc<NetConnection>> {
		self.container.read().connections.values().cloned().collect()
	}

	/// Returns all disconnected nodes with their addresses.
	pub fn disconnected_nodes(&self) -> Vec<(NodeId, SocketAddr)> {
		let container = self.container.read();
		container.nodes.iter()
			.filter(|(node_id, _)| !container.connections.contains_key(node_id))
			.map(|(node_id, addr)| (*node_id, *addr))
			.collect()
	}

	/// Try to insert new connection. Returns true if connection has been inserted.
	/// Returns false (and ignores the connection) if:
	/// - we do not expect a connection from this node
	/// - we are already connected to the node and the existing connection 'supersedes'
	/// the new connection by agreement
	pub fn insert(&self, connection: Arc<NetConnection>) -> bool {
		let node = *connection.node_id();
		let mut container = self.container.write();
		if !container.nodes.contains_key(&node) {
			trace!(target: "secretstore_net", "{}: ignoring unknown connection from {} at {}",
				self.self_key_pair.public(), node, connection.node_address());
			return false;
		}

		if container.connections.contains_key(&node) {
			// we have already connected to the same node
			// the agreement is that the node with the lower id must establish the connection to the node with the higher id
			if (*self.self_key_pair.public() < node && connection.is_inbound())
				|| (*self.self_key_pair.public() > node && !connection.is_inbound()) {
				return false;
			}
		}

		trace!(target: "secretstore_net",
			"{}: inserting connection to {} at {}. Connected to {} of {} nodes",
			self.self_key_pair.public(), node, connection.node_address(),
			container.connections.len() + 1, container.nodes.len());
		container.connections.insert(node, connection);

		true
	}

	/// Tries to remove connection. Returns true if connection has been removed.
	/// Returns false if we do not know this connection.
	pub fn remove(&self, connection: &NetConnection) -> bool {
		let node_id = *connection.node_id();
		let is_inbound = connection.is_inbound();
		let mut container = self.container.write();
		if let Entry::Occupied(entry) = container.connections.entry(node_id) {
			if entry.get().is_inbound() != is_inbound {
				return false;
			}

			trace!(target: "secretstore_net", "{}: removing connection to {} at {}",
				self.self_key_pair.public(), node_id, entry.get().node_address());
			entry.remove_entry();

			true
		} else {
			false
		}
	}
}
|
|
||||||
|
|
||||||
/// Listen for incoming connections.
fn net_listen(
	listen_address: &SocketAddr,
	data: Arc<NetConnectionsData>,
) -> Result<(), Error> {
	execute(&data.executor, net_listen_future(listen_address, data.clone())?);
	Ok(())
}

/// Future that listens for incoming connections.
fn net_listen_future(
	listen_address: &SocketAddr,
	data: Arc<NetConnectionsData>,
) -> Result<BoxedEmptyFuture, Error> {
	Ok(Box::new(TcpListener::bind(listen_address)?
		.incoming()
		.and_then(move |stream| {
			net_accept_connection(data.clone(), stream);
			Ok(())
		})
		.for_each(|_| Ok(()))
		.then(|_| future::ok(()))))
}

/// Accept incoming connection.
fn net_accept_connection(
	data: Arc<NetConnectionsData>,
	stream: TcpStream,
) {
	execute(&data.executor, net_accept_connection_future(data.clone(), stream));
}

/// Future that accepts an incoming connection.
fn net_accept_connection_future(data: Arc<NetConnectionsData>, stream: TcpStream) -> BoxedEmptyFuture {
	Box::new(io_accept_connection(stream, data.self_key_pair.clone())
		.then(move |result| net_process_connection_result(data, None, result))
		.then(|_| future::ok(())))
}

/// Connect to remote node.
fn net_connect(
	data: Arc<NetConnectionsData>,
	remote: SocketAddr,
) {
	execute(&data.executor, net_connect_future(data.clone(), remote));
}

/// Future that connects to a remote node.
fn net_connect_future(
	data: Arc<NetConnectionsData>,
	remote: SocketAddr,
) -> BoxedEmptyFuture {
	let disconnected_nodes = data.container.disconnected_nodes();
	Box::new(io_connect(&remote, data.self_key_pair.clone(), disconnected_nodes)
		.then(move |result| net_process_connection_result(data, Some(remote), result))
		.then(|_| future::ok(())))
}

/// Process network connection result.
fn net_process_connection_result(
	data: Arc<NetConnectionsData>,
	outbound_addr: Option<SocketAddr>,
	result: Result<DeadlineStatus<Result<IoConnection, Error>>, TimeoutError<io::Error>>,
) -> IoFuture<Result<(), Error>> {
	match result {
		Ok(DeadlineStatus::Meet(Ok(connection))) => {
			let connection = Arc::new(NetConnection::new(data.executor.clone(), outbound_addr.is_none(), connection));
			if data.insert(connection.clone()) {
				let maintain_action = data.trigger.lock().on_connection_established(connection.node_id());
				maintain_connection_trigger(data.clone(), maintain_action);

				return net_process_connection_messages(data, connection);
			}
		},
		Ok(DeadlineStatus::Meet(Err(err))) => {
			warn!(target: "secretstore_net", "{}: protocol error '{}' when establishing {} connection{}",
				data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
				outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
		},
		Ok(DeadlineStatus::Timeout) => {
			warn!(target: "secretstore_net", "{}: timeout when establishing {} connection{}",
				data.self_key_pair.public(), if outbound_addr.is_some() { "outbound" } else { "inbound" },
				outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
		},
		Err(err) => {
			warn!(target: "secretstore_net", "{}: network error '{}' when establishing {} connection{}",
				data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" },
				outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default());
		},
	}

	Box::new(future::ok(Ok(())))
}

/// Process connection messages.
fn net_process_connection_messages(
	data: Arc<NetConnectionsData>,
	connection: Arc<NetConnection>,
) -> IoFuture<Result<(), Error>> {
	Box::new(connection
		.read_message_future()
		.then(move |result|
			match result {
				Ok((_, Ok(message))) => {
					connection.set_last_message_time(Instant::now());
					data.message_processor.process_connection_message(connection.clone(), message);
					// continue serving connection
					let process_messages_future = net_process_connection_messages(
						data.clone(), connection).then(|_| Ok(()));
					execute(&data.executor, process_messages_future);
					Box::new(future::ok(Ok(())))
				},
				Ok((_, Err(err))) => {
					warn!(target: "secretstore_net", "{}: protocol error '{}' when reading message from node {}",
						data.self_key_pair.public(), err, connection.node_id());
					// continue serving connection
					let process_messages_future = net_process_connection_messages(
						data.clone(), connection).then(|_| Ok(()));
					execute(&data.executor, process_messages_future);
					Box::new(future::ok(Err(err)))
				},
				Err(err) => {
					let node_id = *connection.node_id();
					warn!(target: "secretstore_net", "{}: network error '{}' when reading message from node {}",
						data.self_key_pair.public(), err, node_id);
					// close connection
					if data.remove(&*connection) {
						let maintain_action = data.trigger.lock().on_connection_closed(&node_id);
						maintain_connection_trigger(data, maintain_action);
					}
					Box::new(future::err(err))
				},
			}
		))
}
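// Note: on a successful read (or a session-level protocol error) the function above
// re-schedules itself via `execute`, so each connection is served by a chain of
// single-message futures; only a network error breaks the chain and closes the connection.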
/// Schedule periodic connections maintenance.
fn net_schedule_maintain(data: Arc<NetConnectionsData>) {
	let closure_data = data.clone();
	execute(&data.executor, Interval::new_interval(Duration::new(MAINTAIN_INTERVAL, 0))
		.and_then(move |_| Ok(net_maintain(closure_data.clone())))
		.for_each(|_| Ok(()))
		.then(|_| future::ok(())));
}

/// Maintain network connections.
fn net_maintain(data: Arc<NetConnectionsData>) {
	trace!(target: "secretstore_net", "{}: executing maintain procedures", data.self_key_pair.public());

	update_nodes_set(data.clone());
	data.message_processor.maintain_sessions();
	net_keep_alive(data.clone());
	net_connect_disconnected(data);
}

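// The keep-alive scheme below uses two thresholds: once a connection has been silent
// for longer than KEEP_ALIVE_SEND_INTERVAL it is probed with a KeepAlive message, and
// only if it stays silent past KEEP_ALIVE_DISCONNECT_INTERVAL is it dropped. This relies
// on the send interval being smaller than the disconnect interval (both constants are
// presumably defined earlier in this file).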
/// Send keep alive messages to remote nodes.
fn net_keep_alive(data: Arc<NetConnectionsData>) {
	let active_connections = data.active_connections();
	for connection in active_connections {
		// the last_message_time could change after active_connections() call
		// => we always need to call Instant::now() after getting last_message_time
		let last_message_time = connection.last_message_time();
		let now = Instant::now();
		let last_message_diff = now - last_message_time;
		if last_message_diff > KEEP_ALIVE_DISCONNECT_INTERVAL {
			warn!(target: "secretstore_net", "{}: keep alive timeout for node {}",
				data.self_key_pair.public(), connection.node_id());

			let node_id = *connection.node_id();
			if data.remove(&*connection) {
				let maintain_action = data.trigger.lock().on_connection_closed(&node_id);
				maintain_connection_trigger(data.clone(), maintain_action);
			}
			data.message_processor.process_disconnect(&node_id);
		}
		else if last_message_diff > KEEP_ALIVE_SEND_INTERVAL {
			connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {})));
		}
	}
}

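// Reconnection below respects the same direction rule as `insert` above: by default a
// node only dials peers with a greater id; `allow_connecting_to_higher_nodes` relaxes
// this and lets it dial any disconnected peer.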
/// Connect disconnected nodes.
fn net_connect_disconnected(data: Arc<NetConnectionsData>) {
	let disconnected_nodes = data.disconnected_nodes();
	for (node_id, address) in disconnected_nodes {
		if data.allow_connecting_to_higher_nodes || *data.self_key_pair.public() < node_id {
			net_connect(data.clone(), address);
		}
	}
}

/// Schedule future execution.
fn execute<F: Future<Item = (), Error = ()> + Send + 'static>(executor: &Executor, f: F) {
	if let Err(err) = future::Executor::execute(executor, Box::new(f)) {
		error!("Secret store runtime unable to spawn task. Runtime is shutting down. ({:?})", err);
	}
}

/// Try to update active nodes set from connection trigger.
fn update_nodes_set(data: Arc<NetConnectionsData>) {
	let maintain_action = data.trigger.lock().on_maintain();
	maintain_connection_trigger(data, maintain_action);
}

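// `Maintain` (presumably defined alongside the connection trigger) distinguishes between
// session maintenance, connection maintenance, or both; the function below runs the
// matching procedures for whichever variant the trigger requested.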
/// Execute maintain procedures of connections trigger.
fn maintain_connection_trigger(data: Arc<NetConnectionsData>, maintain_action: Option<Maintain>) {
	if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Session) {
		let session_params = data.trigger.lock().maintain_session();
		if let Some(session_params) = session_params {
			let session = data.message_processor.start_servers_set_change_session(session_params);
			match session {
				Ok(_) => trace!(target: "secretstore_net", "{}: started auto-migrate session",
					data.self_key_pair.public()),
				Err(err) => trace!(target: "secretstore_net", "{}: failed to start auto-migrate session with: {}",
					data.self_key_pair.public(), err),
			}
		}
	}
	if maintain_action == Some(Maintain::SessionAndConnections) || maintain_action == Some(Maintain::Connections) {
		let mut trigger = data.trigger.lock();
		let mut data = data.container.write();
		trigger.maintain_connections(&mut *data);
	}
}

/// Compose SocketAddr from configuration's address and port.
fn make_socket_address(address: &str, port: u16) -> Result<SocketAddr, Error> {
	let ip_address: IpAddr = address.parse().map_err(|_| Error::InvalidNodeAddress)?;
	Ok(SocketAddr::new(ip_address, port))
}
@ -1,358 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use blockchain::SigningKeyPair;
use key_server_cluster::{Error, NodeId};
use key_server_cluster::cluster::{ServersSetChangeParams, new_servers_set_change_session};
use key_server_cluster::cluster_sessions::{AdminSession};
use key_server_cluster::cluster_connections::{ConnectionProvider, Connection};
use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, ClusterSessionsContainer,
	create_cluster_view};
use key_server_cluster::cluster_sessions_creator::{ClusterSessionCreator, IntoSessionId};
use key_server_cluster::message::{self, Message, ClusterMessage};
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSession,
	IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, ContinueAction};
use key_server_cluster::connection_trigger::ServersSetChangeSessionCreatorConnector;

/// Something that is able to process signals/messages from other nodes.
pub trait MessageProcessor: Send + Sync {
	/// Process disconnect from the remote node.
	fn process_disconnect(&self, node: &NodeId);
	/// Process single message from the connection.
	fn process_connection_message(&self, connection: Arc<dyn Connection>, message: Message);

	/// Start servers set change session. This is typically used by ConnectionManager when
	/// it detects that auto-migration session needs to be started.
	fn start_servers_set_change_session(&self, params: ServersSetChangeParams) -> Result<Arc<AdminSession>, Error>;
	/// Try to continue session after key version negotiation session is completed.
	fn try_continue_session(
		&self,
		session: Option<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>
	);
	/// Maintain active sessions. Typically called by the ConnectionManager at regular intervals.
	/// Should cancel stalled sessions and send keep-alive messages for sessions that support it.
	fn maintain_sessions(&self);
}

/// Bridge between ConnectionManager and ClusterSessions.
pub struct SessionsMessageProcessor {
	self_key_pair: Arc<dyn SigningKeyPair>,
	servers_set_change_creator_connector: Arc<dyn ServersSetChangeSessionCreatorConnector>,
	sessions: Arc<ClusterSessions>,
	connections: Arc<dyn ConnectionProvider>,
}

impl SessionsMessageProcessor {
	/// Create new instance of SessionsMessageProcessor.
	pub fn new(
		self_key_pair: Arc<dyn SigningKeyPair>,
		servers_set_change_creator_connector: Arc<dyn ServersSetChangeSessionCreatorConnector>,
		sessions: Arc<ClusterSessions>,
		connections: Arc<dyn ConnectionProvider>,
	) -> Self {
		SessionsMessageProcessor {
			self_key_pair,
			servers_set_change_creator_connector,
			sessions,
			connections,
		}
	}

	/// Process single session message from connection.
	fn process_message<S: ClusterSession, SC: ClusterSessionCreator<S>>(
		&self,
		sessions: &ClusterSessionsContainer<S, SC>,
		connection: Arc<dyn Connection>,
		mut message: Message,
	) -> Option<Arc<S>>
		where
			Message: IntoSessionId<S::Id>
	{
		// get or create new session, if required
		let mut sender = *connection.node_id();
		let session = self.prepare_session(sessions, &sender, &message);
		// send error if session is not found, or failed to create
		let session = match session {
			Ok(session) => session,
			Err(error) => {
				// this is a new session => it is not yet in the container
				warn!(target: "secretstore_net",
					"{}: {} session read error '{}' when requested for session from node {}",
					self.self_key_pair.public(), S::type_name(), error, sender);
				if !message.is_error_message() {
					let qed = "session_id only fails for cluster messages;
						only session messages are passed to process_message;
						qed";
					let session_id = message.into_session_id().expect(qed);
					let session_nonce = message.session_nonce().expect(qed);

					connection.send_message(SC::make_error_message(session_id, session_nonce, error));
				}
				return None;
			},
		};

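		// Pump messages through the session: after each successfully processed message,
		// previously queued messages for the same session are drained before returning.
		// `Error::TooEarlyForRequest` is not fatal: the message is re-queued and retried
		// on a later call.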
		let session_id = session.id();
		let mut is_queued_message = false;
		loop {
			let message_result = session.on_message(&sender, &message);
			match message_result {
				Ok(_) => {
					// if session is completed => stop
					if session.is_finished() {
						info!(target: "secretstore_net",
							"{}: {} session completed", self.self_key_pair.public(), S::type_name());
						sessions.remove(&session_id);
						return Some(session);
					}

					// try to dequeue message
					match sessions.dequeue_message(&session_id) {
						Some((msg_sender, msg)) => {
							is_queued_message = true;
							sender = msg_sender;
							message = msg;
						},
						None => return Some(session),
					}
				},
				Err(Error::TooEarlyForRequest) => {
					sessions.enqueue_message(&session_id, sender, message, is_queued_message);
					return Some(session);
				},
				Err(err) => {
					warn!(
						target: "secretstore_net",
						"{}: {} session error '{}' when processing message {} from node {}",
						self.self_key_pair.public(),
						S::type_name(),
						err,
						message,
						sender);
					session.on_session_error(self.self_key_pair.public(), err);
					sessions.remove(&session_id);
					return Some(session);
				},
			}
		}
	}

	/// Get or insert new session.
	fn prepare_session<S: ClusterSession, SC: ClusterSessionCreator<S>>(
		&self,
		sessions: &ClusterSessionsContainer<S, SC>,
		sender: &NodeId,
		message: &Message
	) -> Result<Arc<S>, Error>
		where
			Message: IntoSessionId<S::Id>
	{
		fn requires_all_connections(message: &Message) -> bool {
			match *message {
				Message::Generation(_) => true,
				Message::ShareAdd(_) => true,
				Message::ServersSetChange(_) => true,
				_ => false,
			}
		}

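		// Only initialization and delegation messages may create a session; everything else
		// must match an already-active session id. For initialization messages the sender
		// becomes the session master; for delegation messages this (delegated-to) node does.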
		// get or create new session, if required
		let session_id = message.into_session_id()
			.expect("into_session_id fails for cluster messages only;
				only session messages are passed to prepare_session;
				qed");
		let is_initialization_message = message.is_initialization_message();
		let is_delegation_message = message.is_delegation_message();
		match is_initialization_message || is_delegation_message {
			false => sessions.get(&session_id, true).ok_or(Error::NoActiveSessionWithId),
			true => {
				let creation_data = SC::creation_data_from_message(&message)?;
				let master = if is_initialization_message {
					*sender
				} else {
					*self.self_key_pair.public()
				};
				let cluster = create_cluster_view(
					self.self_key_pair.clone(),
					self.connections.clone(),
					requires_all_connections(&message))?;

				let nonce = Some(message.session_nonce().ok_or(Error::InvalidMessage)?);
				let exclusive = message.is_exclusive_session_message();
				sessions.insert(cluster, master, session_id, nonce, exclusive, creation_data).map(|s| s.session)
			},
		}
	}

	/// Process single cluster message from the connection.
	fn process_cluster_message(&self, connection: Arc<dyn Connection>, message: ClusterMessage) {
		match message {
			ClusterMessage::KeepAlive(_) => {
				let msg = Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse {
					session_id: None,
				}));
				connection.send_message(msg)
			},
			ClusterMessage::KeepAliveResponse(msg) => if let Some(session_id) = msg.session_id {
				self.sessions.on_session_keep_alive(connection.node_id(), session_id.into());
			},
			_ => warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}",
				self.self_key_pair.public(), message, connection.node_id(), connection.node_address()),
		}
	}
}

impl MessageProcessor for SessionsMessageProcessor {
	fn process_disconnect(&self, node: &NodeId) {
		self.sessions.on_connection_timeout(node);
	}

	fn process_connection_message(&self, connection: Arc<dyn Connection>, message: Message) {
		trace!(target: "secretstore_net", "{}: received message {} from {}",
			self.self_key_pair.public(), message, connection.node_id());

		// error is ignored as we only process errors on session level
		match message {
			Message::Generation(message) => self
				.process_message(&self.sessions.generation_sessions, connection, Message::Generation(message))
				.map(|_| ()).unwrap_or_default(),
			Message::Encryption(message) => self
				.process_message(&self.sessions.encryption_sessions, connection, Message::Encryption(message))
				.map(|_| ()).unwrap_or_default(),
			Message::Decryption(message) => self
				.process_message(&self.sessions.decryption_sessions, connection, Message::Decryption(message))
				.map(|_| ()).unwrap_or_default(),
			Message::SchnorrSigning(message) => self
				.process_message(&self.sessions.schnorr_signing_sessions, connection, Message::SchnorrSigning(message))
				.map(|_| ()).unwrap_or_default(),
			Message::EcdsaSigning(message) => self
				.process_message(&self.sessions.ecdsa_signing_sessions, connection, Message::EcdsaSigning(message))
				.map(|_| ()).unwrap_or_default(),
			Message::ServersSetChange(message) => {
				let message = Message::ServersSetChange(message);
				let is_initialization_message = message.is_initialization_message();
				let session = self.process_message(&self.sessions.admin_sessions, connection, message);
				if is_initialization_message {
					if let Some(session) = session {
						self.servers_set_change_creator_connector
							.set_key_servers_set_change_session(session.clone());
					}
				}
			},
			Message::KeyVersionNegotiation(message) => {
				let session = self.process_message(
					&self.sessions.negotiation_sessions, connection, Message::KeyVersionNegotiation(message));
				self.try_continue_session(session);
			},
			Message::ShareAdd(message) => self.process_message(
				&self.sessions.admin_sessions, connection, Message::ShareAdd(message))
				.map(|_| ()).unwrap_or_default(),
			Message::Cluster(message) => self.process_cluster_message(connection, message),
		}
	}

	fn try_continue_session(
		&self,
		session: Option<Arc<KeyVersionNegotiationSession<KeyVersionNegotiationSessionTransport>>>
	) {
		if let Some(session) = session {
			let meta = session.meta();
			let is_master_node = meta.self_node_id == meta.master_node_id;
			if is_master_node && session.is_finished() {
				self.sessions.negotiation_sessions.remove(&session.id());
				match session.result() {
					Some(Ok(Some((version, master)))) => match session.take_continue_action() {
						Some(ContinueAction::Decrypt(
							session, origin, is_shadow_decryption, is_broadcast_decryption
						)) => {
							let initialization_error = if self.self_key_pair.public() == &master {
								session.initialize(
									origin, version, is_shadow_decryption, is_broadcast_decryption)
							} else {
								session.delegate(
									master, origin, version, is_shadow_decryption, is_broadcast_decryption)
							};

							if let Err(error) = initialization_error {
								session.on_session_error(&meta.self_node_id, error);
								self.sessions.decryption_sessions.remove(&session.id());
							}
						},
						Some(ContinueAction::SchnorrSign(session, message_hash)) => {
							let initialization_error = if self.self_key_pair.public() == &master {
								session.initialize(version, message_hash)
							} else {
								session.delegate(master, version, message_hash)
							};

							if let Err(error) = initialization_error {
								session.on_session_error(&meta.self_node_id, error);
								self.sessions.schnorr_signing_sessions.remove(&session.id());
							}
						},
						Some(ContinueAction::EcdsaSign(session, message_hash)) => {
							let initialization_error = if self.self_key_pair.public() == &master {
								session.initialize(version, message_hash)
							} else {
								session.delegate(master, version, message_hash)
							};

							if let Err(error) = initialization_error {
								session.on_session_error(&meta.self_node_id, error);
								self.sessions.ecdsa_signing_sessions.remove(&session.id());
							}
						},
						None => (),
					},
					Some(Err(error)) => match session.take_continue_action() {
						Some(ContinueAction::Decrypt(session, _, _, _)) => {
							session.on_session_error(&meta.self_node_id, error);
							self.sessions.decryption_sessions.remove(&session.id());
						},
						Some(ContinueAction::SchnorrSign(session, _)) => {
							session.on_session_error(&meta.self_node_id, error);
							self.sessions.schnorr_signing_sessions.remove(&session.id());
						},
						Some(ContinueAction::EcdsaSign(session, _)) => {
							session.on_session_error(&meta.self_node_id, error);
							self.sessions.ecdsa_signing_sessions.remove(&session.id());
						},
						None => (),
					},
					None | Some(Ok(None)) => unreachable!("is_master_node; session is finished;
						negotiation version always finished with result on master;
						qed"),
				}
			}
		}
	}
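	// Note on `try_continue_session` above: only the master of a finished negotiation
	// session continues the queued action, either initializing the follow-up session
	// locally (when this node holds the selected key version) or delegating it to the
	// node that does; failures are propagated to the follow-up session, which is removed.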

	fn maintain_sessions(&self) {
		self.sessions.stop_stalled_sessions();
		self.sessions.sessions_keep_alive();
	}

	fn start_servers_set_change_session(&self, params: ServersSetChangeParams) -> Result<Arc<AdminSession>, Error> {
		new_servers_set_change_session(
			self.self_key_pair.clone(),
			&*self.sessions,
			self.connections.clone(),
			self.servers_set_change_creator_connector.clone(),
			params,
		).map(|s| s.session)
	}
}
@ -1,786 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::time::{Duration, Instant};
use std::sync::{Arc, Weak};
use std::sync::atomic::AtomicBool;
use std::collections::{VecDeque, BTreeMap, BTreeSet};
use futures::{oneshot, Oneshot, Complete, Future};
use parking_lot::{Mutex, RwLock, Condvar};
use ethereum_types::H256;
use crypto::publickey::Secret;
use blockchain::SigningKeyPair;
use key_server_cluster::{Error, NodeId, SessionId};
use key_server_cluster::cluster::{Cluster, ClusterConfiguration, ClusterView};
use key_server_cluster::cluster_connections::ConnectionProvider;
use key_server_cluster::connection_trigger::ServersSetChangeSessionCreatorConnector;
use key_server_cluster::message::{self, Message};
use key_server_cluster::generation_session::{SessionImpl as GenerationSessionImpl};
use key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl};
use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl};
use key_server_cluster::signing_session_ecdsa::{SessionImpl as EcdsaSigningSessionImpl};
use key_server_cluster::signing_session_schnorr::{SessionImpl as SchnorrSigningSessionImpl};
use key_server_cluster::share_add_session::{SessionImpl as ShareAddSessionImpl, IsolatedSessionTransport as ShareAddTransport};
use key_server_cluster::servers_set_change_session::{SessionImpl as ServersSetChangeSessionImpl};
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl,
	IsolatedSessionTransport as VersionNegotiationTransport};

use key_server_cluster::cluster_sessions_creator::{GenerationSessionCreator, EncryptionSessionCreator, DecryptionSessionCreator,
	SchnorrSigningSessionCreator, KeyVersionNegotiationSessionCreator, AdminSessionCreator, SessionCreatorCore,
	EcdsaSigningSessionCreator, ClusterSessionCreator};

/// When there are no session-related messages for SESSION_TIMEOUT_INTERVAL seconds,
/// we must treat this session as stalled && finish it with an error.
/// This timeout is for cases when node is responding to KeepAlive messages, but intentionally ignores
/// session messages.
const SESSION_TIMEOUT_INTERVAL: Duration = Duration::from_secs(60);
/// Interval to send session-level KeepAlive-messages.
const SESSION_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(30);
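// Note: SESSION_KEEP_ALIVE_INTERVAL is half of SESSION_TIMEOUT_INTERVAL, so at least one
// session-level keep-alive exchange should happen before an otherwise idle session is
// declared stalled.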
lazy_static! {
	/// Servers set change session id (there could be at most 1 session => hardcoded id).
	pub static ref SERVERS_SET_CHANGE_SESSION_ID: SessionId = "10b7af423bb551d5dc8645db754163a2145d37d78d468fa7330435ed77064c1c"
		.parse()
		.expect("hardcoded id should parse without errors; qed");
}

/// Session id with sub session.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SessionIdWithSubSession {
	/// Key id.
	pub id: SessionId,
	/// Sub session id.
	pub access_key: Secret,
}

/// Generic cluster session.
pub trait ClusterSession {
	/// Session identifier type.
	type Id: ::std::fmt::Debug + Ord + Clone;
	/// Session creation data type.
	type CreationData;
	/// Session (successful) result type.
	type SuccessfulResult: Send + 'static;

	/// Session type name.
	fn type_name() -> &'static str;
	/// Get session id.
	fn id(&self) -> Self::Id;
	/// If session is finished (either with success or not).
	fn is_finished(&self) -> bool;
	/// When it takes too much time to complete session.
	fn on_session_timeout(&self);
	/// When it takes too much time to receive response from the node.
	fn on_node_timeout(&self, node_id: &NodeId);
	/// Process error that has occurred during session + propagate this error to required nodes.
	fn on_session_error(&self, sender: &NodeId, error: Error);
	/// Process session message.
	fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error>;

	/// 'Wait for session completion' helper.
	#[cfg(test)]
	fn wait_session<T, U, F: Fn(&U) -> Option<Result<T, Error>>>(
		completion: &CompletionSignal<T>,
		session_data: &Mutex<U>,
		timeout: Option<Duration>,
		result_reader: F
	) -> Option<Result<T, Error>> {
		let mut locked_data = session_data.lock();
		match result_reader(&locked_data) {
			Some(result) => Some(result),
			None => {
				let completion_condvar = completion.completion_condvar.as_ref().expect("created in test mode");
				match timeout {
					None => completion_condvar.wait(&mut locked_data),
					Some(timeout) => {
						completion_condvar.wait_for(&mut locked_data, timeout);
					},
				}

				result_reader(&locked_data)
			},
		}
	}
}
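// Note on `ClusterSession::wait_session`: the condvar only exists in test builds (see
// `CompletionSignal::new`); after a wake-up or timeout the stored result is simply
// re-read, so a timed-out wait returns `None` instead of blocking forever.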

/// Waitable cluster session.
pub struct WaitableSession<S: ClusterSession> {
	/// Session handle.
	pub session: Arc<S>,
	/// Session result oneshot.
	pub oneshot: Oneshot<Result<S::SuccessfulResult, Error>>,
}

/// Session completion signal.
pub struct CompletionSignal<T> {
	/// Completion future.
	pub completion_future: Mutex<Option<Complete<Result<T, Error>>>>,

	/// Completion condvar.
	pub completion_condvar: Option<Condvar>,
}

/// Administrative session.
pub enum AdminSession {
	/// Share add session.
	ShareAdd(ShareAddSessionImpl<ShareAddTransport>),
	/// Servers set change session.
	ServersSetChange(ServersSetChangeSessionImpl),
}

/// Administrative session creation data.
pub enum AdminSessionCreationData {
	/// Share add session (key id).
	ShareAdd(H256),
	/// Servers set change session (block id, new_server_set).
	ServersSetChange(Option<H256>, BTreeSet<NodeId>),
}

/// Active sessions on this cluster.
pub struct ClusterSessions {
	/// Key generation sessions.
	pub generation_sessions: ClusterSessionsContainer<GenerationSessionImpl, GenerationSessionCreator>,
	/// Encryption sessions.
	pub encryption_sessions: ClusterSessionsContainer<EncryptionSessionImpl, EncryptionSessionCreator>,
	/// Decryption sessions.
	pub decryption_sessions: ClusterSessionsContainer<DecryptionSessionImpl, DecryptionSessionCreator>,
	/// Schnorr signing sessions.
	pub schnorr_signing_sessions: ClusterSessionsContainer<SchnorrSigningSessionImpl, SchnorrSigningSessionCreator>,
	/// ECDSA signing sessions.
	pub ecdsa_signing_sessions: ClusterSessionsContainer<EcdsaSigningSessionImpl, EcdsaSigningSessionCreator>,
	/// Key version negotiation sessions.
	pub negotiation_sessions: ClusterSessionsContainer<
		KeyVersionNegotiationSessionImpl<VersionNegotiationTransport>,
		KeyVersionNegotiationSessionCreator
	>,
	/// Administrative sessions.
	pub admin_sessions: ClusterSessionsContainer<AdminSession, AdminSessionCreator>,
	/// Self node id.
	self_node_id: NodeId,
	/// Creator core.
	creator_core: Arc<SessionCreatorCore>,
}

/// Active sessions container listener.
pub trait ClusterSessionsListener<S: ClusterSession>: Send + Sync {
	/// When new session is inserted to the container.
	fn on_session_inserted(&self, _session: Arc<S>) {}
	/// When session is removed from the container.
	fn on_session_removed(&self, _session: Arc<S>) {}
}

/// Active sessions container.
pub struct ClusterSessionsContainer<S: ClusterSession, SC: ClusterSessionCreator<S>> {
	/// Sessions creator.
	pub creator: SC,
	/// Active sessions.
	sessions: RwLock<BTreeMap<S::Id, QueuedSession<S>>>,
	/// Listeners. Lock order: sessions -> listeners.
	listeners: Mutex<Vec<Weak<dyn ClusterSessionsListener<S>>>>,
	/// Sessions container state.
	container_state: Arc<Mutex<ClusterSessionsContainerState>>,
	/// Do not actually remove sessions.
	preserve_sessions: bool,
}

/// Session and its message queue.
pub struct QueuedSession<S> {
	/// Session master.
	pub master: NodeId,
	/// Cluster view.
	pub cluster_view: Arc<dyn Cluster>,
	/// Last keep alive time.
	pub last_keep_alive_time: Instant,
	/// Last received message time.
	pub last_message_time: Instant,
	/// The session itself.
	pub session: Arc<S>,
	/// Messages queue.
	pub queue: VecDeque<(NodeId, Message)>,
}

/// Cluster sessions container state.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ClusterSessionsContainerState {
	/// There's no active sessions => any session can be started.
	Idle,
	/// There are active sessions => exclusive session can't be started right now.
	Active(usize),
	/// Exclusive session is active => can't start any other sessions.
	Exclusive,
}
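// State transitions (see `on_session_starting` / `on_session_completed` below):
// Idle -> Active(1) or Exclusive on start; Active(n) -> Active(n + 1) for further
// non-exclusive sessions; starting anything while Exclusive, or an exclusive session
// while Active, is an error.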

impl ClusterSessions {
	/// Create new cluster sessions container.
	pub fn new(config: &ClusterConfiguration, servers_set_change_session_creator_connector: Arc<dyn ServersSetChangeSessionCreatorConnector>) -> Self {
		let container_state = Arc::new(Mutex::new(ClusterSessionsContainerState::Idle));
		let creator_core = Arc::new(SessionCreatorCore::new(config));
		ClusterSessions {
			self_node_id: config.self_key_pair.public().clone(),
			generation_sessions: ClusterSessionsContainer::new(GenerationSessionCreator {
				core: creator_core.clone(),
				make_faulty_generation_sessions: AtomicBool::new(false),
			}, container_state.clone()),
			encryption_sessions: ClusterSessionsContainer::new(EncryptionSessionCreator {
				core: creator_core.clone(),
			}, container_state.clone()),
			decryption_sessions: ClusterSessionsContainer::new(DecryptionSessionCreator {
				core: creator_core.clone(),
			}, container_state.clone()),
			schnorr_signing_sessions: ClusterSessionsContainer::new(SchnorrSigningSessionCreator {
				core: creator_core.clone(),
			}, container_state.clone()),
			ecdsa_signing_sessions: ClusterSessionsContainer::new(EcdsaSigningSessionCreator {
				core: creator_core.clone(),
			}, container_state.clone()),
			negotiation_sessions: ClusterSessionsContainer::new(KeyVersionNegotiationSessionCreator {
				core: creator_core.clone(),
			}, container_state.clone()),
			admin_sessions: ClusterSessionsContainer::new(AdminSessionCreator {
				core: creator_core.clone(),
				servers_set_change_session_creator_connector: servers_set_change_session_creator_connector,
				admin_public: config.admin_public.clone(),
			}, container_state),
			creator_core: creator_core,
		}
	}

	#[cfg(test)]
	pub fn make_faulty_generation_sessions(&self) {
		self.generation_sessions.creator.make_faulty_generation_sessions();
	}

	#[cfg(test)]
	pub fn preserve_sessions(&mut self) {
		self.generation_sessions.preserve_sessions = true;
		self.encryption_sessions.preserve_sessions = true;
		self.decryption_sessions.preserve_sessions = true;
		self.schnorr_signing_sessions.preserve_sessions = true;
		self.ecdsa_signing_sessions.preserve_sessions = true;
		self.negotiation_sessions.preserve_sessions = true;
		self.admin_sessions.preserve_sessions = true;
	}

	/// Send session-level keep-alive messages.
	pub fn sessions_keep_alive(&self) {
		self.admin_sessions.send_keep_alive(&*SERVERS_SET_CHANGE_SESSION_ID, &self.self_node_id);
	}

	/// When session-level keep-alive response is received.
	pub fn on_session_keep_alive(&self, sender: &NodeId, session_id: SessionId) {
		if session_id == *SERVERS_SET_CHANGE_SESSION_ID {
			self.admin_sessions.on_keep_alive(&session_id, sender);
		}
	}

	/// Stop sessions that are stalling.
	pub fn stop_stalled_sessions(&self) {
		self.generation_sessions.stop_stalled_sessions();
		self.encryption_sessions.stop_stalled_sessions();
		self.decryption_sessions.stop_stalled_sessions();
		self.schnorr_signing_sessions.stop_stalled_sessions();
		self.ecdsa_signing_sessions.stop_stalled_sessions();
		self.negotiation_sessions.stop_stalled_sessions();
		self.admin_sessions.stop_stalled_sessions();
	}

	/// When connection to node is lost.
	pub fn on_connection_timeout(&self, node_id: &NodeId) {
		self.generation_sessions.on_connection_timeout(node_id);
		self.encryption_sessions.on_connection_timeout(node_id);
		self.decryption_sessions.on_connection_timeout(node_id);
		self.schnorr_signing_sessions.on_connection_timeout(node_id);
		self.ecdsa_signing_sessions.on_connection_timeout(node_id);
		self.negotiation_sessions.on_connection_timeout(node_id);
		self.admin_sessions.on_connection_timeout(node_id);
		self.creator_core.on_connection_timeout(node_id);
	}
}

impl<S, SC> ClusterSessionsContainer<S, SC> where S: ClusterSession, SC: ClusterSessionCreator<S> {
	pub fn new(creator: SC, container_state: Arc<Mutex<ClusterSessionsContainerState>>) -> Self {
		ClusterSessionsContainer {
			creator: creator,
			sessions: RwLock::new(BTreeMap::new()),
			listeners: Mutex::new(Vec::new()),
			container_state: container_state,
			preserve_sessions: false,
		}
	}

	pub fn add_listener(&self, listener: Arc<dyn ClusterSessionsListener<S>>) {
		self.listeners.lock().push(Arc::downgrade(&listener));
	}

	#[cfg(test)]
	pub fn is_empty(&self) -> bool {
		self.sessions.read().is_empty()
	}

	pub fn get(&self, session_id: &S::Id, update_last_message_time: bool) -> Option<Arc<S>> {
		let mut sessions = self.sessions.write();
		sessions.get_mut(session_id)
			.map(|s| {
				if update_last_message_time {
					s.last_message_time = Instant::now();
				}
				s.session.clone()
			})
	}

	#[cfg(test)]
	pub fn first(&self) -> Option<Arc<S>> {
		self.sessions.read().values().nth(0).map(|s| s.session.clone())
	}

	pub fn insert(
		&self,
		cluster: Arc<dyn Cluster>,
		master: NodeId,
		session_id: S::Id,
		session_nonce: Option<u64>,
		is_exclusive_session: bool,
		creation_data: Option<S::CreationData>,
	) -> Result<WaitableSession<S>, Error> {
		let mut sessions = self.sessions.write();
		if sessions.contains_key(&session_id) {
			return Err(Error::DuplicateSessionId);
		}

		// create cluster
		// let cluster = create_cluster_view(data, requires_all_connections)?;
		// create session
		let session = self.creator.create(cluster.clone(), master.clone(), session_nonce, session_id.clone(), creation_data)?;
		// check if session can be started
		self.container_state.lock().on_session_starting(is_exclusive_session)?;

		// insert session
		let queued_session = QueuedSession {
			master: master,
			cluster_view: cluster,
			last_keep_alive_time: Instant::now(),
			last_message_time: Instant::now(),
			session: session.session.clone(),
			queue: VecDeque::new(),
		};
		sessions.insert(session_id, queued_session);
		self.notify_listeners(|l| l.on_session_inserted(session.session.clone()));

		Ok(session)
	}
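	// Note on `insert` above: the returned WaitableSession pairs the session handle with
	// a oneshot future that resolves when the session completes, so callers can either
	// wait on the future or track completion through a ClusterSessionsListener.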

	pub fn remove(&self, session_id: &S::Id) {
		self.do_remove(session_id, &mut *self.sessions.write());
	}

	pub fn enqueue_message(&self, session_id: &S::Id, sender: NodeId, message: Message, is_queued_message: bool) {
		self.sessions.write().get_mut(session_id)
			.map(|session| if is_queued_message { session.queue.push_front((sender, message)) }
				else { session.queue.push_back((sender, message)) });
	}

	pub fn dequeue_message(&self, session_id: &S::Id) -> Option<(NodeId, Message)> {
		self.sessions.write().get_mut(session_id)
			.and_then(|session| session.queue.pop_front())
	}

	pub fn stop_stalled_sessions(&self) {
		let mut sessions = self.sessions.write();
		for sid in sessions.keys().cloned().collect::<Vec<_>>() {
			let remove_session = {
				let session = sessions.get(&sid).expect("enumerating only existing sessions; qed");
				if Instant::now() - session.last_message_time > SESSION_TIMEOUT_INTERVAL {
					session.session.on_session_timeout();
					session.session.is_finished()
				} else {
					false
				}
			};

			if remove_session {
				self.do_remove(&sid, &mut *sessions);
			}
		}
	}

	pub fn on_connection_timeout(&self, node_id: &NodeId) {
		let mut sessions = self.sessions.write();
		for sid in sessions.keys().cloned().collect::<Vec<_>>() {
			let remove_session = {
				let session = sessions.get(&sid).expect("enumerating only existing sessions; qed");
				session.session.on_node_timeout(node_id);
				session.session.is_finished()
			};

			if remove_session {
				self.do_remove(&sid, &mut *sessions);
			}
		}
	}

	fn do_remove(&self, session_id: &S::Id, sessions: &mut BTreeMap<S::Id, QueuedSession<S>>) {
		if !self.preserve_sessions {
			if let Some(session) = sessions.remove(session_id) {
				self.container_state.lock().on_session_completed();
				self.notify_listeners(|l| l.on_session_removed(session.session.clone()));
			}
		}
	}

	fn notify_listeners<F: Fn(&dyn ClusterSessionsListener<S>) -> ()>(&self, callback: F) {
		let mut listeners = self.listeners.lock();
		let mut listener_index = 0;
		while listener_index < listeners.len() {
			match listeners[listener_index].upgrade() {
				Some(listener) => {
					callback(&*listener);
					listener_index += 1;
				},
				None => {
					listeners.swap_remove(listener_index);
				},
			}
		}
	}
}
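// Note on `notify_listeners` above: listeners are held as weak references; any listener
// that has been dropped fails to upgrade and is pruned in place via `swap_remove`.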

impl<S, SC> ClusterSessionsContainer<S, SC>
where
	S: ClusterSession,
	SC: ClusterSessionCreator<S>,
	SessionId: From<S::Id>,
{
	pub fn send_keep_alive(&self, session_id: &S::Id, self_node_id: &NodeId) {
		if let Some(session) = self.sessions.write().get_mut(session_id) {
			let now = Instant::now();
			if self_node_id == &session.master && now - session.last_keep_alive_time > SESSION_KEEP_ALIVE_INTERVAL {
				session.last_keep_alive_time = now;
				// since we send KeepAlive message to prevent nodes from disconnecting
				// && worst thing that can happen if node is disconnected is that session is failed
				// => ignore error here, because this node is probably not needed for the rest of the session at all
				let _ = session.cluster_view.broadcast(Message::Cluster(message::ClusterMessage::KeepAliveResponse(message::KeepAliveResponse {
					session_id: Some(session_id.clone().into()),
				})));
			}
		}
	}

	pub fn on_keep_alive(&self, session_id: &S::Id, sender: &NodeId) {
		if let Some(session) = self.sessions.write().get_mut(session_id) {
			let now = Instant::now();
			// we only accept keep alive from master node of ServersSetChange session
			if sender == &session.master {
				session.last_keep_alive_time = now;
			}
		}
	}
}

impl ClusterSessionsContainerState {
	/// When session is starting.
	pub fn on_session_starting(&mut self, is_exclusive_session: bool) -> Result<(), Error> {
		match *self {
			ClusterSessionsContainerState::Idle if is_exclusive_session => {
				::std::mem::replace(self, ClusterSessionsContainerState::Exclusive);
			},
			ClusterSessionsContainerState::Idle => {
				::std::mem::replace(self, ClusterSessionsContainerState::Active(1));
			},
			ClusterSessionsContainerState::Active(_) if is_exclusive_session =>
				return Err(Error::HasActiveSessions),
			ClusterSessionsContainerState::Active(sessions_count) => {
				::std::mem::replace(self, ClusterSessionsContainerState::Active(sessions_count + 1));
			},
			ClusterSessionsContainerState::Exclusive =>
				return Err(Error::ExclusiveSessionActive),
		}
		Ok(())
	}

	/// When session is completed.
	pub fn on_session_completed(&mut self) {
		match *self {
			ClusterSessionsContainerState::Idle =>
				unreachable!("idle means that there are no active sessions; on_session_completed is only called once after active session is completed; qed"),
			ClusterSessionsContainerState::Active(sessions_count) if sessions_count == 1 => {
				::std::mem::replace(self, ClusterSessionsContainerState::Idle);
			},
			ClusterSessionsContainerState::Active(sessions_count) => {
				::std::mem::replace(self, ClusterSessionsContainerState::Active(sessions_count - 1));
			}
			ClusterSessionsContainerState::Exclusive => {
				::std::mem::replace(self, ClusterSessionsContainerState::Idle);
			},
		}
	}
}

impl SessionIdWithSubSession {
	/// Create new decryption session Id.
	pub fn new(session_id: SessionId, sub_session_id: Secret) -> Self {
		SessionIdWithSubSession {
			id: session_id,
			access_key: sub_session_id,
		}
	}
}

impl PartialOrd for SessionIdWithSubSession {
	fn partial_cmp(&self, other: &Self) -> Option<::std::cmp::Ordering> {
		Some(self.cmp(other))
	}
}

impl Ord for SessionIdWithSubSession {
	fn cmp(&self, other: &Self) -> ::std::cmp::Ordering {
		match self.id.cmp(&other.id) {
			::std::cmp::Ordering::Equal => self.access_key.cmp(&other.access_key),
			r @ _ => r,
		}
	}
}

impl AdminSession {
	pub fn as_servers_set_change(&self) -> Option<&ServersSetChangeSessionImpl> {
		match *self {
			AdminSession::ServersSetChange(ref session) => Some(session),
			_ => None
		}
	}
}

impl ClusterSession for AdminSession {
	type Id = SessionId;
	type CreationData = AdminSessionCreationData;
	type SuccessfulResult = ();

	fn type_name() -> &'static str {
		"admin"
	}

	fn id(&self) -> SessionId {
		match *self {
			AdminSession::ShareAdd(ref session) => session.id().clone(),
			AdminSession::ServersSetChange(ref session) => session.id().clone(),
		}
	}

	fn is_finished(&self) -> bool {
		match *self {
			AdminSession::ShareAdd(ref session) => session.is_finished(),
			AdminSession::ServersSetChange(ref session) => session.is_finished(),
		}
	}

	fn on_session_timeout(&self) {
		match *self {
			AdminSession::ShareAdd(ref session) => session.on_session_timeout(),
			AdminSession::ServersSetChange(ref session) => session.on_session_timeout(),
		}
	}

	fn on_node_timeout(&self, node_id: &NodeId) {
		match *self {
			AdminSession::ShareAdd(ref session) => session.on_node_timeout(node_id),
			AdminSession::ServersSetChange(ref session) => session.on_node_timeout(node_id),
		}
	}

	fn on_session_error(&self, node: &NodeId, error: Error) {
		match *self {
			AdminSession::ShareAdd(ref session) => session.on_session_error(node, error),
			AdminSession::ServersSetChange(ref session) => session.on_session_error(node, error),
		}
	}

	fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
		match *self {
			AdminSession::ShareAdd(ref session) => session.on_message(sender, message),
			AdminSession::ServersSetChange(ref session) => session.on_message(sender, message),
		}
	}
}

impl<S: ClusterSession> WaitableSession<S> {
	pub fn new(session: S, oneshot: Oneshot<Result<S::SuccessfulResult, Error>>) -> Self {
		WaitableSession {
			session: Arc::new(session),
			oneshot,
		}
	}

	pub fn into_wait_future(self) -> Box<dyn Future<Item=S::SuccessfulResult, Error=Error> + Send> {
		Box::new(self.oneshot
			.map_err(|e| Error::Internal(e.to_string()))
			.and_then(|res| res))
	}
}

impl<T> CompletionSignal<T> {
	pub fn new() -> (Self, Oneshot<Result<T, Error>>) {
		let (complete, oneshot) = oneshot();
		let completion_condvar = if cfg!(test) { Some(Condvar::new()) } else { None };
		(CompletionSignal {
			completion_future: Mutex::new(Some(complete)),
			completion_condvar,
		}, oneshot)
	}

	pub fn send(&self, result: Result<T, Error>) {
		let completion_future = ::std::mem::replace(&mut *self.completion_future.lock(), None);
		completion_future.map(|c| c.send(result));
		if let Some(ref completion_condvar) = self.completion_condvar {
			completion_condvar.notify_all();
		}
	}
}
|
|
||||||
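// `CompletionSignal` and `WaitableSession` are the two halves of a single
// futures-0.1 oneshot channel: the session side keeps the sender, the waiting
// caller keeps the receiver. A minimal, self-contained sketch of that pairing
// and of the error flattening done by `into_wait_future` (assumes
// `futures = "0.1"`; the result type is simplified and all names below are
// illustrative).
use futures::Future;
use futures::sync::oneshot;

type SessionResult = Result<u64, String>;

fn main() {
	// the session keeps the sender, the caller keeps the receiver,
	// mirroring the CompletionSignal / WaitableSession split above
	let (tx, rx) = oneshot::channel::<SessionResult>();

	// the session completes the oneshot exactly once, as in CompletionSignal::send
	tx.send(Ok(42)).expect("receiver is still alive");

	// the caller mirrors into_wait_future: a dropped sender becomes an internal
	// error, and the inner session result is flattened out of the channel
	let result = rx
		.map_err(|e| format!("internal: {}", e))
		.and_then(|res| res)
		.wait();

	assert_eq!(result, Ok(42));
}
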
pub fn create_cluster_view(self_key_pair: Arc<dyn SigningKeyPair>, connections: Arc<dyn ConnectionProvider>, requires_all_connections: bool) -> Result<Arc<dyn Cluster>, Error> {
	let mut connected_nodes = connections.connected_nodes()?;
	let disconnected_nodes = connections.disconnected_nodes();

	let disconnected_nodes_count = disconnected_nodes.len();
	if requires_all_connections && disconnected_nodes_count != 0 {
		return Err(Error::NodeDisconnected);
	}

	connected_nodes.insert(self_key_pair.public().clone());

	let connected_nodes_count = connected_nodes.len();
	Ok(Arc::new(ClusterView::new(self_key_pair, connections, connected_nodes, connected_nodes_count + disconnected_nodes_count)))
}

#[cfg(test)]
mod tests {
	use std::sync::Arc;
	use std::sync::atomic::{AtomicUsize, Ordering};
	use crypto::publickey::{Random, Generator};
	use key_server_cluster::{Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet, PlainNodeKeyPair};
	use key_server_cluster::cluster::ClusterConfiguration;
	use key_server_cluster::connection_trigger::SimpleServersSetChangeSessionCreatorConnector;
	use key_server_cluster::cluster::tests::DummyCluster;
	use key_server_cluster::generation_session::SessionImpl as GenerationSession;
	use super::{ClusterSessions, AdminSessionCreationData, ClusterSessionsListener,
		ClusterSessionsContainerState, SESSION_TIMEOUT_INTERVAL};

	pub fn make_cluster_sessions() -> ClusterSessions {
		let key_pair = Random.generate();
		let config = ClusterConfiguration {
			self_key_pair: Arc::new(PlainNodeKeyPair::new(key_pair.clone())),
			key_server_set: Arc::new(MapKeyServerSet::new(false, vec![(key_pair.public().clone(), format!("127.0.0.1:{}", 100).parse().unwrap())].into_iter().collect())),
			key_storage: Arc::new(DummyKeyStorage::default()),
			acl_storage: Arc::new(DummyAclStorage::default()),
			admin_public: Some(Random.generate().public().clone()),
			preserve_sessions: false,
		};
		ClusterSessions::new(&config, Arc::new(SimpleServersSetChangeSessionCreatorConnector {
			admin_public: Some(Random.generate().public().clone()),
		}))
	}

	#[test]
	fn cluster_session_cannot_be_started_if_exclusive_session_is_active() {
		let sessions = make_cluster_sessions();
		sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None).unwrap();
		match sessions.admin_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, true, Some(AdminSessionCreationData::ShareAdd(Default::default()))) {
			Err(Error::HasActiveSessions) => (),
			Err(e) => unreachable!(format!("{}", e)),
			Ok(_) => unreachable!("OK"),
		}
	}

	#[test]
	fn exclusive_session_cannot_be_started_if_other_session_is_active() {
		let sessions = make_cluster_sessions();

		sessions.admin_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, true, Some(AdminSessionCreationData::ShareAdd(Default::default()))).unwrap();
		match sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None) {
			Err(Error::ExclusiveSessionActive) => (),
			Err(e) => unreachable!(format!("{}", e)),
			Ok(_) => unreachable!("OK"),
		}
	}

	#[test]
	fn session_listener_works() {
		#[derive(Default)]
		struct GenerationSessionListener {
			inserted: AtomicUsize,
			removed: AtomicUsize,
		}

		impl ClusterSessionsListener<GenerationSession> for GenerationSessionListener {
			fn on_session_inserted(&self, _session: Arc<GenerationSession>) {
				self.inserted.fetch_add(1, Ordering::Relaxed);
			}

			fn on_session_removed(&self, _session: Arc<GenerationSession>) {
				self.removed.fetch_add(1, Ordering::Relaxed);
			}
		}

		let listener = Arc::new(GenerationSessionListener::default());
		let sessions = make_cluster_sessions();
		sessions.generation_sessions.add_listener(listener.clone());

		sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None).unwrap();
		assert_eq!(listener.inserted.load(Ordering::Relaxed), 1);
		assert_eq!(listener.removed.load(Ordering::Relaxed), 0);

		sessions.generation_sessions.remove(&Default::default());
		assert_eq!(listener.inserted.load(Ordering::Relaxed), 1);
		assert_eq!(listener.removed.load(Ordering::Relaxed), 1);
	}

	#[test]
	fn last_session_removal_sets_container_state_to_idle() {
		let sessions = make_cluster_sessions();

		sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None).unwrap();
		assert_eq!(*sessions.generation_sessions.container_state.lock(), ClusterSessionsContainerState::Active(1));

		sessions.generation_sessions.remove(&Default::default());
		assert_eq!(*sessions.generation_sessions.container_state.lock(), ClusterSessionsContainerState::Idle);
	}

	#[test]
	fn last_session_removal_by_timeout_sets_container_state_to_idle() {
		let sessions = make_cluster_sessions();

		sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None).unwrap();
		assert_eq!(*sessions.generation_sessions.container_state.lock(), ClusterSessionsContainerState::Active(1));

		sessions.generation_sessions.sessions.write().get_mut(&Default::default()).unwrap().last_message_time -= SESSION_TIMEOUT_INTERVAL * 2;

		sessions.generation_sessions.stop_stalled_sessions();
		assert_eq!(sessions.generation_sessions.sessions.read().len(), 0);
		assert_eq!(*sessions.generation_sessions.container_state.lock(), ClusterSessionsContainerState::Idle);
	}

	#[test]
	fn last_session_removal_by_node_timeout_sets_container_state_to_idle() {
		let sessions = make_cluster_sessions();

		sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None).unwrap();
		assert_eq!(*sessions.generation_sessions.container_state.lock(), ClusterSessionsContainerState::Active(1));

		sessions.generation_sessions.on_connection_timeout(&Default::default());
		assert_eq!(sessions.generation_sessions.sessions.read().len(), 0);
		assert_eq!(*sessions.generation_sessions.container_state.lock(), ClusterSessionsContainerState::Idle);
	}
}

@ -1,559 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::collections::BTreeMap;
use parking_lot::RwLock;
use crypto::publickey::Public;
use key_server_cluster::{Error, NodeId, SessionId, Requester, AclStorage, KeyStorage, DocumentKeyShare, SessionMeta};
use key_server_cluster::cluster::{Cluster, ClusterConfiguration};
use key_server_cluster::connection_trigger::ServersSetChangeSessionCreatorConnector;
use key_server_cluster::cluster_sessions::{WaitableSession, ClusterSession, SessionIdWithSubSession,
	AdminSession, AdminSessionCreationData};
use key_server_cluster::message::{self, Message, DecryptionMessage, SchnorrSigningMessage, ConsensusMessageOfShareAdd,
	ShareAddMessage, ServersSetChangeMessage, ConsensusMessage, ConsensusMessageWithServersSet, EcdsaSigningMessage};
use key_server_cluster::generation_session::{SessionImpl as GenerationSessionImpl, SessionParams as GenerationSessionParams};
use key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl,
	SessionParams as DecryptionSessionParams};
use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl, SessionParams as EncryptionSessionParams};
use key_server_cluster::signing_session_ecdsa::{SessionImpl as EcdsaSigningSessionImpl,
	SessionParams as EcdsaSigningSessionParams};
use key_server_cluster::signing_session_schnorr::{SessionImpl as SchnorrSigningSessionImpl,
	SessionParams as SchnorrSigningSessionParams};
use key_server_cluster::share_add_session::{SessionImpl as ShareAddSessionImpl,
	SessionParams as ShareAddSessionParams, IsolatedSessionTransport as ShareAddTransport};
use key_server_cluster::servers_set_change_session::{SessionImpl as ServersSetChangeSessionImpl,
	SessionParams as ServersSetChangeSessionParams};
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl,
	SessionParams as KeyVersionNegotiationSessionParams, IsolatedSessionTransport as VersionNegotiationTransport,
	FastestResultComputer as FastestResultKeyVersionsResultComputer};
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;

/// Generic cluster session creator.
pub trait ClusterSessionCreator<S: ClusterSession> {
	/// Get creation data from message.
	fn creation_data_from_message(_message: &Message) -> Result<Option<S::CreationData>, Error> {
		Ok(None)
	}

	/// Prepare error message.
	fn make_error_message(sid: S::Id, nonce: u64, err: Error) -> Message;

	/// Create cluster session.
	fn create(
		&self,
		cluster: Arc<dyn Cluster>,
		master: NodeId,
		nonce: Option<u64>,
		id: S::Id,
		creation_data: Option<S::CreationData>,
	) -> Result<WaitableSession<S>, Error>;
}

/// Message with session id.
pub trait IntoSessionId<K> {
	/// Get session id.
	fn into_session_id(&self) -> Result<K, Error>;
}

pub struct SessionCreatorCore {
	/// Self node id.
	self_node_id: NodeId,
	/// Reference to key storage.
	key_storage: Arc<dyn KeyStorage>,
	/// Reference to ACL storage.
	acl_storage: Arc<dyn AclStorage>,
	/// Always-increasing sessions counter. Used as the session nonce to prevent replay attacks:
	/// 1) during handshake, KeyServers generate a new random key to encrypt messages
	/// => there's no way to use messages from previous connections for replay attacks;
	/// 2) when a session (of any type) is started, the master node increases its own session counter and broadcasts it;
	/// 3) when a slave KeyServer receives a session initialization message, it checks that the new nonce is larger than the previous one from the same master
	/// => there's no way to use messages from previous sessions for replay attacks;
	/// 4) the KeyServer checks that each session message carries the same nonce as the initialization message.
	/// Given that (A) the handshake is secure and (B) the session itself is initially replay-protected,
	/// this guarantees that sessions are replay-protected.
	session_counter: AtomicUsize,
	/// Maximal session nonce received from a given connection.
	max_nonce: RwLock<BTreeMap<NodeId, u64>>,
}

impl SessionCreatorCore {
	/// Create new session creator core.
	pub fn new(config: &ClusterConfiguration) -> Self {
		SessionCreatorCore {
			self_node_id: config.self_key_pair.public().clone(),
			acl_storage: config.acl_storage.clone(),
			key_storage: config.key_storage.clone(),
			session_counter: AtomicUsize::new(0),
			max_nonce: RwLock::new(BTreeMap::new()),
		}
	}

	/// Called when the connection to a node has timed out.
	pub fn on_connection_timeout(&self, node_id: &NodeId) {
		self.max_nonce.write().remove(node_id);
	}

	/// Check or generate new session nonce.
	fn check_session_nonce(&self, master: &NodeId, nonce: Option<u64>) -> Result<u64, Error> {
		// if we're the master node of the session, the nonce should be generated here;
		// if we're a slave node of the session, the nonce should be passed from outside
		match nonce {
			Some(nonce) => match nonce > *self.max_nonce.write().entry(master.clone()).or_insert(0) {
				true => Ok(nonce),
				false => Err(Error::ReplayProtection),
			},
			None => Ok(self.session_counter.fetch_add(1, Ordering::Relaxed) as u64 + 1),
		}
	}

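// A minimal, self-contained sketch of the replay-protection rule documented
// on `SessionCreatorCore`: a slave accepts a session nonce only if it is
// strictly greater than the last nonce seen from the same master. Unlike the
// code above, this sketch also records the accepted nonce as the new maximum;
// all names below are illustrative.
use std::collections::BTreeMap;

fn check_nonce(max_nonce: &mut BTreeMap<String, u64>, master: &str, nonce: u64) -> Result<u64, &'static str> {
	let last = max_nonce.entry(master.to_string()).or_insert(0);
	if nonce > *last {
		*last = nonce; // remember the highest accepted nonce per master
		Ok(nonce)
	} else {
		Err("replay protection") // replayed or stale initialization message
	}
}

fn main() {
	let mut seen = BTreeMap::new();
	assert_eq!(check_nonce(&mut seen, "master-a", 1), Ok(1));
	assert_eq!(check_nonce(&mut seen, "master-a", 1), Err("replay protection")); // replay rejected
	assert_eq!(check_nonce(&mut seen, "master-a", 2), Ok(2));
}
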
	/// Read key share.
	fn read_key_share(&self, key_id: &SessionId) -> Result<Option<DocumentKeyShare>, Error> {
		self.key_storage.get(key_id)
	}
}

/// Generation session creator.
pub struct GenerationSessionCreator {
	/// True if generation sessions must fail.
	pub make_faulty_generation_sessions: AtomicBool,
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl GenerationSessionCreator {
	#[cfg(test)]
	pub fn make_faulty_generation_sessions(&self) {
		self.make_faulty_generation_sessions.store(true, Ordering::Relaxed);
	}
}

impl ClusterSessionCreator<GenerationSessionImpl> for GenerationSessionCreator {
	fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message {
		message::Message::Generation(message::GenerationMessage::SessionError(message::SessionError {
			session: sid.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(
		&self,
		cluster: Arc<dyn Cluster>,
		master: NodeId,
		nonce: Option<u64>,
		id: SessionId,
		_creation_data: Option<()>,
	) -> Result<WaitableSession<GenerationSessionImpl>, Error> {
		// check that there's no finished encryption session with the same id
		if self.core.key_storage.contains(&id) {
			return Err(Error::ServerKeyAlreadyGenerated);
		}

		let nonce = self.core.check_session_nonce(&master, nonce)?;
		let (session, oneshot) = GenerationSessionImpl::new(GenerationSessionParams {
			id: id.clone(),
			self_node_id: self.core.self_node_id.clone(),
			key_storage: Some(self.core.key_storage.clone()),
			cluster,
			nonce: Some(nonce),
		});

		if self.make_faulty_generation_sessions.load(Ordering::Relaxed) {
			session.simulate_faulty_behaviour();
		}

		Ok(WaitableSession::new(session, oneshot))
	}
}

/// Encryption session creator.
pub struct EncryptionSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl ClusterSessionCreator<EncryptionSessionImpl> for EncryptionSessionCreator {
	fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message {
		message::Message::Encryption(message::EncryptionMessage::EncryptionSessionError(message::EncryptionSessionError {
			session: sid.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(
		&self,
		cluster: Arc<dyn Cluster>,
		master: NodeId,
		nonce: Option<u64>,
		id: SessionId,
		_creation_data: Option<()>,
	) -> Result<WaitableSession<EncryptionSessionImpl>, Error> {
		let encrypted_data = self.core.read_key_share(&id)?;
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		let (session, oneshot) = EncryptionSessionImpl::new(EncryptionSessionParams {
			id,
			self_node_id: self.core.self_node_id.clone(),
			encrypted_data,
			key_storage: self.core.key_storage.clone(),
			cluster,
			nonce,
		})?;

		Ok(WaitableSession::new(session, oneshot))
	}
}

/// Decryption session creator.
pub struct DecryptionSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl ClusterSessionCreator<DecryptionSessionImpl> for DecryptionSessionCreator {
	fn creation_data_from_message(message: &Message) -> Result<Option<Requester>, Error> {
		match *message {
			Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(ref message)) => match message.message {
				ConsensusMessage::InitializeConsensusSession(ref message) => Ok(Some(message.requester.clone().into())),
				_ => Err(Error::InvalidMessage),
			},
			Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(ref message)) => Ok(Some(message.requester.clone().into())),
			_ => Err(Error::InvalidMessage),
		}
	}

	fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message {
		message::Message::Decryption(message::DecryptionMessage::DecryptionSessionError(message::DecryptionSessionError {
			session: sid.id.into(),
			sub_session: sid.access_key.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(
		&self,
		cluster: Arc<dyn Cluster>,
		master: NodeId,
		nonce: Option<u64>,
		id: SessionIdWithSubSession,
		requester: Option<Requester>,
	) -> Result<WaitableSession<DecryptionSessionImpl>, Error> {
		let encrypted_data = self.core.read_key_share(&id.id)?;
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		let (session, oneshot) = DecryptionSessionImpl::new(DecryptionSessionParams {
			meta: SessionMeta {
				id: id.id,
				self_node_id: self.core.self_node_id.clone(),
				master_node_id: master,
				threshold: encrypted_data.as_ref().map(|ks| ks.threshold).unwrap_or_default(),
				configured_nodes_count: cluster.configured_nodes_count(),
				connected_nodes_count: cluster.connected_nodes_count(),
			},
			access_key: id.access_key,
			key_share: encrypted_data,
			acl_storage: self.core.acl_storage.clone(),
			cluster,
			nonce,
		}, requester)?;

		Ok(WaitableSession::new(session, oneshot))
	}
}

/// Schnorr signing session creator.
pub struct SchnorrSigningSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl ClusterSessionCreator<SchnorrSigningSessionImpl> for SchnorrSigningSessionCreator {
	fn creation_data_from_message(message: &Message) -> Result<Option<Requester>, Error> {
		match *message {
			Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage(ref message)) => match message.message {
				ConsensusMessage::InitializeConsensusSession(ref message) => Ok(Some(message.requester.clone().into())),
				_ => Err(Error::InvalidMessage),
			},
			Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegation(ref message)) => Ok(Some(message.requester.clone().into())),
			_ => Err(Error::InvalidMessage),
		}
	}

	fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message {
		message::Message::SchnorrSigning(message::SchnorrSigningMessage::SchnorrSigningSessionError(message::SchnorrSigningSessionError {
			session: sid.id.into(),
			sub_session: sid.access_key.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(
		&self,
		cluster: Arc<dyn Cluster>,
		master: NodeId,
		nonce: Option<u64>,
		id: SessionIdWithSubSession,
		requester: Option<Requester>,
	) -> Result<WaitableSession<SchnorrSigningSessionImpl>, Error> {
		let encrypted_data = self.core.read_key_share(&id.id)?;
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		let (session, oneshot) = SchnorrSigningSessionImpl::new(SchnorrSigningSessionParams {
			meta: SessionMeta {
				id: id.id,
				self_node_id: self.core.self_node_id.clone(),
				master_node_id: master,
				threshold: encrypted_data.as_ref().map(|ks| ks.threshold).unwrap_or_default(),
				configured_nodes_count: cluster.configured_nodes_count(),
				connected_nodes_count: cluster.connected_nodes_count(),
			},
			access_key: id.access_key,
			key_share: encrypted_data,
			acl_storage: self.core.acl_storage.clone(),
			cluster,
			nonce,
		}, requester)?;
		Ok(WaitableSession::new(session, oneshot))
	}
}

/// ECDSA signing session creator.
pub struct EcdsaSigningSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl ClusterSessionCreator<EcdsaSigningSessionImpl> for EcdsaSigningSessionCreator {
	fn creation_data_from_message(message: &Message) -> Result<Option<Requester>, Error> {
		match *message {
			Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref message)) => match message.message {
				ConsensusMessage::InitializeConsensusSession(ref message) => Ok(Some(message.requester.clone().into())),
				_ => Err(Error::InvalidMessage),
			},
			Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegation(ref message)) => Ok(Some(message.requester.clone().into())),
			_ => Err(Error::InvalidMessage),
		}
	}

	fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message {
		message::Message::EcdsaSigning(message::EcdsaSigningMessage::EcdsaSigningSessionError(message::EcdsaSigningSessionError {
			session: sid.id.into(),
			sub_session: sid.access_key.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(
		&self,
		cluster: Arc<dyn Cluster>,
		master: NodeId,
		nonce: Option<u64>,
		id: SessionIdWithSubSession,
		requester: Option<Requester>,
	) -> Result<WaitableSession<EcdsaSigningSessionImpl>, Error> {
		let encrypted_data = self.core.read_key_share(&id.id)?;
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		let (session, oneshot) = EcdsaSigningSessionImpl::new(EcdsaSigningSessionParams {
			meta: SessionMeta {
				id: id.id,
				self_node_id: self.core.self_node_id.clone(),
				master_node_id: master,
				threshold: encrypted_data.as_ref().map(|ks| ks.threshold).unwrap_or_default(),
				configured_nodes_count: cluster.configured_nodes_count(),
				connected_nodes_count: cluster.connected_nodes_count(),
			},
			access_key: id.access_key,
			key_share: encrypted_data,
			acl_storage: self.core.acl_storage.clone(),
			cluster,
			nonce,
		}, requester)?;

		Ok(WaitableSession::new(session, oneshot))
	}
}

/// Key version negotiation session creator.
pub struct KeyVersionNegotiationSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl ClusterSessionCreator<KeyVersionNegotiationSessionImpl<VersionNegotiationTransport>> for KeyVersionNegotiationSessionCreator {
	fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message {
		message::Message::KeyVersionNegotiation(message::KeyVersionNegotiationMessage::KeyVersionsError(message::KeyVersionsError {
			session: sid.id.into(),
			sub_session: sid.access_key.into(),
			session_nonce: nonce,
			error: err.into(),
			// we don't care about the continue action here: it only matters when we're
			// completing the session with a confirmed fatal error from the result computer
			continue_with: None,
		}))
	}

	fn create(
		&self,
		cluster: Arc<dyn Cluster>,
		master: NodeId,
		nonce: Option<u64>,
		id: SessionIdWithSubSession,
		_creation_data: Option<()>,
	) -> Result<WaitableSession<KeyVersionNegotiationSessionImpl<VersionNegotiationTransport>>, Error> {
		let configured_nodes_count = cluster.configured_nodes_count();
		let connected_nodes_count = cluster.connected_nodes_count();
		let encrypted_data = self.core.read_key_share(&id.id)?;
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		let computer = Arc::new(FastestResultKeyVersionsResultComputer::new(self.core.self_node_id.clone(), encrypted_data.as_ref(),
			configured_nodes_count, configured_nodes_count));
		let (session, oneshot) = KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams {
			meta: ShareChangeSessionMeta {
				id: id.id.clone(),
				self_node_id: self.core.self_node_id.clone(),
				master_node_id: master,
				configured_nodes_count,
				connected_nodes_count,
			},
			sub_session: id.access_key.clone(),
			key_share: encrypted_data,
			result_computer: computer,
			transport: VersionNegotiationTransport {
				cluster,
				key_id: id.id,
				sub_session: id.access_key.clone(),
				nonce,
			},
			nonce,
		});
		Ok(WaitableSession::new(session, oneshot))
	}
}

/// Administrative session creator.
pub struct AdminSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
	/// Administrator public.
	pub admin_public: Option<Public>,
	/// Servers set change sessions creator connector.
	pub servers_set_change_session_creator_connector: Arc<dyn ServersSetChangeSessionCreatorConnector>,
}

impl ClusterSessionCreator<AdminSession> for AdminSessionCreator {
	fn creation_data_from_message(message: &Message) -> Result<Option<AdminSessionCreationData>, Error> {
		match *message {
			Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref message)) => match message.message {
				ConsensusMessageWithServersSet::InitializeConsensusSession(ref message) => Ok(Some(AdminSessionCreationData::ServersSetChange(
					message.migration_id.clone().map(Into::into),
					message.new_nodes_set.clone().into_iter().map(Into::into).collect()
				))),
				_ => Err(Error::InvalidMessage),
			},
			Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(ref message)) => match message.message {
				ConsensusMessageOfShareAdd::InitializeConsensusSession(ref message) => Ok(Some(AdminSessionCreationData::ShareAdd(message.version.clone().into()))),
				_ => Err(Error::InvalidMessage),
			},
			_ => Err(Error::InvalidMessage),
		}
	}

	fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message {
		message::Message::ServersSetChange(message::ServersSetChangeMessage::ServersSetChangeError(message::ServersSetChangeError {
			session: sid.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(
		&self,
		cluster: Arc<dyn Cluster>,
		master: NodeId,
		nonce: Option<u64>,
		id: SessionId,
		creation_data: Option<AdminSessionCreationData>,
	) -> Result<WaitableSession<AdminSession>, Error> {
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		match creation_data {
			Some(AdminSessionCreationData::ShareAdd(version)) => {
				let (session, oneshot) = ShareAddSessionImpl::new(ShareAddSessionParams {
					meta: ShareChangeSessionMeta {
						id: id.clone(),
						self_node_id: self.core.self_node_id.clone(),
						master_node_id: master,
						configured_nodes_count: cluster.configured_nodes_count(),
						connected_nodes_count: cluster.connected_nodes_count(),
					},
					transport: ShareAddTransport::new(id.clone(), Some(version), nonce, cluster),
					key_storage: self.core.key_storage.clone(),
					nonce,
					admin_public: Some(self.admin_public.clone().ok_or(Error::AccessDenied)?),
				})?;
				Ok(WaitableSession::new(AdminSession::ShareAdd(session), oneshot))
			},
			Some(AdminSessionCreationData::ServersSetChange(migration_id, new_nodes_set)) => {
				let admin_public = self.servers_set_change_session_creator_connector.admin_public(migration_id.as_ref(), new_nodes_set)
					.map_err(|_| Error::AccessDenied)?;

				let (session, oneshot) = ServersSetChangeSessionImpl::new(ServersSetChangeSessionParams {
					meta: ShareChangeSessionMeta {
						id: id.clone(),
						self_node_id: self.core.self_node_id.clone(),
						master_node_id: master,
						configured_nodes_count: cluster.configured_nodes_count(),
						connected_nodes_count: cluster.connected_nodes_count(),
					},
					cluster: cluster.clone(),
					key_storage: self.core.key_storage.clone(),
					nonce,
					all_nodes_set: cluster.nodes(),
					admin_public,
					migration_id,
				})?;
				Ok(WaitableSession::new(AdminSession::ServersSetChange(session), oneshot))
			},
			None => unreachable!("expected to be called with non-empty creation data; qed"),
		}
	}
}

impl IntoSessionId<SessionId> for Message {
	fn into_session_id(&self) -> Result<SessionId, Error> {
		match *self {
			Message::Generation(ref message) => Ok(message.session_id().clone()),
			Message::Encryption(ref message) => Ok(message.session_id().clone()),
			Message::Decryption(_) => Err(Error::InvalidMessage),
			Message::SchnorrSigning(_) => Err(Error::InvalidMessage),
			Message::EcdsaSigning(_) => Err(Error::InvalidMessage),
			Message::ServersSetChange(ref message) => Ok(message.session_id().clone()),
			Message::ShareAdd(ref message) => Ok(message.session_id().clone()),
			Message::KeyVersionNegotiation(_) => Err(Error::InvalidMessage),
			Message::Cluster(_) => Err(Error::InvalidMessage),
		}
	}
}

impl IntoSessionId<SessionIdWithSubSession> for Message {
	fn into_session_id(&self) -> Result<SessionIdWithSubSession, Error> {
		match *self {
			Message::Generation(_) => Err(Error::InvalidMessage),
			Message::Encryption(_) => Err(Error::InvalidMessage),
			Message::Decryption(ref message) => Ok(SessionIdWithSubSession::new(message.session_id().clone(), message.sub_session_id().clone())),
			Message::SchnorrSigning(ref message) => Ok(SessionIdWithSubSession::new(message.session_id().clone(), message.sub_session_id().clone())),
			Message::EcdsaSigning(ref message) => Ok(SessionIdWithSubSession::new(message.session_id().clone(), message.sub_session_id().clone())),
			Message::ServersSetChange(_) => Err(Error::InvalidMessage),
			Message::ShareAdd(_) => Err(Error::InvalidMessage),
			Message::KeyVersionNegotiation(ref message) => Ok(SessionIdWithSubSession::new(message.session_id().clone(), message.sub_session_id().clone())),
			Message::Cluster(_) => Err(Error::InvalidMessage),
		}
	}
}

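// The two `IntoSessionId` impls above exist because messages are routed in two
// key spaces: plain sessions keyed by `SessionId`, and sub-sessions keyed by
// (`SessionId`, access key). A minimal, self-contained sketch of the same
// routing idea with a simplified message enum (all names are illustrative):
enum Msg {
	Generation(u64),
	Decryption(u64, u64), // (session id, sub-session key)
}

trait IntoId<K> {
	fn into_id(&self) -> Result<K, &'static str>;
}

impl IntoId<u64> for Msg {
	fn into_id(&self) -> Result<u64, &'static str> {
		match *self {
			Msg::Generation(id) => Ok(id),
			Msg::Decryption(..) => Err("invalid message"), // belongs to the sub-session space
		}
	}
}

impl IntoId<(u64, u64)> for Msg {
	fn into_id(&self) -> Result<(u64, u64), &'static str> {
		match *self {
			Msg::Generation(_) => Err("invalid message"),
			Msg::Decryption(id, sub) => Ok((id, sub)),
		}
	}
}

fn main() {
	let msg = Msg::Decryption(1, 7);
	let id: (u64, u64) = msg.into_id().unwrap();
	assert_eq!(id, (1, 7));
}
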
@ -1,392 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{BTreeSet, BTreeMap};
use std::collections::btree_map::Entry;
use std::net::SocketAddr;
use std::sync::Arc;
use ethereum_types::H256;
use crypto::publickey::Public;
use key_server_cluster::{KeyServerSet, KeyServerSetSnapshot};
use key_server_cluster::cluster::{ClusterConfiguration, ServersSetChangeParams};
use key_server_cluster::cluster_sessions::AdminSession;
use key_server_cluster::cluster_connections::Connection;
use key_server_cluster::cluster_connections_net::NetConnectionsContainer;
use types::{Error, NodeId};
use blockchain::SigningKeyPair;

#[derive(Debug, Clone, Copy, PartialEq)]
/// Describes which maintain() call is required.
pub enum Maintain {
	/// We need to maintain() both connections && session.
	SessionAndConnections,
	/// Only call maintain_session.
	Session,
	/// Only call maintain_connections.
	Connections,
}

/// Connection trigger, which executes necessary actions when the set of key servers changes.
pub trait ConnectionTrigger: Send + Sync {
	/// On maintain interval.
	fn on_maintain(&mut self) -> Option<Maintain>;
	/// When connection is established.
	fn on_connection_established(&mut self, node: &NodeId) -> Option<Maintain>;
	/// When connection is closed.
	fn on_connection_closed(&mut self, node: &NodeId) -> Option<Maintain>;
	/// Maintain active sessions. Returns Some if a servers set change session must be created.
	fn maintain_session(&mut self) -> Option<ServersSetChangeParams>;
	/// Maintain active connections.
	fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer);
	/// Return connector for the servers set change session creator.
	fn servers_set_change_creator_connector(&self) -> Arc<dyn ServersSetChangeSessionCreatorConnector>;
}

/// Servers set change session creator connector.
pub trait ServersSetChangeSessionCreatorConnector: Send + Sync {
	/// Get actual administrator public key. For manual-migration configurations it is the pre-configured
	/// administrator key. For auto-migration configurations it is the key of the actual MigrationSession master node.
	fn admin_public(&self, migration_id: Option<&H256>, new_server_set: BTreeSet<NodeId>) -> Result<Public, Error>;
	/// Set active servers set change session.
	fn set_key_servers_set_change_session(&self, session: Arc<AdminSession>);
}

/// Simple connection trigger, which only keeps connections to the current_set.
pub struct SimpleConnectionTrigger {
	/// Key server set cluster.
	key_server_set: Arc<dyn KeyServerSet>,
	/// Trigger connections.
	connections: TriggerConnections,
	/// Servers set change session creator connector.
	connector: Arc<dyn ServersSetChangeSessionCreatorConnector>,
}

/// Simple servers set change session creator connector, which just returns
/// the pre-configured administrator public key when asked.
pub struct SimpleServersSetChangeSessionCreatorConnector {
	/// Secret store administrator public key.
	pub admin_public: Option<Public>,
}

#[derive(Debug, Clone, Copy, PartialEq)]
/// Action with trigger connections.
pub enum ConnectionsAction {
	/// Connect to nodes from the old set only.
	ConnectToCurrentSet,
	/// Connect to nodes from the migration set.
	ConnectToMigrationSet,
}

/// Trigger connections.
pub struct TriggerConnections {
	/// This node key pair.
	pub self_key_pair: Arc<dyn SigningKeyPair>,
}

impl SimpleConnectionTrigger {
	/// Create new simple connection trigger from cluster configuration.
	pub fn with_config(config: &ClusterConfiguration) -> Self {
		Self::new(config.key_server_set.clone(), config.self_key_pair.clone(), config.admin_public)
	}

	/// Create new simple connection trigger.
	pub fn new(key_server_set: Arc<dyn KeyServerSet>, self_key_pair: Arc<dyn SigningKeyPair>, admin_public: Option<Public>) -> Self {
		SimpleConnectionTrigger {
			key_server_set,
			connections: TriggerConnections {
				self_key_pair,
			},
			connector: Arc::new(SimpleServersSetChangeSessionCreatorConnector {
				admin_public,
			}),
		}
	}
}

impl ConnectionTrigger for SimpleConnectionTrigger {
	fn on_maintain(&mut self) -> Option<Maintain> {
		Some(Maintain::Connections)
	}

	fn on_connection_established(&mut self, _node: &NodeId) -> Option<Maintain> {
		None
	}

	fn on_connection_closed(&mut self, _node: &NodeId) -> Option<Maintain> {
		// we do not want to reconnect after every connection close,
		// because it could be a part of something bigger
		None
	}

	fn maintain_session(&mut self) -> Option<ServersSetChangeParams> {
		None
	}

	fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer) {
		self.connections.maintain(ConnectionsAction::ConnectToCurrentSet, connections, &self.key_server_set.snapshot())
	}

	fn servers_set_change_creator_connector(&self) -> Arc<dyn ServersSetChangeSessionCreatorConnector> {
		self.connector.clone()
	}
}

impl ServersSetChangeSessionCreatorConnector for SimpleServersSetChangeSessionCreatorConnector {
	fn admin_public(&self, _migration_id: Option<&H256>, _new_server_set: BTreeSet<NodeId>) -> Result<Public, Error> {
		self.admin_public.clone().ok_or(Error::AccessDenied)
	}

	fn set_key_servers_set_change_session(&self, _session: Arc<AdminSession>) {
	}
}

impl TriggerConnections {
	pub fn maintain(&self, action: ConnectionsAction, data: &mut NetConnectionsContainer, server_set: &KeyServerSetSnapshot) {
		match action {
			ConnectionsAction::ConnectToCurrentSet => {
				adjust_connections(self.self_key_pair.public(), data, &server_set.current_set);
			},
			ConnectionsAction::ConnectToMigrationSet => {
				let migration_set = server_set.migration.as_ref().map(|s| s.set.clone()).unwrap_or_default();
				adjust_connections(self.self_key_pair.public(), data, &migration_set);
			},
		}
	}
}

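// A minimal, self-contained sketch of the dispatch above: the same adjust
// step runs against either the current set or the migration set, and the
// migration set defaults to empty when no migration is pending. Sets are
// simplified to `Vec<u8>`; all names below are illustrative.
#[derive(Clone, Copy)]
enum Action {
	ConnectToCurrentSet,
	ConnectToMigrationSet,
}

struct Snapshot {
	current_set: Vec<u8>,
	migration_set: Option<Vec<u8>>,
}

fn required_set(action: Action, snapshot: &Snapshot) -> Vec<u8> {
	match action {
		Action::ConnectToCurrentSet => snapshot.current_set.clone(),
		// no pending migration means an empty required set
		Action::ConnectToMigrationSet => snapshot.migration_set.clone().unwrap_or_default(),
	}
}

fn main() {
	let snapshot = Snapshot { current_set: vec![1, 2], migration_set: None };
	assert_eq!(required_set(Action::ConnectToMigrationSet, &snapshot), Vec::<u8>::new());
}
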
fn adjust_connections(
	self_node_id: &NodeId,
	data: &mut NetConnectionsContainer,
	required_set: &BTreeMap<NodeId, SocketAddr>,
) {
	if !required_set.contains_key(self_node_id) {
		if !data.is_isolated {
			trace!(target: "secretstore_net", "{}: isolated from cluster", self_node_id);
		}

		data.is_isolated = true;
		data.connections.clear();
		data.nodes.clear();
		return;
	}

	data.is_isolated = false;
	for node_to_disconnect in select_nodes_to_disconnect(&data.nodes, required_set) {
		if let Entry::Occupied(entry) = data.connections.entry(node_to_disconnect.clone()) {
			trace!(target: "secretstore_net", "{}: adjusting connections - removing connection to {} at {}",
				self_node_id, entry.get().node_id(), entry.get().node_address());
			entry.remove();
		}

		data.nodes.remove(&node_to_disconnect);
	}

	for (node_to_connect, node_addr) in required_set {
		if node_to_connect != self_node_id {
			data.nodes.insert(node_to_connect.clone(), node_addr.clone());
		}
	}
}

fn select_nodes_to_disconnect(current_set: &BTreeMap<NodeId, SocketAddr>, new_set: &BTreeMap<NodeId, SocketAddr>) -> Vec<NodeId> {
	current_set.iter()
		.filter(|&(node_id, node_addr)| match new_set.get(node_id) {
			Some(new_node_addr) => node_addr != new_node_addr,
			None => true,
		})
		.map(|(node_id, _)| node_id.clone())
		.collect()
}

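// A minimal, self-contained sketch of the disconnect rule implemented by
// `select_nodes_to_disconnect`: a node is dropped when it disappears from the
// new set or when its address changes. Node ids and addresses are simplified
// to `u8` and `&str`; all names below are illustrative.
use std::collections::BTreeMap;

fn nodes_to_disconnect(current: &BTreeMap<u8, &str>, new: &BTreeMap<u8, &str>) -> Vec<u8> {
	current.iter()
		.filter(|&(id, addr)| match new.get(id) {
			Some(new_addr) => new_addr != addr, // address changed
			None => true, // node removed
		})
		.map(|(id, _)| *id)
		.collect()
}

fn main() {
	let current: BTreeMap<u8, &str> = vec![(1, "a:1"), (2, "a:2")].into_iter().collect();
	let new: BTreeMap<u8, &str> = vec![(1, "a:1"), (2, "b:2")].into_iter().collect();
	assert_eq!(nodes_to_disconnect(&current, &new), vec![2]); // only the changed node
}
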
#[cfg(test)]
mod tests {
	use std::collections::BTreeSet;
	use std::sync::Arc;
	use crypto::publickey::{Random, Generator};
	use key_server_cluster::{MapKeyServerSet, PlainNodeKeyPair, KeyServerSetSnapshot, KeyServerSetMigration};
	use key_server_cluster::cluster_connections_net::NetConnectionsContainer;
	use super::{Maintain, TriggerConnections, ConnectionsAction, ConnectionTrigger, SimpleConnectionTrigger,
		select_nodes_to_disconnect, adjust_connections};

	fn default_connection_data() -> NetConnectionsContainer {
		NetConnectionsContainer {
			is_isolated: false,
			nodes: Default::default(),
			connections: Default::default(),
		}
	}

	fn create_connections() -> TriggerConnections {
		TriggerConnections {
			self_key_pair: Arc::new(PlainNodeKeyPair::new(Random.generate())),
		}
	}

	#[test]
	fn do_not_disconnect_if_set_is_not_changed() {
		let node_id = Random.generate().public().clone();
		assert_eq!(select_nodes_to_disconnect(
			&vec![(node_id, "127.0.0.1:8081".parse().unwrap())].into_iter().collect(),
			&vec![(node_id, "127.0.0.1:8081".parse().unwrap())].into_iter().collect()),
			vec![]);
	}

	#[test]
	fn disconnect_if_address_has_changed() {
		let node_id = Random.generate().public().clone();
		assert_eq!(select_nodes_to_disconnect(
			&vec![(node_id.clone(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(),
			&vec![(node_id.clone(), "127.0.0.1:8082".parse().unwrap())].into_iter().collect()),
			vec![node_id.clone()]);
	}

	#[test]
	fn disconnect_if_node_has_removed() {
		let node_id = Random.generate().public().clone();
		assert_eq!(select_nodes_to_disconnect(
			&vec![(node_id.clone(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(),
			&vec![].into_iter().collect()),
			vec![node_id.clone()]);
	}

	#[test]
	fn does_not_disconnect_if_node_has_added() {
		let node_id = Random.generate().public().clone();
		assert_eq!(select_nodes_to_disconnect(
			&vec![(node_id.clone(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(),
			&vec![(node_id.clone(), "127.0.0.1:8081".parse().unwrap()),
				(Random.generate().public().clone(), "127.0.0.1:8082".parse().unwrap())]
				.into_iter().collect()),
			vec![]);
	}

	#[test]
	fn adjust_connections_disconnects_from_all_nodes_if_not_a_part_of_key_server() {
		let self_node_id = Random.generate().public().clone();
		let other_node_id = Random.generate().public().clone();
		let mut connection_data = default_connection_data();
		connection_data.nodes.insert(other_node_id.clone(), "127.0.0.1:8081".parse().unwrap());

		let required_set = connection_data.nodes.clone();
		adjust_connections(&self_node_id, &mut connection_data, &required_set);
		assert!(connection_data.nodes.is_empty());
		assert!(connection_data.is_isolated);
	}

	#[test]
	fn adjust_connections_connects_to_new_nodes() {
		let self_node_id = Random.generate().public().clone();
		let other_node_id = Random.generate().public().clone();
		let mut connection_data = default_connection_data();

		let required_set = vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap()),
			(other_node_id.clone(), "127.0.0.1:8082".parse().unwrap())].into_iter().collect();
		adjust_connections(&self_node_id, &mut connection_data, &required_set);
		assert!(connection_data.nodes.contains_key(&other_node_id));
		assert!(!connection_data.is_isolated);
	}

	#[test]
	fn adjust_connections_reconnects_from_changed_nodes() {
		let self_node_id = Random.generate().public().clone();
		let other_node_id = Random.generate().public().clone();
		let mut connection_data = default_connection_data();
		connection_data.nodes.insert(other_node_id.clone(), "127.0.0.1:8082".parse().unwrap());

		let required_set = vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap()),
			(other_node_id.clone(), "127.0.0.1:8083".parse().unwrap())].into_iter().collect();
		adjust_connections(&self_node_id, &mut connection_data, &required_set);
		assert_eq!(connection_data.nodes.get(&other_node_id), Some(&"127.0.0.1:8083".parse().unwrap()));
		assert!(!connection_data.is_isolated);
	}

	#[test]
	fn adjust_connections_disconnects_from_removed_nodes() {
		let self_node_id = Random.generate().public().clone();
		let other_node_id = Random.generate().public().clone();
		let mut connection_data = default_connection_data();
		connection_data.nodes.insert(other_node_id.clone(), "127.0.0.1:8082".parse().unwrap());

		let required_set = vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect();
		adjust_connections(&self_node_id, &mut connection_data, &required_set);
		assert!(connection_data.nodes.is_empty());
		assert!(!connection_data.is_isolated);
	}

	#[test]
	fn adjust_connections_does_not_connects_to_self() {
		let self_node_id = Random.generate().public().clone();
		let mut connection_data = default_connection_data();

		let required_set = vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap())].into_iter().collect();
		adjust_connections(&self_node_id, &mut connection_data, &required_set);
		assert!(connection_data.nodes.is_empty());
		assert!(!connection_data.is_isolated);
	}

	#[test]
	fn maintain_connects_to_current_set_works() {
		let connections = create_connections();
		let self_node_id = connections.self_key_pair.public().clone();
		let current_node_id = Random.generate().public().clone();
		let migration_node_id = Random.generate().public().clone();
		let new_node_id = Random.generate().public().clone();

		let mut connections_data = default_connection_data();
		connections.maintain(ConnectionsAction::ConnectToCurrentSet, &mut connections_data, &KeyServerSetSnapshot {
			current_set: vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap()),
				(current_node_id.clone(), "127.0.0.1:8082".parse().unwrap())].into_iter().collect(),
			new_set: vec![(new_node_id.clone(), "127.0.0.1:8083".parse().unwrap())].into_iter().collect(),
			migration: Some(KeyServerSetMigration {
				set: vec![(migration_node_id.clone(), "127.0.0.1:8084".parse().unwrap())].into_iter().collect(),
				..Default::default()
			}),
		});

		assert_eq!(vec![current_node_id], connections_data.nodes.keys().cloned().collect::<Vec<_>>());
	}

	#[test]
	fn maintain_connects_to_migration_set_works() {
		let connections = create_connections();
		let self_node_id = connections.self_key_pair.public().clone();
		let current_node_id = Random.generate().public().clone();
		let migration_node_id = Random.generate().public().clone();
		let new_node_id = Random.generate().public().clone();

		let mut connections_data = default_connection_data();
		connections.maintain(ConnectionsAction::ConnectToMigrationSet, &mut connections_data, &KeyServerSetSnapshot {
			current_set: vec![(current_node_id.clone(), "127.0.0.1:8082".parse().unwrap())].into_iter().collect(),
			new_set: vec![(new_node_id.clone(), "127.0.0.1:8083".parse().unwrap())].into_iter().collect(),
			migration: Some(KeyServerSetMigration {
				set: vec![(self_node_id.clone(), "127.0.0.1:8081".parse().unwrap()),
					(migration_node_id.clone(), "127.0.0.1:8084".parse().unwrap())].into_iter().collect(),
				..Default::default()
			}),
		});

		assert_eq!(vec![migration_node_id].into_iter().collect::<BTreeSet<_>>(),
			connections_data.nodes.keys().cloned().collect::<BTreeSet<_>>());
	}

	#[test]
	fn simple_connections_trigger_only_maintains_connections() {
		let key_server_set = Arc::new(MapKeyServerSet::new(false, Default::default()));
		let self_key_pair = Arc::new(PlainNodeKeyPair::new(Random.generate()));
		let mut trigger = SimpleConnectionTrigger::new(key_server_set, self_key_pair, None);
		assert_eq!(trigger.on_maintain(), Some(Maintain::Connections));
	}
}

@ -1,759 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{BTreeSet, BTreeMap};
use std::net::SocketAddr;
use std::sync::Arc;
use ethereum_types::H256;
use crypto::publickey::Public;
use parking_lot::Mutex;
use key_server_cluster::{KeyServerSet, KeyServerSetSnapshot, KeyServerSetMigration, is_migration_required};
use key_server_cluster::cluster::{ClusterConfiguration, ServersSetChangeParams};
use key_server_cluster::cluster_connections_net::NetConnectionsContainer;
use key_server_cluster::cluster_sessions::{AdminSession, ClusterSession};
use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash;
use key_server_cluster::connection_trigger::{Maintain, ConnectionsAction, ConnectionTrigger,
	ServersSetChangeSessionCreatorConnector, TriggerConnections};
use types::{Error, NodeId};
use blockchain::SigningKeyPair;

/// Key servers set change trigger with automated migration procedure.
pub struct ConnectionTriggerWithMigration {
	/// This node key pair.
	self_key_pair: Arc<dyn SigningKeyPair>,
	/// Key server set.
	key_server_set: Arc<dyn KeyServerSet>,
	/// Last server set state.
	snapshot: KeyServerSetSnapshot,
	/// Required connections action.
	connections_action: Option<ConnectionsAction>,
	/// Required session action.
	session_action: Option<SessionAction>,
	/// Currently connected nodes.
	connected: BTreeSet<NodeId>,
	/// Trigger migration connections.
	connections: TriggerConnections,
	/// Trigger migration session.
	session: TriggerSession,
}

#[derive(Default)]
/// Key servers set change session creator connector with migration support.
pub struct ServersSetChangeSessionCreatorConnectorWithMigration {
	/// This node id.
	self_node_id: NodeId,
	/// Active migration state to check when servers set change session is started.
	migration: Mutex<Option<KeyServerSetMigration>>,
	/// Active servers set change session.
	session: Mutex<Option<Arc<AdminSession>>>,
}

#[derive(Debug, Clone, Copy, PartialEq)]
/// Migration session action.
enum SessionAction {
	/// Start migration (confirm migration transaction).
	StartMigration(H256),
	/// Start migration session.
	Start,
	/// Confirm migration and forget migration session.
	ConfirmAndDrop(H256),
	/// Forget migration session.
	Drop,
	/// Forget migration session and retry.
	DropAndRetry,
}

#[derive(Debug, Clone, Copy, PartialEq)]
/// Migration session state.
enum SessionState {
	/// No active session.
	Idle,
	/// Session is running with the given migration id.
	Active(Option<H256>),
	/// Session has completed successfully.
	Finished(Option<H256>),
	/// Session has completed with an error.
	Failed(Option<H256>),
}

#[derive(Debug, Clone, Copy, PartialEq)]
/// Migration state.
pub enum MigrationState {
	/// No migration required.
	Idle,
	/// Migration is required.
	Required,
	/// Migration has started.
	Started,
}

/// Migration session.
|
|
||||||
struct TriggerSession {
|
|
||||||
/// Servers set change session creator connector.
|
|
||||||
connector: Arc<ServersSetChangeSessionCreatorConnectorWithMigration>,
|
|
||||||
/// This node key pair.
|
|
||||||
self_key_pair: Arc<dyn SigningKeyPair>,
|
|
||||||
/// Key server set.
|
|
||||||
key_server_set: Arc<dyn KeyServerSet>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ConnectionTriggerWithMigration {
    /// Create a new trigger from cluster configuration.
    pub fn with_config(config: &ClusterConfiguration) -> Self {
        Self::new(config.key_server_set.clone(), config.self_key_pair.clone())
    }

    /// Create a new trigger with migration.
    pub fn new(key_server_set: Arc<dyn KeyServerSet>, self_key_pair: Arc<dyn SigningKeyPair>) -> Self {
        let snapshot = key_server_set.snapshot();
        let migration = snapshot.migration.clone();

        ConnectionTriggerWithMigration {
            self_key_pair: self_key_pair.clone(),
            key_server_set: key_server_set.clone(),
            snapshot: snapshot,
            connected: BTreeSet::new(),
            connections: TriggerConnections {
                self_key_pair: self_key_pair.clone(),
            },
            session: TriggerSession {
                connector: Arc::new(ServersSetChangeSessionCreatorConnectorWithMigration {
                    self_node_id: self_key_pair.public().clone(),
                    migration: Mutex::new(migration),
                    session: Mutex::new(None),
                }),
                self_key_pair: self_key_pair,
                key_server_set: key_server_set,
            },
            connections_action: None,
            session_action: None,
        }
    }

    /// Actually do maintenance.
    fn do_maintain(&mut self) -> Option<Maintain> {
        loop {
            let session_state = session_state(self.session.connector.session.lock().clone());
            let migration_state = migration_state(self.self_key_pair.public(), &self.snapshot);

            let session_action = maintain_session(self.self_key_pair.public(), &self.connected, &self.snapshot, migration_state, session_state);
            let session_maintain_required = session_action.map(|session_action|
                self.session.process(session_action)).unwrap_or_default();
            self.session_action = session_action;

            let connections_action = maintain_connections(migration_state, session_state);
            let connections_maintain_required = connections_action.map(|_| true).unwrap_or_default();
            self.connections_action = connections_action;

            if session_state != SessionState::Idle || migration_state != MigrationState::Idle {
                trace!(target: "secretstore_net", "{}: non-idle auto-migration state: {:?} -> {:?}",
                    self.self_key_pair.public(), (migration_state, session_state), (self.connections_action, self.session_action));
            }

            if session_action != Some(SessionAction::DropAndRetry) {
                return match (session_maintain_required, connections_maintain_required) {
                    (true, true) => Some(Maintain::SessionAndConnections),
                    (true, false) => Some(Maintain::Session),
                    (false, true) => Some(Maintain::Connections),
                    (false, false) => None,
                };
            }
        }
    }
}

impl ConnectionTrigger for ConnectionTriggerWithMigration {
    fn on_maintain(&mut self) -> Option<Maintain> {
        self.snapshot = self.key_server_set.snapshot();
        *self.session.connector.migration.lock() = self.snapshot.migration.clone();

        self.do_maintain()
    }

    fn on_connection_established(&mut self, node: &NodeId) -> Option<Maintain> {
        self.connected.insert(node.clone());
        self.do_maintain()
    }

    fn on_connection_closed(&mut self, node: &NodeId) -> Option<Maintain> {
        self.connected.remove(node);
        self.do_maintain()
    }

    fn maintain_session(&mut self) -> Option<ServersSetChangeParams> {
        self.session_action.and_then(|action| self.session.maintain(action, &self.snapshot))
    }

    fn maintain_connections(&mut self, connections: &mut NetConnectionsContainer) {
        if let Some(action) = self.connections_action {
            self.connections.maintain(action, connections, &self.snapshot);
        }
    }

    fn servers_set_change_creator_connector(&self) -> Arc<dyn ServersSetChangeSessionCreatorConnector> {
        self.session.connector.clone()
    }
}

impl ServersSetChangeSessionCreatorConnector for ServersSetChangeSessionCreatorConnectorWithMigration {
    fn admin_public(&self, migration_id: Option<&H256>, new_server_set: BTreeSet<NodeId>) -> Result<Public, Error> {
        // the idea is that all nodes agree upon a block number and the new set of nodes in this block
        // then the master node is selected from the nodes set && this master signs the old set && the new set
        // (signatures are inputs to ServerSetChangeSession)
        self.migration.lock().as_ref()
            .map(|migration| {
                let is_migration_id_same = migration_id.map(|mid| mid == &migration.id).unwrap_or_default();
                let is_migration_set_same = new_server_set == migration.set.keys().cloned().collect();
                if is_migration_id_same && is_migration_set_same {
                    Ok(migration.master.clone())
                } else {
                    warn!(target: "secretstore_net", "{}: failed to accept auto-migration session: same_migration_id={}, same_migration_set={}",
                        self.self_node_id, is_migration_id_same, is_migration_set_same);

                    Err(Error::AccessDenied)
                }
            })
            .unwrap_or_else(|| {
                warn!(target: "secretstore_net", "{}: failed to accept non-scheduled auto-migration session", self.self_node_id);
                Err(Error::AccessDenied)
            })
    }

    fn set_key_servers_set_change_session(&self, session: Arc<AdminSession>) {
        *self.session.lock() = Some(session);
    }
}

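// Illustrative sketch (not part of the original file): `admin_public` above only
// accepts a servers set change session when both the migration id and the migration
// set exactly match the currently scheduled migration; everything else is rejected
// with `AccessDenied`. A minimal stand-alone model of that check, using hypothetical
// plain types instead of the Secret Store ones:
struct ToyMigration {
    id: u64,
    set: std::collections::BTreeSet<u64>,
    master: u64,
}

fn toy_admin_public(
    scheduled: Option<&ToyMigration>,
    session_id: Option<u64>,
    session_set: &std::collections::BTreeSet<u64>,
) -> Result<u64, &'static str> {
    match scheduled {
        // accept only an exact match with the scheduled migration
        Some(m) if session_id == Some(m.id) && *session_set == m.set => Ok(m.master),
        Some(_) => Err("access denied: migration id or set mismatch"),
        None => Err("access denied: no scheduled migration"),
    }
}
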
impl TriggerSession {
    /// Process session action.
    pub fn process(&mut self, action: SessionAction) -> bool {
        match action {
            SessionAction::ConfirmAndDrop(migration_id) => {
                *self.connector.session.lock() = None;
                self.key_server_set.confirm_migration(migration_id);
                false
            },
            SessionAction::Drop | SessionAction::DropAndRetry => {
                *self.connector.session.lock() = None;
                false
            },
            SessionAction::StartMigration(migration_id) => {
                self.key_server_set.start_migration(migration_id);
                false
            },
            SessionAction::Start => true,
        }
    }

    /// Maintain session.
    pub fn maintain(
        &mut self,
        action: SessionAction,
        server_set: &KeyServerSetSnapshot
    ) -> Option<ServersSetChangeParams> {
        if action != SessionAction::Start { // all other actions are processed in `process`
            return None;
        }
        let migration = server_set.migration.as_ref()
            .expect("action is Start only when migration is started (see maintain_session); qed");

        // we assume that authorities that are removed from the servers set are either offline, or malicious
        // => they're not involved in ServersSetChangeSession
        // => both sets are the same
        let old_set: BTreeSet<_> = migration.set.keys().cloned().collect();
        let new_set = old_set.clone();

        let signatures = self.self_key_pair.sign(&ordered_nodes_hash(&old_set))
            .and_then(|old_set_signature| self.self_key_pair.sign(&ordered_nodes_hash(&new_set))
                .map(|new_set_signature| (old_set_signature, new_set_signature)));

        match signatures {
            Ok((old_set_signature, new_set_signature)) => Some(ServersSetChangeParams {
                session_id: None,
                migration_id: Some(migration.id),
                new_nodes_set: new_set,
                old_set_signature,
                new_set_signature,
            }),
            Err(err) => {
                trace!(
                    target: "secretstore_net",
                    "{}: failed to sign servers set for auto-migrate session with: {}",
                    self.self_key_pair.public(), err);
                None
            },
        }
    }
}

fn migration_state(self_node_id: &NodeId, snapshot: &KeyServerSetSnapshot) -> MigrationState {
    // if this node is in neither the current set nor the migration set => we do not participate in migration
    if !snapshot.current_set.contains_key(self_node_id) &&
        !snapshot.migration.as_ref().map(|s| s.set.contains_key(self_node_id)).unwrap_or_default() {
        return MigrationState::Idle;
    }

    // if migration has already started, no other states are possible
    if snapshot.migration.is_some() {
        return MigrationState::Started;
    }

    // we only require migration if the set actually changes
    // when only an address changes, we can simply adjust connections
    if !is_migration_required(&snapshot.current_set, &snapshot.new_set) {
        return MigrationState::Idle;
    }

    return MigrationState::Required;
}

fn session_state(session: Option<Arc<AdminSession>>) -> SessionState {
    session
        .and_then(|s| match s.as_servers_set_change() {
            Some(s) if !s.is_finished() => Some(SessionState::Active(s.migration_id().cloned())),
            Some(s) => match s.result() {
                Some(Ok(_)) => Some(SessionState::Finished(s.migration_id().cloned())),
                Some(Err(_)) => Some(SessionState::Failed(s.migration_id().cloned())),
                None => unreachable!("s.is_finished() == true; when session is finished, result is available; qed"),
            },
            None => None,
        })
        .unwrap_or(SessionState::Idle)
}

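// Illustrative sketch (not part of the original file): the decision order in
// `migration_state` above is (1) this node is in neither the current set nor the
// migration set => Idle, (2) a migration is already recorded => Started, (3) the
// sets actually differ => Required, otherwise Idle. The toy below mirrors that
// ordering with plain sets; all names here are hypothetical stand-ins.
#[derive(Debug, PartialEq)]
enum ToyMigrationState {
    Idle,
    Required,
    Started,
}

fn toy_migration_state(
    me: u64,
    current: &std::collections::BTreeSet<u64>,
    new: &std::collections::BTreeSet<u64>,
    migration: Option<&std::collections::BTreeSet<u64>>,
) -> ToyMigrationState {
    if !current.contains(&me) && !migration.map(|m| m.contains(&me)).unwrap_or(false) {
        return ToyMigrationState::Idle;
    }
    if migration.is_some() {
        return ToyMigrationState::Started;
    }
    // the real code also ignores pure address changes via `is_migration_required`
    if current != new {
        return ToyMigrationState::Required;
    }
    ToyMigrationState::Idle
}
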
fn maintain_session(self_node_id: &NodeId, connected: &BTreeSet<NodeId>, snapshot: &KeyServerSetSnapshot, migration_state: MigrationState, session_state: SessionState) -> Option<SessionAction> {
    let migration_data_proof = "migration_state is Started; migration data available when started; qed";

    match (migration_state, session_state) {
        // === NORMAL combinations ===

        // having no session when it is not required => ok
        (MigrationState::Idle, SessionState::Idle) => None,
        // migration is required && no active session => start migration
        (MigrationState::Required, SessionState::Idle) => {
            match select_master_node(snapshot) == self_node_id {
                true => Some(SessionAction::StartMigration(H256::random())),
                // we are not on master node
                false => None,
            }
        },
        // migration is active && there's no active session => start it
        (MigrationState::Started, SessionState::Idle) => {
            match is_connected_to_all_nodes(self_node_id, &snapshot.migration.as_ref().expect(migration_data_proof).set, connected) &&
                select_master_node(snapshot) == self_node_id {
                true => Some(SessionAction::Start),
                // we are not connected to all required nodes yet or we are not on master node => wait for it
                false => None,
            }
        },
        // migration is active && session is not yet started/finished => ok
        (MigrationState::Started, SessionState::Active(ref session_migration_id))
            if snapshot.migration.as_ref().expect(migration_data_proof).id == session_migration_id.unwrap_or_default() =>
            None,
        // migration has finished => confirm migration
        (MigrationState::Started, SessionState::Finished(ref session_migration_id))
            if snapshot.migration.as_ref().expect(migration_data_proof).id == session_migration_id.unwrap_or_default() =>
            match snapshot.migration.as_ref().expect(migration_data_proof).set.contains_key(self_node_id) {
                true => Some(SessionAction::ConfirmAndDrop(
                    snapshot.migration.as_ref().expect(migration_data_proof).id.clone()
                )),
                // we are not on migration set => we do not need to confirm
                false => Some(SessionAction::Drop),
            },
        // migration has failed => it should be dropped && restarted later
        (MigrationState::Started, SessionState::Failed(ref session_migration_id))
            if snapshot.migration.as_ref().expect(migration_data_proof).id == session_migration_id.unwrap_or_default() =>
            Some(SessionAction::Drop),

        // === ABNORMAL combinations, which are still possible when the contract misbehaves ===

        // having active session when it is not required => drop it && wait for other tasks
        (MigrationState::Idle, SessionState::Active(_)) |
        // no migration required && there's finished session => drop it && wait for other tasks
        (MigrationState::Idle, SessionState::Finished(_)) |
        // no migration required && there's failed session => drop it && wait for other tasks
        (MigrationState::Idle, SessionState::Failed(_)) |
        // migration is required && session is active => drop it && wait for other tasks
        (MigrationState::Required, SessionState::Active(_)) |
        // migration is required && session has finished => we need to forget this obsolete session and retry
        (MigrationState::Required, SessionState::Finished(_)) |
        // session for other migration is active => we need to forget this obsolete session and retry
        // (the case for same id is checked above)
        (MigrationState::Started, SessionState::Active(_)) |
        // session for other migration has finished => we need to forget this obsolete session and retry
        // (the case for same id is checked above)
        (MigrationState::Started, SessionState::Finished(_)) |
        // session for other migration has failed => we need to forget this obsolete session and retry
        // (the case for same id is checked above)
        (MigrationState::Started, SessionState::Failed(_)) |
        // migration is required && session has failed => we need to forget this obsolete session and retry
        (MigrationState::Required, SessionState::Failed(_)) => {
            // some of the cases above could happen because of lags (could actually be a non-abnormal behavior)
            // => we only trace here
            trace!(target: "secretstore_net", "{}: suspicious auto-migration state: {:?}",
                self_node_id, (migration_state, session_state));
            Some(SessionAction::DropAndRetry)
        },
    }
}

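// Summary (added for readability; derived directly from the match above): the
// (migration_state, session_state) pairs map onto actions as follows.
//
//   (Idle, Idle)                 => no action
//   (Required, Idle)             => StartMigration(random id), on the selected master node only
//   (Started, Idle)              => Start, once the master is connected to the whole migration set
//   (Started, Active(same id))   => no action (session is running)
//   (Started, Finished(same id)) => ConfirmAndDrop(id) on migration nodes, Drop on removed nodes
//   (Started, Failed(same id))   => Drop (migration is restarted later)
//   anything else                => DropAndRetry (obsolete or suspicious session)
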
fn maintain_connections(migration_state: MigrationState, session_state: SessionState) -> Option<ConnectionsAction> {
    match (migration_state, session_state) {
        // session is active => we do not alter connections while it is running
        (_, SessionState::Active(_)) => None,
        // when no migration is required => we just keep ourselves connected to the old nodes set
        (MigrationState::Idle, _) => Some(ConnectionsAction::ConnectToCurrentSet),
        // when migration is either scheduled, or in progress => connect to both the old and the migration set.
        // this could lead to a situation when a node is not 'officially' a part of the KeyServer (i.e. it is not in current_set),
        // but it participates in a new key generation session.
        // it is ok, since 'officially' here means that this node is an owner of all old shares
        (MigrationState::Required, _) |
        (MigrationState::Started, _) => Some(ConnectionsAction::ConnectToMigrationSet),
    }
}

fn is_connected_to_all_nodes(self_node_id: &NodeId, nodes: &BTreeMap<NodeId, SocketAddr>, connected: &BTreeSet<NodeId>) -> bool {
    nodes.keys()
        .filter(|n| *n != self_node_id)
        .all(|n| connected.contains(n))
}

fn select_master_node(snapshot: &KeyServerSetSnapshot) -> &NodeId {
    // we want to minimize the number of UnknownSession messages =>
    // try to select a node which was in the SS && will be in the SS
    match snapshot.migration.as_ref() {
        Some(migration) => &migration.master,
        None => snapshot.current_set.keys()
            .filter(|n| snapshot.new_set.contains_key(n))
            .nth(0)
            .or_else(|| snapshot.new_set.keys().nth(0))
            .unwrap_or_else(|| snapshot.current_set.keys().nth(0)
                .expect("select_master_node is only called when migration is Required or Started;\
                    when Started: migration.is_some() && we return migration.master; qed;\
                    when Required: current_set != new_set; this means that at least one set is non-empty; we try to take node from each set; qed"))
    }
}

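// Illustrative sketch (not part of the original file): because `current_set` and
// `new_set` are ordered maps, every node that sees the same snapshot deterministically
// picks the same master with no extra coordination. A stand-alone model of the same
// preference order (first node present in both sets, else first new node, else first
// current node), using hypothetical plain types:
fn toy_select_master(
    current: &std::collections::BTreeSet<u64>,
    new: &std::collections::BTreeSet<u64>,
) -> Option<u64> {
    current.iter()
        .find(|n| new.contains(*n))
        .or_else(|| new.iter().next())
        .or_else(|| current.iter().next())
        .copied()
}
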
#[cfg(test)]
mod tests {
    use key_server_cluster::{KeyServerSetSnapshot, KeyServerSetMigration};
    use key_server_cluster::connection_trigger::ConnectionsAction;
    use super::{MigrationState, SessionState, SessionAction, migration_state, maintain_session,
        maintain_connections, select_master_node};
    use ethereum_types::{H256, H512};

    #[test]
    fn migration_state_is_idle_when_required_but_this_node_is_not_on_the_list() {
        assert_eq!(migration_state(&H512::from_low_u64_be(1), &KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(2), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(),
            new_set: vec![(H512::from_low_u64_be(3), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(),
            migration: None,
        }), MigrationState::Idle);
    }

    #[test]
    fn migration_state_is_idle_when_sets_are_equal() {
        assert_eq!(migration_state(&H512::from_low_u64_be(1), &KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(),
            new_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(),
            migration: None,
        }), MigrationState::Idle);
    }

    #[test]
    fn migration_state_is_idle_when_only_address_changes() {
        assert_eq!(migration_state(&H512::from_low_u64_be(1), &KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8080".parse().unwrap())].into_iter().collect(),
            new_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(),
            migration: None,
        }), MigrationState::Idle);
    }

    #[test]
    fn migration_state_is_required_when_node_is_added() {
        assert_eq!(migration_state(&H512::from_low_u64_be(1), &KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8080".parse().unwrap())].into_iter().collect(),
            new_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8080".parse().unwrap()),
                (H512::from_low_u64_be(2), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(),
            migration: None,
        }), MigrationState::Required);
    }

    #[test]
    fn migration_state_is_required_when_node_is_removed() {
        assert_eq!(migration_state(&H512::from_low_u64_be(1), &KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8080".parse().unwrap()),
                (H512::from_low_u64_be(2), "127.0.0.1:8081".parse().unwrap())].into_iter().collect(),
            new_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8080".parse().unwrap())].into_iter().collect(),
            migration: None,
        }), MigrationState::Required);
    }

    #[test]
    fn migration_state_is_started_when_migration_is_some() {
        assert_eq!(migration_state(&H512::from_low_u64_be(1), &KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8080".parse().unwrap())].into_iter().collect(),
            new_set: Default::default(),
            migration: Some(KeyServerSetMigration {
                id: Default::default(),
                set: Default::default(),
                master: Default::default(),
                is_confirmed: Default::default(),
            }),
        }), MigrationState::Started);
    }

    #[test]
    fn existing_master_is_selected_when_migration_has_started() {
        assert_eq!(select_master_node(&KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8180".parse().unwrap())].into_iter().collect(),
            new_set: vec![(H512::from_low_u64_be(2), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
            migration: Some(KeyServerSetMigration {
                master: H512::from_low_u64_be(3),
                ..Default::default()
            }),
        }), &H512::from_low_u64_be(3));
    }

    #[test]
    fn persistent_master_is_selected_when_migration_has_not_started_yet() {
        assert_eq!(select_master_node(&KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8180".parse().unwrap()),
                (H512::from_low_u64_be(2), "127.0.0.1:8180".parse().unwrap())].into_iter().collect(),
            new_set: vec![(H512::from_low_u64_be(2), "127.0.0.1:8181".parse().unwrap()),
                (H512::from_low_u64_be(4), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
            migration: None,
        }), &H512::from_low_u64_be(2));
    }

    #[test]
    fn new_master_is_selected_in_worst_case() {
        assert_eq!(select_master_node(&KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8180".parse().unwrap()),
                (H512::from_low_u64_be(2), "127.0.0.1:8180".parse().unwrap())].into_iter().collect(),
            new_set: vec![(H512::from_low_u64_be(3), "127.0.0.1:8181".parse().unwrap()),
                (H512::from_low_u64_be(4), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
            migration: None,
        }), &H512::from_low_u64_be(3));
    }

    #[test]
    fn maintain_connections_returns_none_when_session_is_active() {
        assert_eq!(maintain_connections(MigrationState::Required,
            SessionState::Active(Default::default())), None);
    }

    #[test]
    fn maintain_connections_connects_to_current_set_when_no_migration() {
        assert_eq!(maintain_connections(MigrationState::Idle,
            SessionState::Idle), Some(ConnectionsAction::ConnectToCurrentSet));
    }

    #[test]
    fn maintain_connections_connects_to_current_and_old_set_when_migration_is_required() {
        assert_eq!(maintain_connections(MigrationState::Required,
            SessionState::Idle), Some(ConnectionsAction::ConnectToMigrationSet));
    }

    #[test]
    fn maintain_connections_connects_to_current_and_old_set_when_migration_is_started() {
        assert_eq!(maintain_connections(MigrationState::Started,
            SessionState::Idle), Some(ConnectionsAction::ConnectToMigrationSet));
    }

    #[test]
    fn maintain_sessions_does_nothing_if_no_session_and_no_migration() {
        assert_eq!(maintain_session(&H512::from_low_u64_be(1), &Default::default(), &Default::default(),
            MigrationState::Idle, SessionState::Idle), None);
    }

    #[test]
    fn maintain_session_does_nothing_when_migration_required_on_slave_node_and_no_session() {
        assert_eq!(maintain_session(&H512::from_low_u64_be(2), &vec![H512::from_low_u64_be(2)].into_iter().collect(), &KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
            new_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8181".parse().unwrap()),
                (H512::from_low_u64_be(2), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
            migration: None,
        }, MigrationState::Required, SessionState::Idle), None);
    }

    #[test]
    fn maintain_session_does_nothing_when_migration_started_on_slave_node_and_no_session() {
        assert_eq!(maintain_session(&H512::from_low_u64_be(2), &vec![H512::from_low_u64_be(2)].into_iter().collect(), &KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
            new_set: Default::default(),
            migration: Some(KeyServerSetMigration {
                master: H512::from_low_u64_be(1),
                set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8181".parse().unwrap()),
                    (H512::from_low_u64_be(2), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
                ..Default::default()
            }),
        }, MigrationState::Started, SessionState::Idle), None);
    }

    #[test]
    fn maintain_session_does_nothing_when_migration_started_on_master_node_and_no_session_and_not_connected_to_migration_nodes() {
        assert_eq!(maintain_session(&H512::from_low_u64_be(1), &Default::default(), &KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
            new_set: Default::default(),
            migration: Some(KeyServerSetMigration {
                master: H512::from_low_u64_be(1),
                set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8181".parse().unwrap()),
                    (H512::from_low_u64_be(2), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
                ..Default::default()
            }),
        }, MigrationState::Started, SessionState::Idle), None);
    }

    #[test]
    fn maintain_session_starts_session_when_migration_started_on_master_node_and_no_session() {
        assert_eq!(maintain_session(&H512::from_low_u64_be(1), &vec![H512::from_low_u64_be(2)].into_iter().collect(), &KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
            new_set: Default::default(),
            migration: Some(KeyServerSetMigration {
                master: H512::from_low_u64_be(1),
                set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8181".parse().unwrap()),
                    (H512::from_low_u64_be(2), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
                ..Default::default()
            }),
        }, MigrationState::Started, SessionState::Idle), Some(SessionAction::Start));
    }

    #[test]
    fn maintain_session_does_nothing_when_both_migration_and_session_are_started() {
        assert_eq!(maintain_session(&H512::from_low_u64_be(1), &vec![H512::from_low_u64_be(2)].into_iter().collect(), &KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
            new_set: Default::default(),
            migration: Some(KeyServerSetMigration {
                master: H512::from_low_u64_be(1),
                set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8181".parse().unwrap()),
                    (H512::from_low_u64_be(2), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
                ..Default::default()
            }),
        }, MigrationState::Started, SessionState::Active(Default::default())), None);
    }

    #[test]
    fn maintain_session_confirms_migration_when_active_and_session_has_finished_on_new_node() {
        assert_eq!(maintain_session(&H512::from_low_u64_be(1), &vec![H512::from_low_u64_be(2)].into_iter().collect(), &KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
            new_set: Default::default(),
            migration: Some(KeyServerSetMigration {
                master: H512::from_low_u64_be(1),
                set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8181".parse().unwrap()),
                    (H512::from_low_u64_be(2), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
                ..Default::default()
            }),
        }, MigrationState::Started, SessionState::Finished(Default::default())), Some(SessionAction::ConfirmAndDrop(Default::default())));
    }

    #[test]
    fn maintain_session_drops_session_when_active_and_session_has_finished_on_removed_node() {
        assert_eq!(maintain_session(&H512::from_low_u64_be(1), &vec![H512::from_low_u64_be(2)].into_iter().collect(), &KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8181".parse().unwrap()),
                (H512::from_low_u64_be(2), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
            new_set: Default::default(),
            migration: Some(KeyServerSetMigration {
                master: H512::from_low_u64_be(2),
                set: vec![(H512::from_low_u64_be(2), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
                ..Default::default()
            }),
        }, MigrationState::Started, SessionState::Finished(Default::default())), Some(SessionAction::Drop));
    }

    #[test]
    fn maintain_session_drops_session_when_active_and_session_has_failed() {
        assert_eq!(maintain_session(&H512::from_low_u64_be(1), &vec![H512::from_low_u64_be(2)].into_iter().collect(), &KeyServerSetSnapshot {
            current_set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
            new_set: Default::default(),
            migration: Some(KeyServerSetMigration {
                master: H512::from_low_u64_be(1),
                set: vec![(H512::from_low_u64_be(1), "127.0.0.1:8181".parse().unwrap()),
                    (H512::from_low_u64_be(2), "127.0.0.1:8181".parse().unwrap())].into_iter().collect(),
                ..Default::default()
            }),
        }, MigrationState::Started, SessionState::Failed(Default::default())), Some(SessionAction::Drop));
    }

    #[test]
    fn maintain_session_detects_abnormal_when_no_migration_and_active_session() {
        assert_eq!(maintain_session(&Default::default(), &Default::default(), &Default::default(),
            MigrationState::Idle, SessionState::Active(Default::default())), Some(SessionAction::DropAndRetry));
    }

    #[test]
    fn maintain_session_detects_abnormal_when_no_migration_and_finished_session() {
        assert_eq!(maintain_session(&Default::default(), &Default::default(), &Default::default(),
            MigrationState::Idle, SessionState::Finished(Default::default())), Some(SessionAction::DropAndRetry));
    }

    #[test]
    fn maintain_session_detects_abnormal_when_no_migration_and_failed_session() {
        assert_eq!(maintain_session(&Default::default(), &Default::default(), &Default::default(),
            MigrationState::Idle, SessionState::Failed(Default::default())), Some(SessionAction::DropAndRetry));
    }

    #[test]
    fn maintain_session_detects_abnormal_when_required_migration_and_active_session() {
        assert_eq!(maintain_session(&Default::default(), &Default::default(), &Default::default(),
            MigrationState::Required, SessionState::Active(Default::default())), Some(SessionAction::DropAndRetry));
    }

    #[test]
    fn maintain_session_detects_abnormal_when_required_migration_and_finished_session() {
        assert_eq!(maintain_session(&Default::default(), &Default::default(), &Default::default(),
            MigrationState::Required, SessionState::Finished(Default::default())), Some(SessionAction::DropAndRetry));
    }

    #[test]
    fn maintain_session_detects_abnormal_when_required_migration_and_failed_session() {
        assert_eq!(maintain_session(&Default::default(), &Default::default(), &Default::default(),
            MigrationState::Required, SessionState::Failed(Default::default())), Some(SessionAction::DropAndRetry));
    }

    #[test]
    fn maintain_session_detects_abnormal_when_active_migration_and_active_session_with_different_id() {
        assert_eq!(maintain_session(&Default::default(), &Default::default(), &KeyServerSetSnapshot {
            migration: Some(KeyServerSetMigration {
                id: H256::zero(),
                ..Default::default()
            }),
            ..Default::default()
        }, MigrationState::Started, SessionState::Active(Some(H256::from_low_u64_be(1)))), Some(SessionAction::DropAndRetry));
    }

    #[test]
    fn maintain_session_detects_abnormal_when_active_migration_and_finished_session_with_different_id() {
        assert_eq!(maintain_session(&Default::default(), &Default::default(), &KeyServerSetSnapshot {
            migration: Some(KeyServerSetMigration {
                id: H256::zero(),
                ..Default::default()
            }),
            ..Default::default()
        }, MigrationState::Started, SessionState::Finished(Some(H256::from_low_u64_be(1)))), Some(SessionAction::DropAndRetry));
    }

    #[test]
    fn maintain_session_detects_abnormal_when_active_migration_and_failed_session_with_different_id() {
        assert_eq!(maintain_session(&Default::default(), &Default::default(), &KeyServerSetSnapshot {
            migration: Some(KeyServerSetMigration {
                id: H256::zero(),
                ..Default::default()
            }),
            ..Default::default()
        }, MigrationState::Started, SessionState::Failed(Some(H256::from_low_u64_be(1)))), Some(SessionAction::DropAndRetry));
    }
}
@ -1,88 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::io;
use std::time::Duration;
use futures::{Future, Poll};
use tokio::timer::timeout::{Timeout, Error as TimeoutError};

type DeadlineBox<F> = Box<dyn Future<
    Item = DeadlineStatus<<F as Future>::Item>,
    Error = TimeoutError<<F as Future>::Error>
> + Send>;

/// Complete a passed future or fail if it is not completed within timeout.
pub fn deadline<F, T>(duration: Duration, future: F) -> Result<Deadline<F>, io::Error>
    where F: Future<Item = T, Error = io::Error> + Send + 'static, T: Send + 'static
{
    let timeout = Box::new(Timeout::new(future, duration)
        .then(|res| {
            match res {
                Ok(fut) => Ok(DeadlineStatus::Meet(fut)),
                Err(err) => {
                    if err.is_elapsed() {
                        Ok(DeadlineStatus::Timeout)
                    } else {
                        Err(err)
                    }
                },
            }
        })
    );
    let deadline = Deadline {
        future: timeout,
    };
    Ok(deadline)
}

/// Deadline future completion status.
#[derive(Debug, PartialEq)]
pub enum DeadlineStatus<T> {
    /// Completed a future.
    Meet(T),
    /// Failed with timeout.
    Timeout,
}

/// Future that waits for the passed future to complete within a given period, or fails with timeout.
pub struct Deadline<F> where F: Future {
    future: DeadlineBox<F>,
}

impl<F, T> Future for Deadline<F> where F: Future<Item = T, Error = io::Error> {
    type Item = DeadlineStatus<T>;
    type Error = TimeoutError<io::Error>;

    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        self.future.poll()
    }
}

#[cfg(test)]
mod tests {
    use std::time::Duration;
    use futures::{Future, done};
    use tokio::reactor::Reactor;
    use super::{deadline, DeadlineStatus};

    #[test]
    fn deadline_result_works() {
        let mut reactor = Reactor::new().unwrap();
        let deadline = deadline(Duration::from_millis(1000), done(Ok(()))).unwrap();
        reactor.turn(Some(Duration::from_millis(3))).unwrap();
        assert_eq!(deadline.wait().unwrap(), DeadlineStatus::Meet(()));
    }
}
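
// Illustrative sketch (not part of the original diff): the `Deadline` helper above
// predates async/await. With futures 0.3 and tokio 1.x (an assumption for comparison,
// not crates used by this repository) the same behaviour reduces to
// `tokio::time::timeout`, mapped here onto a `DeadlineStatus`-like enum:
#[derive(Debug, PartialEq)]
enum ToyDeadlineStatus<T> {
    Meet(T),
    Timeout,
}

async fn toy_deadline<F: std::future::Future>(
    duration: std::time::Duration,
    future: F,
) -> ToyDeadlineStatus<F::Output> {
    match tokio::time::timeout(duration, future).await {
        Ok(value) => ToyDeadlineStatus::Meet(value),
        Err(_elapsed) => ToyDeadlineStatus::Timeout,
    }
}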
@ -1,379 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

///! Given: two nodes, each holding its own `self_key_pair`.
///!
///! Handshake process:
///! 1) both nodes generate a random `KeyPair` (`session_key_pair`), which will be used for channel encryption
///! 2) both nodes generate a random H256 (`confirmation_plain`)
///! 3) both nodes sign `confirmation_plain` using `session_key_pair` to receive `confirmation_signed_session`
///! 4) nodes exchange `NodePublicKey` messages, containing: `self_key_pair.public`, `confirmation_plain`, `confirmation_signed_session`
///! 5) both nodes check that they're configured to communicate with the server identified by the received `message.self_key_pair.public`. The connection is closed otherwise
///! 6) both nodes recover the peer's `session_key_pair.public` from `message.confirmation_plain` and `message.confirmation_signed_session`
///! 7) both nodes compute the shared session key pair using their own `session_key_pair.secret` && the peer's `session_key_pair.public`. All following messages are encrypted using this key pair.
///! 8) both nodes sign `message.confirmation_plain` with their own `self_key_pair.private` to receive `confirmation_signed`
///! 9) nodes exchange `NodePrivateKeySignature` messages, containing `confirmation_signed`
///! 10) both nodes check that `confirmation_signed` is actually signed by the owner of the peer's `self_key_pair.secret`
///!
///! The result of the handshake is:
///! 1) belief that we are connected to a KS from our KS-set
///! 2) session key pair, which is used to encrypt all connection messages

use std::io;
use std::sync::Arc;
use std::collections::BTreeSet;
use futures::{Future, Poll, Async};
use tokio_io::{AsyncRead, AsyncWrite};
use crypto::publickey::ecdh::agree;
use crypto::publickey::{Random, Generator, KeyPair, Public, Signature, verify_public, sign, recover};
use ethereum_types::H256;
use blockchain::SigningKeyPair;
use key_server_cluster::{NodeId, Error};
use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature};
use key_server_cluster::io::{write_message, write_encrypted_message, WriteMessage, ReadMessage,
    read_message, read_encrypted_message, fix_shared_key};

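// Illustrative sketch (not part of the original file): the handshake described above
// is a fixed four-step message sequence. The toy state machine below captures just
// the ordering guarantees (public keys are exchanged before the encrypted signature
// round); all names here are hypothetical and no real cryptography is involved.
#[derive(Debug, Clone, Copy, PartialEq)]
enum ToyHandshakeStep {
    SendPublicKey,
    ReceivePublicKey,
    SendPrivateKeySignature,
    ReceivePrivateKeySignature,
    Finished,
}

fn toy_next_step(step: ToyHandshakeStep, is_active: bool) -> ToyHandshakeStep {
    match (step, is_active) {
        // the active (connecting) side speaks first; the passive side answers
        (ToyHandshakeStep::SendPublicKey, true) => ToyHandshakeStep::ReceivePublicKey,
        (ToyHandshakeStep::SendPublicKey, false) => ToyHandshakeStep::SendPrivateKeySignature,
        (ToyHandshakeStep::ReceivePublicKey, true) => ToyHandshakeStep::SendPrivateKeySignature,
        (ToyHandshakeStep::ReceivePublicKey, false) => ToyHandshakeStep::SendPublicKey,
        (ToyHandshakeStep::SendPrivateKeySignature, _) => ToyHandshakeStep::ReceivePrivateKeySignature,
        (ToyHandshakeStep::ReceivePrivateKeySignature, _) | (ToyHandshakeStep::Finished, _) => ToyHandshakeStep::Finished,
    }
}
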
/// Start handshake procedure with another node from the cluster.
pub fn handshake<A>(a: A, self_key_pair: Arc<dyn SigningKeyPair>, trusted_nodes: BTreeSet<NodeId>) -> Handshake<A> where A: AsyncWrite + AsyncRead {
    let init_data = (
        *Random.generate().secret().clone(),
        Random.generate()
    );
    handshake_with_init_data(a, Ok(init_data), self_key_pair, trusted_nodes)
}

/// Start handshake procedure with another node from the cluster and given plain confirmation + session key pair.
pub fn handshake_with_init_data<A>(a: A, init_data: Result<(H256, KeyPair), Error>, self_key_pair: Arc<dyn SigningKeyPair>, trusted_nodes: BTreeSet<NodeId>) -> Handshake<A> where A: AsyncWrite + AsyncRead {
    let handshake_input_data = init_data
        .and_then(|(cp, kp)| sign(kp.secret(), &cp).map(|sp| (cp, kp, sp)).map_err(Into::into))
        .and_then(|(cp, kp, sp)| Handshake::<A>::make_public_key_message(self_key_pair.public().clone(), cp.clone(), sp).map(|msg| (cp, kp, msg)));

    let (error, cp, kp, state) = match handshake_input_data {
        Ok((cp, kp, msg)) => (None, cp, Some(kp), HandshakeState::SendPublicKey(write_message(a, msg))),
        Err(err) => (Some((a, Err(err))), Default::default(), None, HandshakeState::Finished),
    };

    Handshake {
        is_active: true,
        error: error,
        state: state,
        self_key_pair: self_key_pair,
        self_session_key_pair: kp,
        self_confirmation_plain: cp,
        trusted_nodes: Some(trusted_nodes),
        peer_node_id: None,
        peer_session_public: None,
        peer_confirmation_plain: None,
        shared_key: None,
    }
}

/// Wait for handshake procedure to be started by another node from the cluster.
pub fn accept_handshake<A>(a: A, self_key_pair: Arc<dyn SigningKeyPair>) -> Handshake<A> where A: AsyncWrite + AsyncRead {
    let self_confirmation_plain = *Random.generate().secret().clone();
    let keypair = Random.generate();

    Handshake {
        is_active: false,
        error: None,
        state: HandshakeState::ReceivePublicKey(read_message(a)),
        self_key_pair,
        self_session_key_pair: Some(keypair),
        self_confirmation_plain,
        trusted_nodes: None,
        peer_node_id: None,
        peer_session_public: None,
        peer_confirmation_plain: None,
        shared_key: None,
    }
}

/// Result of handshake procedure.
#[derive(Debug, PartialEq)]
pub struct HandshakeResult {
    /// Node id.
    pub node_id: NodeId,
    /// Shared key.
    pub shared_key: KeyPair,
}

/// Future handshake procedure.
pub struct Handshake<A> {
    is_active: bool,
    error: Option<(A, Result<HandshakeResult, Error>)>,
    state: HandshakeState<A>,
    self_key_pair: Arc<dyn SigningKeyPair>,
    self_session_key_pair: Option<KeyPair>,
    self_confirmation_plain: H256,
    trusted_nodes: Option<BTreeSet<NodeId>>,
    peer_node_id: Option<NodeId>,
    peer_session_public: Option<Public>,
    peer_confirmation_plain: Option<H256>,
    shared_key: Option<KeyPair>,
}

/// Active handshake state.
enum HandshakeState<A> {
    SendPublicKey(WriteMessage<A>),
    ReceivePublicKey(ReadMessage<A>),
    SendPrivateKeySignature(WriteMessage<A>),
    ReceivePrivateKeySignature(ReadMessage<A>),
    Finished,
}

impl<A> Handshake<A> where A: AsyncRead + AsyncWrite {
    #[cfg(test)]
    pub fn set_self_confirmation_plain(&mut self, self_confirmation_plain: H256) {
        self.self_confirmation_plain = self_confirmation_plain;
    }

    #[cfg(test)]
    pub fn set_self_session_key_pair(&mut self, self_session_key_pair: KeyPair) {
        self.self_session_key_pair = Some(self_session_key_pair);
    }

    pub fn make_public_key_message(self_node_id: NodeId, confirmation_plain: H256, confirmation_signed_session: Signature) -> Result<Message, Error> {
        Ok(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey {
            node_id: self_node_id.into(),
            confirmation_plain: confirmation_plain.into(),
            confirmation_signed_session: confirmation_signed_session.into(),
        })))
    }

    fn make_private_key_signature_message(self_key_pair: &dyn SigningKeyPair, confirmation_plain: &H256) -> Result<Message, Error> {
        Ok(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature {
            confirmation_signed: self_key_pair.sign(confirmation_plain)?.into(),
        })))
    }

    fn compute_shared_key(self_session_key_pair: &KeyPair, peer_session_public: &Public) -> Result<KeyPair, Error> {
        agree(self_session_key_pair.secret(), peer_session_public)
            .map_err(Into::into)
            .and_then(|s| fix_shared_key(&s))
    }
}

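// Illustrative sketch (not part of the original file): `compute_shared_key` above is
// ECDH agreement — each side combines its own session secret with the peer's session
// public key and both arrive at the same value. The toy below demonstrates the same
// symmetry with classic integer Diffie-Hellman over a 64-bit prime; it is a teaching
// aid with hypothetical constants only, not a substitute for secp256k1 ECDH.
const TOY_DH_PRIME: u64 = 0xffff_ffff_ffff_ffc5; // 2^64 - 59, the largest 64-bit prime
const TOY_DH_GENERATOR: u64 = 5;

fn toy_dh_pow(mut base: u64, mut exp: u64) -> u64 {
    // square-and-multiply modular exponentiation using 128-bit intermediates
    let mut acc: u64 = 1;
    base %= TOY_DH_PRIME;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = ((acc as u128 * base as u128) % TOY_DH_PRIME as u128) as u64;
        }
        base = ((base as u128 * base as u128) % TOY_DH_PRIME as u128) as u64;
        exp >>= 1;
    }
    acc
}

#[test]
fn toy_dh_agreement_is_symmetric() {
    let (secret_a, secret_b) = (0x1234_5678, 0x9abc_def0);
    let public_a = toy_dh_pow(TOY_DH_GENERATOR, secret_a);
    let public_b = toy_dh_pow(TOY_DH_GENERATOR, secret_b);
    // agree(self secret, peer public) gives both sides the same shared key
    assert_eq!(toy_dh_pow(public_b, secret_a), toy_dh_pow(public_a, secret_b));
}
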
impl<A> Future for Handshake<A> where A: AsyncRead + AsyncWrite {
    type Item = (A, Result<HandshakeResult, Error>);
    type Error = io::Error;

    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        if let Some(error_result) = self.error.take() {
            return Ok(error_result.into());
        }

        let (next, result) = match self.state {
            HandshakeState::SendPublicKey(ref mut future) => {
                let (stream, _) = try_ready!(future.poll());

                if self.is_active {
                    (HandshakeState::ReceivePublicKey(
                        read_message(stream)
                    ), Async::NotReady)
                } else {
                    let shared_key = Self::compute_shared_key(
                        self.self_session_key_pair.as_ref().expect(
                            "self_session_key_pair is not filled only when initialization has failed; if initialization has failed, self.error.is_some(); qed"),
                        self.peer_session_public.as_ref().expect(
                            "we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; peer_session_public is filled in ReceivePublicKey; qed"),
                    );

                    self.shared_key = match shared_key {
                        Ok(shared_key) => Some(shared_key),
                        Err(err) => return Ok((stream, Err(err)).into()),
                    };

                    let peer_confirmation_plain = self.peer_confirmation_plain.as_ref()
                        .expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; peer_confirmation_plain is filled in ReceivePublicKey; qed");
                    let message = match Handshake::<A>::make_private_key_signature_message(&*self.self_key_pair, peer_confirmation_plain) {
                        Ok(message) => message,
                        Err(err) => return Ok((stream, Err(err)).into()),
                    };

                    (HandshakeState::SendPrivateKeySignature(write_encrypted_message(stream,
                        self.shared_key.as_ref().expect("filled couple of lines above; qed"),
                        message)), Async::NotReady)
                }
            },
            HandshakeState::ReceivePublicKey(ref mut future) => {
                let (stream, message) = try_ready!(future.poll());

                let message = match message {
                    Ok(message) => match message {
                        Message::Cluster(ClusterMessage::NodePublicKey(message)) => message,
                        _ => return Ok((stream, Err(Error::InvalidMessage)).into()),
                    },
                    Err(err) => return Ok((stream, Err(err.into())).into()),
                };

                if !self.trusted_nodes.as_ref().map(|tn| tn.contains(&*message.node_id)).unwrap_or(true) {
                    return Ok((stream, Err(Error::InvalidNodeId)).into());
                }

                self.peer_node_id = Some(message.node_id.into());
                self.peer_session_public = Some(match recover(&message.confirmation_signed_session, &message.confirmation_plain) {
                    Ok(peer_session_public) => peer_session_public,
                    Err(err) => return Ok((stream, Err(err.into())).into()),
                });
                self.peer_confirmation_plain = Some(message.confirmation_plain.into());
                if self.is_active {
                    let shared_key = Self::compute_shared_key(
                        self.self_session_key_pair.as_ref().expect(
                            "self_session_key_pair is not filled only when initialization has failed; if initialization has failed, self.error.is_some(); qed"),
                        self.peer_session_public.as_ref().expect(
                            "we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; peer_session_public is filled in ReceivePublicKey; qed"),
                    );

                    self.shared_key = match shared_key {
                        Ok(shared_key) => Some(shared_key),
                        Err(err) => return Ok((stream, Err(err)).into()),
                    };

                    let peer_confirmation_plain = self.peer_confirmation_plain.as_ref()
                        .expect("filled couple of lines above; qed");
                    let message = match Handshake::<A>::make_private_key_signature_message(&*self.self_key_pair, peer_confirmation_plain) {
                        Ok(message) => message,
                        Err(err) => return Ok((stream, Err(err)).into()),
                    };

                    (HandshakeState::SendPrivateKeySignature(write_encrypted_message(stream,
                        self.shared_key.as_ref().expect("filled couple of lines above; qed"),
                        message)), Async::NotReady)
                } else {
                    let self_session_key_pair = self.self_session_key_pair.as_ref()
                        .expect("self_session_key_pair is not filled only when initialization has failed; if initialization has failed, self.error.is_some(); qed");
                    let confirmation_signed_session = match sign(self_session_key_pair.secret(), &self.self_confirmation_plain).map_err(Into::into) {
                        Ok(confirmation_signed_session) => confirmation_signed_session,
                        Err(err) => return Ok((stream, Err(err)).into()),
                    };

                    let message = match Handshake::<A>::make_public_key_message(self.self_key_pair.public().clone(), self.self_confirmation_plain.clone(), confirmation_signed_session) {
                        Ok(message) => message,
                        Err(err) => return Ok((stream, Err(err)).into()),
                    };
                    (HandshakeState::SendPublicKey(write_message(stream, message)), Async::NotReady)
                }
            },
            HandshakeState::SendPrivateKeySignature(ref mut future) => {
                let (stream, _) = try_ready!(future.poll());

                (HandshakeState::ReceivePrivateKeySignature(
                    read_encrypted_message(stream,
                        self.shared_key.as_ref().expect("shared_key is filled in Send/ReceivePublicKey; SendPrivateKeySignature follows Send/ReceivePublicKey; qed").clone()
                    )
                ), Async::NotReady)
            },
            HandshakeState::ReceivePrivateKeySignature(ref mut future) => {
                let (stream, message) = try_ready!(future.poll());

                let message = match message {
                    Ok(message) => match message {
                        Message::Cluster(ClusterMessage::NodePrivateKeySignature(message)) => message,
                        _ => return Ok((stream, Err(Error::InvalidMessage)).into()),
                    },
                    Err(err) => return Ok((stream, Err(err.into())).into()),
                };

                let peer_public = self.peer_node_id.as_ref().expect("peer_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed");
                if !verify_public(peer_public, &*message.confirmation_signed, &self.self_confirmation_plain).unwrap_or(false) {
                    return Ok((stream, Err(Error::InvalidMessage)).into());
                }

                (HandshakeState::Finished, Async::Ready((stream, Ok(HandshakeResult {
                    node_id: self.peer_node_id.expect("peer_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed"),
                    shared_key: self.shared_key.clone().expect("shared_key is filled in Send/ReceivePublicKey; ReceivePrivateKeySignature follows Send/ReceivePublicKey; qed"),
                }))))
            },
            HandshakeState::Finished => panic!("poll Handshake after it's done"),
        };

        self.state = next;
        match result {
            // by polling again, we register the new future
            Async::NotReady => self.poll(),
            result => Ok(result)
        }
    }
}

#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::collections::BTreeSet;
|
|
||||||
use futures::Future;
|
|
||||||
use crypto::publickey::{Random, Generator, sign};
|
|
||||||
use ethereum_types::H256;
|
|
||||||
use key_server_cluster::PlainNodeKeyPair;
|
|
||||||
use key_server_cluster::io::message::tests::TestIo;
|
|
||||||
use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature};
|
|
||||||
use super::{handshake_with_init_data, accept_handshake, HandshakeResult};
|
|
||||||
|
|
||||||
fn prepare_test_io() -> (H256, TestIo) {
|
|
||||||
let mut io = TestIo::new();
|
|
||||||
|
|
||||||
let self_confirmation_plain = *Random.generate().secret().clone();
|
|
||||||
let peer_confirmation_plain = *Random.generate().secret().clone();
|
|
||||||
|
|
||||||
let self_confirmation_signed = sign(io.peer_key_pair().secret(), &self_confirmation_plain).unwrap();
|
|
||||||
let peer_confirmation_signed = sign(io.peer_session_key_pair().secret(), &peer_confirmation_plain).unwrap();
|
|
||||||
|
|
||||||
let peer_public = io.peer_key_pair().public().clone();
|
|
||||||
io.add_input_message(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey {
|
|
||||||
node_id: peer_public.into(),
|
|
||||||
confirmation_plain: peer_confirmation_plain.into(),
|
|
||||||
confirmation_signed_session: peer_confirmation_signed.into(),
|
|
||||||
})));
|
|
||||||
io.add_encrypted_input_message(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature {
|
|
||||||
confirmation_signed: self_confirmation_signed.into(),
|
|
||||||
})));
|
|
||||||
|
|
||||||
(self_confirmation_plain, io)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn active_handshake_works() {
|
|
||||||
let (self_confirmation_plain, io) = prepare_test_io();
|
|
||||||
let trusted_nodes: BTreeSet<_> = vec![io.peer_key_pair().public().clone()].into_iter().collect();
|
|
||||||
let self_session_key_pair = io.self_session_key_pair().clone();
|
|
||||||
let self_key_pair = Arc::new(PlainNodeKeyPair::new(io.self_key_pair().clone()));
|
|
||||||
let shared_key = io.shared_key_pair().clone();
|
|
||||||
|
|
||||||
let handshake = handshake_with_init_data(io, Ok((self_confirmation_plain, self_session_key_pair)), self_key_pair, trusted_nodes);
|
|
||||||
let handshake_result = handshake.wait().unwrap();
|
|
||||||
assert_eq!(handshake_result.1, Ok(HandshakeResult {
|
|
||||||
node_id: handshake_result.0.peer_key_pair().public().clone(),
|
|
||||||
shared_key: shared_key,
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn passive_handshake_works() {
|
|
||||||
let (self_confirmation_plain, io) = prepare_test_io();
|
|
||||||
let self_key_pair = Arc::new(PlainNodeKeyPair::new(io.self_key_pair().clone()));
|
|
||||||
let self_session_key_pair = io.self_session_key_pair().clone();
|
|
||||||
let shared_key = io.shared_key_pair().clone();
|
|
||||||
|
|
||||||
let mut handshake = accept_handshake(io, self_key_pair);
|
|
||||||
handshake.set_self_confirmation_plain(self_confirmation_plain);
|
|
||||||
handshake.set_self_session_key_pair(self_session_key_pair);
|
|
||||||
|
|
||||||
let handshake_result = handshake.wait().unwrap();
|
|
||||||
assert_eq!(handshake_result.1, Ok(HandshakeResult {
|
|
||||||
node_id: handshake_result.0.peer_key_pair().public().clone(),
|
|
||||||
shared_key: shared_key,
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
}
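
// Illustrative sketch (not part of the original file): how a caller drives the
// three-step handshake over a live connection. Each side sends a `NodePublicKey`
// message (session public key + confirmation nonce), derives the shared ECDH
// key, then proves ownership of its node key with an encrypted
// `NodePrivateKeySignature`. `tcp_stream`, `key_pair` and `trusted_nodes` are
// assumed to exist in the caller's scope; `handshake` is the constructor
// re-exported from io/mod.rs below.
//
// use futures::Future;
//
// let stream: SharedTcpStream = tcp_stream.into();
// let session = handshake(stream, key_pair, trusted_nodes)
// 	.map(|(stream, result)| {
// 		let result = result.expect("handshake failed");
// 		// result.node_id authenticates the peer; result.shared_key encrypts
// 		// every subsequent message exchanged over `stream`.
// 		(stream, result)
// 	});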
@@ -1,429 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::io::Cursor;
use std::u16;
use std::ops::Deref;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use serde_json;
use crypto::publickey::ecies;
use crypto::publickey::{Secret, KeyPair};
use crypto::publickey::ec_math_utils::CURVE_ORDER;
use ethereum_types::{H256, U256, BigEndianHash};
use key_server_cluster::Error;
use key_server_cluster::message::{Message, ClusterMessage, GenerationMessage, EncryptionMessage, DecryptionMessage,
	SchnorrSigningMessage, EcdsaSigningMessage, ServersSetChangeMessage, ShareAddMessage, KeyVersionNegotiationMessage};

/// Size of serialized header.
pub const MESSAGE_HEADER_SIZE: usize = 18;
/// Current header version.
pub const CURRENT_HEADER_VERSION: u64 = 1;

/// Message header.
#[derive(Debug, PartialEq)]
pub struct MessageHeader {
	/// Message/Header version.
	pub version: u64,
	/// Message kind.
	pub kind: u64,
	/// Message payload size (without header).
	pub size: u16,
}

/// Serialized message.
#[derive(Debug, Clone, PartialEq)]
pub struct SerializedMessage(Vec<u8>);

impl Deref for SerializedMessage {
	type Target = [u8];

	fn deref(&self) -> &[u8] {
		&self.0
	}
}

impl Into<Vec<u8>> for SerializedMessage {
	fn into(self) -> Vec<u8> {
		self.0
	}
}

/// Serialize message.
pub fn serialize_message(message: Message) -> Result<SerializedMessage, Error> {
	let (message_kind, payload) = match message {
		Message::Cluster(ClusterMessage::NodePublicKey(payload)) => (1, serde_json::to_vec(&payload)),
		Message::Cluster(ClusterMessage::NodePrivateKeySignature(payload)) => (2, serde_json::to_vec(&payload)),
		Message::Cluster(ClusterMessage::KeepAlive(payload)) => (3, serde_json::to_vec(&payload)),
		Message::Cluster(ClusterMessage::KeepAliveResponse(payload)) => (4, serde_json::to_vec(&payload)),

		Message::Generation(GenerationMessage::InitializeSession(payload)) => (50, serde_json::to_vec(&payload)),
		Message::Generation(GenerationMessage::ConfirmInitialization(payload)) => (51, serde_json::to_vec(&payload)),
		Message::Generation(GenerationMessage::CompleteInitialization(payload)) => (52, serde_json::to_vec(&payload)),
		Message::Generation(GenerationMessage::KeysDissemination(payload)) => (53, serde_json::to_vec(&payload)),
		Message::Generation(GenerationMessage::PublicKeyShare(payload)) => (54, serde_json::to_vec(&payload)),
		Message::Generation(GenerationMessage::SessionError(payload)) => (55, serde_json::to_vec(&payload)),
		Message::Generation(GenerationMessage::SessionCompleted(payload)) => (56, serde_json::to_vec(&payload)),

		Message::Encryption(EncryptionMessage::InitializeEncryptionSession(payload)) => (100, serde_json::to_vec(&payload)),
		Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization(payload)) => (101, serde_json::to_vec(&payload)),
		Message::Encryption(EncryptionMessage::EncryptionSessionError(payload)) => (102, serde_json::to_vec(&payload)),

		Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(payload)) => (150, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::RequestPartialDecryption(payload)) => (151, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::PartialDecryption(payload)) => (152, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::DecryptionSessionError(payload)) => (153, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(payload)) => (154, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(payload)) => (155, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::DecryptionSessionDelegationCompleted(payload))
			=> (156, serde_json::to_vec(&payload)),

		Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage(payload))
			=> (200, serde_json::to_vec(&payload)),
		Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningGenerationMessage(payload))
			=> (201, serde_json::to_vec(&payload)),
		Message::SchnorrSigning(SchnorrSigningMessage::SchnorrRequestPartialSignature(payload))
			=> (202, serde_json::to_vec(&payload)),
		Message::SchnorrSigning(SchnorrSigningMessage::SchnorrPartialSignature(payload)) => (203, serde_json::to_vec(&payload)),
		Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionError(payload)) => (204, serde_json::to_vec(&payload)),
		Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionCompleted(payload))
			=> (205, serde_json::to_vec(&payload)),
		Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegation(payload))
			=> (206, serde_json::to_vec(&payload)),
		Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(payload))
			=> (207, serde_json::to_vec(&payload)),

		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(payload))
			=> (250, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest(payload)) => (251, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(payload)) => (252, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(payload))
			=> (253, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(payload))
			=> (254, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(payload))
			=> (255, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(payload))
			=> (256, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(payload))
			=> (257, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(payload))
			=> (258, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(payload)) => (261, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(payload))
			=> (262, serde_json::to_vec(&payload)),

		Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(payload)) => (300, serde_json::to_vec(&payload)),
		Message::ShareAdd(ShareAddMessage::KeyShareCommon(payload)) => (301, serde_json::to_vec(&payload)),
		Message::ShareAdd(ShareAddMessage::NewKeysDissemination(payload)) => (302, serde_json::to_vec(&payload)),
		Message::ShareAdd(ShareAddMessage::ShareAddError(payload)) => (303, serde_json::to_vec(&payload)),

		Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::RequestKeyVersions(payload))
			=> (450, serde_json::to_vec(&payload)),
		Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersions(payload))
			=> (451, serde_json::to_vec(&payload)),
		Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersionsError(payload))
			=> (452, serde_json::to_vec(&payload)),

		Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage(payload)) => (500, serde_json::to_vec(&payload)),
		Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage(payload))
			=> (501, serde_json::to_vec(&payload)),
		Message::EcdsaSigning(EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage(payload))
			=> (502, serde_json::to_vec(&payload)),
		Message::EcdsaSigning(EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage(payload))
			=> (503, serde_json::to_vec(&payload)),
		Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(payload))
			=> (504, serde_json::to_vec(&payload)),
		Message::EcdsaSigning(EcdsaSigningMessage::EcdsaRequestPartialSignature(payload)) => (505, serde_json::to_vec(&payload)),
		Message::EcdsaSigning(EcdsaSigningMessage::EcdsaPartialSignature(payload)) => (506, serde_json::to_vec(&payload)),
		Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionError(payload)) => (507, serde_json::to_vec(&payload)),
		Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionCompleted(payload)) => (508, serde_json::to_vec(&payload)),
		Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegation(payload)) => (509, serde_json::to_vec(&payload)),
		Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(payload))
			=> (510, serde_json::to_vec(&payload)),
	};

	let payload = payload.map_err(|err| Error::Serde(err.to_string()))?;
	build_serialized_message(MessageHeader {
		kind: message_kind,
		version: CURRENT_HEADER_VERSION,
		size: 0,
	}, payload)
}

/// Deserialize message.
pub fn deserialize_message(header: &MessageHeader, payload: Vec<u8>) -> Result<Message, Error> {
	Ok(match header.kind {
		1 => Message::Cluster(ClusterMessage::NodePublicKey(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		2 => Message::Cluster(ClusterMessage::NodePrivateKeySignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		3 => Message::Cluster(ClusterMessage::KeepAlive(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		4 => Message::Cluster(ClusterMessage::KeepAliveResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		50 => Message::Generation(GenerationMessage::InitializeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		51 => Message::Generation(GenerationMessage::ConfirmInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		52 => Message::Generation(GenerationMessage::CompleteInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		53 => Message::Generation(GenerationMessage::KeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		54 => Message::Generation(GenerationMessage::PublicKeyShare(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		55 => Message::Generation(GenerationMessage::SessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		56 => Message::Generation(GenerationMessage::SessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		100 => Message::Encryption(EncryptionMessage::InitializeEncryptionSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		101 => Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		102 => Message::Encryption(EncryptionMessage::EncryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		150 => Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		151 => Message::Decryption(DecryptionMessage::RequestPartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		152 => Message::Decryption(DecryptionMessage::PartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		153 => Message::Decryption(DecryptionMessage::DecryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		154 => Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		155 => Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		156 => Message::Decryption(DecryptionMessage::DecryptionSessionDelegationCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		200 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		201 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningGenerationMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		202 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrRequestPartialSignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		203 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrPartialSignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		204 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		205 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		206 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegation(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		207 => Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		250 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		251 => Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		252 => Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		253 => Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		254 => Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		255 => Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		256 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		257 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		258 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		261 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		262 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		300 => Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		301 => Message::ShareAdd(ShareAddMessage::KeyShareCommon(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		302 => Message::ShareAdd(ShareAddMessage::NewKeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		303 => Message::ShareAdd(ShareAddMessage::ShareAddError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		450 => Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::RequestKeyVersions(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		451 => Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersions(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		452 => Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersionsError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		500 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		501 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		502 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		503 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		504 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		505 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaRequestPartialSignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		506 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaPartialSignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		507 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		508 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		509 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegation(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		510 => Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		_ => return Err(Error::Serde(format!("unknown message type {}", header.kind))),
	})
}
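
// Illustrative sketch (not part of the original file): a plain-message
// round-trip through the two functions above. A message travels as an 18-byte
// header followed by `size` bytes of JSON payload, so a reader first parses
// the header, then hands exactly `size` bytes to `deserialize_message`.
//
// let serialized = serialize_message(message)?;
// let (header_bytes, payload_bytes) = serialized.split_at(MESSAGE_HEADER_SIZE);
// let header = deserialize_header(header_bytes)?;
// assert_eq!(header.size as usize, payload_bytes.len());
// let decoded = deserialize_message(&header, payload_bytes.to_vec())?;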

/// Encrypt serialized message.
pub fn encrypt_message(key: &KeyPair, message: SerializedMessage) -> Result<SerializedMessage, Error> {
	let mut header: Vec<_> = message.into();
	let payload = header.split_off(MESSAGE_HEADER_SIZE);
	let encrypted_payload = ecies::encrypt(key.public(), &[], &payload)?;

	let header = deserialize_header(&header)?;
	build_serialized_message(header, encrypted_payload)
}

/// Decrypt serialized message.
pub fn decrypt_message(key: &KeyPair, payload: Vec<u8>) -> Result<Vec<u8>, Error> {
	Ok(ecies::decrypt(key.secret(), &[], &payload)?)
}

/// Fix shared encryption key.
pub fn fix_shared_key(shared_secret: &Secret) -> Result<KeyPair, Error> {
	// secret key created in agree function is invalid, as it is not calculated mod EC.field.n
	// => let's do it manually
	let shared_secret: H256 = (**shared_secret).into();
	let shared_secret: U256 = shared_secret.into_uint();
	let shared_secret: H256 = BigEndianHash::from_uint(&(shared_secret % *CURVE_ORDER));
	let shared_key_pair = KeyPair::from_secret_slice(shared_secret.as_bytes())?;
	Ok(shared_key_pair)
}
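
// Illustrative sketch (not part of the original file): how the handshake code
// is expected to combine raw ECDH agreement with `fix_shared_key` to obtain a
// valid secp256k1 key pair for `encrypt_message`/`decrypt_message`.
//
// use crypto::publickey::ecdh::agree;
//
// let shared_secret = agree(self_session_key_pair.secret(), peer_session_public)?;
// let shared_key = fix_shared_key(&shared_secret)?;
// let encrypted = encrypt_message(&shared_key, serialize_message(message)?)?;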

/// Serialize message header.
fn serialize_header(header: &MessageHeader) -> Result<Vec<u8>, Error> {
	let mut buffer = Vec::with_capacity(MESSAGE_HEADER_SIZE);
	buffer.write_u64::<LittleEndian>(header.version)?;
	buffer.write_u64::<LittleEndian>(header.kind)?;
	buffer.write_u16::<LittleEndian>(header.size)?;
	Ok(buffer)
}

/// Deserialize message header.
pub fn deserialize_header(data: &[u8]) -> Result<MessageHeader, Error> {
	let mut reader = Cursor::new(data);
	let version = reader.read_u64::<LittleEndian>()?;
	if version != CURRENT_HEADER_VERSION {
		return Err(Error::InvalidMessageVersion);
	}

	Ok(MessageHeader {
		version: version,
		kind: reader.read_u64::<LittleEndian>()?,
		size: reader.read_u16::<LittleEndian>()?,
	})
}
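
// Illustrative, self-contained sketch (not part of the original file) of the
// fixed 18-byte wire layout produced by `serialize_header`:
// version (u64 LE) | kind (u64 LE) | payload size (u16 LE).
fn _header_layout_example() {
	use byteorder::{LittleEndian, WriteBytesExt};

	let mut raw = Vec::with_capacity(18);
	raw.write_u64::<LittleEndian>(1).unwrap();   // version = CURRENT_HEADER_VERSION
	raw.write_u64::<LittleEndian>(4).unwrap();   // kind 4 = KeepAliveResponse
	raw.write_u16::<LittleEndian>(512).unwrap(); // payload size in bytes
	assert_eq!(raw.len(), 18);                   // == MESSAGE_HEADER_SIZE
}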

/// Build serialized message from header && payload
fn build_serialized_message(mut header: MessageHeader, payload: Vec<u8>) -> Result<SerializedMessage, Error> {
	let payload_len = payload.len();
	if payload_len > u16::MAX as usize {
		return Err(Error::InvalidMessage);
	}
	header.size = payload.len() as u16;

	let mut message = serialize_header(&header)?;
	message.extend(payload);
	Ok(SerializedMessage(message))
}

#[cfg(test)]
pub mod tests {
	use std::io;
	use futures::Poll;
	use tokio_io::{AsyncRead, AsyncWrite};
	use crypto::publickey::{Random, Generator, KeyPair};
	use crypto::publickey::ecdh::agree;
	use key_server_cluster::Error;
	use key_server_cluster::message::Message;
	use super::{MESSAGE_HEADER_SIZE, CURRENT_HEADER_VERSION, MessageHeader, fix_shared_key, encrypt_message,
		serialize_message, serialize_header, deserialize_header};

	pub struct TestIo {
		self_key_pair: KeyPair,
		self_session_key_pair: KeyPair,
		peer_key_pair: KeyPair,
		peer_session_key_pair: KeyPair,
		shared_key_pair: KeyPair,
		input_buffer: io::Cursor<Vec<u8>>,
	}

	impl TestIo {
		pub fn new() -> Self {
			let self_session_key_pair = Random.generate();
			let peer_session_key_pair = Random.generate();
			let self_key_pair = Random.generate();
			let peer_key_pair = Random.generate();
			let shared_key_pair = fix_shared_key(&agree(self_session_key_pair.secret(), peer_session_key_pair.public()).unwrap()).unwrap();
			TestIo {
				self_key_pair: self_key_pair,
				self_session_key_pair: self_session_key_pair,
				peer_key_pair: peer_key_pair,
				peer_session_key_pair: peer_session_key_pair,
				shared_key_pair: shared_key_pair,
				input_buffer: io::Cursor::new(Vec::new()),
			}
		}

		pub fn self_key_pair(&self) -> &KeyPair {
			&self.self_key_pair
		}

		pub fn self_session_key_pair(&self) -> &KeyPair {
			&self.self_session_key_pair
		}

		pub fn peer_key_pair(&self) -> &KeyPair {
			&self.peer_key_pair
		}

		pub fn peer_session_key_pair(&self) -> &KeyPair {
			&self.peer_session_key_pair
		}

		pub fn shared_key_pair(&self) -> &KeyPair {
			&self.shared_key_pair
		}

		pub fn add_input_message(&mut self, message: Message) {
			let serialized_message = serialize_message(message).unwrap();
			let serialized_message: Vec<_> = serialized_message.into();
			let input_buffer = self.input_buffer.get_mut();
			for b in serialized_message {
				input_buffer.push(b);
			}
		}

		pub fn add_encrypted_input_message(&mut self, message: Message) {
			let serialized_message = encrypt_message(&self.shared_key_pair, serialize_message(message).unwrap()).unwrap();
			let serialized_message: Vec<_> = serialized_message.into();
			let input_buffer = self.input_buffer.get_mut();
			for b in serialized_message {
				input_buffer.push(b);
			}
		}
	}

	impl AsyncRead for TestIo {}

	impl AsyncWrite for TestIo {
		fn shutdown(&mut self) -> Poll<(), io::Error> {
			Ok(().into())
		}
	}

	impl io::Read for TestIo {
		fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
			io::Read::read(&mut self.input_buffer, buf)
		}
	}

	impl io::Write for TestIo {
		fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
			Ok(buf.len())
		}

		fn flush(&mut self) -> io::Result<()> {
			Ok(())
		}
	}

	#[test]
	fn header_serialization_works() {
		let header = MessageHeader {
			kind: 1,
			version: CURRENT_HEADER_VERSION,
			size: 3,
		};

		let serialized_header = serialize_header(&header).unwrap();
		assert_eq!(serialized_header.len(), MESSAGE_HEADER_SIZE);

		let deserialized_header = deserialize_header(&serialized_header).unwrap();
		assert_eq!(deserialized_header, header);
	}

	#[test]
	fn deserializing_header_of_wrong_version_fails() {
		let header = MessageHeader {
			kind: 1,
			version: CURRENT_HEADER_VERSION + 1,
			size: 3,
		};

		assert_eq!(deserialize_header(&serialize_header(&header).unwrap()).unwrap_err(), Error::InvalidMessageVersion);
	}
}
@@ -1,34 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

mod deadline;
mod handshake;
mod message;
mod read_header;
mod read_payload;
mod read_message;
mod shared_tcp_stream;
mod write_message;

pub use self::deadline::{deadline, Deadline, DeadlineStatus};
pub use self::handshake::{handshake, accept_handshake, Handshake, HandshakeResult};
pub use self::message::{MessageHeader, SerializedMessage, serialize_message, deserialize_message,
	encrypt_message, fix_shared_key};
pub use self::read_header::{read_header, ReadHeader};
pub use self::read_payload::{read_payload, read_encrypted_payload, ReadPayload};
pub use self::read_message::{read_message, read_encrypted_message, ReadMessage};
pub use self::shared_tcp_stream::SharedTcpStream;
pub use self::write_message::{write_message, write_encrypted_message, WriteMessage};
@@ -1,45 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::io;
use futures::{Future, Poll, Async};
use tokio_io::AsyncRead;
use tokio_io::io::{ReadExact, read_exact};
use key_server_cluster::Error;
use key_server_cluster::io::message::{MESSAGE_HEADER_SIZE, MessageHeader, deserialize_header};

/// Create a future that reads a single message header from the stream.
pub fn read_header<A>(a: A) -> ReadHeader<A> where A: AsyncRead {
	ReadHeader {
		reader: read_exact(a, [0; MESSAGE_HEADER_SIZE]),
	}
}

/// Future that reads a single message header from the stream.
pub struct ReadHeader<A> {
	reader: ReadExact<A, [u8; MESSAGE_HEADER_SIZE]>,
}

impl<A> Future for ReadHeader<A> where A: AsyncRead {
	type Item = (A, Result<MessageHeader, Error>);
	type Error = io::Error;

	fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
		let (read, data) = try_ready!(self.reader.poll());
		let header = deserialize_header(&data);
		Ok(Async::Ready((read, header)))
	}
}
@@ -1,87 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::io;
use futures::{Poll, Future, Async};
use tokio_io::AsyncRead;
use crypto::publickey::KeyPair;
use key_server_cluster::Error;
use key_server_cluster::message::Message;
use key_server_cluster::io::{read_header, ReadHeader, read_payload, read_encrypted_payload, ReadPayload};

/// Create a future that reads a single message from the stream.
pub fn read_message<A>(a: A) -> ReadMessage<A> where A: AsyncRead {
	ReadMessage {
		key: None,
		state: ReadMessageState::ReadHeader(read_header(a)),
	}
}

/// Create a future that reads a single encrypted message from the stream.
pub fn read_encrypted_message<A>(a: A, key: KeyPair) -> ReadMessage<A> where A: AsyncRead {
	ReadMessage {
		key: Some(key),
		state: ReadMessageState::ReadHeader(read_header(a)),
	}
}

enum ReadMessageState<A> {
	ReadHeader(ReadHeader<A>),
	ReadPayload(ReadPayload<A>),
	Finished,
}

/// Future that reads a single message from the stream.
pub struct ReadMessage<A> {
	key: Option<KeyPair>,
	state: ReadMessageState<A>,
}

impl<A> Future for ReadMessage<A> where A: AsyncRead {
	type Item = (A, Result<Message, Error>);
	type Error = io::Error;

	fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
		let (next, result) = match self.state {
			ReadMessageState::ReadHeader(ref mut future) => {
				let (read, header) = try_ready!(future.poll());
				let header = match header {
					Ok(header) => header,
					Err(err) => return Ok((read, Err(err)).into()),
				};

				let future = match self.key.take() {
					Some(key) => read_encrypted_payload(read, header, key),
					None => read_payload(read, header),
				};
				let next = ReadMessageState::ReadPayload(future);
				(next, Async::NotReady)
			},
			ReadMessageState::ReadPayload(ref mut future) => {
				let (read, payload) = try_ready!(future.poll());
				(ReadMessageState::Finished, Async::Ready((read, payload)))
			},
			ReadMessageState::Finished => panic!("poll ReadMessage after it's done"),
		};

		self.state = next;
		match result {
			// by polling again, we register new future
			Async::NotReady => self.poll(),
			result => Ok(result)
		}
	}
}
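
// Illustrative sketch (not part of the original file): the state-machine
// pattern shared by `ReadMessage` and `Handshake`. After a state transition
// the future re-polls itself on `Async::NotReady`, so the freshly created
// inner future is registered with the current task before control returns to
// the event loop. A typical call site, assuming `stream` and `shared_key`
// exist in scope:
//
// let next_message = read_encrypted_message(stream, shared_key.clone())
// 	.map(|(stream, message)| match message {
// 		Ok(message) => { /* dispatch to the session layer */ },
// 		Err(_) => { /* protocol violation: drop `stream` */ },
// 	});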
@@ -1,65 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::io;
use futures::{Poll, Future};
use tokio_io::AsyncRead;
use tokio_io::io::{read_exact, ReadExact};
use crypto::publickey::KeyPair;
use key_server_cluster::Error;
use key_server_cluster::message::Message;
use key_server_cluster::io::message::{MessageHeader, deserialize_message, decrypt_message};

/// Create a future that reads a single message payload from the stream.
pub fn read_payload<A>(a: A, header: MessageHeader) -> ReadPayload<A> where A: AsyncRead {
	ReadPayload {
		reader: read_exact(a, vec![0; header.size as usize]),
		header: header,
		key: None,
	}
}

/// Create a future that reads a single encrypted message payload from the stream.
pub fn read_encrypted_payload<A>(a: A, header: MessageHeader, key: KeyPair) -> ReadPayload<A> where A: AsyncRead {
	ReadPayload {
		reader: read_exact(a, vec![0; header.size as usize]),
		header: header,
		key: Some(key),
	}
}

/// Future that reads a single message payload from the stream.
pub struct ReadPayload<A> {
	reader: ReadExact<A, Vec<u8>>,
	header: MessageHeader,
	key: Option<KeyPair>,
}

impl<A> Future for ReadPayload<A> where A: AsyncRead {
	type Item = (A, Result<Message, Error>);
	type Error = io::Error;

	fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
		let (read, data) = try_ready!(self.reader.poll());
		let payload = if let Some(key) = self.key.take() {
			decrypt_message(&key, data)
				.and_then(|data| deserialize_message(&self.header, data))
		} else {
			deserialize_message(&self.header, data)
		};
		Ok((read, payload).into())
	}
}
@@ -1,71 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::net::Shutdown;
use std::io::{Read, Write, Error};
use futures::Poll;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio::net::TcpStream;

/// Read+Write implementation for Arc<TcpStream>.
pub struct SharedTcpStream {
	io: Arc<TcpStream>,
}

impl SharedTcpStream {
	pub fn new(a: Arc<TcpStream>) -> Self {
		SharedTcpStream {
			io: a,
		}
	}
}

impl From<TcpStream> for SharedTcpStream {
	fn from(a: TcpStream) -> Self {
		SharedTcpStream::new(Arc::new(a))
	}
}

impl AsyncRead for SharedTcpStream {}

impl AsyncWrite for SharedTcpStream {
	fn shutdown(&mut self) -> Poll<(), Error> {
		self.io.shutdown(Shutdown::Both).map(Into::into)
	}
}

impl Read for SharedTcpStream {
	fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
		Read::read(&mut (&*self.io as &TcpStream), buf)
	}
}

impl Write for SharedTcpStream {
	fn write(&mut self, buf: &[u8]) -> Result<usize, Error> {
		Write::write(&mut (&*self.io as &TcpStream), buf)
	}

	fn flush(&mut self) -> Result<(), Error> {
		Write::flush(&mut (&*self.io as &TcpStream))
	}
}

impl Clone for SharedTcpStream {
	fn clone(&self) -> Self {
		SharedTcpStream::new(self.io.clone())
	}
}
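
// Illustrative sketch (not part of the original file): the point of
// `SharedTcpStream`. `clone()` only bumps the `Arc`, so a reading future and a
// writing future can own handles to the same socket concurrently; the
// delegating `Read`/`Write` impls above forward through `&TcpStream`, which
// this tokio version implements for shared references.
//
// let stream: SharedTcpStream = tcp_stream.into();
// let reader = read_message(stream.clone());	// one future reads...
// let writer = write_message(stream, message);	// ...while another writes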
@@ -1,70 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::io;
use futures::{Future, Poll};
use tokio_io::AsyncWrite;
use tokio_io::io::{WriteAll, write_all};
use crypto::publickey::KeyPair;
use key_server_cluster::message::Message;
use key_server_cluster::io::{serialize_message, encrypt_message};

/// Write a plain message to the channel.
pub fn write_message<A>(a: A, message: Message) -> WriteMessage<A> where A: AsyncWrite {
	let (error, future) = match serialize_message(message)
		.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) {
		Ok(message) => (None, write_all(a, message.into())),
		Err(error) => (Some(error), write_all(a, Vec::new())),
	};
	WriteMessage {
		error: error,
		future: future,
	}
}

/// Write an encrypted message to the channel.
pub fn write_encrypted_message<A>(a: A, key: &KeyPair, message: Message) -> WriteMessage<A> where A: AsyncWrite {
	let (error, future) = match serialize_message(message)
		.and_then(|message| encrypt_message(key, message))
		.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) {
		Ok(message) => (None, write_all(a, message.into())),
		Err(error) => (Some(error), write_all(a, Vec::new())),
	};

	WriteMessage {
		error: error,
		future: future,
	}
}

/// Future message write.
pub struct WriteMessage<A> {
	error: Option<io::Error>,
	future: WriteAll<A, Vec<u8>>,
}

impl<A> Future for WriteMessage<A> where A: AsyncWrite {
	type Item = (A, Vec<u8>);
	type Error = io::Error;

	fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
		if let Some(err) = self.error.take() {
			return Err(err);
		}

		self.future.poll()
	}
}
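
// Illustrative sketch (not part of the original file): the deferred-error
// pattern above. The constructors cannot return `Result` (they must return a
// future), so a serialization failure is stashed in `error` and surfaced on
// the first `poll`, flowing through the ordinary future error path:
//
// write_encrypted_message(stream, &shared_key, message)
// 	.then(|result| match result {
// 		Ok((stream, _bytes)) => Ok(stream),	// message fully written
// 		Err(err) => Err(err),			// serialization or I/O error
// 	});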
@@ -1,793 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::collections::BTreeSet;
use key_server_cluster::{Error, NodeId, SessionMeta, Requester};
use key_server_cluster::message::ConsensusMessage;
use key_server_cluster::jobs::job_session::{JobSession, JobSessionState, JobTransport, JobExecutor, JobPartialRequestAction};

/// Consensus session state.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ConsensusSessionState {
	/// Every node starts in this state.
	WaitingForInitialization,
	/// Consensus group is being established.
	EstablishingConsensus,
	/// Consensus group is established.
	/// Master node can start jobs dissemination.
	/// Slave node waits for partial job requests.
	ConsensusEstablished,
	/// Master node waits for partial jobs responses.
	WaitingForPartialResults,
	/// Consensus session is completed successfully.
	/// Master node can call result() to get computation result.
	Finished,
	/// Consensus session has failed with error.
	Failed,
}

/// Consensus session consists of the following steps:
/// 1) consensus group is established
/// 2) master node sends partial job requests to every member of the consensus group
/// 3) slave nodes compute partial responses
/// 4) master node computes result from partial responses
pub struct ConsensusSession<ConsensusExecutor: JobExecutor<PartialJobResponse=bool>,
	ConsensusTransport: JobTransport<PartialJobRequest=ConsensusExecutor::PartialJobRequest, PartialJobResponse=ConsensusExecutor::PartialJobResponse>,
	ComputationExecutor: JobExecutor,
	ComputationTransport: JobTransport<PartialJobRequest=ComputationExecutor::PartialJobRequest, PartialJobResponse=ComputationExecutor::PartialJobResponse>
> {
	/// Current session state.
	state: ConsensusSessionState,
	/// Session metadata.
	meta: SessionMeta,
	/// Consensus establish job.
	consensus_job: JobSession<ConsensusExecutor, ConsensusTransport>,
	/// Consensus group.
	consensus_group: BTreeSet<NodeId>,
	/// Computation job.
	computation_job: Option<JobSession<ComputationExecutor, ComputationTransport>>,
}

/// Consensus session creation parameters.
pub struct ConsensusSessionParams<ConsensusExecutor: JobExecutor<PartialJobResponse=bool>,
	ConsensusTransport: JobTransport<PartialJobRequest=ConsensusExecutor::PartialJobRequest, PartialJobResponse=ConsensusExecutor::PartialJobResponse>
> {
	/// Session metadata.
	pub meta: SessionMeta,
	/// Consensus executor (e.g. an ACL-storage-backed access check).
	pub consensus_executor: ConsensusExecutor,
	/// Transport for consensus establish job.
	pub consensus_transport: ConsensusTransport,
}

impl<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTransport> ConsensusSession<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTransport>
	where ConsensusExecutor: JobExecutor<PartialJobResponse=bool, JobResponse=BTreeSet<NodeId>>,
		ConsensusTransport: JobTransport<PartialJobRequest=ConsensusExecutor::PartialJobRequest, PartialJobResponse=ConsensusExecutor::PartialJobResponse>,
		ComputationExecutor: JobExecutor,
		ComputationTransport: JobTransport<PartialJobRequest=ComputationExecutor::PartialJobRequest, PartialJobResponse=ComputationExecutor::PartialJobResponse> {
	/// Create new consensus session.
	pub fn new(params: ConsensusSessionParams<ConsensusExecutor, ConsensusTransport>) -> Result<Self, Error> {
		let consensus_job = JobSession::new(params.meta.clone(), params.consensus_executor, params.consensus_transport);
		debug_assert!(consensus_job.state() == JobSessionState::Inactive);

		Ok(ConsensusSession {
			state: ConsensusSessionState::WaitingForInitialization,
			meta: params.meta,
			consensus_job: consensus_job,
			consensus_group: BTreeSet::new(),
			computation_job: None,
		})
	}

	/// Get consensus job reference.
	pub fn consensus_job(&self) -> &JobSession<ConsensusExecutor, ConsensusTransport> {
		&self.consensus_job
	}

	/// Get mutable consensus job reference.
	pub fn consensus_job_mut(&mut self) -> &mut JobSession<ConsensusExecutor, ConsensusTransport> {
		&mut self.consensus_job
	}

	/// Get all nodes which have not rejected the consensus request.
	pub fn consensus_non_rejected_nodes(&self) -> BTreeSet<NodeId> {
		self.consensus_job.responses().iter()
			.filter(|r| *r.1)
			.map(|r| r.0)
			.chain(self.consensus_job.requests())
			.filter(|n| **n != self.meta.self_node_id)
			.cloned()
			.collect()
	}

	/// Get computation job reference.
	pub fn computation_job(&self) -> &JobSession<ComputationExecutor, ComputationTransport> {
		self.computation_job.as_ref()
			.expect("computation_job must only be called on master nodes")
	}

	/// Get consensus session state.
	pub fn state(&self) -> ConsensusSessionState {
		self.state
	}

	/// Get computation result.
	pub fn result(&self) -> Result<ComputationExecutor::JobResponse, Error> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);
		if self.state != ConsensusSessionState::Finished {
			return Err(Error::InvalidStateForRequest);
		}

		self.computation_job.as_ref()
			.expect("we are on master node in finished state; computation_job is set on master node during initialization; qed")
			.result()
	}

	/// Initialize session on master node.
	pub fn initialize(&mut self, nodes: BTreeSet<NodeId>) -> Result<(), Error> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);
		let initialization_result = self.consensus_job.initialize(nodes, None, false);
		self.state = ConsensusSessionState::EstablishingConsensus;
		self.process_result(initialization_result.map(|_| ()))
	}

	/// Process consensus request message.
	pub fn on_consensus_partial_request(&mut self, sender: &NodeId, request: ConsensusExecutor::PartialJobRequest) -> Result<(), Error> {
		let consensus_result = self.consensus_job.on_partial_request(sender, request);
		self.process_result(consensus_result.map(|_| ()))
	}

	/// Process consensus message response.
	pub fn on_consensus_partial_response(&mut self, sender: &NodeId, response: bool) -> Result<(), Error> {
		let consensus_result = self.consensus_job.on_partial_response(sender, response);
		self.process_result(consensus_result)
	}

	/// Select nodes for processing partial requests.
	pub fn select_consensus_group(&mut self) -> Result<&BTreeSet<NodeId>, Error> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);
		if self.state != ConsensusSessionState::ConsensusEstablished {
			return Err(Error::InvalidStateForRequest);
		}

		if self.consensus_group.is_empty() {
			let consensus_group = self.consensus_job.result()?;
			let is_self_in_consensus = consensus_group.contains(&self.meta.self_node_id);
			self.consensus_group = consensus_group.into_iter().take(self.meta.threshold + 1).collect();

			if is_self_in_consensus {
				self.consensus_group.remove(&self.meta.master_node_id);
				self.consensus_group.insert(self.meta.master_node_id.clone());
			}
		}

		Ok(&self.consensus_group)
	}

	/// Disseminate jobs from master node.
	pub fn disseminate_jobs(&mut self, executor: ComputationExecutor, transport: ComputationTransport, broadcast_self_response: bool) -> Result<Option<ComputationExecutor::PartialJobResponse>, Error> {
		let consensus_group = self.select_consensus_group()?.clone();
		self.consensus_group.clear();

		let mut computation_job = JobSession::new(self.meta.clone(), executor, transport);
		let computation_result = computation_job.initialize(consensus_group, None, broadcast_self_response);
		self.computation_job = Some(computation_job);
		self.state = ConsensusSessionState::WaitingForPartialResults;
		match computation_result {
			Ok(computation_result) => self.process_result(Ok(())).map(|_| computation_result),
			Err(error) => Err(self.process_result(Err(error)).unwrap_err()),
		}
	}
|
|
||||||
/// Process job request on slave node.
|
|
||||||
pub fn on_job_request(&mut self, node: &NodeId, request: ComputationExecutor::PartialJobRequest, executor: ComputationExecutor, transport: ComputationTransport) -> Result<JobPartialRequestAction<ComputationExecutor::PartialJobResponse>, Error> {
|
|
||||||
if &self.meta.master_node_id != node {
|
|
||||||
return Err(Error::InvalidMessage);
|
|
||||||
}
|
|
||||||
if self.state != ConsensusSessionState::ConsensusEstablished {
|
|
||||||
return Err(Error::InvalidStateForRequest);
|
|
||||||
}
|
|
||||||
|
|
||||||
JobSession::new(self.meta.clone(), executor, transport).on_partial_request(node, request)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Process job response on slave node.
|
|
||||||
pub fn on_job_response(&mut self, node: &NodeId, response: ComputationExecutor::PartialJobResponse) -> Result<(), Error> {
|
|
||||||
if self.state != ConsensusSessionState::WaitingForPartialResults {
|
|
||||||
return Err(Error::InvalidStateForRequest);
|
|
||||||
}
|
|
||||||
|
|
||||||
let computation_result = self.computation_job.as_mut()
|
|
||||||
.expect("WaitingForPartialResults is only set when computation_job is created; qed")
|
|
||||||
.on_partial_response(node, response);
|
|
||||||
|
|
||||||
self.process_result(computation_result)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// When session is completed on slave node.
|
|
||||||
pub fn on_session_completed(&mut self, node: &NodeId) -> Result<(), Error> {
|
|
||||||
if node != &self.meta.master_node_id {
|
|
||||||
return Err(Error::InvalidMessage);
|
|
||||||
}
|
|
||||||
if self.state != ConsensusSessionState::ConsensusEstablished {
|
|
||||||
return Err(Error::InvalidStateForRequest);
|
|
||||||
}
|
|
||||||
|
|
||||||
self.state = ConsensusSessionState::Finished;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// When error is received from node.
|
|
||||||
pub fn on_node_error(&mut self, node: &NodeId, error: Error) -> Result<bool, Error> {
|
|
||||||
let is_self_master = self.meta.master_node_id == self.meta.self_node_id;
|
|
||||||
let is_node_master = self.meta.master_node_id == *node;
|
|
||||||
let (is_restart_needed, timeout_result) = match self.state {
|
|
||||||
ConsensusSessionState::WaitingForInitialization if is_self_master => {
|
|
||||||
// it is strange to receive error before session is initialized && slave doesn't know access_key
|
|
||||||
// => unreachable
|
|
||||||
self.state = ConsensusSessionState::Failed;
|
|
||||||
(false, Err(Error::ConsensusUnreachable))
|
|
||||||
}
|
|
||||||
ConsensusSessionState::WaitingForInitialization if is_node_master => {
|
|
||||||
// error from master node before establishing consensus
|
|
||||||
// => unreachable
|
|
||||||
self.state = ConsensusSessionState::Failed;
|
|
||||||
(false, Err(if !error.is_non_fatal() {
|
|
||||||
Error::ConsensusUnreachable
|
|
||||||
} else {
|
|
||||||
Error::ConsensusTemporaryUnreachable
|
|
||||||
}))
|
|
||||||
},
|
|
||||||
ConsensusSessionState::EstablishingConsensus => {
|
|
||||||
debug_assert!(is_self_master);
|
|
||||||
|
|
||||||
// consensus still can be established
|
|
||||||
// => try to live without this node
|
|
||||||
(false, self.consensus_job.on_node_error(node, error))
|
|
||||||
},
|
|
||||||
ConsensusSessionState::ConsensusEstablished => {
|
|
||||||
// we could try to continue without this node, if enough nodes left
|
|
||||||
(false, self.consensus_job.on_node_error(node, error))
|
|
||||||
},
|
|
||||||
ConsensusSessionState::WaitingForPartialResults => {
|
|
||||||
// check if *current* computation job can continue without this node
|
|
||||||
let is_computation_node = self.computation_job.as_mut()
|
|
||||||
.expect("WaitingForPartialResults state is only set when computation_job is created; qed")
|
|
||||||
.on_node_error(node, error.clone())
|
|
||||||
.is_err();
|
|
||||||
if !is_computation_node {
|
|
||||||
// it is not used by current computation job
|
|
||||||
// => no restart required
|
|
||||||
(false, Ok(()))
|
|
||||||
} else {
|
|
||||||
// it is used by current computation job
|
|
||||||
// => restart is required if there are still enough nodes
|
|
||||||
self.consensus_group.clear();
|
|
||||||
self.state = ConsensusSessionState::EstablishingConsensus;
|
|
||||||
|
|
||||||
let consensus_result = self.consensus_job.on_node_error(node, error);
|
|
||||||
let is_consensus_established = self.consensus_job.state() == JobSessionState::Finished;
|
|
||||||
(is_consensus_established, consensus_result)
|
|
||||||
}
|
|
||||||
},
|
|
||||||
// in all other cases - just ignore error
|
|
||||||
ConsensusSessionState::WaitingForInitialization | ConsensusSessionState::Failed | ConsensusSessionState::Finished => (false, Ok(())),
|
|
||||||
};
|
|
||||||
self.process_result(timeout_result)?;
|
|
||||||
Ok(is_restart_needed)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// When session is timeouted.
|
|
||||||
pub fn on_session_timeout(&mut self) -> Result<bool, Error> {
|
|
||||||
match self.state {
|
|
||||||
// if we are waiting for results from slaves, there is a chance to send request to other nodes subset => fall through
|
|
||||||
ConsensusSessionState::WaitingForPartialResults => (),
|
|
||||||
// in some states this error is fatal
|
|
||||||
ConsensusSessionState::WaitingForInitialization | ConsensusSessionState::EstablishingConsensus | ConsensusSessionState::ConsensusEstablished => {
|
|
||||||
let _ = self.consensus_job.on_session_timeout();
|
|
||||||
|
|
||||||
self.consensus_group.clear();
|
|
||||||
self.state = ConsensusSessionState::EstablishingConsensus;
|
|
||||||
return self.process_result(Err(Error::ConsensusTemporaryUnreachable)).map(|_| unreachable!());
|
|
||||||
},
|
|
||||||
// in all other cases - just ignore error
|
|
||||||
ConsensusSessionState::Finished | ConsensusSessionState::Failed => return Ok(false),
|
|
||||||
};
|
|
||||||
|
|
||||||
let timeouted_nodes = self.computation_job.as_ref()
|
|
||||||
.expect("WaitingForPartialResults state is only set when computation_job is created; qed")
|
|
||||||
.requests()
|
|
||||||
.clone();
|
|
||||||
assert!(!timeouted_nodes.is_empty()); // timeout should not ever happen if no requests are active && we are waiting for responses
|
|
||||||
|
|
||||||
self.consensus_group.clear();
|
|
||||||
for timeouted_node in timeouted_nodes {
|
|
||||||
let timeout_result = self.consensus_job.on_node_error(&timeouted_node, Error::NodeDisconnected);
|
|
||||||
self.state = ConsensusSessionState::EstablishingConsensus;
|
|
||||||
self.process_result(timeout_result)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(self.state == ConsensusSessionState::ConsensusEstablished)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Process result of job.
|
|
||||||
fn process_result(&mut self, result: Result<(), Error>) -> Result<(), Error> {
|
|
||||||
match self.state {
|
|
||||||
ConsensusSessionState::WaitingForInitialization | ConsensusSessionState::EstablishingConsensus | ConsensusSessionState::ConsensusEstablished => match self.consensus_job.state() {
|
|
||||||
JobSessionState::Finished => self.state = ConsensusSessionState::ConsensusEstablished,
|
|
||||||
JobSessionState::Failed => self.state = ConsensusSessionState::Failed,
|
|
||||||
_ => (),
|
|
||||||
},
|
|
||||||
ConsensusSessionState::WaitingForPartialResults => match self.computation_job.as_ref()
|
|
||||||
.expect("WaitingForPartialResults state is only set when computation_job is created; qed")
|
|
||||||
.state() {
|
|
||||||
JobSessionState::Finished => self.state = ConsensusSessionState::Finished,
|
|
||||||
JobSessionState::Failed => self.state = ConsensusSessionState::Failed,
|
|
||||||
_ => (),
|
|
||||||
},
|
|
||||||
_ => (),
|
|
||||||
}
|
|
||||||
|
|
||||||
result
|
|
||||||
}
|
|
||||||
}
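
// Editor's sketch, not part of the original file: the master-side call
// sequence implied by the state machine above. `params`, `nodes`, `executor`
// and `transport` are hypothetical values; only the `ConsensusSession`
// methods themselves come from this file.
//
//	let mut session = ConsensusSession::new(params)?;
//	session.initialize(nodes)?;                              // -> EstablishingConsensus
//	// feed ConfirmConsensusInitialization replies through on_consensus_message()
//	// until state() == ConsensusSessionState::ConsensusEstablished, then:
//	session.disseminate_jobs(executor, transport, false)?;   // -> WaitingForPartialResults
//	// feed partial results through on_job_response() until state() == Finished, then:
//	let job_response = session.result()?;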

impl<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTransport> ConsensusSession<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTransport>
	where ConsensusExecutor: JobExecutor<PartialJobRequest=Requester, PartialJobResponse=bool, JobResponse=BTreeSet<NodeId>>,
		ConsensusTransport: JobTransport<PartialJobRequest=ConsensusExecutor::PartialJobRequest, PartialJobResponse=ConsensusExecutor::PartialJobResponse>,
		ComputationExecutor: JobExecutor,
		ComputationTransport: JobTransport<PartialJobRequest=ComputationExecutor::PartialJobRequest, PartialJobResponse=ComputationExecutor::PartialJobResponse> {
	/// Process basic consensus message.
	pub fn on_consensus_message(&mut self, sender: &NodeId, message: &ConsensusMessage) -> Result<(), Error> {
		let consensus_result = match message {
			&ConsensusMessage::InitializeConsensusSession(ref message) =>
				self.consensus_job.on_partial_request(sender, message.requester.clone().into()).map(|_| ()),
			&ConsensusMessage::ConfirmConsensusInitialization(ref message) =>
				self.consensus_job.on_partial_response(sender, message.is_confirmed),
		};
		self.process_result(consensus_result)
	}
}
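
// Editor's note: the two match arms above are the whole consensus handshake.
// The master sends InitializeConsensusSession carrying the requester's
// signature; each slave runs its access check and answers with
// ConfirmConsensusInitialization { is_confirmed }, as exercised by the tests
// below.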

#[cfg(test)]
mod tests {
	use std::sync::Arc;
	use crypto::publickey::{KeyPair, Random, Generator, sign, public_to_address};
	use key_server_cluster::{Error, NodeId, SessionId, Requester, DummyAclStorage};
	use key_server_cluster::message::{ConsensusMessage, InitializeConsensusSession, ConfirmConsensusInitialization};
	use key_server_cluster::jobs::job_session::tests::{make_master_session_meta, make_slave_session_meta, SquaredSumJobExecutor, DummyJobTransport};
	use key_server_cluster::jobs::key_access_job::KeyAccessJob;
	use super::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState};

	type SquaredSumConsensusSession = ConsensusSession<KeyAccessJob, DummyJobTransport<Requester, bool>, SquaredSumJobExecutor, DummyJobTransport<u32, u32>>;

	fn make_master_consensus_session(threshold: usize, requester: Option<KeyPair>, acl_storage: Option<DummyAclStorage>) -> SquaredSumConsensusSession {
		let secret = requester.map(|kp| kp.secret().clone()).unwrap_or(Random.generate().secret().clone());
		SquaredSumConsensusSession::new(ConsensusSessionParams {
			meta: make_master_session_meta(threshold),
			consensus_executor: KeyAccessJob::new_on_master(SessionId::from([1u8; 32]), Arc::new(acl_storage.unwrap_or(DummyAclStorage::default())),
				sign(&secret, &SessionId::from([1u8; 32])).unwrap().into()),
			consensus_transport: DummyJobTransport::default(),
		}).unwrap()
	}

	fn make_slave_consensus_session(threshold: usize, acl_storage: Option<DummyAclStorage>) -> SquaredSumConsensusSession {
		SquaredSumConsensusSession::new(ConsensusSessionParams {
			meta: make_slave_session_meta(threshold),
			consensus_executor: KeyAccessJob::new_on_slave(SessionId::from([1u8; 32]), Arc::new(acl_storage.unwrap_or(DummyAclStorage::default()))),
			consensus_transport: DummyJobTransport::default(),
		}).unwrap()
	}

	#[test]
	fn consensus_session_consensus_is_not_reached_when_initializes_with_non_zero_threshold() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
	}

	#[test]
	fn consensus_session_consensus_is_reached_when_initializes_with_zero_threshold() {
		let mut session = make_master_consensus_session(0, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
	}

	#[test]
	fn consensus_session_consensus_is_not_reached_when_initializes_with_zero_threshold_and_master_rejects() {
		let requester = Random.generate();
		let acl_storage = DummyAclStorage::default();
		acl_storage.prohibit(public_to_address(requester.public()), SessionId::from([1u8; 32]));

		let mut session = make_master_consensus_session(0, Some(requester), Some(acl_storage));
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
	}

	#[test]
	fn consensus_session_consensus_is_failed_by_master_node() {
		let requester = Random.generate();
		let acl_storage = DummyAclStorage::default();
		acl_storage.prohibit(public_to_address(requester.public()), SessionId::from([1u8; 32]));

		let mut session = make_master_consensus_session(1, Some(requester), Some(acl_storage));
		assert_eq!(session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2)].into_iter().collect()).unwrap_err(), Error::ConsensusUnreachable);
		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
	fn consensus_session_consensus_is_failed_by_slave_node() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
		assert_eq!(session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: false,
		})).unwrap_err(), Error::ConsensusUnreachable);
		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
	fn consensus_session_job_dissemination_fails_if_consensus_is_not_reached() {
		let mut session = make_master_consensus_session(1, None, None);
		assert_eq!(session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap_err(), Error::InvalidStateForRequest);
	}

	#[test]
	fn consensus_session_job_dissemination_selects_master_node_if_agreed() {
		let mut session = make_master_consensus_session(0, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::Finished);
		assert!(session.computation_job().responses().contains_key(&NodeId::from_low_u64_be(1)));
	}

	#[test]
	fn consensus_session_job_dissemination_does_not_select_master_node_if_rejected() {
		let requester = Random.generate();
		let acl_storage = DummyAclStorage::default();
		acl_storage.prohibit(public_to_address(requester.public()), SessionId::from([1u8; 32]));

		let mut session = make_master_consensus_session(0, Some(requester), Some(acl_storage));
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);
		assert!(!session.computation_job().responses().contains_key(&NodeId::from_low_u64_be(1)));
	}

	#[test]
	fn consensus_session_computation_request_is_rejected_when_received_by_master_node() {
		let mut session = make_master_consensus_session(0, None, None);
		assert_eq!(session.on_job_request(&NodeId::from_low_u64_be(2), 2, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidMessage);
	}

	#[test]
	fn consensus_session_computation_request_is_rejected_when_received_before_consensus_is_established() {
		let mut session = make_slave_consensus_session(0, None);
		assert_eq!(session.on_job_request(&NodeId::from_low_u64_be(1), 2, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidStateForRequest);
	}

	#[test]
	fn consensus_session_computation_request_is_ignored_when_wrong() {
		let mut session = make_slave_consensus_session(0, None);
		assert_eq!(session.state(), ConsensusSessionState::WaitingForInitialization);
		session.on_consensus_message(&NodeId::from_low_u64_be(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
			requester: Requester::Signature(sign(Random.generate().secret(), &SessionId::from([1u8; 32])).unwrap()).into(),
			version: Default::default(),
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		assert_eq!(session.on_job_request(&NodeId::from_low_u64_be(1), 20, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidMessage);
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
	}

	#[test]
	fn consensus_session_computation_request_is_processed_when_correct() {
		let mut session = make_slave_consensus_session(0, None);
		assert_eq!(session.state(), ConsensusSessionState::WaitingForInitialization);
		session.on_consensus_message(&NodeId::from_low_u64_be(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
			requester: Requester::Signature(sign(Random.generate().secret(), &SessionId::from([1u8; 32])).unwrap()).into(),
			version: Default::default(),
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.on_job_request(&NodeId::from_low_u64_be(1), 2, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
	}

	#[test]
	fn consensus_session_computation_response_is_ignored_when_consensus_is_not_reached() {
		let mut session = make_master_consensus_session(1, None, None);
		assert_eq!(session.on_job_response(&NodeId::from_low_u64_be(2), 4).unwrap_err(), Error::InvalidStateForRequest);
	}

	#[test]
	fn consensus_session_completion_is_ignored_when_received_from_non_master_node() {
		let mut session = make_slave_consensus_session(0, None);
		assert_eq!(session.on_session_completed(&NodeId::from_low_u64_be(3)).unwrap_err(), Error::InvalidMessage);
	}

	#[test]
	fn consensus_session_completion_is_ignored_when_consensus_is_not_established() {
		let mut session = make_slave_consensus_session(0, None);
		assert_eq!(session.on_session_completed(&NodeId::from_low_u64_be(1)).unwrap_err(), Error::InvalidStateForRequest);
	}

	#[test]
	fn consensus_session_completion_is_accepted() {
		let mut session = make_slave_consensus_session(0, None);
		session.on_consensus_message(&NodeId::from_low_u64_be(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
			requester: Requester::Signature(sign(Random.generate().secret(), &SessionId::from([1u8; 32])).unwrap()).into(),
			version: Default::default(),
		})).unwrap();
		session.on_session_completed(&NodeId::from_low_u64_be(1)).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::Finished);
	}

	#[test]
	fn consensus_session_fails_if_node_error_received_by_uninitialized_master() {
		let mut session = make_master_consensus_session(0, None, None);
		assert_eq!(session.on_node_error(&NodeId::from_low_u64_be(2), Error::AccessDenied), Err(Error::ConsensusUnreachable));
		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
	fn consensus_session_fails_if_node_error_received_by_uninitialized_slave_from_master() {
		let mut session = make_slave_consensus_session(0, None);
		assert_eq!(session.on_node_error(&NodeId::from_low_u64_be(1), Error::AccessDenied), Err(Error::ConsensusUnreachable));
		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
	fn consensus_session_fails_with_temp_error_if_node_error_received_by_uninitialized_slave_from_master() {
		let mut session = make_slave_consensus_session(0, None);
		assert_eq!(session.on_node_error(&NodeId::from_low_u64_be(1), Error::NodeDisconnected).unwrap_err(), Error::ConsensusTemporaryUnreachable);
	}

	#[test]
	fn consensus_session_continues_if_node_error_received_by_master_during_establish_and_enough_nodes_left() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2), NodeId::from_low_u64_be(3)].into_iter().collect()).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from_low_u64_be(2), Error::AccessDenied), Ok(false));
	}

	#[test]
	fn consensus_session_fails_if_node_error_received_by_master_during_establish_and_not_enough_nodes_left() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2)].into_iter().collect()).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from_low_u64_be(2), Error::AccessDenied), Err(Error::ConsensusUnreachable));
		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
	fn consensus_session_continues_if_node2_error_received_by_master_after_consensus_established_and_enough_nodes_left() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2), NodeId::from_low_u64_be(3)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from_low_u64_be(2), Error::AccessDenied), Ok(false));
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
	}

	#[test]
	fn consensus_session_continues_if_node3_error_received_by_master_after_consensus_established_and_enough_nodes_left() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2), NodeId::from_low_u64_be(3)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from_low_u64_be(3), Error::AccessDenied), Ok(false));
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
	}

	#[test]
	fn consensus_session_fails_if_node_error_received_by_master_after_consensus_established_and_not_enough_nodes_left() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from_low_u64_be(2), Error::AccessDenied), Err(Error::ConsensusUnreachable));
		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
	fn consensus_session_continues_if_node_error_received_from_slave_not_participating_in_computation() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2), NodeId::from_low_u64_be(3), NodeId::from_low_u64_be(4)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		session.on_consensus_message(&NodeId::from_low_u64_be(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from_low_u64_be(3), Error::AccessDenied), Ok(false));
		assert_eq!(session.on_node_error(&NodeId::from_low_u64_be(4), Error::AccessDenied), Ok(false));
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);
	}

	#[test]
	fn consensus_session_restarts_if_node_error_received_from_slave_participating_in_computation_and_enough_nodes_left() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2), NodeId::from_low_u64_be(3), NodeId::from_low_u64_be(4)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);

		session.on_consensus_message(&NodeId::from_low_u64_be(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from_low_u64_be(2), Error::AccessDenied), Ok(true));
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);

		assert_eq!(session.on_node_error(&NodeId::from_low_u64_be(3), Error::AccessDenied), Ok(false));
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
	}

	#[test]
	fn consensus_session_fails_if_node_error_received_from_slave_participating_in_computation_and_not_enough_nodes_left() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap();
		assert_eq!(session.on_node_error(&NodeId::from_low_u64_be(2), Error::AccessDenied), Err(Error::ConsensusUnreachable));
		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
	fn consensus_session_fails_if_uninitialized_session_timeouts() {
		let mut session = make_master_consensus_session(1, None, None);
		assert_eq!(session.on_session_timeout(), Err(Error::ConsensusTemporaryUnreachable));
	}

	#[test]
	fn consensus_session_continues_if_session_timeouts_and_enough_nodes_left_for_computation() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2), NodeId::from_low_u64_be(3), NodeId::from_low_u64_be(4)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();

		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);

		session.on_consensus_message(&NodeId::from_low_u64_be(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.on_session_timeout(), Ok(true));
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);

		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);

		assert_eq!(session.on_session_timeout(), Ok(false));
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
	}

	#[test]
	fn consensus_session_fails_if_session_timeouts_and_not_enough_nodes_left_for_computation() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);

		assert_eq!(session.on_session_timeout(), Err(Error::ConsensusUnreachable));
		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
	fn same_consensus_group_returned_after_second_selection() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2), NodeId::from_low_u64_be(3)].into_iter().collect()).unwrap();
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		session.on_consensus_message(&NodeId::from_low_u64_be(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();

		let consensus_group1 = session.select_consensus_group().unwrap().clone();
		let consensus_group2 = session.select_consensus_group().unwrap().clone();
		assert_eq!(consensus_group1, consensus_group2);
	}

	#[test]
	fn consensus_session_complete_2_of_4() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2), NodeId::from_low_u64_be(3), NodeId::from_low_u64_be(4)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);
		session.on_job_response(&NodeId::from_low_u64_be(2), 16).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::Finished);
		assert_eq!(session.result(), Ok(20));
	}

	#[test]
	fn consensus_session_complete_2_of_4_after_restart() {
		let mut session = make_master_consensus_session(1, None, None);
		session.initialize(vec![NodeId::from_low_u64_be(1), NodeId::from_low_u64_be(2), NodeId::from_low_u64_be(3), NodeId::from_low_u64_be(4)].into_iter().collect()).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);
		session.on_consensus_message(&NodeId::from_low_u64_be(2), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);

		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);

		session.on_consensus_message(&NodeId::from_low_u64_be(3), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();

		assert_eq!(session.on_node_error(&NodeId::from_low_u64_be(2), Error::AccessDenied).unwrap(), true);
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);

		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);

		assert_eq!(session.on_node_error(&NodeId::from_low_u64_be(3), Error::AccessDenied).unwrap(), false);
		assert_eq!(session.state(), ConsensusSessionState::EstablishingConsensus);

		session.on_consensus_message(&NodeId::from_low_u64_be(4), &ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
			is_confirmed: true,
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);

		session.disseminate_jobs(SquaredSumJobExecutor, DummyJobTransport::default(), false).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::WaitingForPartialResults);

		session.on_job_response(&NodeId::from_low_u64_be(4), 16).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::Finished);
		assert_eq!(session.result(), Ok(20));
	}
}
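
// Editor's note on the numbers above: SquaredSumJobExecutor (a helper from
// the job_session tests) squares the partial request value, so the request
// of 2 yields a partial response of 4 on the master itself; summed with the
// slave's response of 16 this gives the Ok(20) asserted by the 2-of-4 tests.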
@ -1,188 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{BTreeSet, BTreeMap};
use ethereum_types::H256;
use crypto::publickey::{Public, Secret};
use crypto::DEFAULT_MAC;
use crypto::publickey::ecies::encrypt;
use key_server_cluster::{Error, NodeId, DocumentKeyShare, EncryptedDocumentKeyShadow};
use key_server_cluster::math;
use key_server_cluster::jobs::job_session::{JobPartialRequestAction, JobPartialResponseAction, JobExecutor};

/// Decryption job.
pub struct DecryptionJob {
	/// This node id.
	self_node_id: NodeId,
	/// Access key.
	access_key: Secret,
	/// Requester public key.
	requester: Public,
	/// Key share.
	key_share: DocumentKeyShare,
	/// Key version.
	key_version: H256,
	/// Request id.
	request_id: Option<Secret>,
	/// Is shadow decryption requested.
	is_shadow_decryption: Option<bool>,
	/// Is broadcast decryption requested.
	is_broadcast_session: Option<bool>,
}

/// Decryption job partial request.
#[derive(Debug)]
pub struct PartialDecryptionRequest {
	/// Request id.
	pub id: Secret,
	/// Is shadow decryption requested.
	pub is_shadow_decryption: bool,
	/// Is broadcast decryption requested.
	pub is_broadcast_session: bool,
	/// Id of other nodes, participating in decryption.
	pub other_nodes_ids: BTreeSet<NodeId>,
}

/// Decryption job partial response.
#[derive(Clone)]
pub struct PartialDecryptionResponse {
	/// Request id.
	pub request_id: Secret,
	/// Shadow point.
	pub shadow_point: Public,
	/// Decryption shadow coefficient, if requested.
	pub decrypt_shadow: Option<Vec<u8>>,
}

impl DecryptionJob {
	pub fn new_on_slave(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare, key_version: H256) -> Result<Self, Error> {
		debug_assert!(key_share.common_point.is_some() && key_share.encrypted_point.is_some());
		Ok(DecryptionJob {
			self_node_id: self_node_id,
			access_key: access_key,
			requester: requester,
			key_share: key_share,
			key_version: key_version,
			request_id: None,
			is_shadow_decryption: None,
			is_broadcast_session: None,
		})
	}

	pub fn new_on_master(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare, key_version: H256, is_shadow_decryption: bool, is_broadcast_session: bool) -> Result<Self, Error> {
		debug_assert!(key_share.common_point.is_some() && key_share.encrypted_point.is_some());
		Ok(DecryptionJob {
			self_node_id: self_node_id,
			access_key: access_key,
			requester: requester,
			key_share: key_share,
			key_version: key_version,
			request_id: Some(math::generate_random_scalar()?),
			is_shadow_decryption: Some(is_shadow_decryption),
			is_broadcast_session: Some(is_broadcast_session),
		})
	}

	pub fn request_id(&self) -> &Option<Secret> {
		&self.request_id
	}

	pub fn set_request_id(&mut self, request_id: Secret) {
		self.request_id = Some(request_id);
	}
}

impl JobExecutor for DecryptionJob {
	type PartialJobRequest = PartialDecryptionRequest;
	type PartialJobResponse = PartialDecryptionResponse;
	type JobResponse = EncryptedDocumentKeyShadow;

	fn prepare_partial_request(&self, node: &NodeId, nodes: &BTreeSet<NodeId>) -> Result<PartialDecryptionRequest, Error> {
		debug_assert!(nodes.len() == self.key_share.threshold + 1);

		let request_id = self.request_id.as_ref()
			.expect("prepare_partial_request is only called on master nodes; request_id is filled in constructor on master nodes; qed");
		let is_shadow_decryption = self.is_shadow_decryption
			.expect("prepare_partial_request is only called on master nodes; is_shadow_decryption is filled in constructor on master nodes; qed");
		let is_broadcast_session = self.is_broadcast_session
			.expect("prepare_partial_request is only called on master nodes; is_broadcast_session is filled in constructor on master nodes; qed");
		let mut other_nodes_ids = nodes.clone();
		other_nodes_ids.remove(node);

		Ok(PartialDecryptionRequest {
			id: request_id.clone(),
			is_shadow_decryption: is_shadow_decryption,
			is_broadcast_session: is_broadcast_session,
			other_nodes_ids: other_nodes_ids,
		})
	}

	fn process_partial_request(&mut self, partial_request: PartialDecryptionRequest) -> Result<JobPartialRequestAction<PartialDecryptionResponse>, Error> {
		let key_version = self.key_share.version(&self.key_version)?;
		if partial_request.other_nodes_ids.len() != self.key_share.threshold
			|| partial_request.other_nodes_ids.contains(&self.self_node_id)
			|| partial_request.other_nodes_ids.iter().any(|n| !key_version.id_numbers.contains_key(n)) {
			return Err(Error::InvalidMessage);
		}

		let self_id_number = &key_version.id_numbers[&self.self_node_id];
		let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &key_version.id_numbers[n]);
		let node_shadow = math::compute_node_shadow(&key_version.secret_share, &self_id_number, other_id_numbers)?;
		let decrypt_shadow = if partial_request.is_shadow_decryption { Some(math::generate_random_scalar()?) } else { None };
		let common_point = self.key_share.common_point.as_ref().expect("DecryptionJob is only created when common_point is known; qed");
		let (shadow_point, decrypt_shadow) = math::compute_node_shadow_point(&self.access_key, &common_point, &node_shadow, decrypt_shadow)?;

		Ok(JobPartialRequestAction::Respond(PartialDecryptionResponse {
			request_id: partial_request.id,
			shadow_point: shadow_point,
			decrypt_shadow: match decrypt_shadow.clone() {
				None => None,
				Some(decrypt_shadow) => Some(encrypt(&self.requester, &DEFAULT_MAC, decrypt_shadow.as_bytes())?),
			},
		}))
	}

	fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &PartialDecryptionResponse) -> Result<JobPartialResponseAction, Error> {
		if Some(&partial_response.request_id) != self.request_id.as_ref() {
			return Ok(JobPartialResponseAction::Ignore);
		}
		if self.is_shadow_decryption != Some(partial_response.decrypt_shadow.is_some()) {
			return Ok(JobPartialResponseAction::Reject);
		}
		Ok(JobPartialResponseAction::Accept)
	}

	fn compute_response(&self, partial_responses: &BTreeMap<NodeId, PartialDecryptionResponse>) -> Result<EncryptedDocumentKeyShadow, Error> {
		let is_shadow_decryption = self.is_shadow_decryption
			.expect("compute_response is only called on master nodes; is_shadow_decryption is filled in constructor on master nodes; qed");
		let common_point = self.key_share.common_point.as_ref().expect("DecryptionJob is only created when common_point is known; qed");
		let encrypted_point = self.key_share.encrypted_point.as_ref().expect("DecryptionJob is only created when encrypted_point is known; qed");
		let joint_shadow_point = math::compute_joint_shadow_point(partial_responses.values().map(|s| &s.shadow_point))?;
		let decrypted_secret = math::decrypt_with_joint_shadow(self.key_share.threshold, &self.access_key, encrypted_point, &joint_shadow_point)?;
		Ok(EncryptedDocumentKeyShadow {
			decrypted_secret: decrypted_secret,
			common_point: if is_shadow_decryption {
				Some(math::make_common_shadow_point(self.key_share.threshold, common_point.clone())?)
			} else { None },
			decrypt_shadows: if is_shadow_decryption {
				Some(partial_responses.values().map(|r| r.decrypt_shadow.as_ref()
					.expect("is_shadow_decryption == true; decrypt_shadow.is_some() is checked in check_partial_response; qed")
					.clone())
					.collect())
			} else { None },
		})
	}
}
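
// Editor's note: process_partial_request above pins down the t-of-n shape of
// the job. other_nodes_ids must name exactly key_share.threshold nodes, all
// distinct from this node and known to the key version, so together with the
// responding node exactly threshold + 1 key shares enter the computation
// (threshold = 1 therefore means two participants).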
@ -1,60 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{BTreeMap, BTreeSet};
use key_server_cluster::{Error, NodeId};
use key_server_cluster::jobs::job_session::{JobExecutor, JobTransport, JobPartialRequestAction, JobPartialResponseAction};

/// No-work job to use in generics (TODO [Refac]: create separate ShareChangeConsensusSession && remove this)
pub struct DummyJob;

impl JobExecutor for DummyJob {
	type PartialJobRequest = ();
	type PartialJobResponse = ();
	type JobResponse = ();

	fn prepare_partial_request(&self, _n: &NodeId, _nodes: &BTreeSet<NodeId>) -> Result<(), Error> {
		unreachable!("dummy job methods are never called")
	}

	fn process_partial_request(&mut self, _r: ()) -> Result<JobPartialRequestAction<()>, Error> {
		unreachable!("dummy job methods are never called")
	}

	fn check_partial_response(&mut self, _s: &NodeId, _r: &()) -> Result<JobPartialResponseAction, Error> {
		unreachable!("dummy job methods are never called")
	}

	fn compute_response(&self, _r: &BTreeMap<NodeId, ()>) -> Result<(), Error> {
		unreachable!("dummy job methods are never called")
	}
}

/// No-work job transport to use in generics (TODO [Refac]: create separate ShareChangeConsensusSession && remove this)
pub struct DummyJobTransport;

impl JobTransport for DummyJobTransport {
	type PartialJobRequest = ();
	type PartialJobResponse = ();

	fn send_partial_request(&self, _node: &NodeId, _request: ()) -> Result<(), Error> {
		unreachable!("dummy transport methods are never called")
	}

	fn send_partial_response(&self, _node: &NodeId, _response: ()) -> Result<(), Error> {
		unreachable!("dummy transport methods are never called")
	}
}
@ -1,661 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{BTreeSet, BTreeMap};
use key_server_cluster::{Error, NodeId, SessionMeta};

/// Partial response action.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum JobPartialResponseAction {
	/// Ignore this response.
	Ignore,
	/// Mark this response as reject.
	Reject,
	/// Accept this response.
	Accept,
}

/// Partial request action.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum JobPartialRequestAction<PartialJobResponse> {
	/// Respond with reject.
	Reject(PartialJobResponse),
	/// Respond with this response.
	Respond(PartialJobResponse),
}

/// Job executor.
pub trait JobExecutor {
	type PartialJobRequest;
	type PartialJobResponse: Clone;
	type JobResponse;

	/// Prepare job request for given node.
	fn prepare_partial_request(&self, node: &NodeId, nodes: &BTreeSet<NodeId>) -> Result<Self::PartialJobRequest, Error>;
	/// Process partial request.
	fn process_partial_request(&mut self, partial_request: Self::PartialJobRequest) -> Result<JobPartialRequestAction<Self::PartialJobResponse>, Error>;
	/// Check partial response of given node.
	fn check_partial_response(&mut self, sender: &NodeId, partial_response: &Self::PartialJobResponse) -> Result<JobPartialResponseAction, Error>;
	/// Compute final job response.
	fn compute_response(&self, partial_responses: &BTreeMap<NodeId, Self::PartialJobResponse>) -> Result<Self::JobResponse, Error>;
}
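
// Editor's sketch of a toy implementor, consistent with the behaviour the
// SquaredSumJobExecutor test helper exhibits in the consensus_session tests
// (request 2 yields partial response 4, partials sum to the final result,
// a request of 20 is rejected as invalid); the original helper may differ:
//
//	pub struct SquaredSumJobExecutor;
//
//	impl JobExecutor for SquaredSumJobExecutor {
//		type PartialJobRequest = u32;
//		type PartialJobResponse = u32;
//		type JobResponse = u32;
//
//		fn prepare_partial_request(&self, _node: &NodeId, _nodes: &BTreeSet<NodeId>) -> Result<u32, Error> {
//			Ok(2) // every selected node is asked to square the same value
//		}
//
//		fn process_partial_request(&mut self, r: u32) -> Result<JobPartialRequestAction<u32>, Error> {
//			if r <= 10 { Ok(JobPartialRequestAction::Respond(r * r)) } else { Err(Error::InvalidMessage) }
//		}
//
//		fn check_partial_response(&mut self, _sender: &NodeId, _r: &u32) -> Result<JobPartialResponseAction, Error> {
//			Ok(JobPartialResponseAction::Accept)
//		}
//
//		fn compute_response(&self, responses: &BTreeMap<NodeId, u32>) -> Result<u32, Error> {
//			Ok(responses.values().sum()) // the final answer is the sum of all partials
//		}
//	}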

/// Jobs transport.
pub trait JobTransport {
	type PartialJobRequest;
	type PartialJobResponse;

	/// Send partial request to given node.
	fn send_partial_request(&self, node: &NodeId, request: Self::PartialJobRequest) -> Result<(), Error>;
	/// Send partial response to given node.
	fn send_partial_response(&self, node: &NodeId, response: Self::PartialJobResponse) -> Result<(), Error>;
}
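
// Editor's note: in the cluster the JobTransport implementors wrap these two
// calls into network messages (for the consensus job above that is
// InitializeConsensusSession / ConfirmConsensusInitialization), while the unit
// tests substitute the in-memory DummyJobTransport helper instead.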

/// Current state of job session.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum JobSessionState {
	/// Session is inactive.
	Inactive,
	/// Session is active.
	Active,
	/// Session is finished.
	Finished,
	/// Session has failed.
	Failed,
}

/// Basic request-response session on a set of nodes.
pub struct JobSession<Executor: JobExecutor, Transport> where Transport: JobTransport<PartialJobRequest = Executor::PartialJobRequest, PartialJobResponse = Executor::PartialJobResponse> {
	/// Session meta.
	meta: SessionMeta,
	/// Job executor.
	executor: Executor,
	/// Jobs transport.
	transport: Transport,
	/// Session data.
	data: JobSessionData<Executor::PartialJobResponse>,
}

/// Data of job session.
struct JobSessionData<PartialJobResponse> {
	/// Session state.
	state: JobSessionState,
	/// Mutable session data.
	active_data: Option<ActiveJobSessionData<PartialJobResponse>>,
}

/// Active job session data.
struct ActiveJobSessionData<PartialJobResponse> {
	/// Active partial requests.
	requests: BTreeSet<NodeId>,
	/// Rejects of partial requests (maps to true if the reject is fatal).
	rejects: BTreeMap<NodeId, bool>,
	/// Received partial responses.
	responses: BTreeMap<NodeId, PartialJobResponse>,
}

impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExecutor, Transport: JobTransport<PartialJobRequest = Executor::PartialJobRequest, PartialJobResponse = Executor::PartialJobResponse> {
	/// Create new session.
	pub fn new(meta: SessionMeta, executor: Executor, transport: Transport) -> Self {
		JobSession {
			meta: meta,
			executor: executor,
			transport: transport,
			data: JobSessionData {
				state: JobSessionState::Inactive,
				active_data: None,
			},
		}
	}

	/// Get transport reference.
	#[cfg(test)]
	pub fn transport(&self) -> &Transport {
		&self.transport
	}

	/// Get mutable transport reference.
	pub fn transport_mut(&mut self) -> &mut Transport {
		&mut self.transport
	}

	/// Get executor reference.
	pub fn executor(&self) -> &Executor {
		&self.executor
	}

	/// Get mutable executor reference.
	pub fn executor_mut(&mut self) -> &mut Executor {
		&mut self.executor
	}

	/// Get job state.
	pub fn state(&self) -> JobSessionState {
		self.data.state
	}

	/// Get rejects.
	#[cfg(test)]
	pub fn rejects(&self) -> &BTreeMap<NodeId, bool> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);

		&self.data.active_data.as_ref()
			.expect("rejects is only called on master nodes after initialization; on master nodes active_data is filled during initialization; qed")
			.rejects
	}

	/// Get active requests.
	pub fn requests(&self) -> &BTreeSet<NodeId> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);

		&self.data.active_data.as_ref()
			.expect("requests is only called on master nodes after initialization; on master nodes active_data is filled during initialization; qed")
			.requests
	}

	/// Get responses.
	pub fn responses(&self) -> &BTreeMap<NodeId, Executor::PartialJobResponse> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);

		&self.data.active_data.as_ref()
			.expect("responses is only called on master nodes after initialization; on master nodes active_data is filled during initialization; qed")
|
|
||||||
.responses
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns true if enough responses are ready to compute result.
|
|
||||||
pub fn is_result_ready(&self) -> bool {
|
|
||||||
debug_assert!(self.meta.self_node_id == self.meta.master_node_id);
|
|
||||||
self.data.active_data.as_ref()
|
|
||||||
.expect("is_result_ready is only called on master nodes after initialization; on master nodes active_data is filled during initialization; qed")
|
|
||||||
.responses.len() >= self.meta.threshold + 1
|
|
||||||
}
|
|
||||||
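
	// `threshold + 1` is the magic number throughout this session: a threshold
	// of `t` means any `t + 1` participants suffice, which is also why
	// `initialize` below refuses to start with fewer than `threshold + 1` nodes.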

	/// Get job result.
	pub fn result(&self) -> Result<Executor::JobResponse, Error> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);

		if self.data.state != JobSessionState::Finished {
			return Err(Error::InvalidStateForRequest);
		}

		self.executor.compute_response(&self.data.active_data.as_ref()
			.expect("result is only called on master nodes; on master nodes active_data is filled during initialization; qed")
			.responses)
	}

	/// Initialize.
	pub fn initialize(&mut self, nodes: BTreeSet<NodeId>, self_response: Option<Executor::PartialJobResponse>, broadcast_self_response: bool) -> Result<Option<Executor::PartialJobResponse>, Error> {
		debug_assert!(self.meta.self_node_id == self.meta.master_node_id);

		if nodes.len() < self.meta.threshold + 1 {
			return Err(if self.meta.configured_nodes_count < self.meta.threshold + 1 {
				Error::ConsensusUnreachable
			} else {
				Error::ConsensusTemporaryUnreachable
			});
		}

		if self.data.state != JobSessionState::Inactive {
			return Err(Error::InvalidStateForRequest);
		}

		// result from self
		let active_data = ActiveJobSessionData {
			requests: nodes.clone(),
			rejects: BTreeMap::new(),
			responses: BTreeMap::new(),
		};
		let waits_for_self = active_data.requests.contains(&self.meta.self_node_id);
		let self_response = match self_response {
			Some(self_response) => Some(self_response),
			None if waits_for_self => {
				let partial_request = self.executor.prepare_partial_request(&self.meta.self_node_id, &active_data.requests)?;
				let self_response = self.executor.process_partial_request(partial_request)?;
				Some(self_response.take_response())
			},
			None => None,
		};

		// update state
		self.data.active_data = Some(active_data);
		self.data.state = JobSessionState::Active;

		// if we are waiting for a response from self => process it now
		if let Some(self_response) = self_response.clone() {
			let self_node_id = self.meta.self_node_id.clone();
			self.on_partial_response(&self_node_id, self_response)?;
		}

		// send requests to slave nodes. we only send requests if the session is still active.
		for node in nodes.iter().filter(|n| **n != self.meta.self_node_id) {
			if self.data.state == JobSessionState::Active {
				self.transport.send_partial_request(node, self.executor.prepare_partial_request(node, &nodes)?)?;
			}
			if broadcast_self_response {
				if let Some(self_response) = self_response.clone() {
					self.transport.send_partial_response(node, self_response)?;
				}
			}
		}

		Ok(self_response)
	}
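
	// Note the ordering above: `active_data` and the `Active` state are committed
	// before the self-response is processed, so a session with threshold 0 can
	// reach `Finished` before a single network request has been sent.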

	/// When a partial request is received by a slave node.
	pub fn on_partial_request(&mut self, node: &NodeId, request: Executor::PartialJobRequest) -> Result<JobPartialRequestAction<Executor::PartialJobResponse>, Error> {
		if node != &self.meta.master_node_id {
			return Err(Error::InvalidMessage);
		}
		if self.meta.self_node_id == self.meta.master_node_id {
			return Err(Error::InvalidMessage);
		}
		if self.data.state != JobSessionState::Inactive && self.data.state != JobSessionState::Finished {
			return Err(Error::InvalidStateForRequest);
		}

		let partial_request_action = self.executor.process_partial_request(request)?;
		let partial_response = match partial_request_action {
			JobPartialRequestAction::Respond(ref partial_response) => {
				self.data.state = JobSessionState::Finished;
				partial_response.clone()
			},
			JobPartialRequestAction::Reject(ref partial_response) => {
				self.data.state = JobSessionState::Failed;
				partial_response.clone()
			},
		};
		self.transport.send_partial_response(node, partial_response)?;
		Ok(partial_request_action)
	}
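
	// A slave moves to `Finished` after responding but still accepts further
	// requests from the master, which is why the state check above admits both
	// `Inactive` and `Finished`.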

	/// When a partial response is received by the master node.
	pub fn on_partial_response(&mut self, node: &NodeId, response: Executor::PartialJobResponse) -> Result<(), Error> {
		if self.meta.self_node_id != self.meta.master_node_id {
			return Err(Error::InvalidMessage);
		}
		if self.data.state != JobSessionState::Active && self.data.state != JobSessionState::Finished {
			return Err(Error::InvalidStateForRequest);
		}

		let active_data = self.data.active_data.as_mut()
			.expect("on_partial_response is only called on master nodes; on master nodes active_data is filled during initialization; qed");
		if !active_data.requests.remove(node) {
			return Err(Error::InvalidNodeForRequest);
		}

		match self.executor.check_partial_response(node, &response)? {
			JobPartialResponseAction::Ignore => Ok(()),
			JobPartialResponseAction::Reject => {
				// direct reject is always considered as fatal
				active_data.rejects.insert(node.clone(), true);
				if active_data.requests.len() + active_data.responses.len() >= self.meta.threshold + 1 {
					return Ok(());
				}

				self.data.state = JobSessionState::Failed;
				Err(consensus_unreachable(&active_data.rejects))
			},
			JobPartialResponseAction::Accept => {
				active_data.responses.insert(node.clone(), response);
				if active_data.responses.len() < self.meta.threshold + 1 {
					return Ok(());
				}

				self.data.state = JobSessionState::Finished;
				Ok(())
			},
		}
	}
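
	// A direct reject is recorded as fatal, yet the session only fails once the
	// pending requests plus accepted responses can no longer reach
	// `threshold + 1`; until then the master keeps waiting for the other nodes.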

	/// When an error from a node is received.
	pub fn on_node_error(&mut self, node: &NodeId, error: Error) -> Result<(), Error> {
		if self.meta.self_node_id != self.meta.master_node_id {
			if node != &self.meta.master_node_id {
				return Ok(());
			}

			self.data.state = JobSessionState::Failed;
			return Err(if !error.is_non_fatal() {
				Error::ConsensusUnreachable
			} else {
				Error::ConsensusTemporaryUnreachable
			});
		}

		if let Some(active_data) = self.data.active_data.as_mut() {
			if active_data.rejects.contains_key(node) {
				return Ok(());
			}
			if active_data.requests.remove(node) || active_data.responses.remove(node).is_some() {
				active_data.rejects.insert(node.clone(), !error.is_non_fatal());
				if self.data.state == JobSessionState::Finished && active_data.responses.len() < self.meta.threshold + 1 {
					self.data.state = JobSessionState::Active;
				}
				if active_data.requests.len() + active_data.responses.len() >= self.meta.threshold + 1 {
					return Ok(());
				}

				self.data.state = JobSessionState::Failed;
				return Err(consensus_unreachable(&active_data.rejects));
			}
		}

		Ok(())
	}
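
	// Slaves treat any error coming from the master as fatal for the session,
	// while the master demotes a `Finished` session back to `Active` if a failing
	// node drags the accepted-response count back below `threshold + 1`.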

	/// When the session times out.
	pub fn on_session_timeout(&mut self) -> Result<(), Error> {
		if self.data.state == JobSessionState::Finished || self.data.state == JobSessionState::Failed {
			return Ok(());
		}

		self.data.state = JobSessionState::Failed;
		// the session was started, so consensus is possible in theory; it has now failed with a timeout
		Err(Error::ConsensusTemporaryUnreachable)
	}
}

impl<PartialJobResponse> JobPartialRequestAction<PartialJobResponse> {
	/// Take actual response.
	pub fn take_response(self) -> PartialJobResponse {
		match self {
			JobPartialRequestAction::Respond(response) => response,
			JobPartialRequestAction::Reject(response) => response,
		}
	}
}

/// Returns the appropriate 'consensus unreachable' error.
fn consensus_unreachable(rejects: &BTreeMap<NodeId, bool>) -> Error {
	// when >= 50% of nodes have responded with a fatal reject => ConsensusUnreachable
	if rejects.values().filter(|r| **r).count() >= rejects.len() / 2 {
		Error::ConsensusUnreachable
	} else {
		Error::ConsensusTemporaryUnreachable
	}
}
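
// Worked example: with rejects {A: fatal, B: temporary, C: temporary} the fatal
// count is 1 and `rejects.len() / 2` is 1, so the failure is `ConsensusUnreachable`;
// with one fatal reject out of four, 1 < 2 and the failure is reported as temporary.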

#[cfg(test)]
pub mod tests {
	use std::collections::{VecDeque, BTreeMap, BTreeSet};
	use parking_lot::Mutex;
	use crypto::publickey::Public;
	use key_server_cluster::{Error, NodeId, SessionId, SessionMeta};
	use super::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor, JobTransport, JobSession, JobSessionState};

	pub struct SquaredSumJobExecutor;

	impl JobExecutor for SquaredSumJobExecutor {
		type PartialJobRequest = u32;
		type PartialJobResponse = u32;
		type JobResponse = u32;

		fn prepare_partial_request(&self, _n: &NodeId, _nodes: &BTreeSet<NodeId>) -> Result<u32, Error> { Ok(2) }
		fn process_partial_request(&mut self, r: u32) -> Result<JobPartialRequestAction<u32>, Error> { if r <= 10 { Ok(JobPartialRequestAction::Respond(r * r)) } else { Err(Error::InvalidMessage) } }
		fn check_partial_response(&mut self, _s: &NodeId, r: &u32) -> Result<JobPartialResponseAction, Error> { if r % 2 == 0 { Ok(JobPartialResponseAction::Accept) } else { Ok(JobPartialResponseAction::Reject) } }
		fn compute_response(&self, r: &BTreeMap<NodeId, u32>) -> Result<u32, Error> { Ok(r.values().fold(0, |v1, v2| v1 + v2)) }
	}
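
	// The test executor squares each request payload (the default request is 2,
	// so the expected partial response is 4) and accepts only even responses,
	// which is why tests use an odd response such as 3 to provoke a reject.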

	#[derive(Default)]
	pub struct DummyJobTransport<T, U> {
		pub requests: Mutex<VecDeque<(NodeId, T)>>,
		pub responses: Mutex<VecDeque<(NodeId, U)>>,
	}

	impl<T, U> DummyJobTransport<T, U> {
		pub fn is_empty_response(&self) -> bool {
			self.responses.lock().is_empty()
		}

		pub fn response(&self) -> (NodeId, U) {
			self.responses.lock().pop_front().unwrap()
		}
	}

	impl<T, U> JobTransport for DummyJobTransport<T, U> {
		type PartialJobRequest = T;
		type PartialJobResponse = U;

		fn send_partial_request(&self, node: &NodeId, request: T) -> Result<(), Error> { self.requests.lock().push_back((node.clone(), request)); Ok(()) }
		fn send_partial_response(&self, node: &NodeId, response: U) -> Result<(), Error> { self.responses.lock().push_back((node.clone(), response)); Ok(()) }
	}

	pub fn make_master_session_meta(threshold: usize) -> SessionMeta {
		SessionMeta { id: SessionId::from([1u8; 32]), master_node_id: NodeId::from_low_u64_be(1), self_node_id: NodeId::from_low_u64_be(1), threshold: threshold,
			configured_nodes_count: 5, connected_nodes_count: 5 }
	}

	pub fn make_slave_session_meta(threshold: usize) -> SessionMeta {
		SessionMeta { id: SessionId::from([1u8; 32]), master_node_id: NodeId::from_low_u64_be(1), self_node_id: NodeId::from_low_u64_be(2), threshold: threshold,
			configured_nodes_count: 5, connected_nodes_count: 5 }
	}

	#[test]
	fn job_initialize_fails_if_not_enough_nodes_for_threshold_total() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.meta.configured_nodes_count = 1;
		assert_eq!(job.initialize(vec![Public::from_low_u64_be(1)].into_iter().collect(), None, false).unwrap_err(), Error::ConsensusUnreachable);
	}

	#[test]
	fn job_initialize_fails_if_not_enough_nodes_for_threshold_connected() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.meta.connected_nodes_count = 3;
		assert_eq!(job.initialize(vec![Public::from_low_u64_be(1)].into_iter().collect(), None, false).unwrap_err(), Error::ConsensusTemporaryUnreachable);
	}

	#[test]
	fn job_initialize_fails_if_not_inactive() {
		let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(1)].into_iter().collect(), None, false).unwrap();
		assert_eq!(job.initialize(vec![Public::from_low_u64_be(1)].into_iter().collect(), None, false).unwrap_err(), Error::InvalidStateForRequest);
	}

	#[test]
	fn job_initialization_leads_to_finish_if_single_node_is_required() {
		let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(1)].into_iter().collect(), None, false).unwrap();
		assert_eq!(job.state(), JobSessionState::Finished);
		assert!(job.is_result_ready());
		assert_eq!(job.result(), Ok(4));
	}

	#[test]
	fn job_initialization_does_not_leads_to_finish_if_single_other_node_is_required() {
		let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(2)].into_iter().collect(), None, false).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
	}

	#[test]
	fn job_request_fails_if_comes_from_non_master_node() {
		let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		assert_eq!(job.on_partial_request(&NodeId::from_low_u64_be(3), 2).unwrap_err(), Error::InvalidMessage);
	}

	#[test]
	fn job_request_fails_if_comes_to_master_node() {
		let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		assert_eq!(job.on_partial_request(&NodeId::from_low_u64_be(1), 2).unwrap_err(), Error::InvalidMessage);
	}

	#[test]
	fn job_request_fails_if_comes_to_failed_state() {
		let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		job.on_session_timeout().unwrap_err();
		assert_eq!(job.on_partial_request(&NodeId::from_low_u64_be(1), 2).unwrap_err(), Error::InvalidStateForRequest);
	}

	#[test]
	fn job_request_succeeds_if_comes_to_finished_state() {
		let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		job.on_partial_request(&NodeId::from_low_u64_be(1), 2).unwrap();
		assert_eq!(job.transport().response(), (NodeId::from_low_u64_be(1), 4));
		assert_eq!(job.state(), JobSessionState::Finished);
		job.on_partial_request(&NodeId::from_low_u64_be(1), 3).unwrap();
		assert_eq!(job.transport().response(), (NodeId::from_low_u64_be(1), 9));
		assert_eq!(job.state(), JobSessionState::Finished);
	}

	#[test]
	fn job_response_fails_if_comes_to_slave_node() {
		let mut job = JobSession::new(make_slave_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		assert_eq!(job.on_partial_response(&NodeId::from_low_u64_be(1), 2).unwrap_err(), Error::InvalidMessage);
	}

	#[test]
	fn job_response_fails_if_comes_to_failed_state() {
		let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(2)].into_iter().collect(), None, false).unwrap();
		job.on_session_timeout().unwrap_err();
		assert_eq!(job.on_partial_response(&NodeId::from_low_u64_be(2), 2).unwrap_err(), Error::InvalidStateForRequest);
	}

	#[test]
	fn job_response_fails_if_comes_from_unknown_node() {
		let mut job = JobSession::new(make_master_session_meta(0), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(2)].into_iter().collect(), None, false).unwrap();
		assert_eq!(job.on_partial_response(&NodeId::from_low_u64_be(3), 2).unwrap_err(), Error::InvalidNodeForRequest);
	}

	#[test]
	fn job_response_leads_to_failure_if_too_few_nodes_left() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(1), Public::from_low_u64_be(2)].into_iter().collect(), None, false).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		assert_eq!(job.on_partial_response(&NodeId::from_low_u64_be(2), 3).unwrap_err(), Error::ConsensusUnreachable);
		assert_eq!(job.state(), JobSessionState::Failed);
	}

	#[test]
	fn job_response_succeeds() {
		let mut job = JobSession::new(make_master_session_meta(2), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(1), Public::from_low_u64_be(2), Public::from_low_u64_be(3)].into_iter().collect(), None, false).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		assert!(!job.is_result_ready());
		job.on_partial_response(&NodeId::from_low_u64_be(2), 2).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		assert!(!job.is_result_ready());
	}

	#[test]
	fn job_response_leads_to_finish() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(1), Public::from_low_u64_be(2)].into_iter().collect(), None, false).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		job.on_partial_response(&NodeId::from_low_u64_be(2), 2).unwrap();
		assert_eq!(job.state(), JobSessionState::Finished);
	}

	#[test]
	fn job_node_error_ignored_when_slave_disconnects_from_slave() {
		let mut job = JobSession::new(make_slave_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		assert_eq!(job.state(), JobSessionState::Inactive);
		job.on_node_error(&NodeId::from_low_u64_be(3), Error::AccessDenied).unwrap();
		assert_eq!(job.state(), JobSessionState::Inactive);
	}

	#[test]
	fn job_node_error_leads_to_fail_when_slave_disconnects_from_master() {
		let mut job = JobSession::new(make_slave_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		assert_eq!(job.state(), JobSessionState::Inactive);
		assert_eq!(job.on_node_error(&NodeId::from_low_u64_be(1), Error::AccessDenied).unwrap_err(), Error::ConsensusUnreachable);
		assert_eq!(job.state(), JobSessionState::Failed);
	}

	#[test]
	fn job_node_error_ignored_when_disconnects_from_rejected() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(1), Public::from_low_u64_be(2), Public::from_low_u64_be(3)].into_iter().collect(), None, false).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		job.on_partial_response(&NodeId::from_low_u64_be(2), 3).unwrap();
		job.on_node_error(&NodeId::from_low_u64_be(2), Error::AccessDenied).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
	}

	#[test]
	fn job_node_error_ignored_when_disconnects_from_unknown() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(1), Public::from_low_u64_be(2)].into_iter().collect(), None, false).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		job.on_node_error(&NodeId::from_low_u64_be(3), Error::AccessDenied).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
	}

	#[test]
	fn job_node_error_ignored_when_disconnects_from_requested_and_enough_nodes_left() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(1), Public::from_low_u64_be(2), Public::from_low_u64_be(3)].into_iter().collect(), None, false).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		job.on_node_error(&NodeId::from_low_u64_be(3), Error::AccessDenied).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
	}

	#[test]
	fn job_node_error_leads_to_fail_when_disconnects_from_requested_and_not_enough_nodes_left() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(1), Public::from_low_u64_be(2)].into_iter().collect(), None, false).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		assert_eq!(job.on_node_error(&NodeId::from_low_u64_be(2), Error::AccessDenied).unwrap_err(), Error::ConsensusUnreachable);
		assert_eq!(job.state(), JobSessionState::Failed);
	}

	#[test]
	fn job_broadcasts_self_response() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(1), Public::from_low_u64_be(2)].into_iter().collect(), None, true).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		assert_eq!(job.transport().response(), (NodeId::from_low_u64_be(2), 4));
	}

	#[test]
	fn job_does_not_broadcasts_self_response() {
		let mut job = JobSession::new(make_master_session_meta(1), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(1), Public::from_low_u64_be(2)].into_iter().collect(), None, false).unwrap();
		assert_eq!(job.state(), JobSessionState::Active);
		assert!(job.transport().is_empty_response());
	}

	#[test]
	fn job_fails_with_temp_error_if_more_than_half_nodes_respond_with_temp_error() {
		let mut job = JobSession::new(make_master_session_meta(2), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(1), Public::from_low_u64_be(2), Public::from_low_u64_be(3), Public::from_low_u64_be(4)].into_iter().collect(), None, false).unwrap();
		job.on_node_error(&NodeId::from_low_u64_be(2), Error::NodeDisconnected).unwrap();
		assert_eq!(job.on_node_error(&NodeId::from_low_u64_be(3), Error::NodeDisconnected).unwrap_err(), Error::ConsensusTemporaryUnreachable);
	}

	#[test]
	fn job_fails_with_temp_error_if_more_than_half_rejects_are_temp() {
		let mut job = JobSession::new(make_master_session_meta(2), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(1), Public::from_low_u64_be(2), Public::from_low_u64_be(3), Public::from_low_u64_be(4)].into_iter().collect(), None, false).unwrap();
		job.on_node_error(&NodeId::from_low_u64_be(2), Error::NodeDisconnected).unwrap();
		assert_eq!(job.on_node_error(&NodeId::from_low_u64_be(3), Error::NodeDisconnected).unwrap_err(), Error::ConsensusTemporaryUnreachable);
	}

	#[test]
	fn job_fails_if_more_than_half_rejects_are_non_temp() {
		let mut job = JobSession::new(make_master_session_meta(2), SquaredSumJobExecutor, DummyJobTransport::default());
		job.initialize(vec![Public::from_low_u64_be(1), Public::from_low_u64_be(2), Public::from_low_u64_be(3), Public::from_low_u64_be(4)].into_iter().collect(), None, false).unwrap();
		job.on_node_error(&NodeId::from_low_u64_be(2), Error::AccessDenied).unwrap();
		assert_eq!(job.on_node_error(&NodeId::from_low_u64_be(3), Error::AccessDenied).unwrap_err(), Error::ConsensusUnreachable);
	}

	#[test]
	fn job_fails_with_temp_error_when_temp_error_is_reported_by_master_node() {
		let mut job = JobSession::new(make_slave_session_meta(2), SquaredSumJobExecutor, DummyJobTransport::default());
		assert_eq!(job.on_node_error(&NodeId::from_low_u64_be(1), Error::NodeDisconnected).unwrap_err(), Error::ConsensusTemporaryUnreachable);
	}
}
@ -1,92 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::collections::{BTreeSet, BTreeMap};
use key_server_cluster::{Error, NodeId, SessionId, Requester, AclStorage};
use key_server_cluster::jobs::job_session::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor};

/// The purpose of this job is to construct the set of nodes that have agreed to provide access to the given key for the given requester.
pub struct KeyAccessJob {
	/// Key id.
	id: SessionId,
	/// Has key share?
	has_key_share: bool,
	/// ACL storage.
	acl_storage: Arc<dyn AclStorage>,
	/// Requester data.
	requester: Option<Requester>,
}

impl KeyAccessJob {
	pub fn new_on_slave(id: SessionId, acl_storage: Arc<dyn AclStorage>) -> Self {
		KeyAccessJob {
			id: id,
			has_key_share: true,
			acl_storage: acl_storage,
			requester: None,
		}
	}

	pub fn new_on_master(id: SessionId, acl_storage: Arc<dyn AclStorage>, requester: Requester) -> Self {
		KeyAccessJob {
			id: id,
			has_key_share: true,
			acl_storage: acl_storage,
			requester: Some(requester),
		}
	}

	pub fn set_has_key_share(&mut self, has_key_share: bool) {
		self.has_key_share = has_key_share;
	}

	pub fn set_requester(&mut self, requester: Requester) {
		self.requester = Some(requester);
	}

	pub fn requester(&self) -> Option<&Requester> {
		self.requester.as_ref()
	}
}

impl JobExecutor for KeyAccessJob {
	type PartialJobRequest = Requester;
	type PartialJobResponse = bool;
	type JobResponse = BTreeSet<NodeId>;

	fn prepare_partial_request(&self, _node: &NodeId, _nodes: &BTreeSet<NodeId>) -> Result<Requester, Error> {
		Ok(self.requester.as_ref().expect("prepare_partial_request is only called on master nodes; new_on_master fills the signature; qed").clone())
	}

	fn process_partial_request(&mut self, partial_request: Requester) -> Result<JobPartialRequestAction<bool>, Error> {
		if !self.has_key_share {
			return Ok(JobPartialRequestAction::Reject(false));
		}

		self.requester = Some(partial_request.clone());
		self.acl_storage.check(partial_request.address(&self.id).map_err(Error::InsufficientRequesterData)?, &self.id)
			.map(|is_confirmed| if is_confirmed { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) })
	}

	fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &bool) -> Result<JobPartialResponseAction, Error> {
		Ok(if *partial_response { JobPartialResponseAction::Accept } else { JobPartialResponseAction::Reject })
	}

	fn compute_response(&self, partial_responses: &BTreeMap<NodeId, bool>) -> Result<BTreeSet<NodeId>, Error> {
		Ok(partial_responses.keys().cloned().collect())
	}
}
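
// For this job a partial response is a bare `bool` (access granted or not), and
// the final response is simply the set of node ids whose responses were accepted.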
@ -1,25 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

pub mod consensus_session;
pub mod decryption_job;
pub mod dummy_job;
pub mod job_session;
pub mod key_access_job;
pub mod servers_set_change_access_job;
pub mod signing_job_ecdsa;
pub mod signing_job_schnorr;
pub mod unknown_sessions_job;
@ -1,149 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{BTreeSet, BTreeMap};
use crypto::publickey::{Public, Signature, recover};
use tiny_keccak::Keccak;
use key_server_cluster::{Error, NodeId, SessionId};
use key_server_cluster::message::{InitializeConsensusSessionWithServersSet, InitializeConsensusSessionOfShareAdd};
use key_server_cluster::jobs::job_session::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor};

/// The purpose of this job is to check whether the requester is the administrator of the SecretStore (i.e. has access to change the key servers set).
pub struct ServersSetChangeAccessJob {
	/// Servers set administrator public key (this could be changed to an ACL-based check later).
	administrator: Public,
	/// Old servers set.
	old_servers_set: Option<BTreeSet<NodeId>>,
	/// New servers set.
	new_servers_set: Option<BTreeSet<NodeId>>,
	/// Old servers set, signed by requester.
	old_set_signature: Option<Signature>,
	/// New servers set, signed by requester.
	new_set_signature: Option<Signature>,
}

/// Servers set change job partial request.
pub struct ServersSetChangeAccessRequest {
	/// Old servers set.
	pub old_servers_set: BTreeSet<NodeId>,
	/// New servers set.
	pub new_servers_set: BTreeSet<NodeId>,
	/// Hash(old_servers_set), signed by requester.
	pub old_set_signature: Signature,
	/// Hash(new_servers_set), signed by requester.
	pub new_set_signature: Signature,
}

impl<'a> From<&'a InitializeConsensusSessionWithServersSet> for ServersSetChangeAccessRequest {
	fn from(message: &InitializeConsensusSessionWithServersSet) -> Self {
		ServersSetChangeAccessRequest {
			old_servers_set: message.old_nodes_set.iter().cloned().map(Into::into).collect(),
			new_servers_set: message.new_nodes_set.iter().cloned().map(Into::into).collect(),
			old_set_signature: message.old_set_signature.clone().into(),
			new_set_signature: message.new_set_signature.clone().into(),
		}
	}
}

impl<'a> From<&'a InitializeConsensusSessionOfShareAdd> for ServersSetChangeAccessRequest {
	fn from(message: &InitializeConsensusSessionOfShareAdd) -> Self {
		ServersSetChangeAccessRequest {
			old_servers_set: message.old_nodes_set.iter().cloned().map(Into::into).collect(),
			new_servers_set: message.new_nodes_map.keys().cloned().map(Into::into).collect(),
			old_set_signature: message.old_set_signature.clone().into(),
			new_set_signature: message.new_set_signature.clone().into(),
		}
	}
}

impl ServersSetChangeAccessJob {
	pub fn new_on_slave(administrator: Public) -> Self {
		ServersSetChangeAccessJob {
			administrator: administrator,
			old_servers_set: None,
			new_servers_set: None,
			old_set_signature: None,
			new_set_signature: None,
		}
	}

	pub fn new_on_master(administrator: Public, old_servers_set: BTreeSet<NodeId>, new_servers_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Self {
		ServersSetChangeAccessJob {
			administrator: administrator,
			old_servers_set: Some(old_servers_set),
			new_servers_set: Some(new_servers_set),
			old_set_signature: Some(old_set_signature),
			new_set_signature: Some(new_set_signature),
		}
	}

	pub fn new_servers_set(&self) -> Option<&BTreeSet<NodeId>> {
		self.new_servers_set.as_ref()
	}
}

impl JobExecutor for ServersSetChangeAccessJob {
	type PartialJobRequest = ServersSetChangeAccessRequest;
	type PartialJobResponse = bool;
	type JobResponse = BTreeSet<NodeId>;

	fn prepare_partial_request(&self, _node: &NodeId, _nodes: &BTreeSet<NodeId>) -> Result<ServersSetChangeAccessRequest, Error> {
		let explanation = "prepare_partial_request is only called on master nodes; this field is filled on master nodes in constructor; qed";
		Ok(ServersSetChangeAccessRequest {
			old_servers_set: self.old_servers_set.clone().expect(explanation),
			new_servers_set: self.new_servers_set.clone().expect(explanation),
			old_set_signature: self.old_set_signature.clone().expect(explanation),
			new_set_signature: self.new_set_signature.clone().expect(explanation),
		})
	}

	fn process_partial_request(&mut self, partial_request: ServersSetChangeAccessRequest) -> Result<JobPartialRequestAction<bool>, Error> {
		let ServersSetChangeAccessRequest {
			old_servers_set,
			new_servers_set,
			old_set_signature,
			new_set_signature,
		} = partial_request;

		// check old and new servers set signatures
		let old_actual_public = recover(&old_set_signature, &ordered_nodes_hash(&old_servers_set).into())?;
		let new_actual_public = recover(&new_set_signature, &ordered_nodes_hash(&new_servers_set).into())?;
		let is_administrator = old_actual_public == self.administrator && new_actual_public == self.administrator;
		self.new_servers_set = Some(new_servers_set);

		Ok(if is_administrator { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) })
	}
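
	// Access is granted only when both the old- and the new-set hashes recover
	// to the configured administrator key, so a valid signature over one set
	// cannot be combined with an unsigned replacement for the other.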

	fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &bool) -> Result<JobPartialResponseAction, Error> {
		Ok(if *partial_response { JobPartialResponseAction::Accept } else { JobPartialResponseAction::Reject })
	}

	fn compute_response(&self, partial_responses: &BTreeMap<NodeId, bool>) -> Result<BTreeSet<NodeId>, Error> {
		Ok(partial_responses.keys().cloned().collect())
	}
}

pub fn ordered_nodes_hash(nodes: &BTreeSet<NodeId>) -> SessionId {
	let mut nodes_keccak = Keccak::new_keccak256();
	for node in nodes {
		nodes_keccak.update(node.as_bytes());
	}

	let mut nodes_keccak_value = [0u8; 32];
	nodes_keccak.finalize(&mut nodes_keccak_value);

	nodes_keccak_value.into()
}
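
// `BTreeSet` iterates its elements in sorted order, so every node hashes the
// ids in the same order and the resulting hash (and thus the signature checks)
// match across the cluster.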
@ -1,151 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{BTreeSet, BTreeMap};
use crypto::publickey::{Public, Secret, Signature};
use ethereum_types::H256;
use key_server_cluster::{Error, NodeId, DocumentKeyShare};
use key_server_cluster::math;
use key_server_cluster::jobs::job_session::{JobPartialRequestAction, JobPartialResponseAction, JobExecutor};

/// Signing job.
pub struct EcdsaSigningJob {
	/// Key share.
	key_share: DocumentKeyShare,
	/// Key version.
	key_version: H256,
	/// Share of inv(nonce).
	inv_nonce_share: Secret,
	/// Nonce public.
	nonce_public: Public,
	/// Request id.
	request_id: Option<Secret>,
	/// ECDSA inversed-nonce coefficient.
	inversed_nonce_coeff: Option<Secret>,
	/// Message hash.
	message_hash: Option<H256>,
}

/// Signing job partial request.
pub struct EcdsaPartialSigningRequest {
	/// Request id.
	pub id: Secret,
	/// ECDSA inversed-nonce coefficient.
	pub inversed_nonce_coeff: Secret,
	/// Message hash to sign.
	pub message_hash: H256,
}

/// Signing job partial response.
#[derive(Clone)]
pub struct EcdsaPartialSigningResponse {
	/// Request id.
	pub request_id: Secret,
	/// Partial signature's S share.
	pub partial_signature_s: Secret,
}

impl EcdsaSigningJob {
	pub fn new_on_slave(key_share: DocumentKeyShare, key_version: H256, nonce_public: Public, inv_nonce_share: Secret) -> Result<Self, Error> {
		Ok(EcdsaSigningJob {
			key_share: key_share,
			key_version: key_version,
			nonce_public: nonce_public,
			inv_nonce_share: inv_nonce_share,
			request_id: None,
			inversed_nonce_coeff: None,
			message_hash: None,
		})
	}

	pub fn new_on_master(key_share: DocumentKeyShare, key_version: H256, nonce_public: Public, inv_nonce_share: Secret, inversed_nonce_coeff: Secret, message_hash: H256) -> Result<Self, Error> {
		Ok(EcdsaSigningJob {
			key_share: key_share,
			key_version: key_version,
			nonce_public: nonce_public,
			inv_nonce_share: inv_nonce_share,
			request_id: Some(math::generate_random_scalar()?),
			inversed_nonce_coeff: Some(inversed_nonce_coeff),
			message_hash: Some(message_hash),
		})
	}
}

impl JobExecutor for EcdsaSigningJob {
	type PartialJobRequest = EcdsaPartialSigningRequest;
	type PartialJobResponse = EcdsaPartialSigningResponse;
	type JobResponse = Signature;

	fn prepare_partial_request(&self, _node: &NodeId, nodes: &BTreeSet<NodeId>) -> Result<EcdsaPartialSigningRequest, Error> {
		debug_assert!(nodes.len() == self.key_share.threshold * 2 + 1);

		let request_id = self.request_id.as_ref()
			.expect("prepare_partial_request is only called on master nodes; request_id is filled in constructor on master nodes; qed");
		let inversed_nonce_coeff = self.inversed_nonce_coeff.as_ref()
			.expect("prepare_partial_request is only called on master nodes; inversed_nonce_coeff is filled in constructor on master nodes; qed");
		let message_hash = self.message_hash.as_ref()
			.expect("prepare_partial_request is only called on master nodes; message_hash is filled in constructor on master nodes; qed");

		Ok(EcdsaPartialSigningRequest {
			id: request_id.clone(),
			inversed_nonce_coeff: inversed_nonce_coeff.clone(),
			message_hash: message_hash.clone(),
		})
	}

	fn process_partial_request(&mut self, partial_request: EcdsaPartialSigningRequest) -> Result<JobPartialRequestAction<EcdsaPartialSigningResponse>, Error> {
		let inversed_nonce_coeff_mul_nonce = math::compute_secret_mul(&partial_request.inversed_nonce_coeff, &self.inv_nonce_share)?;
		let key_version = self.key_share.version(&self.key_version)?;
		let signature_r = math::compute_ecdsa_r(&self.nonce_public)?;
		let inv_nonce_mul_secret = math::compute_secret_mul(&inversed_nonce_coeff_mul_nonce, &key_version.secret_share)?;
		let partial_signature_s = math::compute_ecdsa_s_share(
			&inversed_nonce_coeff_mul_nonce,
			&inv_nonce_mul_secret,
			&signature_r,
			&math::to_scalar(partial_request.message_hash)?,
		)?;

		Ok(JobPartialRequestAction::Respond(EcdsaPartialSigningResponse {
			request_id: partial_request.id,
			partial_signature_s: partial_signature_s,
		}))
	}
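
	// In standard ECDSA, s = k^-1 * (H(m) + r * secret). The two multiplications
	// above appear to build this node's shares of k^-1 and of k^-1 * secret,
	// which `compute_ecdsa_s_share` combines with r and the message hash into a
	// share of s (the exact share arithmetic lives in `math`).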

	fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &EcdsaPartialSigningResponse) -> Result<JobPartialResponseAction, Error> {
		if Some(&partial_response.request_id) != self.request_id.as_ref() {
			return Ok(JobPartialResponseAction::Ignore);
		}
		// TODO [Trust]: check_ecdsa_signature_share()

		Ok(JobPartialResponseAction::Accept)
	}

	fn compute_response(&self, partial_responses: &BTreeMap<NodeId, EcdsaPartialSigningResponse>) -> Result<Signature, Error> {
		let key_version = self.key_share.version(&self.key_version)?;
		if partial_responses.keys().any(|n| !key_version.id_numbers.contains_key(n)) {
			return Err(Error::InvalidMessage);
		}

		let id_numbers: Vec<_> = partial_responses.keys().map(|n| key_version.id_numbers[n].clone()).collect();
		let signature_s_shares: Vec<_> = partial_responses.values().map(|r| r.partial_signature_s.clone()).collect();
		let signature_s = math::compute_ecdsa_s(self.key_share.threshold, &signature_s_shares, &id_numbers)?;
		let signature_r = math::compute_ecdsa_r(&self.nonce_public)?;

		let signature = math::serialize_ecdsa_signature(&self.nonce_public, signature_r, signature_s);

		Ok(signature)
	}
}
@ -1,151 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{BTreeSet, BTreeMap};
use crypto::publickey::{Public, Secret};
use ethereum_types::H256;
use key_server_cluster::{Error, NodeId, DocumentKeyShare};
use key_server_cluster::math;
use key_server_cluster::jobs::job_session::{JobPartialRequestAction, JobPartialResponseAction, JobExecutor};

/// Signing job.
pub struct SchnorrSigningJob {
	/// This node id.
	self_node_id: NodeId,
	/// Key share.
	key_share: DocumentKeyShare,
	/// Key version.
	key_version: H256,
	/// Session public key.
	session_public: Public,
	/// Session secret coefficient.
	session_secret_coeff: Secret,
	/// Request id.
	request_id: Option<Secret>,
	/// Message hash.
	message_hash: Option<H256>,
}

/// Signing job partial request.
pub struct SchnorrPartialSigningRequest {
	/// Request id.
	pub id: Secret,
	/// Message hash.
	pub message_hash: H256,
	/// Ids of other nodes participating in signing.
	pub other_nodes_ids: BTreeSet<NodeId>,
}

/// Signing job partial response.
#[derive(Clone)]
pub struct SchnorrPartialSigningResponse {
	/// Request id.
	pub request_id: Secret,
	/// Partial signature.
	pub partial_signature: Secret,
}

impl SchnorrSigningJob {
	pub fn new_on_slave(self_node_id: NodeId, key_share: DocumentKeyShare, key_version: H256, session_public: Public, session_secret_coeff: Secret) -> Result<Self, Error> {
		Ok(SchnorrSigningJob {
			self_node_id: self_node_id,
			key_share: key_share,
			key_version: key_version,
			session_public: session_public,
			session_secret_coeff: session_secret_coeff,
			request_id: None,
			message_hash: None,
		})
	}

	pub fn new_on_master(self_node_id: NodeId, key_share: DocumentKeyShare, key_version: H256, session_public: Public, session_secret_coeff: Secret, message_hash: H256) -> Result<Self, Error> {
		Ok(SchnorrSigningJob {
			self_node_id: self_node_id,
			key_share: key_share,
			key_version: key_version,
			session_public: session_public,
			session_secret_coeff: session_secret_coeff,
			request_id: Some(math::generate_random_scalar()?),
			message_hash: Some(message_hash),
		})
	}
}

impl JobExecutor for SchnorrSigningJob {
	type PartialJobRequest = SchnorrPartialSigningRequest;
	type PartialJobResponse = SchnorrPartialSigningResponse;
	type JobResponse = (Secret, Secret);

	fn prepare_partial_request(&self, node: &NodeId, nodes: &BTreeSet<NodeId>) -> Result<SchnorrPartialSigningRequest, Error> {
		debug_assert!(nodes.len() == self.key_share.threshold + 1);

		let request_id = self.request_id.as_ref()
			.expect("prepare_partial_request is only called on master nodes; request_id is filled in constructor on master nodes; qed");
		let message_hash = self.message_hash.as_ref()
			.expect("prepare_partial_request is only called on master nodes; message_hash is filled in constructor on master nodes; qed");
		let mut other_nodes_ids = nodes.clone();
		other_nodes_ids.remove(node);

		Ok(SchnorrPartialSigningRequest {
			id: request_id.clone(),
			message_hash: message_hash.clone(),
			other_nodes_ids: other_nodes_ids,
		})
	}

	fn process_partial_request(&mut self, partial_request: SchnorrPartialSigningRequest) -> Result<JobPartialRequestAction<SchnorrPartialSigningResponse>, Error> {
		let key_version = self.key_share.version(&self.key_version)?;
		if partial_request.other_nodes_ids.len() != self.key_share.threshold
			|| partial_request.other_nodes_ids.contains(&self.self_node_id)
			|| partial_request.other_nodes_ids.iter().any(|n| !key_version.id_numbers.contains_key(n)) {
			return Err(Error::InvalidMessage);
		}

		let self_id_number = &key_version.id_numbers[&self.self_node_id];
		let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &key_version.id_numbers[n]);
		let combined_hash = math::combine_message_hash_with_public(&partial_request.message_hash, &self.session_public)?;
		Ok(JobPartialRequestAction::Respond(SchnorrPartialSigningResponse {
			request_id: partial_request.id,
			partial_signature: math::compute_schnorr_signature_share(
				self.key_share.threshold,
				&combined_hash,
				&self.session_secret_coeff,
				&key_version.secret_share,
				self_id_number,
				other_id_numbers
			)?,
		}))
	}
|
|
||||||
fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &SchnorrPartialSigningResponse) -> Result<JobPartialResponseAction, Error> {
|
|
||||||
if Some(&partial_response.request_id) != self.request_id.as_ref() {
|
|
||||||
return Ok(JobPartialResponseAction::Ignore);
|
|
||||||
}
|
|
||||||
// TODO [Trust]: check_schnorr_signature_share()
|
|
||||||
|
|
||||||
Ok(JobPartialResponseAction::Accept)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn compute_response(&self, partial_responses: &BTreeMap<NodeId, SchnorrPartialSigningResponse>) -> Result<(Secret, Secret), Error> {
|
|
||||||
let message_hash = self.message_hash.as_ref()
|
|
||||||
.expect("compute_response is only called on master nodes; message_hash is filed in constructor on master nodes; qed");
|
|
||||||
|
|
||||||
let signature_c = math::combine_message_hash_with_public(message_hash, &self.session_public)?;
|
|
||||||
let signature_s = math::compute_schnorr_signature(partial_responses.values().map(|r| &r.partial_signature))?;
|
|
||||||
|
|
||||||
Ok((signature_c, signature_s))
|
|
||||||
}
|
|
||||||
}
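
The request-id check in check_partial_response above is what lets the master drop partial signatures left over from an older or foreign signing attempt. A minimal self-contained sketch of that guard, with a u64 standing in for the Secret request id (a hypothetical simplification, not the crate's types):

#[derive(Debug, PartialEq)]
enum ResponseAction { Ignore, Accept }

struct Job { request_id: Option<u64> }

impl Job {
	// Mirrors check_partial_response: a mismatched id is silently ignored
	// rather than treated as a protocol error.
	fn check(&self, response_id: u64) -> ResponseAction {
		if Some(&response_id) != self.request_id.as_ref() {
			return ResponseAction::Ignore;
		}
		ResponseAction::Accept
	}
}

fn main() {
	let job = Job { request_id: Some(42) };
	assert_eq!(job.check(41), ResponseAction::Ignore);
	assert_eq!(job.check(42), ResponseAction::Accept);
}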
@ -1,80 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::collections::{BTreeSet, BTreeMap};
use key_server_cluster::{Error, NodeId, SessionId, KeyStorage};
use key_server_cluster::jobs::job_session::{JobPartialRequestAction, JobPartialResponseAction, JobExecutor};

/// Unknown sessions report job.
pub struct UnknownSessionsJob {
	/// Target node id.
	target_node_id: Option<NodeId>,
	/// Keys storage.
	key_storage: Arc<dyn KeyStorage>,
}

impl UnknownSessionsJob {
	pub fn new_on_slave(key_storage: Arc<dyn KeyStorage>) -> Self {
		UnknownSessionsJob {
			target_node_id: None,
			key_storage: key_storage,
		}
	}

	pub fn new_on_master(key_storage: Arc<dyn KeyStorage>, self_node_id: NodeId) -> Self {
		UnknownSessionsJob {
			target_node_id: Some(self_node_id),
			key_storage: key_storage,
		}
	}
}

impl JobExecutor for UnknownSessionsJob {
	type PartialJobRequest = NodeId;
	type PartialJobResponse = BTreeSet<SessionId>;
	type JobResponse = BTreeMap<SessionId, BTreeSet<NodeId>>;

	fn prepare_partial_request(&self, _node: &NodeId, _nodes: &BTreeSet<NodeId>) -> Result<NodeId, Error> {
		Ok(self.target_node_id.clone().expect("prepare_partial_request is only called on master nodes; this field is filled on master nodes in constructor; qed"))
	}

	fn process_partial_request(&mut self, partial_request: NodeId) -> Result<JobPartialRequestAction<BTreeSet<SessionId>>, Error> {
		Ok(JobPartialRequestAction::Respond(self.key_storage.iter()
			.filter(|&(_, ref key_share)| !key_share.versions.last().map(|v| v.id_numbers.contains_key(&partial_request)).unwrap_or(true))
			.map(|(id, _)| id.clone())
			.collect()))
	}

	fn check_partial_response(&mut self, _sender: &NodeId, _partial_response: &BTreeSet<SessionId>) -> Result<JobPartialResponseAction, Error> {
		Ok(JobPartialResponseAction::Accept)
	}

	// TODO [Opt]:
	// currently ALL unknown sessions are sent at once - it is better to limit messages by size/len => add partial-partial responses
	fn compute_response(&self, partial_responses: &BTreeMap<NodeId, BTreeSet<SessionId>>) -> Result<BTreeMap<SessionId, BTreeSet<NodeId>>, Error> {
		let mut result: BTreeMap<SessionId, BTreeSet<NodeId>> = BTreeMap::new();
		for (node_id, node_sessions) in partial_responses {
			for node_session in node_sessions {
				result.entry(node_session.clone())
					.or_insert_with(Default::default)
					.insert(node_id.clone());
			}
		}

		Ok(result)
	}
}
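
compute_response above is essentially a map inversion: per-node sets of unknown session ids become, per session, the set of nodes missing that key share. A standalone sketch of the same inversion with plain integer ids (a hypothetical simplification):

use std::collections::{BTreeMap, BTreeSet};

// Invert node -> {session} into session -> {node}.
fn invert(responses: &BTreeMap<u32, BTreeSet<u32>>) -> BTreeMap<u32, BTreeSet<u32>> {
	let mut result: BTreeMap<u32, BTreeSet<u32>> = BTreeMap::new();
	for (node, sessions) in responses {
		for session in sessions {
			result.entry(*session).or_insert_with(Default::default).insert(*node);
		}
	}
	result
}

fn main() {
	let mut responses: BTreeMap<u32, BTreeSet<u32>> = BTreeMap::new();
	responses.insert(1, vec![10, 20].into_iter().collect());
	responses.insert(2, vec![20].into_iter().collect());
	let by_session = invert(&responses);
	assert_eq!(by_session[&20].len(), 2); // session 20 is unknown to nodes 1 and 2
}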
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,85 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use super::types::ServerKeyId;

pub use super::blockchain::SigningKeyPair;
pub use super::types::{Error, NodeId, Requester, EncryptedDocumentKeyShadow};
pub use super::acl_storage::AclStorage;
pub use super::key_storage::{KeyStorage, DocumentKeyShare, DocumentKeyShareVersion};
pub use super::key_server_set::{is_migration_required, KeyServerSet, KeyServerSetSnapshot, KeyServerSetMigration};
pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic,
	SerializableRequester, SerializableMessageHash, SerializableAddress};
pub use self::cluster::{new_network_cluster, ClusterCore, ClusterConfiguration, ClusterClient};
pub use self::cluster_connections_net::NetConnectionsManagerConfig;
pub use self::cluster_sessions::{ClusterSession, ClusterSessionsListener, WaitableSession};
#[cfg(test)]
pub use self::cluster::tests::DummyClusterClient;

#[cfg(test)]
pub use super::node_key_pair::PlainNodeKeyPair;
#[cfg(test)]
pub use super::key_storage::tests::DummyKeyStorage;
pub use super::acl_storage::DummyAclStorage;
#[cfg(test)]
pub use super::key_server_set::tests::MapKeyServerSet;

pub type SessionId = ServerKeyId;

/// Session metadata.
#[derive(Debug, Clone)]
pub struct SessionMeta {
	/// Key id.
	pub id: SessionId,
	/// Id of the node which started this session.
	pub master_node_id: NodeId,
	/// Id of the node on which this session is running.
	pub self_node_id: NodeId,
	/// Session threshold.
	pub threshold: usize,
	/// Count of all configured key server nodes (valid at session start time).
	pub configured_nodes_count: usize,
	/// Count of all connected key server nodes (valid at session start time).
	pub connected_nodes_count: usize,
}

mod admin_sessions;
mod client_sessions;

pub use self::admin_sessions::key_version_negotiation_session;
pub use self::admin_sessions::servers_set_change_session;
pub use self::admin_sessions::share_add_session;
pub use self::admin_sessions::share_change_session;

pub use self::client_sessions::decryption_session;
pub use self::client_sessions::encryption_session;
pub use self::client_sessions::generation_session;
pub use self::client_sessions::signing_session_ecdsa;
pub use self::client_sessions::signing_session_schnorr;

mod cluster;
mod cluster_connections;
mod cluster_connections_net;
mod cluster_message_processor;
mod cluster_sessions;
mod cluster_sessions_creator;
mod connection_trigger;
mod connection_trigger_with_migration;
mod io;
mod jobs;
pub mod math;
mod message;
mod net;
@ -1,66 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::io;
use std::sync::Arc;
use std::net::SocketAddr;
use std::time::Duration;
use futures::{Future, Poll};
use tokio::net::TcpStream;
use blockchain::SigningKeyPair;
use key_server_cluster::Error;
use key_server_cluster::io::{accept_handshake, Handshake, Deadline, deadline};
use key_server_cluster::net::Connection;

/// Create future for accepting incoming connection.
pub fn accept_connection(stream: TcpStream, self_key_pair: Arc<dyn SigningKeyPair>) -> Deadline<AcceptConnection> {
	// TODO: This could fail so it would be better either to accept the
	// address as a separate argument or return a result.
	let address = stream.peer_addr().expect("Unable to determine tcp peer address");

	let accept = AcceptConnection {
		handshake: accept_handshake(stream, self_key_pair),
		address: address,
	};

	deadline(Duration::new(5, 0), accept).expect("Failed to create timeout")
}

/// Future for accepting incoming connection.
pub struct AcceptConnection {
	handshake: Handshake<TcpStream>,
	address: SocketAddr,
}

impl Future for AcceptConnection {
	type Item = Result<Connection, Error>;
	type Error = io::Error;

	fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
		let (stream, result) = try_ready!(self.handshake.poll());
		let result = match result {
			Ok(result) => result,
			Err(err) => return Ok(Err(err).into()),
		};
		let connection = Connection {
			stream: stream.into(),
			address: self.address,
			node_id: result.node_id,
			key: result.shared_key,
		};
		Ok(Ok(connection).into())
	}
}
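
The TODO above notes that peer_addr can fail, e.g. when the socket has already been closed. A hedged sketch of the variant it suggests, propagating the error instead of panicking (illustrated with the std stream type, not the crate's API):

use std::io;
use std::net::{SocketAddr, TcpStream};

// Return the peer-address error to the caller instead of .expect(...)-ing,
// so the accept loop can log and skip the connection.
fn peer_address(stream: &TcpStream) -> io::Result<SocketAddr> {
	stream.peer_addr()
}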
@ -1,90 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::collections::BTreeSet;
use std::io;
use std::time::Duration;
use std::net::SocketAddr;
use futures::{Future, Poll, Async};
use tokio::net::{TcpStream, tcp::ConnectFuture};
use blockchain::SigningKeyPair;
use key_server_cluster::{Error, NodeId};
use key_server_cluster::io::{handshake, Handshake, Deadline, deadline};
use key_server_cluster::net::Connection;

/// Create future for connecting to other node.
pub fn connect(address: &SocketAddr, self_key_pair: Arc<dyn SigningKeyPair>, trusted_nodes: BTreeSet<NodeId>) -> Deadline<Connect> {
	let connect = Connect {
		state: ConnectState::TcpConnect(TcpStream::connect(address)),
		address: address.clone(),
		self_key_pair: self_key_pair,
		trusted_nodes: trusted_nodes,
	};

	deadline(Duration::new(5, 0), connect).expect("Failed to create timeout")
}

enum ConnectState {
	TcpConnect(ConnectFuture),
	Handshake(Handshake<TcpStream>),
	Connected,
}

/// Future for connecting to other node.
pub struct Connect {
	state: ConnectState,
	address: SocketAddr,
	self_key_pair: Arc<dyn SigningKeyPair>,
	trusted_nodes: BTreeSet<NodeId>,
}

impl Future for Connect {
	type Item = Result<Connection, Error>;
	type Error = io::Error;

	fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
		let (next, result) = match self.state {
			ConnectState::TcpConnect(ref mut future) => {
				let stream = try_ready!(future.poll());
				let handshake = handshake(stream, self.self_key_pair.clone(), self.trusted_nodes.clone());
				(ConnectState::Handshake(handshake), Async::NotReady)
			},
			ConnectState::Handshake(ref mut future) => {
				let (stream, result) = try_ready!(future.poll());
				let result = match result {
					Ok(result) => result,
					Err(err) => return Ok(Async::Ready(Err(err))),
				};
				let connection = Connection {
					stream: stream.into(),
					address: self.address,
					node_id: result.node_id,
					key: result.shared_key,
				};
				(ConnectState::Connected, Async::Ready(Ok(connection)))
			},
			ConnectState::Connected => panic!("poll Connect after it's done"),
		};

		self.state = next;
		match result {
			// by polling again, we register new future
			Async::NotReady => self.poll(),
			result => Ok(result)
		}
	}
}
@ -1,32 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::net;
use crypto::publickey::KeyPair;
use key_server_cluster::NodeId;
use key_server_cluster::io::SharedTcpStream;

/// Established connection data
pub struct Connection {
	/// Peer address.
	pub address: net::SocketAddr,
	/// Connection stream.
	pub stream: SharedTcpStream,
	/// Peer node id.
	pub node_id: NodeId,
	/// Encryption key.
	pub key: KeyPair,
}
@ -1,23 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

mod accept_connection;
mod connect;
mod connection;

pub use self::accept_connection::{AcceptConnection, accept_connection};
pub use self::connect::{Connect, connect};
pub use self::connection::Connection;
@ -1,825 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::net::SocketAddr;
use std::collections::{BTreeMap, HashSet};
use parking_lot::Mutex;
use ethabi::FunctionOutputDecoder;
use ethereum_types::{H256, Address};
use crypto::publickey::public_to_address;
use bytes::Bytes;
use types::{Error, Public, NodeAddress, NodeId};
use blockchain::{SecretStoreChain, NewBlocksNotify, SigningKeyPair, ContractAddress, BlockId};

use_contract!(key_server, "res/key_server_set.json");

/// Name of KeyServerSet contract in registry.
const KEY_SERVER_SET_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_server_set";
/// Number of blocks (since latest new_set change) required before actually starting migration.
const MIGRATION_CONFIRMATIONS_REQUIRED: u64 = 5;
/// Number of blocks before the same-migration transaction (be it start or confirmation) will be retried.
const TRANSACTION_RETRY_INTERVAL_BLOCKS: u64 = 30;

#[derive(Default, Debug, Clone, PartialEq)]
/// Key Server Set state.
pub struct KeyServerSetSnapshot {
	/// Current set of key servers.
	pub current_set: BTreeMap<NodeId, SocketAddr>,
	/// New set of key servers.
	pub new_set: BTreeMap<NodeId, SocketAddr>,
	/// Current migration data.
	pub migration: Option<KeyServerSetMigration>,
}

#[derive(Default, Debug, Clone, PartialEq)]
/// Key server set migration.
pub struct KeyServerSetMigration {
	/// Migration id.
	pub id: H256,
	/// Migration set of key servers. It is the new_set at the moment of migration start.
	pub set: BTreeMap<NodeId, SocketAddr>,
	/// Master node of the migration process.
	pub master: NodeId,
	/// Is migration confirmed by this node?
	pub is_confirmed: bool,
}

/// Key Server Set
pub trait KeyServerSet: Send + Sync {
	/// Is this node currently isolated from the set?
	fn is_isolated(&self) -> bool;
	/// Get server set state.
	fn snapshot(&self) -> KeyServerSetSnapshot;
	/// Start migration.
	fn start_migration(&self, migration_id: H256);
	/// Confirm migration.
	fn confirm_migration(&self, migration_id: H256);
}

/// On-chain Key Server set implementation.
pub struct OnChainKeyServerSet {
	/// Cached on-chain contract.
	contract: Mutex<CachedContract>,
}

#[derive(Default, Debug, Clone, PartialEq)]
/// Non-finalized new_set.
struct FutureNewSet {
	/// New servers set.
	pub new_set: BTreeMap<NodeId, SocketAddr>,
	/// Hash of the block where this set first appeared.
	pub block: H256,
}

#[derive(Default, Debug, Clone, PartialEq)]
/// Migration-related transaction information.
struct PreviousMigrationTransaction {
	/// Migration id.
	pub migration_id: H256,
	/// Latest actual block number at the time this transaction was sent.
	pub block: u64,
}

/// Cached on-chain Key Server set contract.
struct CachedContract {
	/// Blockchain client.
	client: Arc<dyn SecretStoreChain>,
	/// Contract address source.
	contract_address_source: Option<ContractAddress>,
	/// Current contract address.
	contract_address: Option<Address>,
	/// Is auto-migrate enabled?
	auto_migrate_enabled: bool,
	/// Current contract state.
	snapshot: KeyServerSetSnapshot,
	/// Scheduled contract state (if any).
	future_new_set: Option<FutureNewSet>,
	/// Previous start migration transaction.
	start_migration_tx: Option<PreviousMigrationTransaction>,
	/// Previous confirm migration transaction.
	confirm_migration_tx: Option<PreviousMigrationTransaction>,
	/// This node key pair.
	self_key_pair: Arc<dyn SigningKeyPair>,
}

impl OnChainKeyServerSet {
	pub fn new(trusted_client: Arc<dyn SecretStoreChain>, contract_address_source: Option<ContractAddress>, self_key_pair: Arc<dyn SigningKeyPair>, auto_migrate_enabled: bool, key_servers: BTreeMap<Public, NodeAddress>) -> Result<Arc<Self>, Error> {
		let key_server_set = Arc::new(OnChainKeyServerSet {
			contract: Mutex::new(CachedContract::new(trusted_client.clone(), contract_address_source, self_key_pair, auto_migrate_enabled, key_servers)?),
		});
		trusted_client.add_listener(key_server_set.clone());
		Ok(key_server_set)
	}
}

impl KeyServerSet for OnChainKeyServerSet {
	fn is_isolated(&self) -> bool {
		self.contract.lock().is_isolated()
	}

	fn snapshot(&self) -> KeyServerSetSnapshot {
		self.contract.lock().snapshot()
	}

	fn start_migration(&self, migration_id: H256) {
		self.contract.lock().start_migration(migration_id)
	}

	fn confirm_migration(&self, migration_id: H256) {
		self.contract.lock().confirm_migration(migration_id);
	}
}

impl NewBlocksNotify for OnChainKeyServerSet {
	fn new_blocks(&self, _new_enacted_len: usize) {
		self.contract.lock().update()
	}
}

trait KeyServerSubset<F: Fn(Vec<u8>) -> Result<Vec<u8>, String>> {
	fn read_list(&self, f: &F) -> Result<Vec<Address>, String>;

	fn read_public(&self, address: Address, f: &F) -> Result<Bytes, String>;

	fn read_address(&self, address: Address, f: &F) -> Result<String, String>;
}

struct CurrentKeyServerSubset;

impl <F: Fn(Vec<u8>) -> Result<Vec<u8>, String>> KeyServerSubset<F> for CurrentKeyServerSubset {
	fn read_list(&self, f: &F) -> Result<Vec<Address>, String> {
		let (encoded, decoder) = key_server::functions::get_current_key_servers::call();
		decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
	}

	fn read_public(&self, address: Address, f: &F) -> Result<Bytes, String> {
		let (encoded, decoder) = key_server::functions::get_current_key_server_public::call(address);
		decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
	}

	fn read_address(&self, address: Address, f: &F) -> Result<String, String> {
		let (encoded, decoder) = key_server::functions::get_current_key_server_address::call(address);
		decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
	}
}

struct MigrationKeyServerSubset;

impl <F: Fn(Vec<u8>) -> Result<Vec<u8>, String>> KeyServerSubset<F> for MigrationKeyServerSubset {
	fn read_list(&self, f: &F) -> Result<Vec<Address>, String> {
		let (encoded, decoder) = key_server::functions::get_migration_key_servers::call();
		decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
	}

	fn read_public(&self, address: Address, f: &F) -> Result<Bytes, String> {
		let (encoded, decoder) = key_server::functions::get_migration_key_server_public::call(address);
		decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
	}

	fn read_address(&self, address: Address, f: &F) -> Result<String, String> {
		let (encoded, decoder) = key_server::functions::get_migration_key_server_address::call(address);
		decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
	}
}

struct NewKeyServerSubset;

impl <F: Fn(Vec<u8>) -> Result<Vec<u8>, String>> KeyServerSubset<F> for NewKeyServerSubset {
	fn read_list(&self, f: &F) -> Result<Vec<Address>, String> {
		let (encoded, decoder) = key_server::functions::get_new_key_servers::call();
		decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
	}

	fn read_public(&self, address: Address, f: &F) -> Result<Bytes, String> {
		let (encoded, decoder) = key_server::functions::get_new_key_server_public::call(address);
		decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
	}

	fn read_address(&self, address: Address, f: &F) -> Result<String, String> {
		let (encoded, decoder) = key_server::functions::get_new_key_server_address::call(address);
		decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
	}
}

impl CachedContract {
	pub fn new(client: Arc<dyn SecretStoreChain>, contract_address_source: Option<ContractAddress>, self_key_pair: Arc<dyn SigningKeyPair>, auto_migrate_enabled: bool, key_servers: BTreeMap<Public, NodeAddress>) -> Result<Self, Error> {
		let server_set = match contract_address_source.is_none() {
			true => key_servers.into_iter()
				.map(|(p, addr)| {
					let addr = format!("{}:{}", addr.address, addr.port).parse()
						.map_err(|err| Error::Internal(format!("error parsing node address: {}", err)))?;
					Ok((p, addr))
				})
				.collect::<Result<BTreeMap<_, _>, Error>>()?,
			false => Default::default(),
		};

		let mut contract = CachedContract {
			client: client,
			contract_address_source: contract_address_source,
			contract_address: None,
			auto_migrate_enabled: auto_migrate_enabled,
			future_new_set: None,
			confirm_migration_tx: None,
			start_migration_tx: None,
			snapshot: KeyServerSetSnapshot {
				current_set: server_set.clone(),
				new_set: server_set,
				..Default::default()
			},
			self_key_pair: self_key_pair,
		};
		contract.update_contract_address();

		Ok(contract)
	}

	pub fn update_contract_address(&mut self) {
		if let Some(ref contract_address_source) = self.contract_address_source {
			let contract_address = self.client.read_contract_address(
				KEY_SERVER_SET_CONTRACT_REGISTRY_NAME,
				contract_address_source
			);
			if contract_address != self.contract_address {
				trace!(target: "secretstore", "{}: Configuring for key server set contract from address {:?}",
					self.self_key_pair.public(), contract_address);

				self.contract_address = contract_address;
			}
		}
	}

	pub fn update(&mut self) {
		// no need to update when servers set is hardcoded
		if self.contract_address_source.is_none() {
			return;
		}

		if self.client.is_trusted() {
			// read new snapshot from registry
			self.update_contract_address();
			self.read_from_registry();

			// update number of confirmations (if there's future new set)
			self.update_number_of_confirmations_if_required();
		}
	}

	fn is_isolated(&self) -> bool {
		!self.snapshot.current_set.contains_key(self.self_key_pair.public())
	}

	fn snapshot(&self) -> KeyServerSetSnapshot {
		self.snapshot.clone()
	}

	fn start_migration(&mut self, migration_id: H256) {
		// trust is not needed here, because this is a reaction to a read from the trusted client
		if let Some(contract_address) = self.contract_address.as_ref() {
			// check if we need to send start migration transaction
			if !update_last_transaction_block(&*self.client, &migration_id, &mut self.start_migration_tx) {
				return;
			}

			// prepare transaction data
			let transaction_data = key_server::functions::start_migration::encode_input(migration_id);

			// send transaction
			match self.client.transact_contract(*contract_address, transaction_data) {
				Ok(_) => trace!(target: "secretstore_net", "{}: sent auto-migration start transaction",
					self.self_key_pair.public()),
				Err(error) => warn!(target: "secretstore_net", "{}: failed to submit auto-migration start transaction: {}",
					self.self_key_pair.public(), error),
			}
		}
	}

	fn confirm_migration(&mut self, migration_id: H256) {
		// trust is not needed here, because we have already completed the action
		if let (true, Some(contract_address)) = (self.client.is_trusted(), self.contract_address) {
			// check if we need to send confirm migration transaction
			if !update_last_transaction_block(&*self.client, &migration_id, &mut self.confirm_migration_tx) {
				return;
			}

			// prepare transaction data
			let transaction_data = key_server::functions::confirm_migration::encode_input(migration_id);

			// send transaction
			match self.client.transact_contract(contract_address, transaction_data) {
				Ok(_) => trace!(target: "secretstore_net", "{}: sent auto-migration confirm transaction",
					self.self_key_pair.public()),
				Err(error) => warn!(target: "secretstore_net", "{}: failed to submit auto-migration confirmation transaction: {}",
					self.self_key_pair.public(), error),
			}
		}
	}

	fn read_from_registry(&mut self) {
		let contract_address = match self.contract_address {
			Some(contract_address) => contract_address,
			None => {
				// no contract installed => empty snapshot
				// WARNING: after a restart, current_set will be reset to the set from the configuration file,
				// even though we have reset it to the empty set here. We do not consider this an issue,
				// because handling it is the administrator's responsibility.
				self.snapshot = Default::default();
				self.future_new_set = None;
				return;
			},
		};

		let do_call = |data| self.client.call_contract(BlockId::Latest, contract_address, data);

		let current_set = Self::read_key_server_set(CurrentKeyServerSubset, &do_call);

		// read migration-related data if auto migration is enabled
		let (new_set, migration) = match self.auto_migrate_enabled {
			true => {
				let new_set = Self::read_key_server_set(NewKeyServerSubset, &do_call);
				let migration_set = Self::read_key_server_set(MigrationKeyServerSubset, &do_call);

				let migration_id = match migration_set.is_empty() {
					false => {
						let (encoded, decoder) = key_server::functions::get_migration_id::call();
						do_call(encoded)
							.map_err(|e| e.to_string())
							.and_then(|data| decoder.decode(&data).map_err(|e| e.to_string()))
							.map_err(|err| { trace!(target: "secretstore", "Error {} reading migration id from contract", err); err })
							.ok()
					},
					true => None,
				};

				let migration_master = match migration_set.is_empty() {
					false => {
						let (encoded, decoder) = key_server::functions::get_migration_master::call();
						do_call(encoded)
							.map_err(|e| e.to_string())
							.and_then(|data| decoder.decode(&data).map_err(|e| e.to_string()))
							.map_err(|err| { trace!(target: "secretstore", "Error {} reading migration master from contract", err); err })
							.ok()
							.and_then(|address| current_set.keys().chain(migration_set.keys())
								.find(|public| public_to_address(public) == address)
								.cloned())
					},
					true => None,
				};

				let is_migration_confirmed = match migration_set.is_empty() {
					false if current_set.contains_key(self.self_key_pair.public()) || migration_set.contains_key(self.self_key_pair.public()) => {
						let (encoded, decoder) = key_server::functions::is_migration_confirmed::call(self.self_key_pair.address());
						do_call(encoded)
							.map_err(|e| e.to_string())
							.and_then(|data| decoder.decode(&data).map_err(|e| e.to_string()))
							.map_err(|err| { trace!(target: "secretstore", "Error {} reading migration confirmation from contract", err); err })
							.ok()
					},
					_ => None,
				};

				let migration = match (migration_set.is_empty(), migration_id, migration_master, is_migration_confirmed) {
					(false, Some(migration_id), Some(migration_master), Some(is_migration_confirmed)) =>
						Some(KeyServerSetMigration {
							id: migration_id,
							master: migration_master,
							set: migration_set,
							is_confirmed: is_migration_confirmed,
						}),
					_ => None,
				};

				(new_set, migration)
			}
			false => (current_set.clone(), None),
		};

		let mut new_snapshot = KeyServerSetSnapshot {
			current_set: current_set,
			new_set: new_set,
			migration: migration,
		};

		// we might want to adjust new_set if auto migration is enabled
		if self.auto_migrate_enabled {
			let block = self.client.block_hash(BlockId::Latest).unwrap_or_default();
			update_future_set(&mut self.future_new_set, &mut new_snapshot, block);
		}

		self.snapshot = new_snapshot;
	}

	fn read_key_server_set<T, F>(subset: T, do_call: F) -> BTreeMap<Public, SocketAddr>
		where
			T: KeyServerSubset<F>,
			F: Fn(Vec<u8>) -> Result<Vec<u8>, String> {
		let mut key_servers = BTreeMap::new();
		let mut key_servers_addresses = HashSet::new();
		let key_servers_list = subset.read_list(&do_call)
			.map_err(|err| { warn!(target: "secretstore_net", "error {} reading list of key servers from contract", err); err })
			.unwrap_or_default();
		for key_server in key_servers_list {
			let key_server_public = subset.read_public(key_server, &do_call)
				.and_then(|p| if p.len() == 64 { Ok(Public::from_slice(&p)) } else { Err(format!("Invalid public length {}", p.len())) });
			let key_server_address: Result<SocketAddr, _> = subset.read_address(key_server, &do_call)
				.and_then(|a| a.parse().map_err(|e| format!("Invalid ip address: {}", e)));

			// only add successfully parsed nodes
			match (key_server_public, key_server_address) {
				(Ok(key_server_public), Ok(key_server_address)) => {
					if !key_servers_addresses.insert(key_server_address.clone()) {
						warn!(target: "secretstore_net", "the same address ({}) is specified twice in the key server set contract. Ignoring server {}",
							key_server_address, key_server_public);
						continue;
					}

					key_servers.insert(key_server_public, key_server_address);
				},
				(Err(public_err), _) => warn!(target: "secretstore_net", "received invalid public from key server set contract: {}", public_err),
				(_, Err(ip_err)) => warn!(target: "secretstore_net", "received invalid IP from key server set contract: {}", ip_err),
			}
		}
		key_servers
	}

	fn update_number_of_confirmations_if_required(&mut self) {
		if !self.auto_migrate_enabled {
			return;
		}
		let client = &*self.client;
		update_number_of_confirmations(
			&|| latest_block_hash(client),
			&|block| block_confirmations(client, block),
			&mut self.future_new_set, &mut self.snapshot);
	}
}

/// Check if two sets are equal (in terms of migration requirements). We do not need migration if only
/// addresses are changed - simply adjusting connections is enough in this case.
pub fn is_migration_required(current_set: &BTreeMap<NodeId, SocketAddr>, new_set: &BTreeMap<NodeId, SocketAddr>) -> bool {
	let no_nodes_removed = current_set.keys().all(|n| new_set.contains_key(n));
	let no_nodes_added = new_set.keys().all(|n| current_set.contains_key(n));
	!no_nodes_removed || !no_nodes_added
}
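
// [Editor's sketch] Because migration only moves key shares between nodes, the
// predicate above deliberately ignores address changes: only a change in the
// set of node ids triggers it. A self-contained illustration with integer ids
// and string addresses standing in for NodeId and SocketAddr (hypothetical
// simplification, not part of the original file):
#[allow(dead_code)]
fn demo_is_migration_required() {
	use std::collections::BTreeMap;
	fn is_required(current: &BTreeMap<u32, &str>, new: &BTreeMap<u32, &str>) -> bool {
		let no_nodes_removed = current.keys().all(|n| new.contains_key(n));
		let no_nodes_added = new.keys().all(|n| current.contains_key(n));
		!no_nodes_removed || !no_nodes_added
	}

	let current: BTreeMap<u32, &str> = vec![(1, "10.0.0.1:8083"), (2, "10.0.0.2:8083")].into_iter().collect();
	// same ids, new addresses: adjusting connections is enough
	let readdressed: BTreeMap<u32, &str> = vec![(1, "10.1.0.1:8083"), (2, "10.1.0.2:8083")].into_iter().collect();
	assert!(!is_required(&current, &readdressed));
	// a third node joins: key shares must be migrated
	let grown: BTreeMap<u32, &str> = vec![(1, "10.0.0.1:8083"), (2, "10.0.0.2:8083"), (3, "10.0.0.3:8083")].into_iter().collect();
	assert!(is_required(&current, &grown));
}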

fn update_future_set(future_new_set: &mut Option<FutureNewSet>, new_snapshot: &mut KeyServerSetSnapshot, block: H256) {
	// migration has already started => no need to delay visibility
	if new_snapshot.migration.is_some() {
		*future_new_set = None;
		return;
	}

	// no migration is required => no need to delay visibility
	if !is_migration_required(&new_snapshot.current_set, &new_snapshot.new_set) {
		*future_new_set = None;
		return;
	}

	// when auto-migrate is enabled, we do not want to start migration right after new_set is changed, because:
	// 1) there could be a fork && we could start migration to a forked version (and potentially lose secrets)
	// 2) there must be some period for new_set changes to finalize (i.e. adding/removing more servers)
	let mut new_set = new_snapshot.current_set.clone();
	::std::mem::swap(&mut new_set, &mut new_snapshot.new_set);

	// if nothing has changed in future_new_set, then we want to preserve the previous block hash
	let block = match Some(&new_set) == future_new_set.as_ref().map(|f| &f.new_set) {
		true => future_new_set.as_ref().map(|f| &f.block).cloned().unwrap_or_else(|| block),
		false => block,
	};

	*future_new_set = Some(FutureNewSet {
		new_set: new_set,
		block: block,
	});
}
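
// [Editor's sketch] The block handling above implements a simple rule: the
// confirmation countdown for a pending new_set restarts only when the set
// itself changes; while it stays the same, the first-seen block is preserved.
// A reduced illustration of that rule with simplified types:
#[allow(dead_code)]
fn demo_first_seen_block_is_preserved() {
	// prev is the previously scheduled (set, first_seen_block), if any
	fn effective_block(prev: Option<(&str, u64)>, new_set: &str, current_block: u64) -> u64 {
		match prev {
			Some((set, block)) if set == new_set => block, // unchanged set: keep counting
			_ => current_block, // new or changed set: restart the countdown
		}
	}

	assert_eq!(effective_block(Some(("set-a", 100)), "set-a", 105), 100);
	assert_eq!(effective_block(Some(("set-a", 100)), "set-b", 105), 105);
	assert_eq!(effective_block(None, "set-a", 105), 105);
}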

fn update_number_of_confirmations<F1: Fn() -> H256, F2: Fn(H256) -> Option<u64>>(latest_block: &F1, confirmations: &F2, future_new_set: &mut Option<FutureNewSet>, snapshot: &mut KeyServerSetSnapshot) {
	match future_new_set.as_mut() {
		// no future new set is scheduled => do nothing
		None => return,
		// else we should calculate number of confirmations for future new set
		Some(future_new_set) => match confirmations(future_new_set.block.clone()) {
			// we have enough confirmations => should move new_set from future to snapshot
			Some(confirmations) if confirmations >= MIGRATION_CONFIRMATIONS_REQUIRED => (),
			// not enough confirmations => do nothing
			Some(_) => return,
			// if the number of confirmations is None, then a reorg has happened && we need to reset the block
			// (some more intelligent strategy is possible, but let's stick to the simplest one)
			None => {
				future_new_set.block = latest_block();
				return;
			}
		}
	}

	let future_new_set = future_new_set.take()
		.expect("we only pass through match above when future_new_set is some; qed");
	snapshot.new_set = future_new_set.new_set;
}

fn update_last_transaction_block(client: &dyn SecretStoreChain, migration_id: &H256, previous_transaction: &mut Option<PreviousMigrationTransaction>) -> bool {
	let last_block = client.block_number(BlockId::Latest).unwrap_or_default();
	match previous_transaction.as_ref() {
		// no previous transaction => send immediately
		None => (),
		// previous transaction has been sent for another migration process => send immediately
		Some(tx) if tx.migration_id != *migration_id => (),
		// if we have sent the same type of transaction recently => do nothing (hope it will be mined eventually)
		// if we have sent the same transaction some time ago =>
		//   assume that our tx queue was full
		//   or we didn't have enough eth for this tx
		//   or the transaction has been removed from the queue (and never reached any miner node)
		// if we have restarted after sending the tx => assume we have never sent it
		Some(tx) => {
			if tx.block > last_block || last_block - tx.block < TRANSACTION_RETRY_INTERVAL_BLOCKS {
				return false;
			}
		},
	}

	*previous_transaction = Some(PreviousMigrationTransaction {
		migration_id: migration_id.clone(),
		block: last_block,
	});

	true
}
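
// [Editor's sketch] The retry decision above reduces to a window check: resend
// if nothing was sent for this migration id yet, or if at least
// TRANSACTION_RETRY_INTERVAL_BLOCKS blocks passed since the last attempt (the
// tx.block > last_block case covers a stale cached block after restart). A
// standalone illustration of just the window arithmetic:
#[allow(dead_code)]
fn demo_retry_window() {
	const RETRY_INTERVAL: u64 = 30;
	fn should_resend(last_sent_block: Option<u64>, last_block: u64) -> bool {
		match last_sent_block {
			None => true,
			Some(sent) => sent <= last_block && last_block - sent >= RETRY_INTERVAL,
		}
	}

	assert!(should_resend(None, 1000)); // never sent: send now
	assert!(!should_resend(Some(990), 1000)); // sent 10 blocks ago: keep waiting
	assert!(should_resend(Some(970), 1000)); // sent 30 blocks ago: retry
}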

fn latest_block_hash(client: &dyn SecretStoreChain) -> H256 {
	client.block_hash(BlockId::Latest).unwrap_or_default()
}

fn block_confirmations(client: &dyn SecretStoreChain, block: H256) -> Option<u64> {
	client.block_number(BlockId::Hash(block))
		.and_then(|block| client.block_number(BlockId::Latest).map(|last_block| (block, last_block)))
		.map(|(block, last_block)| last_block - block)
}
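
// [Editor's sketch] block_confirmations counts confirmations as the distance
// from the block to the chain head and yields None when the block has been
// reorganized away; that None is what makes update_number_of_confirmations
// reset its countdown. The arithmetic in isolation:
#[allow(dead_code)]
fn demo_confirmations() {
	// None for either input means the block is unknown (e.g. reorged away)
	fn confirmations(block_number: Option<u64>, latest: Option<u64>) -> Option<u64> {
		block_number.and_then(|b| latest.map(|l| l - b))
	}

	assert_eq!(confirmations(Some(95), Some(100)), Some(5));
	assert_eq!(confirmations(None, Some(100)), None);
}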

#[cfg(test)]
pub mod tests {
	use std::collections::BTreeMap;
	use std::net::SocketAddr;
	use ethereum_types::{H256, H512};
	use crypto::publickey::Public;
	use super::{update_future_set, update_number_of_confirmations, FutureNewSet,
		KeyServerSet, KeyServerSetSnapshot, MIGRATION_CONFIRMATIONS_REQUIRED};

	#[derive(Default)]
	pub struct MapKeyServerSet {
		is_isolated: bool,
		nodes: BTreeMap<Public, SocketAddr>,
	}

	impl MapKeyServerSet {
		pub fn new(is_isolated: bool, nodes: BTreeMap<Public, SocketAddr>) -> Self {
			MapKeyServerSet {
				is_isolated: is_isolated,
				nodes: nodes,
			}
		}
	}

	impl KeyServerSet for MapKeyServerSet {
		fn is_isolated(&self) -> bool {
			self.is_isolated
		}

		fn snapshot(&self) -> KeyServerSetSnapshot {
			KeyServerSetSnapshot {
				current_set: self.nodes.clone(),
				new_set: self.nodes.clone(),
				..Default::default()
			}
		}

		fn start_migration(&self, _migration_id: H256) {
			unimplemented!("test-only")
		}

		fn confirm_migration(&self, _migration_id: H256) {
			unimplemented!("test-only")
		}
	}

	#[test]
	fn future_set_is_updated_to_none_when_migration_has_already_started() {
		let mut future_new_set = Some(Default::default());
		let mut new_snapshot = KeyServerSetSnapshot {
			migration: Some(Default::default()),
			..Default::default()
		};
		let new_snapshot_copy = new_snapshot.clone();
		update_future_set(&mut future_new_set, &mut new_snapshot, Default::default());
		assert_eq!(future_new_set, None);
		assert_eq!(new_snapshot, new_snapshot_copy);
	}

	#[test]
	fn future_set_is_updated_to_none_when_no_migration_is_required() {
		let node_id = Default::default();
		let address1 = "127.0.0.1:12000".parse().unwrap();
		let address2 = "127.0.0.1:12001".parse().unwrap();

		// addresses are different, but the node set is the same => no migration is required
		let mut future_new_set = Some(Default::default());
		let mut new_snapshot = KeyServerSetSnapshot {
			current_set: vec![(node_id, address1)].into_iter().collect(),
			new_set: vec![(node_id, address2)].into_iter().collect(),
			..Default::default()
		};
		let new_snapshot_copy = new_snapshot.clone();
		update_future_set(&mut future_new_set, &mut new_snapshot, Default::default());
		assert_eq!(future_new_set, None);
		assert_eq!(new_snapshot, new_snapshot_copy);

		// everything is the same => no migration is required
		let mut future_new_set = Some(Default::default());
		let mut new_snapshot = KeyServerSetSnapshot {
			current_set: vec![(node_id, address1)].into_iter().collect(),
			new_set: vec![(node_id, address1)].into_iter().collect(),
			..Default::default()
		};
		let new_snapshot_copy = new_snapshot.clone();
		update_future_set(&mut future_new_set, &mut new_snapshot, Default::default());
		assert_eq!(future_new_set, None);
		assert_eq!(new_snapshot, new_snapshot_copy);
	}

	#[test]
	fn future_set_is_initialized() {
		let address = "127.0.0.1:12000".parse().unwrap();

		let mut future_new_set = None;
		let mut new_snapshot = KeyServerSetSnapshot {
			current_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			new_set: vec![(H512::from_low_u64_be(2), address)].into_iter().collect(),
			..Default::default()
		};
		update_future_set(&mut future_new_set, &mut new_snapshot, Default::default());
		assert_eq!(future_new_set, Some(FutureNewSet {
			new_set: vec![(H512::from_low_u64_be(2), address)].into_iter().collect(),
			block: Default::default(),
		}));
		assert_eq!(new_snapshot, KeyServerSetSnapshot {
			current_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			new_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			..Default::default()
		});
	}

	#[test]
	fn future_set_is_updated_when_set_differs() {
		let address = "127.0.0.1:12000".parse().unwrap();

		let mut future_new_set = Some(FutureNewSet {
			new_set: vec![(H512::from_low_u64_be(2), address)].into_iter().collect(),
			block: Default::default(),
		});
		let mut new_snapshot = KeyServerSetSnapshot {
			current_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			new_set: vec![(H512::from_low_u64_be(3), address)].into_iter().collect(),
			..Default::default()
		};
		update_future_set(&mut future_new_set, &mut new_snapshot, H256::from_low_u64_be(1));
		assert_eq!(future_new_set, Some(FutureNewSet {
			new_set: vec![(H512::from_low_u64_be(3), address)].into_iter().collect(),
			block: H256::from_low_u64_be(1),
		}));
		assert_eq!(new_snapshot, KeyServerSetSnapshot {
			current_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			new_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			..Default::default()
		});
	}

	#[test]
	fn future_set_is_not_updated_when_set_is_the_same() {
		let address = "127.0.0.1:12000".parse().unwrap();

		let mut future_new_set = Some(FutureNewSet {
			new_set: vec![(H512::from_low_u64_be(2), address)].into_iter().collect(),
			block: Default::default(),
		});
		let mut new_snapshot = KeyServerSetSnapshot {
			current_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			new_set: vec![(H512::from_low_u64_be(2), address)].into_iter().collect(),
			..Default::default()
		};
		update_future_set(&mut future_new_set, &mut new_snapshot, H256::from_low_u64_be(1));
		assert_eq!(future_new_set, Some(FutureNewSet {
			new_set: vec![(H512::from_low_u64_be(2), address)].into_iter().collect(),
			block: Default::default(),
		}));
		assert_eq!(new_snapshot, KeyServerSetSnapshot {
			current_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			new_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			..Default::default()
		});
	}

	#[test]
	fn when_updating_confirmations_nothing_is_changed_if_no_future_set() {
		let address = "127.0.0.1:12000".parse().unwrap();

		let mut future_new_set = None;
		let mut snapshot = KeyServerSetSnapshot {
			current_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			new_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			..Default::default()
		};
		let snapshot_copy = snapshot.clone();
		update_number_of_confirmations(
			&|| H256::from_low_u64_be(1),
			&|_| Some(MIGRATION_CONFIRMATIONS_REQUIRED),
			&mut future_new_set, &mut snapshot);
		assert_eq!(future_new_set, None);
		assert_eq!(snapshot, snapshot_copy);
	}

	#[test]
	fn when_updating_confirmations_migration_is_scheduled() {
		let address = "127.0.0.1:12000".parse().unwrap();

		let mut future_new_set = Some(FutureNewSet {
			new_set: vec![(H512::from_low_u64_be(2), address)].into_iter().collect(),
			block: Default::default(),
		});
		let mut snapshot = KeyServerSetSnapshot {
			current_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			new_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			..Default::default()
		};
		update_number_of_confirmations(
			&|| H256::from_low_u64_be(1),
			&|_| Some(MIGRATION_CONFIRMATIONS_REQUIRED),
			&mut future_new_set, &mut snapshot);
		assert_eq!(future_new_set, None);
		assert_eq!(snapshot, KeyServerSetSnapshot {
			current_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			new_set: vec![(H512::from_low_u64_be(2), address)].into_iter().collect(),
			..Default::default()
		});
	}

	#[test]
	fn when_updating_confirmations_migration_is_not_scheduled_when_not_enough_confirmations() {
		let address = "127.0.0.1:12000".parse().unwrap();

		let mut future_new_set = Some(FutureNewSet {
			new_set: vec![(H512::from_low_u64_be(2), address)].into_iter().collect(),
			block: Default::default(),
		});
		let mut snapshot = KeyServerSetSnapshot {
			current_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			new_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			..Default::default()
		};
		let future_new_set_copy = future_new_set.clone();
		let snapshot_copy = snapshot.clone();
		update_number_of_confirmations(
			&|| H256::from_low_u64_be(1),
			&|_| Some(MIGRATION_CONFIRMATIONS_REQUIRED - 1),
			&mut future_new_set, &mut snapshot);
		assert_eq!(future_new_set, future_new_set_copy);
		assert_eq!(snapshot, snapshot_copy);
	}

	#[test]
	fn when_updating_confirmations_migration_is_reset_when_reorganized() {
		let address = "127.0.0.1:12000".parse().unwrap();

		let mut future_new_set = Some(FutureNewSet {
			new_set: vec![(H512::from_low_u64_be(2), address)].into_iter().collect(),
			block: H256::from_low_u64_be(1),
		});
		let mut snapshot = KeyServerSetSnapshot {
			current_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			new_set: vec![(H512::from_low_u64_be(1), address)].into_iter().collect(),
			..Default::default()
		};
		let snapshot_copy = snapshot.clone();
		update_number_of_confirmations(
			&|| H256::from_low_u64_be(2),
			&|_| None,
			&mut future_new_set, &mut snapshot);
		assert_eq!(future_new_set, Some(FutureNewSet {
			new_set: vec![(H512::from_low_u64_be(2), address)].into_iter().collect(),
			block: H256::from_low_u64_be(2),
		}));
		assert_eq!(snapshot, snapshot_copy);
	}
}
|
|
@ -1,371 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::collections::BTreeMap;
use std::sync::Arc;
use serde_json;
use tiny_keccak::Keccak;
use ethereum_types::{H256, Address};
use crypto::publickey::{Secret, Public};
use kvdb::KeyValueDB;
use types::{Error, ServerKeyId, NodeId};
use serialization::{SerializablePublic, SerializableSecret, SerializableH256, SerializableAddress};

/// Encrypted key share, stored by key storage on the single key server.
#[derive(Debug, Default, Clone, PartialEq)]
pub struct DocumentKeyShare {
	/// Author of the entry.
	pub author: Address,
	/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
	pub threshold: usize,
	/// Server public key.
	pub public: Public,
	/// Common (shared) encryption point.
	pub common_point: Option<Public>,
	/// Encrypted point.
	pub encrypted_point: Option<Public>,
	/// Key share versions.
	pub versions: Vec<DocumentKeyShareVersion>,
}

/// Versioned portion of document key share.
#[derive(Debug, Clone, PartialEq)]
pub struct DocumentKeyShareVersion {
	/// Version hash (Keccak(time + id_numbers)).
	pub hash: H256,
	/// Nodes ids numbers.
	pub id_numbers: BTreeMap<NodeId, Secret>,
	/// Node secret share.
	pub secret_share: Secret,
}

/// Document encryption keys storage
pub trait KeyStorage: Send + Sync {
	/// Insert document encryption key
	fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error>;
	/// Update document encryption key
	fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error>;
	/// Get document encryption key
	fn get(&self, document: &ServerKeyId) -> Result<Option<DocumentKeyShare>, Error>;
	/// Remove document encryption key
	fn remove(&self, document: &ServerKeyId) -> Result<(), Error>;
	/// Clears the database
	fn clear(&self) -> Result<(), Error>;
	/// Check if storage contains document encryption key
	fn contains(&self, document: &ServerKeyId) -> bool;
	/// Iterate through storage
	fn iter<'a>(&'a self) -> Box<dyn Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a>;
}
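
// A minimal usage sketch, not part of the original file (`store_and_reload`
// is an invented helper): the trait contract is that `insert` followed by
// `get` round-trips a share under its `ServerKeyId`, while `update` simply
// overwrites the stored value.
fn store_and_reload(
	storage: &dyn KeyStorage,
	id: ServerKeyId,
	share: DocumentKeyShare,
) -> Result<Option<DocumentKeyShare>, Error> {
	// `ServerKeyId` is an `H256`, so cloning it for the by-value insert is cheap.
	storage.insert(id.clone(), share)?;
	storage.get(&id)
}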

/// Persistent document encryption keys storage
pub struct PersistentKeyStorage {
	db: Arc<dyn KeyValueDB>,
}

/// Persistent document encryption keys storage iterator
pub struct PersistentKeyStorageIterator<'a> {
	iter: Box<dyn Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>,
}

/// V3 of encrypted key share, as it is stored by key storage on the single key server.
#[derive(Serialize, Deserialize)]
struct SerializableDocumentKeyShareV3 {
	/// Author of the entry.
	pub author: SerializableAddress,
	/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
	pub threshold: usize,
	/// Server public.
	pub public: SerializablePublic,
	/// Common (shared) encryption point.
	pub common_point: Option<SerializablePublic>,
	/// Encrypted point.
	pub encrypted_point: Option<SerializablePublic>,
	/// Versions.
	pub versions: Vec<SerializableDocumentKeyShareVersionV3>
}

/// V3 of encrypted key share version, as it is stored by key storage on the single key server.
#[derive(Serialize, Deserialize)]
struct SerializableDocumentKeyShareVersionV3 {
	/// Version hash.
	pub hash: SerializableH256,
	/// Nodes ids numbers.
	pub id_numbers: BTreeMap<SerializablePublic, SerializableSecret>,
	/// Node secret share.
	pub secret_share: SerializableSecret,
}
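
// Illustrative only (all values invented): a `SerializableDocumentKeyShareV3`
// is written to column 0 of the DB as JSON of roughly this shape, keyed by
// the `ServerKeyId` bytes:
//
// {
//   "author": "0x0000000000000000000000000000000000000000",
//   "threshold": 1,
//   "public": "0x...",
//   "common_point": null,
//   "encrypted_point": null,
//   "versions": [{"hash": "0x...", "id_numbers": {"0x...": "0x..."}, "secret_share": "0x..."}]
// }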

impl PersistentKeyStorage {
	/// Create new persistent document encryption keys storage
	pub fn new(db: Arc<dyn KeyValueDB>) -> Result<Self, Error> {
		Ok(Self { db })
	}
}

impl KeyStorage for PersistentKeyStorage {
	fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> {
		let key: SerializableDocumentKeyShareV3 = key.into();
		let key = serde_json::to_vec(&key).map_err(|e| Error::Database(e.to_string()))?;
		let mut batch = self.db.transaction();
		batch.put(0, document.as_bytes(), &key);
		self.db.write(batch).map_err(Into::into)
	}

	fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> {
		self.insert(document, key)
	}

	fn get(&self, document: &ServerKeyId) -> Result<Option<DocumentKeyShare>, Error> {
		self.db.get(0, document.as_bytes())
			.map_err(|e| Error::Database(e.to_string()))
			.and_then(|key| match key {
				None => Ok(None),
				Some(key) => serde_json::from_slice::<SerializableDocumentKeyShareV3>(&key)
					.map_err(|e| Error::Database(e.to_string()))
					.map(Into::into)
					.map(Some),
			})
	}

	fn remove(&self, document: &ServerKeyId) -> Result<(), Error> {
		let mut batch = self.db.transaction();
		batch.delete(0, document.as_bytes());
		self.db.write(batch).map_err(Into::into)
	}

	fn clear(&self) -> Result<(), Error> {
		let mut batch = self.db.transaction();
		for (key, _) in self.iter() {
			batch.delete(0, key.as_bytes());
		}
		self.db.write(batch)
			.map_err(|e| Error::Database(e.to_string()))
	}

	fn contains(&self, document: &ServerKeyId) -> bool {
		self.db.get(0, document.as_bytes())
			.map(|k| k.is_some())
			.unwrap_or(false)
	}

	fn iter<'a>(&'a self) -> Box<dyn Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a> {
		Box::new(PersistentKeyStorageIterator {
			iter: self.db.iter(0),
		})
	}
}

impl<'a> Iterator for PersistentKeyStorageIterator<'a> {
	type Item = (ServerKeyId, DocumentKeyShare);

	fn next(&mut self) -> Option<(ServerKeyId, DocumentKeyShare)> {
		self.iter.as_mut().next()
			.and_then(|(db_key, db_val)| serde_json::from_slice::<SerializableDocumentKeyShareV3>(&db_val)
				.ok()
				.map(|key| (ServerKeyId::from_slice(&*db_key), key.into())))
	}
}

impl DocumentKeyShare {
	/// Get last version reference.
	#[cfg(test)]
	pub fn last_version(&self) -> Result<&DocumentKeyShareVersion, Error> {
		self.versions.iter().rev()
			.nth(0)
			.ok_or_else(|| Error::Database("key version is not found".into()))
	}

	/// Get given version reference.
	pub fn version(&self, version: &H256) -> Result<&DocumentKeyShareVersion, Error> {
		self.versions.iter().rev()
			.find(|v| &v.hash == version)
			.ok_or_else(|| Error::Database("key version is not found".into()))
	}
}

impl DocumentKeyShareVersion {
	/// Create new version
	pub fn new(id_numbers: BTreeMap<NodeId, Secret>, secret_share: Secret) -> Self {
		DocumentKeyShareVersion {
			hash: Self::data_hash(id_numbers.iter().map(|(k, v)| (k.as_bytes(), v.as_bytes()))),
			id_numbers: id_numbers,
			secret_share: secret_share,
		}
	}

	/// Calculate hash of given version data.
	pub fn data_hash<'a, I>(id_numbers: I) -> H256 where I: Iterator<Item=(&'a [u8], &'a [u8])> {
		let mut nodes_keccak = Keccak::new_keccak256();

		for (node, node_number) in id_numbers {
			nodes_keccak.update(node);
			nodes_keccak.update(node_number);
		}

		let mut nodes_keccak_value = [0u8; 32];
		nodes_keccak.finalize(&mut nodes_keccak_value);

		nodes_keccak_value.into()
	}
}

impl From<DocumentKeyShare> for SerializableDocumentKeyShareV3 {
	fn from(key: DocumentKeyShare) -> Self {
		SerializableDocumentKeyShareV3 {
			author: key.author.into(),
			threshold: key.threshold,
			public: key.public.into(),
			common_point: key.common_point.map(Into::into),
			encrypted_point: key.encrypted_point.map(Into::into),
			versions: key.versions.into_iter().map(Into::into).collect(),
		}
	}
}

impl From<DocumentKeyShareVersion> for SerializableDocumentKeyShareVersionV3 {
	fn from(version: DocumentKeyShareVersion) -> Self {
		SerializableDocumentKeyShareVersionV3 {
			hash: version.hash.into(),
			id_numbers: version.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
			secret_share: version.secret_share.into(),
		}
	}
}

impl From<SerializableDocumentKeyShareV3> for DocumentKeyShare {
	fn from(key: SerializableDocumentKeyShareV3) -> Self {
		DocumentKeyShare {
			author: key.author.into(),
			threshold: key.threshold,
			public: key.public.into(),
			common_point: key.common_point.map(Into::into),
			encrypted_point: key.encrypted_point.map(Into::into),
			versions: key.versions.into_iter()
				.map(|v| DocumentKeyShareVersion {
					hash: v.hash.into(),
					id_numbers: v.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
					secret_share: v.secret_share.into(),
				})
				.collect(),
		}
	}
}

#[cfg(test)]
pub mod tests {
	use std::collections::HashMap;
	use std::sync::Arc;
	use parking_lot::RwLock;
	use tempdir::TempDir;
	use crypto::publickey::{Random, Generator, Public};
	use kvdb_rocksdb::{Database, DatabaseConfig};
	use types::{Error, ServerKeyId};
	use super::{KeyStorage, PersistentKeyStorage, DocumentKeyShare, DocumentKeyShareVersion};

	/// In-memory document encryption keys storage
	#[derive(Default)]
	pub struct DummyKeyStorage {
		keys: RwLock<HashMap<ServerKeyId, DocumentKeyShare>>,
	}

	impl KeyStorage for DummyKeyStorage {
		fn insert(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> {
			self.keys.write().insert(document, key);
			Ok(())
		}

		fn update(&self, document: ServerKeyId, key: DocumentKeyShare) -> Result<(), Error> {
			self.keys.write().insert(document, key);
			Ok(())
		}

		fn get(&self, document: &ServerKeyId) -> Result<Option<DocumentKeyShare>, Error> {
			Ok(self.keys.read().get(document).cloned())
		}

		fn remove(&self, document: &ServerKeyId) -> Result<(), Error> {
			self.keys.write().remove(document);
			Ok(())
		}

		fn clear(&self) -> Result<(), Error> {
			self.keys.write().clear();
			Ok(())
		}

		fn contains(&self, document: &ServerKeyId) -> bool {
			self.keys.read().contains_key(document)
		}

		fn iter<'a>(&'a self) -> Box<dyn Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a> {
			Box::new(self.keys.read().clone().into_iter())
		}
	}

	#[test]
	fn persistent_key_storage() {
		let tempdir = TempDir::new("").unwrap();
		let key1 = ServerKeyId::from_low_u64_be(1);
		let value1 = DocumentKeyShare {
			author: Default::default(),
			threshold: 100,
			public: Public::default(),
			common_point: Some(Random.generate().public().clone()),
			encrypted_point: Some(Random.generate().public().clone()),
			versions: vec![DocumentKeyShareVersion {
				hash: Default::default(),
				id_numbers: vec![
					(Random.generate().public().clone(), Random.generate().secret().clone())
				].into_iter().collect(),
				secret_share: Random.generate().secret().clone(),
			}],
		};
		let key2 = ServerKeyId::from_low_u64_be(2);
		let value2 = DocumentKeyShare {
			author: Default::default(),
			threshold: 200,
			public: Public::default(),
			common_point: Some(Random.generate().public().clone()),
			encrypted_point: Some(Random.generate().public().clone()),
			versions: vec![DocumentKeyShareVersion {
				hash: Default::default(),
				id_numbers: vec![
					(Random.generate().public().clone(), Random.generate().secret().clone())
				].into_iter().collect(),
				secret_share: Random.generate().secret().clone(),
			}],
		};
		let key3 = ServerKeyId::from_low_u64_be(3);

		let db_config = DatabaseConfig::with_columns(1);
		let db = Database::open(&db_config, &tempdir.path().display().to_string()).unwrap();

		let key_storage = PersistentKeyStorage::new(Arc::new(db)).unwrap();
		key_storage.insert(key1.clone(), value1.clone()).unwrap();
		key_storage.insert(key2.clone(), value2.clone()).unwrap();
		assert_eq!(key_storage.get(&key1), Ok(Some(value1.clone())));
		assert_eq!(key_storage.get(&key2), Ok(Some(value2.clone())));
		assert_eq!(key_storage.get(&key3), Ok(None));
		drop(key_storage);

		let db = Database::open(&db_config, &tempdir.path().display().to_string()).unwrap();

		let key_storage = PersistentKeyStorage::new(Arc::new(db)).unwrap();
		assert_eq!(key_storage.get(&key1), Ok(Some(value1)));
		assert_eq!(key_storage.get(&key2), Ok(Some(value2)));
		assert_eq!(key_storage.get(&key3), Ok(None));
	}
}
@ -1,180 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

extern crate byteorder;
extern crate ethabi;
extern crate ethereum_types;
extern crate hyper;
extern crate secp256k1;
extern crate keccak_hash as hash;
extern crate kvdb;
extern crate kvdb_rocksdb;
extern crate parity_bytes as bytes;
extern crate parity_crypto as crypto;
extern crate parity_runtime;
extern crate parking_lot;
extern crate percent_encoding;
extern crate rustc_hex;
extern crate serde;
extern crate serde_json;
extern crate tiny_keccak;
extern crate tokio;
extern crate tokio_io;
extern crate tokio_service;
extern crate url;
extern crate jsonrpc_server_utils;

extern crate ethabi_derive;
#[macro_use]
extern crate ethabi_contract;
#[macro_use]
extern crate futures;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;

#[cfg(test)]
extern crate env_logger;
#[cfg(test)]
extern crate tempdir;

mod key_server_cluster;
mod types;

mod traits;
mod acl_storage;
mod key_server;
mod key_storage;
mod serialization;
mod key_server_set;
mod node_key_pair;
mod listener;
mod blockchain;
mod migration;

use std::sync::Arc;
use kvdb::KeyValueDB;
use kvdb_rocksdb::{Database, DatabaseConfig};
use parity_runtime::Executor;

pub use types::{ServerKeyId, EncryptedDocumentKey, RequestSignature, Public,
	Error, NodeAddress, ServiceConfiguration, ClusterConfiguration};
pub use traits::KeyServer;
pub use blockchain::{SecretStoreChain, SigningKeyPair, ContractAddress, BlockId, BlockNumber, NewBlocksNotify, Filter};
pub use self::node_key_pair::PlainNodeKeyPair;

/// Open a secret store DB using the given secret store data path. The DB path is one level beneath the data path.
pub fn open_secretstore_db(data_path: &str) -> Result<Arc<dyn KeyValueDB>, String> {
	use std::path::PathBuf;

	migration::upgrade_db(data_path).map_err(|e| e.to_string())?;

	let mut db_path = PathBuf::from(data_path);
	db_path.push("db");
	let db_path = db_path.to_str().ok_or_else(|| "Invalid secretstore path".to_string())?;

	let config = DatabaseConfig::with_columns(1);
	Ok(Arc::new(Database::open(&config, &db_path).map_err(|e| format!("Error opening database: {:?}", e))?))
}
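
// A minimal usage sketch (the data path is hypothetical; `start` is the
// function defined just below): open the DB once, then hand it to the key
// server together with the chain client, node key pair and config.
//
// let db = open_secretstore_db("/var/lib/parity/secretstore")?;
// let key_server = start(trusted_client, self_key_pair, config, db, executor)?;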

/// Start new key server instance
pub fn start(trusted_client: Arc<dyn SecretStoreChain>, self_key_pair: Arc<dyn SigningKeyPair>, mut config: ServiceConfiguration,
	db: Arc<dyn KeyValueDB>, executor: Executor) -> Result<Box<dyn KeyServer>, Error>
{
	let acl_storage: Arc<dyn acl_storage::AclStorage> = match config.acl_check_contract_address.take() {
		Some(acl_check_contract_address) => acl_storage::OnChainAclStorage::new(trusted_client.clone(), acl_check_contract_address)?,
		None => Arc::new(acl_storage::DummyAclStorage::default()),
	};

	let key_server_set = key_server_set::OnChainKeyServerSet::new(trusted_client.clone(), config.cluster_config.key_server_set_contract_address.take(),
		self_key_pair.clone(), config.cluster_config.auto_migrate_enabled, config.cluster_config.nodes.clone())?;
	let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(db)?);
	let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set.clone(), self_key_pair.clone(),
		acl_storage.clone(), key_storage.clone(), executor.clone())?);
	let cluster = key_server.cluster();
	let key_server: Arc<dyn KeyServer> = key_server;

	// prepare HTTP listener
	let http_listener = match config.listener_address {
		Some(listener_address) => Some(listener::http_listener::KeyServerHttpListener::start(listener_address, config.cors, Arc::downgrade(&key_server), executor)?),
		None => None,
	};

	// prepare service contract listeners
	let create_service_contract = |address, name, api_mask|
		Arc::new(listener::service_contract::OnChainServiceContract::new(
			api_mask,
			trusted_client.clone(),
			name,
			address,
			self_key_pair.clone()));

	let mut contracts: Vec<Arc<dyn listener::service_contract::ServiceContract>> = Vec::new();
	config.service_contract_address.map(|address|
		create_service_contract(address,
			listener::service_contract::SERVICE_CONTRACT_REGISTRY_NAME.to_owned(),
			listener::ApiMask::all()))
		.map(|l| contracts.push(l));
	config.service_contract_srv_gen_address.map(|address|
		create_service_contract(address,
			listener::service_contract::SRV_KEY_GEN_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(),
			listener::ApiMask { server_key_generation_requests: true, ..Default::default() }))
		.map(|l| contracts.push(l));
	config.service_contract_srv_retr_address.map(|address|
		create_service_contract(address,
			listener::service_contract::SRV_KEY_RETR_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(),
			listener::ApiMask { server_key_retrieval_requests: true, ..Default::default() }))
		.map(|l| contracts.push(l));
	config.service_contract_doc_store_address.map(|address|
		create_service_contract(address,
			listener::service_contract::DOC_KEY_STORE_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(),
			listener::ApiMask { document_key_store_requests: true, ..Default::default() }))
		.map(|l| contracts.push(l));
	config.service_contract_doc_sretr_address.map(|address|
		create_service_contract(address,
			listener::service_contract::DOC_KEY_SRETR_SERVICE_CONTRACT_REGISTRY_NAME.to_owned(),
			listener::ApiMask { document_key_shadow_retrieval_requests: true, ..Default::default() }))
		.map(|l| contracts.push(l));

	let contract: Option<Arc<dyn listener::service_contract::ServiceContract>> = match contracts.len() {
		0 => None,
		1 => Some(contracts.pop().expect("contract.len() is 1; qed")),
		_ => Some(Arc::new(listener::service_contract_aggregate::OnChainServiceContractAggregate::new(contracts))),
	};

	let contract_listener = match contract {
		Some(contract) => Some({
			let listener = listener::service_contract_listener::ServiceContractListener::new(
				listener::service_contract_listener::ServiceContractListenerParams {
					contract: contract,
					self_key_pair: self_key_pair.clone(),
					key_server_set: key_server_set,
					acl_storage: acl_storage,
					cluster: cluster,
					key_storage: key_storage,
				}
			)?;
			trusted_client.add_listener(listener.clone());
			listener
		}),
		None => None,
	};

	Ok(Box::new(listener::Listener::new(key_server, http_listener, contract_listener)))
}
@ -1,548 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::collections::BTreeSet;
use std::sync::{Arc, Weak};
use futures::future::{ok, result};
use hyper::{self, Uri, Request as HttpRequest, Response as HttpResponse, Method as HttpMethod,
	StatusCode as HttpStatusCode, Body,
	header::{self, HeaderValue},
	server::conn::Http,
	service::Service,
};
use serde::Serialize;
use serde_json;
use tokio;
use tokio::net::TcpListener;
use parity_runtime::Executor;
use futures::{future, Future, Stream};
use percent_encoding::percent_decode;

use traits::KeyServer;
use serialization::{SerializableEncryptedDocumentKeyShadow, SerializableBytes, SerializablePublic};
use types::{Error, Public, MessageHash, NodeAddress, RequestSignature, ServerKeyId,
	EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId};
use jsonrpc_server_utils::cors::{self, AllowCors, AccessControlAllowOrigin};

/// Key server http-requests listener. Available requests:
/// To generate server key: POST /shadow/{server_key_id}/{signature}/{threshold}
/// To store pregenerated encrypted document key: POST /shadow/{server_key_id}/{signature}/{common_point}/{encrypted_key}
/// To generate server && document key: POST /{server_key_id}/{signature}/{threshold}
/// To get public portion of server key: GET /server/{server_key_id}/{signature}
/// To get document key: GET /{server_key_id}/{signature}
/// To get document key shadow: GET /shadow/{server_key_id}/{signature}
/// To generate Schnorr signature with server key: GET /schnorr/{server_key_id}/{signature}/{message_hash}
/// To generate ECDSA signature with server key: GET /ecdsa/{server_key_id}/{signature}/{message_hash}
/// To change servers set: POST /admin/servers_set_change/{old_signature}/{new_signature} + BODY: json array of hex-encoded nodes ids
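//
// Example request shapes for the endpoints above (all identifiers are
// placeholders, not real values):
//   POST /shadow/<server_key_id>/<requester_signature>/2        -- generate server key with threshold 2
//   GET  /server/<server_key_id>/<requester_signature>          -- fetch public portion of the server key
//   GET  /schnorr/<server_key_id>/<requester_signature>/<hash>  -- Schnorr-sign a 256-bit message hash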
|
|
||||||
type CorsDomains = Option<Vec<AccessControlAllowOrigin>>;
|
|
||||||
|
|
||||||
pub struct KeyServerHttpListener {
|
|
||||||
_executor: Executor,
|
|
||||||
_handler: Arc<KeyServerSharedHttpHandler>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parsed http request
|
|
||||||
#[derive(Debug, Clone, PartialEq)]
|
|
||||||
enum Request {
|
|
||||||
/// Invalid request
|
|
||||||
Invalid,
|
|
||||||
/// Generate server key.
|
|
||||||
GenerateServerKey(ServerKeyId, RequestSignature, usize),
|
|
||||||
/// Store document key.
|
|
||||||
StoreDocumentKey(ServerKeyId, RequestSignature, Public, Public),
|
|
||||||
/// Generate encryption key.
|
|
||||||
GenerateDocumentKey(ServerKeyId, RequestSignature, usize),
|
|
||||||
/// Request public portion of server key.
|
|
||||||
GetServerKey(ServerKeyId, RequestSignature),
|
|
||||||
/// Request encryption key of given document for given requestor.
|
|
||||||
GetDocumentKey(ServerKeyId, RequestSignature),
|
|
||||||
/// Request shadow of encryption key of given document for given requestor.
|
|
||||||
GetDocumentKeyShadow(ServerKeyId, RequestSignature),
|
|
||||||
/// Generate Schnorr signature for the message.
|
|
||||||
SchnorrSignMessage(ServerKeyId, RequestSignature, MessageHash),
|
|
||||||
/// Generate ECDSA signature for the message.
|
|
||||||
EcdsaSignMessage(ServerKeyId, RequestSignature, MessageHash),
|
|
||||||
/// Change servers set.
|
|
||||||
ChangeServersSet(RequestSignature, RequestSignature, BTreeSet<NodeId>),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Cloneable http handler
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct KeyServerHttpHandler {
|
|
||||||
handler: Arc<KeyServerSharedHttpHandler>,
|
|
||||||
cors: CorsDomains,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Shared http handler
|
|
||||||
struct KeyServerSharedHttpHandler {
|
|
||||||
key_server: Weak<dyn KeyServer>,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
impl KeyServerHttpListener {
|
|
||||||
/// Start KeyServer http listener
|
|
||||||
pub fn start(listener_address: NodeAddress, cors_domains: Option<Vec<String>>, key_server: Weak<dyn KeyServer>, executor: Executor) -> Result<Self, Error> {
|
|
||||||
let shared_handler = Arc::new(KeyServerSharedHttpHandler {
|
|
||||||
key_server: key_server,
|
|
||||||
});
|
|
||||||
let cors: CorsDomains = cors_domains.map(|domains| domains.into_iter().map(AccessControlAllowOrigin::from).collect());
|
|
||||||
let listener_address = format!("{}:{}", listener_address.address, listener_address.port).parse()?;
|
|
||||||
let listener = TcpListener::bind(&listener_address)?;
|
|
||||||
|
|
||||||
let shared_handler2 = shared_handler.clone();
|
|
||||||
|
|
||||||
let server = listener.incoming()
|
|
||||||
.map_err(|e| warn!("Key server listener error: {:?}", e))
|
|
||||||
.for_each(move |socket| {
|
|
||||||
let http = Http::new();
|
|
||||||
let serve = http.serve_connection(socket,
|
|
||||||
KeyServerHttpHandler { handler: shared_handler2.clone(), cors: cors.clone() }
|
|
||||||
).map(|_| ()).map_err(|e| {
|
|
||||||
warn!("Key server handler error: {:?}", e);
|
|
||||||
});
|
|
||||||
|
|
||||||
tokio::spawn(serve)
|
|
||||||
});
|
|
||||||
|
|
||||||
executor.spawn(server);
|
|
||||||
|
|
||||||
let listener = KeyServerHttpListener {
|
|
||||||
_executor: executor,
|
|
||||||
_handler: shared_handler,
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(listener)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl KeyServerHttpHandler {
|
|
||||||
fn key_server(&self) -> Result<Arc<dyn KeyServer>, Error> {
|
|
||||||
self.handler.key_server.upgrade()
|
|
||||||
.ok_or_else(|| Error::Internal("KeyServer is already destroyed".into()))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn process(
|
|
||||||
self,
|
|
||||||
req_method: HttpMethod,
|
|
||||||
req_uri: Uri,
|
|
||||||
path: &str,
|
|
||||||
req_body: &[u8],
|
|
||||||
cors: AllowCors<AccessControlAllowOrigin>,
|
|
||||||
) -> Box<dyn Future<Item=HttpResponse<Body>, Error=hyper::Error> + Send> {
|
|
||||||
match parse_request(&req_method, &path, &req_body) {
|
|
||||||
Request::GenerateServerKey(document, signature, threshold) =>
|
|
||||||
Box::new(result(self.key_server())
|
|
||||||
.and_then(move |key_server| key_server.generate_key(document, signature.into(), threshold))
|
|
||||||
.then(move |result| ok(return_server_public_key("GenerateServerKey", &req_uri, cors, result)))),
|
|
||||||
Request::StoreDocumentKey(document, signature, common_point, encrypted_document_key) =>
|
|
||||||
Box::new(result(self.key_server())
|
|
||||||
.and_then(move |key_server| key_server.store_document_key(
|
|
||||||
document,
|
|
||||||
signature.into(),
|
|
||||||
common_point,
|
|
||||||
encrypted_document_key,
|
|
||||||
))
|
|
||||||
.then(move |result| ok(return_empty("StoreDocumentKey", &req_uri, cors, result)))),
|
|
||||||
Request::GenerateDocumentKey(document, signature, threshold) =>
|
|
||||||
Box::new(result(self.key_server())
|
|
||||||
.and_then(move |key_server| key_server.generate_document_key(
|
|
||||||
document,
|
|
||||||
signature.into(),
|
|
||||||
threshold,
|
|
||||||
))
|
|
||||||
.then(move |result| ok(return_document_key("GenerateDocumentKey", &req_uri, cors, result)))),
|
|
||||||
Request::GetServerKey(document, signature) =>
|
|
||||||
Box::new(result(self.key_server())
|
|
||||||
.and_then(move |key_server| key_server.restore_key_public(
|
|
||||||
document,
|
|
||||||
signature.into(),
|
|
||||||
))
|
|
||||||
.then(move |result| ok(return_server_public_key("GetServerKey", &req_uri, cors, result)))),
|
|
||||||
Request::GetDocumentKey(document, signature) =>
|
|
||||||
Box::new(result(self.key_server())
|
|
||||||
.and_then(move |key_server| key_server.restore_document_key(document, signature.into()))
|
|
||||||
.then(move |result| ok(return_document_key("GetDocumentKey", &req_uri, cors, result)))),
|
|
||||||
Request::GetDocumentKeyShadow(document, signature) =>
|
|
||||||
Box::new(result(self.key_server())
|
|
||||||
.and_then(move |key_server| key_server.restore_document_key_shadow(document, signature.into()))
|
|
||||||
.then(move |result| ok(return_document_key_shadow("GetDocumentKeyShadow", &req_uri, cors, result)))),
|
|
||||||
Request::SchnorrSignMessage(document, signature, message_hash) =>
|
|
||||||
Box::new(result(self.key_server())
|
|
||||||
.and_then(move |key_server| key_server.sign_message_schnorr(
|
|
||||||
document,
|
|
||||||
signature.into(),
|
|
||||||
message_hash,
|
|
||||||
))
|
|
||||||
.then(move |result| ok(return_message_signature("SchnorrSignMessage", &req_uri, cors, result)))),
|
|
||||||
Request::EcdsaSignMessage(document, signature, message_hash) =>
|
|
||||||
Box::new(result(self.key_server())
|
|
||||||
.and_then(move |key_server| key_server.sign_message_ecdsa(
|
|
||||||
document,
|
|
||||||
signature.into(),
|
|
||||||
message_hash,
|
|
||||||
))
|
|
||||||
.then(move |result| ok(return_message_signature("EcdsaSignMessage", &req_uri, cors, result)))),
|
|
||||||
Request::ChangeServersSet(old_set_signature, new_set_signature, new_servers_set) =>
|
|
||||||
Box::new(result(self.key_server())
|
|
||||||
.and_then(move |key_server| key_server.change_servers_set(
|
|
||||||
old_set_signature,
|
|
||||||
new_set_signature,
|
|
||||||
new_servers_set,
|
|
||||||
))
|
|
||||||
.then(move |result| ok(return_empty("ChangeServersSet", &req_uri, cors, result)))),
|
|
||||||
Request::Invalid => {
|
|
||||||
warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri);
|
|
||||||
Box::new(ok(HttpResponse::builder()
|
|
||||||
.status(HttpStatusCode::BAD_REQUEST)
|
|
||||||
.body(Body::empty())
|
|
||||||
.expect("Nothing to parse, cannot fail; qed")))
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Service for KeyServerHttpHandler {
|
|
||||||
type ReqBody = Body;
|
|
||||||
type ResBody = Body;
|
|
||||||
type Error = hyper::Error;
|
|
||||||
type Future = Box<dyn Future<Item = HttpResponse<Self::ResBody>, Error=Self::Error> + Send>;
|
|
||||||
|
|
||||||
fn call(&mut self, req: HttpRequest<Body>) -> Self::Future {
|
|
||||||
let cors = cors::get_cors_allow_origin(
|
|
||||||
req.headers().get(header::ORIGIN).and_then(|value| value.to_str().ok()),
|
|
||||||
req.headers().get(header::HOST).and_then(|value| value.to_str().ok()),
|
|
||||||
&self.cors
|
|
||||||
);
|
|
||||||
match cors {
|
|
||||||
AllowCors::Invalid => {
|
|
||||||
warn!(target: "secretstore", "Ignoring {}-request {} with unauthorized Origin header", req.method(), req.uri());
|
|
||||||
Box::new(future::ok(HttpResponse::builder()
|
|
||||||
.status(HttpStatusCode::NOT_FOUND)
|
|
||||||
.body(Body::empty())
|
|
||||||
.expect("Nothing to parse, cannot fail; qed")))
|
|
||||||
},
|
|
||||||
_ => {
|
|
||||||
let req_method = req.method().clone();
|
|
||||||
let req_uri = req.uri().clone();
|
|
||||||
let path = req_uri.path().to_string();
|
|
||||||
// We cannot consume Self because of the Service trait requirement.
|
|
||||||
let this = self.clone();
|
|
||||||
|
|
||||||
Box::new(req.into_body().concat2()
|
|
||||||
.and_then(move |body| this.process(req_method, req_uri, &path, &body, cors)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn return_empty(req_type: &str, req_uri: &Uri, cors: AllowCors<AccessControlAllowOrigin>, empty: Result<(), Error>) -> HttpResponse<Body> {
|
|
||||||
return_bytes::<i32>(req_type, req_uri, cors, empty.map(|_| None))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn return_server_public_key(
|
|
||||||
req_type: &str,
|
|
||||||
req_uri: &Uri,
|
|
||||||
cors: AllowCors<AccessControlAllowOrigin>,
|
|
||||||
server_public: Result<Public, Error>,
|
|
||||||
) -> HttpResponse<Body> {
|
|
||||||
return_bytes(req_type, req_uri, cors, server_public.map(|k| Some(SerializablePublic(k))))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn return_message_signature(
|
|
||||||
req_type: &str,
|
|
||||||
req_uri: &Uri,
|
|
||||||
cors: AllowCors<AccessControlAllowOrigin>,
|
|
||||||
signature: Result<EncryptedDocumentKey, Error>,
|
|
||||||
) -> HttpResponse<Body> {
|
|
||||||
return_bytes(req_type, req_uri, cors, signature.map(|s| Some(SerializableBytes(s))))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn return_document_key(
|
|
||||||
req_type: &str,
|
|
||||||
req_uri: &Uri,
|
|
||||||
cors: AllowCors<AccessControlAllowOrigin>,
|
|
||||||
document_key: Result<EncryptedDocumentKey, Error>,
|
|
||||||
) -> HttpResponse<Body> {
|
|
||||||
return_bytes(req_type, req_uri, cors, document_key.map(|k| Some(SerializableBytes(k))))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn return_document_key_shadow(
|
|
||||||
req_type: &str,
|
|
||||||
req_uri: &Uri,
|
|
||||||
cors: AllowCors<AccessControlAllowOrigin>,
|
|
||||||
document_key_shadow: Result<EncryptedDocumentKeyShadow, Error>,
|
|
||||||
) -> HttpResponse<Body> {
|
|
||||||
return_bytes(req_type, req_uri, cors, document_key_shadow.map(|k| Some(SerializableEncryptedDocumentKeyShadow {
|
|
||||||
decrypted_secret: k.decrypted_secret.into(),
|
|
||||||
common_point: k.common_point.expect("always filled when requesting document_key_shadow; qed").into(),
|
|
||||||
decrypt_shadows: k.decrypt_shadows.expect("always filled when requesting document_key_shadow; qed").into_iter().map(Into::into).collect()
|
|
||||||
})))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn return_bytes<T: Serialize>(
|
|
||||||
req_type: &str,
|
|
||||||
req_uri: &Uri,
|
|
||||||
cors: AllowCors<AccessControlAllowOrigin>,
|
|
||||||
result: Result<Option<T>, Error>,
|
|
||||||
) -> HttpResponse<Body> {
|
|
||||||
match result {
|
|
||||||
Ok(Some(result)) => match serde_json::to_vec(&result) {
|
|
||||||
Ok(result) => {
|
|
||||||
let body: Body = result.into();
|
|
||||||
let mut builder = HttpResponse::builder();
|
|
||||||
builder.header(header::CONTENT_TYPE, HeaderValue::from_static("application/json; charset=utf-8"));
|
|
||||||
if let AllowCors::Ok(AccessControlAllowOrigin::Value(origin)) = cors {
|
|
||||||
builder.header(header::ACCESS_CONTROL_ALLOW_ORIGIN, origin.to_string());
|
|
||||||
}
|
|
||||||
builder.body(body).expect("Error creating http response")
|
|
||||||
},
|
|
||||||
Err(err) => {
|
|
||||||
warn!(target: "secretstore", "response to request {} has failed with: {}", req_uri, err);
|
|
||||||
HttpResponse::builder()
|
|
||||||
.status(HttpStatusCode::INTERNAL_SERVER_ERROR)
|
|
||||||
.body(Body::empty())
|
|
||||||
.expect("Nothing to parse, cannot fail; qed")
|
|
||||||
}
|
|
||||||
},
|
|
||||||
Ok(None) => {
|
|
||||||
let mut builder = HttpResponse::builder();
|
|
||||||
builder.status(HttpStatusCode::OK);
|
|
||||||
if let AllowCors::Ok(AccessControlAllowOrigin::Value(origin)) = cors {
|
|
||||||
builder.header(header::ACCESS_CONTROL_ALLOW_ORIGIN, origin.to_string());
|
|
||||||
}
|
|
||||||
builder.body(Body::empty()).expect("Nothing to parse, cannot fail; qed")
|
|
||||||
},
|
|
||||||
Err(err) => {
|
|
||||||
warn!(target: "secretstore", "{} request {} has failed with: {}", req_type, req_uri, err);
|
|
||||||
return_error(err)
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn return_error(err: Error) -> HttpResponse<Body> {
|
|
||||||
let status = match err {
|
|
||||||
| Error::AccessDenied
|
|
||||||
| Error::ConsensusUnreachable
|
|
||||||
| Error::ConsensusTemporaryUnreachable =>
|
|
||||||
HttpStatusCode::FORBIDDEN,
|
|
||||||
| Error::ServerKeyIsNotFound
|
|
||||||
| Error::DocumentKeyIsNotFound =>
|
|
||||||
HttpStatusCode::NOT_FOUND,
|
|
||||||
| Error::InsufficientRequesterData(_)
|
|
||||||
| Error::Hyper(_)
|
|
||||||
| Error::Serde(_)
|
|
||||||
| Error::DocumentKeyAlreadyStored
|
|
||||||
| Error::ServerKeyAlreadyGenerated =>
|
|
||||||
HttpStatusCode::BAD_REQUEST,
|
|
||||||
_ => HttpStatusCode::INTERNAL_SERVER_ERROR,
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut res = HttpResponse::builder();
|
|
||||||
res.status(status);
|
|
||||||
|
|
||||||
// return error text. ignore errors when returning error
|
|
||||||
let error_text = format!("\"{}\"", err);
|
|
||||||
if let Ok(error_text) = serde_json::to_vec(&error_text) {
|
|
||||||
res.header(header::CONTENT_TYPE, HeaderValue::from_static("application/json; charset=utf-8"));
|
|
||||||
res.body(error_text.into())
|
|
||||||
.expect("`error_text` is a formatted string, parsing cannot fail; qed")
|
|
||||||
} else {
|
|
||||||
res.body(Body::empty())
|
|
||||||
.expect("Nothing to parse, cannot fail; qed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_request(method: &HttpMethod, uri_path: &str, body: &[u8]) -> Request {
|
|
||||||
let uri_path = match percent_decode(uri_path.as_bytes()).decode_utf8() {
|
|
||||||
Ok(path) => path,
|
|
||||||
Err(_) => return Request::Invalid,
|
|
||||||
};
|
|
||||||
|
|
||||||
let path: Vec<String> = uri_path.trim_start_matches('/').split('/').map(Into::into).collect();
|
|
||||||
if path.len() == 0 {
|
|
||||||
return Request::Invalid;
|
|
||||||
}
|
|
||||||
|
|
||||||
if path[0] == "admin" {
|
|
||||||
return parse_admin_request(method, path, body);
|
|
||||||
}
|
|
||||||
|
|
||||||
let is_known_prefix = &path[0] == "shadow" || &path[0] == "schnorr" || &path[0] == "ecdsa" || &path[0] == "server";
|
|
||||||
let (prefix, args_offset) = if is_known_prefix { (&*path[0], 1) } else { ("", 0) };
|
|
||||||
let args_count = path.len() - args_offset;
|
|
||||||
if args_count < 2 || path[args_offset].is_empty() || path[args_offset + 1].is_empty() {
|
|
||||||
return Request::Invalid;
|
|
||||||
}
|
|
||||||
|
|
||||||
let document = match path[args_offset].parse() {
|
|
||||||
Ok(document) => document,
|
|
||||||
_ => return Request::Invalid,
|
|
||||||
};
|
|
||||||
let signature = match path[args_offset + 1].parse() {
|
|
||||||
Ok(signature) => signature,
|
|
||||||
_ => return Request::Invalid,
|
|
||||||
};
|
|
||||||
|
|
||||||
let threshold = path.get(args_offset + 2).map(|v| v.parse());
|
|
||||||
let message_hash = path.get(args_offset + 2).map(|v| v.parse());
|
|
||||||
let common_point = path.get(args_offset + 2).map(|v| v.parse());
|
|
||||||
let encrypted_key = path.get(args_offset + 3).map(|v| v.parse());
|
|
||||||
match (prefix, args_count, method, threshold, message_hash, common_point, encrypted_key) {
|
|
||||||
("shadow", 3, &HttpMethod::POST, Some(Ok(threshold)), _, _, _) =>
|
|
||||||
Request::GenerateServerKey(document, signature, threshold),
|
|
||||||
("shadow", 4, &HttpMethod::POST, _, _, Some(Ok(common_point)), Some(Ok(encrypted_key))) =>
|
|
||||||
Request::StoreDocumentKey(document, signature, common_point, encrypted_key),
|
|
||||||
("", 3, &HttpMethod::POST, Some(Ok(threshold)), _, _, _) =>
|
|
||||||
Request::GenerateDocumentKey(document, signature, threshold),
|
|
||||||
("server", 2, &HttpMethod::GET, _, _, _, _) =>
|
|
||||||
Request::GetServerKey(document, signature),
|
|
||||||
("", 2, &HttpMethod::GET, _, _, _, _) =>
|
|
||||||
Request::GetDocumentKey(document, signature),
|
|
||||||
("shadow", 2, &HttpMethod::GET, _, _, _, _) =>
|
|
||||||
Request::GetDocumentKeyShadow(document, signature),
|
|
||||||
("schnorr", 3, &HttpMethod::GET, _, Some(Ok(message_hash)), _, _) =>
|
|
||||||
Request::SchnorrSignMessage(document, signature, message_hash),
|
|
||||||
("ecdsa", 3, &HttpMethod::GET, _, Some(Ok(message_hash)), _, _) =>
|
|
||||||
Request::EcdsaSignMessage(document, signature, message_hash),
|
|
||||||
_ => Request::Invalid,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_admin_request(method: &HttpMethod, path: Vec<String>, body: &[u8]) -> Request {
|
|
||||||
let args_count = path.len();
|
|
||||||
if *method != HttpMethod::POST || args_count != 4 || path[1] != "servers_set_change" {
|
|
||||||
return Request::Invalid;
|
|
||||||
}
|
|
||||||
|
|
||||||
let old_set_signature = match path[2].parse() {
|
|
||||||
Ok(signature) => signature,
|
|
||||||
_ => return Request::Invalid,
|
|
||||||
};
|
|
||||||
|
|
||||||
let new_set_signature = match path[3].parse() {
|
|
||||||
Ok(signature) => signature,
|
|
||||||
_ => return Request::Invalid,
|
|
||||||
};
|
|
||||||
|
|
||||||
let new_servers_set: BTreeSet<SerializablePublic> = match serde_json::from_slice(body) {
|
|
||||||
Ok(new_servers_set) => new_servers_set,
|
|
||||||
_ => return Request::Invalid,
|
|
||||||
};
|
|
||||||
|
|
||||||
Request::ChangeServersSet(old_set_signature, new_set_signature,
|
|
||||||
new_servers_set.into_iter().map(Into::into).collect())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::str::FromStr;
|
|
||||||
use hyper::Method as HttpMethod;
|
|
||||||
use crypto::publickey::Public;
|
|
||||||
use traits::KeyServer;
|
|
||||||
use key_server::tests::DummyKeyServer;
|
|
||||||
use types::NodeAddress;
|
|
||||||
use parity_runtime::Runtime;
|
|
||||||
use ethereum_types::H256;
|
|
||||||
use super::{parse_request, Request, KeyServerHttpListener};
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn http_listener_successfully_drops() {
|
|
||||||
let key_server: Arc<dyn KeyServer> = Arc::new(DummyKeyServer::default());
|
|
||||||
let address = NodeAddress { address: "127.0.0.1".into(), port: 9000 };
|
|
||||||
let runtime = Runtime::with_thread_count(1);
|
|
||||||
let listener = KeyServerHttpListener::start(address, None, Arc::downgrade(&key_server),
|
|
||||||
runtime.executor()).unwrap();
|
|
||||||
drop(listener);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn parse_request_successful() {
|
|
||||||
// POST /shadow/{server_key_id}/{signature}/{threshold} => generate server key
|
|
||||||
assert_eq!(parse_request(&HttpMethod::POST, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2", Default::default()),
|
|
||||||
Request::GenerateServerKey(H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(),
|
|
||||||
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
|
|
||||||
2));
|
|
||||||
// POST /shadow/{server_key_id}/{signature}/{common_point}/{encrypted_key} => store encrypted document key
|
|
||||||
assert_eq!(parse_request(&HttpMethod::POST, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8/1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb", Default::default()),
|
|
||||||
Request::StoreDocumentKey(H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(),
|
|
||||||
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
|
|
||||||
"b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".parse().unwrap(),
|
|
||||||
"1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb".parse().unwrap()));
|
|
||||||
// POST /{server_key_id}/{signature}/{threshold} => generate server && document key
|
|
||||||
assert_eq!(parse_request(&HttpMethod::POST, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/2", Default::default()),
|
|
||||||
Request::GenerateDocumentKey(H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(),
|
|
||||||
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
|
|
||||||
2));
|
|
||||||
// GET /server/{server_key_id}/{signature} => get public portion of server key
|
|
||||||
assert_eq!(parse_request(&HttpMethod::GET, "/server/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
|
|
||||||
Request::GetServerKey(H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(),
|
|
||||||
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap()));
|
|
||||||
// GET /{server_key_id}/{signature} => get document key
|
|
||||||
assert_eq!(parse_request(&HttpMethod::GET, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
|
|
||||||
Request::GetDocumentKey(H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(),
|
|
||||||
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap()));
|
|
||||||
assert_eq!(parse_request(&HttpMethod::GET, "/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
|
|
||||||
Request::GetDocumentKey(H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(),
|
|
||||||
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap()));
|
|
||||||
// GET /shadow/{server_key_id}/{signature} => get document key shadow
|
|
||||||
assert_eq!(parse_request(&HttpMethod::GET, "/shadow/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", Default::default()),
|
|
||||||
Request::GetDocumentKeyShadow(H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(),
|
|
||||||
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap()));
|
|
||||||
// GET /schnorr/{server_key_id}/{signature}/{message_hash} => schnorr-sign message with server key
|
|
||||||
assert_eq!(parse_request(&HttpMethod::GET, "/schnorr/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c", Default::default()),
|
|
||||||
			Request::SchnorrSignMessage(H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(),
				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
				"281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap()));
		// GET /ecdsa/{server_key_id}/{signature}/{message_hash} => ecdsa-sign message with server key
		assert_eq!(parse_request(&HttpMethod::GET, "/ecdsa/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c", Default::default()),
			Request::EcdsaSignMessage(H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(),
				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
				"281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap()));
		// POST /admin/servers_set_change/{old_set_signature}/{new_set_signature} + body
		let node1: Public = "843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91".parse().unwrap();
		let node2: Public = "07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3".parse().unwrap();
		let nodes = vec![node1, node2].into_iter().collect();
		assert_eq!(parse_request(&HttpMethod::POST, "/admin/servers_set_change/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/b199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01",
			&r#"["0x843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91",
			"0x07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3"]"#.as_bytes()),
			Request::ChangeServersSet(
				"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
				"b199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap(),
				nodes,
			));
	}

	#[test]
	fn parse_request_failed() {
		assert_eq!(parse_request(&HttpMethod::GET, "", Default::default()), Request::Invalid);
		assert_eq!(parse_request(&HttpMethod::GET, "/shadow", Default::default()), Request::Invalid);
		assert_eq!(parse_request(&HttpMethod::GET, "///2", Default::default()), Request::Invalid);
		assert_eq!(parse_request(&HttpMethod::GET, "/shadow///2", Default::default()), Request::Invalid);
		assert_eq!(parse_request(&HttpMethod::GET, "/0000000000000000000000000000000000000000000000000000000000000001", Default::default()), Request::Invalid);
		assert_eq!(parse_request(&HttpMethod::GET, "/0000000000000000000000000000000000000000000000000000000000000001/", Default::default()), Request::Invalid);
		assert_eq!(parse_request(&HttpMethod::GET, "/a/b", Default::default()), Request::Invalid);
		assert_eq!(parse_request(&HttpMethod::GET, "/schnorr/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002", Default::default()), Request::Invalid);
		assert_eq!(parse_request(&HttpMethod::GET, "/ecdsa/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002/0000000000000000000000000000000000000000000000000000000000000002", Default::default()), Request::Invalid);
		assert_eq!(parse_request(&HttpMethod::POST, "/admin/servers_set_change/xxx/yyy",
			&r#"["0x843645726384530ffb0c52f175278143b5a93959af7864460f5a4fec9afd1450cfb8aef63dec90657f43f55b13e0a73c7524d4e9a13c051b4e5f1e53f39ecd91",
			"0x07230e34ebfe41337d3ed53b186b3861751f2401ee74b988bba55694e2a6f60c757677e194be2e53c3523cc8548694e636e6acb35c4e8fdc5e29d28679b9b2f3"]"#.as_bytes()),
			Request::Invalid);
		assert_eq!(parse_request(&HttpMethod::POST, "/admin/servers_set_change/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01", "".as_bytes()),
			Request::Invalid);
	}
}
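For orientation, the route grammar these tests pin down can be sketched as a plain segment match. This stand-alone snippet is illustrative only; `Route` and `parse_path` are invented names, not the listener's actual parser, which additionally validates hex digits and lengths:

// Illustrative only: a minimal mirror of the URL scheme exercised above.
#[derive(Debug, PartialEq)]
enum Route<'a> {
	SchnorrSign { key_id: &'a str, signature: &'a str, message: &'a str },
	EcdsaSign { key_id: &'a str, signature: &'a str, message: &'a str },
	ServersSetChange { old_sig: &'a str, new_sig: &'a str },
	Invalid,
}

fn parse_path(path: &str) -> Route<'_> {
	let segments: Vec<&str> = path.trim_start_matches('/').split('/').collect();
	match segments[..] {
		["schnorr", key_id, signature, message] => Route::SchnorrSign { key_id, signature, message },
		["ecdsa", key_id, signature, message] => Route::EcdsaSign { key_id, signature, message },
		["admin", "servers_set_change", old_sig, new_sig] => Route::ServersSetChange { old_sig, new_sig },
		// Anything with the wrong arity ends up invalid, as in the tests above.
		_ => Route::Invalid,
	}
}

fn main() {
	assert_eq!(parse_path("/shadow///2"), Route::Invalid);
	assert!(matches!(parse_path("/ecdsa/01/aa/bb"), Route::EcdsaSign { .. }));
}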
@ -1,160 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

pub mod http_listener;
pub mod service_contract;
pub mod service_contract_aggregate;
pub mod service_contract_listener;
mod tasks_queue;

use std::collections::BTreeSet;
use std::sync::Arc;
use futures::Future;
use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, AdminSessionsServer, KeyServer};
use types::{Error, Public, MessageHash, EncryptedMessageSignature, RequestSignature, ServerKeyId,
	EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId, Requester};

/// Available API mask.
#[derive(Debug, Default)]
pub struct ApiMask {
	/// Accept server key generation requests.
	pub server_key_generation_requests: bool,
	/// Accept server key retrieval requests.
	pub server_key_retrieval_requests: bool,
	/// Accept document key store requests.
	pub document_key_store_requests: bool,
	/// Accept document key shadow retrieval requests.
	pub document_key_shadow_retrieval_requests: bool,
}

/// Combined HTTP + service contract listener.
pub struct Listener {
	key_server: Arc<dyn KeyServer>,
	_http: Option<http_listener::KeyServerHttpListener>,
	_contract: Option<Arc<service_contract_listener::ServiceContractListener>>,
}

impl ApiMask {
	/// Create mask that accepts all requests.
	pub fn all() -> Self {
		ApiMask {
			server_key_generation_requests: true,
			server_key_retrieval_requests: true,
			document_key_store_requests: true,
			document_key_shadow_retrieval_requests: true,
		}
	}
}
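Since ApiMask derives Default (every flag false), a partial mask is naturally built with struct-update syntax; a small sketch of how a caller might enable only server key generation, relying solely on the definition above:

// Sketch: enable one API, leave every other request type off.
fn generation_only_mask() -> ApiMask {
	ApiMask {
		server_key_generation_requests: true,
		..Default::default()
	}
}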

impl Listener {
	/// Create new listener.
	pub fn new(key_server: Arc<dyn KeyServer>, http: Option<http_listener::KeyServerHttpListener>, contract: Option<Arc<service_contract_listener::ServiceContractListener>>) -> Self {
		Self {
			key_server: key_server,
			_http: http,
			_contract: contract,
		}
	}
}

impl KeyServer for Listener {}

impl ServerKeyGenerator for Listener {
	fn generate_key(
		&self,
		key_id: ServerKeyId,
		author: Requester,
		threshold: usize,
	) -> Box<dyn Future<Item=Public, Error=Error> + Send> {
		self.key_server.generate_key(key_id, author, threshold)
	}

	fn restore_key_public(
		&self,
		key_id: ServerKeyId,
		author: Requester,
	) -> Box<dyn Future<Item=Public, Error=Error> + Send> {
		self.key_server.restore_key_public(key_id, author)
	}
}

impl DocumentKeyServer for Listener {
	fn store_document_key(
		&self,
		key_id: ServerKeyId,
		author: Requester,
		common_point: Public,
		encrypted_document_key: Public,
	) -> Box<dyn Future<Item=(), Error=Error> + Send> {
		self.key_server.store_document_key(key_id, author, common_point, encrypted_document_key)
	}

	fn generate_document_key(
		&self,
		key_id: ServerKeyId,
		author: Requester,
		threshold: usize,
	) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
		self.key_server.generate_document_key(key_id, author, threshold)
	}

	fn restore_document_key(
		&self,
		key_id: ServerKeyId,
		requester: Requester,
	) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send> {
		self.key_server.restore_document_key(key_id, requester)
	}

	fn restore_document_key_shadow(
		&self,
		key_id: ServerKeyId,
		requester: Requester,
	) -> Box<dyn Future<Item=EncryptedDocumentKeyShadow, Error=Error> + Send> {
		self.key_server.restore_document_key_shadow(key_id, requester)
	}
}

impl MessageSigner for Listener {
	fn sign_message_schnorr(
		&self,
		key_id: ServerKeyId,
		requester: Requester,
		message: MessageHash,
	) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
		self.key_server.sign_message_schnorr(key_id, requester, message)
	}

	fn sign_message_ecdsa(
		&self,
		key_id: ServerKeyId,
		requester: Requester,
		message: MessageHash,
	) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send> {
		self.key_server.sign_message_ecdsa(key_id, requester, message)
	}
}

impl AdminSessionsServer for Listener {
	fn change_servers_set(
		&self,
		old_set_signature: RequestSignature,
		new_set_signature: RequestSignature,
		new_servers_set: BTreeSet<NodeId>,
	) -> Box<dyn Future<Item=(), Error=Error> + Send> {
		self.key_server.change_servers_set(old_set_signature, new_set_signature, new_servers_set)
	}
}
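The whole file is one pattern: Listener is a facade that owns its transports (`_http`, `_contract`) purely for their side effects and forwards every key-server trait method to the inner Arc<dyn KeyServer>. A self-contained miniature of that shape (toy trait and names invented for illustration):

use std::sync::Arc;

// Toy stand-in; the real traits are the SecretStore ones above.
trait Backend { fn answer(&self) -> u32; }

struct Facade {
	inner: Arc<dyn Backend>,
	// Held only to keep a transport alive, mirroring `_http`/`_contract`.
	_transport: Option<String>,
}

impl Backend for Facade {
	fn answer(&self) -> u32 {
		// Pure delegation, exactly like Listener's trait impls.
		self.inner.answer()
	}
}

struct Fixed;
impl Backend for Fixed { fn answer(&self) -> u32 { 42 } }

fn main() {
	let facade = Facade { inner: Arc::new(Fixed), _transport: None };
	assert_eq!(facade.answer(), 42);
}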
@ -1,807 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use parking_lot::RwLock;
use ethabi::RawLog;
use ethabi::FunctionOutputDecoder;
use crypto::publickey::{Public, public_to_address};
use hash::keccak;
use bytes::Bytes;
use ethereum_types::{H256, U256, Address, H512};
use listener::ApiMask;
use listener::service_contract_listener::ServiceTask;
use blockchain::{SecretStoreChain, Filter, SigningKeyPair, ContractAddress, BlockId};
use ServerKeyId;

use_contract!(service, "res/service.json");

/// Name of the general SecretStore contract in the registry.
pub const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service";
/// Name of the server key generation SecretStore contract in the registry.
pub const SRV_KEY_GEN_SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service_srv_gen";
/// Name of the server key retrieval SecretStore contract in the registry.
pub const SRV_KEY_RETR_SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service_srv_retr";
/// Name of the document key store SecretStore contract in the registry.
pub const DOC_KEY_STORE_SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service_doc_store";
/// Name of the document key retrieval SecretStore contract in the registry.
pub const DOC_KEY_SRETR_SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service_doc_sretr";

/// Server key generation has been requested.
const SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyGenerationRequested(bytes32,address,uint8)";
/// Server key retrieval has been requested.
const SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME: &'static [u8] = &*b"ServerKeyRetrievalRequested(bytes32)";
/// Document key store has been requested.
const DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME: &'static [u8] = &*b"DocumentKeyStoreRequested(bytes32,address,bytes,bytes)";
/// Document key common part retrieval has been requested.
const DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME: &'static [u8] = &*b"DocumentKeyCommonRetrievalRequested(bytes32,address)";
/// Document key personal part retrieval has been requested.
const DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME: &'static [u8] = &*b"DocumentKeyPersonalRetrievalRequested(bytes32,bytes)";

lazy_static! {
	pub static ref SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME);
	pub static ref SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME_HASH: H256 = keccak(SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME);
	pub static ref DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME_HASH: H256 = keccak(DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME);
	pub static ref DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH: H256 = keccak(DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME);
	pub static ref DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH: H256 = keccak(DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME);
}

/// Service contract trait.
pub trait ServiceContract: Send + Sync {
	/// Update contract when new blocks are enacted. Returns true if contract is installed && up-to-date (i.e. chain is synced).
	fn update(&self) -> bool;
	/// Read recent contract logs and return the service tasks parsed from them.
	fn read_logs(&self) -> Box<dyn Iterator<Item=ServiceTask>>;
	/// Read pending service requests.
	fn read_pending_requests(&self) -> Box<dyn Iterator<Item=(bool, ServiceTask)>>;
	/// Publish generated server key.
	fn publish_generated_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public) -> Result<(), String>;
	/// Publish server key generation error.
	fn publish_server_key_generation_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String>;
	/// Publish retrieved server key.
	fn publish_retrieved_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public, threshold: usize) -> Result<(), String>;
	/// Publish server key retrieval error.
	fn publish_server_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String>;
	/// Publish stored document key.
	fn publish_stored_document_key(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String>;
	/// Publish document key store error.
	fn publish_document_key_store_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String>;
	/// Publish retrieved document key common.
	fn publish_retrieved_document_key_common(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, common_point: Public, threshold: usize) -> Result<(), String>;
	/// Publish retrieved document key personal.
	fn publish_retrieved_document_key_personal(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, participants: &[Address], decrypted_secret: Public, shadow: Bytes) -> Result<(), String>;
	/// Publish document key retrieval error.
	fn publish_document_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address) -> Result<(), String>;
}
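The trait splits into a poll side (update / read_logs / read_pending_requests) and a publish side. A hedged sketch of the driver loop a listener might run against any implementor; it is written against the ServiceContract trait above and compiles in this module's context, but the loop itself is illustrative, not the crate's actual scheduler:

// Illustrative driver: refresh state, then drain logs and pending work.
fn poll_once(contract: &dyn ServiceContract) -> Vec<ServiceTask> {
	let mut tasks = Vec::new();
	if !contract.update() {
		// Contract missing or chain not synced yet: nothing to do.
		return tasks;
	}
	tasks.extend(contract.read_logs());
	// Pending requests carry a "response still required" flag.
	tasks.extend(contract.read_pending_requests()
		.filter(|&(response_required, _)| response_required)
		.map(|(_, task)| task));
	tasks
}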

/// On-chain service contract.
pub struct OnChainServiceContract {
	/// Requests mask.
	mask: ApiMask,
	/// Blockchain client.
	client: Arc<dyn SecretStoreChain>,
	/// This node key pair.
	self_key_pair: Arc<dyn SigningKeyPair>,
	/// Contract registry name (if any).
	name: String,
	/// Contract address source.
	address_source: ContractAddress,
	/// Contract.
	data: RwLock<ServiceData>,
}

/// On-chain service contract data.
struct ServiceData {
	/// Current contract address.
	pub contract_address: Option<Address>,
	/// Last block we have read logs from.
	pub last_log_block: Option<H256>,
}

/// Pending requests iterator.
struct PendingRequestsIterator<F: Fn(U256) -> Option<(bool, ServiceTask)>> {
	/// Pending request read function.
	read_request: F,
	/// Current request index.
	index: U256,
	/// Requests length.
	length: U256,
}

/// Server key generation related functions.
struct ServerKeyGenerationService;
/// Server key retrieval related functions.
struct ServerKeyRetrievalService;
/// Document key store related functions.
struct DocumentKeyStoreService;
/// Document key shadow retrieval related functions.
struct DocumentKeyShadowRetrievalService;

impl OnChainServiceContract {
	/// Create new on-chain service contract.
	pub fn new(mask: ApiMask, client: Arc<dyn SecretStoreChain>, name: String, address_source: ContractAddress, self_key_pair: Arc<dyn SigningKeyPair>) -> Self {
		let contract = OnChainServiceContract {
			mask: mask,
			client: client,
			self_key_pair: self_key_pair,
			name: name,
			address_source: address_source,
			data: RwLock::new(ServiceData {
				contract_address: None,
				last_log_block: None,
			}),
		};

		contract.update_contract_address();
		contract
	}

	/// Send transaction to the service contract.
	fn send_contract_transaction<C, P>(&self, tx_name: &str, origin: &Address, server_key_id: &ServerKeyId, is_response_required: C, prepare_tx: P) -> Result<(), String>
		where C: FnOnce(&dyn SecretStoreChain, &Address, &ServerKeyId, &Address) -> bool,
			P: FnOnce(&dyn SecretStoreChain, &Address) -> Result<Bytes, String> {
		// only publish if contract address is set && client is online
		if !self.client.is_trusted() {
			return Err("trusted client is required to publish key".into())
		}

		// only publish key if contract waits for publication
		// failing is ok here - it could be that enough confirmations have been received
		// or key has been requested using HTTP API
		let self_address = public_to_address(self.self_key_pair.public());
		if !is_response_required(&*self.client, origin, server_key_id, &self_address) {
			return Ok(());
		}

		// prepare transaction data
		let transaction_data = prepare_tx(&*self.client, origin)?;

		// send transaction
		self.client.transact_contract(
			origin.clone(),
			transaction_data
		).map_err(|e| format!("{}", e))?;

		trace!(target: "secretstore", "{}: transaction {} sent to service contract",
			self.self_key_pair.public(), tx_name);

		Ok(())
	}

	/// Create task-specific pending requests iterator.
	fn create_pending_requests_iterator<
		C: 'static + Fn(&dyn SecretStoreChain, &Address, &BlockId) -> Result<U256, String>,
		R: 'static + Fn(&dyn SigningKeyPair, &dyn SecretStoreChain, &Address, &BlockId, U256) -> Result<(bool, ServiceTask), String>
	>(&self, client: Arc<dyn SecretStoreChain>, contract_address: &Address, block: &BlockId, get_count: C, read_item: R) -> Box<dyn Iterator<Item=(bool, ServiceTask)>> {
		get_count(&*client, contract_address, block)
			.map(|count| {
				let client = client.clone();
				let self_key_pair = self.self_key_pair.clone();
				let contract_address = contract_address.clone();
				let block = block.clone();
				Box::new(PendingRequestsIterator {
					read_request: move |index| read_item(&*self_key_pair, &*client, &contract_address, &block, index)
						.map_err(|error| {
							warn!(target: "secretstore", "{}: reading pending request failed: {}",
								self_key_pair.public(), error);
							error
						})
						.ok(),
					index: 0.into(),
					length: count,
				}) as Box<dyn Iterator<Item=(bool, ServiceTask)>>
			})
			.map_err(|error| {
				warn!(target: "secretstore", "{}: creating pending requests iterator failed: {}",
					self.self_key_pair.public(), error);
				error
			})
			.ok()
			.unwrap_or_else(|| Box::new(::std::iter::empty()))
	}

	/// Update service contract address.
	fn update_contract_address(&self) -> bool {
		let contract_address = self.client.read_contract_address(&self.name, &self.address_source);
		let mut data = self.data.write();
		if contract_address != data.contract_address {
			trace!(target: "secretstore", "{}: installing {} service contract from address {:?}",
				self.self_key_pair.public(), self.name, contract_address);

			data.contract_address = contract_address;
		}

		data.contract_address.is_some()
	}
}
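send_contract_transaction threads two injected closures through every publish call: a guard that can veto the transaction and a builder that produces its payload. A self-contained miniature of that shape (names invented for illustration):

// Miniature of the guard + payload-builder shape: publish only if the guard agrees.
fn send_with_guard<C, P>(is_required: C, prepare: P) -> Result<Option<Vec<u8>>, String>
where
	C: FnOnce() -> bool,
	P: FnOnce() -> Result<Vec<u8>, String>,
{
	if !is_required() {
		// Someone else already answered (or the HTTP API was used): skip quietly.
		return Ok(None);
	}
	prepare().map(Some)
}

fn main() {
	let sent = send_with_guard(|| true, || Ok(vec![0xde, 0xad])).unwrap();
	assert_eq!(sent, Some(vec![0xde, 0xad]));
	let skipped = send_with_guard(|| false, || Ok(vec![])).unwrap();
	assert_eq!(skipped, None);
}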

impl ServiceContract for OnChainServiceContract {
	fn update(&self) -> bool {
		self.update_contract_address() && self.client.is_trusted()
	}

	fn read_logs(&self) -> Box<dyn Iterator<Item=ServiceTask>> {
		if !self.client.is_trusted() {
			warn!(target: "secretstore", "{}: client is offline during read_logs call",
				self.self_key_pair.public());
			return Box::new(::std::iter::empty());
		}

		let address = match self.data.read().contract_address {
			Some(address) => address,
			None => return Box::new(::std::iter::empty()), // no contract installed
		};
		let confirmed_block = match self.client.get_confirmed_block_hash() {
			Some(confirmed_block) => confirmed_block,
			None => return Box::new(::std::iter::empty()), // no block with enough confirmations
		};

		let request_logs = self.client.retrieve_last_logs(Filter {
			from_block: BlockId::Hash(self.data.read().last_log_block.unwrap_or_else(|| confirmed_block)),
			address: Some(vec![address]),
			topics: vec![Some(mask_topics(&self.mask))],
		}).unwrap_or_default();

		let mut data = self.data.write();
		data.last_log_block = Some(confirmed_block.clone());

		Box::new(request_logs.into_iter()
			.filter_map(|log| {
				if log.topics[0] == *SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME_HASH {
					ServerKeyGenerationService::parse_log(&address, log)
				} else if log.topics[0] == *SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME_HASH {
					ServerKeyRetrievalService::parse_log(&address, log)
				} else if log.topics[0] == *DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME_HASH {
					DocumentKeyStoreService::parse_log(&address, log)
				} else if log.topics[0] == *DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH {
					DocumentKeyShadowRetrievalService::parse_common_request_log(&address, log)
				} else if log.topics[0] == *DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH {
					DocumentKeyShadowRetrievalService::parse_personal_request_log(&address, log)
				} else {
					Err("unknown type of log entry".into())
				}
				.map_err(|error| {
					warn!(target: "secretstore", "{}: error parsing log entry from service contract: {}",
						self.self_key_pair.public(), error);
					error
				})
				.ok()
			}).collect::<Vec<_>>().into_iter())
	}

	fn read_pending_requests(&self) -> Box<dyn Iterator<Item=(bool, ServiceTask)>> {
		if !self.client.is_trusted() {
			return Box::new(::std::iter::empty())
		}

		// we only need requests that are still pending at the last confirmed block
		let data = self.data.read();
		match data.contract_address {
			None => Box::new(::std::iter::empty()),
			Some(contract_address) => self.client.get_confirmed_block_hash()
				.map(|b| {
					let block = BlockId::Hash(b);
					let iter = match self.mask.server_key_generation_requests {
						true => Box::new(self.create_pending_requests_iterator(self.client.clone(), &contract_address, &block,
							&ServerKeyGenerationService::read_pending_requests_count,
							&ServerKeyGenerationService::read_pending_request)) as Box<dyn Iterator<Item=(bool, ServiceTask)>>,
						false => Box::new(::std::iter::empty()),
					};
					let iter = match self.mask.server_key_retrieval_requests {
						true => Box::new(iter.chain(self.create_pending_requests_iterator(self.client.clone(), &contract_address, &block,
							&ServerKeyRetrievalService::read_pending_requests_count,
							&ServerKeyRetrievalService::read_pending_request))),
						false => iter,
					};
					let iter = match self.mask.document_key_store_requests {
						true => Box::new(iter.chain(self.create_pending_requests_iterator(self.client.clone(), &contract_address, &block,
							&DocumentKeyStoreService::read_pending_requests_count,
							&DocumentKeyStoreService::read_pending_request))),
						false => iter,
					};
					let iter = match self.mask.document_key_shadow_retrieval_requests {
						true => Box::new(iter.chain(self.create_pending_requests_iterator(self.client.clone(), &contract_address, &block,
							&DocumentKeyShadowRetrievalService::read_pending_requests_count,
							&DocumentKeyShadowRetrievalService::read_pending_request))),
						false => iter
					};

					iter
				})
				.unwrap_or_else(|| Box::new(::std::iter::empty()))
		}
	}

	fn publish_generated_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public) -> Result<(), String> {
		self.send_contract_transaction("publish_generated_server_key", origin, server_key_id, ServerKeyGenerationService::is_response_required,
			|_, _| Ok(ServerKeyGenerationService::prepare_pubish_tx_data(server_key_id, &server_key)))
	}

	fn publish_server_key_generation_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
		self.send_contract_transaction("publish_server_key_generation_error", origin, server_key_id, ServerKeyGenerationService::is_response_required,
			|_, _| Ok(ServerKeyGenerationService::prepare_error_tx_data(server_key_id)))
	}

	fn publish_retrieved_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public, threshold: usize) -> Result<(), String> {
		let threshold = serialize_threshold(threshold)?;
		self.send_contract_transaction("publish_retrieved_server_key", origin, server_key_id, ServerKeyRetrievalService::is_response_required,
			|_, _| Ok(ServerKeyRetrievalService::prepare_pubish_tx_data(server_key_id, server_key, threshold)))
	}

	fn publish_server_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
		self.send_contract_transaction("publish_server_key_retrieval_error", origin, server_key_id, ServerKeyRetrievalService::is_response_required,
			|_, _| Ok(ServerKeyRetrievalService::prepare_error_tx_data(server_key_id)))
	}

	fn publish_stored_document_key(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
		self.send_contract_transaction("publish_stored_document_key", origin, server_key_id, DocumentKeyStoreService::is_response_required,
			|_, _| Ok(DocumentKeyStoreService::prepare_pubish_tx_data(server_key_id)))
	}

	fn publish_document_key_store_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
		self.send_contract_transaction("publish_document_key_store_error", origin, server_key_id, DocumentKeyStoreService::is_response_required,
			|_, _| Ok(DocumentKeyStoreService::prepare_error_tx_data(server_key_id)))
	}

	fn publish_retrieved_document_key_common(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, common_point: Public, threshold: usize) -> Result<(), String> {
		let threshold = serialize_threshold(threshold)?;
		self.send_contract_transaction("publish_retrieved_document_key_common", origin, server_key_id,
			|client, contract_address, server_key_id, key_server|
				DocumentKeyShadowRetrievalService::is_response_required(client, contract_address, server_key_id, requester, key_server),
			|_, _|
				Ok(DocumentKeyShadowRetrievalService::prepare_pubish_common_tx_data(server_key_id, requester, common_point, threshold))
		)
	}

	fn publish_retrieved_document_key_personal(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, participants: &[Address], decrypted_secret: Public, shadow: Bytes) -> Result<(), String> {
		self.send_contract_transaction("publish_retrieved_document_key_personal", origin, server_key_id, |_, _, _, _| true,
			move |client, address|
				DocumentKeyShadowRetrievalService::prepare_pubish_personal_tx_data(client, address, server_key_id, requester, participants, decrypted_secret, shadow)
		)
	}

	fn publish_document_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address) -> Result<(), String> {
		self.send_contract_transaction("publish_document_key_retrieval_error", origin, server_key_id,
			|client, contract_address, server_key_id, key_server|
				DocumentKeyShadowRetrievalService::is_response_required(client, contract_address, server_key_id, requester, key_server),
			|_, _|
				Ok(DocumentKeyShadowRetrievalService::prepare_error_tx_data(server_key_id, requester))
		)
	}
}

impl<F> Iterator for PendingRequestsIterator<F> where F: Fn(U256) -> Option<(bool, ServiceTask)> {
	type Item = (bool, ServiceTask);

	fn next(&mut self) -> Option<(bool, ServiceTask)> {
		if self.index >= self.length {
			return None;
		}

		let index = self.index.clone();
		self.index = self.index + 1;

		(self.read_request)(index)
	}
}
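PendingRequestsIterator is a plain counter that funnels each index through a fallible read closure; note that a failed read (None) ends the iteration early. A self-contained miniature of the same shape, with u64 in place of U256 and a vector in place of the contract:

// Miniature of PendingRequestsIterator: walk indices 0..length through a closure.
struct IndexedReads<F: Fn(u64) -> Option<String>> {
	read: F,
	index: u64,
	length: u64,
}

impl<F: Fn(u64) -> Option<String>> Iterator for IndexedReads<F> {
	type Item = String;

	fn next(&mut self) -> Option<String> {
		if self.index >= self.length {
			return None;
		}
		let index = self.index;
		self.index += 1;
		// As in the original: a failed read terminates the iteration early.
		(self.read)(index)
	}
}

fn main() {
	let store = vec!["a".to_string(), "b".to_string()];
	let iter = IndexedReads {
		read: |i| store.get(i as usize).cloned(),
		index: 0,
		length: store.len() as u64,
	};
	assert_eq!(iter.collect::<Vec<_>>(), vec!["a", "b"]);
}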

/// Returns the vector of log topics to listen to.
pub fn mask_topics(mask: &ApiMask) -> Vec<H256> {
	let mut topics = Vec::new();
	if mask.server_key_generation_requests {
		topics.push(*SERVER_KEY_GENERATION_REQUESTED_EVENT_NAME_HASH);
	}
	if mask.server_key_retrieval_requests {
		topics.push(*SERVER_KEY_RETRIEVAL_REQUESTED_EVENT_NAME_HASH);
	}
	if mask.document_key_store_requests {
		topics.push(*DOCUMENT_KEY_STORE_REQUESTED_EVENT_NAME_HASH);
	}
	if mask.document_key_shadow_retrieval_requests {
		topics.push(*DOCUMENT_KEY_COMMON_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH);
		topics.push(*DOCUMENT_KEY_PERSONAL_PART_RETRIEVAL_REQUESTED_EVENT_NAME_HASH);
	}
	topics
}
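Each topic above is just the Keccak-256 hash of an event signature string (this file derives them via the keccak-hash crate's `keccak` helper in the lazy_static block). A hedged, stand-alone sketch of the same derivation using the tiny-keccak 2.x Hasher API; the lockfile here pins the 1.x line, so treat the exact API version as an assumption:

use tiny_keccak::{Hasher, Keccak};

// Keccak-256 of an event signature; what the lazy_static block precomputes.
fn event_topic(signature: &[u8]) -> [u8; 32] {
	let mut hasher = Keccak::v256();
	hasher.update(signature);
	let mut topic = [0u8; 32];
	hasher.finalize(&mut topic);
	topic
}

fn main() {
	let topic = event_topic(b"ServerKeyRetrievalRequested(bytes32)");
	// A node subscribes to exactly the topics its ApiMask enables.
	println!("topic = 0x{}", topic.iter().map(|b| format!("{:02x}", b)).collect::<String>());
}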

impl ServerKeyGenerationService {
	/// Parse request log entry.
	pub fn parse_log(origin: &Address, raw_log: RawLog) -> Result<ServiceTask, String> {
		match service::events::server_key_generation_requested::parse_log(raw_log) {
			Ok(l) => Ok(ServiceTask::GenerateServerKey(origin.clone(), l.server_key_id, l.author, parse_threshold(l.threshold)?)),
			Err(e) => Err(format!("{}", e)),
		}
	}

	/// Check if response from key server is required.
	pub fn is_response_required(client: &dyn SecretStoreChain, contract_address: &Address, server_key_id: &ServerKeyId, key_server: &Address) -> bool {
		// we're checking confirmation in Latest block, because we're interested in latest contract state here
		let (encoded, decoder) = service::functions::is_server_key_generation_response_required::call(*server_key_id, *key_server);
		match client.call_contract(BlockId::Latest, *contract_address, encoded) {
			Err(_) => true,
			Ok(data) => decoder.decode(&data).unwrap_or(true)
		}
	}

	/// Prepare publish key transaction data.
	pub fn prepare_pubish_tx_data(server_key_id: &ServerKeyId, server_key_public: &Public) -> Bytes {
		service::functions::server_key_generated::encode_input(*server_key_id, server_key_public.as_bytes().to_vec())
	}

	/// Prepare error transaction data.
	pub fn prepare_error_tx_data(server_key_id: &ServerKeyId) -> Bytes {
		service::functions::server_key_generation_error::encode_input(*server_key_id)
	}

	/// Read pending requests count.
	fn read_pending_requests_count(client: &dyn SecretStoreChain, contract_address: &Address, block: &BlockId) -> Result<U256, String> {
		let (encoded, decoder) = service::functions::server_key_generation_requests_count::call();
		decoder.decode(&client.call_contract(*block, *contract_address, encoded)?)
			.map_err(|e| e.to_string())
	}

	/// Read pending request.
	fn read_pending_request(self_key_pair: &dyn SigningKeyPair, client: &dyn SecretStoreChain, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
		let self_address = public_to_address(self_key_pair.public());

		let (encoded, decoder) = service::functions::get_server_key_generation_request::call(index);
		let (server_key_id, author, threshold) = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?)
			.map_err(|e| e.to_string())?;
		let threshold = parse_threshold(threshold)?;

		let (encoded, decoder) = service::functions::is_server_key_generation_response_required::call(server_key_id, self_address);
		let not_confirmed = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?)
			.map_err(|e| e.to_string())?;

		let task = ServiceTask::GenerateServerKey(
			contract_address.clone(),
			server_key_id,
			author,
			threshold,
		);

		Ok((not_confirmed, task))
	}
}

impl ServerKeyRetrievalService {
	/// Parse request log entry.
	pub fn parse_log(origin: &Address, raw_log: RawLog) -> Result<ServiceTask, String> {
		match service::events::server_key_retrieval_requested::parse_log(raw_log) {
			Ok(l) => Ok(ServiceTask::RetrieveServerKey(*origin, l.server_key_id)),
			Err(e) => Err(e.to_string())
		}
	}

	/// Check if response from key server is required.
	pub fn is_response_required(client: &dyn SecretStoreChain, contract_address: &Address, server_key_id: &ServerKeyId, key_server: &Address) -> bool {
		// we're checking confirmation in Latest block, because we're interested in latest contract state here
		let (encoded, decoder) = service::functions::is_server_key_retrieval_response_required::call(*server_key_id, *key_server);
		match client.call_contract(BlockId::Latest, *contract_address, encoded) {
			Err(_) => true,
			Ok(data) => decoder.decode(&data).unwrap_or(true)
		}
	}

	/// Prepare publish key transaction data.
	pub fn prepare_pubish_tx_data(server_key_id: &ServerKeyId, server_key_public: Public, threshold: U256) -> Bytes {
		service::functions::server_key_retrieved::encode_input(*server_key_id, server_key_public.as_bytes().to_vec(), threshold)
	}

	/// Prepare error transaction data.
	pub fn prepare_error_tx_data(server_key_id: &ServerKeyId) -> Bytes {
		service::functions::server_key_retrieval_error::encode_input(*server_key_id)
	}

	/// Read pending requests count.
	fn read_pending_requests_count(client: &dyn SecretStoreChain, contract_address: &Address, block: &BlockId) -> Result<U256, String> {
		let (encoded, decoder) = service::functions::server_key_retrieval_requests_count::call();
		decoder.decode(&client.call_contract(*block, *contract_address, encoded)?)
			.map_err(|e| e.to_string())
	}

	/// Read pending request.
	fn read_pending_request(self_key_pair: &dyn SigningKeyPair, client: &dyn SecretStoreChain, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
		let self_address = public_to_address(self_key_pair.public());

		let (encoded, decoder) = service::functions::get_server_key_retrieval_request::call(index);
		let server_key_id = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?)
			.map_err(|e| e.to_string())?;

		let (encoded, decoder) = service::functions::is_server_key_retrieval_response_required::call(server_key_id, self_address);
		let not_confirmed = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?)
			.map_err(|e| e.to_string())?;

		let task = ServiceTask::RetrieveServerKey(
			*contract_address,
			server_key_id,
		);

		Ok((not_confirmed, task))
	}
}

impl DocumentKeyStoreService {
	/// Parse request log entry.
	pub fn parse_log(origin: &Address, raw_log: RawLog) -> Result<ServiceTask, String> {
		match service::events::document_key_store_requested::parse_log(raw_log) {
			Ok(l) => Ok(ServiceTask::StoreDocumentKey(
				origin.clone(),
				l.server_key_id,
				l.author,
				H512::from_slice(&*l.common_point),
				H512::from_slice(&*l.encrypted_point),
			)),
			Err(e) => Err(format!("{}", e)),
		}
	}

	/// Check if response from key server is required.
	pub fn is_response_required(client: &dyn SecretStoreChain, contract_address: &Address, server_key_id: &ServerKeyId, key_server: &Address) -> bool {
		// we're checking confirmation in Latest block, because we're interested in latest contract state here
		let (encoded, decoder) = service::functions::is_document_key_store_response_required::call(*server_key_id, *key_server);
		match client.call_contract(BlockId::Latest, *contract_address, encoded) {
			Err(_) => true,
			Ok(data) => decoder.decode(&data).unwrap_or(true)
		}
	}

	/// Prepare publish key transaction data.
	pub fn prepare_pubish_tx_data(server_key_id: &ServerKeyId) -> Bytes {
		service::functions::document_key_stored::encode_input(*server_key_id)
	}

	/// Prepare error transaction data.
	pub fn prepare_error_tx_data(server_key_id: &ServerKeyId) -> Bytes {
		service::functions::document_key_store_error::encode_input(*server_key_id)
	}

	/// Read pending requests count.
	fn read_pending_requests_count(client: &dyn SecretStoreChain, contract_address: &Address, block: &BlockId) -> Result<U256, String> {
		let (encoded, decoder) = service::functions::document_key_store_requests_count::call();
		decoder.decode(&client.call_contract(*block, *contract_address, encoded)?)
			.map_err(|e| e.to_string())
	}

	/// Read pending request.
	fn read_pending_request(self_key_pair: &dyn SigningKeyPair, client: &dyn SecretStoreChain, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
		let self_address = public_to_address(self_key_pair.public());
		let (encoded, decoder) = service::functions::get_document_key_store_request::call(index);
		let (server_key_id, author, common_point, encrypted_point) = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?)
			.map_err(|e| e.to_string())?;

		let (encoded, decoder) = service::functions::is_document_key_store_response_required::call(server_key_id, self_address);
		let not_confirmed = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?)
			.map_err(|e| e.to_string())?;

		let task = ServiceTask::StoreDocumentKey(
			*contract_address,
			server_key_id,
			author,
			Public::from_slice(&common_point),
			Public::from_slice(&encrypted_point),
		);

		Ok((not_confirmed, task))
	}
}

impl DocumentKeyShadowRetrievalService {
	/// Parse common request log entry.
	pub fn parse_common_request_log(origin: &Address, raw_log: RawLog) -> Result<ServiceTask, String> {
		match service::events::document_key_common_retrieval_requested::parse_log(raw_log) {
			Ok(l) => Ok(ServiceTask::RetrieveShadowDocumentKeyCommon(origin.clone(), l.server_key_id, l.requester)),
			Err(e) => Err(e.to_string())
		}
	}

	/// Parse personal request log entry.
	pub fn parse_personal_request_log(origin: &Address, raw_log: RawLog) -> Result<ServiceTask, String> {
		match service::events::document_key_personal_retrieval_requested::parse_log(raw_log) {
			Ok(l) => Ok(ServiceTask::RetrieveShadowDocumentKeyPersonal(origin.clone(), l.server_key_id, H512::from_slice(&*l.requester_public))),
			Err(e) => Err(e.to_string())
		}
	}

	/// Check if response from key server is required.
	pub fn is_response_required(client: &dyn SecretStoreChain, contract_address: &Address, server_key_id: &ServerKeyId, requester: &Address, key_server: &Address) -> bool {
		// we're checking confirmation in Latest block, because we're interested in latest contract state here
		let (encoded, decoder) = service::functions::is_document_key_shadow_retrieval_response_required::call(*server_key_id, *requester, *key_server);
		match client.call_contract(BlockId::Latest, *contract_address, encoded) {
			Err(_) => true,
			Ok(data) => decoder.decode(&data).unwrap_or(true)
		}
	}

	/// Prepare publish common key transaction data.
	pub fn prepare_pubish_common_tx_data(server_key_id: &ServerKeyId, requester: &Address, common_point: Public, threshold: U256) -> Bytes {
		service::functions::document_key_common_retrieved::encode_input(*server_key_id, *requester, common_point.as_bytes().to_vec(), threshold)
	}

	/// Prepare publish personal key transaction data.
	pub fn prepare_pubish_personal_tx_data(client: &dyn SecretStoreChain, contract_address: &Address, server_key_id: &ServerKeyId, requester: &Address, participants: &[Address], decrypted_secret: Public, shadow: Bytes) -> Result<Bytes, String> {
		let mut participants_mask = U256::default();
		for participant in participants {
			let participant_index = Self::map_key_server_address(client, contract_address, participant.clone())
				.map_err(|e| format!("Error searching for {} participant: {}", participant, e))?;
			participants_mask = participants_mask | (U256::one() << participant_index);
		}
		Ok(service::functions::document_key_personal_retrieved::encode_input(
			*server_key_id, *requester, participants_mask, decrypted_secret.as_bytes().to_vec(), shadow
		))
	}

	/// Prepare error transaction data.
	pub fn prepare_error_tx_data(server_key_id: &ServerKeyId, requester: &Address) -> Bytes {
		service::functions::document_key_shadow_retrieval_error::encode_input(*server_key_id, *requester)
	}

	/// Read pending requests count.
	fn read_pending_requests_count(client: &dyn SecretStoreChain, contract_address: &Address, block: &BlockId) -> Result<U256, String> {
		let (encoded, decoder) = service::functions::document_key_shadow_retrieval_requests_count::call();
		decoder.decode(&client.call_contract(*block, *contract_address, encoded)?)
			.map_err(|e| e.to_string())
	}

	/// Read pending request.
	fn read_pending_request(self_key_pair: &dyn SigningKeyPair, client: &dyn SecretStoreChain, contract_address: &Address, block: &BlockId, index: U256) -> Result<(bool, ServiceTask), String> {
		let self_address = public_to_address(self_key_pair.public());

		let (encoded, decoder) = service::functions::get_document_key_shadow_retrieval_request::call(index);
		let (server_key_id, requester, is_common_retrieval_completed) =
			decoder.decode(&client.call_contract(*block, *contract_address, encoded)?)
				.map_err(|e| e.to_string())?;

		let requester = Public::from_slice(&requester);
		let (encoded, decoder) = service::functions::is_document_key_shadow_retrieval_response_required::call(server_key_id, public_to_address(&requester), self_address);
		let not_confirmed = decoder.decode(&client.call_contract(*block, *contract_address, encoded)?)
			.map_err(|e| e.to_string())?;

		let task = match is_common_retrieval_completed {
			true => ServiceTask::RetrieveShadowDocumentKeyPersonal(
				*contract_address,
				server_key_id,
				requester,
			),
			false => ServiceTask::RetrieveShadowDocumentKeyCommon(
				*contract_address,
				server_key_id,
				public_to_address(&requester),
			),
		};

		Ok((not_confirmed, task))
	}

	/// Map from key server address to key server index.
	fn map_key_server_address(client: &dyn SecretStoreChain, contract_address: &Address, key_server: Address) -> Result<u8, String> {
		// we're checking confirmation in Latest block, because the tx must be applied on top of the latest state
		let (encoded, decoder) = service::functions::require_key_server::call(key_server);
		let index = decoder.decode(&client.call_contract(BlockId::Latest, *contract_address, encoded)?)
			.map_err(|e| e.to_string())?;

		if index > u8::max_value().into() {
			Err(format!("key server index is too big: {}", index))
		} else {
			let index: u32 = index.low_u32();
			Ok(index as u8)
		}
	}
}

/// Parse threshold (we only support 256 KS at max).
fn parse_threshold(threshold: U256) -> Result<usize, String> {
	let threshold_num = threshold.low_u64();
	if threshold != threshold_num.into() || threshold_num >= ::std::u8::MAX as u64 {
		return Err(format!("invalid threshold to use in service contract: {}", threshold));
	}

	Ok(threshold_num as usize)
}

/// Serialize threshold (we only support 256 KS at max).
fn serialize_threshold(threshold: usize) -> Result<U256, String> {
	if threshold > ::std::u8::MAX as usize {
		return Err(format!("invalid threshold to use in service contract: {}", threshold));
	}
	Ok(threshold.into())
}
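Both helpers clamp a threshold into the u8 range the contract encodes, and their bounds are asymmetric: parse_threshold rejects values >= u8::MAX while serialize_threshold rejects only values > u8::MAX. A self-contained miniature with u128 standing in for U256:

// Miniature of the two guards above, u128 standing in for U256.
fn parse_threshold(threshold: u128) -> Result<usize, String> {
	if threshold >= u8::MAX as u128 {
		return Err(format!("invalid threshold: {}", threshold));
	}
	Ok(threshold as usize)
}

fn serialize_threshold(threshold: usize) -> Result<u128, String> {
	if threshold > u8::MAX as usize {
		return Err(format!("invalid threshold: {}", threshold));
	}
	Ok(threshold as u128)
}

fn main() {
	assert_eq!(parse_threshold(10), Ok(10));
	assert!(parse_threshold(255).is_err());        // >= u8::MAX rejected on parse
	assert_eq!(serialize_threshold(255), Ok(255)); // but 255 still serializes
}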

#[cfg(test)]
pub mod tests {
	use parking_lot::Mutex;
	use bytes::Bytes;
	use crypto::publickey::Public;
	use ethereum_types::Address;
	use listener::service_contract_listener::ServiceTask;
	use {ServerKeyId};
	use super::ServiceContract;

	#[derive(Default)]
	pub struct DummyServiceContract {
		pub is_actual: bool,
		pub logs: Vec<ServiceTask>,
		pub pending_requests: Vec<(bool, ServiceTask)>,
		pub generated_server_keys: Mutex<Vec<(ServerKeyId, Public)>>,
		pub server_keys_generation_failures: Mutex<Vec<ServerKeyId>>,
		pub retrieved_server_keys: Mutex<Vec<(ServerKeyId, Public, usize)>>,
		pub server_keys_retrieval_failures: Mutex<Vec<ServerKeyId>>,
		pub stored_document_keys: Mutex<Vec<ServerKeyId>>,
		pub document_keys_store_failures: Mutex<Vec<ServerKeyId>>,
		pub common_shadow_retrieved_document_keys: Mutex<Vec<(ServerKeyId, Address, Public, usize)>>,
		pub personal_shadow_retrieved_document_keys: Mutex<Vec<(ServerKeyId, Address, Vec<Address>, Public, Bytes)>>,
		pub document_keys_shadow_retrieval_failures: Mutex<Vec<(ServerKeyId, Address)>>,
	}

	impl ServiceContract for DummyServiceContract {
		fn update(&self) -> bool {
			true
		}

		fn read_logs(&self) -> Box<dyn Iterator<Item=ServiceTask>> {
			Box::new(self.logs.clone().into_iter())
		}

		fn read_pending_requests(&self) -> Box<dyn Iterator<Item=(bool, ServiceTask)>> {
			Box::new(self.pending_requests.clone().into_iter())
		}

		fn publish_generated_server_key(&self, _origin: &Address, server_key_id: &ServerKeyId, server_key: Public) -> Result<(), String> {
			self.generated_server_keys.lock().push((server_key_id.clone(), server_key.clone()));
			Ok(())
		}

		fn publish_server_key_generation_error(&self, _origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
			self.server_keys_generation_failures.lock().push(server_key_id.clone());
			Ok(())
		}

		fn publish_retrieved_server_key(&self, _origin: &Address, server_key_id: &ServerKeyId, server_key: Public, threshold: usize) -> Result<(), String> {
			self.retrieved_server_keys.lock().push((server_key_id.clone(), server_key.clone(), threshold));
			Ok(())
		}

		fn publish_server_key_retrieval_error(&self, _origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
			self.server_keys_retrieval_failures.lock().push(server_key_id.clone());
			Ok(())
		}

		fn publish_stored_document_key(&self, _origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
			self.stored_document_keys.lock().push(server_key_id.clone());
			Ok(())
		}

		fn publish_document_key_store_error(&self, _origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
			self.document_keys_store_failures.lock().push(server_key_id.clone());
			Ok(())
		}

		fn publish_retrieved_document_key_common(&self, _origin: &Address, server_key_id: &ServerKeyId, requester: &Address, common_point: Public, threshold: usize) -> Result<(), String> {
			self.common_shadow_retrieved_document_keys.lock().push((server_key_id.clone(), requester.clone(), common_point.clone(), threshold));
			Ok(())
		}

		fn publish_retrieved_document_key_personal(&self, _origin: &Address, server_key_id: &ServerKeyId, requester: &Address, participants: &[Address], decrypted_secret: Public, shadow: Bytes) -> Result<(), String> {
			self.personal_shadow_retrieved_document_keys.lock().push((server_key_id.clone(), requester.clone(), participants.iter().cloned().collect(), decrypted_secret, shadow));
			Ok(())
		}

		fn publish_document_key_retrieval_error(&self, _origin: &Address, server_key_id: &ServerKeyId, requester: &Address) -> Result<(), String> {
			self.document_keys_shadow_retrieval_failures.lock().push((server_key_id.clone(), requester.clone()));
			Ok(())
		}
	}
}
@ -1,100 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use bytes::Bytes;
use ethereum_types::Address;
use crypto::publickey::Public;
use listener::service_contract::ServiceContract;
use listener::service_contract_listener::ServiceTask;
use {ServerKeyId};

/// Aggregated on-chain service contract.
pub struct OnChainServiceContractAggregate {
	/// All hosted service contracts.
	contracts: Vec<Arc<dyn ServiceContract>>,
}

impl OnChainServiceContractAggregate {
	/// Create new aggregated service contract listener.
	pub fn new(contracts: Vec<Arc<dyn ServiceContract>>) -> Self {
		debug_assert!(contracts.len() > 1);
		OnChainServiceContractAggregate {
			contracts: contracts,
		}
	}
}

impl ServiceContract for OnChainServiceContractAggregate {
	fn update(&self) -> bool {
		let mut result = false;
		for contract in &self.contracts {
			result = contract.update() || result;
		}
		result
	}

	fn read_logs(&self) -> Box<dyn Iterator<Item=ServiceTask>> {
		self.contracts.iter()
			.fold(Box::new(::std::iter::empty()) as Box<dyn Iterator<Item=ServiceTask>>, |i, c|
				Box::new(i.chain(c.read_logs())))
	}

	fn read_pending_requests(&self) -> Box<dyn Iterator<Item=(bool, ServiceTask)>> {
		self.contracts.iter()
			.fold(Box::new(::std::iter::empty()) as Box<dyn Iterator<Item=(bool, ServiceTask)>>, |i, c|
				Box::new(i.chain(c.read_pending_requests())))
	}

	// in current implementation all publish methods are independent of actual contract address
	// (tx is sent to origin) => we do not care which contract to use for publish data in methods below

	fn publish_generated_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public) -> Result<(), String> {
		self.contracts[0].publish_generated_server_key(origin, server_key_id, server_key)
	}

	fn publish_server_key_generation_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
		self.contracts[0].publish_server_key_generation_error(origin, server_key_id)
	}

	fn publish_retrieved_server_key(&self, origin: &Address, server_key_id: &ServerKeyId, server_key: Public, threshold: usize) -> Result<(), String> {
		self.contracts[0].publish_retrieved_server_key(origin, server_key_id, server_key, threshold)
	}

	fn publish_server_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
		self.contracts[0].publish_server_key_retrieval_error(origin, server_key_id)
	}

	fn publish_stored_document_key(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
		self.contracts[0].publish_stored_document_key(origin, server_key_id)
	}

	fn publish_document_key_store_error(&self, origin: &Address, server_key_id: &ServerKeyId) -> Result<(), String> {
		self.contracts[0].publish_document_key_store_error(origin, server_key_id)
	}

	fn publish_retrieved_document_key_common(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, common_point: Public, threshold: usize) -> Result<(), String> {
		self.contracts[0].publish_retrieved_document_key_common(origin, server_key_id, requester, common_point, threshold)
	}

	fn publish_retrieved_document_key_personal(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address, participants: &[Address], decrypted_secret: Public, shadow: Bytes) -> Result<(), String> {
		self.contracts[0].publish_retrieved_document_key_personal(origin, server_key_id, requester, participants, decrypted_secret, shadow)
	}

	fn publish_document_key_retrieval_error(&self, origin: &Address, server_key_id: &ServerKeyId, requester: &Address) -> Result<(), String> {
		self.contracts[0].publish_document_key_retrieval_error(origin, server_key_id, requester)
	}
}
|
|
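For reference, a minimal sketch of how the aggregate above is driven (illustrative only; `gen_contract` and `retr_contract` stand in for any two concrete `ServiceContract` implementations):

use std::sync::Arc;

fn drain_aggregate(gen_contract: Arc<dyn ServiceContract>, retr_contract: Arc<dyn ServiceContract>) {
	// The constructor debug-asserts that aggregation only makes sense for 2+ contracts.
	let aggregate = OnChainServiceContractAggregate::new(vec![gen_contract, retr_contract]);
	// Refresh every hosted contract, then process tasks from all of them.
	if aggregate.update() {
		for task in aggregate.read_logs() {
			// dispatch `task` to the service contract listener here
			let _ = task;
		}
	}
}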
File diff suppressed because it is too large
@ -1,78 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::collections::VecDeque;
use parking_lot::{Mutex, Condvar};

/// General deque-based tasks queue.
#[derive(Default)]
pub struct TasksQueue<Task: Clone> {
	/// Service event.
	service_event: Condvar,
	/// Service tasks queue.
	service_tasks: Mutex<VecDeque<Task>>,
}

impl<Task> TasksQueue<Task> where Task: Clone {
	/// Create new tasks queue.
	pub fn new() -> Self {
		TasksQueue {
			service_event: Condvar::new(),
			service_tasks: Mutex::new(VecDeque::new()),
		}
	}

	/// Get current tasks snapshot.
	#[cfg(test)]
	pub fn snapshot(&self) -> VecDeque<Task> {
		self.service_tasks.lock().clone()
	}

	/// Push task to the front of queue.
	pub fn push_front(&self, task: Task) {
		let mut service_tasks = self.service_tasks.lock();
		service_tasks.push_front(task);
		self.service_event.notify_all();
	}

	/// Push task to the back of queue.
	pub fn push(&self, task: Task) {
		let mut service_tasks = self.service_tasks.lock();
		service_tasks.push_back(task);
		self.service_event.notify_all();
	}

	/// Push many tasks to the back of queue.
	pub fn push_many<I: Iterator<Item=Task>>(&self, tasks: I) {
		let mut service_tasks = self.service_tasks.lock();
		let previous_len = service_tasks.len();
		service_tasks.extend(tasks);
		if service_tasks.len() != previous_len {
			self.service_event.notify_all();
		}
	}

	/// Wait for new task (task is removed from the front of queue).
	pub fn wait(&self) -> Task {
		let mut service_tasks = self.service_tasks.lock();
		// `while`, not `if`: with `notify_all` and several waiters, another
		// consumer may drain the queue between wakeup and re-locking.
		while service_tasks.is_empty() {
			self.service_event.wait(&mut service_tasks);
		}

		service_tasks.pop_front()
			.expect("the loop above only exits when the queue is non-empty; qed")
	}
}
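A minimal producer/consumer sketch of the queue above (illustrative only; a `String` stands in for a real task type):

use std::sync::Arc;
use std::thread;

fn demo_queue() {
	let queue = Arc::new(TasksQueue::<String>::new());

	// Consumer: `wait` blocks on the condvar until a task is pushed.
	let consumer = {
		let queue = queue.clone();
		thread::spawn(move || assert_eq!(queue.wait(), "retrieve-server-key"))
	};

	// Producer: pushing a task notifies every waiting consumer.
	queue.push("retrieve-server-key".to_owned());
	consumer.join().unwrap();
}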
@ -1,101 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

//! Secret Store DB migration module.

use std::fmt::{Display, Error as FmtError, Formatter};
use std::fs;
use std::io::{Error as IoError, ErrorKind as IoErrorKind, Read as _};
use std::path::PathBuf;

/// Current db version.
const CURRENT_VERSION: u8 = 4;
/// Database is assumed to be at the default version, when no version file is found.
const DEFAULT_VERSION: u8 = 3;
/// Version file name.
const VERSION_FILE_NAME: &str = "db_version";

/// Migration related errors.
#[derive(Debug)]
pub enum Error {
	/// Returned when current version cannot be read or guessed.
	UnknownDatabaseVersion,
	/// Existing DB is newer than the known one.
	FutureDBVersion,
	/// Migration using parity-ethereum 2.6.7 is required.
	MigrationWithLegacyVersionRequired,
	/// An io error has occurred during migration.
	Io(IoError),
}

impl Display for Error {
	fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
		let out = match *self {
			Error::UnknownDatabaseVersion =>
				"Current Secret Store database version cannot be read".into(),
			Error::FutureDBVersion =>
				"Secret Store database was created with a newer client version. \
				Upgrade your client or delete the DB and resync.".into(),
			Error::MigrationWithLegacyVersionRequired =>
				"Secret Store database was created with an older client version. \
				To migrate, use parity-ethereum v2.6.7, then retry using the latest version.".into(),
			Error::Io(ref err) =>
				format!("Unexpected io error on Secret Store database migration: {}.", err),
		};
		write!(f, "{}", out)
	}
}

impl From<IoError> for Error {
	fn from(err: IoError) -> Self {
		Error::Io(err)
	}
}

/// Apply all migrations if possible.
pub fn upgrade_db(db_path: &str) -> Result<(), Error> {
	match current_version(db_path)? {
		old_version if old_version < CURRENT_VERSION => {
			Err(Error::MigrationWithLegacyVersionRequired)
		},
		CURRENT_VERSION => Ok(()),
		_ => Err(Error::FutureDBVersion),
	}
}

/// Returns the version file path.
fn version_file_path(path: &str) -> PathBuf {
	let mut file_path = PathBuf::from(path);
	file_path.push(VERSION_FILE_NAME);
	file_path
}

/// Reads current database version from the file at given path.
/// If the file does not exist returns `DEFAULT_VERSION`.
fn current_version(path: &str) -> Result<u8, Error> {
	match fs::File::open(version_file_path(path)) {
		Err(ref err) if err.kind() == IoErrorKind::NotFound => Ok(DEFAULT_VERSION),
		Err(err) => Err(err.into()),
		Ok(mut file) => {
			let mut s = String::new();
			file.read_to_string(&mut s)?;
			u8::from_str_radix(&s, 10).map_err(|_| Error::UnknownDatabaseVersion)
		},
	}
}
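A sketch of how the version check above behaves (illustrative only; writing the `db_version` file by hand merely simulates what a real migration with parity-ethereum v2.6.7 would leave behind):

fn demo_upgrade(db_path: &str) -> Result<(), Error> {
	match upgrade_db(db_path) {
		// No version file => DEFAULT_VERSION (3) is assumed, which is below
		// CURRENT_VERSION (4), so a legacy migration is required first.
		Err(Error::MigrationWithLegacyVersionRequired) => {
			fs::write(version_file_path(db_path), b"4")?; // simulated migration result
			upgrade_db(db_path)
		},
		other => other,
	}
}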
@ -1,50 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use crypto::publickey::{KeyPair, Public, Signature, Error as EthKeyError, sign, public_to_address};
use ethereum_types::{H256, Address};
use blockchain::SigningKeyPair;

/// Node key pair, kept in memory in plain (unencrypted) form.
pub struct PlainNodeKeyPair {
	key_pair: KeyPair,
}

impl PlainNodeKeyPair {
	/// Create a new plain node key pair.
	pub fn new(key_pair: KeyPair) -> Self {
		PlainNodeKeyPair {
			key_pair,
		}
	}

	/// Get a reference to the underlying key pair.
	#[cfg(test)]
	pub fn key_pair(&self) -> &KeyPair {
		&self.key_pair
	}
}

impl SigningKeyPair for PlainNodeKeyPair {
	fn public(&self) -> &Public {
		self.key_pair.public()
	}

	fn address(&self) -> Address {
		public_to_address(self.key_pair.public())
	}

	fn sign(&self, data: &H256) -> Result<Signature, EthKeyError> {
		sign(self.key_pair.secret(), data)
	}
}
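A short usage sketch (illustrative only; it assumes parity-crypto's `Random` generator and `recover` function):

use crypto::publickey::{recover, Generator, Random};

fn demo_sign() -> Result<(), EthKeyError> {
	let key_pair = PlainNodeKeyPair::new(Random.generate());
	let message = H256::random();
	let signature = key_pair.sign(&message)?;
	// The recovered signer must match this node's address.
	assert_eq!(public_to_address(&recover(&signature, &message)?), key_pair.address());
	Ok(())
}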
@ -1,245 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::fmt;
use std::ops::Deref;
use rustc_hex::{self, FromHex};
use serde::{Serialize, Deserialize, Serializer, Deserializer};
use serde::de::{Visitor, Error as SerdeError};
use crypto::publickey::{Public, Secret, Signature};
use ethereum_types::{H160, H256};
use bytes::Bytes;
use types::Requester;

trait ToHex {
	fn to_hex(&self) -> String;
}

impl ToHex for Bytes {
	fn to_hex(&self) -> String {
		format!("0x{}", rustc_hex::ToHex::to_hex(&self[..]))
	}
}

impl ToHex for Signature {
	fn to_hex(&self) -> String {
		format!("0x{}", self)
	}
}

impl ToHex for Secret {
	fn to_hex(&self) -> String {
		format!("0x{}", self.to_hex())
	}
}

macro_rules! impl_to_hex {
	($name: ident) => (
		impl ToHex for $name {
			fn to_hex(&self) -> String {
				format!("{:#x}", self)
			}
		}
	);
}

macro_rules! impl_bytes_deserialize {
	($name: ident, $value: expr, true) => {
		$value[2..].from_hex().map($name).map_err(SerdeError::custom)
	};
	($name: ident, $value: expr, false) => {
		$value[2..].parse().map($name).map_err(SerdeError::custom)
	}
}

macro_rules! impl_bytes {
	($name: ident, $other: ident, $from_hex: ident, ($($trait: ident),*)) => {
		#[derive(Clone, Debug, PartialEq, Eq, $($trait,)*)]
		pub struct $name(pub $other);

		impl<T> From<T> for $name where $other: From<T> {
			fn from(s: T) -> $name {
				$name(s.into())
			}
		}

		impl Into<$other> for $name {
			fn into(self) -> $other {
				self.0
			}
		}

		impl Deref for $name {
			type Target = $other;

			fn deref(&self) -> &$other {
				&self.0
			}
		}

		impl Serialize for $name {
			fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {
				serializer.serialize_str(<$other as ToHex>::to_hex(&self.0).as_ref())
			}
		}

		impl<'a> Deserialize<'a> for $name {
			fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'a> {
				struct HexBytesVisitor;

				impl<'b> Visitor<'b> for HexBytesVisitor {
					type Value = $name;

					fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
						write!(formatter, "a hex-encoded bytes string")
					}

					fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
						if value.len() >= 2 && &value[0..2] == "0x" && value.len() & 1 == 0 {
							impl_bytes_deserialize!($name, value, $from_hex)
						} else {
							Err(SerdeError::custom("invalid format"))
						}
					}

					fn visit_string<E>(self, value: String) -> Result<Self::Value, E> where E: SerdeError {
						self.visit_str(value.as_ref())
					}
				}

				deserializer.deserialize_any(HexBytesVisitor)
			}
		}
	}
}

/// Serializable message hash.
pub type SerializableMessageHash = SerializableH256;
/// Serializable address.
pub type SerializableAddress = SerializableH160;

impl_to_hex!(H256);
impl_to_hex!(H160);
impl_to_hex!(Public);

impl_bytes!(SerializableBytes, Bytes, true, (Default));
impl_bytes!(SerializableH256, H256, false, (Default, PartialOrd, Ord));
impl_bytes!(SerializableH160, H160, false, (Default));
impl_bytes!(SerializablePublic, Public, false, (Default, PartialOrd, Ord));
impl_bytes!(SerializableSecret, Secret, false, ());
impl_bytes!(SerializableSignature, Signature, false, ());

/// Serializable shadow decryption result.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SerializableEncryptedDocumentKeyShadow {
	/// Decrypted secret point. It is partially decrypted if shadow decryption was requested.
	pub decrypted_secret: SerializablePublic,
	/// Shared common point.
	pub common_point: SerializablePublic,
	/// If shadow decryption was requested: shadow decryption coefficients, encrypted with requestor public.
	pub decrypt_shadows: Vec<SerializableBytes>,
}

/// Serializable requester identification data.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum SerializableRequester {
	/// Requested with server key id signature.
	Signature(SerializableSignature),
	/// Requested with public key.
	Public(SerializablePublic),
	/// Requested with verified address.
	Address(SerializableAddress),
}

impl From<SerializableRequester> for Requester {
	fn from(requester: SerializableRequester) -> Requester {
		match requester {
			SerializableRequester::Signature(signature) => Requester::Signature(signature.into()),
			SerializableRequester::Public(public) => Requester::Public(public.into()),
			SerializableRequester::Address(address) => Requester::Address(address.into()),
		}
	}
}

impl From<Requester> for SerializableRequester {
	fn from(requester: Requester) -> SerializableRequester {
		match requester {
			Requester::Signature(signature) => SerializableRequester::Signature(signature.into()),
			Requester::Public(public) => SerializableRequester::Public(public.into()),
			Requester::Address(address) => SerializableRequester::Address(address.into()),
		}
	}
}

#[cfg(test)]
mod tests {
	use serde_json;
	use super::*;
	use std::str::FromStr;

	macro_rules! do_test {
		($value: expr, $expected: expr, $expected_type: ident) => (
			let serialized = serde_json::to_string(&$value).unwrap();
			assert_eq!(serialized, $expected);
			let deserialized: $expected_type = serde_json::from_str(&serialized).unwrap();
			assert_eq!(deserialized, $value);
		);
	}

	#[test]
	fn serialize_and_deserialize_bytes() {
		do_test!(SerializableBytes(vec![1, 2, 3, 4]), "\"0x01020304\"".to_owned(), SerializableBytes);
	}

	#[test]
	fn serialize_and_deserialize_h256() {
		let s = "5a39ed1020c04d4d84539975b893a4e7c53eab6c2965db8bc3468093a31bc5ae";
		let h256 = SerializableH256(H256::from_str(s).unwrap());
		do_test!(h256, format!("\"0x{}\"", s), SerializableH256);
	}

	#[test]
	fn serialize_and_deserialize_h160() {
		let s = "c6d9d2cd449a754c494264e1809c50e34d64562b";
		let h160 = SerializableH160(H160::from_str(s).unwrap());
		do_test!(h160, format!("\"0x{}\"", s), SerializableH160);
	}

	#[test]
	fn serialize_and_deserialize_public() {
		let s = "cac6c205eb06c8308d65156ff6c862c62b000b8ead121a4455a8ddeff7248128d895692136f240d5d1614dc7cc4147b1bd584bd617e30560bb872064d09ea325";
		let public = SerializablePublic(s.parse().unwrap());
		do_test!(public, format!("\"0x{}\"", s), SerializablePublic);
	}

	#[test]
	fn serialize_and_deserialize_secret() {
		let s = "5a39ed1020c04d4d84539975b893a4e7c53eab6c2965db8bc3468093a31bc5ae";
		let secret = SerializableSecret(Secret::from_str(s).unwrap());
		do_test!(secret, format!("\"0x{}\"", s), SerializableSecret);
	}

	#[test]
	fn serialize_and_deserialize_signature() {
		let raw_r = "afafafafafafafafafafafbcbcbcbcbcbcbcbcbcbeeeeeeeeeeeeedddddddddd";
		let raw_s = "5a39ed1020c04d4d84539975b893a4e7c53eab6c2965db8bc3468093a31bc5ae";
		let r = H256::from_str(raw_r).unwrap();
		let s = H256::from_str(raw_s).unwrap();
		let v = 42u8;
		let signature = SerializableSignature(Signature::from_rsv(&r, &s, v));
		do_test!(signature, format!("\"0x{}{}{:x}\"", raw_r, raw_s, v), SerializableSignature);
	}
}
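Taken together, the wrappers above give hex-encoded, externally tagged JSON; a round-trip sketch for `SerializableRequester` (illustrative only):

fn demo_requester_json() {
	let requester = SerializableRequester::Address(SerializableAddress::from(H160::zero()));
	let json = serde_json::to_string(&requester).unwrap();
	// Externally tagged enum with a hex-encoded payload:
	assert_eq!(json, r#"{"Address":"0x0000000000000000000000000000000000000000"}"#);
	// ...and back into the plain `Requester` used by the rest of the code.
	let requester: Requester = serde_json::from_str::<SerializableRequester>(&json).unwrap().into();
	let _ = requester;
}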
@ -1,139 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::collections::BTreeSet;
use futures::Future;
use types::{Error, Public, ServerKeyId, MessageHash, EncryptedMessageSignature, RequestSignature, Requester,
	EncryptedDocumentKey, EncryptedDocumentKeyShadow, NodeId};

/// Server key (SK) generator.
pub trait ServerKeyGenerator {
	/// Generate new SK.
	/// `key_id` is the caller-provided identifier of the generated SK.
	/// `author` is the author of the key entry.
	/// `threshold + 1` is the minimal number of nodes required to restore the private key.
	/// Result is the public portion of the SK.
	fn generate_key(
		&self,
		key_id: ServerKeyId,
		author: Requester,
		threshold: usize,
	) -> Box<dyn Future<Item=Public, Error=Error> + Send>;
	/// Retrieve the public portion of a previously generated SK.
	/// `key_id` is the identifier of the previously generated SK.
	/// `author` is the same author that created the server key.
	fn restore_key_public(
		&self,
		key_id: ServerKeyId,
		author: Requester,
	) -> Box<dyn Future<Item=Public, Error=Error> + Send>;
}

/// Document key (DK) server.
pub trait DocumentKeyServer: ServerKeyGenerator {
	/// Store externally generated DK.
	/// `key_id` is the identifier of the previously generated SK.
	/// `author` is the same author that created the server key.
	/// `common_point` is the result of the `k * T` expression, where `T` is the generation point and `k` is a random scalar in the EC field.
	/// `encrypted_document_key` is the result of the `M + k * y` expression, where `M` is the unencrypted document key (a point on the EC),
	/// `k` is the same scalar used in the `common_point` calculation and `y` is the previously generated public part of the SK.
	fn store_document_key(
		&self,
		key_id: ServerKeyId,
		author: Requester,
		common_point: Public,
		encrypted_document_key: Public,
	) -> Box<dyn Future<Item=(), Error=Error> + Send>;
	/// Generate and store both SK and DK. This is a shortcut for consecutive calls of `generate_key` and `store_document_key`.
	/// The only difference is that the DK is generated by the DocumentKeyServer (which might be considered unsafe).
	/// `key_id` is the caller-provided identifier of the generated SK.
	/// `author` is the author of the server && document key entry.
	/// `threshold + 1` is the minimal number of nodes required to restore the private key.
	/// Result is a DK, encrypted with the caller's public key.
	fn generate_document_key(
		&self,
		key_id: ServerKeyId,
		author: Requester,
		threshold: usize,
	) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send>;
	/// Restore previously stored DK.
	/// The DK is decrypted on the key server (which might be considered unsafe), and then encrypted with the caller's public key.
	/// `key_id` is the identifier of the previously generated SK.
	/// `requester` is the one who requests access to the document key. The caller must be on the ACL for this function to succeed.
	/// Result is a DK, encrypted with the caller's public key.
	fn restore_document_key(
		&self,
		key_id: ServerKeyId,
		requester: Requester,
	) -> Box<dyn Future<Item=EncryptedDocumentKey, Error=Error> + Send>;
	/// Restore previously stored DK.
	/// To decrypt the DK on the client:
	/// 1) use the requester secret key to decrypt the secret coefficients from result.decrypt_shadows
	/// 2) calculate decrypt_shadows_sum = sum of all secrets from (1)
	/// 3) calculate decrypt_shadow_point: decrypt_shadows_sum * result.common_point
	/// 4) calculate decrypted_secret: result.decrypted_secret + decrypt_shadow_point
	/// Result is a DK shadow.
	fn restore_document_key_shadow(
		&self,
		key_id: ServerKeyId,
		requester: Requester,
	) -> Box<dyn Future<Item=EncryptedDocumentKeyShadow, Error=Error> + Send>;
}

/// Message signer.
pub trait MessageSigner: ServerKeyGenerator {
	/// Generate Schnorr signature for message with previously generated SK.
	/// `key_id` is the caller-provided identifier of the generated SK.
	/// `requester` is the one who requests access to the server key private part.
	/// `message` is the message to be signed.
	/// Result is a signed message, encrypted with the caller's public key.
	fn sign_message_schnorr(
		&self,
		key_id: ServerKeyId,
		requester: Requester,
		message: MessageHash,
	) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send>;
	/// Generate ECDSA signature for message with previously generated SK.
	/// WARNING: only possible when SK was generated using t <= 2 * N.
	/// `key_id` is the caller-provided identifier of the generated SK.
	/// `signature` is `key_id`, signed with the caller's key.
	/// `message` is the message to be signed.
	/// Result is a signed message, encrypted with the caller's public key.
	fn sign_message_ecdsa(
		&self,
		key_id: ServerKeyId,
		signature: Requester,
		message: MessageHash,
	) -> Box<dyn Future<Item=EncryptedMessageSignature, Error=Error> + Send>;
}

/// Administrative sessions server.
pub trait AdminSessionsServer {
	/// Change the servers set so that nodes in `new_servers_set` become owners of shares for all keys,
	/// and old nodes (i.e. cluster nodes not in `new_servers_set`) have their databases cleared.
	/// WARNING: newly generated keys will be distributed among all cluster nodes. So this session
	/// must be followed by a cluster nodes change (either via contract, or config files).
	fn change_servers_set(
		&self,
		old_set_signature: RequestSignature,
		new_set_signature: RequestSignature,
		new_servers_set: BTreeSet<NodeId>,
	) -> Box<dyn Future<Item=(), Error=Error> + Send>;
}

/// Key server.
pub trait KeyServer: AdminSessionsServer + DocumentKeyServer + MessageSigner + Send + Sync {
}
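The shadow-decryption recipe documented on `restore_document_key_shadow` above, written out as client-side code (a sketch only — `ecies_decrypt`, `scalar_zero`, `scalar_add`, `ec_mul` and `ec_add` are hypothetical stand-ins for the requester's ECIES decryption and secp256k1 scalar/point arithmetic; `Secret` and `Public` are the parity-crypto types):

fn decrypt_document_key(requester_secret: &Secret, result: EncryptedDocumentKeyShadow) -> Public {
	// 1) use the requester secret key to decrypt the secret coefficients
	let shadows: Vec<Secret> = result.decrypt_shadows.expect("shadow decryption was requested").iter()
		.map(|encrypted| ecies_decrypt(requester_secret, encrypted)) // hypothetical helper
		.collect();
	// 2) decrypt_shadows_sum = sum of all secrets from (1), modulo the curve order
	let shadows_sum = shadows.into_iter().fold(scalar_zero(), scalar_add); // hypothetical helpers
	// 3) decrypt_shadow_point = decrypt_shadows_sum * common_point
	let shadow_point = ec_mul(&result.common_point.expect("shadow decryption was requested"), &shadows_sum); // hypothetical helper
	// 4) document key M = decrypted_secret + decrypt_shadow_point
	ec_add(&result.decrypted_secret, &shadow_point) // hypothetical helper
}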
@ -1,148 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::collections::BTreeMap;

use blockchain::ContractAddress;
use {bytes, ethereum_types};

/// Node id.
pub type NodeId = crypto::publickey::Public;
/// Server key id. When the key is used to encrypt a document, it could be the document contents hash.
pub type ServerKeyId = ethereum_types::H256;
/// Encrypted document key type.
pub type EncryptedDocumentKey = bytes::Bytes;
/// Message hash.
pub type MessageHash = ethereum_types::H256;
/// Message signature.
pub type EncryptedMessageSignature = bytes::Bytes;
/// Request signature type.
pub type RequestSignature = crypto::publickey::Signature;
/// Public key type.
pub use crypto::publickey::Public;

/// Node address.
#[derive(Debug, Clone)]
pub struct NodeAddress {
	/// IP address.
	pub address: String,
	/// IP port.
	pub port: u16,
}

/// Secret store configuration.
#[derive(Debug)]
pub struct ServiceConfiguration {
	/// HTTP listener address. If None, HTTP API is disabled.
	pub listener_address: Option<NodeAddress>,
	/// Service contract address.
	pub service_contract_address: Option<ContractAddress>,
	/// Server key generation service contract address.
	pub service_contract_srv_gen_address: Option<ContractAddress>,
	/// Server key retrieval service contract address.
	pub service_contract_srv_retr_address: Option<ContractAddress>,
	/// Document key store service contract address.
	pub service_contract_doc_store_address: Option<ContractAddress>,
	/// Document key shadow retrieval service contract address.
	pub service_contract_doc_sretr_address: Option<ContractAddress>,
	/// ACL check contract address. If None, everyone has access to all keys. Useful for tests only.
	pub acl_check_contract_address: Option<ContractAddress>,
	/// Cluster configuration.
	pub cluster_config: ClusterConfiguration,
	/// Allowed CORS domains.
	pub cors: Option<Vec<String>>,
}

/// Key server cluster configuration.
#[derive(Debug)]
pub struct ClusterConfiguration {
	/// This node address.
	pub listener_address: NodeAddress,
	/// All cluster nodes addresses.
	pub nodes: BTreeMap<crypto::publickey::Public, NodeAddress>,
	/// Key Server Set contract address. If None, servers from the 'nodes' map are used.
	pub key_server_set_contract_address: Option<ContractAddress>,
	/// Allow outbound connections to 'higher' nodes.
	/// This is useful for tests, but a bit slower in production.
	pub allow_connecting_to_higher_nodes: bool,
	/// Administrator public key.
	pub admin_public: Option<Public>,
	/// Whether the key servers set change session should be started when the servers set changes.
	/// This will only work when the servers set is configured using the KeyServerSet contract.
	pub auto_migrate_enabled: bool,
}

/// Shadow decryption result.
#[derive(Clone, Debug, PartialEq)]
pub struct EncryptedDocumentKeyShadow {
	/// Decrypted secret point. It is partially decrypted if shadow decryption was requested.
	pub decrypted_secret: crypto::publickey::Public,
	/// Shared common point.
	pub common_point: Option<crypto::publickey::Public>,
	/// If shadow decryption was requested: shadow decryption coefficients, encrypted with requestor public.
	pub decrypt_shadows: Option<Vec<Vec<u8>>>,
}

/// Requester identification data.
#[derive(Debug, Clone)]
pub enum Requester {
	/// Requested with server key id signature.
	Signature(crypto::publickey::Signature),
	/// Requested with public key.
	Public(crypto::publickey::Public),
	/// Requested with verified address.
	Address(ethereum_types::Address),
}

impl Default for Requester {
	fn default() -> Self {
		Requester::Signature(Default::default())
	}
}

impl Requester {
	/// Recover the requester public key, if possible.
	pub fn public(&self, server_key_id: &ServerKeyId) -> Result<Public, String> {
		match *self {
			Requester::Signature(ref signature) => crypto::publickey::recover(signature, server_key_id)
				.map_err(|e| format!("bad signature: {}", e)),
			Requester::Public(ref public) => Ok(public.clone()),
			Requester::Address(_) => Err("cannot recover public from address".into()),
		}
	}

	/// Derive the requester address, if possible.
	pub fn address(&self, server_key_id: &ServerKeyId) -> Result<crypto::publickey::Address, String> {
		self.public(server_key_id)
			.map(|p| crypto::publickey::public_to_address(&p))
	}
}

impl From<crypto::publickey::Signature> for Requester {
	fn from(signature: crypto::publickey::Signature) -> Requester {
		Requester::Signature(signature)
	}
}

impl From<ethereum_types::Public> for Requester {
	fn from(public: ethereum_types::Public) -> Requester {
		Requester::Public(public)
	}
}

impl From<ethereum_types::Address> for Requester {
	fn from(address: ethereum_types::Address) -> Requester {
		Requester::Address(address)
	}
}
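How the `Requester` variants above resolve to a public key or address, as a short sketch (illustrative only; `Random`, `sign` and `public_to_address` are parity-crypto items):

fn demo_requester(server_key_id: &ServerKeyId) {
	use crypto::publickey::{public_to_address, sign, Generator, Random};

	let key_pair = Random.generate();

	// A signature over the server key id lets the key server recover the requester public key...
	let by_signature: Requester = sign(key_pair.secret(), server_key_id).unwrap().into();
	assert_eq!(by_signature.address(server_key_id).unwrap(), public_to_address(key_pair.public()));

	// ...while a bare address can never be converted back into a public key.
	let by_address = Requester::from(public_to_address(key_pair.public()));
	assert!(by_address.public(server_key_id).is_err());
}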
@ -1,202 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

use std::fmt;
use std::net;
use std::io::Error as IoError;

use crypto;

/// Secret store error.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Error {
	/// Invalid node address has been passed.
	InvalidNodeAddress,
	/// Invalid node id has been passed.
	InvalidNodeId,
	/// Session with the given id already exists.
	DuplicateSessionId,
	/// No active session with given id.
	NoActiveSessionWithId,
	/// Invalid threshold value has been passed.
	/// Threshold value must be in [0; n - 1], where n is the number of nodes participating in the encryption.
	NotEnoughNodesForThreshold,
	/// Current state of encryption/decryption session does not allow this request to proceed.
	/// Reschedule this request for later processing.
	TooEarlyForRequest,
	/// Current state of encryption/decryption session does not allow this request to proceed.
	/// This means that either there is some comm-failure or the node is misbehaving/cheating.
	InvalidStateForRequest,
	/// Request cannot be sent/received from this node.
	InvalidNodeForRequest,
	/// Message or some data in the message was recognized as invalid.
	/// This means that the node is misbehaving/cheating.
	InvalidMessage,
	/// Message version is not supported.
	InvalidMessageVersion,
	/// Message is invalid because of replay-attack protection.
	ReplayProtection,
	/// Connection to node, required for this session, is not established.
	NodeDisconnected,
	/// Server key with this ID is already generated.
	ServerKeyAlreadyGenerated,
	/// Server key with this ID is not yet generated.
	ServerKeyIsNotFound,
	/// Document key with this ID is already stored.
	DocumentKeyAlreadyStored,
	/// Document key with this ID is not yet stored.
	DocumentKeyIsNotFound,
	/// Consensus is temporarily unreachable. This means that something is currently blocking us from either forming
	/// a consensus group (e.g. being disconnected from too many nodes which AGREE to participate in consensus)
	/// or from rejecting the request (being disconnected from AccessDenied-nodes).
	ConsensusTemporaryUnreachable,
	/// Consensus is unreachable. It doesn't mean that it will ALWAYS remain unreachable, but right NOW we have
	/// enough nodes confirmed that they do not want to be a part of consensus. Example: we're connected to 10
	/// of 100 nodes. Key threshold is 6 (i.e. 7 nodes are required for consensus). 4 nodes are responding with
	/// a reject => consensus is considered unreachable, even though another 90 nodes could still respond with OK.
	ConsensusUnreachable,
	/// Acl storage error.
	AccessDenied,
	/// Can't start session, because an exclusive session is active.
	ExclusiveSessionActive,
	/// Can't start exclusive session, because there are other active sessions.
	HasActiveSessions,
	/// Insufficient requester data.
	InsufficientRequesterData(String),
	/// Cryptographic error.
	EthKey(String),
	/// I/O error has occurred.
	Io(String),
	/// Deserialization error has occurred.
	Serde(String),
	/// Hyper error.
	Hyper(String),
	/// Database-related error.
	Database(String),
	/// Internal error.
	Internal(String),
}

impl Error {
	/// Is this a fatal error? Non-fatal means that it is possible to replay the same request with a non-zero
	/// chance of success. I.e. the error is not about the request itself (or current environment factors that
	/// are affecting request processing), but about the current SecretStore state.
	pub fn is_non_fatal(&self) -> bool {
		match *self {
			// non-fatal errors:

			// session start errors => restarting session is a solution
			Error::DuplicateSessionId | Error::NoActiveSessionWithId |
			// unexpected message errors => restarting session/excluding node is a solution
			Error::TooEarlyForRequest | Error::InvalidStateForRequest | Error::InvalidNodeForRequest |
			// invalid message errors => restarting/updating/excluding node is a solution
			Error::InvalidMessage | Error::InvalidMessageVersion | Error::ReplayProtection |
			// connectivity problems => waiting for reconnect && restarting session is a solution
			Error::NodeDisconnected |
			// temporary (?) consensus problems, related to other non-fatal errors => restarting is probably (!) a solution
			Error::ConsensusTemporaryUnreachable |
			// exclusive session errors => waiting && restarting is a solution
			Error::ExclusiveSessionActive | Error::HasActiveSessions => true,

			// fatal errors:

			// config-related errors
			Error::InvalidNodeAddress | Error::InvalidNodeId |
			// wrong session input params errors
			Error::NotEnoughNodesForThreshold | Error::ServerKeyAlreadyGenerated | Error::ServerKeyIsNotFound |
			Error::DocumentKeyAlreadyStored | Error::DocumentKeyIsNotFound | Error::InsufficientRequesterData(_) |
			// access denied/consensus error
			Error::AccessDenied | Error::ConsensusUnreachable |
			// indeterminate internal errors, which could be either fatal (db failure, invalid request), or not (network error),
			// but we still consider these errors as fatal
			Error::EthKey(_) | Error::Serde(_) | Error::Hyper(_) | Error::Database(_) | Error::Internal(_) | Error::Io(_) => false,
		}
	}
}

impl fmt::Display for Error {
	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
		match *self {
			Error::InvalidNodeAddress => write!(f, "invalid node address has been passed"),
			Error::InvalidNodeId => write!(f, "invalid node id has been passed"),
			Error::DuplicateSessionId => write!(f, "session with the same id is already registered"),
			Error::NoActiveSessionWithId => write!(f, "no active session with given id"),
			Error::NotEnoughNodesForThreshold => write!(f, "not enough nodes for passed threshold"),
			Error::TooEarlyForRequest => write!(f, "session is not yet ready to process this request"),
			Error::InvalidStateForRequest => write!(f, "session is in invalid state for processing this request"),
			Error::InvalidNodeForRequest => write!(f, "invalid node for this request"),
			Error::InvalidMessage => write!(f, "invalid message is received"),
			Error::InvalidMessageVersion => write!(f, "unsupported message is received"),
			Error::ReplayProtection => write!(f, "replay message is received"),
			Error::NodeDisconnected => write!(f, "node required for this operation is currently disconnected"),
			Error::ServerKeyAlreadyGenerated => write!(f, "Server key with this ID is already generated"),
			Error::ServerKeyIsNotFound => write!(f, "Server key with this ID is not found"),
			Error::DocumentKeyAlreadyStored => write!(f, "Document key with this ID is already stored"),
			Error::DocumentKeyIsNotFound => write!(f, "Document key with this ID is not found"),
			Error::ConsensusUnreachable => write!(f, "Consensus unreachable"),
			Error::ConsensusTemporaryUnreachable => write!(f, "Consensus temporarily unreachable"),
			Error::AccessDenied => write!(f, "Access denied"),
			Error::ExclusiveSessionActive => write!(f, "Exclusive session active"),
			Error::HasActiveSessions => write!(f, "Unable to start exclusive session"),
			Error::InsufficientRequesterData(ref e) => write!(f, "Insufficient requester data: {}", e),
			Error::EthKey(ref e) => write!(f, "cryptographic error: {}", e),
			Error::Hyper(ref msg) => write!(f, "Hyper error: {}", msg),
			Error::Serde(ref msg) => write!(f, "Serialization error: {}", msg),
			Error::Database(ref msg) => write!(f, "Database error: {}", msg),
			Error::Internal(ref msg) => write!(f, "Internal error: {}", msg),
			Error::Io(ref msg) => write!(f, "IO error: {}", msg),
		}
	}
}

impl From<crypto::publickey::Error> for Error {
	fn from(err: crypto::publickey::Error) -> Self {
		Error::EthKey(err.into())
	}
}

impl From<crypto::Error> for Error {
	fn from(err: crypto::Error) -> Self {
		Error::EthKey(err.to_string())
	}
}

impl From<IoError> for Error {
	fn from(err: IoError) -> Self {
		Error::Io(err.to_string())
	}
}

impl Into<String> for Error {
	fn into(self) -> String {
		format!("{}", self)
	}
}

impl From<net::AddrParseError> for Error {
	fn from(err: net::AddrParseError) -> Error {
		Error::Internal(err.to_string())
	}
}

impl From<secp256k1::Error> for Error {
	fn from(e: secp256k1::Error) -> Self {
		match e {
			secp256k1::Error::InvalidSecretKey => Error::EthKey("Invalid SecretKey".into()),
			_ => Error::EthKey(format!("Crypto error: {}", e)),
		}
	}
}
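The fatal/non-fatal split above is what makes a simple replay policy possible; a sketch (illustrative only; the request closure and attempt limit are assumptions):

fn with_retries<T>(mut request: impl FnMut() -> Result<T, Error>) -> Result<T, Error> {
	const MAX_ATTEMPTS: usize = 3;
	let mut attempt = 0;
	loop {
		match request() {
			// Non-fatal errors describe transient SecretStore state => replaying may succeed.
			Err(ref e) if e.is_non_fatal() && attempt + 1 < MAX_ATTEMPTS => attempt += 1,
			// Fatal errors (bad input, access denied, ...) and successes are returned immediately.
			other => return other,
		}
	}
}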
@ -1,23 +0,0 @@
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

//! Types used in the public API.

mod all;
mod error;

pub use self::all::*;
pub use self::error::*;