openethereum/secret-store/src/key_server_set.rs

// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.
// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use bytes::Bytes;
use call_contract::CallContract;
use ethabi::FunctionOutputDecoder;
use ethcore::client::{BlockChainClient, BlockId, ChainNotify, Client, NewBlocks};
use ethereum_types::{Address, H256};
use ethkey::public_to_address;
use parking_lot::Mutex;
use std::{
collections::{BTreeMap, HashSet},
net::SocketAddr,
sync::Arc,
};
use trusted_client::TrustedClient;
use types::{Error, NodeAddress, NodeId, Public};
use ContractAddress;
use NodeKeyPair;
use_contract!(key_server, "res/key_server_set.json");
/// Name of KeyServerSet contract in registry.
const KEY_SERVER_SET_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_server_set";
/// Number of blocks (since latest new_set change) required before actually starting migration.
const MIGRATION_CONFIRMATIONS_REQUIRED: u64 = 5;
/// Number of blocks to wait before retrying the same migration transaction (be it start or confirmation).
const TRANSACTION_RETRY_INTERVAL_BLOCKS: u64 = 30;
#[derive(Default, Debug, Clone, PartialEq)]
/// Key Server Set state.
pub struct KeyServerSetSnapshot {
/// Current set of key servers.
pub current_set: BTreeMap<NodeId, SocketAddr>,
/// New set of key servers.
pub new_set: BTreeMap<NodeId, SocketAddr>,
/// Current migration data.
pub migration: Option<KeyServerSetMigration>,
}
#[derive(Default, Debug, Clone, PartialEq)]
/// Key server set migration.
pub struct KeyServerSetMigration {
/// Migration id.
pub id: H256,
/// Migration set of key servers. It is the new_set at the moment of migration start.
pub set: BTreeMap<NodeId, SocketAddr>,
/// Master node of the migration process.
pub master: NodeId,
/// Is migration confirmed by this node?
pub is_confirmed: bool,
}
/// Key Server Set
pub trait KeyServerSet: Send + Sync {
/// Is this node currently isolated from the set?
fn is_isolated(&self) -> bool;
/// Get server set state.
fn snapshot(&self) -> KeyServerSetSnapshot;
/// Start migration.
fn start_migration(&self, migration_id: H256);
/// Confirm migration.
fn confirm_migration(&self, migration_id: H256);
}
/// On-chain Key Server set implementation.
pub struct OnChainKeyServerSet {
/// Cached on-chain contract.
contract: Mutex<CachedContract>,
}
#[derive(Default, Debug, Clone, PartialEq)]
/// Non-finalized new_set.
struct FutureNewSet {
/// New servers set.
pub new_set: BTreeMap<NodeId, SocketAddr>,
/// Hash of the block at which this set first appeared.
pub block: H256,
}
#[derive(Default, Debug, Clone, PartialEq)]
/// Migration-related transaction information.
struct PreviousMigrationTransaction {
/// Migration id.
pub migration_id: H256,
/// Latest block number at the time this transaction was sent.
pub block: u64,
}
/// Cached on-chain Key Server set contract.
struct CachedContract {
/// Blockchain client.
client: TrustedClient,
/// Contract address source.
contract_address_source: Option<ContractAddress>,
/// Current contract address.
contract_address: Option<Address>,
/// Is auto-migrate enabled?
auto_migrate_enabled: bool,
/// Current contract state.
snapshot: KeyServerSetSnapshot,
/// Scheduled contract state (if any).
future_new_set: Option<FutureNewSet>,
/// Previous start migration transaction.
start_migration_tx: Option<PreviousMigrationTransaction>,
/// Previous confirm migration transaction.
confirm_migration_tx: Option<PreviousMigrationTransaction>,
/// This node key pair.
self_key_pair: Arc<NodeKeyPair>,
}
impl OnChainKeyServerSet {
pub fn new(
trusted_client: TrustedClient,
contract_address_source: Option<ContractAddress>,
self_key_pair: Arc<NodeKeyPair>,
auto_migrate_enabled: bool,
key_servers: BTreeMap<Public, NodeAddress>,
) -> Result<Arc<Self>, Error> {
let client = trusted_client.get_untrusted();
let key_server_set = Arc::new(OnChainKeyServerSet {
contract: Mutex::new(CachedContract::new(
trusted_client,
contract_address_source,
self_key_pair,
auto_migrate_enabled,
key_servers,
)?),
});
client
.ok_or_else(|| {
Error::Internal("Constructing OnChainKeyServerSet without active Client".into())
})?
.add_notify(key_server_set.clone());
Ok(key_server_set)
}
}
impl KeyServerSet for OnChainKeyServerSet {
fn is_isolated(&self) -> bool {
self.contract.lock().is_isolated()
}
fn snapshot(&self) -> KeyServerSetSnapshot {
self.contract.lock().snapshot()
}
fn start_migration(&self, migration_id: H256) {
self.contract.lock().start_migration(migration_id)
}
fn confirm_migration(&self, migration_id: H256) {
self.contract.lock().confirm_migration(migration_id);
}
}
impl ChainNotify for OnChainKeyServerSet {
fn new_blocks(&self, new_blocks: NewBlocks) {
if new_blocks.has_more_blocks_to_import {
return;
}
let (enacted, retracted) = new_blocks.route.into_enacted_retracted();
if !enacted.is_empty() || !retracted.is_empty() {
self.contract.lock().update(enacted, retracted)
}
}
}
trait KeyServerSubset<F: Fn(Vec<u8>) -> Result<Vec<u8>, String>> {
fn read_list(&self, f: &F) -> Result<Vec<Address>, String>;
fn read_public(&self, address: Address, f: &F) -> Result<Bytes, String>;
fn read_address(&self, address: Address, f: &F) -> Result<String, String>;
}
struct CurrentKeyServerSubset;
impl<F: Fn(Vec<u8>) -> Result<Vec<u8>, String>> KeyServerSubset<F> for CurrentKeyServerSubset {
fn read_list(&self, f: &F) -> Result<Vec<Address>, String> {
let (encoded, decoder) = key_server::functions::get_current_key_servers::call();
decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
}
fn read_public(&self, address: Address, f: &F) -> Result<Bytes, String> {
let (encoded, decoder) =
key_server::functions::get_current_key_server_public::call(address);
decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
}
fn read_address(&self, address: Address, f: &F) -> Result<String, String> {
let (encoded, decoder) =
key_server::functions::get_current_key_server_address::call(address);
decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
}
}
struct MigrationKeyServerSubset;
impl<F: Fn(Vec<u8>) -> Result<Vec<u8>, String>> KeyServerSubset<F> for MigrationKeyServerSubset {
fn read_list(&self, f: &F) -> Result<Vec<Address>, String> {
let (encoded, decoder) = key_server::functions::get_migration_key_servers::call();
decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
}
fn read_public(&self, address: Address, f: &F) -> Result<Bytes, String> {
let (encoded, decoder) =
key_server::functions::get_migration_key_server_public::call(address);
decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
}
fn read_address(&self, address: Address, f: &F) -> Result<String, String> {
let (encoded, decoder) =
key_server::functions::get_migration_key_server_address::call(address);
decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
}
}
struct NewKeyServerSubset;
impl<F: Fn(Vec<u8>) -> Result<Vec<u8>, String>> KeyServerSubset<F> for NewKeyServerSubset {
fn read_list(&self, f: &F) -> Result<Vec<Address>, String> {
let (encoded, decoder) = key_server::functions::get_new_key_servers::call();
decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
}
fn read_public(&self, address: Address, f: &F) -> Result<Bytes, String> {
let (encoded, decoder) = key_server::functions::get_new_key_server_public::call(address);
decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
}
fn read_address(&self, address: Address, f: &F) -> Result<String, String> {
let (encoded, decoder) = key_server::functions::get_new_key_server_address::call(address);
decoder.decode(&f(encoded)?).map_err(|e| e.to_string())
}
}
impl CachedContract {
pub fn new(
client: TrustedClient,
contract_address_source: Option<ContractAddress>,
self_key_pair: Arc<NodeKeyPair>,
auto_migrate_enabled: bool,
key_servers: BTreeMap<Public, NodeAddress>,
) -> Result<Self, Error> {
let server_set = match contract_address_source.is_none() {
true => key_servers
.into_iter()
.map(|(p, addr)| {
let addr =
format!("{}:{}", addr.address, addr.port)
.parse()
.map_err(|err| {
Error::Internal(format!("error parsing node address: {}", err))
})?;
Ok((p, addr))
})
.collect::<Result<BTreeMap<_, _>, Error>>()?,
false => Default::default(),
};
let mut contract = CachedContract {
client: client,
contract_address_source: contract_address_source,
contract_address: None,
auto_migrate_enabled: auto_migrate_enabled,
future_new_set: None,
confirm_migration_tx: None,
start_migration_tx: None,
snapshot: KeyServerSetSnapshot {
current_set: server_set.clone(),
new_set: server_set,
..Default::default()
},
self_key_pair: self_key_pair,
};
contract.update_contract_address();
Ok(contract)
}
pub fn update_contract_address(&mut self) {
if let Some(ref contract_address_source) = self.contract_address_source {
let contract_address = self.client.read_contract_address(
KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.into(),
contract_address_source,
);
if contract_address != self.contract_address {
trace!(target: "secretstore", "{}: Configuring for key server set contract from address {:?}",
self.self_key_pair.public(), contract_address);
self.contract_address = contract_address;
}
}
}
pub fn update(&mut self, enacted: Vec<H256>, retracted: Vec<H256>) {
// no need to update when the server set is hardcoded
if self.contract_address_source.is_none() {
return;
}
if let Some(client) = self.client.get() {
// read new snapshot from registry (if something has changed)
if !enacted.is_empty() || !retracted.is_empty() {
self.update_contract_address();
self.read_from_registry(&*client);
}
// update number of confirmations (if there's future new set)
self.update_number_of_confirmations_if_required(&*client);
}
}
fn is_isolated(&self) -> bool {
!self
.snapshot
.current_set
.contains_key(self.self_key_pair.public())
}
fn snapshot(&self) -> KeyServerSetSnapshot {
self.snapshot.clone()
}
fn start_migration(&mut self, migration_id: H256) {
// trust is not needed here, because this is a reaction to data read via the trusted client
if let (Some(client), Some(contract_address)) =
(self.client.get_untrusted(), self.contract_address.as_ref())
{
// check if we need to send start migration transaction
if !update_last_transaction_block(&*client, &migration_id, &mut self.start_migration_tx)
{
return;
}
// prepare transaction data
let transaction_data =
key_server::functions::start_migration::encode_input(migration_id);
// send transaction
match self
.client
.transact_contract(*contract_address, transaction_data)
{
Ok(_) => {
trace!(target: "secretstore_net", "{}: sent auto-migration start transaction",
self.self_key_pair.public())
}
Err(error) => {
warn!(target: "secretstore_net", "{}: failed to submit auto-migration start transaction: {}",
self.self_key_pair.public(), error)
}
}
}
}
fn confirm_migration(&mut self, migration_id: H256) {
// trust is not needed here, because we have already completed the action
if let (Some(client), Some(contract_address)) = (self.client.get(), self.contract_address) {
// check if we need to send confirm migration transaction
if !update_last_transaction_block(
&*client,
&migration_id,
&mut self.confirm_migration_tx,
) {
return;
}
// prepare transaction data
let transaction_data =
key_server::functions::confirm_migration::encode_input(migration_id);
// send transaction
match self
.client
.transact_contract(contract_address, transaction_data)
{
Ok(_) => {
trace!(target: "secretstore_net", "{}: sent auto-migration confirm transaction",
self.self_key_pair.public())
}
Err(error) => {
warn!(target: "secretstore_net", "{}: failed to submit auto-migration confirmation transaction: {}",
self.self_key_pair.public(), error)
}
}
}
}
fn read_from_registry(&mut self, client: &Client) {
let contract_address = match self.contract_address {
Some(contract_address) => contract_address,
None => {
// no contract installed => empty snapshot
// WARNING: after restart, current_set will be reset to the set from the configuration file,
// even though we reset it to the empty set here. We do not consider this an issue,
// because it is the administrator's responsibility.
self.snapshot = Default::default();
self.future_new_set = None;
return;
}
};
let do_call = |data| client.call_contract(BlockId::Latest, contract_address, data);
let current_set = Self::read_key_server_set(CurrentKeyServerSubset, &do_call);
// read migration-related data if auto migration is enabled
let (new_set, migration) = match self.auto_migrate_enabled {
true => {
let new_set = Self::read_key_server_set(NewKeyServerSubset, &do_call);
let migration_set = Self::read_key_server_set(MigrationKeyServerSubset, &do_call);
let migration_id = match migration_set.is_empty() {
false => {
let (encoded, decoder) = key_server::functions::get_migration_id::call();
do_call(encoded)
.map_err(|e| e.to_string())
.and_then(|data| decoder.decode(&data).map_err(|e| e.to_string()))
.map_err(|err| { trace!(target: "secretstore", "Error {} reading migration id from contract", err); err })
.ok()
}
true => None,
};
let migration_master = match migration_set.is_empty() {
false => {
let (encoded, decoder) =
key_server::functions::get_migration_master::call();
do_call(encoded)
.map_err(|e| e.to_string())
.and_then(|data| decoder.decode(&data).map_err(|e| e.to_string()))
.map_err(|err| { trace!(target: "secretstore", "Error {} reading migration master from contract", err); err })
.ok()
.and_then(|address| current_set.keys().chain(migration_set.keys())
.find(|public| public_to_address(public) == address)
.cloned())
}
true => None,
};
let is_migration_confirmed = match migration_set.is_empty() {
false
if current_set.contains_key(self.self_key_pair.public())
|| migration_set.contains_key(self.self_key_pair.public()) =>
{
let (encoded, decoder) =
key_server::functions::is_migration_confirmed::call(
self.self_key_pair.address(),
);
do_call(encoded)
.map_err(|e| e.to_string())
.and_then(|data| decoder.decode(&data).map_err(|e| e.to_string()))
.map_err(|err| { trace!(target: "secretstore", "Error {} reading migration confirmation from contract", err); err })
.ok()
}
_ => None,
};
let migration = match (
migration_set.is_empty(),
migration_id,
migration_master,
is_migration_confirmed,
) {
(
false,
Some(migration_id),
Some(migration_master),
Some(is_migration_confirmed),
) => Some(KeyServerSetMigration {
id: migration_id,
master: migration_master,
set: migration_set,
is_confirmed: is_migration_confirmed,
}),
_ => None,
};
(new_set, migration)
}
false => (current_set.clone(), None),
};
let mut new_snapshot = KeyServerSetSnapshot {
current_set: current_set,
new_set: new_set,
migration: migration,
};
// we might want to adjust new_set if auto migration is enabled
if self.auto_migrate_enabled {
let block = client.block_hash(BlockId::Latest).unwrap_or_default();
update_future_set(&mut self.future_new_set, &mut new_snapshot, block);
}
self.snapshot = new_snapshot;
}
fn read_key_server_set<T, F>(subset: T, do_call: F) -> BTreeMap<Public, SocketAddr>
where
T: KeyServerSubset<F>,
F: Fn(Vec<u8>) -> Result<Vec<u8>, String>,
{
let mut key_servers = BTreeMap::new();
let mut key_servers_addresses = HashSet::new();
let key_servers_list = subset.read_list(&do_call)
.map_err(|err| { warn!(target: "secretstore_net", "error {} reading list of key servers from contract", err); err })
.unwrap_or_default();
for key_server in key_servers_list {
let key_server_public = subset.read_public(key_server, &do_call).and_then(|p| {
if p.len() == 64 {
Ok(Public::from_slice(&p))
} else {
Err(format!("Invalid public length {}", p.len()))
}
});
let key_server_address: Result<SocketAddr, _> = subset
.read_address(key_server, &do_call)
.and_then(|a| a.parse().map_err(|e| format!("Invalid ip address: {}", e)));
// only add successfully parsed nodes
match (key_server_public, key_server_address) {
(Ok(key_server_public), Ok(key_server_address)) => {
if !key_servers_addresses.insert(key_server_address.clone()) {
warn!(target: "secretstore_net", "the same address ({}) specified twice in list of contracts. Ignoring server {}",
key_server_address, key_server_public);
continue;
}
key_servers.insert(key_server_public, key_server_address);
}
(Err(public_err), _) => {
warn!(target: "secretstore_net", "received invalid public from key server set contract: {}", public_err)
}
(_, Err(ip_err)) => {
warn!(target: "secretstore_net", "received invalid IP from key server set contract: {}", ip_err)
}
}
}
key_servers
}
fn update_number_of_confirmations_if_required(&mut self, client: &BlockChainClient) {
if !self.auto_migrate_enabled {
return;
}
update_number_of_confirmations(
&|| latest_block_hash(&*client),
&|block| block_confirmations(&*client, block),
&mut self.future_new_set,
&mut self.snapshot,
);
}
}
/// Check if migration between two key server sets is required. We do not need migration if only
/// addresses have changed - simply adjusting connections is enough in this case.
pub fn is_migration_required(
current_set: &BTreeMap<NodeId, SocketAddr>,
new_set: &BTreeMap<NodeId, SocketAddr>,
) -> bool {
let no_nodes_removed = current_set.keys().all(|n| new_set.contains_key(n));
let no_nodes_added = new_set.keys().all(|n| current_set.contains_key(n));
!no_nodes_removed || !no_nodes_added
}
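/// Update `future_new_set` from the snapshot just read from the contract. When migration is
/// required, the changed new_set is moved out of the visible snapshot and kept here until the
/// block where it first appeared has gathered enough confirmations (see
/// `update_number_of_confirmations`), so that a short-lived fork cannot trigger migration.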
fn update_future_set(
future_new_set: &mut Option<FutureNewSet>,
new_snapshot: &mut KeyServerSetSnapshot,
block: H256,
) {
// migration has already started => no need to delay visibility
if new_snapshot.migration.is_some() {
*future_new_set = None;
return;
}
// no migration is required => no need to delay visibility
if !is_migration_required(&new_snapshot.current_set, &new_snapshot.new_set) {
*future_new_set = None;
return;
}
// when auto-migrate is enabled, we do not want to start migration right after new_set is changed, because:
// 1) there could be a fork && we could start migrating to the forked version (and potentially lose secrets)
// 2) there must be some period for new_set changes to finalize (i.e. adding/removing more servers)
let mut new_set = new_snapshot.current_set.clone();
::std::mem::swap(&mut new_set, &mut new_snapshot.new_set);
// if nothing has changed in future_new_set, then we want to preserve previous block hash
let block = match Some(&new_set) == future_new_set.as_ref().map(|f| &f.new_set) {
true => future_new_set
.as_ref()
.map(|f| &f.block)
.cloned()
.unwrap_or_else(|| block),
false => block,
};
*future_new_set = Some(FutureNewSet {
new_set: new_set,
block: block,
});
}
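/// Promote `future_new_set.new_set` into `snapshot.new_set` once the block where it first
/// appeared has at least MIGRATION_CONFIRMATIONS_REQUIRED confirmations. If that block has
/// disappeared from the canonical chain (reorg), confirmation counting restarts from the
/// latest block.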
fn update_number_of_confirmations<F1: Fn() -> H256, F2: Fn(H256) -> Option<u64>>(
latest_block: &F1,
confirmations: &F2,
future_new_set: &mut Option<FutureNewSet>,
snapshot: &mut KeyServerSetSnapshot,
) {
match future_new_set.as_mut() {
// no future new set is scheduled => do nothing,
None => return,
// else we should calculate number of confirmations for future new set
Some(future_new_set) => match confirmations(future_new_set.block.clone()) {
// we have enough confirmations => should move new_set from future to snapshot
Some(confirmations) if confirmations >= MIGRATION_CONFIRMATIONS_REQUIRED => (),
// not enough confirmations => do nothing
Some(_) => return,
// if number of confirmations is None, then a reorg has happened && we need to reset block
// (a more intelligent strategy is possible, but let's stick to the simplest one)
None => {
future_new_set.block = latest_block();
return;
}
},
}
let future_new_set = future_new_set
.take()
.expect("we only pass through match above when future_new_set is some; qed");
snapshot.new_set = future_new_set.new_set;
}
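/// Check whether a migration transaction for `migration_id` should be sent now, recording the
/// send if so. Returns false if the same transaction has already been sent within the last
/// TRANSACTION_RETRY_INTERVAL_BLOCKS blocks; otherwise returns true and updates the record.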
fn update_last_transaction_block(
client: &Client,
migration_id: &H256,
previous_transaction: &mut Option<PreviousMigrationTransaction>,
) -> bool {
let last_block = client.block_number(BlockId::Latest).unwrap_or_default();
match previous_transaction.as_ref() {
// no previous transaction => send immediately
None => (),
// previous transaction has been sent for other migration process => send immediately
Some(tx) if tx.migration_id != *migration_id => (),
// if we have sent the same type of transaction recently => do nothing (hope it will be mined eventually)
// if we have sent the same transaction some time ago =>
// assume that our tx queue was full
// or we didn't have enough eth for this tx
// or the transaction has been removed from the queue (and never reached any miner node)
// if we have restarted after sending tx => assume we have never sent it
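// e.g. with TRANSACTION_RETRY_INTERVAL_BLOCKS = 30: a transaction sent at block 100
// is not resent until the latest block reaches 130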
Some(tx) => {
if tx.block > last_block || last_block - tx.block < TRANSACTION_RETRY_INTERVAL_BLOCKS {
return false;
}
}
}
*previous_transaction = Some(PreviousMigrationTransaction {
migration_id: migration_id.clone(),
block: last_block,
});
true
}
fn latest_block_hash(client: &BlockChainClient) -> H256 {
client.block_hash(BlockId::Latest).unwrap_or_default()
}
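/// Number of confirmations of the given block, i.e. how many blocks have been mined on top
/// of it (e.g. block number 95 with latest block 100 => Some(5)). Returns None if the block
/// is no longer part of the canonical chain (e.g. after a reorg).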
fn block_confirmations(client: &BlockChainClient, block: H256) -> Option<u64> {
client
.block_number(BlockId::Hash(block))
.and_then(|block| {
client
.block_number(BlockId::Latest)
.map(|last_block| (block, last_block))
})
.map(|(block, last_block)| last_block - block)
}
#[cfg(test)]
pub mod tests {
use super::{
update_future_set, update_number_of_confirmations, FutureNewSet, KeyServerSet,
KeyServerSetSnapshot, MIGRATION_CONFIRMATIONS_REQUIRED,
};
use ethereum_types::H256;
use ethkey::Public;
use std::{collections::BTreeMap, net::SocketAddr};
#[derive(Default)]
pub struct MapKeyServerSet {
is_isolated: bool,
nodes: BTreeMap<Public, SocketAddr>,
}
impl MapKeyServerSet {
pub fn new(is_isolated: bool, nodes: BTreeMap<Public, SocketAddr>) -> Self {
MapKeyServerSet {
is_isolated: is_isolated,
nodes: nodes,
}
}
}
impl KeyServerSet for MapKeyServerSet {
fn is_isolated(&self) -> bool {
self.is_isolated
}
fn snapshot(&self) -> KeyServerSetSnapshot {
KeyServerSetSnapshot {
current_set: self.nodes.clone(),
new_set: self.nodes.clone(),
..Default::default()
}
}
fn start_migration(&self, _migration_id: H256) {
unimplemented!("test-only")
}
fn confirm_migration(&self, _migration_id: H256) {
unimplemented!("test-only")
}
}
#[test]
fn future_set_is_updated_to_none_when_migration_has_already_started() {
let mut future_new_set = Some(Default::default());
let mut new_snapshot = KeyServerSetSnapshot {
migration: Some(Default::default()),
..Default::default()
};
let new_snapshot_copy = new_snapshot.clone();
update_future_set(&mut future_new_set, &mut new_snapshot, Default::default());
assert_eq!(future_new_set, None);
assert_eq!(new_snapshot, new_snapshot_copy);
}
#[test]
fn future_set_is_updated_to_none_when_no_migration_is_required() {
let node_id = Default::default();
let address1 = "127.0.0.1:12000".parse().unwrap();
let address2 = "127.0.0.1:12001".parse().unwrap();
// addresses are different, but node set is the same => no migration is required
let mut future_new_set = Some(Default::default());
let mut new_snapshot = KeyServerSetSnapshot {
current_set: vec![(node_id, address1)].into_iter().collect(),
new_set: vec![(node_id, address2)].into_iter().collect(),
..Default::default()
};
let new_snapshot_copy = new_snapshot.clone();
update_future_set(&mut future_new_set, &mut new_snapshot, Default::default());
assert_eq!(future_new_set, None);
assert_eq!(new_snapshot, new_snapshot_copy);
// everything is the same => no migration is required
let mut future_new_set = Some(Default::default());
let mut new_snapshot = KeyServerSetSnapshot {
current_set: vec![(node_id, address1)].into_iter().collect(),
new_set: vec![(node_id, address1)].into_iter().collect(),
..Default::default()
};
let new_snapshot_copy = new_snapshot.clone();
update_future_set(&mut future_new_set, &mut new_snapshot, Default::default());
assert_eq!(future_new_set, None);
assert_eq!(new_snapshot, new_snapshot_copy);
}
#[test]
fn future_set_is_initialized() {
let address = "127.0.0.1:12000".parse().unwrap();
let mut future_new_set = None;
let mut new_snapshot = KeyServerSetSnapshot {
current_set: vec![(1.into(), address)].into_iter().collect(),
new_set: vec![(2.into(), address)].into_iter().collect(),
..Default::default()
};
update_future_set(&mut future_new_set, &mut new_snapshot, Default::default());
assert_eq!(
future_new_set,
Some(FutureNewSet {
new_set: vec![(2.into(), address)].into_iter().collect(),
block: Default::default(),
})
);
assert_eq!(
new_snapshot,
KeyServerSetSnapshot {
current_set: vec![(1.into(), address)].into_iter().collect(),
new_set: vec![(1.into(), address)].into_iter().collect(),
..Default::default()
}
);
}
#[test]
fn future_set_is_updated_when_set_differs() {
let address = "127.0.0.1:12000".parse().unwrap();
let mut future_new_set = Some(FutureNewSet {
new_set: vec![(2.into(), address)].into_iter().collect(),
block: Default::default(),
});
let mut new_snapshot = KeyServerSetSnapshot {
current_set: vec![(1.into(), address)].into_iter().collect(),
new_set: vec![(3.into(), address)].into_iter().collect(),
..Default::default()
};
update_future_set(&mut future_new_set, &mut new_snapshot, 1.into());
assert_eq!(
future_new_set,
Some(FutureNewSet {
new_set: vec![(3.into(), address)].into_iter().collect(),
block: 1.into(),
})
);
assert_eq!(
new_snapshot,
KeyServerSetSnapshot {
current_set: vec![(1.into(), address)].into_iter().collect(),
new_set: vec![(1.into(), address)].into_iter().collect(),
..Default::default()
}
);
}
#[test]
fn future_set_is_not_updated_when_set_is_the_same() {
let address = "127.0.0.1:12000".parse().unwrap();
let mut future_new_set = Some(FutureNewSet {
new_set: vec![(2.into(), address)].into_iter().collect(),
block: Default::default(),
});
let mut new_snapshot = KeyServerSetSnapshot {
current_set: vec![(1.into(), address)].into_iter().collect(),
new_set: vec![(2.into(), address)].into_iter().collect(),
..Default::default()
};
update_future_set(&mut future_new_set, &mut new_snapshot, 1.into());
assert_eq!(
future_new_set,
Some(FutureNewSet {
new_set: vec![(2.into(), address)].into_iter().collect(),
block: Default::default(),
})
);
assert_eq!(
new_snapshot,
KeyServerSetSnapshot {
current_set: vec![(1.into(), address)].into_iter().collect(),
new_set: vec![(1.into(), address)].into_iter().collect(),
..Default::default()
}
);
}
#[test]
fn when_updating_confirmations_nothing_is_changed_if_no_future_set() {
let address = "127.0.0.1:12000".parse().unwrap();
let mut future_new_set = None;
let mut snapshot = KeyServerSetSnapshot {
current_set: vec![(1.into(), address)].into_iter().collect(),
new_set: vec![(1.into(), address)].into_iter().collect(),
..Default::default()
};
let snapshot_copy = snapshot.clone();
update_number_of_confirmations(
&|| 1.into(),
&|_| Some(MIGRATION_CONFIRMATIONS_REQUIRED),
&mut future_new_set,
&mut snapshot,
);
assert_eq!(future_new_set, None);
assert_eq!(snapshot, snapshot_copy);
}
#[test]
fn when_updating_confirmations_migration_is_scheduled() {
let address = "127.0.0.1:12000".parse().unwrap();
let mut future_new_set = Some(FutureNewSet {
new_set: vec![(2.into(), address)].into_iter().collect(),
block: Default::default(),
});
let mut snapshot = KeyServerSetSnapshot {
current_set: vec![(1.into(), address)].into_iter().collect(),
new_set: vec![(1.into(), address)].into_iter().collect(),
..Default::default()
};
update_number_of_confirmations(
&|| 1.into(),
&|_| Some(MIGRATION_CONFIRMATIONS_REQUIRED),
&mut future_new_set,
&mut snapshot,
);
assert_eq!(future_new_set, None);
assert_eq!(
snapshot,
KeyServerSetSnapshot {
current_set: vec![(1.into(), address)].into_iter().collect(),
new_set: vec![(2.into(), address)].into_iter().collect(),
..Default::default()
}
);
}
#[test]
fn when_updating_confirmations_migration_is_not_scheduled_when_not_enough_confirmations() {
let address = "127.0.0.1:12000".parse().unwrap();
let mut future_new_set = Some(FutureNewSet {
new_set: vec![(2.into(), address)].into_iter().collect(),
block: Default::default(),
});
let mut snapshot = KeyServerSetSnapshot {
current_set: vec![(1.into(), address)].into_iter().collect(),
new_set: vec![(1.into(), address)].into_iter().collect(),
..Default::default()
};
let future_new_set_copy = future_new_set.clone();
let snapshot_copy = snapshot.clone();
update_number_of_confirmations(
&|| 1.into(),
&|_| Some(MIGRATION_CONFIRMATIONS_REQUIRED - 1),
&mut future_new_set,
&mut snapshot,
);
assert_eq!(future_new_set, future_new_set_copy);
assert_eq!(snapshot, snapshot_copy);
}
#[test]
fn when_updating_confirmations_migration_is_reset_when_reorganized() {
let address = "127.0.0.1:12000".parse().unwrap();
let mut future_new_set = Some(FutureNewSet {
new_set: vec![(2.into(), address)].into_iter().collect(),
block: 1.into(),
});
let mut snapshot = KeyServerSetSnapshot {
current_set: vec![(1.into(), address)].into_iter().collect(),
new_set: vec![(1.into(), address)].into_iter().collect(),
..Default::default()
};
let snapshot_copy = snapshot.clone();
update_number_of_confirmations(&|| 2.into(), &|_| None, &mut future_new_set, &mut snapshot);
assert_eq!(
future_new_set,
Some(FutureNewSet {
new_set: vec![(2.into(), address)].into_iter().collect(),
block: 2.into(),
})
);
assert_eq!(snapshot, snapshot_copy);
}
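
// Illustrative test of `is_migration_required` semantics: migration is required only when
// the set of node ids changes; if only addresses change, adjusting connections is enough.
// Uses the same dummy node ids (1.into(), 2.into()) as the tests above.
#[test]
fn migration_is_required_only_when_node_set_changes() {
    use super::is_migration_required;

    let address1: SocketAddr = "127.0.0.1:12000".parse().unwrap();
    let address2: SocketAddr = "127.0.0.1:12001".parse().unwrap();

    // same node id, different address => no migration is required
    let current_set: BTreeMap<Public, SocketAddr> =
        vec![(1.into(), address1)].into_iter().collect();
    let same_nodes_set = vec![(1.into(), address2)].into_iter().collect();
    assert!(!is_migration_required(&current_set, &same_nodes_set));

    // node added => migration is required
    let extended_set = vec![(1.into(), address1), (2.into(), address1)]
        .into_iter()
        .collect();
    assert!(is_migration_required(&current_set, &extended_set));

    // node removed => migration is required
    let empty_set = BTreeMap::new();
    assert!(is_migration_required(&current_set, &empty_set));
}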
}